changeset 126:4efe6d36c061

minor edits
author Yoshua Bengio <bengioy@iro.umontreal.ca>
date Wed, 07 May 2008 16:57:48 -0400
parents 2ca8dccba270
children f959ad58facc
files learner.py mlp.py
diffstat 2 files changed, 6 insertions(+), 6 deletions(-)
--- a/learner.py	Wed May 07 16:08:18 2008 -0400
+++ b/learner.py	Wed May 07 16:57:48 2008 -0400
@@ -56,7 +56,7 @@
         Optionally, if copy_inputs, the input fields (of the input_dataset) can be made
         visible in the output DataSet returned by this method.
         """
-        raise NotImplementedError
+        raise AbstractFunction()
 
     def attributeNames(self):
         """
@@ -228,9 +228,9 @@
         If a test_stats_collector is provided, then its attributes (test_stats_collector.AttributeNames())
         are also copied into the output dataset attributes.
         """
-        minibatchwise_use_function = minibatchwise_use_functions(input_dataset.fieldNames(),
-                                                                  output_fieldnames,
-                                                                  test_stats_collector)
+        minibatchwise_use_function = self.minibatchwise_use_functions(input_dataset.fieldNames(),
+                                                                      output_fieldnames,
+                                                                      test_stats_collector)
         virtual_output_dataset = ApplyFunctionDataSet(input_dataset,
                                                       minibatchwise_use_function,
                                                       True,DataSet.numpy_vstack,
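
The learner.py hunks above make two small fixes: replacing NotImplementedError with AbstractFunction() raises the repository's own marker exception for methods that concrete Learner subclasses must override, and prefixing minibatchwise_use_functions with self. turns what was a NameError (there is no module-level function of that name) into a proper method dispatch. A minimal sketch of the pattern; AbstractFunction being an Exception subclass, and the apply_use method name, are assumptions for illustration, not code from the repository:

class AbstractFunction(Exception):
    """Assumed definition: raised when a method meant to be
    overridden is called on the base class."""

class Learner:
    def use(self, input_dataset, output_fieldnames=None):
        # Concrete subclasses must override this method; a dedicated
        # exception distinguishes "not overridden" from other errors.
        raise AbstractFunction()

    def minibatchwise_use_functions(self, input_fieldnames,
                                    output_fieldnames, stats_collector):
        # Also abstract: builds the per-minibatch functions applied below.
        raise AbstractFunction()

    def apply_use(self, input_dataset, output_fieldnames, stats_collector):
        # The second hunk's bug: without the self. prefix, Python looks up
        # minibatchwise_use_functions as a global name and fails with
        # NameError at runtime instead of dispatching to the subclass.
        fn = self.minibatchwise_use_functions(input_dataset.fieldNames(),
                                              output_fieldnames,
                                              stats_collector)
        return fn
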
--- a/mlp.py	Wed May 07 16:08:18 2008 -0400
+++ b/mlp.py	Wed May 07 16:57:48 2008 -0400
@@ -71,13 +71,13 @@
         self.learning_rate = learning_rate # this is the float
         self._learning_rate = t.scalar('learning_rate') # this is the symbol
         self._input = t.matrix('input') # n_examples x n_inputs
-        self._target = t.matrix('target','int32') # n_examples x n_outputs
+        self._target = t.ivector('target') # n_examples x n_outputs
         self._L2_regularizer = t.scalar('L2_regularizer')
         self._W1 = t.matrix('W1')
         self._W2 = t.matrix('W2')
         self._b1 = t.row('b1')
         self._b2 = t.row('b2')
-        self._regularization_term = self._L2_regularizer * (t.dot(self._W1,self._W1) + t.dot(self._W2,self._W2))
+        self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2))
         self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
         self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target)
         self._output_class = t.argmax(self._output,1)
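
The mlp.py hunk fixes two bugs in the symbolic graph. First, crossentropy_softmax_1hot consumes integer class indices, one label per example, so the target is declared as an int32 vector (t.ivector) rather than an int32 matrix. Second, for a weight matrix W, t.dot(W, W) is a matrix product, not a sum of squared entries (and is only even defined when W is square), so the L2 penalty is rewritten as t.sum(W*W), the squared Frobenius norm. A NumPy check of that algebra, with NumPy standing in for Theano's tensor ops, which follow the same semantics:

import numpy as np

W = np.array([[1.0, 2.0],
              [3.0, 4.0]])

# Matrix product: a 2x2 matrix, not a scalar penalty.
print(np.dot(W, W))                    # [[ 7. 10.] [15. 22.]]

# Elementwise square then sum: the squared Frobenius norm, a scalar.
print(np.sum(W * W))                   # 30.0
print(np.linalg.norm(W, 'fro') ** 2)   # 30.0, same value
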