diff mlp.py @ 180:2698c0feeb54

mlp seems to work!
author Yoshua Bengio <bengioy@iro.umontreal.ca>
date Tue, 13 May 2008 15:35:43 -0400
parents 9911d2cc3c01
children 4afb41e61fcf
--- a/mlp.py	Tue May 13 15:14:04 2008 -0400
+++ b/mlp.py	Tue May 13 15:35:43 2008 -0400
@@ -87,7 +87,7 @@
         self._b2 = t.row('b2')
         self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2))
         self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
-        self._nll,self._output = crossentropy_softmax_1hot(Print("output_activations")(self._output_activations),self._target_vector)
+        self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target_vector)
         self._output_class = t.argmax(self._output,1)
         self._class_error = t.neq(self._output_class,self._target_vector)
         self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
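
Note on this hunk: the edit drops the Print("output_activations") wrapper (Theano's runtime-printing debug op) so the activations feed straight into crossentropy_softmax_1hot, which fuses the softmax with the negative log-likelihood over integer class targets. A minimal NumPy sketch of the quantity that call returns, assuming the (nll, softmax) return pair seen above; this is an illustration, not Theano's implementation:

    import numpy as np

    def crossentropy_softmax_1hot(activations, targets):
        # activations: (n_examples, n_classes); targets: integer class indices
        shifted = activations - activations.max(axis=1, keepdims=True)  # numerical stability
        softmax = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
        nll = -np.log(softmax[np.arange(len(targets)), targets])  # per-example NLL
        return nll, softmax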
@@ -102,6 +102,9 @@
     def updateMinibatchInputFields(self):
         return ["input","target"]
     
+    def updateMinibatchInputAttributes(self):
+        return OnlineGradientTLearner.updateMinibatchInputAttributes(self)+["L2_regularizer"]
+    
     def updateEndOutputAttributes(self):
         return ["regularization_term"]
 
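Note on this hunk: the new updateMinibatchInputAttributes override appends "L2_regularizer" to the inherited attribute list, so the regularizer becomes an input of each minibatch update rather than a value fixed when the graph is built. A hedged sketch of the extend-the-parent-list pattern; the base-class attribute names below are invented for illustration:

    class OnlineGradientTLearner(object):
        def updateMinibatchInputAttributes(self):
            # base class: attributes read by every minibatch update
            # (this particular list is a stand-in, not the real one)
            return ["W1", "b1", "W2", "b2"]

    class MLP(OnlineGradientTLearner):
        def updateMinibatchInputAttributes(self):
            # extend rather than replace the inherited list, as in the hunk
            return (OnlineGradientTLearner.updateMinibatchInputAttributes(self)
                    + ["L2_regularizer"])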
@@ -141,7 +144,7 @@
         self._n_epochs +=1
         return self._n_epochs>=self._max_n_epochs
 
-    def updateMinibatch(self,minibatch):
+    def debug_updateMinibatch(self,minibatch):
         # make sure all required fields are allocated and initialized
         self.allocate(minibatch)
         input_attributes = self.names2attributes(self.updateMinibatchInputAttributes())
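
Note on this hunk: renaming updateMinibatch to debug_updateMinibatch removes the override, so attribute lookup now reaches the base class's updateMinibatch while the instrumented copy stays in the source for later debugging. A small sketch of that rename-to-disable pattern, with illustrative class names:

    class Base(object):
        def updateMinibatch(self, minibatch):
            return "base update runs"

    class Learner(Base):
        # renamed from updateMinibatch: it no longer shadows the base method
        def debug_updateMinibatch(self, minibatch):
            return "instrumented update, kept around for debugging"

    assert Learner().updateMinibatch(None) == "base update runs"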