diff deep/stacked_dae/v_sylvain/sgd_optimization.py @ 397:5e11dda78995

Add functionality to easily compute an error on different parts of NIST
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Tue, 27 Apr 2010 20:50:56 -0400
parents 88cb95007670
children 09e1c5872c2b
line wrap: on
line diff
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Tue Apr 27 13:47:33 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Tue Apr 27 20:50:56 2010 -0400
@@ -394,17 +394,27 @@
             else:
                 self.classifier.params[idx].value=copy(x)
 
-    def training_error(self,dataset):
+    def training_error(self,dataset,part=0):
         # create a function to compute the mistakes that are made by the model
         # on the validation set, or testing set
         test_model = \
             theano.function(
                 [self.classifier.x,self.classifier.y], self.classifier.errors)
-                
-        iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
+        #train
+        if part == 0:      
+            iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
+            name = 'train'
+        #validation
+        elif part == 1:
+            iter2 = dataset.valid(self.hp.minibatch_size,bufsize=buffersize)
+            name = 'validation'
+        elif part == 2:    #test
+            iter2 = dataset.test(self.hp.minibatch_size,bufsize=buffersize)
+            name = 'test'
         train_losses2 = [test_model(x,y) for x,y in iter2]
         train_score2 = numpy.mean(train_losses2)
-        print(('The training error is %f')%(train_score2*100.))
+        print 'On the ' + name + ' dataset'
+        print(('\t the error is %f')%(train_score2*100.))
     
     #To see the prediction of the model, the real answer and the image to judge    
     def see_error(self, dataset):
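
For context, a minimal usage sketch of the new part argument (the optimizer and dataset objects below are assumed placeholders for the SdA optimizer and dataset wrapper used elsewhere in this repository, not code taken from this changeset):

    # Hypothetical driver: report the error on each part of the dataset.
    # part=0 -> train, part=1 -> validation, part=2 -> test, following the
    # convention introduced in training_error above.
    for part in (0, 1, 2):
        optimizer.training_error(dataset, part=part)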