diff deep/stacked_dae/v_sylvain/sgd_optimization.py @ 384:8117c0e70db9

Add the ability to use the PNIST07 dataset
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Tue, 27 Apr 2010 08:20:37 -0400
parents f24b10e43a6f
children 88cb95007670
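
In short: finetune() gains an optional dataset_test2 argument, and ind_test == 2
selects the new regime: train on PNIST07, validate and test on NIST, and report a
second test score on P07. A minimal call sketch (the optimizer object and the
dataset handles here are hypothetical; only the finetune signature comes from
this changeset):

    # hypothetical call site: train on PNIST07, test on NIST, second test on P07
    optimizer.finetune(pnist07_set, nist_set, num_finetune=1000, ind_test=2,
                       dataset_test2=p07_set)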
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Tue Apr 27 08:20:12 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Tue Apr 27 08:20:37 2010 -0400
@@ -156,7 +156,7 @@
         f.close()
 
 
-    def finetune(self,dataset,dataset_test,num_finetune,ind_test,special=0,decrease=0):
+    def finetune(self,dataset,dataset_test,num_finetune,ind_test,special=0,decrease=0,dataset_test2=None):
         
         if special != 0 and special != 1:
             sys.exit('Bad value for variable special. Must be in {0,1}')
@@ -166,6 +166,10 @@
         if ind_test == 0 or ind_test == 20:
             nom_test = "NIST"
             nom_train="P07"
+        elif ind_test == 2:
+            nom_train = "PNIST07"
+            nom_test = "NIST"
+            nom_test2 = "P07"
         else:
             nom_test = "P07"
             nom_train = "NIST"
@@ -238,8 +242,8 @@
                 if (total_mb_index+1) % validation_frequency == 0: 
                     #minibatch_index += 1
                     #The validation set is always NIST (we want the model to be good on NIST)
-                    if ind_test == 0 | ind_test == 20:
-                        iter=dataset_test.valid(minibatch_size,bufsize=buffersize)
+                    if ind_test == 0 or ind_test == 20 or ind_test == 2:
+                        iter=dataset_test.valid(minibatch_size,bufsize=buffersize)
                     else:
                         iter = dataset.valid(minibatch_size,bufsize=buffersize)
                     if self.max_minibatches:
@@ -281,6 +285,14 @@
                             iter2 = itermax(iter2, self.max_minibatches)
                         test_losses2 = [test_model(x,y) for x,y in iter2]
                         test_score2 = numpy.mean(test_losses2)
+
+                        #test it on the third test set if there is one
+                        if dataset_test2 is not None:
+                            iter3 = dataset_test2.test(minibatch_size, bufsize=buffersize)
+                            if self.max_minibatches:
+                                iter3 = itermax(iter3, self.max_minibatches)
+                            test_losses3 = [test_model(x,y) for x,y in iter3]
+                            test_score3 = numpy.mean(test_losses3)
 
                         self.series["test_error"].\
                             append((epoch, minibatch_index), test_score*100.)
@@ -294,6 +306,11 @@
                               'model %f %%') % 
                                      (epoch, minibatch_index+1,nom_test,
                                       test_score2*100.))
+                        if dataset_test2 is not None:
+                            print(('     epoch %i, minibatch %i, test error on dataset %s of best '
+                                  'model %f %%') %
+                                         (epoch, minibatch_index+1,nom_test2,
+                                          test_score3*100.))
                     
                     if patience <= total_mb_index:
                         done_looping = True
@@ -306,7 +323,7 @@
                     break
             
             if decrease == 1:
-                if (ind_test == 21 & epoch % 100 == 0) | ind_test == 20:
+                if (ind_test == 21 and epoch % 100 == 0) or ind_test == 20 or ind_test == 2:
                     learning_rate /= 2 #divide the learning rate by 2 for each new epoch of P07 (or 100 of NIST)
             
             self.series['params'].append((epoch,), self.classifier.all_params)
@@ -324,6 +341,8 @@
                'with test performance %f %% on dataset %s ') %  
                      (best_validation_loss * 100., test_score*100.,nom_train))
         print(('The test score on the %s dataset is %f')%(nom_test,test_score2*100.))
+        if dataset_test2 is not None:
+            print(('The test score on the %s dataset is %f')%(nom_test2,test_score3*100.))
         
         print ('The finetuning ran for %f minutes' % ((end_time-start_time)/60.))
         
@@ -351,6 +370,10 @@
             f = open('params_finetune_P07_then_NIST.txt', 'w')
             cPickle.dump(parameters_finetune,f,protocol=-1)
             f.close()
+        elif ind_test == 2:
+            f = open('params_finetune_PNIST07.txt', 'w')
+            cPickle.dump(parameters_finetune,f,protocol=-1)
+            f.close()
         
 
    #Set parameters like they were right after pre-train or finetune
@@ -376,7 +399,7 @@
         iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
         train_losses2 = [test_model(x,y) for x,y in iter2]
         train_score2 = numpy.mean(train_losses2)
-        print "Training error is: " + str(train_score2)
+        print(('The training error is %f')%(train_score2*100.))
     
    #To see the model's prediction, the true answer and the image, for visual inspection
     def see_error(self, dataset):
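
For reference, a sketch of reading back the parameters that the new ind_test == 2
branch pickles (the file name, cPickle and protocol=-1 come from the diff; opening
in 'rb' mode is an assumption, since protocol=-1 writes a binary pickle):

    import cPickle

    # reload the finetuned PNIST07 parameters saved by finetune()
    f = open('params_finetune_PNIST07.txt', 'rb')
    parameters_finetune = cPickle.load(f)
    f.close()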