changeset 281:a8b92a4a708d

Added a method that connects all the hidden layers to the logistic regression and changes only the logistic regression's parameters during fine-tuning
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Wed, 24 Mar 2010 14:44:41 -0400
parents c77ffb11f91d
children 698313f8f6e6
files deep/stacked_dae/v_sylvain/sgd_optimization.py
diffstat 1 files changed, 74 insertions(+), 20 deletions(-)
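Note on the change: the diff below adds a second fine-tuning path, selected with special=1, which calls self.classifier.finetune2(x,y). According to the commit message, that method connects all the hidden layers to the logistic regression and updates only the logistic regression's parameters. The following is a minimal sketch of how such an update step could be built with Theano; it is not this repository's implementation, and the names build_finetune2, hidden_outputs, n_in, n_out and learning_rate are illustrative assumptions.

# Hypothetical sketch (not this changeset's code): fine-tune only the
# logistic regression sitting on top of the concatenated hidden-layer outputs.
import numpy
import theano
import theano.tensor as T

def build_finetune2(x, y, hidden_outputs, n_in, n_out, learning_rate=0.1):
    # hidden_outputs: list of symbolic outputs of the (frozen) hidden layers,
    # all built from the symbolic input x; n_in is their total concatenated width.
    joint = T.concatenate(hidden_outputs, axis=1)
    W = theano.shared(numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W')
    b = theano.shared(numpy.zeros((n_out,), dtype=theano.config.floatX), name='b')
    p_y_given_x = T.nnet.softmax(T.dot(joint, W) + b)
    # negative log-likelihood of the labels y under the softmax output
    cost = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])
    # gradients are taken w.r.t. W and b only, so the pre-trained
    # hidden layers are never modified during this fine-tuning
    g_W, g_b = T.grad(cost, [W, b])
    updates = [(W, W - learning_rate * g_W), (b, b - learning_rate * g_b)]
    return theano.function([x, y], cost, updates=updates)

A function compiled this way, called once per minibatch, would play the role of self.classifier.finetune2(x,y) in the training loop below.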
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Wed Mar 24 14:44:24 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Wed Mar 24 14:44:41 2010 -0400
@@ -95,8 +95,11 @@
             for epoch in xrange(self.hp.pretraining_epochs_per_layer):
                 # go through the training set
                 batch_index=0
+                count=0
+                num_files=0
                 for x,y in dataset.train(self.hp.minibatch_size):
                     c = self.classifier.pretrain_functions[i](x)
+                    count +=1
 
                     self.series["reconstruction_error"].append((epoch, batch_index), c)
                     batch_index+=1
@@ -107,11 +110,21 @@
                     # useful when doing tests
                     if self.max_minibatches and batch_index >= self.max_minibatches:
                         break
-                        
-                print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
-                sys.stdout.flush()
+                    
+                    #When we pass through the data only once (the case with P07)
+                    #There are approximately 800*1024 = 819200 examples per file (1 KB per example; files are 800 MB)
+                    if self.hp.pretraining_epochs_per_layer == 1 and count%819200 == 0:
+                        print 'Pre-training layer %i, file %d, cost '%(i,num_files),c
+                        num_files+=1
+                        sys.stdout.flush()
+                        self.series['params'].append((num_files,), self.classifier.all_params)
+                
+                #When NIST is used
+                if self.hp.pretraining_epochs_per_layer > 1:        
+                    print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
+                    sys.stdout.flush()
 
-                self.series['params'].append((epoch,), self.classifier.all_params)
+                    self.series['params'].append((epoch,), self.classifier.all_params)
      
         end_time = time.clock()
 
@@ -127,14 +140,19 @@
         f.close()
 
 
-    def finetune(self,dataset,dataset_test,num_finetune,ind_test):
+    def finetune(self,dataset,dataset_test,num_finetune,ind_test,special=0):
+        
+        if special != 0 and special != 1:
+            sys.exit('Bad value for variable special. Must be in {0,1}')
         print "STARTING FINETUNING, time = ", datetime.datetime.now()
 
         minibatch_size = self.hp.minibatch_size
-        if ind_test == 0:
+        if ind_test == 0 or ind_test == 20:
             nom_test = "NIST"
+            nom_train="P07"
         else:
             nom_test = "P07"
+            nom_train = "NIST"
 
 
         # create a function to compute the mistakes that are made by the model
@@ -183,14 +201,21 @@
             minibatch_index = -1
             for x,y in dataset.train(minibatch_size):
                 minibatch_index += 1
-                cost_ij = self.classifier.finetune(x,y)
+                if special == 0:
+                    cost_ij = self.classifier.finetune(x,y)
+                elif special == 1:
+                    cost_ij = self.classifier.finetune2(x,y)
                 total_mb_index += 1
 
                 self.series["training_error"].append((epoch, minibatch_index), cost_ij)
 
                 if (total_mb_index+1) % validation_frequency == 0: 
                     
-                    iter = dataset.valid(minibatch_size)
+                    #The validation set is always NIST
+                    if ind_test == 0:
+                        iter=dataset_test.valid(minibatch_size)
+                    else:
+                        iter = dataset.valid(minibatch_size)
                     if self.max_minibatches:
                         iter = itermax(iter, self.max_minibatches)
                     validation_losses = [validate_model(x,y) for x,y in iter]
@@ -199,8 +224,8 @@
                     self.series["validation_error"].\
                         append((epoch, minibatch_index), this_validation_loss*100.)
 
-                    print('epoch %i, minibatch %i, validation error %f %%' % \
-                           (epoch, minibatch_index+1, \
+                    print('epoch %i, minibatch %i, validation error on %s : %f %%' % \
+                           (epoch, minibatch_index+1,nom_test, \
                             this_validation_loss*100.))
 
 
@@ -233,16 +258,20 @@
                         self.series["test_error"].\
                             append((epoch, minibatch_index), test_score*100.)
 
-                        print(('     epoch %i, minibatch %i, test error of best '
+                        print(('     epoch %i, minibatch %i, test error on dataset %s  (train data) of best '
                               'model %f %%') % 
-                                     (epoch, minibatch_index+1,
+                                     (epoch, minibatch_index+1,nom_train,
                                       test_score*100.))
                                     
                         print(('     epoch %i, minibatch %i, test error on dataset %s of best '
                               'model %f %%') % 
                                      (epoch, minibatch_index+1,nom_test,
                                       test_score2*100.))
-
+                    
+                    if patience <= total_mb_index:
+                        done_looping = True
+                        break
+                    
                     sys.stdout.flush()
 
                 # useful when doing tests
@@ -251,8 +280,7 @@
 
             self.series['params'].append((epoch,), self.classifier.all_params)
 
-            if patience <= total_mb_index:
-                done_looping = True
+            if done_looping == True:    #To exit the fine-tuning loop completely
                 break
 
         end_time = time.clock()
@@ -261,19 +289,45 @@
                     'test_score':test_score,
                     'num_finetuning_epochs':epoch})
 
-        print(('Optimization complete with best validation score of %f %%,'
-               'with test performance %f %%') %  
-                     (best_validation_loss * 100., test_score*100.))
+        print(('\nOptimization complete with best validation score of %f %%,'
+               'with test performance %f %% on dataset %s ') %  
+                     (best_validation_loss * 100., test_score*100.,nom_train))
         print(('The test score on the %s dataset is %f')%(nom_test,test_score2*100.))
         
         print ('The finetuning ran for %f minutes' % ((end_time-start_time)/60.))
         
+        #Save a copy of the parameters to a file so they can be reloaded later
+        
+        if special == 1:    #To keep track of the parameter values
+            parameters_finetune=[copy(x.value) for x in self.classifier.params]
+            f = open('params_finetune_stanford.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+        
+        elif ind_test == 0:    #To keep track of the parameter values
+            parameters_finetune=[copy(x.value) for x in self.classifier.params]
+            f = open('params_finetune_P07.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+
+        elif ind_test == 1:    #For the run with 2 fine-tuning phases; it will be faster.
+            parameters_finetune=[copy(x.value) for x in self.classifier.params]
+            f = open('params_finetune_NIST.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+        
+        elif ind_test == 20:    #To keep track of the parameter values
+            parameters_finetune=[copy(x.value) for x in self.classifier.params]
+            f = open('params_finetune_NIST_then_P07.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+        
 
     #Set parameters like they were right after pre-training
-    def reload_parameters(self):
+    def reload_parameters(self,which):
         
         #self.parameters_pre=pickle.load('params_pretrain.txt')
-        f = open('params_pretrain.txt')
+        f = open(which)
         self.parameters_pre=pickle.load(f)
         f.close()
         for idx,x in enumerate(self.parameters_pre):