changeset 352:cfb79f9fd1a4

Add a feature to allow a decreasing learning rate during pretraining
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Wed, 21 Apr 2010 14:50:59 -0400
parents 799ad23a161f
children bc4464c0894c
files deep/stacked_dae/v_sylvain/sgd_optimization.py
diffstat 1 files changed, 15 insertions(+), 5 deletions(-)
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Wed Apr 21 14:07:53 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Wed Apr 21 14:50:59 2010 -0400
@@ -88,30 +88,40 @@
         self.pretrain(self.dataset)
         self.finetune(self.dataset)
 
-    def pretrain(self,dataset):
+    def pretrain(self,dataset,decrease=0):
         print "STARTING PRETRAINING, time = ", datetime.datetime.now()
         sys.stdout.flush()
         
         un_fichier=int(819200.0/self.hp.minibatch_size) #Number of batches in a P07 file
 
         start_time = time.clock()  
+        
+        ########  This is hardcoded. The 0.95 parameter is hardcoded and can be changed at will  ###
+        #Set the decreasing rate of the learning rate. We want the final learning rate to
+        #be 5% of the original learning rate. The decreasing factor is linear
+        decreasing = (decrease*self.hp.pretraining_lr)/float(self.hp.pretraining_epochs_per_layer*800000/self.hp.minibatch_size)
+        
         ## Pre-train layer-wise 
         for i in xrange(self.classifier.n_layers):
             # go through pretraining epochs 
+            
+            #To reset the learning rate to its original value
+            learning_rate=self.hp.pretraining_lr
             for epoch in xrange(self.hp.pretraining_epochs_per_layer):
                 # go through the training set
                 batch_index=0
                 count=0
                 num_files=0
                 for x,y in dataset.train(self.hp.minibatch_size):
-                    c = self.classifier.pretrain_functions[i](x)
+                    c = self.classifier.pretrain_functions[i](x,learning_rate)
                     count +=1
 
                     self.series["reconstruction_error"].append((epoch, batch_index), c)
                     batch_index+=1
 
-                    #if batch_index % 100 == 0:
-                    #    print "100 batches"
+                    #If we need to decrease the learning rate during pretraining
+                    if decrease != 0:
+                        learning_rate -= decreasing
 
                     # useful when doing tests
                     if self.max_minibatches and batch_index >= self.max_minibatches:
@@ -205,7 +215,7 @@
         parameters_finetune=[]
         
         if ind_test == 21:
-            learning_rate = self.hp.finetuning_lr / 5.0
+            learning_rate = self.hp.finetuning_lr / 10.0
         else:
             learning_rate = self.hp.finetuning_lr  #The initial finetune lr
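
For illustration, a minimal sketch of the linear decay schedule this patch adds to pretrain(), written standalone in Python 2 like the patched file. All hyperparameter values below (pretraining_lr, epochs per layer, minibatch size, decrease) are placeholder assumptions, not the experiment's actual configuration.

    # Minimal sketch (Python 2) of the linear learning-rate decay in pretrain().
    # The hyperparameter values below are illustrative assumptions only.
    pretraining_lr = 0.01               # initial pretraining learning rate
    pretraining_epochs_per_layer = 2    # epochs per layer
    minibatch_size = 20                 # examples per minibatch
    decrease = 0.95                     # fraction of the lr shed over pretraining

    # Total minibatches seen per layer (800000 examples per epoch, as in the
    # patch) and the per-minibatch decrement.
    n_batches = pretraining_epochs_per_layer * 800000 / minibatch_size
    decreasing = (decrease * pretraining_lr) / float(n_batches)

    learning_rate = pretraining_lr      # reset at the start of each layer
    for batch in xrange(n_batches):
        # ... the layer's pretraining function would be called here with
        # the current learning_rate ...
        if decrease != 0:
            learning_rate -= decreasing

    # After all batches the rate is (1 - decrease) * pretraining_lr,
    # i.e. 5% of its original value when decrease == 0.95.
    print learning_rate                 # ~0.0005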