diff deep/stacked_dae/v_sylvain/sgd_optimization.py @ 389:88cb95007670

Add an improved finetune option for PNIST07
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Tue, 27 Apr 2010 08:43:04 -0400
parents 8117c0e70db9
children 5e11dda78995
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Tue Apr 27 08:42:43 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Tue Apr 27 08:43:04 2010 -0400
@@ -166,10 +166,14 @@
         if ind_test == 0 or ind_test == 20:
             nom_test = "NIST"
             nom_train="P07"
-        elif ind_test == 2:
+        elif ind_test == 30:
             nom_train = "PNIST07"
             nom_test = "NIST"
             nom_test2 = "P07"
+        elif ind_test == 31:
+            nom_train = "NIST"
+            nom_test = "PNIST07"
+            nom_test2 = "P07"
         else:
             nom_test = "P07"
             nom_train = "NIST"
@@ -218,7 +222,7 @@
         minibatch_index = 0
         parameters_finetune=[]
         
-        if ind_test == 21:
+        if ind_test == 21 or ind_test == 31:
             learning_rate = self.hp.finetuning_lr / 10.0
         else:
             learning_rate = self.hp.finetuning_lr  #The initial finetune lr
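
This test needs the boolean or: Python's bitwise | binds tighter than ==, and comparisons chain, so ind_test == 21 | ind_test == 31 parses as ind_test == (21 | ind_test) == 31, which is false even when ind_test is 21. A quick sketch of the pitfall:

    ind_test = 21
    # Parses as ind_test == (21 | ind_test) == 31,
    # i.e. (21 == 21) and (21 == 31)  ->  False.
    print(ind_test == 21 | ind_test == 31)   # False
    # Boolean `or` keeps each comparison intact.
    print(ind_test == 21 or ind_test == 31)  # True

The same applies to & versus and in the decay condition further down.
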
@@ -242,7 +246,7 @@
                 if (total_mb_index+1) % validation_frequency == 0: 
                     #minibatch_index += 1
                     #The validation set is always NIST (we want the model to be good on NIST)
-                    if ind_test == 0 | ind_test == 20 | ind_test == 2:
+                    if ind_test == 0 or ind_test == 20 or ind_test == 30:
                         iter=dataset_test.valid(minibatch_size,bufsize=buffersize)                        
                     else:
                         iter = dataset.valid(minibatch_size,bufsize=buffersize)
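
Per the comment in this hunk, validation always runs on NIST: for codes 0, 20 and 30 the training stream is P07 or PNIST07, so the NIST validation minibatches come from dataset_test; for the NIST-trained runs, dataset.valid already yields NIST. A sketch of the selection, assuming dataset objects with the .valid(minibatch_size, bufsize=...) interface used above; the helper name is hypothetical:

    def nist_validation_iter(dataset, dataset_test, ind_test,
                             minibatch_size, buffersize):
        # Codes whose training stream is not NIST; their NIST validation
        # minibatches must come from the test-side dataset object.
        if ind_test in (0, 20, 30):
            return dataset_test.valid(minibatch_size, bufsize=buffersize)
        return dataset.valid(minibatch_size, bufsize=buffersize)
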
@@ -321,7 +325,7 @@
                     break
             
             if decrease == 1:
-                if (ind_test == 21 & epoch % 100 == 0) | ind_test == 20 | ind_test == 2:
+                if (ind_test == 21 and epoch % 100 == 0) or ind_test == 20 or ind_test == 30 or (ind_test == 31 and epoch % 100 == 0):
                     learning_rate /= 2 #divide the learning rate by 2 for each new epoch of P07 (or 100 of NIST)
             
             self.series['params'].append((epoch,), self.classifier.all_params)
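
The decay rule after this change, applied when decrease is enabled: halve the learning rate every epoch for the P07 and PNIST07 runs (codes 20 and 30), but only every 100 epochs for the "then NIST" runs (codes 21 and 31). A minimal sketch; the decayed_lr helper is hypothetical:

    def decayed_lr(learning_rate, ind_test, epoch):
        # Halve every epoch on P07/PNIST07 (20, 30); every 100 epochs
        # when finetuning on NIST after another set (21, 31).
        halve_now = (ind_test in (20, 30)
                     or (ind_test in (21, 31) and epoch % 100 == 0))
        return learning_rate / 2.0 if halve_now else learning_rate
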
@@ -367,10 +371,14 @@
             f = open('params_finetune_P07_then_NIST.txt', 'w')
             cPickle.dump(parameters_finetune,f,protocol=-1)
             f.close()
-        elif ind_test == 2:
+        elif ind_test == 30:
             f = open('params_finetune_PNIST07.txt', 'w')
             cPickle.dump(parameters_finetune,f,protocol=-1)
             f.close()
+        elif ind_test == 31:
+            f = open('params_finetune_PNIST07_then_NIST.txt', 'w')
+            cPickle.dump(parameters_finetune,f,protocol=-1)
+            f.close()
         
 
     #Set parameters like they were right after pre-train or finetune
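
Each branch above pickles the finetuned parameters with protocol=-1, i.e. the highest pickle protocol available. A hypothetical round trip mirroring those dump calls, using Python 2's cPickle as in the hunk; the placeholder parameters and the read-back are illustrative only ('wb'/'rb' is used here because binary pickles are safer in binary mode):

    import cPickle

    parameters_finetune = [('W', [[0.1, 0.2]]), ('b', [0.0])]  # placeholder

    # Write with the highest protocol (-1), as the hunk does.
    f = open('params_finetune_PNIST07_then_NIST.txt', 'wb')
    cPickle.dump(parameters_finetune, f, protocol=-1)
    f.close()

    # Matching read path (not part of the changeset).
    f = open('params_finetune_PNIST07_then_NIST.txt', 'rb')
    restored = cPickle.load(f)
    f.close()
    assert restored == parameters_finetune
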