changeset 364:c05680f8c92f

Fixing a wrong commit and committing more files.
author humel
date Thu, 22 Apr 2010 19:50:21 -0400
parents 31641a84e0ae
children 22919039f7ab
files deep/convolutional_dae/salah_exp/config.py deep/convolutional_dae/salah_exp/nist_csda.py deep/convolutional_dae/salah_exp/sgd_optimization.py deep/convolutional_dae/salah_exp/sgd_optimization_new.py deep/convolutional_dae/salah_exp/stacked_convolutional_dae_uit.py
diffstat 5 files changed, 573 insertions(+), 377 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/deep/convolutional_dae/salah_exp/config.py	Thu Apr 22 19:50:21 2010 -0400
@@ -0,0 +1,177 @@
+'''
+These are parameters used by nist_csda.py. They'll end up as globals in there.
+
+Rename this file to config.py and configure as needed.
+DON'T add the renamed file to the repository, as others might use it
+without realizing it, with dire consequences.
+'''
+
+# Set this to True when you want to run cluster tests, i.e. you want to
+# launch many jobs on the cluster but with a reduced training set size and
+# number of epochs, just to check that everything runs fine there.
+# Set this PRIOR to inserting your test jobs in the DB.
+TEST_CONFIG = False
+
+NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
+NIST_ALL_TRAIN_SIZE = 649081
+# valid and test sets: 82587 examples each
+
+# change "sandbox" when you're ready
+JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/rifaisal_csda'
+EXPERIMENT_PATH = "ift6266.deep.convolutional_dae.salah_exp.nist_csda.jobman_entrypoint"
+
+## To launch jobs on the cluster (run from the directory containing these files):
+##   python nist_csda.py jobman_insert
+##   dbidispatch --condor --repeat_jobs=2 jobman sql 'postgres://ift6266h10@gershwin/ift6266h10_db/pannetis_finetuningSDA0' .  # the DB path here must match JOBDB in config.py
+
+# reduce training set to that many examples
+REDUCE_TRAIN_TO = None
+# this is a maximum; early stopping usually ends finetuning sooner
+MAX_FINETUNING_EPOCHS = 1000
+# number of minibatches before taking means for valid error etc.
+REDUCE_EVERY = 100
+# Set the finetuning dataset
+FINETUNE_SET = 1
+# Set the pretraining dataset used. 0: NIST, 1: P07
+PRETRAIN_CHOICE = 1
+
+
+if TEST_CONFIG:
+    REDUCE_TRAIN_TO = 1000
+    MAX_FINETUNING_EPOCHS = 2
+    REDUCE_EVERY = 10
+
+
+# This configures the insertion of jobs on the cluster: the possible values
+# each hyperparameter can take. They are combined with produit_cartesien_jobs
+# to get the list of all possible combinations, each one resulting in a job
+# inserted into the jobman DB (see the sketch after JOB_VALS below).
+
+
+JOB_VALS = {'pretraining_lr': [0.01],#, 0.001],#, 0.0001],
+        'pretraining_epochs_per_layer': [10],
+        'kernels' : [[[52,5,5], [32,3,3]], [[52,7,7], [52,3,3]]],
+        'mlp_size' : [[1000],[500]],
+        'imgshp' : [[32,32]],
+        'max_pool_layers' : [[[2,2],[2,2]]],
+        'corruption_levels': [[0.2,0.1]],
+        'minibatch_size': [100],
+        'max_finetuning_epochs':[MAX_FINETUNING_EPOCHS],
+        'max_finetuning_epochs_P07':[1000],
+        'finetuning_lr':[0.1,0.01], #0.001 was very bad, so we leave it out
+        'num_hidden_layers':[2],
+        'finetune_set':[1],
+        'pretrain_choice':[1]
+        }
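+
+# A minimal sketch (not used by the experiments) of how a grid like JOB_VALS
+# can be expanded into one state dict per job; produit_cartesien_jobs in
+# ift6266 is assumed to do something equivalent before each dict is inserted
+# into the jobman DB. The helper name expand_job_grid is ours.
+##
+## import itertools
+##
+## def expand_job_grid(job_vals):
+##     keys = sorted(job_vals)
+##     for combo in itertools.product(*(job_vals[k] for k in keys)):
+##         yield dict(zip(keys, combo))
+##
+## # With JOB_VALS above this yields 2*2*2 = 8 jobs
+## # (kernels x mlp_size x finetuning_lr; every other key has a single value).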
+
+DEFAULT_HP_NIST = {'pretraining_lr': 0.01,
+        'pretraining_epochs_per_layer': 1,
+        'kernels' : [[4,5,5], [2,3,3]],
+        'mlp_size' : [10],
+        'imgshp' : [32,32],
+        'max_pool_layers' : [[2,2],[2,2]],
+        'corruption_levels': [0.1,0.2],
+        'minibatch_size': 20,
+        'max_finetuning_epochs':MAX_FINETUNING_EPOCHS,
+        'max_finetuning_epochs_P07':1000,
+        'finetuning_lr':0.1, #0.001 was very bad, so we leave it out
+        'num_hidden_layers':2,
+        'finetune_set':1,
+        'pretrain_choice':1,
+        #'reduce_train_to':1000,
+        }
+
+                    
+                    
+##[pannetis@ceylon test]$ python nist_sda.py test_jobman_entrypoint
+##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/TMP_DBI/configobj.py
+##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/TMP_DBI/utils.py
+##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/config.py
+##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/config2.py
+##Creating optimizer with state,  DD{'reduce_train_to': 11000, 'pretraining_epochs_per_layer': 2, 'hidden_layers_sizes': 300, 'num_hidden_layers': 2, 'corruption_levels': 0.20000000000000001, 'finetuning_lr': 0.10000000000000001, 'pretrain_choice': 0, 'max_finetuning_epochs': 2, 'version_pylearn': '08b37147dec1', 'finetune_set': -1, 'pretraining_lr': 0.10000000000000001, 'version_ift6266': 'a6b6b1140de9', 'version_theano': 'fb6c3a06cb65', 'minibatch_size': 20}
+##SdaSgdOptimizer, max_minibatches = 11000
+##n_outs 62
+##pretrain_lr 0.1
+##finetune_lr 0.1
+##----
+##
+##pretraining with NIST
+##
+##STARTING PRETRAINING, time =  2010-03-29 15:07:43.945981
+##Pre-training layer 0, epoch 0, cost  113.562562494
+##Pre-training layer 0, epoch 1, cost  113.410032944
+##Pre-training layer 1, epoch 0, cost  98.4539954687
+##Pre-training layer 1, epoch 1, cost  97.8658966686
+##Pretraining took 9.011333 minutes
+##
+##SERIE OF 3 DIFFERENT FINETUNINGS
+##
+##
+##finetune with NIST
+##
+##
+##STARTING FINETUNING, time =  2010-03-29 15:16:46.512235
+##epoch 1, minibatch 4999, validation error on P07 : 29.511250 %
+##     epoch 1, minibatch 4999, test error on dataset NIST  (train data) of best model 40.408509 %
+##     epoch 1, minibatch 4999, test error on dataset P07 of best model 96.700000 %
+##epoch 1, minibatch 9999, validation error on P07 : 25.560000 %
+##     epoch 1, minibatch 9999, test error on dataset NIST  (train data) of best model 34.778969 %
+##     epoch 1, minibatch 9999, test error on dataset P07 of best model 97.037500 %
+##
+##Optimization complete with best validation score of 25.560000 %,with test performance 34.778969 % on dataset NIST 
+##The test score on the P07 dataset is 97.037500
+##The finetuning ran for 3.281833 minutes
+##
+##
+##finetune with P07
+##
+##
+##STARTING FINETUNING, time =  2010-03-29 15:20:06.346009
+##epoch 1, minibatch 4999, validation error on NIST : 65.226250 %
+##     epoch 1, minibatch 4999, test error on dataset P07  (train data) of best model 84.465000 %
+##     epoch 1, minibatch 4999, test error on dataset NIST of best model 65.965237 %
+##epoch 1, minibatch 9999, validation error on NIST : 58.745000 %
+##     epoch 1, minibatch 9999, test error on dataset P07  (train data) of best model 80.405000 %
+##     epoch 1, minibatch 9999, test error on dataset NIST of best model 61.341923 %
+##
+##Optimization complete with best validation score of 58.745000 %,with test performance 80.405000 % on dataset P07 
+##The test score on the NIST dataset is 61.341923
+##The finetuning ran for 3.299500 minutes
+##
+##
+##finetune with NIST (done earlier) followed by P07 (written here)
+##
+##
+##STARTING FINETUNING, time =  2010-03-29 15:23:27.947374
+##epoch 1, minibatch 4999, validation error on NIST : 83.975000 %
+##     epoch 1, minibatch 4999, test error on dataset P07  (train data) of best model 83.872500 %
+##     epoch 1, minibatch 4999, test error on dataset NIST of best model 43.170010 %
+##epoch 1, minibatch 9999, validation error on NIST : 79.775000 %
+##     epoch 1, minibatch 9999, test error on dataset P07  (train data) of best model 80.971250 %
+##     epoch 1, minibatch 9999, test error on dataset NIST of best model 49.017468 %
+##
+##Optimization complete with best validation score of 79.775000 %,with test performance 80.971250 % on dataset P07 
+##The test score on the NIST dataset is 49.017468
+##The finetuning ran for 2.851500 minutes
+##
+##
+##finetune with NIST only on the logistic regression on top.
+##        All hidden units output are input of the logistic regression
+##
+##
+##STARTING FINETUNING, time =  2010-03-29 15:26:21.430557
+##epoch 1, minibatch 4999, validation error on P07 : 95.223750 %
+##     epoch 1, minibatch 4999, test error on dataset NIST  (train data) of best model 93.268765 %
+##     epoch 1, minibatch 4999, test error on dataset P07 of best model 96.535000 %
+##epoch 1, minibatch 9999, validation error on P07 : 95.223750 %
+##
+##Optimization complete with best validation score of 95.223750 %,with test performance 93.268765 % on dataset NIST 
+##The test score on the P07 dataset is 96.535000
+##The finetuning ran for 2.013167 minutes
+##Closing remaining open files: /u/pannetis/IFT6266/test/series.h5... done
+##[pannetis@ceylon test]$ 
+
+
+
--- a/deep/convolutional_dae/salah_exp/nist_csda.py	Thu Apr 22 00:49:42 2010 -0400
+++ b/deep/convolutional_dae/salah_exp/nist_csda.py	Thu Apr 22 19:50:21 2010 -0400
@@ -121,24 +121,20 @@
     
     if finetune_choice == 0:
         print('\n\n\tfinetune with NIST\n\n')
-        optimizer.reload_parameters('params_pretrain.txt')
         optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
         channel.save()
     if finetune_choice == 1:
         print('\n\n\tfinetune with P07\n\n')
-        optimizer.reload_parameters('params_pretrain.txt')
         optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr)
         channel.save()
     if finetune_choice == 2:
         print('\n\n\tfinetune with P07 followed by NIST\n\n')
-        optimizer.reload_parameters('params_pretrain.txt')
         optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20,decrease=decrease_lr)
         optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr)
         channel.save()
     if finetune_choice == 3:
         print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\
         All hidden units output are input of the logistic regression\n\n')
-        optimizer.reload_parameters('params_pretrain.txt')
         optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr)
         
         
@@ -146,7 +142,6 @@
         print('\nSERIE OF 4 DIFFERENT FINETUNINGS')
         print('\n\n\tfinetune with NIST\n\n')
         sys.stdout.flush()
-        optimizer.reload_parameters('params_pretrain.txt')
         optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
         channel.save()
         print('\n\n\tfinetune with P07\n\n')
--- a/deep/convolutional_dae/salah_exp/sgd_optimization.py	Thu Apr 22 00:49:42 2010 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,371 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8
-
-# Generic SdA optimization loop, adapted from the deeplearning.net tutorial
-
-import numpy 
-import theano
-import time
-import datetime
-import theano.tensor as T
-import sys
-import pickle
-
-from jobman import DD
-import jobman, jobman.sql
-from copy import copy
-
-from stacked_dae import SdA
-
-from ift6266.utils.seriestables import *
-
-#For test purpose only
-buffersize=1000
-
-default_series = { \
-        'reconstruction_error' : DummySeries(),
-        'training_error' : DummySeries(),
-        'validation_error' : DummySeries(),
-        'test_error' : DummySeries(),
-        'params' : DummySeries()
-        }
-
-def itermax(iter, max):
-    for i,it in enumerate(iter):
-        if i >= max:
-            break
-        yield it
-
-class SdaSgdOptimizer:
-    def __init__(self, dataset, hyperparameters, n_ins, n_outs,
-                    examples_per_epoch, series=default_series, max_minibatches=None):
-        self.dataset = dataset
-        self.hp = hyperparameters
-        self.n_ins = n_ins
-        self.n_outs = n_outs
-        self.parameters_pre=[]
-   
-        self.max_minibatches = max_minibatches
-        print "SdaSgdOptimizer, max_minibatches =", max_minibatches
-
-        self.ex_per_epoch = examples_per_epoch
-        self.mb_per_epoch = examples_per_epoch / self.hp.minibatch_size
-
-        self.series = series
-
-        self.rng = numpy.random.RandomState(1234)
-
-        self.init_classifier()
-
-        sys.stdout.flush()
-
-    def init_classifier(self):
-        print "Constructing classifier"
-
-        # we don't want to save arrays in DD objects, so
-        # we recreate those arrays here
-        nhl = self.hp.num_hidden_layers
-        layers_sizes = [self.hp.hidden_layers_sizes] * nhl
-        corruption_levels = [self.hp.corruption_levels] * nhl
-
-        # construct the stacked denoising autoencoder class
-        self.classifier = SdA( \
-                          batch_size = self.hp.minibatch_size, \
-                          n_ins= self.n_ins, \
-                          hidden_layers_sizes = layers_sizes, \
-                          n_outs = self.n_outs, \
-                          corruption_levels = corruption_levels,\
-                          rng = self.rng,\
-                          pretrain_lr = self.hp.pretraining_lr, \
-                          finetune_lr = self.hp.finetuning_lr)
-
-        #theano.printing.pydotprint(self.classifier.pretrain_functions[0], "function.graph")
-
-        sys.stdout.flush()
-
-    def train(self):
-        self.pretrain(self.dataset)
-        self.finetune(self.dataset)
-
-    def pretrain(self,dataset):
-        print "STARTING PRETRAINING, time = ", datetime.datetime.now()
-        sys.stdout.flush()
-        
-        un_fichier=int(819200.0/self.hp.minibatch_size) #Number of batches in a P07 file
-
-        start_time = time.clock()  
-        ## Pre-train layer-wise 
-        for i in xrange(self.classifier.n_layers):
-            # go through pretraining epochs 
-            for epoch in xrange(self.hp.pretraining_epochs_per_layer):
-                # go through the training set
-                batch_index=0
-                count=0
-                num_files=0
-                for x,y in dataset.train(self.hp.minibatch_size):
-                    c = self.classifier.pretrain_functions[i](x)
-                    count +=1
-
-                    self.series["reconstruction_error"].append((epoch, batch_index), c)
-                    batch_index+=1
-
-                    #if batch_index % 100 == 0:
-                    #    print "100 batches"
-
-                    # useful when doing tests
-                    if self.max_minibatches and batch_index >= self.max_minibatches:
-                        break
-                    
-                    #When we pass through the data only once (the case with P07)
-                    #There is approximately 800*1024=819200 examples per file (1k per example and files are 800M)
-                    if self.hp.pretraining_epochs_per_layer == 1 and count%un_fichier == 0:
-                        print 'Pre-training layer %i, epoch %d, cost '%(i,num_files),c
-                        num_files+=1
-                        sys.stdout.flush()
-                        self.series['params'].append((num_files,), self.classifier.all_params)
-                
-                #When NIST is used
-                if self.hp.pretraining_epochs_per_layer > 1:        
-                    print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
-                    sys.stdout.flush()
-
-                    self.series['params'].append((epoch,), self.classifier.all_params)
-     
-        end_time = time.clock()
-
-        print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
-        self.hp.update({'pretraining_time': end_time-start_time})
-        
-        sys.stdout.flush()
-        
-        #To be able to load them later for tests on finetune
-        self.parameters_pre=[copy(x.value) for x in self.classifier.params]
-        f = open('params_pretrain.txt', 'w')
-        pickle.dump(self.parameters_pre,f)
-        f.close()
-
-
-    def finetune(self,dataset,dataset_test,num_finetune,ind_test,special=0,decrease=0):
-        
-        if special != 0 and special != 1:
-            sys.exit('Bad value for variable special. Must be in {0,1}')
-        print "STARTING FINETUNING, time = ", datetime.datetime.now()
-
-        minibatch_size = self.hp.minibatch_size
-        if ind_test == 0 or ind_test == 20:
-            nom_test = "NIST"
-            nom_train="P07"
-        else:
-            nom_test = "P07"
-            nom_train = "NIST"
-
-
-        # create a function to compute the mistakes that are made by the model
-        # on the validation set, or testing set
-        test_model = \
-            theano.function(
-                [self.classifier.x,self.classifier.y], self.classifier.errors)
-        #         givens = {
-        #           self.classifier.x: ensemble_x,
-        #           self.classifier.y: ensemble_y]})
-
-        validate_model = \
-            theano.function(
-                [self.classifier.x,self.classifier.y], self.classifier.errors)
-        #        givens = {
-        #           self.classifier.x: ,
-        #           self.classifier.y: ]})
-
-
-        # early-stopping parameters
-        patience              = 10000 # look as this many examples regardless
-        patience_increase     = 2.    # wait this much longer when a new best is 
-                                      # found
-        improvement_threshold = 0.995 # a relative improvement of this much is 
-                                      # considered significant
-        validation_frequency  = min(self.mb_per_epoch, patience/2)
-                                      # go through this many 
-                                      # minibatche before checking the network 
-                                      # on the validation set; in this case we 
-                                      # check every epoch 
-        if self.max_minibatches and validation_frequency > self.max_minibatches:
-            validation_frequency = self.max_minibatches / 2
-
-        best_params          = None
-        best_validation_loss = float('inf')
-        test_score           = 0.
-        start_time = time.clock()
-
-        done_looping = False
-        epoch = 0
-
-        total_mb_index = 0
-        minibatch_index = 0
-        parameters_finetune=[]
-        
-        if ind_test == 21:
-            learning_rate = self.hp.finetuning_lr / 10.0
-        else:
-            learning_rate = self.hp.finetuning_lr  #The initial finetune lr
-
-
-        while (epoch < num_finetune) and (not done_looping):
-            epoch = epoch + 1
-
-            for x,y in dataset.train(minibatch_size,bufsize=buffersize):
-                minibatch_index += 1
-                
-                
-                if special == 0:
-                    cost_ij = self.classifier.finetune(x,y,learning_rate)
-                elif special == 1:
-                    cost_ij = self.classifier.finetune2(x,y)
-                total_mb_index += 1
-
-                self.series["training_error"].append((epoch, minibatch_index), cost_ij)
-
-                if (total_mb_index+1) % validation_frequency == 0: 
-                    #minibatch_index += 1
-                    #The validation set is always NIST (we want the model to be good on NIST)
-                    if ind_test == 0 | ind_test == 20:
-                        iter=dataset_test.valid(minibatch_size,bufsize=buffersize)
-                    else:
-                        iter = dataset.valid(minibatch_size,bufsize=buffersize)
-                    if self.max_minibatches:
-                        iter = itermax(iter, self.max_minibatches)
-                    validation_losses = [validate_model(x,y) for x,y in iter]
-                    this_validation_loss = numpy.mean(validation_losses)
-
-                    self.series["validation_error"].\
-                        append((epoch, minibatch_index), this_validation_loss*100.)
-
-                    print('epoch %i, minibatch %i, validation error on NIST : %f %%' % \
-                           (epoch, minibatch_index+1, \
-                            this_validation_loss*100.))
-
-
-                    # if we got the best validation score until now
-                    if this_validation_loss < best_validation_loss:
-
-                        #improve patience if loss improvement is good enough
-                        if this_validation_loss < best_validation_loss *  \
-                               improvement_threshold :
-                            patience = max(patience, total_mb_index * patience_increase)
-
-                        # save best validation score, iteration number and parameters
-                        best_validation_loss = this_validation_loss
-                        best_iter = total_mb_index
-                        parameters_finetune=[copy(x.value) for x in self.classifier.params]
-
-                        # test it on the test set
-                        iter = dataset.test(minibatch_size,bufsize=buffersize)
-                        if self.max_minibatches:
-                            iter = itermax(iter, self.max_minibatches)
-                        test_losses = [test_model(x,y) for x,y in iter]
-                        test_score = numpy.mean(test_losses)
-                        
-                        #test it on the second test set
-                        iter2 = dataset_test.test(minibatch_size,bufsize=buffersize)
-                        if self.max_minibatches:
-                            iter2 = itermax(iter2, self.max_minibatches)
-                        test_losses2 = [test_model(x,y) for x,y in iter2]
-                        test_score2 = numpy.mean(test_losses2)
-
-                        self.series["test_error"].\
-                            append((epoch, minibatch_index), test_score*100.)
-
-                        print(('     epoch %i, minibatch %i, test error on dataset %s  (train data) of best '
-                              'model %f %%') % 
-                                     (epoch, minibatch_index+1,nom_train,
-                                      test_score*100.))
-                                    
-                        print(('     epoch %i, minibatch %i, test error on dataset %s of best '
-                              'model %f %%') % 
-                                     (epoch, minibatch_index+1,nom_test,
-                                      test_score2*100.))
-                    
-                    if patience <= total_mb_index:
-                        done_looping = True
-                        break   #to exit the FOR loop
-                    
-                    sys.stdout.flush()
-
-                # useful when doing tests
-                if self.max_minibatches and minibatch_index >= self.max_minibatches:
-                    break
-            
-            if decrease == 1:
-                learning_rate /= 2 #divide the learning rate by 2 for each new epoch
-            
-            self.series['params'].append((epoch,), self.classifier.all_params)
-
-            if done_looping == True:    #To exit completly the fine-tuning
-                break   #to exit the WHILE loop
-
-        end_time = time.clock()
-        self.hp.update({'finetuning_time':end_time-start_time,\
-                    'best_validation_error':best_validation_loss,\
-                    'test_score':test_score,
-                    'num_finetuning_epochs':epoch})
-
-        print(('\nOptimization complete with best validation score of %f %%,'
-               'with test performance %f %% on dataset %s ') %  
-                     (best_validation_loss * 100., test_score*100.,nom_train))
-        print(('The test score on the %s dataset is %f')%(nom_test,test_score2*100.))
-        
-        print ('The finetuning ran for %f minutes' % ((end_time-start_time)/60.))
-        
-        sys.stdout.flush()
-        
-        #Save a copy of the parameters in a file to be able to get them in the future
-        
-        if special == 1:    #To keep a track of the value of the parameters
-            f = open('params_finetune_stanford.txt', 'w')
-            pickle.dump(parameters_finetune,f)
-            f.close()
-        
-        elif ind_test == 0 | ind_test == 20:    #To keep a track of the value of the parameters
-            f = open('params_finetune_P07.txt', 'w')
-            pickle.dump(parameters_finetune,f)
-            f.close()
-               
-
-        elif ind_test== 1:    #For the run with 2 finetunes. It will be faster.
-            f = open('params_finetune_NIST.txt', 'w')
-            pickle.dump(parameters_finetune,f)
-            f.close()
-        
-        elif ind_test== 21:    #To keep a track of the value of the parameters
-            f = open('params_finetune_P07_then_NIST.txt', 'w')
-            pickle.dump(parameters_finetune,f)
-            f.close()
-        
-
-    #Set parameters like they where right after pre-train or finetune
-    def reload_parameters(self,which):
-        
-        #self.parameters_pre=pickle.load('params_pretrain.txt')
-        f = open(which)
-        self.parameters_pre=pickle.load(f)
-        f.close()
-        for idx,x in enumerate(self.parameters_pre):
-            if x.dtype=='float64':
-                self.classifier.params[idx].value=theano._asarray(copy(x),dtype=theano.config.floatX)
-            else:
-                self.classifier.params[idx].value=copy(x)
-
-    def training_error(self,dataset):
-        # create a function to compute the mistakes that are made by the model
-        # on the validation set, or testing set
-        test_model = \
-            theano.function(
-                [self.classifier.x,self.classifier.y], self.classifier.errors)
-                
-        iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
-        train_losses2 = [test_model(x,y) for x,y in iter2]
-        train_score2 = numpy.mean(train_losses2)
-        print "Training error is: " + str(train_score2)
-
-
-
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/deep/convolutional_dae/salah_exp/sgd_optimization_new.py	Thu Apr 22 19:50:21 2010 -0400
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# coding: utf-8
+
+import numpy
+import theano
+import time
+import datetime
+import theano.tensor as T
+import sys
+import pickle
+
+from jobman import DD
+import jobman, jobman.sql
+from copy import copy
+
+from stacked_convolutional_dae_uit import CSdA
+
+from ift6266.utils.seriestables import *
+
+buffersize=1000
+
+default_series = { \
+        'reconstruction_error' : DummySeries(),
+        'training_error' : DummySeries(),
+        'validation_error' : DummySeries(),
+        'test_error' : DummySeries(),
+        'params' : DummySeries()
+        }
+
+def itermax(iter, max):
+    for i,it in enumerate(iter):
+        if i >= max:
+            break
+        yield it
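+# (itermax(it, n) yields at most n items, like itertools.islice(it, n))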
+
+def get_conv_shape(kernels,imgshp,batch_size,max_pool_layers):
+    # Returns the input dimension of the MLP sitting on top of the
+    # convolutional net, plus a list of [kernel shape, image shape,
+    # max-pool size] for every convolutional layer
+    conv_layers=[]
+    init_layer = [ [ kernels[0][0],1,kernels[0][1],kernels[0][2] ],\
+                   [ batch_size , 1, imgshp[0], imgshp[1] ],
+                    max_pool_layers[0] ]
+    conv_layers.append(init_layer)
+
+    # output side of the first layer: valid convolution then max-pooling
+    # (assumes square images and kernels)
+    conv_n_out = int((imgshp[0]-kernels[0][2]+1)/max_pool_layers[0][0])
+
+    for i in range(1,len(kernels)):
+        layer = [ [ kernels[i][0],kernels[i-1][0],kernels[i][1],kernels[i][2] ],\
+                  [ batch_size, kernels[i-1][0],conv_n_out,conv_n_out ],
+                   max_pool_layers[i] ]
+        conv_layers.append(layer)
+        conv_n_out = int( (conv_n_out - kernels[i][2]+1)/max_pool_layers[i][0])
+    conv_n_out=kernels[-1][0]*conv_n_out**2
+    return conv_n_out,conv_layers
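+# Worked example for get_conv_shape, using the DEFAULT_HP_NIST values from
+# config.py: kernels=[[4,5,5],[2,3,3]], imgshp=[32,32], max_pool_layers=[[2,2],[2,2]]
+#   layer 0: valid convolution 32-5+1 = 28, then 2x2 max-pooling -> 14
+#   layer 1: valid convolution 14-3+1 = 12, then 2x2 max-pooling -> 6
+#   conv_n_out = 2 * 6**2 = 72 inputs for the MLP on top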
+
+
+
+
+
+class CSdASgdOptimizer:
+    def __init__(self, dataset, hyperparameters, n_ins, n_outs,
+                    examples_per_epoch, series=default_series, max_minibatches=None):
+        self.dataset = dataset
+        self.hp = hyperparameters
+        self.n_ins = n_ins
+        self.n_outs = n_outs
+        self.parameters_pre=[]
+
+        self.max_minibatches = max_minibatches
+        print "CSdASgdOptimizer, max_minibatches =", max_minibatches
+
+        self.ex_per_epoch = examples_per_epoch
+        self.mb_per_epoch = examples_per_epoch / self.hp.minibatch_size
+
+        self.series = series
+
+        self.rng = numpy.random.RandomState(1234)
+        self.init_classifier()
+
+        sys.stdout.flush()
+
+    def init_classifier(self):
+        print "Constructing classifier"
+
+        n_ins,convlayers = get_conv_shape(self.hp.kernels,self.hp.imgshp,self.hp.minibatch_size,self.hp.max_pool_layers)
+
+        self.classifier = CSdA(n_ins_mlp = n_ins,
+                               batch_size = self.hp.minibatch_size,
+                               conv_hidden_layers_sizes = convlayers,
+                               mlp_hidden_layers_sizes = self.hp.mlp_size, 
+                               corruption_levels = self.hp.corruption_levels,
+                               rng = self.rng, 
+                               n_out = self.n_outs,
+                               pretrain_lr = self.hp.pretraining_lr, 
+                               finetune_lr = self.hp.finetuning_lr)
+
+
+
+        #theano.printing.pydotprint(self.classifier.pretrain_functions[0], "function.graph")
+
+        sys.stdout.flush()
+
+    def train(self):
+        self.pretrain(self.dataset)
+        self.finetune(self.dataset)
+
+    def pretrain(self,dataset):
+        print "STARTING PRETRAINING, time = ", datetime.datetime.now()
+        sys.stdout.flush()
+
+        un_fichier=int(819200.0/self.hp.minibatch_size) #Number of batches in a P07 file
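+        # e.g. with minibatch_size=100 this gives un_fichier = 8192, so in the
+        # single-epoch (P07) case below the cost is printed every 8192 batches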
+
+        start_time = time.clock()
+        ## Pre-train layer-wise
+        for i in xrange(self.classifier.n_layers):
+            # go through pretraining epochs
+            for epoch in xrange(self.hp.pretraining_epochs_per_layer):
+                # go through the training set
+                batch_index=0
+                count=0
+                num_files=0
+                for x,y in dataset.train(self.hp.minibatch_size):
+                    # the convolutional graph is compiled for a fixed batch
+                    # size, so skip the last, incomplete minibatch of a file
+                    if x.shape[0] != self.hp.minibatch_size:
+                        continue
+                    c = self.classifier.pretrain_functions[i](x)
+                    count +=1
+
+                    self.series["reconstruction_error"].append((epoch, batch_index), c)
+                    batch_index+=1
+
+                    #if batch_index % 100 == 0:
+                    #    print "100 batches"
+
+                    # useful when doing tests
+                    if self.max_minibatches and batch_index >= self.max_minibatches:
+                        break
+
+                    #When we pass through the data only once (the case with P07)
+                    #There is approximately 800*1024=819200 examples per file (1k per example and files are 800M)
+                    if self.hp.pretraining_epochs_per_layer == 1 and count%un_fichier == 0:
+                        print 'Pre-training layer %i, epoch %d, cost '%(i,num_files),c
+                        num_files+=1
+                        sys.stdout.flush()
+                        self.series['params'].append((num_files,), self.classifier.all_params)
+
+                #When NIST is used
+                if self.hp.pretraining_epochs_per_layer > 1:
+                    print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
+                    sys.stdout.flush()
+
+                    self.series['params'].append((epoch,), self.classifier.all_params)
+        end_time = time.clock()
+
+        print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
+        self.hp.update({'pretraining_time': end_time-start_time})
+
+        sys.stdout.flush()
+
+        #To be able to load them later for tests on finetune
+        self.parameters_pre=[copy(x.value) for x in self.classifier.params]
+        f = open('params_pretrain.txt', 'w')
+        pickle.dump(self.parameters_pre,f)
+        f.close()
+
+    def finetune(self,dataset,dataset_test,num_finetune,ind_test,special=0,decrease=0):
+
+        if special != 0 and special != 1:
+            sys.exit('Bad value for variable special. Must be in {0,1}')
+        print "STARTING FINETUNING, time = ", datetime.datetime.now()
+
+        minibatch_size = self.hp.minibatch_size
+        if ind_test == 0 or ind_test == 20:
+            nom_test = "NIST"
+            nom_train="P07"
+        else:
+            nom_test = "P07"
+            nom_train = "NIST"
+
+
+        # create a function to compute the mistakes that are made by the model
+        # on the validation set, or testing set
+        test_model = \
+            theano.function(
+                [self.classifier.x,self.classifier.y], self.classifier.errors)
+        #         givens = {
+        #           self.classifier.x: ensemble_x,
+        #           self.classifier.y: ensemble_y]})
+
+        validate_model = \
+            theano.function(
+                [self.classifier.x,self.classifier.y], self.classifier.errors)
+        #        givens = {
+        #           self.classifier.x: ,
+        #           self.classifier.y: ]})
+        # early-stopping parameters
+        patience              = 10000 # look at this many examples regardless
+        patience_increase     = 2.    # wait this much longer when a new best is
+                                      # found
+        improvement_threshold = 0.995 # a relative improvement of this much is
+                                      # considered significant
+        validation_frequency  = min(self.mb_per_epoch, patience/2)
+                                      # go through this many
+                                      # minibatches before checking the network
+                                      # on the validation set; in this case we
+                                      # check every epoch
+        if self.max_minibatches and validation_frequency > self.max_minibatches:
+            validation_frequency = self.max_minibatches / 2
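+        # e.g. with examples_per_epoch = NIST_ALL_TRAIN_SIZE = 649081 and
+        # minibatch_size = 100: mb_per_epoch = 6490, so
+        # validation_frequency = min(6490, 10000/2) = 5000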
+        best_params          = None
+        best_validation_loss = float('inf')
+        test_score           = 0.
+        start_time = time.clock()
+
+        done_looping = False
+        epoch = 0
+
+        total_mb_index = 0
+        minibatch_index = 0
+        parameters_finetune=[]
+        learning_rate = self.hp.finetuning_lr
+
+
+        while (epoch < num_finetune) and (not done_looping):
+            epoch = epoch + 1
+
+            for x,y in dataset.train(minibatch_size,bufsize=buffersize):
+
+                minibatch_index += 1
+
+                # skip incomplete minibatches (the model expects a fixed batch size)
+                if x.shape[0] != self.hp.minibatch_size:
+                    print 'skipping incomplete minibatch'
+                    continue
+
+                # NOTE: learning_rate is not passed to finetune() here, so the
+                # 'decrease' option (halving the rate each epoch) has no effect
+                cost_ij = self.classifier.finetune(x,y)#,learning_rate)
+                total_mb_index += 1
+
+                self.series["training_error"].append((epoch, minibatch_index), cost_ij)
+
+                if (total_mb_index+1) % validation_frequency == 0:
+                    # validation is done on dataset_test, i.e. the dataset we
+                    # are NOT finetuning on (nom_test above)
+                    iter = dataset_test.valid(minibatch_size,bufsize=buffersize)
+
+
+                    if self.max_minibatches:
+                        iter = itermax(iter, self.max_minibatches)
+
+                    validation_losses = []
+
+                    for x,y in iter:
+                        if x.shape[0] != self.hp.minibatch_size:
+                            print 'skipping incomplete minibatch'
+                            continue
+                        validation_losses.append(validate_model(x,y))
+
+                    this_validation_loss = numpy.mean(validation_losses)
+
+                    self.series["validation_error"].\
+                        append((epoch, minibatch_index), this_validation_loss*100.)
+
+                    print('epoch %i, minibatch %i, validation error on %s : %f %%' % \
+                           (epoch, minibatch_index+1, nom_test, \
+                            this_validation_loss*100.))
+
+
+                    # if we got the best validation score until now
+                    if this_validation_loss < best_validation_loss:
+
+                        #improve patience if loss improvement is good enough
+                        if this_validation_loss < best_validation_loss *  \
+                               improvement_threshold :
+                            patience = max(patience, total_mb_index * patience_increase)
+
+                        # save best validation score, iteration number and parameters
+                        best_validation_loss = this_validation_loss
+                        best_iter = total_mb_index
+                        parameters_finetune=[copy(x.value) for x in self.classifier.params]
+
+                        # test it on the test set
+                        iter = dataset.test(minibatch_size,bufsize=buffersize)
+                        if self.max_minibatches:
+                            iter = itermax(iter, self.max_minibatches)
+                        test_losses = []
+                        test_losses2 = []
+                        for x,y in iter:
+                            if x.shape[0] != self.hp.minibatch_size:
+                                print 'skipping incomplete minibatch'
+                                continue
+                            test_losses.append(test_model(x,y))
+
+                        test_score = numpy.mean(test_losses)
+
+                        #test it on the second test set
+                        iter2 = dataset_test.test(minibatch_size,bufsize=buffersize)
+                        if self.max_minibatches:
+                            iter2 = itermax(iter2, self.max_minibatches)
+                        for x,y in iter2:
+                            if x.shape[0] != self.hp.minibatch_size:
+                                continue
+                            test_losses2.append(test_model(x,y))
+
+                        test_score2 = numpy.mean(test_losses2)
+
+                        self.series["test_error"].\
+                            append((epoch, minibatch_index), test_score*100.)
+
+                        print(('     epoch %i, minibatch %i, test error on dataset %s  (train data) of best '
+                              'model %f %%') %
+                                     (epoch, minibatch_index+1,nom_train,
+                                      test_score*100.))
+
+                        print(('     epoch %i, minibatch %i, test error on dataset %s of best '
+                              'model %f %%') %
+                                     (epoch, minibatch_index+1,nom_test,
+                                      test_score2*100.))
+
+                    if patience <= total_mb_index:
+                        done_looping = True
+                        break   #to exit the FOR loop
+
+                    sys.stdout.flush()
+
+                # useful when doing tests
+                if self.max_minibatches and minibatch_index >= self.max_minibatches:
+                    break
+
+            if decrease == 1:
+                learning_rate /= 2 #divide the learning rate by 2 for each new epoch
+
+            self.series['params'].append((epoch,), self.classifier.all_params)
+
+            if done_looping:    # to exit the fine-tuning completely
+                break   #to exit the WHILE loop
+
+        end_time = time.clock()
+        self.hp.update({'finetuning_time':end_time-start_time,\
+                    'best_validation_error':best_validation_loss,\
+                    'test_score':test_score,
+                    'num_finetuning_epochs':epoch})
+
+        print(('\nOptimization complete with best validation score of %f %%, '
+               'with test performance %f %% on dataset %s ') %
+                     (best_validation_loss * 100., test_score*100.,nom_train))
+        print(('The test score on the %s dataset is %f')%(nom_test,test_score2*100.))
+
+        print ('The finetuning ran for %f minutes' % ((end_time-start_time)/60.))
+
+        sys.stdout.flush()
+
+        #Save a copy of the parameters in a file to be able to get them in the future
+
+        if special == 1:    # to keep track of the parameter values
+            f = open('params_finetune_stanford.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+
+        elif ind_test == 0 or ind_test == 20:    # to keep track of the parameter values
+            f = open('params_finetune_P07.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+
+
+        elif ind_test == 1:    # for the run with 2 finetunings; it will be faster
+            f = open('params_finetune_NIST.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+
+        elif ind_test == 21:    # to keep track of the parameter values
+            f = open('params_finetune_P07_then_NIST.txt', 'w')
+            pickle.dump(parameters_finetune,f)
+            f.close()
+
+    # Set the parameters back to what they were right after pre-training or finetuning
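+    # (e.g. nist_csda.py used to call reload_parameters('params_pretrain.txt')
+    # before each finetuning run to restart from the pre-trained weights)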
+    def reload_parameters(self,which):
+
+        #self.parameters_pre=pickle.load('params_pretrain.txt')
+        f = open(which)
+        self.parameters_pre=pickle.load(f)
+        f.close()
+        for idx,x in enumerate(self.parameters_pre):
+            if x.dtype=='float64':
+                self.classifier.params[idx].value=theano._asarray(copy(x),dtype=theano.config.floatX)
+            else:
+                self.classifier.params[idx].value=copy(x)
+
+    def training_error(self,dataset):
+        # create a function to compute the mistakes made by the model
+        # on the training set
+        test_model = \
+            theano.function(
+                [self.classifier.x,self.classifier.y], self.classifier.errors)
+
+        iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
+        train_losses2 = [test_model(x,y) for x,y in iter2]
+        train_score2 = numpy.mean(train_losses2)
+        print "Training error is: " + str(train_score2)
--- a/deep/convolutional_dae/salah_exp/stacked_convolutional_dae_uit.py	Thu Apr 22 00:49:42 2010 -0400
+++ b/deep/convolutional_dae/salah_exp/stacked_convolutional_dae_uit.py	Thu Apr 22 19:50:21 2010 -0400
@@ -9,7 +9,8 @@
 from theano.tensor.signal import downsample
 from theano.tensor.nnet import conv 
 
-
+sys.path.append('../../')
+#import ift6266.datasets
 import ift6266.datasets
 from ift6266.baseline.log_reg.log_reg import LogisticRegression