ift6266 changeset 191:3632e6258642

Minor additions to stacked_dae, just printed the time I think.
| field | value |
|---|---|
| author | fsavard |
| date | Tue, 02 Mar 2010 14:47:18 -0500 |
| parents | 70a9df1cd20e |
| children | e656edaedb48 92c9a6c48ce9 39421555993f |
| files | deep/stacked_dae/mnist_sda.py deep/stacked_dae/nist_sda.py deep/stacked_dae/sgd_optimization.py deep/stacked_dae/stacked_dae.py deep/stacked_dae/utils.py |
| diffstat | 5 files changed, 26 insertions(+), 24 deletions(-) |
```diff
--- a/deep/stacked_dae/mnist_sda.py	Tue Mar 02 09:52:27 2010 -0500
+++ b/deep/stacked_dae/mnist_sda.py	Tue Mar 02 14:47:18 2010 -0500
@@ -1,6 +1,7 @@
 #!/usr/bin/python
 # coding: utf-8
 
+# TODO: This probably doesn't work anymore, adapt to new code in sgd_opt
 # Parameterize call to sgd_optimization for MNIST
 
 import numpy
```
```diff
--- a/deep/stacked_dae/nist_sda.py	Tue Mar 02 09:52:27 2010 -0500
+++ b/deep/stacked_dae/nist_sda.py	Tue Mar 02 14:47:18 2010 -0500
@@ -25,18 +25,13 @@
 from sgd_optimization import SdaSgdOptimizer
 
-SERIES_AVAILABLE = False
-try:
-    from scalar_series import *
-    SERIES_AVAILABLE = True
-except ImportError:
-    print "Could not import Series"
+from ift6266.utils.scalar_series import *
 
 TEST_CONFIG = False
 
 NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
 
-JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/fsavard_sda2'
+JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/fsavard_sda2'
 
 REDUCE_TRAIN_TO = None
 MAX_FINETUNING_EPOCHS = 1000
@@ -58,15 +53,15 @@
         'num_hidden_layers':[2,3]}
 
 # Just useful for tests... minimal number of epochs
-DEFAULT_HP_NIST = DD({'finetuning_lr':0.01,
-                      'pretraining_lr':0.01,
-                      'pretraining_epochs_per_layer':1,
-                      'max_finetuning_epochs':1,
-                      'hidden_layers_sizes':1000,
+DEFAULT_HP_NIST = DD({'finetuning_lr':0.1,
+                      'pretraining_lr':0.1,
+                      'pretraining_epochs_per_layer':20,
+                      'max_finetuning_epochs':2,
+                      'hidden_layers_sizes':300,
                       'corruption_levels':0.2,
                       'minibatch_size':20,
-                      'reduce_train_to':1000,
-                      'num_hidden_layers':1})
+                      #'reduce_train_to':300,
+                      'num_hidden_layers':2})
 
 def jobman_entrypoint(state, channel):
     pylearn.version.record_versions(state,[theano,ift6266,pylearn])
@@ -75,12 +70,10 @@
     workingdir = os.getcwd()
 
     print "Will load NIST"
-    sys.stdout.flush()
 
     nist = NIST(20)
 
     print "NIST loaded"
-    sys.stdout.flush()
 
     rtt = None
     if state.has_key('reduce_train_to'):
@@ -89,7 +82,7 @@
         rtt = REDUCE_TRAIN_TO
 
     if rtt:
-        print "Reducing training set to ", rtt, " examples"
+        print "Reducing training set to "+str( rtt)+ " examples"
         nist.reduce_train_set(rtt)
 
     train,valid,test = nist.get_tvt()
@@ -107,8 +100,9 @@
     # b,b',W for each hidden layer + b,W of last layer (logreg)
     numparams = nhl * 3 + 2
     series_mux = None
-    if SERIES_AVAILABLE:
-        series_mux = create_series(workingdir, numparams)
+    series_mux = create_series(workingdir, numparams)
+
+    print "Creating optimizer with state, ", state
 
     optimizer = SdaSgdOptimizer(dataset=dataset, hyperparameters=state, \
                                     n_ins=n_ins, n_outs=n_outs,\
@@ -275,7 +269,9 @@
         jobman_insert_nist()
 
     elif len(args) > 0 and args[0] == 'test_jobman_entrypoint':
-        chanmock = DD({'COMPLETE':0})
+        def f():
+            pass
+        chanmock = DD({'COMPLETE':0,'save':f})
         jobman_entrypoint(DEFAULT_HP_NIST, chanmock)
 
     elif len(args) > 0 and args[0] == 'estimate':
```
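The chanmock change matters because the entrypoint now calls channel.save() during training, and the old DD({'COMPLETE':0}) mock had no save attribute, so the local test path would crash. A minimal, self-contained sketch of the pattern; the DD class below is a hypothetical stand-in for jobman's DD, reduced to just what the example needs:

```python
class DD(dict):
    """Tiny stand-in for jobman.DD: a dict that also allows attribute access."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

def noop_save():
    """No-op save(): enough for a local smoke test that never checkpoints."""
    pass

# Mock channel as in the diff: COMPLETE mimics the jobman status flag,
# save is what the entrypoint calls when it wants to checkpoint state.
chanmock = DD({'COMPLETE': 0, 'save': noop_save})
chanmock.save()  # no longer raises AttributeError
```

Any callable works as save; a no-op is enough when the run is purely local and never needs to persist state to the jobman database.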
```diff
--- a/deep/stacked_dae/sgd_optimization.py	Tue Mar 02 09:52:27 2010 -0500
+++ b/deep/stacked_dae/sgd_optimization.py	Tue Mar 02 14:47:18 2010 -0500
@@ -6,6 +6,7 @@
 import numpy
 import theano
 import time
+import datetime
 import theano.tensor as T
 import sys
 
@@ -85,7 +86,7 @@
         self.finetune()
 
     def pretrain(self):
-        print "STARTING PRETRAINING"
+        print "STARTING PRETRAINING, time = ", datetime.datetime.now()
         sys.stdout.flush()
 
         start_time = time.clock()
@@ -101,6 +102,8 @@
                 print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
                 sys.stdout.flush()
+
+                self.series_mux.append("params", self.classifier.all_params)
 
         end_time = time.clock()
@@ -110,7 +113,7 @@
         sys.stdout.flush()
 
     def finetune(self):
-        print "STARTING FINETUNING"
+        print "STARTING FINETUNING, time = ", datetime.datetime.now()
 
         index   = T.lscalar()    # index to a [mini]batch
         minibatch_size = self.hp.minibatch_size
```
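The new banners only prepend a wall-clock timestamp to the existing stage messages, which makes it possible to see how long pretraining ran before finetuning started when reading cluster logs after the fact. The same idea as a small helper, in the repo's Python 2 style (log_stage is a hypothetical name, not part of this codebase):

```python
import datetime
import sys

def log_stage(msg):
    # Stage banner plus wall-clock timestamp, matching the format in the diff.
    print msg + ", time = " + str(datetime.datetime.now())
    sys.stdout.flush()  # force the line out through buffered batch-job logs

log_stage("STARTING PRETRAINING")
log_stage("STARTING FINETUNING")
```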
```diff
--- a/deep/stacked_dae/stacked_dae.py	Tue Mar 02 09:52:27 2010 -0500
+++ b/deep/stacked_dae/stacked_dae.py	Tue Mar 02 14:47:18 2010 -0500
@@ -138,8 +138,6 @@
         self.params = [ self.W, self.b, self.b_prime ]
 
-
-
 class SdA(object):
     def __init__(self, train_set_x, train_set_y, batch_size,
                  n_ins, hidden_layers_sizes, n_outs,
@@ -147,6 +145,7 @@
         # Just to make sure those are not modified somewhere else afterwards
         hidden_layers_sizes = copy.deepcopy(hidden_layers_sizes)
         corruption_levels = copy.deepcopy(corruption_levels)
+        update_locals(self, locals())
 
         self.layers = []
```
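update_locals comes from deep/stacked_dae/utils.py and its definition is not part of this changeset; a plausible minimal sketch, assuming it simply mirrors the constructor arguments onto the instance so the hyperparameters stay inspectable on the SdA object later:

```python
def update_locals(obj, dct):
    # Copy every local (i.e. every constructor argument) onto the object,
    # dropping 'self' so the instance does not end up referencing itself.
    if 'self' in dct:
        del dct['self']
    obj.__dict__.update(dct)

class Example(object):
    def __init__(self, n_ins, hidden_layers_sizes):
        update_locals(self, locals())

e = Example(32*32, [1000, 1000])
print e.n_ins, e.hidden_layers_sizes   # 1024 [1000, 1000]
```

Calling it after the deepcopy lines, as the diff does, means the stored attributes point at the defensive copies rather than at lists the caller could still mutate.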