ift6266: changeset 139:7d8366fb90bf
Added __init__.py files throughout the tree so the scripts can be used with jobman paths, and made quite a few changes in stacked_dae to allow reusing work already done for tests where the pretraining is the same.
|  |  |
|---|---|
| author | fsavard |
| date | Mon, 22 Feb 2010 13:38:25 -0500 |
| parents | 128507ac4edf |
| children | 29fd19d67026 |
| files | scripts/stacked_dae/mnist_sda.py scripts/stacked_dae/nist_sda.py scripts/stacked_dae/sgd_optimization.py scripts/stacked_dae/stacked_dae.py scripts/stacked_dae/utils.py |
| diffstat | 5 files changed, 450 insertions(+), 165 deletions(-) |
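Before the per-file diffs, a note on why the new __init__.py files matter: jobman locates an experiment by importing the dotted path stored with each job (see EXPERIMENT_PATH = "ift6266.scripts.stacked_dae.nist_sda.jobman_entrypoint" in nist_sda.py below), which only works if every directory on that path is a Python package. A minimal sketch of what this implies, assuming the repository root is on PYTHONPATH (the exact layout is inferred from the paths in this changeset):

```python
# Sketch: the dotted jobman path resolves only if each directory is a package.
#
#   ift6266/__init__.py
#   ift6266/scripts/__init__.py
#   ift6266/scripts/stacked_dae/__init__.py
#   ift6266/scripts/stacked_dae/nist_sda.py
#
# With the repository root on PYTHONPATH, the entrypoint becomes importable:
from ift6266.scripts.stacked_dae.nist_sda import jobman_entrypoint
```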
--- a/scripts/stacked_dae/mnist_sda.py	Sun Feb 21 17:30:38 2010 -0600
+++ b/scripts/stacked_dae/mnist_sda.py	Mon Feb 22 13:38:25 2010 -0500
@@ -9,7 +9,7 @@
 import theano.tensor as T
 from theano.tensor.shared_randomstreams import RandomStreams
 
-from stacked_dae import sgd_optimization
+from sgd_optimization import SdaSgdOptimizer
 
 import cPickle, gzip
 from jobman import DD
@@ -31,12 +31,14 @@
                        'pretraining_lr':pretrain_lr,
                        'pretraining_epochs_per_layer':pretraining_epochs,
                        'max_finetuning_epochs':training_epochs,
-                       'hidden_layers_sizes':[1000,1000,1000],
-                       'corruption_levels':[0.2,0.2,0.2],
+                       'hidden_layers_sizes':[100],
+                       'corruption_levels':[0.2],
                        'minibatch_size':20})
 
-    sgd_optimization(dataset, hyperparameters, n_ins, n_outs)
+    optimizer = SdaSgdOptimizer(dataset, hyperparameters, n_ins, n_outs)
+    optimizer.pretrain()
+    optimizer.finetune()
 
 if __name__ == '__main__':
-    sgd_optimization_mnist()
+    sgd_optimization_mnist(dataset=MNIST_LOCATION)
--- a/scripts/stacked_dae/nist_sda.py	Sun Feb 21 17:30:38 2010 -0600
+++ b/scripts/stacked_dae/nist_sda.py	Mon Feb 22 13:38:25 2010 -0500
@@ -6,47 +6,135 @@
 import time
 import theano.tensor as T
 from theano.tensor.shared_randomstreams import RandomStreams
+import copy
+import sys
 import os.path
 
-from sgd_optimization import sgd_optimization
+from sgd_optimization import SdaSgdOptimizer
 from jobman import DD
+import jobman, jobman.sql
 from pylearn.io import filetensor
 
 from utils import produit_croise_jobs
 
+TEST_CONFIG = True
+
 NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
 
+JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/'
+REDUCE_TRAIN_TO = None
+MAX_FINETUNING_EPOCHS = 1000
+if TEST_CONFIG:
+    JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/'
+    REDUCE_TRAIN_TO = 1000
+    MAX_FINETUNING_EPOCHS = 2
+
+JOBDB_JOBS = JOBDB + 'fsavard_sda1_jobs'
+JOBDB_RESULTS = JOBDB + 'fsavard_sda1_results'
+EXPERIMENT_PATH = "ift6266.scripts.stacked_dae.nist_sda.jobman_entrypoint"
+
+# There used to be
+# 'finetuning_lr': [0.00001, 0.0001, 0.001, 0.01, 0.1]
+# and
+# 'num_hidden_layers':[1,2,3]
+# but this is now handled by a special mechanism in SgdOptimizer
+# to reuse intermediate results (for the same training of lower layers,
+# we can test many finetuning_lr)
+JOB_VALS = {'pretraining_lr': [0.1, 0.01, 0.001],#, 0.0001],
+        'pretraining_epochs_per_layer': [10,20],
+        'hidden_layers_sizes': [300,800],
+        'corruption_levels': [0.1,0.2],
+        'minibatch_size': [20],
+        'max_finetuning_epochs':[MAX_FINETUNING_EPOCHS]}
+FINETUNING_LR_VALS = [0.1, 0.01, 0.001]#, 0.0001]
+NUM_HIDDEN_LAYERS_VALS = [1,2,3]
+
 # Just useful for tests... minimal number of epochs
-DEFAULT_HP_NIST = DD({'finetuning_lr':0.1,
-                       'pretraining_lr':0.1,
+DEFAULT_HP_NIST = DD({'finetuning_lr':0.01,
+                       'pretraining_lr':0.01,
                        'pretraining_epochs_per_layer':1,
                        'max_finetuning_epochs':1,
-                       'hidden_layers_sizes':[1000,1000],
-                       'corruption_levels':[0.2,0.2],
+                       'hidden_layers_sizes':[1000],
+                       'corruption_levels':[0.2],
                        'minibatch_size':20})
 
-def jobman_entrypoint_nist(state, channel):
-    sgd_optimization_nist(state)
+def jobman_entrypoint(state, channel):
+    state = copy.copy(state)
+
+    print "Will load NIST"
+    nist = NIST(20)
+    print "NIST loaded"
+
+    rtt = None
+    if state.has_key('reduce_train_to'):
+        rtt = state['reduce_train_to']
+    elif REDUCE_TRAIN_TO:
+        rtt = REDUCE_TRAIN_TO
+
+    if rtt:
+        print "Reducing training set to ", rtt, " examples"
+        nist.reduce_train_set(rtt)
+
+    train,valid,test = nist.get_tvt()
+    dataset = (train,valid,test)
+
+    n_ins = 32*32
+    n_outs = 62 # 10 digits, 26*2 (lower, capitals)
+
+    db = jobman.sql.db(JOBDB_RESULTS)
+    optimizer = SdaSgdOptimizer(dataset, state, n_ins, n_outs,\
+                    input_divider=255.0, job_tree=True, results_db=db, \
+                    experiment=EXPERIMENT_PATH, \
+                    finetuning_lr_to_try=FINETUNING_LR_VALS, \
+                    num_hidden_layers_to_try=NUM_HIDDEN_LAYERS_VALS)
+    optimizer.train()
+
+    return channel.COMPLETE
+
+def estimate_pretraining_time(job):
+    job = DD(job)
+    # time spent on pretraining estimated as O(n^2) where n=num hidens
+    # no need to multiply by num_hidden_layers, as results from num=1
+    # is reused for num=2, or 3, so in the end we get the same time
+    # as if we were training 3 times a single layer
+    # constants:
+    # - 20 mins to pretrain a layer with 1000 units (per 1 epoch)
+    # - 12 mins to finetune (per 1 epoch)
+    # basically the job_tree trick gives us a 5 times speedup on the
+    # pretraining time due to reusing for finetuning_lr
+    # and gives us a second x2 speedup for reusing previous layers
+    # to explore num_hidden_layers
+    return (job.pretraining_epochs_per_layer * 20 / (1000.0*1000) \
+                * job.hidden_layer_sizes * job.hidden_layer_sizes)
+
+def estimate_total_time():
+    jobs = produit_croise_jobs(JOB_VALS)
+    sumtime = 0.0
+    sum_without = 0.0
+    for job in jobs:
+        sumtime += estimate_pretraining_time(job)
+    # 12 mins per epoch * 30 epochs
+    # 5 finetuning_lr per pretraining combination
+    sum_without = (12*20*len(jobs) + sumtime*2) * len(FINETUNING_LR_VALS)
+    sumtime += len(FINETUNING_LR_VALS) * len(jobs) * 12 * 20
+    print "num jobs=", len(jobs)
+    print "estimate", sumtime/60, " hours"
+    print "estimate without tree optimization", sum_without/60, "ratio", sumtime / sum_without
 
 def jobman_insert_nist():
-    vals = {'finetuning_lr': [0.00001, 0.0001, 0.001, 0.01, 0.1],
-            'pretraining_lr': [0.00001, 0.0001, 0.001, 0.01, 0.1],
-            'pretraining_epochs_per_layer': [2,5,20],
-            'hidden_layer_sizes': [100,300,1000],
-            'num_hidden_layers':[1,2,3],
-            'corruption_levels': [0.1,0.2,0.4],
-            'minibatch_size': [5,20,100]}
+    jobs = produit_croise_jobs(JOB_VALS)
 
-    jobs = produit_croise_jobs(vals)
-
+    db = jobman.sql.db(JOBDB_JOBS)
     for job in jobs:
-        insert_job(job)
+        job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
+        jobman.sql.insert_dict(job, db)
+
+    print "inserted"
 
 class NIST:
-    def __init__(self, minibatch_size, basepath=None):
+    def __init__(self, minibatch_size, basepath=None, reduce_train_to=None):
         global NIST_ALL_LOCATION
 
         self.minibatch_size = minibatch_size
@@ -61,8 +149,9 @@
         self.load_train_test()
 
         self.valid = [[], []]
-        #self.split_train_valid()
-
+        self.split_train_valid()
+        if reduce_train_to:
+            self.reduce_train_set(reduce_train_to)
 
     def get_tvt(self):
         return self.train, self.valid, self.test
@@ -84,6 +173,15 @@
             pair[i] = filetensor.read(f)
             f.close()
 
+    def reduce_train_set(self, max):
+        self.train[0] = self.train[0][:max]
+        self.train[1] = self.train[1][:max]
+
+        if max < len(self.test[0]):
+            for ar in (self.test, self.valid):
+                ar[0] = ar[0][:max]
+                ar[1] = ar[1][:max]
+
     def split_train_valid(self):
         test_len = len(self.test[0])
 
@@ -121,7 +219,7 @@
     import time
 
     t1 = time.time()
-    nist = NIST(20)
+    nist = NIST(20, reduce_train_to=100)
     t2 = time.time()
 
     print "NIST loaded. time delta = ", t2-t1
@@ -129,12 +227,17 @@
     train,valid,test = nist.get_tvt()
     dataset = (train,valid,test)
 
-    print "Lenghts train, valid, test: ", len(train[0]), len(valid[0]), len(test[0])
+    print train[0][15]
+    print type(train[0][1])
+
+
+    print "Lengths train, valid, test: ", len(train[0]), len(valid[0]), len(test[0])
 
     n_ins = 32*32
    n_outs = 62 # 10 digits, 26*2 (lower, capitals)
 
-    sgd_optimization(dataset, hp, n_ins, n_outs)
+    optimizer = SdaSgdOptimizer(dataset, hp, n_ins, n_outs, input_divider=255.0)
+    optimizer.train()
 
 if __name__ == '__main__':
 
@@ -145,6 +248,17 @@
 
     if len(args) > 0 and args[0] == 'load_nist':
         test_load_nist()
+    elif len(args) > 0 and args[0] == 'jobman_insert':
+        jobman_insert_nist()
+    elif len(args) > 0 and args[0] == 'test_job_tree':
+        # dont forget to comment out sql.inserts and make reduce_train_to=100
+        print "TESTING JOB TREE"
+        chanmock = {'COMPLETE':0}
+        hp = copy.copy(DEFAULT_HP_NIST)
+        hp.update({'reduce_train_to':100})
+        jobman_entrypoint(hp, chanmock)
+    elif len(args) > 0 and args[0] == 'estimate':
+        estimate_total_time()
     else:
         sgd_optimization_nist()
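A quick sanity check of the cost model in estimate_pretraining_time above (a sketch; the 20-minutes-per-1000-units-per-epoch constant comes from the comments in the diff). Note the function reads job.hidden_layer_sizes while JOB_VALS spells the key 'hidden_layers_sizes'; the same quantity is meant:

```python
# Worked example of the pretraining-time estimate for one JOB_VALS combination.
# JOB_VALS spans 3 * 2 * 2 * 2 * 1 * 1 = 24 jobs in total.
epochs, n_hidden = 20, 800
minutes = epochs * 20 / (1000.0 * 1000) * n_hidden * n_hidden
print minutes   # 256.0 minutes of pretraining for this single job
```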
--- a/scripts/stacked_dae/sgd_optimization.py	Sun Feb 21 17:30:38 2010 -0600
+++ b/scripts/stacked_dae/sgd_optimization.py	Mon Feb 22 13:38:25 2010 -0500
@@ -1,165 +1,270 @@
 #!/usr/bin/python
 # coding: utf-8
 
-# Generic SdA optimization loop, adapted slightly from the deeplearning.net tutorial
+# Generic SdA optimization loop, adapted from the deeplearning.net tutorial
 
 import numpy
 import theano
 import time
 import theano.tensor as T
+import copy
+import sys
 
 from jobman import DD
+import jobman, jobman.sql
 
 from stacked_dae import SdA
 
-def sgd_optimization(dataset, hyperparameters, n_ins, n_outs):
-    hp = hyperparameters
+def shared_dataset(data_xy):
+    data_x, data_y = data_xy
+    #shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
+    #shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
+    #shared_y = T.cast(shared_y, 'int32')
+    shared_x = theano.shared(data_x)
+    shared_y = theano.shared(data_y)
+    return shared_x, shared_y
 
-    printout_frequency = 1000
-
-    train_set, valid_set, test_set = dataset
+class SdaSgdOptimizer:
+    def __init__(self, dataset, hyperparameters, n_ins, n_outs, input_divider=1.0,\
+                    job_tree=False, results_db=None,\
+                    experiment="",\
+                    num_hidden_layers_to_try=[1,2,3], \
+                    finetuning_lr_to_try=[0.1, 0.01, 0.001, 0.0001, 0.00001]):
 
-    def shared_dataset(data_xy):
-        data_x, data_y = data_xy
-        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
-        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
-        return shared_x, T.cast(shared_y, 'int32')
+        self.dataset = dataset
+        self.hp = copy.copy(hyperparameters)
+        self.n_ins = n_ins
+        self.n_outs = n_outs
+        self.input_divider = numpy.asarray(input_divider, dtype=theano.config.floatX)
 
-    test_set_x, test_set_y = shared_dataset(test_set)
-    valid_set_x, valid_set_y = shared_dataset(valid_set)
-    train_set_x, train_set_y = shared_dataset(train_set)
+        self.job_tree = job_tree
+        self.results_db = results_db
+        self.experiment = experiment
+        if self.job_tree:
+            assert(not results_db is None)
+            # these hp should not be there, so we insert default values
+            # we use 3 hidden layers as we'll iterate through 1,2,3
+            self.hp.finetuning_lr = 0.1 # dummy value, will be replaced anyway
+            cl = self.hp.corruption_levels
+            nh = self.hp.hidden_layers_sizes
+            self.hp.corruption_levels = [cl,cl,cl]
+            self.hp.hidden_layers_sizes = [nh,nh,nh]
+
+        self.num_hidden_layers_to_try = num_hidden_layers_to_try
+        self.finetuning_lr_to_try = finetuning_lr_to_try
+
+        self.printout_frequency = 1000
 
-    # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.value.shape[0] / hp.minibatch_size
-    n_valid_batches = valid_set_x.value.shape[0] / hp.minibatch_size
-    n_test_batches  = test_set_x.value.shape[0]  / hp.minibatch_size
+        self.rng = numpy.random.RandomState(1234)
+
+        self.init_datasets()
+        self.init_classifier()
+
+    def init_datasets(self):
+        print "init_datasets"
+        train_set, valid_set, test_set = self.dataset
+        self.test_set_x, self.test_set_y = shared_dataset(test_set)
+        self.valid_set_x, self.valid_set_y = shared_dataset(valid_set)
+        self.train_set_x, self.train_set_y = shared_dataset(train_set)
+
+        # compute number of minibatches for training, validation and testing
+        self.n_train_batches = self.train_set_x.value.shape[0] / self.hp.minibatch_size
+        self.n_valid_batches = self.valid_set_x.value.shape[0] / self.hp.minibatch_size
+        self.n_test_batches  = self.test_set_x.value.shape[0]  / self.hp.minibatch_size
 
-    # allocate symbolic variables for the data
-    index   = T.lscalar()    # index to a [mini]batch
-
-    # construct the stacked denoising autoencoder class
-    classifier = SdA( train_set_x=train_set_x, train_set_y = train_set_y,\
-                      batch_size = hp.minibatch_size, n_ins= n_ins, \
-                      hidden_layers_sizes = hp.hidden_layers_sizes, n_outs=10, \
-                      corruption_levels = hp.corruption_levels,\
-                      rng = numpy.random.RandomState(1234),\
-                      pretrain_lr = hp.pretraining_lr, finetune_lr = hp.finetuning_lr )
+    def init_classifier(self):
+        print "Constructing classifier"
+        # construct the stacked denoising autoencoder class
+        self.classifier = SdA( \
+                          train_set_x= self.train_set_x, \
+                          train_set_y = self.train_set_y,\
+                          batch_size = self.hp.minibatch_size, \
+                          n_ins= self.n_ins, \
+                          hidden_layers_sizes = self.hp.hidden_layers_sizes, \
+                          n_outs = self.n_outs, \
+                          corruption_levels = self.hp.corruption_levels,\
+                          rng = self.rng,\
+                          pretrain_lr = self.hp.pretraining_lr, \
+                          finetune_lr = self.hp.finetuning_lr,\
+                          input_divider = self.input_divider )
 
-    printout_acc = 0.0
+    def train(self):
+        self.pretrain()
+        if not self.job_tree:
+            # if job_tree is True, finetuning was already performed
+            self.finetune()
+
+    def pretrain(self):
+        print "STARTING PRETRAINING"
 
-    start_time = time.clock()
-    ## Pre-train layer-wise
-    for i in xrange(classifier.n_layers):
-        # go through pretraining epochs
-        for epoch in xrange(hp.pretraining_epochs_per_layer):
-            # go through the training set
-            for batch_index in xrange(n_train_batches):
-                c = classifier.pretrain_functions[i](batch_index)
+        printout_acc = 0.0
+        last_error = 0.0
+
+        start_time = time.clock()
+        ## Pre-train layer-wise
+        for i in xrange(self.classifier.n_layers):
+            # go through pretraining epochs
+            for epoch in xrange(self.hp.pretraining_epochs_per_layer):
+                # go through the training set
+                for batch_index in xrange(self.n_train_batches):
+                    c = self.classifier.pretrain_functions[i](batch_index)
 
-                print c
+                    printout_acc += c / self.printout_frequency
+                    if (batch_index+1) % self.printout_frequency == 0:
+                        print batch_index, "reconstruction cost avg=", printout_acc
+                        last_error = printout_acc
+                        printout_acc = 0.0
+
+                print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
+
+            self.job_splitter(i+1, time.clock()-start_time, last_error)
+
+        end_time = time.clock()
+
+        print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
+
+    # Save time by reusing intermediate results
+    def job_splitter(self, current_pretraining_layer, pretraining_time, last_error):
+
+        state_copy = None
+        original_classifier = None
+
+        if self.job_tree and current_pretraining_layer in self.num_hidden_layers_to_try:
+            for lr in self.finetuning_lr_to_try:
+                sys.stdout.flush()
+                sys.stderr.flush()
+
+                state_copy = copy.copy(self.hp)
 
-                printout_acc += c / printout_frequency
-                if (batch_index+1) % printout_frequency == 0:
-                    print batch_index, "reconstruction cost avg=", printout_acc
-                    printout_acc = 0.0
-
-            print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
-
-    end_time = time.clock()
+                self.hp.update({'num_hidden_layers':current_pretraining_layer, \
+                            'finetuning_lr':lr,\
+                            'pretraining_time':pretraining_time,\
+                            'last_reconstruction_error':last_error})
 
-    print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
-    # Fine-tune the entire model
+                original_classifier = self.classifier
+                print "ORIGINAL CLASSIFIER MEANS",original_classifier.get_params_means()
+                self.classifier = SdA.copy_reusing_lower_layers(original_classifier, current_pretraining_layer, new_finetuning_lr=lr)
+
+                self.finetune()
+
+                self.insert_finished_job()
+
+                print "NEW CLASSIFIER MEANS AFTERWARDS",self.classifier.get_params_means()
+                print "ORIGINAL CLASSIFIER MEANS AFTERWARDS",original_classifier.get_params_means()
+                self.classifier = original_classifier
+                self.hp = state_copy
+
+    def insert_finished_job(self):
+        job = copy.copy(self.hp)
+        job[jobman.sql.STATUS] = jobman.sql.DONE
+        job[jobman.sql.EXPERIMENT] = self.experiment
 
-    minibatch_size = hp.minibatch_size
+        # don,t try to store arrays in db
+        job['hidden_layers_sizes'] = job.hidden_layers_sizes[0]
+        job['corruption_levels'] = job.corruption_levels[0]
+
+        print "Will insert finished job", job
+        jobman.sql.insert_dict(jobman.flatten(job), self.results_db)
+
+    def finetune(self):
+        print "STARTING FINETUNING"
 
-    # create a function to compute the mistakes that are made by the model
-    # on the validation set, or testing set
-    test_model = theano.function([index], classifier.errors,
-             givens = {
-               classifier.x: test_set_x[index*minibatch_size:(index+1)*minibatch_size],
-               classifier.y: test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
+        index   = T.lscalar()    # index to a [mini]batch
+        minibatch_size = self.hp.minibatch_size
 
-    validate_model = theano.function([index], classifier.errors,
-            givens = {
-               classifier.x: valid_set_x[index*minibatch_size:(index+1)*minibatch_size],
-               classifier.y: valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
+        # create a function to compute the mistakes that are made by the model
+        # on the validation set, or testing set
+        test_model = theano.function([index], self.classifier.errors,
+                 givens = {
+                   self.classifier.x: self.test_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                   self.classifier.y: self.test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
+
+        validate_model = theano.function([index], self.classifier.errors,
+                givens = {
+                   self.classifier.x: self.valid_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                   self.classifier.y: self.valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
 
-    # early-stopping parameters
-    patience = 10000 # look as this many examples regardless
-    patience_increase = 2. # wait this much longer when a new best is
-                           # found
-    improvement_threshold = 0.995 # a relative improvement of this much is
-                                  # considered significant
-    validation_frequency = min(n_train_batches, patience/2)
-                           # go through this many
-                           # minibatche before checking the network
-                           # on the validation set; in this case we
-                           # check every epoch
+        # early-stopping parameters
+        patience = 10000 # look as this many examples regardless
+        patience_increase = 2. # wait this much longer when a new best is
+                               # found
+        improvement_threshold = 0.995 # a relative improvement of this much is
+                                      # considered significant
+        validation_frequency = min(self.n_train_batches, patience/2)
+                               # go through this many
+                               # minibatche before checking the network
+                               # on the validation set; in this case we
+                               # check every epoch
 
-    best_params = None
-    best_validation_loss = float('inf')
-    test_score = 0.
-    start_time = time.clock()
+        best_params = None
+        best_validation_loss = float('inf')
+        test_score = 0.
+        start_time = time.clock()
 
-    done_looping = False
-    epoch = 0
+        done_looping = False
+        epoch = 0
 
-    printout_acc = 0.0
+        printout_acc = 0.0
 
-    print "----- START FINETUNING -----"
+        if not self.hp.has_key('max_finetuning_epochs'):
+            self.hp.max_finetuning_epochs = 1000
 
-    while (epoch < hp.max_finetuning_epochs) and (not done_looping):
-        epoch = epoch + 1
-        for minibatch_index in xrange(n_train_batches):
+        while (epoch < self.hp.max_finetuning_epochs) and (not done_looping):
+            epoch = epoch + 1
+            for minibatch_index in xrange(self.n_train_batches):
 
-            cost_ij = classifier.finetune(minibatch_index)
-            iter = epoch * n_train_batches + minibatch_index
+                cost_ij = self.classifier.finetune(minibatch_index)
+                iter = epoch * self.n_train_batches + minibatch_index
 
-            printout_acc += cost_ij / float(printout_frequency * minibatch_size)
-            if (iter+1) % printout_frequency == 0:
-                print iter, "cost avg=", printout_acc
-                printout_acc = 0.0
+                printout_acc += cost_ij / float(self.printout_frequency * minibatch_size)
+                if (iter+1) % self.printout_frequency == 0:
+                    print iter, "cost avg=", printout_acc
+                    printout_acc = 0.0
 
-            if (iter+1) % validation_frequency == 0:
-
-                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
-                this_validation_loss = numpy.mean(validation_losses)
-                print('epoch %i, minibatch %i/%i, validation error %f %%' % \
-                       (epoch, minibatch_index+1, n_train_batches, \
-                        this_validation_loss*100.))
+                if (iter+1) % validation_frequency == 0:
+
+                    validation_losses = [validate_model(i) for i in xrange(self.n_valid_batches)]
+                    this_validation_loss = numpy.mean(validation_losses)
+                    print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+                           (epoch, minibatch_index+1, self.n_train_batches, \
+                            this_validation_loss*100.))
 
-                # if we got the best validation score until now
-                if this_validation_loss < best_validation_loss:
+                    # if we got the best validation score until now
+                    if this_validation_loss < best_validation_loss:
 
-                    #improve patience if loss improvement is good enough
-                    if this_validation_loss < best_validation_loss * \
-                           improvement_threshold :
-                        patience = max(patience, iter * patience_increase)
+                        #improve patience if loss improvement is good enough
+                        if this_validation_loss < best_validation_loss * \
+                               improvement_threshold :
+                            patience = max(patience, iter * patience_increase)
 
-                    # save best validation score and iteration number
-                    best_validation_loss = this_validation_loss
-                    best_iter = iter
+                        # save best validation score and iteration number
+                        best_validation_loss = this_validation_loss
+                        best_iter = iter
 
-                    # test it on the test set
-                    test_losses = [test_model(i) for i in xrange(n_test_batches)]
-                    test_score = numpy.mean(test_losses)
-                    print((' epoch %i, minibatch %i/%i, test error of best '
-                           'model %f %%') %
-                             (epoch, minibatch_index+1, n_train_batches,
-                              test_score*100.))
+                        # test it on the test set
+                        test_losses = [test_model(i) for i in xrange(self.n_test_batches)]
+                        test_score = numpy.mean(test_losses)
+                        print((' epoch %i, minibatch %i/%i, test error of best '
+                               'model %f %%') %
+                                 (epoch, minibatch_index+1, self.n_train_batches,
+                                  test_score*100.))
 
-            if patience <= iter :
+                if patience <= iter :
                 done_looping = True
                 break
 
-    end_time = time.clock()
-    print(('Optimization complete with best validation score of %f %%,'
-           'with test performance %f %%') %
-
-                 (best_validation_loss * 100., test_score*100.))
-    print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
+        end_time = time.clock()
+        self.hp.update({'finetuning_time':end_time-start_time,\
+                    'best_validation_error':best_validation_loss,\
+                    'test_score':test_score,
+                    'num_finetuning_epochs':epoch})
+        print(('Optimization complete with best validation score of %f %%,'
+               'with test performance %f %%') %
+                 (best_validation_loss * 100., test_score*100.))
+        print ('The finetuning ran for %f minutes' % ((end_time-start_time)/60.))
+
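The core of this changeset is pretrain()/job_splitter() above: pretraining proceeds layer by layer, and after each depth listed in num_hidden_layers_to_try, the optimizer branches once per candidate finetuning_lr, finetuning a truncated copy of the network while the original keeps pretraining. A runnable toy of the branching schedule (names and counts here are illustrative, not the real API):

```python
# Toy sketch of the job-tree schedule: 3 depths x 3 learning rates means
# 9 finetuning runs share a single pretraining pass.
NUM_HIDDEN_LAYERS_TO_TRY = [1, 2, 3]
FINETUNING_LR_TO_TRY = [0.1, 0.01, 0.001]

results = []
for layer in xrange(3):
    # ... pretraining of layer `layer` would happen here ...
    depth = layer + 1
    if depth in NUM_HIDDEN_LAYERS_TO_TRY:
        for lr in FINETUNING_LR_TO_TRY:
            # each branch finetunes a copy truncated to `depth` layers
            results.append({'num_hidden_layers': depth, 'finetuning_lr': lr})

print len(results)   # 9 finetuning jobs from one pretraining pass
```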
--- a/scripts/stacked_dae/stacked_dae.py	Sun Feb 21 17:30:38 2010 -0600
+++ b/scripts/stacked_dae/stacked_dae.py	Mon Feb 22 13:38:25 2010 -0500
@@ -6,6 +6,9 @@
 import time
 import theano.tensor as T
 from theano.tensor.shared_randomstreams import RandomStreams
+import copy
+
+from utils import update_locals
 
 class LogisticRegression(object):
     def __init__(self, input, n_in, n_out):
@@ -140,13 +143,16 @@
 
 class SdA(object):
     def __init__(self, train_set_x, train_set_y, batch_size, n_ins, 
                  hidden_layers_sizes, n_outs, 
-                 corruption_levels, rng, pretrain_lr, finetune_lr):
-        
+                 corruption_levels, rng, pretrain_lr, finetune_lr, input_divider=1.0):
+        update_locals(self, locals())
+
         self.layers             = []
         self.pretrain_functions = []
         self.params             = []
         self.n_layers           = len(hidden_layers_sizes)
+
+        self.input_divider = numpy.asarray(input_divider, dtype=theano.config.floatX)
+
         if len(hidden_layers_sizes) < 1 :
             raiseException (' You must have at least one hidden layer ')
@@ -200,7 +206,7 @@
             update_fn = theano.function([index], dA_layer.cost, \
                   updates = updates,
                   givens = {
-                     self.x : train_set_x[index*batch_size:(index+1)*batch_size]})
+                     self.x : train_set_x[index*batch_size:(index+1)*batch_size] / self.input_divider})
             # collect this function into a list
             self.pretrain_functions += [update_fn]
@@ -225,7 +231,7 @@
         self.finetune = theano.function([index], cost, 
                 updates = updates,
                 givens = {
-                  self.x : train_set_x[index*batch_size:(index+1)*batch_size],
+                  self.x : train_set_x[index*batch_size:(index+1)*batch_size]/self.input_divider,
                   self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
 
         # symbolic variable that points to the number of errors made on the
@@ -233,23 +239,49 @@
         self.errors = self.logLayer.errors(self.y)
 
+    @classmethod
+    def copy_reusing_lower_layers(cls, obj, num_hidden_layers, new_finetuning_lr=None):
+        assert(num_hidden_layers <= obj.n_layers)
+
+        if not new_finetuning_lr:
+            new_finetuning_lr = obj.finetune_lr
+
+        new_sda = cls(train_set_x= obj.train_set_x, \
+                      train_set_y = obj.train_set_y,\
+                      batch_size = obj.batch_size, \
+                      n_ins= obj.n_ins, \
+                      hidden_layers_sizes = obj.hidden_layers_sizes[:num_hidden_layers], \
+                      n_outs = obj.n_outs, \
+                      corruption_levels = obj.corruption_levels[:num_hidden_layers],\
+                      rng = obj.rng,\
+                      pretrain_lr = obj.pretrain_lr, \
+                      finetune_lr = new_finetuning_lr, \
+                      input_divider = obj.input_divider )
+
+        # new_sda.layers contains only the hidden layers actually
+        for i, layer in enumerate(new_sda.layers):
+            original_layer = obj.layers[i]
+            for p1,p2 in zip(layer.params, original_layer.params):
+                p1.value = p2.value.copy()
+
+        return new_sda
+
+    def get_params_copy(self):
+        return copy.deepcopy(self.params)
+
+    def set_params_from_copy(self, copy):
+        # We don't want to replace the var, as the functions have pointers in there
+        # We only want to replace values.
+        for i, p in enumerate(self.params):
+            p.value = copy[i].value
+
+    def get_params_means(self):
+        s = []
+        for p in self.params:
+            s.append(numpy.mean(p.value))
+        return s
+
 if __name__ == '__main__':
     import sys
     args = sys.argv[1:]
-    if len(args) < 1:
-        print "Options: mnist, jobman_add, load_nist"
-        sys.exit(0)
-
-    if args[0] == "jobman_add":
-        jobman_add()
-    elif args[0] == "mnist":
-        sgd_optimization_mnist(dataset=MNIST_LOCATION)
-    elif args[0] == "load_nist":
-        load_nist_test()
-    elif args[0] == "nist":
-        sgd_optimization_nist()
-    elif args[0] == "pc":
-        test_produit_croise_jobs()
-
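copy_reusing_lower_layers() above deliberately copies parameter values (p1.value = p2.value.copy()) rather than swapping the shared variables themselves, since compiled Theano functions keep references to the original variables and only the contents may change. A tiny numpy analogy of copying values while keeping object identity (a sketch only, not Theano code):

```python
import numpy

# pretrained layer parameters vs. the matching layer of a new, shorter network
original = numpy.ones((3, 3))
clone = numpy.zeros((3, 3))

clone[:] = original.copy()      # overwrite contents, keep the array object
assert clone is not original    # two distinct parameter containers
```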
--- a/scripts/stacked_dae/utils.py	Sun Feb 21 17:30:38 2010 -0600
+++ b/scripts/stacked_dae/utils.py	Mon Feb 22 13:38:25 2010 -0500
@@ -2,6 +2,12 @@
 
 from jobman import DD
 
+# from pylearn codebase
+def update_locals(obj, dct):
+    if 'self' in dct:
+        del dct['self']
+    obj.__dict__.update(dct)
+
 def produit_croise_jobs(val_dict):
     job_list = [DD()]
     all_keys = val_dict.keys()
@@ -23,3 +29,29 @@
 
     print produit_croise_jobs(vals)
 
+# taken from http://stackoverflow.com/questions/276052/how-to-get-current-cpu-and-ram-usage-in-python
+"""Simple module for getting amount of memory used by a specified user's
+processes on a UNIX system.
+It uses UNIX ps utility to get the memory usage for a specified username and
+pipe it to awk for summing up per application memory usage and return the total.
+Python's Popen() from subprocess module is used for spawning ps and awk.
+
+"""
+
+import subprocess
+
+class MemoryMonitor(object):
+
+    def __init__(self, username):
+        """Create new MemoryMonitor instance."""
+        self.username = username
+
+    def usage(self):
+        """Return int containing memory used by user's processes."""
+        self.process = subprocess.Popen("ps -u %s -o rss | awk '{sum+=$1} END {print sum}'" % self.username,
+                                        shell=True,
+                                        stdout=subprocess.PIPE,
+                                        )
+        self.stdout_list = self.process.communicate()[0].split('\n')
+        return int(self.stdout_list[0])
+
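For reference, usage sketches for the two utilities added above (the username and hyperparameter values are made up):

```python
from utils import produit_croise_jobs, MemoryMonitor

# Cross product of hyperparameter values: one DD per combination
jobs = produit_croise_jobs({'lr': [0.1, 0.01], 'n_hidden': [300, 800]})
print len(jobs)          # 4 jobs, one per (lr, n_hidden) pair

# Total resident memory (in KB, as reported by ps) of one user's processes
monitor = MemoryMonitor('fsavard')
print monitor.usage()
```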