view deep/stacked_dae/v_sylvain/sgd_optimization.py @ 234:c452e3a0a3b1
Change of the dataset that will be used
| author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
|---|---|
| date | Sun, 14 Mar 2010 15:17:04 -0400 |
| parents | 02ed13244133 |
| children | ecb69e17950b |
line source
#!/usr/bin/python
# coding: utf-8

# Generic SdA optimization loop, adapted from the deeplearning.net tutorial

import numpy
import theano
import time
import datetime
import theano.tensor as T
import sys

from jobman import DD
import jobman, jobman.sql

from stacked_dae import SdA

from ift6266.utils.seriestables import *

##def shared_dataset(data_xy):
##    data_x, data_y = data_xy
##    if theano.config.device.startswith("gpu"):
##        print "TRANSFERRING DATASETS (via shared()) TO GPU"
##        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
##        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
##        shared_y = T.cast(shared_y, 'int32')
##    else:
##        print "WILL RUN ON CPU, NOT GPU, SO DATASETS REMAIN IN BYTES"
##        shared_x = theano.shared(data_x)
##        shared_y = theano.shared(data_y)
##    return shared_x, shared_y

###### The shared variables will be replaced using "givens" in the function definitions further down
def shared_dataset(batch_size, n_in):
    shared_x = theano.shared(numpy.asarray(numpy.zeros((batch_size, n_in)),
                                           dtype=theano.config.floatX))
    shared_y = theano.shared(numpy.asarray(numpy.zeros(batch_size),
                                           dtype=theano.config.floatX))
    return shared_x, shared_y

default_series = {
    'reconstruction_error': DummySeries(),
    'training_error': DummySeries(),
    'validation_error': DummySeries(),
    'test_error': DummySeries(),
    'params': DummySeries()
}

class SdaSgdOptimizer:
    def __init__(self, dataset, hyperparameters, n_ins, n_outs,
                 input_divider=1.0, series=default_series):
        self.dataset = dataset
        self.hp = hyperparameters
        self.n_ins = n_ins
        self.n_outs = n_outs
        self.input_divider = input_divider

        self.series = series

        self.rng = numpy.random.RandomState(1234)

        self.init_datasets()
        self.init_classifier()

        sys.stdout.flush()

    def init_datasets(self):
        print "init_datasets"
        sys.stdout.flush()

        #train_set, valid_set, test_set = self.dataset
        self.test_set_x, self.test_set_y = shared_dataset(self.hp.minibatch_size, self.n_ins)
        self.valid_set_x, self.valid_set_y = shared_dataset(self.hp.minibatch_size, self.n_ins)
        self.train_set_x, self.train_set_y = shared_dataset(self.hp.minibatch_size, self.n_ins)

        # compute number of minibatches for training, validation and testing
        self.n_train_batches = self.train_set_x.value.shape[0] / self.hp.minibatch_size
        self.n_valid_batches = self.valid_set_x.value.shape[0] / self.hp.minibatch_size
        # remove last batch in case it's incomplete
        self.n_test_batches = (self.test_set_x.value.shape[0] / self.hp.minibatch_size) - 1

    def init_classifier(self):
        print "Constructing classifier"

        # we don't want to save arrays in DD objects, so
        # we recreate those arrays here
        nhl = self.hp.num_hidden_layers
        layers_sizes = [self.hp.hidden_layers_sizes] * nhl
        corruption_levels = [self.hp.corruption_levels] * nhl

        # construct the stacked denoising autoencoder class
        self.classifier = SdA( \
                          train_set_x=self.train_set_x, \
                          train_set_y=self.train_set_y, \
                          batch_size=self.hp.minibatch_size, \
                          n_ins=self.n_ins, \
                          hidden_layers_sizes=layers_sizes, \
                          n_outs=self.n_outs, \
                          corruption_levels=corruption_levels, \
                          rng=self.rng, \
                          pretrain_lr=self.hp.pretraining_lr, \
                          finetune_lr=self.hp.finetuning_lr, \
                          input_divider=self.input_divider)

        #theano.printing.pydotprint(self.classifier.pretrain_functions[0], "function.graph")

        sys.stdout.flush()

    def train(self):
        self.pretrain(self.dataset)
        self.finetune(self.dataset)

    def pretrain(self, dataset):
        print "STARTING PRETRAINING, time = ", datetime.datetime.now()
        sys.stdout.flush()

        start_time = time.clock()
        ## Pre-train layer-wise
        for i in xrange(self.classifier.n_layers):
            # go through pretraining epochs
            for epoch in xrange(self.hp.pretraining_epochs_per_layer):
                # go through the training set; batch_index was referenced but
                # never defined in the original, so it is tracked explicitly here
                batch_index = 0
                for x, y in dataset.train(self.hp.minibatch_size):
                    c = self.classifier.pretrain_functions[i](x)
                    self.series["reconstruction_error"].append((epoch, batch_index), c)
                    batch_index += 1

                print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch), c
                sys.stdout.flush()

                self.series['params'].append((epoch,), self.classifier.all_params)

        end_time = time.clock()

        print ('Pretraining took %f minutes' % ((end_time - start_time) / 60.))
        self.hp.update({'pretraining_time': end_time - start_time})

        sys.stdout.flush()

    def finetune(self, dataset):
        print "STARTING FINETUNING, time = ", datetime.datetime.now()

        #index = T.lscalar()    # index to a [mini]batch
        minibatch_size = self.hp.minibatch_size

        # symbolic minibatch inputs; these were used below but never declared
        # in the original, so they are declared here
        ensemble_x = T.matrix('ensemble_x')
        ensemble_y = T.ivector('ensemble_y')

        # create a function to compute the mistakes that are made by the model
        # on the validation set, or testing set
        shared_divider = theano.shared(numpy.asarray(self.input_divider,
                                                     dtype=theano.config.floatX))
        test_model = theano.function([ensemble_x, ensemble_y], self.classifier.errors,
                givens={
                  #self.classifier.x: self.test_set_x[index*minibatch_size:(index+1)*minibatch_size] / shared_divider,
                  #self.classifier.y: self.test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
                  self.classifier.x: ensemble_x,
                  self.classifier.y: ensemble_y})

        validate_model = theano.function([ensemble_x, ensemble_y], self.classifier.errors,
                givens={
                  #self.classifier.x: self.valid_set_x[index*minibatch_size:(index+1)*minibatch_size] / shared_divider,
                  #self.classifier.y: self.valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
                  self.classifier.x: ensemble_x,
                  self.classifier.y: ensemble_y})

        # early-stopping parameters
        patience = 10000                # look at this many examples regardless
        patience_increase = 2.          # wait this much longer when a new best is found
        improvement_threshold = 0.995   # a relative improvement of this much is
                                        # considered significant
        validation_frequency = min(self.n_train_batches, patience / 2)
                                        # go through this many minibatches before
                                        # checking the network on the validation set;
                                        # in this case we check every epoch

        best_params = None
        best_validation_loss = float('inf')
        test_score = 0.
        start_time = time.clock()

        done_looping = False
        epoch = 0

        while (epoch < self.hp.max_finetuning_epochs) and (not done_looping):
            epoch = epoch + 1
            minibatch_index = -1
            for x, y in dataset.train(minibatch_size):
                minibatch_index += 1
                cost_ij = self.classifier.finetune(x, y)
                iter = epoch * self.n_train_batches + minibatch_index

                self.series["training_error"].append((epoch, minibatch_index), cost_ij)

                if (iter + 1) % validation_frequency == 0:

                    validation_losses = [validate_model(x, y)
                                         for x, y in dataset.valid(minibatch_size)]
                    this_validation_loss = numpy.mean(validation_losses)

                    self.series["validation_error"].\
                        append((epoch, minibatch_index), this_validation_loss * 100.)

                    print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                          (epoch, minibatch_index + 1, self.n_train_batches, \
                           this_validation_loss * 100.))

                    # if we got the best validation score until now
                    if this_validation_loss < best_validation_loss:

                        # improve patience if loss improvement is good enough
                        if this_validation_loss < best_validation_loss * \
                               improvement_threshold:
                            patience = max(patience, iter * patience_increase)

                        # save best validation score and iteration number
                        best_validation_loss = this_validation_loss
                        best_iter = iter

                        # test it on the test set
                        test_losses = [test_model(x, y)
                                       for x, y in dataset.test(minibatch_size)]
                        test_score = numpy.mean(test_losses)

                        self.series["test_error"].\
                            append((epoch, minibatch_index), test_score * 100.)

                        print(('     epoch %i, minibatch %i/%i, test error of best '
                               'model %f %%') %
                              (epoch, minibatch_index + 1, self.n_train_batches,
                               test_score * 100.))

                    sys.stdout.flush()

            self.series['params'].append((epoch,), self.classifier.all_params)

            if patience <= iter:
                done_looping = True
                break

        end_time = time.clock()
        self.hp.update({'finetuning_time': end_time - start_time,
                        'best_validation_error': best_validation_loss,
                        'test_score': test_score,
                        'num_finetuning_epochs': epoch})

        print(('Optimization complete with best validation score of %f %%, '
               'with test performance %f %%') %
              (best_validation_loss * 100., test_score * 100.))
        print ('The finetuning ran for %f minutes' % ((end_time - start_time) / 60.))
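For context, here is a minimal, hypothetical driver sketch showing how this class is meant to be used. The hyperparameter values, the `my_dataset` object, and its `train`/`valid`/`test` minibatch iterators are assumptions inferred from the code above; only the `SdaSgdOptimizer` interface itself comes from this file.

    # Hypothetical driver -- a sketch only. The hyperparameter values and the
    # dataset object are assumptions; only the SdaSgdOptimizer interface is real.
    from jobman import DD
    from sgd_optimization import SdaSgdOptimizer

    hp = DD({'minibatch_size': 20,
             'num_hidden_layers': 3,
             'hidden_layers_sizes': 1000,        # one int, replicated per layer by init_classifier()
             'corruption_levels': 0.2,           # likewise replicated per layer
             'pretraining_lr': 0.001,
             'pretraining_epochs_per_layer': 10,
             'finetuning_lr': 0.1,
             'max_finetuning_epochs': 1000})

    # my_dataset is assumed to expose train/valid/test methods, each yielding
    # (x, y) minibatches, matching how pretrain() and finetune() iterate above.
    optimizer = SdaSgdOptimizer(dataset=my_dataset, hyperparameters=hp,
                                n_ins=32*32, n_outs=10, input_divider=255.0)
    optimizer.train()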