ift6266 Mercurial repository — comparison: deep/stacked_dae/sgd_optimization.py @ 186:d364a130b221
Added the base code for scalar_series. Changes to stacked_dae: fixed a problem with the input_divider (it was preventing an optimization) and added use of the series. Also, in case I hadn't already committed it: I removed the pretraining-reuse mechanism; it was complicated (error prone) and it produced jobs that ran far too long.
| author | fsavard |
|---|---|
| date | Mon, 01 Mar 2010 11:45:25 -0500 |
| parents | b9ea8e2d071a |
| children | 3632e6258642 |
comparing 185:b9ea8e2d071a (parent) with 186:d364a130b221
```diff
@@ -31,11 +31,11 @@
     def __init__(self, dataset, hyperparameters, n_ins, n_outs, input_divider=1.0, series_mux=None):
         self.dataset = dataset
         self.hp = hyperparameters
         self.n_ins = n_ins
         self.n_outs = n_outs
-        self.input_divider = numpy.asarray(input_divider, dtype=theano.config.floatX)
+        self.input_divider = input_divider
 
         if not series_mux:
             series_mux = DummyMux()
             print "No series multiplexer set"
         self.series_mux = series_mux
```
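The fallback above implies that a series multiplexer only needs an `append(name, value)` method; when none is supplied, a `DummyMux` stands in so that every logging call becomes a no-op. The real `DummyMux` lives elsewhere in the repository and is not shown in this diff; a minimal sketch of the assumed interface (the `PrintMux` variant is hypothetical, not from the repository):

```python
# Minimal sketch of the series-multiplexer interface assumed by
# sgd_optimization.py: only append(name, value) is required.

class DummyMux(object):
    """Fallback multiplexer: silently discards every logged value."""
    def append(self, name, value):
        pass

class PrintMux(object):
    """Hypothetical toy multiplexer that routes each series to stdout."""
    def append(self, name, value):
        print "series %s: %s" % (name, value)

mux = DummyMux()
mux.append("validation_error", 0.42)   # silently discarded
```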
```diff
@@ -115,18 +115,19 @@
         index = T.lscalar()    # index to a [mini]batch
         minibatch_size = self.hp.minibatch_size
 
         # create a function to compute the mistakes that are made by the model
         # on the validation set, or testing set
+        shared_divider = theano.shared(numpy.asarray(self.input_divider, dtype=theano.config.floatX))
         test_model = theano.function([index], self.classifier.errors,
                 givens = {
-                  self.classifier.x: self.test_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                  self.classifier.x: self.test_set_x[index*minibatch_size:(index+1)*minibatch_size] / shared_divider,
                   self.classifier.y: self.test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
 
         validate_model = theano.function([index], self.classifier.errors,
                 givens = {
-                  self.classifier.x: self.valid_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                  self.classifier.x: self.valid_set_x[index*minibatch_size:(index+1)*minibatch_size] / shared_divider,
                   self.classifier.y: self.valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
 
 
         # early-stopping parameters
         patience = 10000 # look as this many examples regardless
```
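This hunk is the input_divider fix named in the commit message: the constructor now stores the raw divider, and the numpy conversion is wrapped once in `theano.shared` before the functions are compiled, so the division stays a shared-variable op inside the compiled graph instead of a numpy constant baked into each `givens` substitution — plausibly the optimization that was being blocked. A self-contained sketch of the pattern, with illustrative shapes and names not taken from the repository:

```python
import numpy
import theano
import theano.tensor as T

# Illustrative stand-in for a dataset; in the real code these are the
# train/valid/test sets already living in shared variables.
data = theano.shared(numpy.asarray(numpy.random.rand(100, 10),
                                   dtype=theano.config.floatX))
x = T.matrix('x')
index = T.lscalar()
minibatch_size = 20

# Wrapping the divider in theano.shared keeps the division on-graph as a
# shared-variable op rather than embedding a fresh constant per function.
shared_divider = theano.shared(numpy.asarray(255.0, dtype=theano.config.floatX))

mean_fn = theano.function(
    [index], x.mean(),
    givens={x: data[index * minibatch_size:(index + 1) * minibatch_size]
               / shared_divider})

print mean_fn(0)
```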
```diff
@@ -159,10 +160,11 @@
 
             if (iter+1) % validation_frequency == 0:
 
                 validation_losses = [validate_model(i) for i in xrange(self.n_valid_batches)]
                 this_validation_loss = numpy.mean(validation_losses)
+                self.series_mux.append("validation_error", this_validation_loss)
                 print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                        (epoch, minibatch_index+1, self.n_train_batches, \
                         this_validation_loss*100.))
 
 
```
```diff
@@ -179,18 +181,19 @@
                     best_iter = iter
 
                     # test it on the test set
                     test_losses = [test_model(i) for i in xrange(self.n_test_batches)]
                     test_score = numpy.mean(test_losses)
+                    self.series_mux.append("test_error", test_score)
                     print(('     epoch %i, minibatch %i/%i, test error of best '
                           'model %f %%') %
                                  (epoch, minibatch_index+1, self.n_train_batches,
                                   test_score*100.))
 
                 sys.stdout.flush()
 
-            self.series_mux.append("params", self.classifier.params)
+            self.series_mux.append("params", self.classifier.all_params)
 
             if patience <= iter :
                 done_looping = True
                 break
 
```
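The `patience` check closing this hunk is the patience-based early stopping used throughout the Theano deep learning tutorials this code descends from: train for at least `patience` examples, validate periodically, and stop once the iteration count exceeds the (possibly extended) patience. A condensed sketch of that control flow, with hypothetical `train_model`/`validate` helpers and illustrative constants:

```python
import random

def train_model(i):        # hypothetical stand-in for the compiled SGD step
    pass

def validate():            # hypothetical stand-in for the mean validation error
    return random.random()

n_train_batches = 500      # illustrative
patience = 10000           # look at at least this many examples
validation_frequency = min(n_train_batches, patience / 2)
best_validation_loss = float('inf')
done_looping = False
iter = 0

epoch = 0
while epoch < 1000 and not done_looping:
    epoch += 1
    for minibatch_index in xrange(n_train_batches):
        train_model(minibatch_index)
        iter += 1
        if iter % validation_frequency == 0:
            this_loss = validate()
            if this_loss < best_validation_loss:
                best_validation_loss = this_loss
                patience = max(patience, iter * 2)  # typical patience extension
        if patience <= iter:
            done_looping = True
            break
```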