comparison: deep/stacked_dae/v_sylvain/sgd_optimization.py @ 310:a5abd5c8b4b0

Small changes to make it possible to use the GPU

author   | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date     | Thu, 01 Apr 2010 13:44:04 -0400
parents  | fe5d428c2acc
children | 403b9e6ecfaa
--- 309:60cacb9a70e4
+++ 310:a5abd5c8b4b0
@@ -16,10 +16,13 @@
 from copy import copy
 
 from stacked_dae import SdA
 
 from ift6266.utils.seriestables import *
+
+#For test purpose only
+buffersize=1000
 
 default_series = { \
     'reconstruction_error' : DummySeries(),
     'training_error' : DummySeries(),
     'validation_error' : DummySeries(),
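The module-level buffersize added here is threaded through every dataset iterator call in the rest of the diff as bufsize=buffersize. The iterators themselves live in the ift6266 dataset code and are not part of this file; as a rough sketch of what a bufsize parameter of this kind usually does (the names, shapes and data below are hypothetical, not from this repository), the iterator loads bufsize examples at a time and slices minibatches out of each buffer:

import numpy

def train(minibatch_size, bufsize=1000):
    # Hypothetical stand-in for the real on-disk data source.
    data = numpy.random.rand(10000, 1024).astype('float32')
    labels = numpy.random.randint(0, 62, size=10000)
    # Load bufsize examples at a time, then yield (x, y) minibatches
    # from each buffer; this bounds memory use for datasets that do
    # not fit in RAM.
    for start in range(0, len(data), bufsize):
        buf_x = data[start:start + bufsize]
        buf_y = labels[start:start + bufsize]
        for i in range(0, len(buf_x), minibatch_size):
            yield buf_x[i:i + minibatch_size], buf_y[i:i + minibatch_size]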
@@ -201,11 +204,11 @@
         parameters_finetune=[]
 
         while (epoch < num_finetune) and (not done_looping):
             epoch = epoch + 1
 
-            for x,y in dataset.train(minibatch_size):
+            for x,y in dataset.train(minibatch_size,bufsize=buffersize):
                 minibatch_index += 1
                 if special == 0:
                     cost_ij = self.classifier.finetune(x,y)
                 elif special == 1:
                     cost_ij = self.classifier.finetune2(x,y)
@@ -215,13 +218,13 @@
 
                 if (total_mb_index+1) % validation_frequency == 0:
                     #minibatch_index += 1
                     #The validation set is always NIST (we want the model to be good on NIST)
                     if ind_test == 0 | ind_test == 20:
-                        iter=dataset_test.valid(minibatch_size)
+                        iter=dataset_test.valid(minibatch_size,bufsize=buffersize)
                     else:
-                        iter = dataset.valid(minibatch_size)
+                        iter = dataset.valid(minibatch_size,bufsize=buffersize)
                     if self.max_minibatches:
                         iter = itermax(iter, self.max_minibatches)
                     validation_losses = [validate_model(x,y) for x,y in iter]
                     this_validation_loss = numpy.mean(validation_losses)
 
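One note on an unchanged context line above: "if ind_test == 0 | ind_test == 20:" does not test what it appears to. In Python, | binds more tightly than ==, so the expression parses as the chained comparison ind_test == (0 | ind_test) == 20, which for an integer ind_test reduces to plain ind_test == 20; the ind_test == 0 case never reaches the NIST validation branch. The intended condition is spelled with "or":

# What the context line evaluates to when ind_test is 0:
assert (0 == 0 | 0 == 20) is False   # parses as 0 == (0 | 0) == 20
# The intended test, spelled out:
ind_test = 0
assert ind_test == 0 or ind_test == 20
# or, equivalently:
assert ind_test in (0, 20)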
@@ -245,18 +248,18 @@
                         best_validation_loss = this_validation_loss
                         best_iter = total_mb_index
                         parameters_finetune=[copy(x.value) for x in self.classifier.params]
 
                         # test it on the test set
-                        iter = dataset.test(minibatch_size)
+                        iter = dataset.test(minibatch_size,bufsize=buffersize)
                         if self.max_minibatches:
                             iter = itermax(iter, self.max_minibatches)
                         test_losses = [test_model(x,y) for x,y in iter]
                         test_score = numpy.mean(test_losses)
 
                         #test it on the second test set
-                        iter2 = dataset_test.test(minibatch_size)
+                        iter2 = dataset_test.test(minibatch_size,bufsize=buffersize)
                         if self.max_minibatches:
                             iter2 = itermax(iter2, self.max_minibatches)
                         test_losses2 = [test_model(x,y) for x,y in iter2]
                         test_score2 = numpy.mean(test_losses2)
 
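itermax is defined elsewhere in this file and is untouched by this commit; from its use here it simply caps an iterator at self.max_minibatches items. A minimal equivalent (a sketch, not the repository's actual definition) is itertools.islice:

import itertools

def itermax(it, max_items):
    # Yield at most max_items items from it, then stop.
    return itertools.islice(it, max_items)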
@@ -332,6 +335,9 @@
         #self.parameters_pre=pickle.load('params_pretrain.txt')
         f = open(which)
         self.parameters_pre=pickle.load(f)
         f.close()
         for idx,x in enumerate(self.parameters_pre):
-            self.classifier.params[idx].value=copy(x)
+            if x.dtype=='float64':
+                self.classifier.params[idx].value=theano._asarray(copy(x),dtype=theano.config.floatX)
+            else:
+                self.classifier.params[idx].value=copy(x)
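This last hunk is the substance of the GPU change. Parameters pickled during CPU pretraining are typically float64 NumPy arrays, but the Theano GPU backend of this era only stores float32 shared variables (floatX = 'float32' when running with device=gpu), so float64 arrays must be downcast before being assigned back into self.classifier.params. theano._asarray is an internal Theano helper; a standalone sketch of the same cast using the public NumPy API:

import numpy
import theano

def to_floatX(arr):
    # Downcast CPU-pickled float64 parameters to floatX (float32 on
    # GPU); leave other dtypes untouched.
    if arr.dtype == 'float64':
        return numpy.asarray(arr, dtype=theano.config.floatX)
    return arr

Note also that .value on a shared variable is the old (2010-era) Theano interface; later releases replaced it with get_value()/set_value().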