diff deep/stacked_dae/v_sylvain/sgd_optimization.py @ 310:a5abd5c8b4b0
Small changes to allow using the GPU
author   | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date     | Thu, 01 Apr 2010 13:44:04 -0400
parents  | fe5d428c2acc
children | 403b9e6ecfaa
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 01 13:43:43 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 01 13:44:04 2010 -0400
@@ -19,6 +19,9 @@
 from ift6266.utils.seriestables import *
+#For test purpose only
+buffersize=1000
+
 default_series = { \
         'reconstruction_error' : DummySeries(),
         'training_error' : DummySeries(),
@@ -203,7 +206,7 @@
         while (epoch < num_finetune) and (not done_looping):
             epoch = epoch + 1
-            for x,y in dataset.train(minibatch_size):
+            for x,y in dataset.train(minibatch_size,bufsize=buffersize):
                 minibatch_index += 1
                 if special == 0:
                     cost_ij = self.classifier.finetune(x,y)
@@ -217,9 +220,9 @@
                     #minibatch_index += 1
                     #The validation set is always NIST (we want the model to be good on NIST)
                     if ind_test == 0 | ind_test == 20:
-                        iter=dataset_test.valid(minibatch_size)
+                        iter=dataset_test.valid(minibatch_size,bufsize=buffersize)
                     else:
-                        iter = dataset.valid(minibatch_size)
+                        iter = dataset.valid(minibatch_size,bufsize=buffersize)
                     if self.max_minibatches:
                         iter = itermax(iter, self.max_minibatches)
                     validation_losses = [validate_model(x,y) for x,y in iter]
@@ -247,14 +250,14 @@
                         parameters_finetune=[copy(x.value) for x in self.classifier.params]
 
                         # test it on the test set
-                        iter = dataset.test(minibatch_size)
+                        iter = dataset.test(minibatch_size,bufsize=buffersize)
                         if self.max_minibatches:
                             iter = itermax(iter, self.max_minibatches)
                         test_losses = [test_model(x,y) for x,y in iter]
                         test_score = numpy.mean(test_losses)
 
                         #test it on the second test set
-                        iter2 = dataset_test.test(minibatch_size)
+                        iter2 = dataset_test.test(minibatch_size,bufsize=buffersize)
                         if self.max_minibatches:
                             iter2 = itermax(iter2, self.max_minibatches)
                         test_losses2 = [test_model(x,y) for x,y in iter2]
@@ -334,7 +337,10 @@
             self.parameters_pre=pickle.load(f)
             f.close()
             for idx,x in enumerate(self.parameters_pre):
-                self.classifier.params[idx].value=copy(x)
+                if x.dtype=='float64':
+                    self.classifier.params[idx].value=theano._asarray(copy(x),dtype=theano.config.floatX)
+                else:
+                    self.classifier.params[idx].value=copy(x)
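
The hunk that actually matters for GPU execution is the last one: parameters pickled during pre-training are float64 NumPy arrays, while shared variables stored on the GPU hold float32, so each array is cast to theano.config.floatX before being written back into the classifier (the bufsize= changes only tune how many examples the dataset iterators buffer at a time). Below is a minimal sketch of the same cast, assuming a hypothetical params.pkl file of pre-trained arrays and using set_value rather than the .value attribute that this 2010-era Theano code relies on:

import pickle

import numpy
import theano

# Hypothetical pickle of pre-trained parameters, saved as float64 arrays.
with open('params.pkl', 'rb') as f:
    parameters_pre = pickle.load(f)

# Shared variables standing in for self.classifier.params; on a GPU
# device theano.config.floatX is normally 'float32'.
params = [theano.shared(numpy.zeros(x.shape, dtype=theano.config.floatX))
          for x in parameters_pre]

for idx, x in enumerate(parameters_pre):
    if x.dtype == 'float64':
        # Cast before assignment so the shared variable keeps its
        # float32 storage instead of being upcast back to float64.
        params[idx].set_value(numpy.asarray(x, dtype=theano.config.floatX))
    else:
        params[idx].set_value(x)

Keeping the float64 case in its own branch, as the diff does, leaves the values untouched when the code runs on CPU with floatX set to 'float64'.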