diff deep/stacked_dae/sgd_optimization.py @ 186:d364a130b221

Added the base code for scalar_series. Changes to stacked_dae: fixed a problem with input_divider (it was preventing an optimization), and added use of the series. Also, in case I haven't already committed this: removed the pretraining-reuse scheme; it was complicated (error prone) and produced jobs that ran far too long.
author fsavard
date Mon, 01 Mar 2010 11:45:25 -0500
parents b9ea8e2d071a
children 3632e6258642
--- a/deep/stacked_dae/sgd_optimization.py	Fri Feb 26 17:45:52 2010 -0500
+++ b/deep/stacked_dae/sgd_optimization.py	Mon Mar 01 11:45:25 2010 -0500
@@ -33,7 +33,7 @@
         self.hp = hyperparameters
         self.n_ins = n_ins
         self.n_outs = n_outs
-        self.input_divider = numpy.asarray(input_divider, dtype=theano.config.floatX)
+        self.input_divider = input_divider
    
         if not series_mux:
             series_mux = DummyMux()
@@ -117,14 +117,15 @@
 
         # create a function to compute the mistakes that are made by the model
         # on the validation set, or testing set
+        shared_divider = theano.shared(numpy.asarray(self.input_divider, dtype=theano.config.floatX))
         test_model = theano.function([index], self.classifier.errors,
                  givens = {
-                   self.classifier.x: self.test_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                   self.classifier.x: self.test_set_x[index*minibatch_size:(index+1)*minibatch_size] / shared_divider,
                    self.classifier.y: self.test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
 
         validate_model = theano.function([index], self.classifier.errors,
                 givens = {
-                   self.classifier.x: self.valid_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                   self.classifier.x: self.valid_set_x[index*minibatch_size:(index+1)*minibatch_size] / shared_divider,
                    self.classifier.y: self.valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
 
 
@@ -161,6 +162,7 @@
                     
                     validation_losses = [validate_model(i) for i in xrange(self.n_valid_batches)]
                     this_validation_loss = numpy.mean(validation_losses)
+                    self.series_mux.append("validation_error", this_validation_loss)
                     print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                            (epoch, minibatch_index+1, self.n_train_batches, \
                             this_validation_loss*100.))
@@ -181,6 +183,7 @@
                         # test it on the test set
                         test_losses = [test_model(i) for i in xrange(self.n_test_batches)]
                         test_score = numpy.mean(test_losses)
+                        self.series_mux.append("test_error", test_score)
                         print(('     epoch %i, minibatch %i/%i, test error of best '
                               'model %f %%') % 
                                      (epoch, minibatch_index+1, self.n_train_batches,
@@ -188,7 +191,7 @@
 
                     sys.stdout.flush()
 
-            self.series_mux.append("params", self.classifier.params)
+            self.series_mux.append("params", self.classifier.all_params)
 
             if patience <= iter :
                 done_looping = True
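
For context, here is a minimal standalone sketch of the pattern the first hunk adopts: storing the divider in a theano shared variable so the division in the givens mapping stays inside the compiled graph, rather than being inlined as a raw numpy constant (which, per the commit message, was blocking an optimization). All names below are hypothetical, not taken from the repository.

import numpy
import theano
import theano.tensor as T

minibatch_size = 10

# Data and divider both live on the theano side as shared variables.
set_x = theano.shared(
    numpy.random.rand(100, 32).astype(theano.config.floatX), name='set_x')
shared_divider = theano.shared(
    numpy.asarray(255.0, dtype=theano.config.floatX), name='divider')

index = T.lscalar('index')
x = T.matrix('x')

# Stand-in for the classifier's error expression.
expr = T.mean(x)

f = theano.function([index], expr,
        givens={x: set_x[index * minibatch_size:(index + 1) * minibatch_size]
                   / shared_divider})

print f(0)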
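
The series_mux.append(name, value) calls added in the later hunks imply an interface roughly like the following. This is only a sketch of the implied contract; the real scalar_series code and DummyMux are defined elsewhere in the repository, so the behavior here is an assumption.

class DummyMux(object):
    """No-op multiplexer, used when no series_mux is supplied."""
    def append(self, series_name, value):
        pass

class PrintMux(object):
    """Hypothetical mux that simply echoes each recorded value."""
    def append(self, series_name, value):
        print "%s: %s" % (series_name, value)

# usage, as in the diff:
#   series_mux.append("validation_error", this_validation_loss)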