# HG changeset patch
# User SylvainPL
# Date 1272415856 14400
# Node ID 5e11dda78995ffee271995dc66c483b404a057cf
# Parent  116b2de2c0a4701707454b2c2de54b0ee63c4433
Add functionality to easily compute an error on different parts of NIST

diff -r 116b2de2c0a4 -r 5e11dda78995 deep/stacked_dae/v_sylvain/sgd_optimization.py
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py Tue Apr 27 13:47:33 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py Tue Apr 27 20:50:56 2010 -0400
@@ -394,17 +394,27 @@
         else:
             self.classifier.params[idx].value=copy(x)
 
-    def training_error(self,dataset):
+    def training_error(self,dataset,part=0):
         # create a function to compute the mistakes that are made by the model
         # on the validation set, or testing set
         test_model = \
             theano.function(
                 [self.classifier.x,self.classifier.y], self.classifier.errors)
-
-        iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
+        # part == 0: training set
+        if part == 0:
+            iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
+            name = 'train'
+        # part == 1: validation set
+        elif part == 1:
+            iter2 = dataset.valid(self.hp.minibatch_size,bufsize=buffersize)
+            name = 'validation'
+        elif part == 2:
+            iter2 = dataset.test(self.hp.minibatch_size,bufsize=buffersize)
+            name = 'test'
         train_losses2 = [test_model(x,y) for x,y in iter2]
         train_score2 = numpy.mean(train_losses2)
-        print(('The training error is %f')%(train_score2*100.))
+        print('On the ' + name + ' dataset')
+        print(('\t the error is %f')%(train_score2*100.))
 
     #To see the prediction of the model, the real answer and the image to judge
     def see_error(self, dataset):
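
A minimal usage sketch of the new part argument, assuming optimizer is an already-trained instance of the optimizer class defined in sgd_optimization.py and dataset is the same dataset object passed to its other methods; both names are illustrative placeholders, not part of this patch.

    # error on the training set (default, same behaviour as before this change)
    optimizer.training_error(dataset, part=0)
    # error on the validation set
    optimizer.training_error(dataset, part=1)
    # error on the test set
    optimizer.training_error(dataset, part=2)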