# HG changeset patch # User SylvainPL # Date 1274919782 14400 # Node ID 09e1c5872c2b4bd2e39e0ccb1f2e1ee00021956b # Parent df56627d539981cf98ee674116e629d5f3cd9df3 Ajout de trois lignes de code pour le calcul de l'erreur standard diff -r df56627d5399 -r 09e1c5872c2b deep/stacked_dae/v_sylvain/sgd_optimization.py --- a/deep/stacked_dae/v_sylvain/sgd_optimization.py Thu May 13 12:15:16 2010 -0400 +++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py Wed May 26 20:23:02 2010 -0400 @@ -395,6 +395,7 @@ self.classifier.params[idx].value=copy(x) def training_error(self,dataset,part=0): + import math # create a function to compute the mistakes that are made by the model # on the validation set, or testing set test_model = \ @@ -415,6 +416,8 @@ train_score2 = numpy.mean(train_losses2) print 'On the ' + name + 'dataset' print(('\t the error is %f')%(train_score2*100.)) + stderr = math.sqrt(train_score2-train_score2**2)/math.sqrt(len(train_losses2)*self.hp.minibatch_size) + print (('\t the stderr is %f')%(stderr*100.)) #To see the prediction of the model, the real answer and the image to judge def see_error(self, dataset):