Mercurial > ift6266
comparison deep/stacked_dae/v_sylvain/sgd_optimization.py @ 325:048898c1ee55
Added a function to compute the error made by the model on a pre-determined set
author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
---|---|
date | Fri, 09 Apr 2010 15:49:42 -0400 |
parents | 403b9e6ecfaa |
children | 18dc860a4ef4 |
324:1763c64030d1 | 325:048898c1ee55 |
---|---|
339 for idx,x in enumerate(self.parameters_pre): | 339 for idx,x in enumerate(self.parameters_pre): |
340 if x.dtype=='float64': | 340 if x.dtype=='float64': |
341 self.classifier.params[idx].value=theano._asarray(copy(x),dtype=theano.config.floatX) | 341 self.classifier.params[idx].value=theano._asarray(copy(x),dtype=theano.config.floatX) |
342 else: | 342 else: |
343 self.classifier.params[idx].value=copy(x) | 343 self.classifier.params[idx].value=copy(x) |
344 | 344 |
345 | 345 #Calculate error over the training set (or a part of) |
346 | 346 def training_error(self,data): |
347 | 347 # create a function to compute the mistakes that are made by the model |
348 | 348 # on the validation set, or testing set |
| 349 test_model = \ |
| 350 theano.function( |
| 351 [self.classifier.x,self.classifier.y], self.classifier.errors) |
| 352 |
| 353 iter2 = data.train(self.hp.minibatch_size,bufsize=buffersize) |
| 354 train_losses2 = [test_model(x,y) for x,y in iter2] |
| 355 train_score2 = numpy.mean(train_losses2) |
| 356 print "Training error is: " + str(train_score2) |
| 357 |
| 358 |
| 359 |
| 360 |
| 361 |
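
The core pattern of the new `training_error` method (compile an error function, evaluate it on every minibatch, average the per-batch losses) can be sketched in isolation. The snippet below is a minimal, self-contained illustration of that averaging pattern only; `dummy_errors` and `minibatches` are hypothetical stand-ins for the compiled `theano.function([self.classifier.x, self.classifier.y], self.classifier.errors)` and for `data.train(self.hp.minibatch_size, bufsize=buffersize)`, and are not part of this changeset.

```python
import numpy

def dummy_errors(x, y):
    # Hypothetical stand-in for the compiled Theano error function:
    # returns the mean 0-1 loss of a fake constant prediction on one minibatch.
    predictions = numpy.zeros_like(y)
    return numpy.mean(predictions != y)

def minibatches(n_batches, batch_size):
    # Hypothetical stand-in for data.train(minibatch_size, bufsize=buffersize):
    # yields (x, y) minibatches of random inputs and labels in 0..9.
    rng = numpy.random.RandomState(0)
    for _ in range(n_batches):
        x = rng.rand(batch_size, 32 * 32)
        y = rng.randint(0, 10, size=batch_size)
        yield x, y

# Same structure as the new method: one loss per minibatch, then the mean.
train_losses = [dummy_errors(x, y) for x, y in minibatches(5, 20)]
print("Training error is: " + str(numpy.mean(train_losses)))
```

Note that in the added method, `buffersize` is used without being defined in the shown hunk, so it is presumably defined elsewhere in sgd_optimization.py.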