ift6266: diff deep/stacked_dae/v_sylvain/sgd_optimization.py @ 286:1cc535f3e254
fix a bug in the display of pre-training results with P07
| author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
| --- | --- |
| date | Thu, 25 Mar 2010 12:20:27 -0400 |
| parents | 28b628f331b2 |
| children | fe5d428c2acc |
```diff
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Wed Mar 24 15:14:24 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Mar 25 12:20:27 2010 -0400
@@ -87,6 +87,8 @@
     def pretrain(self,dataset):
         print "STARTING PRETRAINING, time = ", datetime.datetime.now()
         sys.stdout.flush()
+
+        un_fichier=int(819200.0/self.hp.minibatch_size) #Number of batches in a P07 file
 
         start_time = time.clock()
         ## Pre-train layer-wise
@@ -113,7 +115,7 @@
 
                 #When we pass through the data only once (the case with P07)
                 #There is approximately 800*1024=819200 examples per file (1k per example and files are 800M)
-                if self.hp.pretraining_epochs_per_layer == 1 and count%819200 == 0:
+                if self.hp.pretraining_epochs_per_layer == 1 and count%un_fichier == 0:
                     print 'Pre-training layer %i, epoch %d, cost '%(i,num_files),c
                     num_files+=1
                     sys.stdout.flush()
```
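Why the change works: `count` in the pre-training loop is incremented once per minibatch, so the old test `count % 819200 == 0` compared a minibatch counter against an example count, and the per-file progress report (almost) never fired. Dividing 819200 by `self.hp.minibatch_size` gives the number of minibatches per P07 file, which is the unit `count` is actually measured in. A minimal standalone sketch of that arithmetic (the loop and the `minibatch_size` value are illustrative assumptions, not code from the repository):

```python
# Illustrative sketch of the fixed progress check; values are assumptions,
# not taken from the changeset.
EXAMPLES_PER_FILE = 819200          # ~800 * 1024 examples in one P07 file
minibatch_size = 100                # hypothetical hyperparameter value

# What the patch computes as `un_fichier`: how many minibatches it takes
# to consume one P07 file.
batches_per_file = int(EXAMPLES_PER_FILE / minibatch_size)

count = 0
for _ in range(3 * batches_per_file):   # pretend we stream three P07 files
    count += 1                          # advances once per *minibatch*

    # The buggy test was `count % EXAMPLES_PER_FILE == 0`: since count is in
    # minibatches, that fires only after 819200 minibatches, i.e. after
    # roughly minibatch_size files, so per-file costs were almost never shown.
    if count % batches_per_file == 0:
        print("finished P07 file number", count // batches_per_file)
```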