# HG changeset patch
# User SylvainPL
# Date 1269534027 14400
# Node ID 1cc535f3e254b76ae2a7324df28c0118dae797d0
# Parent  694e75413413d1ea9711f7dd39e65cf29db63804
Fix a bug in the display of pre-training results with P07

diff -r 694e75413413 -r 1cc535f3e254 deep/stacked_dae/v_sylvain/sgd_optimization.py
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Wed Mar 24 15:14:24 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Mar 25 12:20:27 2010 -0400
@@ -87,6 +87,8 @@
     def pretrain(self,dataset):
         print "STARTING PRETRAINING, time = ", datetime.datetime.now()
         sys.stdout.flush()
+
+        un_fichier=int(819200.0/self.hp.minibatch_size) #Number of minibatches in a P07 file
 
         start_time = time.clock()
         ## Pre-train layer-wise
@@ -113,7 +115,7 @@
 
                     #When we pass through the data only once (the case with P07)
                     #There is approximately 800*1024=819200 examples per file (1k per example and files are 800M)
-                    if self.hp.pretraining_epochs_per_layer == 1 and count%819200 == 0:
+                    if self.hp.pretraining_epochs_per_layer == 1 and count%un_fichier == 0:
                         print 'Pre-training layer %i, epoch %d, cost '%(i,num_files),c
                         num_files+=1
                         sys.stdout.flush()
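
For context, a minimal standalone sketch (not part of the patch) of the arithmetic the fix relies on: `count` in pretrain() is incremented once per minibatch, so the old test `count%819200 == 0` compared a minibatch counter against an example count and fired roughly minibatch_size times too rarely. The values below (minibatch_size = 10, a plain loop standing in for the real training loop) are hypothetical:

    # Hypothetical values; in the real code the size comes from self.hp.minibatch_size.
    minibatch_size = 10
    un_fichier = int(819200.0 / minibatch_size)   # minibatches per P07 file (~819200 examples each)

    # Stand-in for the pre-training loop: count goes up by one per minibatch.
    for count in range(1, 3 * un_fichier + 1):    # simulate three P07 files
        if count % un_fichier == 0:               # the fixed test from the patch
            print("passed file %d after %d minibatches" % (count // un_fichier, count))
    # The old test, count % 819200 == 0, would fire only once every
    # 819200 minibatches (8192000 examples here), i.e. once per ten files,
    # so the per-file progress line was almost never printed.

With minibatch_size = 10 this prints after 81920, 163840 and 245760 minibatches, once per simulated P07 file, which is the behaviour the patch restores.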