Mercurial > ift6266
comparison deep/stacked_dae/v_sylvain/sgd_optimization.py @ 286:1cc535f3e254
correction d'un bug pour affichage des résultats de pre-train avec P07
author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
---|---|
date | Thu, 25 Mar 2010 12:20:27 -0400 |
parents | 28b628f331b2 |
children | fe5d428c2acc |
comparison
equal
deleted
inserted
replaced
285:694e75413413 | 286:1cc535f3e254 |
---|---|
85 self.finetune(self.dataset) | 85 self.finetune(self.dataset) |
86 | 86 |
87 def pretrain(self,dataset): | 87 def pretrain(self,dataset): |
88 print "STARTING PRETRAINING, time = ", datetime.datetime.now() | 88 print "STARTING PRETRAINING, time = ", datetime.datetime.now() |
89 sys.stdout.flush() | 89 sys.stdout.flush() |
90 | |
91 un_fichier=int(819200.0/self.hp.minibatch_size) #Number of batches in a P07 batch | |
90 | 92 |
91 start_time = time.clock() | 93 start_time = time.clock() |
92 ## Pre-train layer-wise | 94 ## Pre-train layer-wise |
93 for i in xrange(self.classifier.n_layers): | 95 for i in xrange(self.classifier.n_layers): |
94 # go through pretraining epochs | 96 # go through pretraining epochs |
111 if self.max_minibatches and batch_index >= self.max_minibatches: | 113 if self.max_minibatches and batch_index >= self.max_minibatches: |
112 break | 114 break |
113 | 115 |
114 #When we pass through the data only once (the case with P07) | 116 #When we pass through the data only once (the case with P07) |
115 #There is approximately 800*1024=819200 examples per file (1k per example and files are 800M) | 117 #There is approximately 800*1024=819200 examples per file (1k per example and files are 800M) |
116 if self.hp.pretraining_epochs_per_layer == 1 and count%819200 == 0: | 118 if self.hp.pretraining_epochs_per_layer == 1 and count%un_fichier == 0: |
117 print 'Pre-training layer %i, epoch %d, cost '%(i,num_files),c | 119 print 'Pre-training layer %i, epoch %d, cost '%(i,num_files),c |
118 num_files+=1 | 120 num_files+=1 |
119 sys.stdout.flush() | 121 sys.stdout.flush() |
120 self.series['params'].append((num_files,), self.classifier.all_params) | 122 self.series['params'].append((num_files,), self.classifier.all_params) |
121 | 123 |