diff deep/stacked_dae/v2/sgd_optimization.py @ 228:851e7ad4a143
Fixed an error in the modified cost formula in stacked_dae, and removed timers from sgd_optimization
| author   | fsavard |
|----------|---------|
| date     | Fri, 12 Mar 2010 10:47:36 -0500 |
| parents  | acae439d6572 |
| children | 02eb98d051fe |
```diff
--- a/deep/stacked_dae/v2/sgd_optimization.py	Fri Mar 12 10:31:10 2010 -0500
+++ b/deep/stacked_dae/v2/sgd_optimization.py	Fri Mar 12 10:47:36 2010 -0500
@@ -104,9 +104,6 @@
         print "STARTING PRETRAINING, time = ", datetime.datetime.now()
         sys.stdout.flush()
 
-        time_acc_func = 0.0
-        time_acc_total = 0.0
-
         start_time = time.clock()
         ## Pre-train layer-wise
         for i in xrange(self.classifier.n_layers):
@@ -114,14 +111,7 @@
             for epoch in xrange(self.hp.pretraining_epochs_per_layer):
                 # go through the training set
                 for batch_index in xrange(self.n_train_batches):
-                    t1 = time.clock()
                     c = self.classifier.pretrain_functions[i](batch_index)
-                    t2 = time.clock()
-
-                    time_acc_func += t2 - t1
-
-                    if batch_index % 500 == 0:
-                        print "acc / total", time_acc_func / (t2 - start_time), time_acc_func
 
                     self.series["reconstruction_error"].append((epoch, batch_index), c)
 
```
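
For context, what the patch leaves behind is the standard layer-wise pretraining schedule: for each layer, run a fixed number of epochs over all minibatches, call that layer's compiled pretraining function, and log the reconstruction cost. Below is a minimal sketch of that control flow in modern Python; the `make_pretrain_fn` factory and the list-based error log are hypothetical stand-ins for the repository's Theano `pretrain_functions` and `series` objects, and the sizes are made up:

```python
import time

# Hypothetical stand-in for the classifier's per-layer Theano functions.
# A real pretrain function runs one SGD step on the layer's denoising
# autoencoder for the given minibatch and returns the reconstruction cost.
def make_pretrain_fn(layer):
    def pretrain(batch_index):
        return 1.0 / (1 + layer + batch_index)  # dummy decreasing cost
    return pretrain

n_layers = 3                       # assumed, mirrors self.classifier.n_layers
pretraining_epochs_per_layer = 2   # assumed, mirrors self.hp.*
n_train_batches = 5                # assumed, mirrors self.n_train_batches
pretrain_functions = [make_pretrain_fn(i) for i in range(n_layers)]
reconstruction_error = []          # stands in for self.series["reconstruction_error"]

start_time = time.perf_counter()   # time.clock() was removed in Python 3.8
# Pre-train layer-wise: finish all epochs on layer i before moving to i+1.
for i in range(n_layers):
    for epoch in range(pretraining_epochs_per_layer):
        for batch_index in range(n_train_batches):
            c = pretrain_functions[i](batch_index)
            reconstruction_error.append(((epoch, batch_index), c))
print("pretraining took %.3fs" % (time.perf_counter() - start_time))
```

With the per-batch timers gone, the only remaining clock use is the single `start_time` measurement around the whole loop, which is enough to report total pretraining time without paying two `time.clock()` calls per minibatch.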