ift6266: changeset 228:851e7ad4a143
Fixed an error in the modified cost formula in stacked_dae, and removed timers from sgd_optimization
author | fsavard
---|---
date | Fri, 12 Mar 2010 10:47:36 -0500
parents | acae439d6572
children | 02eb98d051fe 6f4e3719a3cc
files | deep/stacked_dae/v2/nist_sda.py deep/stacked_dae/v2/sgd_optimization.py deep/stacked_dae/v2/stacked_dae.py
diffstat | 3 files changed, 6 insertions(+), 17 deletions(-)
```diff
--- a/deep/stacked_dae/v2/nist_sda.py	Fri Mar 12 10:31:10 2010 -0500
+++ b/deep/stacked_dae/v2/nist_sda.py	Fri Mar 12 10:47:36 2010 -0500
@@ -35,8 +35,8 @@
 TEST_CONFIG = False
 
 NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
-JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/fsavard_sda4'
-EXPERIMENT_PATH = "ift6266.deep.stacked_dae.nist_sda.jobman_entrypoint"
+JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/fsavard_sda_v2'
+EXPERIMENT_PATH = "ift6266.deep.stacked_dae.v2.nist_sda.jobman_entrypoint"
 
 REDUCE_TRAIN_TO = None
 MAX_FINETUNING_EPOCHS = 1000
@@ -74,8 +74,7 @@
 '''
 Function called by jobman upon launching each job
 
-Its path is the one given when inserting jobs:
-ift6266.deep.stacked_dae.nist_sda.jobman_entrypoint
+Its path is the one given when inserting jobs: see EXPERIMENT_PATH
 '''
 def jobman_entrypoint(state, channel):
     # record mercurial versions of each package
```
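The docstring now points at EXPERIMENT_PATH because jobman imports the function at that dotted path when it launches a job; since the module moved into the v2 subpackage, the old hard-coded path would no longer resolve. A minimal sketch of the entrypoint contract this relies on (the field names and body are illustrative, not the repo's actual hyperparameters):

```python
# Hypothetical minimal jobman entrypoint. jobman resolves EXPERIMENT_PATH,
# imports this function, and calls it with the job's state (the hyperparameter
# row inserted into JOBDB at scheduling time) and a channel for checkpointing.
def jobman_entrypoint(state, channel):
    lr = state.pretraining_lr            # illustrative field name
    n_layers = state.num_hidden_layers   # illustrative field name
    # ... build the SdA with these hyperparameters and train it ...
    channel.save()                       # persist state back to the jobs table
    return channel.COMPLETE              # mark the job done in the DB
```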
```diff
--- a/deep/stacked_dae/v2/sgd_optimization.py	Fri Mar 12 10:31:10 2010 -0500
+++ b/deep/stacked_dae/v2/sgd_optimization.py	Fri Mar 12 10:47:36 2010 -0500
@@ -104,9 +104,6 @@
         print "STARTING PRETRAINING, time = ", datetime.datetime.now()
         sys.stdout.flush()
 
-        time_acc_func = 0.0
-        time_acc_total = 0.0
-
         start_time = time.clock()
         ## Pre-train layer-wise
         for i in xrange(self.classifier.n_layers):
@@ -114,14 +111,7 @@
             for epoch in xrange(self.hp.pretraining_epochs_per_layer):
                 # go through the training set
                 for batch_index in xrange(self.n_train_batches):
-                    t1 = time.clock()
                     c = self.classifier.pretrain_functions[i](batch_index)
-                    t2 = time.clock()
-
-                    time_acc_func += t2 - t1
-
-                    if batch_index % 500 == 0:
-                        print "acc / total", time_acc_func / (t2 - start_time), time_acc_func
 
                     self.series["reconstruction_error"].append((epoch, batch_index), c)
```
```diff
--- a/deep/stacked_dae/v2/stacked_dae.py	Fri Mar 12 10:31:10 2010 -0500
+++ b/deep/stacked_dae/v2/stacked_dae.py	Fri Mar 12 10:47:36 2010 -0500
@@ -133,7 +133,7 @@
         # used later when stacking dAs.
         self.y = T.nnet.sigmoid(T.dot(self.tilde_x, self.W) + self.b)
         # Equation (3)
-        self.z = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
+        #self.z = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
         # Equation (4)
         # note : we sum over the size of a datapoint; if we are using minibatches,
         # L will be a vector, with one entry per example in minibatch
@@ -142,9 +142,9 @@
         # bypassing z to avoid running to log(0)
         z_a = T.dot(self.y, self.W_prime) + self.b_prime
-        log_sigmoid = T.log(1) - T.log(1+T.exp(-z_a))
+        log_sigmoid = T.log(1.) - T.log(1.+T.exp(-z_a))
         # log(1-sigmoid(z_a))
-        log_1_sigmoid = -self.x - T.log(1+T.exp(-z_a))
+        log_1_sigmoid = -z_a - T.log(1.+T.exp(-z_a))
 
         self.L = -T.sum( self.x * (log_sigmoid) \
                        + (1.0-self.x) * (log_1_sigmoid), axis=1 )
```
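The corrected cost uses the identities log σ(a) = −log(1 + e^(−a)) and log(1 − σ(a)) = −a − log(1 + e^(−a)), which avoid materializing the reconstruction z = σ(z_a) and hence the log(0) that the naive cross-entropy produces when the sigmoid saturates; note the fixed second term subtracts the pre-sigmoid activation z_a, not the input x. A small NumPy check of the stable form against the naive one, as a stand-in for the Theano expressions (shapes and seed are illustrative):

```python
import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def cross_entropy_naive(x, z_a):
    # -sum_j [ x_j*log(z_j) + (1-x_j)*log(1-z_j) ] with z = sigmoid(z_a);
    # hits log(0) = -inf when the sigmoid saturates
    z = sigmoid(z_a)
    return -np.sum(x * np.log(z) + (1.0 - x) * np.log(1.0 - z), axis=1)

def cross_entropy_stable(x, z_a):
    softplus = np.logaddexp(0.0, -z_a)   # log(1 + exp(-z_a)), computed stably
    log_sigmoid = -softplus              # log(sigmoid(z_a))
    log_1_sigmoid = -z_a - softplus      # log(1 - sigmoid(z_a)): the fixed term
    return -np.sum(x * log_sigmoid + (1.0 - x) * log_1_sigmoid, axis=1)

rng = np.random.default_rng(0)
x = rng.random((4, 8))                   # minibatch of 4 examples, 8 inputs each
z_a = rng.normal(size=(4, 8))            # pre-sigmoid reconstruction activations
assert np.allclose(cross_entropy_naive(x, z_a), cross_entropy_stable(x, z_a))
```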