diff deep/stacked_dae/sgd_optimization.py @ 192:e656edaedb48
Commented a few things, renamed the produit_croise_jobs function, replaced the cost function (NOT TESTED YET).
author | fsavard |
---|---|
date | Wed, 03 Mar 2010 12:51:40 -0500 |
parents | 3632e6258642 |
children | acb942530923 |
```diff
--- a/deep/stacked_dae/sgd_optimization.py	Tue Mar 02 14:47:18 2010 -0500
+++ b/deep/stacked_dae/sgd_optimization.py	Wed Mar 03 12:51:40 2010 -0500
@@ -60,20 +60,27 @@
         # compute number of minibatches for training, validation and testing
         self.n_train_batches = self.train_set_x.value.shape[0] / self.hp.minibatch_size
         self.n_valid_batches = self.valid_set_x.value.shape[0] / self.hp.minibatch_size
-        self.n_test_batches = self.test_set_x.value.shape[0] / self.hp.minibatch_size
+        # remove last batch in case it's incomplete
+        self.n_test_batches = (self.test_set_x.value.shape[0] / self.hp.minibatch_size) - 1
 
     def init_classifier(self):
         print "Constructing classifier"
 
+        # we don't want to save arrays in DD objects, so
+        # we recreate those arrays here
+        nhl = self.hp.num_hidden_layers
+        layers_sizes = [self.hp.hidden_layers_sizes] * nhl
+        corruption_levels = [self.hp.corruption_levels] * nhl
+
         # construct the stacked denoising autoencoder class
         self.classifier = SdA( \
                       train_set_x= self.train_set_x, \
                       train_set_y = self.train_set_y,\
                       batch_size = self.hp.minibatch_size, \
                       n_ins= self.n_ins, \
-                      hidden_layers_sizes = self.hp.hidden_layers_sizes, \
+                      hidden_layers_sizes = layers_sizes, \
                       n_outs = self.n_outs, \
-                      corruption_levels = self.hp.corruption_levels,\
+                      corruption_levels = corruption_levels,\
                       rng = self.rng,\
                       pretrain_lr = self.hp.pretraining_lr, \
                       finetune_lr = self.hp.finetuning_lr,\
```
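
The first hunk changes how the number of test batches is computed. A minimal sketch, with hypothetical sizes, of what that arithmetic does: the integer division already floors away a partial final batch, so the added `- 1` then drops one more full batch as a conservative guard against an incomplete one.

```python
# Hypothetical sizes standing in for test_set_x.value.shape[0] and
# self.hp.minibatch_size in the patch; "//" reproduces the Python 2
# integer division that "/" performs on ints in the original code.
n_test_examples = 1005
minibatch_size = 10

n_floored = n_test_examples // minibatch_size  # 100: the 5 leftover examples are dropped
n_test_batches = n_floored - 1                 # 99: the patched formula drops one more batch
```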
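The second hunk rebuilds the per-layer lists from scalar hyperparameters, so that no arrays need to be stored in the `DD` objects mentioned in the new comment. A minimal sketch of the replication pattern, with hypothetical values standing in for the `self.hp` fields:

```python
num_hidden_layers = 3     # stands in for self.hp.num_hidden_layers
hidden_layers_size = 500  # scalar stand-in for self.hp.hidden_layers_sizes
corruption_level = 0.1    # scalar stand-in for self.hp.corruption_levels

# one entry per hidden layer, in the list form the SdA constructor expects
layers_sizes = [hidden_layers_size] * num_hidden_layers     # [500, 500, 500]
corruption_levels = [corruption_level] * num_hidden_layers  # [0.1, 0.1, 0.1]
```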