Mercurial > ift6266
comparison deep/stacked_dae/sgd_optimization.py @ 192:e656edaedb48
Commented a few things, renamed the produit_croise_jobs function, replaced the cost function (NOT TESTED YET).
author:   fsavard
date:     Wed, 03 Mar 2010 12:51:40 -0500
parents:  3632e6258642
children: acb942530923
comparison of revisions 191:3632e6258642 | 192:e656edaedb48
(diff legend: equal / deleted / inserted / replaced)
58 self.train_set_x, self.train_set_y = shared_dataset(train_set) | 58 self.train_set_x, self.train_set_y = shared_dataset(train_set) |
59 | 59 |
60 # compute number of minibatches for training, validation and testing | 60 # compute number of minibatches for training, validation and testing |
61 self.n_train_batches = self.train_set_x.value.shape[0] / self.hp.minibatch_size | 61 self.n_train_batches = self.train_set_x.value.shape[0] / self.hp.minibatch_size |
62 self.n_valid_batches = self.valid_set_x.value.shape[0] / self.hp.minibatch_size | 62 self.n_valid_batches = self.valid_set_x.value.shape[0] / self.hp.minibatch_size |
63 self.n_test_batches = self.test_set_x.value.shape[0] / self.hp.minibatch_size | 63 # remove last batch in case it's incomplete |
64 self.n_test_batches = (self.test_set_x.value.shape[0] / self.hp.minibatch_size) - 1 | |
64 | 65 |
65 def init_classifier(self): | 66 def init_classifier(self): |
66 print "Constructing classifier" | 67 print "Constructing classifier" |
68 | |
69 # we don't want to save arrays in DD objects, so | |
70 # we recreate those arrays here | |
71 nhl = self.hp.num_hidden_layers | |
72 layers_sizes = [self.hp.hidden_layers_sizes] * nhl | |
73 corruption_levels = [self.hp.corruption_levels] * nhl | |
67 | 74 |
68 # construct the stacked denoising autoencoder class | 75 # construct the stacked denoising autoencoder class |
69 self.classifier = SdA( \ | 76 self.classifier = SdA( \ |
70 train_set_x= self.train_set_x, \ | 77 train_set_x= self.train_set_x, \ |
71 train_set_y = self.train_set_y,\ | 78 train_set_y = self.train_set_y,\ |
72 batch_size = self.hp.minibatch_size, \ | 79 batch_size = self.hp.minibatch_size, \ |
73 n_ins= self.n_ins, \ | 80 n_ins= self.n_ins, \ |
74 hidden_layers_sizes = self.hp.hidden_layers_sizes, \ | 81 hidden_layers_sizes = layers_sizes, \ |
75 n_outs = self.n_outs, \ | 82 n_outs = self.n_outs, \ |
76 corruption_levels = self.hp.corruption_levels,\ | 83 corruption_levels = corruption_levels,\ |
77 rng = self.rng,\ | 84 rng = self.rng,\ |
78 pretrain_lr = self.hp.pretraining_lr, \ | 85 pretrain_lr = self.hp.pretraining_lr, \ |
79 finetune_lr = self.hp.finetuning_lr,\ | 86 finetune_lr = self.hp.finetuning_lr,\ |
80 input_divider = self.input_divider ) | 87 input_divider = self.input_divider ) |
81 | 88 |