diff deep/stacked_dae/v_sylvain/stacked_dae.py @ 233:02ed13244133
version for use of the dataset module
author   | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date     | Sun, 14 Mar 2010 15:07:17 -0400
parents  | 8a94a5c808cd
children | ecb69e17950b
line diff
--- a/deep/stacked_dae/v_sylvain/stacked_dae.py    Sat Mar 13 15:45:43 2010 -0500
+++ b/deep/stacked_dae/v_sylvain/stacked_dae.py    Sun Mar 14 15:07:17 2010 -0400
@@ -193,14 +193,14 @@
         print "input_divider", input_divider
         print "----"
 
-        self.shared_divider = theano.shared(numpy.asarray(input_divider, dtype=theano.config.floatX))
+        #self.shared_divider = theano.shared(numpy.asarray(input_divider, dtype=theano.config.floatX))
 
         if len(hidden_layers_sizes) < 1 :
             raiseException (' You must have at least one hidden layer ')
 
 
         # allocate symbolic variables for the data
-        index   = T.lscalar()    # index to a [mini]batch
+        ##index   = T.lscalar()    # index to a [mini]batch
         self.x  = T.matrix('x')  # the data is presented as rasterized images
         self.y  = T.ivector('y') # the labels are presented as 1D vector of
                                  # [int] labels
@@ -247,10 +247,10 @@
                 updates[param] = param - gparam * pretrain_lr
 
             # create a function that trains the dA
-            update_fn = theano.function([index], dA_layer.cost, \
+            update_fn = theano.function([ensemble], dA_layer.cost, \
                   updates = updates,
                   givens = {
-                     self.x : train_set_x[index*batch_size:(index+1)*batch_size] / self.shared_divider})
+                     self.x : ensemble})
            # collect this function into a list
            self.pretrain_functions += [update_fn]
 
@@ -273,11 +273,13 @@
        for param,gparam in zip(self.params, gparams):
            updates[param] = param - gparam*finetune_lr
 
-        self.finetune = theano.function([index], cost,
+        self.finetune = theano.function([ensemble_x,ensemble_y], cost,
                updates = updates,
                givens = {
-                  self.x : train_set_x[index*batch_size:(index+1)*batch_size]/self.shared_divider,
-                  self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
+                  #self.x : train_set_x[index*batch_size:(index+1)*batch_size]/self.shared_divider,
+                  #self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
+                  self.x : ensemble_x,
+                  self.y : ensemble_y} )
 
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
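The changeset switches the compiled Theano functions from taking a symbolic minibatch index into a shared `train_set_x`/`train_set_y` (with `givens` slicing and division by `shared_divider`) to taking the minibatch arrays themselves (`ensemble`, `ensemble_x`, `ensemble_y`) as function inputs, so batches can be supplied by the external dataset module. Below is a minimal, self-contained sketch of that pattern; it is not the repository code, and the toy cost, layer shape, and learning rate are illustrative assumptions only.

    # Sketch: compile a training function whose input is the minibatch itself,
    # rather than an index into a GPU-resident shared dataset.
    import numpy
    import theano
    import theano.tensor as T

    x = T.matrix('x')  # minibatch of rasterized images (one row per example)
    W = theano.shared(numpy.zeros((784, 10), dtype=theano.config.floatX), name='W')
    toy_cost = T.mean(T.dot(x, W) ** 2)        # stand-in for dA_layer.cost
    gW = T.grad(toy_cost, W)

    # Old style (before this changeset):
    #   theano.function([index], cost,
    #       givens={x: train_set_x[index*batch_size:(index+1)*batch_size]})
    # New style: the minibatch array is passed in directly at call time.
    train_fn = theano.function([x], toy_cost, updates=[(W, W - 0.01 * gW)])

    minibatch = numpy.random.rand(20, 784).astype(theano.config.floatX)
    print(train_fn(minibatch))

With this style the data no longer has to live in a Theano shared variable, at the cost of transferring each minibatch to the device on every call, which is what lets an arbitrary dataset iterator feed the pretraining and finetuning functions.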