comparison deep/stacked_dae/v_sylvain/stacked_dae.py @ 329:54ad8a091783

Add the ability to decrease the learning rate if requested
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Sun, 11 Apr 2010 19:52:44 -0400
parents 60cacb9a70e4
children 799ad23a161f
comparing 328:c61b72d07676 with 329:54ad8a091783
@@ -201,10 +201,11 @@
         # allocate symbolic variables for the data
         #index = T.lscalar() # index to a [mini]batch
         self.x = T.matrix('x')  # the data is presented as rasterized images
         self.y = T.ivector('y') # the labels are presented as 1D vector of
                                 # [int] labels
+        self.finetune_lr = T.fscalar('finetune_lr') #To get a dynamic finetune learning rate
 
         for i in xrange( self.n_layers ):
             # construct the sigmoidal layer
 
             # the size of the input is either the number of hidden units of
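
Note: T.fscalar('finetune_lr') declares a symbolic float scalar, so the learning rate becomes a value supplied at call time rather than a constant compiled into the graph. A minimal sketch of that behaviour, assuming only standard Theano; the names lr, g and step are illustrative and not part of this file:

import theano
import theano.tensor as T

lr = T.fscalar('lr')   # symbolic scalar: its value is supplied on every call
g  = T.fvector('g')    # stand-in for a gradient vector

# one compiled graph can be driven with any learning rate
step = theano.function([g, lr], -lr * g, allow_input_downcast=True)

step([1.0, 2.0], 0.1)    # roughly [-0.1, -0.2]
step([1.0, 2.0], 0.05)   # same compiled function, smaller step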
@@ -273,13 +274,13 @@
         # compute the gradients with respect to the model parameters
         gparams = T.grad(cost, self.params)
         # compute list of updates
         updates = {}
         for param,gparam in zip(self.params, gparams):
-            updates[param] = param - gparam*finetune_lr
+            updates[param] = param - gparam*self.finetune_lr
 
-        self.finetune = theano.function([self.x,self.y], cost,
+        self.finetune = theano.function([self.x,self.y,self.finetune_lr], cost,
                 updates = updates)#,
 
         # symbolic variable that points to the number of errors made on the
         # minibatch given by self.x and self.y
 
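
Note: with self.finetune_lr added to the inputs of theano.function, the caller can pass a different learning rate on every call, which is what lets the rate decrease over training. The repository's actual training loop is not part of this hunk; the sketch below only illustrates the mechanism on a toy least-squares model, and the 1/(1 + decay*epoch) schedule is an illustrative assumption:

import numpy
import theano
import theano.tensor as T

# toy stand-in for the fine-tuning graph: one weight vector, squared error
w  = theano.shared(numpy.zeros(3, dtype=theano.config.floatX), name='w')
x  = T.vector('x')
y  = T.scalar('y')
lr = T.fscalar('lr')                      # plays the role of self.finetune_lr

cost    = T.sqr(T.dot(x, w) - y)
gw      = T.grad(cost, w)
updates = {w: w - lr * gw}                # same update rule as in the patch

finetune = theano.function([x, y, lr], cost, updates=updates,
                           allow_input_downcast=True)

# the caller owns the schedule: here the rate shrinks once per epoch
initial_lr, decay = 0.1, 0.05
data_x = numpy.random.randn(20, 3)
data_y = data_x.dot([1.0, -2.0, 0.5])
for epoch in range(5):
    current_lr = initial_lr / (1.0 + decay * epoch)
    for xi, yi in zip(data_x, data_y):
        finetune(xi, yi, current_lr)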
@@ -300,19 +301,20 @@
                          n_in = sum(hidden_layers_sizes), n_out = n_outs)
                          #n_in=hidden_layers_sizes[0],n_out=n_outs)
 
         #self.logistic_params+= self.logLayer2.params
         # construct a function that implements one step of finetunining
 
+        self.logistic_params+=self.logLayer2.params
         # compute the cost, defined as the negative log likelihood
         cost2 = self.logLayer2.negative_log_likelihood(self.y)
         # compute the gradients with respect to the model parameters
-        gparams2 = T.grad(cost2, self.logLayer2.params)
+        gparams2 = T.grad(cost2, self.logistic_params)
 
         # compute list of updates
         updates2 = {}
-        for param,gparam in zip(self.logLayer2.params, gparams2):
+        for param,gparam in zip(self.logistic_params, gparams2):
             updates2[param] = param - gparam*finetune_lr
 
         self.finetune2 = theano.function([self.x,self.y], cost2,
                 updates = updates2)
 
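
Note: the second change in this revision makes finetune2 update every parameter collected in self.logistic_params (now extended with logLayer2.params) rather than the second logistic layer alone. A minimal sketch of taking T.grad over a combined parameter list, assuming standard Theano; the two-matrix toy model and all shapes below are illustrative, not the repository's classes:

import numpy
import theano
import theano.tensor as T

floatX = theano.config.floatX

# two hypothetical parameter groups, mirroring logistic_params and logLayer2.params
W1 = theano.shared(numpy.zeros((4, 3), dtype=floatX), name='W1')
b1 = theano.shared(numpy.zeros(3, dtype=floatX), name='b1')
W2 = theano.shared(numpy.zeros((3, 2), dtype=floatX), name='W2')
b2 = theano.shared(numpy.zeros(2, dtype=floatX), name='b2')

params  = [W1, b1]
params += [W2, b2]   # same idea as self.logistic_params += self.logLayer2.params

x = T.matrix('x')
y = T.ivector('y')
p_y_given_x = T.nnet.softmax(T.dot(T.dot(x, W1) + b1, W2) + b2)
cost = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])

# T.grad over a list returns one gradient per parameter, in the same order,
# so zipping it with the list updates everything the list contains
lr = 0.1             # fixed rate here, as finetune2 still uses finetune_lr directly
gparams = T.grad(cost, params)
updates = dict((p, p - lr * g) for p, g in zip(params, gparams))
finetune2 = theano.function([x, y], cost, updates=updates,
                            allow_input_downcast=True)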