comparison deep/stacked_dae/v_sylvain/sgd_optimization.py @ 330:18dc860a4ef4

Added the ability to decrease the learning rate when requested
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Sun, 11 Apr 2010 19:52:52 -0400
parents 048898c1ee55
children c2331b8e4b89
comparison: 329:54ad8a091783 vs. 330:18dc860a4ef4
@@ -143,11 +143,11 @@
         f = open('params_pretrain.txt', 'w')
         pickle.dump(self.parameters_pre,f)
         f.close()
 
 
-    def finetune(self,dataset,dataset_test,num_finetune,ind_test,special=0):
+    def finetune(self,dataset,dataset_test,num_finetune,ind_test,special=0,decrease=0):
 
         if special != 0 and special != 1:
             sys.exit('Bad value for variable special. Must be in {0,1}')
         print "STARTING FINETUNING, time = ", datetime.datetime.now()
 
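The new decrease keyword defaults to 0, so existing call sites keep their constant learning rate. A hypothetical call enabling the per-epoch decay (the optimizer and dataset names below are illustrative, not taken from this changeset):

# names and argument values are assumptions for illustration only
optimizer.finetune(train_data, test_data, num_finetune=30, ind_test=1)              # constant rate, as before
optimizer.finetune(train_data, test_data, num_finetune=30, ind_test=1, decrease=1)  # halve the rate after each epoch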
@@ -198,20 +198,24 @@
 
         done_looping = False
         epoch = 0
 
         total_mb_index = 0
-        minibatch_index = -1
+        minibatch_index = 0
         parameters_finetune=[]
+        learning_rate = self.hp.finetuning_lr #The initial finetune lr
+
 
         while (epoch < num_finetune) and (not done_looping):
             epoch = epoch + 1
 
             for x,y in dataset.train(minibatch_size,bufsize=buffersize):
                 minibatch_index += 1
+
+
                 if special == 0:
-                    cost_ij = self.classifier.finetune(x,y)
+                    cost_ij = self.classifier.finetune(x,y,learning_rate)
                 elif special == 1:
                     cost_ij = self.classifier.finetune2(x,y)
                 total_mb_index += 1
 
                 self.series["training_error"].append((epoch, minibatch_index), cost_ij)
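Passing learning_rate into self.classifier.finetune(x,y,learning_rate) implies that the classifier's compiled finetune function now takes the learning rate as a call-time input rather than a compile-time constant. That change lives in another file and is not shown in this comparison; below is only a minimal, self-contained Theano sketch of the pattern, with a toy softmax layer standing in for the stacked dAE:

import numpy, theano
import theano.tensor as T

# Sketch only: a gradient-descent step whose learning rate is a call-time argument.
# The toy parameters (784x10 softmax) are placeholders, not the project's model.
x = T.matrix('x')
y = T.ivector('y')
lr = T.scalar('lr')

W = theano.shared(numpy.zeros((784, 10), dtype=theano.config.floatX), name='W')
b = theano.shared(numpy.zeros(10, dtype=theano.config.floatX), name='b')
p_y_given_x = T.nnet.softmax(T.dot(x, W) + b)
cost = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])

params = [W, b]
gparams = T.grad(cost, params)
updates = [(p, p - lr * g) for p, g in zip(params, gparams)]

# lr is supplied at every call, so the caller can decay it between epochs
finetune_fn = theano.function([x, y, lr], cost, updates=updates)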
@@ -283,11 +287,14 @@
                 sys.stdout.flush()
 
                 # useful when doing tests
                 if self.max_minibatches and minibatch_index >= self.max_minibatches:
                     break
 
+            if decrease == 1:
+                learning_rate /= 2 #divide the learning rate by 2 for each new epoch
+
             self.series['params'].append((epoch,), self.classifier.all_params)
 
             if done_looping == True: #To exit completly the fine-tuning
                 break #to exit the WHILE loop
 
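With decrease=1 the division happens once per epoch, after the minibatch loop, so epoch k runs at finetuning_lr / 2**(k-1). A quick illustration, assuming an initial rate of 0.1 (an illustrative value, not one from the repo's configuration):

finetuning_lr = 0.1   # assumed value for illustration
schedule = [finetuning_lr / 2 ** e for e in range(5)]
print schedule        # [0.1, 0.05, 0.025, 0.0125, 0.00625]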
@@ -339,23 +346,21 @@
         for idx,x in enumerate(self.parameters_pre):
             if x.dtype=='float64':
                 self.classifier.params[idx].value=theano._asarray(copy(x),dtype=theano.config.floatX)
             else:
                 self.classifier.params[idx].value=copy(x)
 
-    #Calculate error over the training set (or a part of)
-    def training_error(self,data):
+    def training_error(self,dataset):
         # create a function to compute the mistakes that are made by the model
         # on the validation set, or testing set
         test_model = \
             theano.function(
                 [self.classifier.x,self.classifier.y], self.classifier.errors)
 
-        iter2 = data.train(self.hp.minibatch_size,bufsize=buffersize)
+        iter2 = dataset.train(self.hp.minibatch_size,bufsize=buffersize)
         train_losses2 = [test_model(x,y) for x,y in iter2]
         train_score2 = numpy.mean(train_losses2)
         print "Training error is: " + str(train_score2)
 
 
 
 
-
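The final hunk only renames the training_error argument from data to dataset and drops a stale comment, so its behaviour is unchanged. A hypothetical call (the optimizer and dataset names are assumptions):

optimizer.training_error(train_data)   # prints the mean misclassification over the training stream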