comparison deep/stacked_dae/v_sylvain/sgd_optimization.py @ 389:88cb95007670

Add an improved finetune option for PNIST07
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Tue, 27 Apr 2010 08:43:04 -0400
parents 8117c0e70db9
children 5e11dda78995
comparing 388:0d97fead004f with 389:88cb95007670
@@ -164,13 +164,17 @@
 
         minibatch_size = self.hp.minibatch_size
         if ind_test == 0 or ind_test == 20:
             nom_test = "NIST"
             nom_train="P07"
-        elif ind_test == 2:
+        elif ind_test == 30:
             nom_train = "PNIST07"
             nom_test = "NIST"
+            nom_test2 = "P07"
+        elif ind_test == 31:
+            nom_train = "NIST"
+            nom_test = "PNIST07"
             nom_test2 = "P07"
         else:
             nom_test = "P07"
             nom_train = "NIST"
 
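
Read together with the later hunks, the branch above gives each ind_test code a dataset role. The sketch below is a hypothetical summary of that dispatch, not code from the repository; the helper name select_dataset_names is invented for illustration.

    # Hypothetical summary of the nom_train/nom_test selection above
    # (illustration only, not repository code).
    def select_dataset_names(ind_test):
        if ind_test == 0 or ind_test == 20:
            return {'train': "P07", 'test': "NIST"}
        elif ind_test == 30:   # new in this changeset: finetune on PNIST07
            return {'train': "PNIST07", 'test': "NIST", 'test2': "P07"}
        elif ind_test == 31:   # new: NIST finetuning after PNIST07
            return {'train': "NIST", 'test': "PNIST07", 'test2': "P07"}
        else:                  # 21 and the remaining codes fall through here
            return {'train': "NIST", 'test': "P07"}
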
@@ -216,11 +220,11 @@
 
         total_mb_index = 0
         minibatch_index = 0
         parameters_finetune=[]
 
-        if ind_test == 21:
+        if ind_test == 21 | ind_test == 31:
             learning_rate = self.hp.finetuning_lr / 10.0
         else:
             learning_rate = self.hp.finetuning_lr #The initial finetune lr
 
 
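
One caution on the new condition: | is Python's bitwise OR and binds tighter than ==, so ind_test == 21 | ind_test == 31 parses as the chained comparison ind_test == (21 | ind_test) == 31. That expression is True only for ind_test == 31; for ind_test == 21 it is False, so the tenfold-smaller starting learning rate is not applied there despite the apparent intent. A minimal demonstration, assuming a logical or was intended:

    # Demonstration, not repository code: how the condition actually parses.
    for ind_test in (21, 31):
        as_written = ind_test == 21 | ind_test == 31  # ind_test == (21|ind_test) == 31
        intended = ind_test == 21 or ind_test == 31
        print(ind_test, as_written, intended)
    # 21 -> as_written False, intended True
    # 31 -> as_written True,  intended True
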
@@ -240,11 +244,11 @@
                 self.series["training_error"].append((epoch, minibatch_index), cost_ij)
 
                 if (total_mb_index+1) % validation_frequency == 0:
                     #minibatch_index += 1
                     #The validation set is always NIST (we want the model to be good on NIST)
-                    if ind_test == 0 | ind_test == 20 | ind_test == 2:
+                    if ind_test == 0 | ind_test == 20 | ind_test == 30:
                         iter=dataset_test.valid(minibatch_size,bufsize=buffersize)
                     else:
                         iter = dataset.valid(minibatch_size,bufsize=buffersize)
                     if self.max_minibatches:
                         iter = itermax(iter, self.max_minibatches)
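
The same | pitfall applies here: ind_test == 0 | ind_test == 20 | ind_test == 30 chains into ind_test == (0 | ind_test) == (20 | ind_test) == 30, which is True only when ind_test == 30, so of the three codes only the newly added one actually takes the NIST validation branch. Taking the comment's intent at face value, a membership test expresses it safely; a sketch under that assumption:

    # Sketch, assuming the intended codes are exactly 0, 20 and 30.
    if ind_test in (0, 20, 30):
        iter = dataset_test.valid(minibatch_size, bufsize=buffersize)
    else:
        iter = dataset.valid(minibatch_size, bufsize=buffersize)
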
@@ -319,11 +323,11 @@
                 # useful when doing tests
                 if self.max_minibatches and minibatch_index >= self.max_minibatches:
                     break
 
             if decrease == 1:
-                if (ind_test == 21 & epoch % 100 == 0) | ind_test == 20 | ind_test == 2:
+                if (ind_test == 21 & epoch % 100 == 0) | ind_test == 20 | ind_test == 30 | (ind_test == 31 & epoch % 100 == 0):
                     learning_rate /= 2 #divide the learning rate by 2 for each new epoch of P07 (or 100 of NIST)
 
             self.series['params'].append((epoch,), self.classifier.all_params)
 
             if done_looping == True: #To exit completly the fine-tuning
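
Here the parenthesized terms mix & (bitwise AND) with comparisons: since % and & both bind tighter than ==, (ind_test == 21 & epoch % 100 == 0) parses as the chained comparison ind_test == (21 & (epoch % 100)) == 0, not as "ind_test is 21 and the epoch is a multiple of 100". Taking the trailing comment as the intended schedule (halve every epoch for the P07-style runs, every 100 epochs for the then-NIST runs), a sketch with boolean operators:

    # Sketch, assuming the schedule described in the comment is the intent.
    if decrease == 1:
        if ind_test in (20, 30) or (ind_test in (21, 31) and epoch % 100 == 0):
            learning_rate /= 2
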
@@ -365,12 +369,16 @@
 
         elif ind_test== 21: #To keep a track of the value of the parameters
             f = open('params_finetune_P07_then_NIST.txt', 'w')
             cPickle.dump(parameters_finetune,f,protocol=-1)
             f.close()
-        elif ind_test == 2:
+        elif ind_test == 30:
             f = open('params_finetune_PNIST07.txt', 'w')
+            cPickle.dump(parameters_finetune,f,protocol=-1)
+            f.close()
+        elif ind_test == 31:
+            f = open('params_finetune_PNIST07_then_NIST.txt', 'w')
             cPickle.dump(parameters_finetune,f,protocol=-1)
             f.close()
 
 
         #Set parameters like they where right after pre-train or finetune
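
A last note on the parameter dumps: protocol=-1 asks cPickle for its highest protocol, which is binary, while these files are opened in text mode 'w'. On platforms that translate line endings this can corrupt the pickle, and an exception between open and close would leak the handle. A minimal sketch of the safer pattern, using the new filename from this changeset:

    # Sketch, not repository code: binary mode for a binary pickle protocol,
    # and a context manager so the file is closed even on error.
    import cPickle
    with open('params_finetune_PNIST07_then_NIST.txt', 'wb') as f:
        cPickle.dump(parameters_finetune, f, protocol=-1)
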