diff deep/stacked_dae/v_sylvain/nist_sda.py @ 354:ffc06af1c543
Added a feature to allow a decreasing learning rate during pretraining
author    SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date      Wed, 21 Apr 2010 14:54:54 -0400
parents   4306796d60a8
children  87e684bfe538
--- a/deep/stacked_dae/v_sylvain/nist_sda.py	Wed Apr 21 14:51:14 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/nist_sda.py	Wed Apr 21 14:54:54 2010 -0400
@@ -55,6 +55,11 @@
         decrease_lr = state['decrease_lr']
     else :
         decrease_lr = 0
+
+    if state.has_key('decrease_lr_pretrain'):
+        dec=state['decrease_lr_pretrain']
+    else :
+        dec=0
 
     n_ins = 32*32
     n_outs = 62 # 10 digits, 26*2 (lower, capitals)
@@ -87,7 +92,7 @@
     nb_file=0
     if state['pretrain_choice'] == 0:
         print('\n\tpretraining with NIST\n')
-        optimizer.pretrain(datasets.nist_all())
+        optimizer.pretrain(datasets.nist_all(), decrease = dec)
     elif state['pretrain_choice'] == 1:
         #To know how many file will be used during pretraining
         nb_file = int(state['pretraining_epochs_per_layer'])
@@ -97,7 +102,7 @@
               "You have to correct the code (and be patient, P07 is huge !!)\n"+
               "or reduce the number of pretraining epoch to run the code (better idea).\n")
         print('\n\tpretraining with P07')
-        optimizer.pretrain(datasets.nist_P07(min_file=0,max_file=nb_file))
+        optimizer.pretrain(datasets.nist_P07(min_file=0,max_file=nb_file),decrease = dec)
     channel.save()
 
     #Set some of the parameters used for the finetuning
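This changeset only wires the new 'decrease_lr_pretrain' state key through the jobman entry point and forwards it to optimizer.pretrain() as decrease=dec; the decay schedule itself lives in the SdA optimizer, which this diff does not touch. The sketch below illustrates one way such a decrease value could shrink the pretraining learning rate over updates. It is a minimal, self-contained assumption for illustration: the function names, arguments, and the 1/(1 + decrease * t) schedule are not taken from the repository's actual optimizer.

# Hypothetical sketch (not part of this changeset): one way the `decrease`
# argument passed to optimizer.pretrain() above could decay the pretraining
# learning rate. Names and the 1/(1 + decrease * t) formula are assumptions.

def pretrain(dataset, n_layers=3, epochs_per_layer=10,
             initial_lr=0.01, decrease=0):
    """Greedy layer-wise pretraining with an optional decaying learning rate.

    decrease == 0 keeps the learning rate constant; a positive value
    shrinks it after every minibatch update.
    """
    for layer in range(n_layers):
        t = 0  # update counter, reset for each layer
        for epoch in range(epochs_per_layer):
            for minibatch in dataset:
                lr = initial_lr / (1.0 + decrease * t)
                train_one_step(layer, minibatch, lr)  # placeholder update
                t += 1


def train_one_step(layer, minibatch, lr):
    # Stand-in for one denoising-autoencoder gradient step on `layer`
    # at learning rate `lr` (the real code would call a compiled Theano
    # training function).
    pass

Under this reading, enabling the feature amounts to setting state['decrease_lr_pretrain'] to a small positive value (e.g. 1e-3, an illustrative choice) in the jobman state; the entry point above then passes it along as decrease=dec, while leaving it at 0 keeps the previous constant-rate behaviour.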