# HG changeset patch
# User SylvainPL
# Date 1271876094 14400
# Node ID ffc06af1c5430b4a804648f4652104e1ecdcc855
# Parent bc4464c0894c747b95479326cd26fd84831dab04
Added an option to allow a decreasing learning rate during pretraining

diff -r bc4464c0894c -r ffc06af1c543 deep/stacked_dae/v_sylvain/nist_sda.py
--- a/deep/stacked_dae/v_sylvain/nist_sda.py	Wed Apr 21 14:51:14 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/nist_sda.py	Wed Apr 21 14:54:54 2010 -0400
@@ -55,6 +55,11 @@
         decrease_lr = state['decrease_lr']
     else :
         decrease_lr = 0
+
+    if state.has_key('decrease_lr_pretrain'):
+        dec=state['decrease_lr_pretrain']
+    else :
+        dec=0
 
     n_ins = 32*32
     n_outs = 62 # 10 digits, 26*2 (lower, capitals)
@@ -87,7 +92,7 @@
     nb_file=0
     if state['pretrain_choice'] == 0:
         print('\n\tpretraining with NIST\n')
-        optimizer.pretrain(datasets.nist_all())
+        optimizer.pretrain(datasets.nist_all(), decrease = dec)
     elif state['pretrain_choice'] == 1:
         #To know how many file will be used during pretraining
         nb_file = int(state['pretraining_epochs_per_layer'])
@@ -97,7 +102,7 @@
             "You have to correct the code (and be patient, P07 is huge !!)\n"+
             "or reduce the number of pretraining epoch to run the code (better idea).\n")
         print('\n\tpretraining with P07')
-        optimizer.pretrain(datasets.nist_P07(min_file=0,max_file=nb_file))
+        optimizer.pretrain(datasets.nist_P07(min_file=0,max_file=nb_file),decrease = dec)
     channel.save()
 
     #Set some of the parameters used for the finetuning
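
Note on the change: the patch reads an optional 'decrease_lr_pretrain' entry from the jobman state (defaulting to 0) and forwards it to optimizer.pretrain() through the new 'decrease' keyword argument. The patch does not show how the optimizer consumes that value; the sketch below only illustrates one plausible way a nonzero decrease factor could shrink a pretraining learning rate over minibatch updates. The function decayed_lr, the base_lr parameter, and the 1/(1 + decrease*t) schedule are illustrative assumptions, not the project's actual implementation.

# Illustrative sketch only -- NOT the SdaSgdOptimizer code touched by this patch.
# It shows how a nonzero 'decrease' value (state['decrease_lr_pretrain']) could
# turn a constant pretraining learning rate into a decreasing one.

def decayed_lr(base_lr, decrease, t):
    # base_lr  : initial pretraining learning rate (hypothetical parameter)
    # decrease : value read from state['decrease_lr_pretrain']; 0 keeps the
    #            rate constant, matching the default set in the patch
    # t        : number of minibatch updates performed so far
    if decrease == 0:
        return base_lr
    return base_lr / (1.0 + decrease * t)

print(decayed_lr(0.01, 0, 1000))      # 0.01  (constant, default behaviour)
print(decayed_lr(0.01, 0.001, 1000))  # 0.005 (rate halved after 1000 updates)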