ift6266 repository
changeset 328:c61b72d07676
Added the ability to decrease the learning rate on request
| author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
| --- | --- |
| date | Sun, 11 Apr 2010 19:52:35 -0400 |
| parents | 4306796d60a8 |
| children | 54ad8a091783 |
| files | deep/stacked_dae/v_sylvain/nist_sda_retrieve.py |
| diffstat | 1 file changed, 15 insertions(+), 10 deletions(-) |
```diff
--- a/deep/stacked_dae/v_sylvain/nist_sda_retrieve.py	Sun Apr 11 19:52:28 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/nist_sda_retrieve.py	Sun Apr 11 19:52:35 2010 -0400
@@ -50,6 +50,11 @@
         rtt = state['reduce_train_to']
     elif REDUCE_TRAIN_TO:
         rtt = REDUCE_TRAIN_TO
+
+    if state.has_key('decrease_lr'):
+        decrease_lr = state['decrease_lr']
+    else :
+        decrease_lr = 0
 
     n_ins = 32*32
     n_outs = 62 # 10 digits, 26*2 (lower, capitals)
@@ -124,48 +129,48 @@
     if finetune_choice == 0:
         print('\n\n\tfinetune with NIST\n\n')
         optimizer.reload_parameters(PATH+'params_pretrain.txt')
-        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1)
+        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
         channel.save()
     if finetune_choice == 1:
         print('\n\n\tfinetune with P07\n\n')
         optimizer.reload_parameters(PATH+'params_pretrain.txt')
-        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0)
+        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr)
         channel.save()
     if finetune_choice == 2:
         print('\n\n\tfinetune with P07 followed by NIST\n\n')
         optimizer.reload_parameters(PATH+'params_pretrain.txt')
-        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20)
-        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21)
+        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20,decrease=decrease_lr)
+        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr)
         channel.save()
     if finetune_choice == 3:
         print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\
 All hidden units output are input of the logistic regression\n\n')
         optimizer.reload_parameters(PATH+'params_pretrain.txt')
-        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1)
+        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr)
 
     if finetune_choice==-1:
-        print('\nSERIE OF 3 DIFFERENT FINETUNINGS')
+        print('\nSERIE OF 4 DIFFERENT FINETUNINGS')
         print('\n\n\tfinetune with NIST\n\n')
         sys.stdout.flush()
         optimizer.reload_parameters(PATH+'params_pretrain.txt')
-        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1)
+        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
         channel.save()
         print('\n\n\tfinetune with P07\n\n')
         sys.stdout.flush()
         optimizer.reload_parameters(PATH+'params_pretrain.txt')
-        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0)
+        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr)
         channel.save()
         print('\n\n\tfinetune with P07 (done earlier) followed by NIST (written here)\n\n')
         sys.stdout.flush()
         optimizer.reload_parameters('params_finetune_P07.txt')
-        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21)
+        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr)
         channel.save()
         print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\
 All hidden units output are input of the logistic regression\n\n')
         sys.stdout.flush()
         optimizer.reload_parameters(PATH+'params_pretrain.txt')
-        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1)
+        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr)
         channel.save()
 
     channel.save()
```
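The diff only reads a `decrease_lr` flag from the experiment `state` (defaulting to 0) and threads it through every `finetune()` call as `decrease=decrease_lr`; the decay schedule itself lives inside the SdA optimizer, which is not part of this changeset. The following is a minimal sketch of what such a decrease could look like, assuming an inverse-decay rule and a hypothetical `finetune_lr` helper, neither of which appears in the diff:

```python
# Hypothetical sketch only: the actual schedule used by optimizer.finetune()
# is not shown in this changeset. This assumes an inverse-decay rule driven
# by the same `decrease` value that the diff passes through.

def finetune_lr(initial_lr, epoch, decrease=0):
    """Return the learning rate for a given finetuning epoch.

    With decrease == 0 (the default written into `state`), the rate stays
    constant; otherwise it shrinks as epochs go by.
    """
    if decrease == 0:
        return initial_lr
    return initial_lr / (1.0 + decrease * epoch)

# Example usage: a caller holding decrease_lr from state could do
#   lr = finetune_lr(0.1, epoch, decrease=decrease_lr)
for epoch in range(5):
    print(epoch, finetune_lr(0.1, epoch, decrease=0.5))
```

Passing the flag through `finetune()` rather than hard-coding it keeps the constant-rate behaviour as the default, so existing jobs that never set `decrease_lr` in their state are unaffected.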