comparison deep/stacked_dae/v_sylvain/nist_sda_retrieve.py @ 320:71ffe2c9bfad

finetune NIST+P07 changé pour P07+NIST, les expériences n'ont pas été concluantes
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Fri, 02 Apr 2010 14:53:46 -0400
parents 067e747fd9c0
children c61b72d07676
comparison
equal deleted inserted replaced
319:7a12d2c3d06b 320:71ffe2c9bfad
130 print('\n\n\tfinetune with P07\n\n') 130 print('\n\n\tfinetune with P07\n\n')
131 optimizer.reload_parameters(PATH+'params_pretrain.txt') 131 optimizer.reload_parameters(PATH+'params_pretrain.txt')
132 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) 132 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0)
133 channel.save() 133 channel.save()
134 if finetune_choice == 2: 134 if finetune_choice == 2:
135 print('\n\n\tfinetune with NIST followed by P07\n\n') 135 print('\n\n\tfinetune with P07 followed by NIST\n\n')
136 optimizer.reload_parameters(PATH+'params_pretrain.txt') 136 optimizer.reload_parameters(PATH+'params_pretrain.txt')
137 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20)
137 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21) 138 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21)
138 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20)
139 channel.save() 139 channel.save()
140 if finetune_choice == 3: 140 if finetune_choice == 3:
141 print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\ 141 print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\
142 All hidden units output are input of the logistic regression\n\n') 142 All hidden units output are input of the logistic regression\n\n')
143 optimizer.reload_parameters(PATH+'params_pretrain.txt') 143 optimizer.reload_parameters(PATH+'params_pretrain.txt')
154 print('\n\n\tfinetune with P07\n\n') 154 print('\n\n\tfinetune with P07\n\n')
155 sys.stdout.flush() 155 sys.stdout.flush()
156 optimizer.reload_parameters(PATH+'params_pretrain.txt') 156 optimizer.reload_parameters(PATH+'params_pretrain.txt')
157 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) 157 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0)
158 channel.save() 158 channel.save()
159 print('\n\n\tfinetune with NIST (done earlier) followed by P07 (written here)\n\n') 159 print('\n\n\tfinetune with P07 (done earlier) followed by NIST (written here)\n\n')
160 sys.stdout.flush() 160 sys.stdout.flush()
161 optimizer.reload_parameters('params_finetune_NIST.txt') 161 optimizer.reload_parameters('params_finetune_P07.txt')
162 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20) 162 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21)
163 channel.save() 163 channel.save()
164 print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\ 164 print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\
165 All hidden units output are input of the logistic regression\n\n') 165 All hidden units output are input of the logistic regression\n\n')
166 sys.stdout.flush() 166 sys.stdout.flush()
167 optimizer.reload_parameters(PATH+'params_pretrain.txt') 167 optimizer.reload_parameters(PATH+'params_pretrain.txt')