Mercurial > ift6266
comparison deep/stacked_dae/v_sylvain/nist_sda.py @ 319:7a12d2c3d06b
finetune NIST+P07 changé pour P07+NIST, les expériences n'ont pas été concluantes
author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
---|---|
date | Fri, 02 Apr 2010 14:53:32 -0400 |
parents | 8de3bef71458 |
children | 4306796d60a8 |
comparison
equal
deleted
inserted
replaced
318:8de3bef71458 | 319:7a12d2c3d06b |
---|---|
122 print('\n\n\tfinetune with P07\n\n') | 122 print('\n\n\tfinetune with P07\n\n') |
123 optimizer.reload_parameters('params_pretrain.txt') | 123 optimizer.reload_parameters('params_pretrain.txt') |
124 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) | 124 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) |
125 channel.save() | 125 channel.save() |
126 if finetune_choice == 2: | 126 if finetune_choice == 2: |
127 print('\n\n\tfinetune with NIST followed by P07\n\n') | 127 print('\n\n\tfinetune with P07 followed by NIST\n\n') |
128 optimizer.reload_parameters('params_pretrain.txt') | 128 optimizer.reload_parameters('params_pretrain.txt') |
129 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20) | |
129 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21) | 130 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21) |
130 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20) | |
131 channel.save() | 131 channel.save() |
132 if finetune_choice == 3: | 132 if finetune_choice == 3: |
133 print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\ | 133 print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\ |
134 All hidden units output are input of the logistic regression\n\n') | 134 All hidden units output are input of the logistic regression\n\n') |
135 optimizer.reload_parameters('params_pretrain.txt') | 135 optimizer.reload_parameters('params_pretrain.txt') |
146 print('\n\n\tfinetune with P07\n\n') | 146 print('\n\n\tfinetune with P07\n\n') |
147 sys.stdout.flush() | 147 sys.stdout.flush() |
148 optimizer.reload_parameters('params_pretrain.txt') | 148 optimizer.reload_parameters('params_pretrain.txt') |
149 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) | 149 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) |
150 channel.save() | 150 channel.save() |
151 print('\n\n\tfinetune with NIST (done earlier) followed by P07 (written here)\n\n') | 151 print('\n\n\tfinetune with P07 (done earlier) followed by NIST (written here)\n\n') |
152 sys.stdout.flush() | 152 sys.stdout.flush() |
153 optimizer.reload_parameters('params_finetune_NIST.txt') | 153 optimizer.reload_parameters('params_finetune_P07.txt') |
154 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20) | 154 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21) |
155 channel.save() | 155 channel.save() |
156 print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\ | 156 print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\ |
157 All hidden units output are input of the logistic regression\n\n') | 157 All hidden units output are input of the logistic regression\n\n') |
158 sys.stdout.flush() | 158 sys.stdout.flush() |
159 optimizer.reload_parameters('params_pretrain.txt') | 159 optimizer.reload_parameters('params_pretrain.txt') |