comparison deep/stacked_dae/v_sylvain/nist_sda_retrieve.py @ 328:c61b72d07676
Added the ability to decrease the learning rate when requested
author    SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date      Sun, 11 Apr 2010 19:52:35 -0400
parents   71ffe2c9bfad
children  5c3935aa3f8a
comparing 327:4306796d60a8 with 328:c61b72d07676
@@ -48,10 +48,15 @@
 rtt = None
 if state.has_key('reduce_train_to'):
     rtt = state['reduce_train_to']
 elif REDUCE_TRAIN_TO:
     rtt = REDUCE_TRAIN_TO
+
+if state.has_key('decrease_lr'):
+    decrease_lr = state['decrease_lr']
+else :
+    decrease_lr = 0
 
 n_ins = 32*32
 n_outs = 62 # 10 digits, 26*2 (lower, capitals)
 
 examples_per_epoch = NIST_ALL_TRAIN_SIZE
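
The hunk above only reads the new `decrease_lr` flag from the jobman `state`, defaulting to 0 (a constant learning rate); the decay schedule itself lives inside the optimizer's `finetune` method, which is not shown in this comparison. As a rough, hypothetical sketch of the kind of schedule such a flag usually gates, assuming an inverse-time decay (nothing below is code from this repository):

# Hypothetical sketch (not from this repository): inverse-time decay of the
# learning rate, gated by the 'decrease' value threaded into finetune() below.
def decayed_learning_rate(initial_lr, decrease, n_updates):
    # decrease == 0 reproduces the old behaviour: a constant rate.
    if not decrease:
        return initial_lr
    # Otherwise shrink the rate roughly as 1/t over the updates seen so far.
    return initial_lr / (1.0 + decrease * n_updates)

print(decayed_learning_rate(0.1, 0.001, 0))     # 0.1   (start of finetuning)
print(decayed_learning_rate(0.1, 0.001, 1000))  # 0.05  (halved after 1000 updates)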
@@ -122,52 +127,52 @@
 #Decide how the finetune is done
 
 if finetune_choice == 0:
     print('\n\n\tfinetune with NIST\n\n')
     optimizer.reload_parameters(PATH+'params_pretrain.txt')
-    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1)
+    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
     channel.save()
 if finetune_choice == 1:
     print('\n\n\tfinetune with P07\n\n')
     optimizer.reload_parameters(PATH+'params_pretrain.txt')
-    optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0)
+    optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr)
     channel.save()
 if finetune_choice == 2:
     print('\n\n\tfinetune with P07 followed by NIST\n\n')
     optimizer.reload_parameters(PATH+'params_pretrain.txt')
-    optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20)
-    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21)
+    optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20,decrease=decrease_lr)
+    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr)
     channel.save()
 if finetune_choice == 3:
     print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\
 All hidden units output are input of the logistic regression\n\n')
     optimizer.reload_parameters(PATH+'params_pretrain.txt')
-    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1)
+    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr)
 
 
 if finetune_choice==-1:
-    print('\nSERIE OF 3 DIFFERENT FINETUNINGS')
+    print('\nSERIE OF 4 DIFFERENT FINETUNINGS')
     print('\n\n\tfinetune with NIST\n\n')
     sys.stdout.flush()
     optimizer.reload_parameters(PATH+'params_pretrain.txt')
-    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1)
+    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
     channel.save()
     print('\n\n\tfinetune with P07\n\n')
     sys.stdout.flush()
     optimizer.reload_parameters(PATH+'params_pretrain.txt')
-    optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0)
+    optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr)
     channel.save()
     print('\n\n\tfinetune with P07 (done earlier) followed by NIST (written here)\n\n')
     sys.stdout.flush()
     optimizer.reload_parameters('params_finetune_P07.txt')
-    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21)
+    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr)
     channel.save()
     print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\
 All hidden units output are input of the logistic regression\n\n')
     sys.stdout.flush()
     optimizer.reload_parameters(PATH+'params_pretrain.txt')
-    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1)
+    optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr)
     channel.save()
 
 channel.save()
 
 return channel.COMPLETE
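
With the changeset applied, turning the decay on is a matter of adding the key to the experiment's state before `jobman_entrypoint(state, channel)` runs. A minimal usage sketch, assuming a plain dict stands in for the jobman state and that `finetune_choice` is read from the same state (both assumptions, not shown in this diff):

# Minimal usage sketch; keys other than 'decrease_lr' are assumptions.
state = {
    'decrease_lr': 1,       # non-zero requests the decaying learning rate
    'finetune_choice': -1,  # assumed key: run the whole series of finetunings
}
# jobman_entrypoint(state, channel) then forwards decrease=state['decrease_lr']
# to every optimizer.finetune(...) call in the diff above; leaving the key out
# falls back to decrease_lr = 0, i.e. the old constant-rate behaviour.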