Mercurial > ift6266
comparison deep/stacked_dae/v_sylvain/nist_sda.py @ 327:4306796d60a8
Ajout de la capacité de faire décroître le taux d'apprentissage si demandé
author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
---|---|
date | Sun, 11 Apr 2010 19:52:28 -0400 |
parents | 7a12d2c3d06b |
children | ffc06af1c543 |
comparison
equal
deleted
inserted
replaced
326:b762ac18a2d7 | 327:4306796d60a8 |
---|---|
48 rtt = None | 48 rtt = None |
49 if state.has_key('reduce_train_to'): | 49 if state.has_key('reduce_train_to'): |
50 rtt = state['reduce_train_to'] | 50 rtt = state['reduce_train_to'] |
51 elif REDUCE_TRAIN_TO: | 51 elif REDUCE_TRAIN_TO: |
52 rtt = REDUCE_TRAIN_TO | 52 rtt = REDUCE_TRAIN_TO |
53 | |
54 if state.has_key('decrease_lr'): | |
55 decrease_lr = state['decrease_lr'] | |
56 else : | |
57 decrease_lr = 0 | |
53 | 58 |
54 n_ins = 32*32 | 59 n_ins = 32*32 |
55 n_outs = 62 # 10 digits, 26*2 (lower, capitals) | 60 n_outs = 62 # 10 digits, 26*2 (lower, capitals) |
56 | 61 |
57 examples_per_epoch = NIST_ALL_TRAIN_SIZE | 62 examples_per_epoch = NIST_ALL_TRAIN_SIZE |
114 #Decide how the finetune is done | 119 #Decide how the finetune is done |
115 | 120 |
116 if finetune_choice == 0: | 121 if finetune_choice == 0: |
117 print('\n\n\tfinetune with NIST\n\n') | 122 print('\n\n\tfinetune with NIST\n\n') |
118 optimizer.reload_parameters('params_pretrain.txt') | 123 optimizer.reload_parameters('params_pretrain.txt') |
119 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1) | 124 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr) |
120 channel.save() | 125 channel.save() |
121 if finetune_choice == 1: | 126 if finetune_choice == 1: |
122 print('\n\n\tfinetune with P07\n\n') | 127 print('\n\n\tfinetune with P07\n\n') |
123 optimizer.reload_parameters('params_pretrain.txt') | 128 optimizer.reload_parameters('params_pretrain.txt') |
124 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) | 129 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr) |
125 channel.save() | 130 channel.save() |
126 if finetune_choice == 2: | 131 if finetune_choice == 2: |
127 print('\n\n\tfinetune with P07 followed by NIST\n\n') | 132 print('\n\n\tfinetune with P07 followed by NIST\n\n') |
128 optimizer.reload_parameters('params_pretrain.txt') | 133 optimizer.reload_parameters('params_pretrain.txt') |
129 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20) | 134 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20,decrease=decrease_lr) |
130 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21) | 135 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr) |
131 channel.save() | 136 channel.save() |
132 if finetune_choice == 3: | 137 if finetune_choice == 3: |
133 print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\ | 138 print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\ |
134 All hidden units output are input of the logistic regression\n\n') | 139 All hidden units output are input of the logistic regression\n\n') |
135 optimizer.reload_parameters('params_pretrain.txt') | 140 optimizer.reload_parameters('params_pretrain.txt') |
136 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1) | 141 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr) |
137 | 142 |
138 | 143 |
139 if finetune_choice==-1: | 144 if finetune_choice==-1: |
140 print('\nSERIE OF 4 DIFFERENT FINETUNINGS') | 145 print('\nSERIE OF 4 DIFFERENT FINETUNINGS') |
141 print('\n\n\tfinetune with NIST\n\n') | 146 print('\n\n\tfinetune with NIST\n\n') |
142 sys.stdout.flush() | 147 sys.stdout.flush() |
143 optimizer.reload_parameters('params_pretrain.txt') | 148 optimizer.reload_parameters('params_pretrain.txt') |
144 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1) | 149 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr) |
145 channel.save() | 150 channel.save() |
146 print('\n\n\tfinetune with P07\n\n') | 151 print('\n\n\tfinetune with P07\n\n') |
147 sys.stdout.flush() | 152 sys.stdout.flush() |
148 optimizer.reload_parameters('params_pretrain.txt') | 153 optimizer.reload_parameters('params_pretrain.txt') |
149 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0) | 154 optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr) |
150 channel.save() | 155 channel.save() |
151 print('\n\n\tfinetune with P07 (done earlier) followed by NIST (written here)\n\n') | 156 print('\n\n\tfinetune with P07 (done earlier) followed by NIST (written here)\n\n') |
152 sys.stdout.flush() | 157 sys.stdout.flush() |
153 optimizer.reload_parameters('params_finetune_P07.txt') | 158 optimizer.reload_parameters('params_finetune_P07.txt') |
154 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21) | 159 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr) |
155 channel.save() | 160 channel.save() |
156 print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\ | 161 print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\ |
157 All hidden units output are input of the logistic regression\n\n') | 162 All hidden units output are input of the logistic regression\n\n') |
158 sys.stdout.flush() | 163 sys.stdout.flush() |
159 optimizer.reload_parameters('params_pretrain.txt') | 164 optimizer.reload_parameters('params_pretrain.txt') |
160 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1) | 165 optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr) |
161 channel.save() | 166 channel.save() |
162 | 167 |
163 channel.save() | 168 channel.save() |
164 | 169 |
165 return channel.COMPLETE | 170 return channel.COMPLETE |