ift6266: comparison deep/stacked_dae/v_youssouf/nist_sda.py @ 371:8cf52a1c8055

initial commit of sda with 36 classes

author: youssouf
date:   Sun, 25 Apr 2010 12:31:22 -0400
comparison: 336:a79db7cee035 -> 371:8cf52a1c8055
#!/usr/bin/python
# coding: utf-8

import ift6266
import pylearn

import numpy
import theano
import time

import pylearn.version
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

import copy
import sys
import os
import os.path

from jobman import DD
import jobman, jobman.sql
from pylearn.io import filetensor

from utils import produit_cartesien_jobs
from copy import copy

from sgd_optimization import SdaSgdOptimizer

#from ift6266.utils.scalar_series import *
from ift6266.utils.seriestables import *
import tables

from ift6266 import datasets
from config import *
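
# Names this module expects from config.py via the star import above:
# REDUCE_TRAIN_TO, NIST_ALL_TRAIN_SIZE, FINETUNE_SET, MAX_FINETUNING_EPOCHS,
# REDUCE_EVERY, JOB_VALS, JOBDB, EXPERIMENT_PATH, DEFAULT_HP_NIST.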


'''
Function called by jobman upon launching each job.
Its path is the one given when inserting jobs; see EXPERIMENT_PATH.
'''
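# A sketch of how a job reaches this entrypoint, assuming the usual
# jobman SQL workflow (JOBDB and EXPERIMENT_PATH come from config.py):
#   python nist_sda.py jobman_insert     # fill the DB via jobman_insert_nist()
#   jobman sql <JOBDB> <experiment_dir>  # a worker pops a job and calls
#                                        # jobman_entrypoint(state, channel)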
def jobman_entrypoint(state, channel):
    # record mercurial versions of each package
    pylearn.version.record_versions(state,[theano,ift6266,pylearn])
    # TODO: remove this, bad for number of simultaneous requests on DB
    channel.save()

    # For test runs, we don't want to use the whole dataset so
    # reduce it to fewer elements if asked to.
    rtt = None
    if state.has_key('reduce_train_to'):
        rtt = state['reduce_train_to']
    elif REDUCE_TRAIN_TO:
        rtt = REDUCE_TRAIN_TO

    if state.has_key('decrease_lr'):
        decrease_lr = state['decrease_lr']
    else:
        decrease_lr = 0

    n_ins = 32*32
    n_outs = 36 # 10 digits + 26 characters (lowercase and uppercase merged)

    examples_per_epoch = NIST_ALL_TRAIN_SIZE

    # Make sure these variables exist even if the branches below are not taken
    PATH = ''
    nom_reptrain = ''
    nom_serie = ""
    if state['pretrain_choice'] == 0:
        nom_serie="series_NIST.h5"
    elif state['pretrain_choice'] == 1:
        nom_serie="series_P07.h5"

    series = create_series(state.num_hidden_layers,nom_serie)


    print "Creating optimizer with state:", state

    optimizer = SdaSgdOptimizer(dataset=datasets.nist_all(),
                                hyperparameters=state,
                                n_ins=n_ins, n_outs=n_outs,
                                examples_per_epoch=examples_per_epoch,
                                series=series,
                                max_minibatches=rtt)

    parameters=[]
    # Number of P07 files used for pretraining
    nb_file=0
    if state['pretrain_choice'] == 0:
        print('\n\tpretraining with NIST\n')
        optimizer.pretrain(datasets.nist_all())
    elif state['pretrain_choice'] == 1:
        # To know how many files will be used during pretraining
        nb_file = int(state['pretraining_epochs_per_layer'])
        state['pretraining_epochs_per_layer'] = 1 # only one pass over the dataset
        if nb_file >= 100:
            sys.exit("The code does not support this many pretraining epochs (99 max with P07).\n"+
                     "You have to correct the code (and be patient, P07 is huge !!)\n"+
                     "or reduce the number of pretraining epochs to run the code (better idea).\n")
        print('\n\tpretraining with P07')
        optimizer.pretrain(datasets.nist_P07(min_file=0,max_file=nb_file))
    channel.save()

    # Set some of the parameters used for the finetuning
    if state.has_key('finetune_set'):
        finetune_choice=state['finetune_set']
    else:
        finetune_choice=FINETUNE_SET

    if state.has_key('max_finetuning_epochs'):
        max_finetune_epoch_NIST=state['max_finetuning_epochs']
    else:
        max_finetune_epoch_NIST=MAX_FINETUNING_EPOCHS

    if state.has_key('max_finetuning_epochs_P07'):
        max_finetune_epoch_P07=state['max_finetuning_epochs_P07']
    else:
        max_finetune_epoch_P07=max_finetune_epoch_NIST

    # Decide how the finetuning is done

    if finetune_choice == 0:
        print('\n\n\tfinetune with NIST\n\n')
        optimizer.reload_parameters('params_pretrain.txt')
        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
        channel.save()
    if finetune_choice == 1:
        print('\n\n\tfinetune with P07\n\n')
        optimizer.reload_parameters('params_pretrain.txt')
        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr)
        channel.save()
    if finetune_choice == 2:
        print('\n\n\tfinetune with P07\n\n')
        optimizer.reload_parameters('params_pretrain.txt')
        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=20,decrease=decrease_lr)
        #optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr)
        channel.save()
    if finetune_choice == 3:
        print('\n\n\tfinetune with NIST only on the logistic regression on top (but validation on P07).\n\
All hidden unit outputs are inputs to the logistic regression\n\n')
        optimizer.reload_parameters('params_pretrain.txt')
        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr)


    if finetune_choice==-1:
        print('\nSERIES OF 4 DIFFERENT FINETUNINGS')
        print('\n\n\tfinetune with NIST\n\n')
        sys.stdout.flush()
        optimizer.reload_parameters('params_pretrain.txt')
        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,decrease=decrease_lr)
        channel.save()
        print('\n\n\tfinetune with P07\n\n')
        sys.stdout.flush()
        optimizer.reload_parameters('params_pretrain.txt')
        optimizer.finetune(datasets.nist_P07(),datasets.nist_all(),max_finetune_epoch_P07,ind_test=0,decrease=decrease_lr)
        channel.save()
        print('\n\n\tfinetune with P07 (done earlier) followed by NIST (written here)\n\n')
        sys.stdout.flush()
        optimizer.reload_parameters('params_finetune_P07.txt')
        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=21,decrease=decrease_lr)
        channel.save()
        print('\n\n\tfinetune with NIST only on the logistic regression on top.\n\
All hidden unit outputs are inputs to the logistic regression\n\n')
        sys.stdout.flush()
        optimizer.reload_parameters('params_pretrain.txt')
        optimizer.finetune(datasets.nist_all(),datasets.nist_P07(),max_finetune_epoch_NIST,ind_test=1,special=1,decrease=decrease_lr)
        channel.save()

    channel.save()

    return channel.COMPLETE

# These Series objects are used to save various statistics
# during the training.
def create_series(num_hidden_layers, nom_serie):

    # Replace series we don't want to save with DummySeries, e.g.
    # series['training_error'] = DummySeries()

    series = {}

    basedir = os.getcwd()

    h5f = tables.openFile(os.path.join(basedir, nom_serie), "w")

    # reconstruction
    reconstruction_base = ErrorSeries(error_name="reconstruction_error",
                        table_name="reconstruction_error",
                        hdf5_file=h5f,
                        index_names=('epoch','minibatch'),
                        title="Reconstruction error (mean over "+str(REDUCE_EVERY)+" minibatches)")
    series['reconstruction_error'] = AccumulatorSeriesWrapper(base_series=reconstruction_base,
                        reduce_every=REDUCE_EVERY)

    # train
    training_base = ErrorSeries(error_name="training_error",
                        table_name="training_error",
                        hdf5_file=h5f,
                        index_names=('epoch','minibatch'),
                        title="Training error (mean over "+str(REDUCE_EVERY)+" minibatches)")
    series['training_error'] = AccumulatorSeriesWrapper(base_series=training_base,
                        reduce_every=REDUCE_EVERY)

    # valid and test are not accumulated/mean, saved directly
    series['validation_error'] = ErrorSeries(error_name="validation_error",
                        table_name="validation_error",
                        hdf5_file=h5f,
                        index_names=('epoch','minibatch'))

    series['test_error'] = ErrorSeries(error_name="test_error",
                        table_name="test_error",
                        hdf5_file=h5f,
                        index_names=('epoch','minibatch'))

    param_names = []
    for i in range(num_hidden_layers):
        param_names += ['layer%d_W'%i, 'layer%d_b'%i, 'layer%d_bprime'%i]
    param_names += ['logreg_layer_W', 'logreg_layer_b']

    # comment out series we don't want to save
    series['params'] = SharedParamsStatisticsWrapper(new_group_name="params",
                        base_group="/",
                        arrays_names=param_names,
                        hdf5_file=h5f,
                        index_names=('epoch',))

    return series
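
# For reference, a sketch of how the optimizer is expected to feed these
# series during training (hypothetical variable names; a series receives
# the index tuple declared in its index_names, then the value):
#   series['training_error'].append((epoch, minibatch), train_cost)
#   series['params'].append((epoch,), all_params)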

# Perform insertion into the Postgres DB based on combinations
# of the hyperparameter values above
# (see the comment for produit_cartesien_jobs() to know how it works)
def jobman_insert_nist():
    jobs = produit_cartesien_jobs(JOB_VALS)

    db = jobman.sql.db(JOBDB)
    for job in jobs:
        job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
        jobman.sql.insert_dict(job, db)

    print "inserted"
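
# As an illustration (hypothetical JOB_VALS; the real one lives in
# config.py), produit_cartesien_jobs expands a dict of value lists into
# one job per combination:
#   JOB_VALS = {'learning_rate': [0.01, 0.001], 'num_hidden_layers': [2, 3]}
#   -> 4 jobs: {'learning_rate': 0.01, 'num_hidden_layers': 2}, etc.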

if __name__ == '__main__':

    args = sys.argv[1:]

    #if len(args) > 0 and args[0] == 'load_nist':
    #    test_load_nist()

    if len(args) > 0 and args[0] == 'jobman_insert':
        jobman_insert_nist()

    elif len(args) > 0 and args[0] == 'test_jobman_entrypoint':
        chanmock = DD({'COMPLETE':0,'save':(lambda:None)})
        jobman_entrypoint(DD(DEFAULT_HP_NIST), chanmock)

    else:
        print "Bad arguments"
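
# Usage notes (both modes rely on the settings in config.py):
#   python nist_sda.py jobman_insert           # insert the hyperparameter grid into the jobman DB
#   python nist_sda.py test_jobman_entrypoint  # run one job locally with a mocked jobman channel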