ift6266: comparison of deep/stacked_dae/nist_sda.py @ 265:c8fe09a65039
Move the new stacked_dae code from v2 to the base 'stacked_dae' directory, and move the old code to the 'old' directory.
author | fsavard
date | Fri, 19 Mar 2010 10:54:39 -0400
parents | deep/stacked_dae/v2/nist_sda.py@42005ec87747
children | 798d1344e6a2
243:3c54cb3713ef (old) | 265:c8fe09a65039 (new)
---|---
23 | 23 |
24 from utils import produit_cartesien_jobs | 24 from utils import produit_cartesien_jobs |
25 | 25 |
26 from sgd_optimization import SdaSgdOptimizer | 26 from sgd_optimization import SdaSgdOptimizer |
27 | 27 |
28 from ift6266.utils.scalar_series import * | 28 #from ift6266.utils.scalar_series import * |
29 from ift6266.utils.seriestables import * | |
30 import tables | |
29 | 31 |
30 ############################################################################## | 32 from ift6266 import datasets |
31 # GLOBALS | 33 from config import * |
32 | |
33 TEST_CONFIG = False | |
34 | |
35 NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all' | |
36 JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/fsavard_sda4' | |
37 EXPERIMENT_PATH = "ift6266.deep.stacked_dae.nist_sda.jobman_entrypoint" | |
38 | |
39 REDUCE_TRAIN_TO = None | |
40 MAX_FINETUNING_EPOCHS = 1000 | |
41 # number of minibatches before taking means for valid error etc. | |
42 REDUCE_EVERY = 1000 | |
43 | |
44 if TEST_CONFIG: | |
45 REDUCE_TRAIN_TO = 1000 | |
46 MAX_FINETUNING_EPOCHS = 2 | |
47 REDUCE_EVERY = 10 | |
48 | |
49 # Possible values the hyperparameters can take. These are then | |
50 # combined with produit_cartesien_jobs so we get a list of all | |
51 # possible combinations, each one resulting in a job inserted | |
52 # in the jobman DB. | |
53 JOB_VALS = {'pretraining_lr': [0.1, 0.01],#, 0.001],#, 0.0001], | |
54 'pretraining_epochs_per_layer': [10,20], | |
55 'hidden_layers_sizes': [300,800], | |
56 'corruption_levels': [0.1,0.2,0.3], | |
57 'minibatch_size': [20], | |
58 'max_finetuning_epochs':[MAX_FINETUNING_EPOCHS], | |
59 'finetuning_lr':[0.1, 0.01], #0.001 was very bad, so we leave it out | |
60 'num_hidden_layers':[2,3]} | |
61 | |
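Since the new file does `from config import *`, these globals (JOB_VALS, DEFAULT_HP_NIST, and friends) presumably now live in config.py rather than being dropped outright. The helper `produit_cartesien_jobs` comes from this repo's utils module and isn't shown here; assuming it performs a plain Cartesian product over the value lists, its effect on JOB_VALS would be roughly this sketch:

```python
# Illustrative stand-in for produit_cartesien_jobs (assumed behaviour):
# expand a dict of value lists into one flat dict per combination.
import itertools

def expand_grid(job_vals):
    keys = sorted(job_vals)
    for combo in itertools.product(*(job_vals[k] for k in keys)):
        yield dict(zip(keys, combo))

# For the JOB_VALS above: 2*2*2*3*1*1*2*2 = 96 jobs in the jobman DB.
jobs = list(expand_grid(JOB_VALS))
```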
62 # Just useful for tests... minimal number of epochs | |
63 DEFAULT_HP_NIST = DD({'finetuning_lr':0.1, | |
64 'pretraining_lr':0.1, | |
65 'pretraining_epochs_per_layer':20, | |
66 'max_finetuning_epochs':2, | |
67 'hidden_layers_sizes':800, | |
68 'corruption_levels':0.2, | |
69 'minibatch_size':20, | |
70 #'reduce_train_to':300, | |
71 'num_hidden_layers':2}) | |
72 | 34 |
73 ''' | 35 ''' |
74 Function called by jobman upon launching each job | 36 Function called by jobman upon launching each job |
75 Its path is the one given when inserting jobs: | 37 Its path is the one given when inserting jobs: see EXPERIMENT_PATH |
76 ift6266.deep.stacked_dae.nist_sda.jobman_entrypoint | |
77 ''' | 38 ''' |
78 def jobman_entrypoint(state, channel): | 39 def jobman_entrypoint(state, channel): |
79 # record mercurial versions of each package | 40 # record mercurial versions of each package |
80 pylearn.version.record_versions(state,[theano,ift6266,pylearn]) | 41 pylearn.version.record_versions(state,[theano,ift6266,pylearn]) |
42 # TODO: remove this, bad for number of simultaneous requests on DB | |
81 channel.save() | 43 channel.save() |
82 | |
83 workingdir = os.getcwd() | |
84 | |
85 print "Will load NIST" | |
86 | |
87 nist = NIST(minibatch_size=20) | |
88 | |
89 print "NIST loaded" | |
90 | 44 |
91 # For test runs, we don't want to use the whole dataset so | 45 # For test runs, we don't want to use the whole dataset so |
92 # reduce it to fewer elements if asked to. | 46 # reduce it to fewer elements if asked to. |
93 rtt = None | 47 rtt = None |
94 if state.has_key('reduce_train_to'): | 48 if state.has_key('reduce_train_to'): |
95 rtt = state['reduce_train_to'] | 49 rtt = state['reduce_train_to'] |
96 elif REDUCE_TRAIN_TO: | 50 elif REDUCE_TRAIN_TO: |
97 rtt = REDUCE_TRAIN_TO | 51 rtt = REDUCE_TRAIN_TO |
98 | 52 |
99 if rtt: | |
100 print "Reducing training set to "+str(rtt)+ " examples" | |
101 nist.reduce_train_set(rtt) | |
102 | |
103 train,valid,test = nist.get_tvt() | |
104 dataset = (train,valid,test) | |
105 | |
106 n_ins = 32*32 | 53 n_ins = 32*32 |
107 n_outs = 62 # 10 digits, 26*2 (lower, capitals) | 54 n_outs = 62 # 10 digits, 26*2 (lower, capitals) |
55 | |
56 examples_per_epoch = NIST_ALL_TRAIN_SIZE | |
108 | 57 |
109 # b,b',W for each hidden layer | 58 series = create_series(state.num_hidden_layers) |
110 # + b,W of last layer (logreg) | |
111 numparams = state.num_hidden_layers * 3 + 2 | |
112 series_mux = None | |
113 series_mux = create_series(workingdir, numparams) | |
114 | 59 |
115 print "Creating optimizer with state, ", state | 60 print "Creating optimizer with state, ", state |
116 | 61 |
117 optimizer = SdaSgdOptimizer(dataset=dataset, hyperparameters=state, \ | 62 optimizer = SdaSgdOptimizer(dataset=datasets.nist_all, |
63 hyperparameters=state, \ | |
118 n_ins=n_ins, n_outs=n_outs,\ | 64 n_ins=n_ins, n_outs=n_outs,\ |
119 input_divider=255.0, series_mux=series_mux) | 65 examples_per_epoch=examples_per_epoch, \ |
66 series=series, | |
67 max_minibatches=rtt) | |
120 | 68 |
121 optimizer.pretrain() | 69 optimizer.pretrain(datasets.nist_all) |
122 channel.save() | 70 channel.save() |
123 | 71 |
124 optimizer.finetune() | 72 optimizer.finetune(datasets.nist_all) |
125 channel.save() | 73 channel.save() |
126 | 74 |
127 return channel.COMPLETE | 75 return channel.COMPLETE |
128 | 76 |
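For context on the contract above: jobman imports the function named by EXPERIMENT_PATH and calls it with a `state` (a DD holding one hyperparameter combination pulled from the DB) and a `channel`; `channel.save()` persists the state back to the database, and returning `channel.COMPLETE` marks the job as finished. The `test_jobman_entrypoint` branch at the bottom of the file substitutes a minimal DD mock for this channel.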
129 # These Series objects are used to save various statistics | 77 # These Series objects are used to save various statistics |
130 # during the training. | 78 # during the training. |
131 def create_series(basedir, numparams): | 79 def create_series(num_hidden_layers): |
132 mux = SeriesMultiplexer() | 80 |
81 # Replace series we don't want to save with DummySeries, e.g. | |
82 # series['training_error'] = DummySeries() | |
83 | |
84 series = {} | |
85 | |
86 basedir = os.getcwd() | |
87 | |
88 h5f = tables.openFile(os.path.join(basedir, "series.h5"), "w") | |
89 | |
90 # reconstruction | |
91 reconstruction_base = \ | |
92 ErrorSeries(error_name="reconstruction_error", | |
93 table_name="reconstruction_error", | |
94 hdf5_file=h5f, | |
95 index_names=('epoch','minibatch'), | |
96 title="Reconstruction error (mean over "+str(REDUCE_EVERY)+" minibatches)") | |
97 series['reconstruction_error'] = \ | |
98 AccumulatorSeriesWrapper(base_series=reconstruction_base, | |
99 reduce_every=REDUCE_EVERY) | |
100 | |
101 # train | |
102 training_base = \ | |
103 ErrorSeries(error_name="training_error", | |
104 table_name="training_error", | |
105 hdf5_file=h5f, | |
106 index_names=('epoch','minibatch'), | |
107 title="Training error (mean over "+str(REDUCE_EVERY)+" minibatches)") | |
108 series['training_error'] = \ | |
109 AccumulatorSeriesWrapper(base_series=training_base, | |
110 reduce_every=REDUCE_EVERY) | |
111 | |
112 # valid and test are not accumulated/mean, saved directly | |
113 series['validation_error'] = \ | |
114 ErrorSeries(error_name="validation_error", | |
115 table_name="validation_error", | |
116 hdf5_file=h5f, | |
117 index_names=('epoch','minibatch')) | |
118 | |
119 series['test_error'] = \ | |
120 ErrorSeries(error_name="test_error", | |
121 table_name="test_error", | |
122 hdf5_file=h5f, | |
123 index_names=('epoch','minibatch')) | |
124 | |
125 param_names = [] | |
126 for i in range(num_hidden_layers): | |
127 param_names += ['layer%d_W'%i, 'layer%d_b'%i, 'layer%d_bprime'%i] | |
128 param_names += ['logreg_layer_W', 'logreg_layer_b'] | |
133 | 129 |
134 # comment out series we don't want to save | 130 # comment out series we don't want to save |
135 mux.add_series(AccumulatorSeries(name="reconstruction_error", | 131 series['params'] = SharedParamsStatisticsWrapper( |
136 reduce_every=REDUCE_EVERY, # every 1000 batches, we take the mean and save | 132 new_group_name="params", |
137 mean=True, | 133 base_group="/", |
138 directory=basedir, flush_every=1)) | 134 arrays_names=param_names, |
135 hdf5_file=h5f, | |
136 index_names=('epoch',)) | |
139 | 137 |
140 mux.add_series(AccumulatorSeries(name="training_error", | 138 return series |
141 reduce_every=REDUCE_EVERY, # every 1000 batches, we take the mean and save | |
142 mean=True, | |
143 directory=basedir, flush_every=1)) | |
144 | |
145 mux.add_series(BaseSeries(name="validation_error", directory=basedir, flush_every=1)) | |
146 mux.add_series(BaseSeries(name="test_error", directory=basedir, flush_every=1)) | |
147 | |
148 mux.add_series(ParamsArrayStats(numparams,name="params",directory=basedir)) | |
149 | |
150 return mux | |
151 | 139 |
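The optimizer is expected to feed these series as training runs. The `append(index_tuple, value)` signature below is an assumption about ift6266.utils.seriestables (which isn't shown in this file); the index tuple must match each series' `index_names`:

```python
# Hypothetical calls from inside the training loop; minibatch_idx,
# cost, valid_err and model_params are placeholder names.
series['reconstruction_error'].append((epoch, minibatch_idx), cost)   # averaged every REDUCE_EVERY
series['validation_error'].append((epoch, minibatch_idx), valid_err)  # saved directly

# Parameter statistics are indexed by epoch only
# (index_names=('epoch',) in SharedParamsStatisticsWrapper);
# model_params stands in for the list of shared W/b variables.
series['params'].append((epoch,), model_params)
```

As the comment in `create_series` notes, any series can be disabled by swapping in a DummySeries.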
152 # Perform insertion into the Postgres DB based on combination | 140 # Perform insertion into the Postgres DB based on combination |
153 # of hyperparameter values above | 141 # of hyperparameter values above |
154 # (see comment for produit_cartesien_jobs() to know how it works) | 142 # (see comment for produit_cartesien_jobs() to know how it works) |
155 def jobman_insert_nist(): | 143 def jobman_insert_nist(): |
160 job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH}) | 148 job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH}) |
161 jobman.sql.insert_dict(job, db) | 149 jobman.sql.insert_dict(job, db) |
162 | 150 |
163 print "inserted" | 151 print "inserted" |
164 | 152 |
165 class NIST: | |
166 def __init__(self, minibatch_size, basepath=None, reduce_train_to=None): | |
167 global NIST_ALL_LOCATION | |
168 | |
169 self.minibatch_size = minibatch_size | |
170 self.basepath = basepath and basepath or NIST_ALL_LOCATION | |
171 | |
172 self.set_filenames() | |
173 | |
174 # arrays of 2 elements: .x, .y | |
175 self.train = [None, None] | |
176 self.test = [None, None] | |
177 | |
178 self.load_train_test() | |
179 | |
180 self.valid = [[], []] | |
181 self.split_train_valid() | |
182 if reduce_train_to: | |
183 self.reduce_train_set(reduce_train_to) | |
184 | |
185 def get_tvt(self): | |
186 return self.train, self.valid, self.test | |
187 | |
188 def set_filenames(self): | |
189 self.train_files = ['all_train_data.ft', | |
190 'all_train_labels.ft'] | |
191 | |
192 self.test_files = ['all_test_data.ft', | |
193 'all_test_labels.ft'] | |
194 | |
195 def load_train_test(self): | |
196 self.load_data_labels(self.train_files, self.train) | |
197 self.load_data_labels(self.test_files, self.test) | |
198 | |
199 def load_data_labels(self, filenames, pair): | |
200 for i, fn in enumerate(filenames): | |
201 f = open(os.path.join(self.basepath, fn)) | |
202 pair[i] = filetensor.read(f) | |
203 f.close() | |
204 | |
205 def reduce_train_set(self, max): | |
206 self.train[0] = self.train[0][:max] | |
207 self.train[1] = self.train[1][:max] | |
208 | |
209 if max < len(self.test[0]): | |
210 for ar in (self.test, self.valid): | |
211 ar[0] = ar[0][:max] | |
212 ar[1] = ar[1][:max] | |
213 | |
214 def split_train_valid(self): | |
215 test_len = len(self.test[0]) | |
216 | |
217 new_train_x = self.train[0][:-test_len] | |
218 new_train_y = self.train[1][:-test_len] | |
219 | |
220 self.valid[0] = self.train[0][-test_len:] | |
221 self.valid[1] = self.train[1][-test_len:] | |
222 | |
223 self.train[0] = new_train_x | |
224 self.train[1] = new_train_y | |
225 | |
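For reference, the removed `split_train_valid` carves a validation set off the tail of the training set, sized to match the test set. A toy numpy illustration (the names here are placeholders, not the class's attributes):

```python
import numpy

train_x = numpy.arange(10)     # stand-in for self.train[0]
test_x = numpy.arange(3)       # stand-in for self.test[0]

test_len = len(test_x)         # validation is sized like the test set
valid_x = train_x[-test_len:]  # last 3 examples -> validation
train_x = train_x[:-test_len]  # first 7 examples stay in training
```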
226 def test_load_nist(): | |
227 print "Will load NIST" | |
228 | |
229 import time | |
230 t1 = time.time() | |
231 nist = NIST(20) | |
232 t2 = time.time() | |
233 | |
234 print "NIST loaded. time delta = ", t2-t1 | |
235 | |
236 tr,v,te = nist.get_tvt() | |
237 | |
238 print "Lenghts: ", len(tr[0]), len(v[0]), len(te[0]) | |
239 | |
240 raw_input("Press any key") | |
241 | |
242 if __name__ == '__main__': | 153 if __name__ == '__main__': |
243 | |
244 import sys | |
245 | 154 |
246 args = sys.argv[1:] | 155 args = sys.argv[1:] |
247 | 156 |
248 if len(args) > 0 and args[0] == 'load_nist': | 157 #if len(args) > 0 and args[0] == 'load_nist': |
249 test_load_nist() | 158 # test_load_nist() |
250 | 159 |
251 elif len(args) > 0 and args[0] == 'jobman_insert': | 160 if len(args) > 0 and args[0] == 'jobman_insert': |
252 jobman_insert_nist() | 161 jobman_insert_nist() |
253 | 162 |
254 elif len(args) > 0 and args[0] == 'test_jobman_entrypoint': | 163 elif len(args) > 0 and args[0] == 'test_jobman_entrypoint': |
255 chanmock = DD({'COMPLETE':0,'save':(lambda:None)}) | 164 chanmock = DD({'COMPLETE':0,'save':(lambda:None)}) |
256 jobman_entrypoint(DEFAULT_HP_NIST, chanmock) | 165 jobman_entrypoint(DEFAULT_HP_NIST, chanmock) |
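As the `__main__` block shows, the script is driven by its first command-line argument: `python nist_sda.py jobman_insert` fills the Postgres database with one job per hyperparameter combination, while `python nist_sda.py test_jobman_entrypoint` runs a single job in-process against DEFAULT_HP_NIST with the mocked channel; the old `load_nist` smoke test is now commented out along with the NIST class it exercised.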