comparison deep/stacked_dae/v2/nist_sda.py @ 227:acae439d6572
Added a change to stacked_dae that uses the new SeriesTables. I'm putting it in the repository so that my experiments in progress can continue undisturbed, and so that Sylvain can grab the current version; I'll merge at some point.
| author | fsavard |
|---|---|
| date | Fri, 12 Mar 2010 10:31:10 -0500 |
| parents | |
| children | 851e7ad4a143 |
comparing 226:bfe20d63f88c with 227:acae439d6572
#!/usr/bin/python
# coding: utf-8

import ift6266
import pylearn

import numpy
import theano
import time

import pylearn.version
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

import copy
import sys
import os
import os.path

from jobman import DD
import jobman, jobman.sql
from pylearn.io import filetensor

from utils import produit_cartesien_jobs

from sgd_optimization import SdaSgdOptimizer

#from ift6266.utils.scalar_series import *
from ift6266.utils.seriestables import *
import tables
##############################################################################
# GLOBALS

TEST_CONFIG = False

NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/fsavard_sda4'
EXPERIMENT_PATH = "ift6266.deep.stacked_dae.nist_sda.jobman_entrypoint"

REDUCE_TRAIN_TO = None
MAX_FINETUNING_EPOCHS = 1000
# number of minibatches before taking means for valid error etc.
REDUCE_EVERY = 100

if TEST_CONFIG:
    REDUCE_TRAIN_TO = 1000
    MAX_FINETUNING_EPOCHS = 2
    REDUCE_EVERY = 10

# Possible values the hyperparameters can take. These are then
# combined with produit_cartesien_jobs so we get a list of all
# possible combinations, each one resulting in a job inserted
# in the jobman DB.
JOB_VALS = {'pretraining_lr': [0.1, 0.01], #, 0.001], #, 0.0001],
            'pretraining_epochs_per_layer': [10, 20],
            'hidden_layers_sizes': [300, 800],
            'corruption_levels': [0.1, 0.2, 0.3],
            'minibatch_size': [20],
            'max_finetuning_epochs': [MAX_FINETUNING_EPOCHS],
            'finetuning_lr': [0.1, 0.01], # 0.001 was very bad, so we leave it out
            'num_hidden_layers': [2, 3]}
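
# For illustration: produit_cartesien_jobs expands JOB_VALS into one
# hyperparameter dict per combination, i.e. 2*2*2*3*1*1*2*2 = 96 jobs here,
# each looking like
# {'pretraining_lr': 0.1, 'pretraining_epochs_per_layer': 10, ...}.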

# Just useful for tests... minimal number of epochs
DEFAULT_HP_NIST = DD({'finetuning_lr': 0.1,
                      'pretraining_lr': 0.1,
                      'pretraining_epochs_per_layer': 2,
                      'max_finetuning_epochs': 2,
                      'hidden_layers_sizes': 800,
                      'corruption_levels': 0.2,
                      'minibatch_size': 20,
                      'reduce_train_to': 30000,
                      'num_hidden_layers': 1})
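
# DD is jobman's "dotted dict": keys are also reachable as attributes,
# which is why jobman_entrypoint can read state.num_hidden_layers below.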

def jobman_entrypoint(state, channel):
    '''
    Function called by jobman upon launching each job.
    Its path is the one given when inserting jobs:
    ift6266.deep.stacked_dae.nist_sda.jobman_entrypoint
    '''
    # record mercurial versions of each package
    pylearn.version.record_versions(state, [theano, ift6266, pylearn])
    # TODO: remove this, bad for number of simultaneous requests on DB
    channel.save()

    workingdir = os.getcwd()

    print "Will load NIST"

    nist = NIST(minibatch_size=20)

    print "NIST loaded"

    # For test runs, we don't want to use the whole dataset so
    # reduce it to fewer elements if asked to.
    rtt = None
    if 'reduce_train_to' in state:
        rtt = state['reduce_train_to']
    elif REDUCE_TRAIN_TO:
        rtt = REDUCE_TRAIN_TO

    if rtt:
        print "Reducing training set to " + str(rtt) + " examples"
        nist.reduce_train_set(rtt)

    train, valid, test = nist.get_tvt()
    dataset = (train, valid, test)

    n_ins = 32*32
    n_outs = 62 # 10 digits, 26*2 (lower, capitals)

    series = create_series(state.num_hidden_layers)

    print "Creating optimizer with state", state

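    # input_divider=255.0 presumably rescales the raw NIST byte pixels
    # from [0, 255] to [0, 1] inside the optimizer.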
    optimizer = SdaSgdOptimizer(dataset=dataset, hyperparameters=state,
                                n_ins=n_ins, n_outs=n_outs,
                                input_divider=255.0, series=series)

    optimizer.pretrain()
    channel.save()

    optimizer.finetune()
    channel.save()

    return channel.COMPLETE
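
# Note on the jobman protocol as used above: channel.save() checkpoints the
# current state back to the DB (here, between pretraining and finetuning),
# and returning channel.COMPLETE marks the job as finished in the scheduler.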

# These Series objects are used to save various statistics
# during the training.
def create_series(num_hidden_layers):

    # Replace series we don't want to save with DummySeries, e.g.
    # series['training_error'] = DummySeries()

    series = {}

    basedir = os.getcwd()

    h5f = tables.openFile(os.path.join(basedir, "series.h5"), "w")

    # reconstruction
    reconstruction_base = \
        ErrorSeries(error_name="reconstruction_error",
                    table_name="reconstruction_error",
                    hdf5_file=h5f,
                    index_names=('epoch', 'minibatch'),
                    title="Reconstruction error (mean over " + str(REDUCE_EVERY) + " minibatches)")
    series['reconstruction_error'] = \
        AccumulatorSeriesWrapper(base_series=reconstruction_base,
                                 reduce_every=REDUCE_EVERY)

    # train
    training_base = \
        ErrorSeries(error_name="training_error",
                    table_name="training_error",
                    hdf5_file=h5f,
                    index_names=('epoch', 'minibatch'),
                    title="Training error (mean over " + str(REDUCE_EVERY) + " minibatches)")
    series['training_error'] = \
        AccumulatorSeriesWrapper(base_series=training_base,
                                 reduce_every=REDUCE_EVERY)

    # valid and test are not accumulated/mean, saved directly
    series['validation_error'] = \
        ErrorSeries(error_name="validation_error",
                    table_name="validation_error",
                    hdf5_file=h5f,
                    index_names=('epoch', 'minibatch'))

    series['test_error'] = \
        ErrorSeries(error_name="test_error",
                    table_name="test_error",
                    hdf5_file=h5f,
                    index_names=('epoch', 'minibatch'))

    param_names = []
    for i in range(num_hidden_layers):
        param_names += ['layer%d_W' % i, 'layer%d_b' % i, 'layer%d_bprime' % i]
    param_names += ['logreg_layer_W', 'logreg_layer_b']

    # comment out series we don't want to save
    series['params'] = SharedParamsStatisticsWrapper(
        new_group_name="params",
        base_group="/",
        arrays_names=param_names,
        hdf5_file=h5f,
        index_names=('epoch',))

    return series
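
# A hedged usage sketch (the exact API lives in ift6266.utils.seriestables,
# not in this file): during training the optimizer presumably records values
# with something like
#   series['training_error'].append((epoch, minibatch_index), train_cost)
# and AccumulatorSeriesWrapper only writes the mean of the accumulated values
# to the HDF5 table every REDUCE_EVERY appends.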

# Perform insertion into the Postgres DB based on combinations
# of hyperparameter values above
# (see comment for produit_cartesien_jobs() to know how it works)
def jobman_insert_nist():
    jobs = produit_cartesien_jobs(JOB_VALS)

    db = jobman.sql.db(JOBDB)
    for job in jobs:
        job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
        jobman.sql.insert_dict(job, db)

    print "inserted"
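
# Once inserted, jobs are typically consumed by launching workers with the
# jobman command-line tool, e.g. (assuming a standard jobman setup):
#   jobman sql 'postgres://ift6266h10@gershwin/ift6266h10_db/fsavard_sda4' .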

class NIST:
    def __init__(self, minibatch_size, basepath=None, reduce_train_to=None):
        self.minibatch_size = minibatch_size
        self.basepath = basepath or NIST_ALL_LOCATION

        self.set_filenames()

        # pairs of 2 elements: [data, labels]
        self.train = [None, None]
        self.test = [None, None]

        self.load_train_test()

        self.valid = [[], []]
        self.split_train_valid()
        if reduce_train_to:
            self.reduce_train_set(reduce_train_to)

    def get_tvt(self):
        return self.train, self.valid, self.test

    def set_filenames(self):
        self.train_files = ['all_train_data.ft',
                            'all_train_labels.ft']

        self.test_files = ['all_test_data.ft',
                           'all_test_labels.ft']

    def load_train_test(self):
        self.load_data_labels(self.train_files, self.train)
        self.load_data_labels(self.test_files, self.test)

    def load_data_labels(self, filenames, pair):
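        # Each .ft file is in pylearn's filetensor binary format;
        # filetensor.read() loads it back as a numpy ndarray.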
        for i, fn in enumerate(filenames):
            f = open(os.path.join(self.basepath, fn))
            pair[i] = filetensor.read(f)
            f.close()

    def reduce_train_set(self, max_size):
        self.train[0] = self.train[0][:max_size]
        self.train[1] = self.train[1][:max_size]

        if max_size < len(self.test[0]):
            for ar in (self.test, self.valid):
                ar[0] = ar[0][:max_size]
                ar[1] = ar[1][:max_size]

    def split_train_valid(self):
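        # Carve a validation set off the tail of the training set,
        # the same size as the test set.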
        test_len = len(self.test[0])

        new_train_x = self.train[0][:-test_len]
        new_train_y = self.train[1][:-test_len]

        self.valid[0] = self.train[0][-test_len:]
        self.valid[1] = self.train[1][-test_len:]

        self.train[0] = new_train_x
        self.train[1] = new_train_y

def test_load_nist():
    print "Will load NIST"

    t1 = time.time()
    nist = NIST(20)
    t2 = time.time()

    print "NIST loaded. time delta =", t2 - t1

    tr, v, te = nist.get_tvt()

    print "Lengths:", len(tr[0]), len(v[0]), len(te[0])

    raw_input("Press Enter to continue")

if __name__ == '__main__':

    args = sys.argv[1:]

    if len(args) > 0 and args[0] == 'load_nist':
        test_load_nist()

    elif len(args) > 0 and args[0] == 'jobman_insert':
        jobman_insert_nist()

    elif len(args) > 0 and args[0] == 'test_jobman_entrypoint':
        chanmock = DD({'COMPLETE': 0, 'save': (lambda: None)})
        jobman_entrypoint(DEFAULT_HP_NIST, chanmock)

    else:
        print "Bad arguments"