ift6266: comparison deep/crbm/utils.py @ 339:ffbf0e41bcee
Added code to run the experiment on the cluster and to separate configuration from the other machinery. Not tested yet.
author | fsavard
---|---
date | Sat, 17 Apr 2010 20:29:18 -0400
parents |
children | 82dae7c46046
comparison of 338:fca22114bb23 with 339:ffbf0e41bcee
#!/usr/bin/python
# coding: utf-8

from __future__ import with_statement

from jobman import DD
# jobman.sql is used by the insertion helpers below (db(), insert_dict(),
# EXPERIMENT) but was not imported in the original file; import it here.
import jobman.sql

from pylearn.io.seriestables import *
import tables


# from pylearn codebase
# useful in __init__(param1, param2, etc.) to save
# values in self.param1, self.param2... just call
# update_locals(self, locals())
def update_locals(obj, dct):
    if 'self' in dct:
        del dct['self']
    obj.__dict__.update(dct)

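# Illustrative example, added for this write-up (not part of the original
# file): a constructor that saves all of its arguments on the instance.
class _UpdateLocalsExample(object):
    def __init__(self, learning_rate=0.1, num_layers=2):
        update_locals(self, locals())
        # self.learning_rate and self.num_layers are now set
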
# from a dictionary of possible values for hyperparameters, e.g.
# hp_values = {'learning_rate':[0.1, 0.01], 'num_layers': [1,2]}
# create a list of other dictionaries representing all the possible
# combinations, thus in this example creating:
# [{'learning_rate': 0.1, 'num_layers': 1}, ...]
# (similarly for combinations (0.1, 2), (0.01, 1), (0.01, 2))
def produit_cartesien_jobs(val_dict):
    job_list = [DD()]
    all_keys = val_dict.keys()

    for key in all_keys:
        possible_values = val_dict[key]
        new_job_list = []
        for val in possible_values:
            for job in job_list:
                to_insert = job.copy()
                to_insert.update({key: val})
                new_job_list.append(to_insert)
        job_list = new_job_list

    return job_list

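# Illustrative example, added for this write-up (not part of the original
# file): two hyperparameters with two values each expand to 2 x 2 = 4 jobs,
# i.e. the combinations (0.1, 1), (0.1, 2), (0.01, 1) and (0.01, 2).
def _produit_cartesien_example():
    hp_values = {'learning_rate': [0.1, 0.01], 'num_layers': [1, 2]}
    jobs = produit_cartesien_jobs(hp_values)
    assert len(jobs) == 4
    return jobs
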
# build DD jobs from an explicit list of column names plus, for each job,
# a tuple of values (used when reinserting specific jobs)
def jobs_from_reinsert_list(cols, job_vals):
    job_list = []
    for vals in job_vals:
        job = DD()
        for i, col in enumerate(cols):
            job[col] = vals[i]
        job_list.append(job)

    return job_list

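# Illustrative example, added for this write-up (not part of the original
# file): rebuild two specific jobs from column names and value tuples.
def _reinsert_list_example():
    cols = ['learning_rate', 'num_layers']
    vals = [(0.1, 2), (0.01, 3)]
    jobs = jobs_from_reinsert_list(cols, vals)
    assert jobs[0]['learning_rate'] == 0.1 and jobs[1]['num_layers'] == 3
    return jobs
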
# pickle the current values (p.value) of a list of parameter objects
def save_params(all_params, filename):
    import pickle
    with open(filename, 'wb') as f:
        values = [p.value for p in all_params]

        # -1 for HIGHEST_PROTOCOL
        pickle.dump(values, f, -1)

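# Hypothetical counterpart to save_params, added for this write-up (not part
# of the original file): reload the pickled values into the same list of
# parameter objects, assuming each exposes a writable .value attribute.
def load_params(all_params, filename):
    import pickle
    with open(filename, 'rb') as f:
        values = pickle.load(f)
    for p, v in zip(all_params, values):
        p.value = v
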
# Perform insertion into the Postgres DB based on combinations
# of hyperparameter values above
# (see comment for produit_cartesien_jobs() to know how it works)
def jobman_insert_job_vals(job_db, experiment_path, job_vals):
    jobs = produit_cartesien_jobs(job_vals)

    db = jobman.sql.db(job_db)
    for job in jobs:
        job.update({jobman.sql.EXPERIMENT: experiment_path})
        jobman.sql.insert_dict(job, db)

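# Illustrative call, added for this write-up (not part of the original file).
def _insert_example():
    # Both the database URL format and the experiment path below are
    # hypothetical placeholders, not values taken from this repository.
    job_db = 'postgres://username@dbhost/dbname?table=crbm_jobs'
    experiment_path = 'mymodule.my_experiment.jobman_entrypoint'
    hp_values = {'learning_rate': [0.1, 0.01], 'num_layers': [1, 2]}
    jobman_insert_job_vals(job_db, experiment_path, hp_values)
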
# As above, but inserts an explicit list of jobs (built by
# jobs_from_reinsert_list) instead of the full Cartesian product
def jobman_insert_specific_jobs(job_db, experiment_path,
                                insert_cols, insert_vals):
    jobs = jobs_from_reinsert_list(insert_cols, insert_vals)

    db = jobman.sql.db(job_db)
    for job in jobs:
        job.update({jobman.sql.EXPERIMENT: experiment_path})
        jobman.sql.insert_dict(job, db)

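# Illustrative call, added for this write-up (not part of the original file),
# showing how a handful of specific jobs could be reinserted; the database
# URL would take the same placeholder form as in the sketch above.
def _reinsert_example(job_db, experiment_path):
    jobman_insert_specific_jobs(job_db, experiment_path,
                                insert_cols=['learning_rate', 'num_layers'],
                                insert_vals=[(0.1, 2), (0.01, 3)])
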
# Just a shortcut for a common case where we need a few
# related Error (float) series
def get_accumulator_series_array( \
        hdf5_file, group_name, series_names,
        reduce_every,
        index_names=('epoch','minibatch'),
        stdout_too=True,
        skip_hdf5_append=False):
    all_series = []

    new_group = hdf5_file.createGroup('/', group_name)

    other_targets = []
    if stdout_too:
        other_targets = [StdoutAppendTarget()]

    for sn in series_names:
        series_base = \
            ErrorSeries(error_name=sn,
                        table_name=sn,
                        hdf5_file=hdf5_file,
                        hdf5_group=new_group._v_pathname,
                        index_names=index_names,
                        other_targets=other_targets,
                        skip_hdf5_append=skip_hdf5_append)

        all_series.append( \
            AccumulatorSeriesWrapper( \
                base_series=series_base,
                reduce_every=reduce_every))

    ret_wrapper = SeriesArrayWrapper(all_series)

    return ret_wrapper

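# Illustrative usage sketch, added for this write-up (not part of the
# original file).  It assumes the PyTables 2.x API (openFile/createGroup)
# already relied on above, and that the returned SeriesArrayWrapper exposes
# an append(index, values) method; treat both as assumptions rather than
# documented facts.
#
#   h5f = tables.openFile('series.h5', 'w')
#   series = get_accumulator_series_array(h5f, 'training',
#                                         ['train_error', 'valid_error'],
#                                         reduce_every=100)
#   # inside the training loop, once per minibatch:
#   series.append((epoch, minibatch_index), [train_err, valid_err])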