ift6266: comparison deep/crbm/utils.py @ 360:f37c0705649d
Hg revision problems... attempting a merge
author:   fsavard
date:     Thu, 22 Apr 2010 10:34:26 -0400
parents:
children: 64fa85d68923
comparing 359:969ad25e78cc with 360:f37c0705649d
#!/usr/bin/python
# coding: utf-8

from __future__ import with_statement

import jobman
from jobman import DD

from pylearn.io.seriestables import *
import tables


# from pylearn codebase
# useful in __init__(param1, param2, etc.) to save
# values in self.param1, self.param2... just call
# update_locals(self, locals())
def update_locals(obj, dct):
    if 'self' in dct:
        del dct['self']
    obj.__dict__.update(dct)

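# A minimal usage sketch for update_locals(); the class and parameter
# names below are illustrative, not part of this module.
class _ExampleModel(object):
    def __init__(self, learning_rate, num_hidden):
        update_locals(self, locals())
        # equivalent to writing out:
        #   self.learning_rate = learning_rate
        #   self.num_hidden = num_hidden
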
# From a dictionary of possible hyperparameter values, e.g.
# hp_values = {'learning_rate': [0.1, 0.01], 'num_layers': [1, 2]}
# create a list of dictionaries covering all possible combinations,
# here: [{'learning_rate': 0.1, 'num_layers': 1}, ...] and similarly
# for the combinations (0.1, 2), (0.01, 1) and (0.01, 2).
def produit_cartesien_jobs(val_dict):
    job_list = [DD()]
    all_keys = val_dict.keys()

    for key in all_keys:
        possible_values = val_dict[key]
        new_job_list = []
        for val in possible_values:
            for job in job_list:
                to_insert = job.copy()
                to_insert.update({key: val})
                new_job_list.append(to_insert)
        job_list = new_job_list

    return job_list

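# Sketch of the expansion produit_cartesien_jobs() performs; the grid
# below matches the example in the comment above.
def _example_grid_expansion():
    hp_values = {'learning_rate': [0.1, 0.01], 'num_layers': [1, 2]}
    jobs = produit_cartesien_jobs(hp_values)
    # len(jobs) == 4; each job is a DD, e.g.
    # {'learning_rate': 0.1, 'num_layers': 1}
    return jobs
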
# Build jobs from an explicit list of value tuples, one per job,
# with cols giving the (hyper)parameter name for each position.
def jobs_from_reinsert_list(cols, job_vals):
    job_list = []
    for vals in job_vals:
        job = DD()
        for i, col in enumerate(cols):
            job[col] = vals[i]
        job_list.append(job)

    return job_list

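# Sketch of jobs_from_reinsert_list(): column names are zipped with
# each row of values (names and values below are illustrative).
def _example_reinsert():
    cols = ['learning_rate', 'num_layers']
    vals = [(0.1, 2), (0.01, 3)]
    # gives [DD({'learning_rate': 0.1, 'num_layers': 2}),
    #        DD({'learning_rate': 0.01, 'num_layers': 3})]
    return jobs_from_reinsert_list(cols, vals)
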
def save_params(all_params, filename):
    import pickle
    with open(filename, 'wb') as f:
        values = [p.value for p in all_params]

        # -1 for HIGHEST_PROTOCOL
        pickle.dump(values, f, -1)

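# Usage sketch around save_params(). It assumes each parameter object
# exposes a .value attribute (as old-style Theano shared variables
# did); reading the file back is plain pickle:
def _example_load_values(filename):
    import pickle
    with open(filename, 'rb') as f:
        # returns the list of raw parameter values saved above
        return pickle.load(f)
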
# Perform insertion into the Postgres DB based on the combinations
# of hyperparameter values given (see the comment above
# produit_cartesien_jobs() for how the expansion works).
def jobman_insert_job_vals(job_db, experiment_path, job_vals):
    jobs = produit_cartesien_jobs(job_vals)

    db = jobman.sql.db(job_db)
    for job in jobs:
        job.update({jobman.sql.EXPERIMENT: experiment_path})
        jobman.sql.insert_dict(job, db)

# Same insertion, but for an explicit list of jobs rather than a
# Cartesian product (see jobs_from_reinsert_list() above).
def jobman_insert_specific_jobs(job_db, experiment_path,
                                insert_cols, insert_vals):
    jobs = jobs_from_reinsert_list(insert_cols, insert_vals)

    db = jobman.sql.db(job_db)
    for job in jobs:
        job.update({jobman.sql.EXPERIMENT: experiment_path})
        jobman.sql.insert_dict(job, db)

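# Insertion sketch: the DB string and experiment path below are
# illustrative only (jobman expects a connection string along the
# lines of 'postgres://user@host/dbname?table=tablename').
def _example_insert_grid():
    job_db = 'postgres://user@host/dbname?table=example_table'
    experiment_path = 'mymodule.myexperiment.jobman_entrypoint'
    hp_values = {'learning_rate': [0.1, 0.01], 'num_layers': [1, 2]}
    jobman_insert_job_vals(job_db, experiment_path, hp_values)
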
# Just a shortcut for a common case where we need a few
# related Error (float) series
def get_accumulator_series_array(
        hdf5_file, group_name, series_names,
        reduce_every,
        index_names=('epoch', 'minibatch'),
        stdout_too=True,
        skip_hdf5_append=False):
    all_series = []

    new_group = hdf5_file.createGroup('/', group_name)

    other_targets = []
    if stdout_too:
        other_targets = [StdoutAppendTarget()]

    for sn in series_names:
        series_base = ErrorSeries(error_name=sn,
                                  table_name=sn,
                                  hdf5_file=hdf5_file,
                                  hdf5_group=new_group._v_pathname,
                                  index_names=index_names,
                                  other_targets=other_targets,
                                  skip_hdf5_append=skip_hdf5_append)

        all_series.append(
            AccumulatorSeriesWrapper(
                base_series=series_base,
                reduce_every=reduce_every))

    ret_wrapper = SeriesArrayWrapper(all_series)

    return ret_wrapper

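# Usage sketch for get_accumulator_series_array(); the file, group and
# series names below are illustrative. With reduce_every=10, each
# series accumulates 10 appended values before writing one reduced
# point, indexed by (epoch, minibatch) per the default index_names.
def _example_series_setup():
    h5f = tables.openFile('series.h5', 'w')
    series = get_accumulator_series_array(
        h5f, 'experiment_series',
        ['train_error', 'valid_error'],
        reduce_every=10)
    return series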