ift6266: comparison deep/crbm/mnist_config.py.example @ 359:969ad25e78cc
The config.py.example file was deleted, I don't know why?! Anyway, I'm adding it back.
author   | fsavard
date     | Thu, 22 Apr 2010 10:19:07 -0400
parents  |
children | f37c0705649d
358:31641a84e0ae | 359:969ad25e78cc
# ----------------------------------------------------------------------------
# BEGIN EXPERIMENT ISOLATION CODE

# Path to pass to jobman sqlschedule. IMPORTANT TO CHANGE TO REFLECT YOUR CLONE.
# Make sure this is accessible from the default $PYTHONPATH (in your .bashrc)
# (and make sure every subdirectory has its __init__.py file)
EXPERIMENT_PATH = "ift6266_mnistcrbm_exp1.ift6266.deep.crbm.mnist_crbm.jobman_entrypoint"
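
# A hedged usage sketch (not part of the original file): with jobman installed,
# jobs are typically scheduled from the shell by passing this dotted path
# together with the JOBDB string defined further below, plus the hyperparameters
# to record, e.g.:
#
#   jobman sqlschedule <JOBDB> <EXPERIMENT_PATH> learning_rate=0.1 num_filters=40
#
# The parameter names should match the keys used in JOB_VALS / DEFAULT_STATE.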

def isolate_experiment():
    '''
    This makes sure we use the codebase clone created for this experiment.
    I.e. if you want to make modifications to the codebase but don't want your
    running experiment code to be impacted by those changes, first copy the
    codebase somewhere, and configure this section. It will make sure we import
    from the right place.

    MUST BE DONE BEFORE IMPORTING ANYTHING ELSE
    (Leave this comment here so others will understand what's going on)
    '''

    # Place where you copied modules that should be frozen for this experiment
    codebase_clone_path = "/u/savardf/ift6266/experiment_clones/ift6266_mnistcrbm_exp1"

    # Places where there might be conflicting modules from your $PYTHONPATH
    remove_these_from_pythonpath = ["/u/savardf/ift6266/dev_code"]

    import sys
    sys.path[0:0] = [codebase_clone_path]

    # Remove paths we specifically don't want in $PYTHONPATH
    for bad_path in remove_these_from_pythonpath:
        sys.path[:] = [el for el in sys.path if el not in (bad_path, bad_path + "/")]

    # Make the imports
    import ift6266

    # Just making sure we're importing from the right place
    modules_to_check = [ift6266]
    for module in modules_to_check:
        if codebase_clone_path not in module.__path__[0]:
            raise RuntimeError("Module loaded from incorrect path " + module.__path__[0])

# END EXPERIMENT ISOLATION CODE
# ----------------------------------------------------------------------------
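
# A minimal usage sketch (an assumption, not part of the original file): the
# entry-point module is expected to import this config and call
# isolate_experiment() before any other project import, e.g. at the top of
# mnist_crbm.py:
#
#   from config import *
#   isolate_experiment()
#   import ift6266   # now resolved from the experiment clone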

from jobman import DD

'''
These are parameters used by mnist_crbm.py. They'll end up as globals in there.

Rename this file to config.py and configure as needed.
DON'T add the renamed file to the repository, as others might use it
without realizing it, with dire consequences.
'''
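
# A hedged tip (an assumption, not part of the original file): one way to keep
# the renamed config.py out of the repository is an .hgignore entry, e.g. with
# glob syntax:
#
#   syntax: glob
#   deep/crbm/config.py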

# change "sandbox" when you're ready
JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/fsavard_mnistcrbm_exp1'

# Set this to True when you want to run cluster tests, i.e. you want to run
# many jobs on the cluster but with a reduced training set size and number of
# epochs, so you know everything runs fine on the cluster.
# Set this PRIOR to inserting your test jobs in the DB.
TEST_CONFIG = False

# save params at training end
SAVE_PARAMS = True

IMAGE_OUTPUT_DIR = 'img/'

# number of minibatches before taking means for valid error etc.
REDUCE_EVERY = 100

# print series to stdout too (otherwise just produce the HDF5 file)
SERIES_STDOUT_TOO = False

# every X minibatches, not every X examples
VISUALIZE_EVERY = 1000
GIBBS_STEPS_IN_VIZ_CHAIN = 1000

if TEST_CONFIG:
    REDUCE_EVERY = 10
    VISUALIZE_EVERY = 20

# This is to configure insertion of jobs on the cluster.
# Possible values the hyperparameters can take. These are then
# combined with produit_cartesien_jobs so we get a list of all
# possible combinations, each one resulting in a job inserted
# in the jobman DB (see the illustrative sketch right after JOB_VALS).
JOB_VALS = {'learning_rate': [1.0, 0.1, 0.01],
            'sparsity_lambda': [3.0, 0.5],
            'sparsity_p': [0.3, 0.05],
            'num_filters': [40, 15],
            'filter_size': [12, 7],
            'minibatch_size': [20],
            'num_epochs': [20]}
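
# A hedged illustration (not part of the original file): the real insertion path
# uses the repository's produit_cartesien_jobs helper, but the cross product it
# computes is equivalent to the sketch below. With the JOB_VALS above this gives
# 3*2*2*2*2*1*1 = 48 job dicts, each becoming one row in the jobman DB.
def _example_expand_job_vals(job_vals=JOB_VALS):
    from itertools import product
    keys = sorted(job_vals)
    return [dict(zip(keys, combo))
            for combo in product(*(job_vals[k] for k in keys))]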

# Just useful for tests... minimal number of epochs
# Useful when launching a single local job
DEFAULT_STATE = DD({'learning_rate': 0.1,
                    'sparsity_lambda': 1.0,
                    'sparsity_p': 0.05,
                    'num_filters': 40,
                    'filter_size': 12,
                    'minibatch_size': 10,
                    'num_epochs': 20})
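
# A hedged usage note (assumptions about the launch path, not part of the
# original file): a single local job can typically be run either through jobman,
#
#   jobman cmdline ift6266.deep.crbm.mnist_crbm.jobman_entrypoint learning_rate=0.1
#
# or by calling the entry point directly with this state, assuming the usual
# jobman (state, channel) signature:
#
#   from ift6266.deep.crbm.mnist_crbm import jobman_entrypoint
#   jobman_entrypoint(DEFAULT_STATE, None)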

# To reinsert duplicates of jobs that crashed
REINSERT_COLS = ['learning_rate','sparsity_lambda','sparsity_p','num_filters','filter_size','minibatch_size','dupe']
#REINSERT_JOB_VALS = [\
#        [,2],]