Mercurial > ift6266
comparison deep/stacked_dae/config.py.example @ 265:c8fe09a65039
Déplacer le nouveau code de stacked_dae de v2 vers le répertoire de base 'stacked_dae', et bougé le vieux code vers le répertoire 'old'
author | fsavard |
---|---|
date | Fri, 19 Mar 2010 10:54:39 -0400 |
parents | deep/stacked_dae/v2/config.py.example@42005ec87747 |
children | 798d1344e6a2 |
comparison
equal
deleted
inserted
replaced
243:3c54cb3713ef | 265:c8fe09a65039 |
---|---|
# ----------------------------------------------------------------------------
# BEGIN EXPERIMENT ISOLATION CODE

'''
This makes sure we use the codebase clone created for this experiment.
I.e. if you want to make modifications to the codebase but don't want your
running experiment code to be impacted by those changes, first copy the
codebase somewhere, and configure this section. It will make sure we import
from the right place.

MUST BE DONE BEFORE IMPORTING ANYTHING ELSE
(Leave this comment there so others will understand what's going on)
'''

# Place where you copied modules that should be fixed for this experiment
codebase_clone_path = "/u/savardf/ift6266/experiment_clones/ift6266_experiment10"

# Places where there might be conflicting modules from your $PYTHONPATH
remove_these_from_pythonpath = ["/u/savardf/ift6266/dev_code"]

import sys
# Prepend the clone so it wins over anything else on the path.
sys.path[0:0] = [codebase_clone_path]

# Remove paths we specifically don't want in $PYTHONPATH.
# BUG FIX: the previous version read "if not el (bad_path, ...)", which
# would call the string `el` as a function and raise TypeError; the intent
# was a membership test against the path (with and without trailing slash).
for bad_path in remove_these_from_pythonpath:
    sys.path[:] = [el for el in sys.path
                   if el not in (bad_path, bad_path + "/")]

# Make the imports
import ift6266

# Just making sure we're importing from the right place
modules_to_check = [ift6266]
for module in modules_to_check:
    assert codebase_clone_path in module.__path__

# Path to pass to jobman sqlschedule. IMPORTANT TO CHANGE TO REFLECT YOUR CLONE.
# Make sure this is accessible from the default $PYTHONPATH (in your .bashrc)
EXPERIMENT_PATH = "ift6266_experiment10.deep.stacked_dae.nist_sda.jobman_entrypoint"

# END EXPERIMENT ISOLATION CODE
# ----------------------------------------------------------------------------
'''
These are parameters used by nist_sda.py. They'll end up as globals in there.

Rename this file to config.py and configure as needed.
DON'T add the renamed file to the repository, as others might use it
without realizing it, with dire consequences.
'''

# Flip this to True for cluster smoke tests: jobs still run on the cluster,
# but with a shrunken training set and very few epochs, just to verify that
# the whole pipeline works end to end.
# Must be set BEFORE inserting your test jobs in the DB.
TEST_CONFIG = False

NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
NIST_ALL_TRAIN_SIZE = 649081
# validation and test sets hold 82587 examples each

# replace "sandbox" in this URL once you're ready for real runs
JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/yourtablenamehere'

# cap on the number of training examples (None = use the full set)
REDUCE_TRAIN_TO = None
# upper bound only; training usually stops well before reaching it
MAX_FINETUNING_EPOCHS = 1000
# number of minibatches between averaged valid-error measurements
REDUCE_EVERY = 100

# Shrink everything when doing a cluster smoke test.
if TEST_CONFIG:
    REDUCE_TRAIN_TO = 1000
    MAX_FINETUNING_EPOCHS = 2
    REDUCE_EVERY = 10

# Cluster job-insertion grid: each hyperparameter maps to the list of
# values it may take. produit_cartesien_jobs expands these into the full
# cartesian product, inserting one jobman DB entry per combination.
JOB_VALS = {
    'pretraining_lr': [0.1, 0.01],  # 0.001 / 0.0001 left out for now
    'pretraining_epochs_per_layer': [10, 20],
    'hidden_layers_sizes': [300, 800],
    'corruption_levels': [0.1, 0.2, 0.3],
    'minibatch_size': [20],
    'max_finetuning_epochs': [MAX_FINETUNING_EPOCHS],
    'finetuning_lr': [0.1, 0.01],  # 0.001 was very bad, so we leave it out
    'num_hidden_layers': [2, 3],
}

# Minimal-epoch hyperparameters, only useful for quick tests.
# (Used when running a single job locally via
# ./nist_sda.py test_jobman_entrypoint)
DEFAULT_HP_NIST = DD({
    'finetuning_lr': 0.1,
    'pretraining_lr': 0.1,
    'pretraining_epochs_per_layer': 2,
    'max_finetuning_epochs': 2,
    'hidden_layers_sizes': 800,
    'corruption_levels': 0.2,
    'minibatch_size': 20,
    'reduce_train_to': 10000,
    'num_hidden_layers': 1,
})