deep/convolutional_dae/salah_exp/config.py @ 364:c05680f8c92f
Fixing a wrong commit and committing more files.

author: humel
date:   Thu, 22 Apr 2010 19:50:21 -0400

'''
These are parameters used by nist_sda.py. They'll end up as globals in there.

Rename this file to config.py and configure as needed.
DON'T add the renamed file to the repository, as others might use it
without realizing it, with dire consequences.
'''

# Set this to True when you want to run cluster tests, i.e. you want to
# run many jobs on the cluster but with a reduced training set size and
# number of epochs, so you can check that everything runs fine there.
# Set this PRIOR to inserting your test jobs in the DB.
TEST_CONFIG = False

NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
NIST_ALL_TRAIN_SIZE = 649081
# valid and test sets have 82587 examples each

# Change the table name at the end of the DB URL when you're ready
JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/rifaisal_csda'
EXPERIMENT_PATH = "ift6266.deep.convolutional_dae.salah_exp.nist_csda.jobman_entrypoint"

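# A minimal sketch, purely illustrative, of how a dotted entrypoint string
# like EXPERIMENT_PATH can be resolved to a callable. This is an assumption
# about what jobman does with it, not jobman's actual code; the helper name
# below is hypothetical.
import importlib

def _resolve_entrypoint_sketch(dotted_path):
    """Split 'pkg.module.func' into module and attribute, then import."""
    module_path, func_name = dotted_path.rsplit('.', 1)
    return getattr(importlib.import_module(module_path), func_name)
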
## To launch jobs on the cluster (run from the directory containing the files):
## python nist_sda.py jobman_insert
## dbidispatch --condor --repeat_jobs=2 jobman sql 'postgres://ift6266h10@gershwin/ift6266h10_db/pannetis_finetuningSDA0' .  # this is the path from config.py

# reduce training set to that many examples
REDUCE_TRAIN_TO = None
# that's a max; it usually doesn't get to that point
MAX_FINETUNING_EPOCHS = 1000
# number of minibatches before taking means for valid error, etc.
REDUCE_EVERY = 100
# Select the finetuning dataset
FINETUNE_SET = 1
# Select the pretraining dataset. 0: NIST, 1: P07
PRETRAIN_CHOICE = 1


if TEST_CONFIG:
    REDUCE_TRAIN_TO = 1000
    MAX_FINETUNING_EPOCHS = 2
    REDUCE_EVERY = 10


# This is to configure insertion of jobs on the cluster.
# Possible values the hyperparameters can take. These are then
# combined with produit_cartesien_jobs so we get a list of all
# possible combinations, each one resulting in a job inserted
# in the jobman DB. (A sketch of this expansion follows JOB_VALS below.)

JOB_VALS = {'pretraining_lr': [0.01],  # other candidates (commented out): 0.001, 0.0001
            'pretraining_epochs_per_layer': [10],
            'kernels': [[[52, 5, 5], [32, 3, 3]], [[52, 7, 7], [52, 3, 3]]],
            'mlp_size': [[1000], [500]],
            'imgshp': [[32, 32]],
            'max_pool_layers': [[[2, 2], [2, 2]]],
            'corruption_levels': [[0.2, 0.1]],
            'minibatch_size': [100],
            'max_finetuning_epochs': [MAX_FINETUNING_EPOCHS],
            'max_finetuning_epochs_P07': [1000],
            'finetuning_lr': [0.1, 0.01],  # 0.001 was very bad, so we leave it out
            'num_hidden_layers': [2],
            'finetune_set': [1],
            'pretrain_choice': [1]
            }

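# A minimal sketch, for illustration only, of the Cartesian-product expansion
# described above. This is an assumption about what produit_cartesien_jobs
# computes, not its actual implementation; the helper name is hypothetical.
import itertools

def _cartesian_jobs_sketch(job_vals):
    """Yield one flat hyperparameter dict per combination of values."""
    names = sorted(job_vals.keys())
    for combo in itertools.product(*(job_vals[name] for name in names)):
        yield dict(zip(names, combo))

# With JOB_VALS above, 'kernels', 'mlp_size' and 'finetuning_lr' each offer
# two candidate values, so this yields 2 * 2 * 2 = 8 jobs.
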
DEFAULT_HP_NIST = {'pretraining_lr': 0.01,
                   'pretraining_epochs_per_layer': 1,
                   'kernels': [[4, 5, 5], [2, 3, 3]],
                   'mlp_size': [10],
                   'imgshp': [32, 32],
                   'max_pool_layers': [[2, 2], [2, 2]],
                   'corruption_levels': [0.1, 0.2],
                   'minibatch_size': 20,
                   'max_finetuning_epochs': MAX_FINETUNING_EPOCHS,
                   'max_finetuning_epochs_P07': 1000,
                   'finetuning_lr': 0.1,  # 0.001 was very bad, so we leave it out
                   'num_hidden_layers': 2,
                   'finetune_set': 1,
                   'pretrain_choice': 1,
                   #'reduce_train_to': 1000,
                   }
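
# A hypothetical sketch (not taken from nist_sda.py) of how DEFAULT_HP_NIST
# could seed a job's state: values coming from the jobman DB row win, and
# any missing hyperparameter falls back to the defaults above.
def _seed_state_sketch(db_row, defaults=DEFAULT_HP_NIST):
    """Return a full hyperparameter dict; db_row entries override defaults."""
    state = dict(defaults)
    state.update(db_row)
    return state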


##[pannetis@ceylon test]$ python nist_sda.py test_jobman_entrypoint
##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/TMP_DBI/configobj.py
##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/TMP_DBI/utils.py
##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/config.py
##WARNING: untracked file /u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/config2.py
##Creating optimizer with state, DD{'reduce_train_to': 11000, 'pretraining_epochs_per_layer': 2, 'hidden_layers_sizes': 300, 'num_hidden_layers': 2, 'corruption_levels': 0.20000000000000001, 'finetuning_lr': 0.10000000000000001, 'pretrain_choice': 0, 'max_finetuning_epochs': 2, 'version_pylearn': '08b37147dec1', 'finetune_set': -1, 'pretraining_lr': 0.10000000000000001, 'version_ift6266': 'a6b6b1140de9', 'version_theano': 'fb6c3a06cb65', 'minibatch_size': 20}
##SdaSgdOptimizer, max_minibatches = 11000
##n_outs 62
##pretrain_lr 0.1
##finetune_lr 0.1
##----
##
##pretraining with NIST
##
##STARTING PRETRAINING, time = 2010-03-29 15:07:43.945981
##Pre-training layer 0, epoch 0, cost 113.562562494
##Pre-training layer 0, epoch 1, cost 113.410032944
##Pre-training layer 1, epoch 0, cost 98.4539954687
##Pre-training layer 1, epoch 1, cost 97.8658966686
##Pretraining took 9.011333 minutes
##
##SERIES OF 3 DIFFERENT FINETUNINGS
##
##
##finetune with NIST
##
##
##STARTING FINETUNING, time = 2010-03-29 15:16:46.512235
##epoch 1, minibatch 4999, validation error on P07 : 29.511250 %
## epoch 1, minibatch 4999, test error on dataset NIST (train data) of best model 40.408509 %
## epoch 1, minibatch 4999, test error on dataset P07 of best model 96.700000 %
##epoch 1, minibatch 9999, validation error on P07 : 25.560000 %
## epoch 1, minibatch 9999, test error on dataset NIST (train data) of best model 34.778969 %
## epoch 1, minibatch 9999, test error on dataset P07 of best model 97.037500 %
##
##Optimization complete with best validation score of 25.560000 %, with test performance 34.778969 % on dataset NIST
##The test score on the P07 dataset is 97.037500
##The finetuning ran for 3.281833 minutes
##
##
##finetune with P07
##
##
##STARTING FINETUNING, time = 2010-03-29 15:20:06.346009
##epoch 1, minibatch 4999, validation error on NIST : 65.226250 %
## epoch 1, minibatch 4999, test error on dataset P07 (train data) of best model 84.465000 %
## epoch 1, minibatch 4999, test error on dataset NIST of best model 65.965237 %
##epoch 1, minibatch 9999, validation error on NIST : 58.745000 %
## epoch 1, minibatch 9999, test error on dataset P07 (train data) of best model 80.405000 %
## epoch 1, minibatch 9999, test error on dataset NIST of best model 61.341923 %
##
##Optimization complete with best validation score of 58.745000 %, with test performance 80.405000 % on dataset P07
##The test score on the NIST dataset is 61.341923
##The finetuning ran for 3.299500 minutes
##
##
##finetune with NIST (done earlier) followed by P07 (written here)
##
##
##STARTING FINETUNING, time = 2010-03-29 15:23:27.947374
##epoch 1, minibatch 4999, validation error on NIST : 83.975000 %
## epoch 1, minibatch 4999, test error on dataset P07 (train data) of best model 83.872500 %
## epoch 1, minibatch 4999, test error on dataset NIST of best model 43.170010 %
##epoch 1, minibatch 9999, validation error on NIST : 79.775000 %
## epoch 1, minibatch 9999, test error on dataset P07 (train data) of best model 80.971250 %
## epoch 1, minibatch 9999, test error on dataset NIST of best model 49.017468 %
##
##Optimization complete with best validation score of 79.775000 %, with test performance 80.971250 % on dataset P07
##The test score on the NIST dataset is 49.017468
##The finetuning ran for 2.851500 minutes
##
##
##finetune with NIST only on the logistic regression on top.
## All hidden unit outputs are inputs of the logistic regression
##
##
##STARTING FINETUNING, time = 2010-03-29 15:26:21.430557
##epoch 1, minibatch 4999, validation error on P07 : 95.223750 %
## epoch 1, minibatch 4999, test error on dataset NIST (train data) of best model 93.268765 %
## epoch 1, minibatch 4999, test error on dataset P07 of best model 96.535000 %
##epoch 1, minibatch 9999, validation error on P07 : 95.223750 %
##
##Optimization complete with best validation score of 95.223750 %, with test performance 93.268765 % on dataset NIST
##The test score on the P07 dataset is 96.535000
##The finetuning ran for 2.013167 minutes
##Closing remaining open files: /u/pannetis/IFT6266/test/series.h5... done
##[pannetis@ceylon test]$