comparison deep/stacked_dae/v_guillaume/config.py @ 443:89a49dae6cf3
merge

author:   Xavier Glorot <glorotxa@iro.umontreal.ca>
date:     Mon, 03 May 2010 18:38:58 -0400
parents:  0ca069550abd
children: (none)
comparison: 442:d5b2b6397a5a vs 443:89a49dae6cf3

# -*- coding: utf-8 -*-
'''
These are parameters used by nist_sda_retrieve.py. They'll end up as globals in there.

Rename this file to config.py and configure as needed.
DON'T add the renamed file to the repository, as others might use it
without realizing it, with dire consequences.
'''

# Set this to True when you want to run cluster tests, i.e. you want
# to run many jobs on the cluster, but with a reduced training set
# size and number of epochs, so you know everything runs fine
# on the cluster.
# Set this PRIOR to inserting your test jobs in the DB.
TEST_CONFIG = False

NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
NIST_UPPER_LOCATION = '/data/lisa/data/nist/by_class/upper'
NIST_LOWER_LOCATION = '/data/lisa/data/nist/by_class/lower'
NIST_DIGITS_LOCATION = '/data/lisa/data/nist/by_class/digits'

NIST_ALL_TRAIN_SIZE = 649081
# valid and test sizes = 82587 and 82587
NIST_UPPER_TRAIN_SIZE = 196422
NIST_LOWER_TRAIN_SIZE = 166998
NIST_DIGITS_TRAIN_SIZE = 285661

SUBDATASET_NIST = 'all'

# Paths of two pretrainings done earlier
PATH_NIST = '/u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/NIST_big'
PATH_P07 = '/u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/P07_big/'

# change "sandbox" when you're ready
JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/pannetis_SDA_retrieve'
EXPERIMENT_PATH = "ift6266.deep.stacked_dae.v_sylvain.nist_sda_retrieve.jobman_entrypoint"
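# Sketch (an assumption, not part of this changeset): jobman presumably
# resolves the dotted EXPERIMENT_PATH string into a callable roughly like
#   module_name, func_name = EXPERIMENT_PATH.rsplit('.', 1)
#   entrypoint = getattr(__import__(module_name, fromlist=[func_name]), func_name)
# so the string must point at jobman_entrypoint in nist_sda_retrieve.py.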

## To launch jobs on the cluster (run from the directory containing the files):
## python nist_sda_retrieve.py jobman_insert
## dbidispatch --condor --repeat_jobs=2 jobman sql 'postgres://ift6266h10@gershwin/ift6266h10_db/pannetis_finetuningSDA0' . # This is the path from config.py

## To launch on a GPU on boltzmann (change device=gpuX, with X the GPU you were assigned):
## THEANO_FLAGS=floatX=float32,device=gpu2 python nist_sda_retrieve.py test_jobman_entrypoint


# reduce training set to that many examples
REDUCE_TRAIN_TO = None
# that's a max; it usually doesn't get to that point
MAX_FINETUNING_EPOCHS = 1000
# number of minibatches before taking means for valid error etc.
REDUCE_EVERY = 100
# Set the finetune dataset
FINETUNE_SET = 0
# Set the pretrain dataset used. 0: NIST, 1: P07
PRETRAIN_CHOICE = 0


if TEST_CONFIG:
    REDUCE_TRAIN_TO = 1000
    MAX_FINETUNING_EPOCHS = 2
    REDUCE_EVERY = 10


# This is to configure insertion of jobs on the cluster.
# Possible values the hyperparameters can take. These are then
# combined with produit_cartesien_jobs so we get a list of all
# possible combinations, each one resulting in a job inserted
# in the jobman DB. (A sketch of this expansion follows the dict below.)
JOB_VALS = {'pretraining_lr': [0.1],  #, 0.001],#, 0.0001],
            'pretraining_epochs_per_layer': [10],
            'hidden_layers_sizes': [800],
            'corruption_levels': [0.2],
            'minibatch_size': [100],
            'max_finetuning_epochs': [MAX_FINETUNING_EPOCHS],
            'max_finetuning_epochs_P07': [1],
            'finetuning_lr': [0.01],  # 0.001 was very bad, so we leave it out
            'num_hidden_layers': [4],
            'finetune_set': [-1],
            'pretrain_choice': [0, 1]
            }
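
# --- Illustrative sketch, not part of the original changeset ---
# How produit_cartesien_jobs is assumed to expand JOB_VALS: every
# combination of the listed hyperparameter values becomes one job dict.
# With the JOB_VALS above only 'pretrain_choice' has two values, so two
# jobs would be generated. _cartesian_jobs_sketch is a hypothetical helper.
def _cartesian_jobs_sketch(job_vals):
    import itertools
    keys = sorted(job_vals)
    # one dict per element of the cartesian product of the value lists
    for values in itertools.product(*(job_vals[k] for k in keys)):
        yield dict(zip(keys, values))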

# Just useful for tests... minimal number of epochs
# (This is used when running a single job, locally, when
# calling ./nist_sda.py test_jobman_entrypoint; a usage sketch
# follows the dict below.)
DEFAULT_HP_NIST = {'finetuning_lr': 0.1,
                   'pretraining_lr': 0.01,
                   'pretraining_epochs_per_layer': 15,
                   'max_finetuning_epochs': MAX_FINETUNING_EPOCHS,
                   #'max_finetuning_epochs': 1,
                   'max_finetuning_epochs_P07': 7,
                   'hidden_layers_sizes': 1000,
                   'corruption_levels': 0.2,
                   'minibatch_size': 100,
                   #'reduce_train_to': 2000,
                   'decrease_lr': 1,
                   'num_hidden_layers': 3,
                   'finetune_set': 0,
                   'pretrain_choice': 0}
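
## Sketch (an assumption, not from this changeset) of how DEFAULT_HP_NIST is
## presumably consumed when running a single local test job:
## from jobman import DD
## state = DD(DEFAULT_HP_NIST)    # attribute-style dict used by jobman
## jobman_entrypoint(state, None) # hypothetical call; the real entrypoint
##                                # also receives a jobman channel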