comparison deep/stacked_dae/v_guillaume/config2.py @ 436:0ca069550abd

Added: single-class version of SDA
author Guillaume Sicard <guitch21@gmail.com>
date Mon, 03 May 2010 06:14:05 -0400
parents
children
comparison
equal deleted inserted replaced
435:d8129a09ffb1 436:0ca069550abd
'''
Parameters used by nist_sda_retrieve.py; they end up as globals in there.

Rename this file to config.py and configure as needed.
DON'T add the renamed file to the repository, as others might use it
without realizing it, with dire consequences.
'''

# Set this to True when you want to run cluster tests, i.e. you want to
# run many jobs on the cluster but with a reduced training set size and
# number of epochs, so you can check that everything runs fine there.
# Set this PRIOR to inserting your test jobs in the DB.
TEST_CONFIG = False

# Full NIST "by class" dataset location and its training-set size.
NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
NIST_ALL_TRAIN_SIZE = 649081
# valid and test sets are 82587 examples each

# Paths to two pre-trainings done earlier
PATH_NIST = '/u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/NIST_big'
PATH_P07 = '/u/pannetis/IFT6266/ift6266/deep/stacked_dae/v_sylvain/P07_demo/'

# change "sandbox" when you're ready
JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/pannetis_SDA_retrieve'
EXPERIMENT_PATH = "ift6266.deep.stacked_dae.v_sylvain.nist_sda_retrieve.jobman_entrypoint"

## To launch jobs on the cluster (run from the directory holding the files):
## python nist_sda_retrieve.py jobman_insert
## dbidispatch --condor --repeat_jobs=2 jobman sql 'postgres://ift6266h10@gershwin/ift6266h10_db/pannetis_finetuningSDA0' .   # this is the path from config.py

## To run on a GPU on boltzmann (change device=gpuX to your assigned X):
## THEANO_FLAGS=floatX=float32,device=gpu2 python nist_sda_retrieve.py test_jobman_entrypoint


# reduce training set to that many examples (None = use everything)
REDUCE_TRAIN_TO = None
# that's a max, it usually doesn't get to that point
MAX_FINETUNING_EPOCHS = 1000
# number of minibatches before taking means for valid error etc.
REDUCE_EVERY = 100
# Which dataset to finetune on
FINETUNE_SET = 0
# Which pretraining dataset was used. 0: NIST, 1: P07
PRETRAIN_CHOICE = 0
47
# For cluster smoke tests, shrink the training set and the number of
# epochs so every job finishes quickly.
if TEST_CONFIG:
    REDUCE_TRAIN_TO, MAX_FINETUNING_EPOCHS, REDUCE_EVERY = 1000, 2, 10
52
53
# Configures insertion of jobs on the cluster.
# Each hyperparameter maps to the list of values it may take; the lists
# are combined with produit_cartesien_jobs, so we get every possible
# combination, each one resulting in a job inserted in the jobman DB.
JOB_VALS = {
    'pretraining_lr': [0.1],  # also considered: 0.001, 0.0001
    'pretraining_epochs_per_layer': [10],
    'hidden_layers_sizes': [800],
    'corruption_levels': [0.2],
    'minibatch_size': [100],
    'max_finetuning_epochs': [MAX_FINETUNING_EPOCHS],
    'max_finetuning_epochs_P07': [1],
    'finetuning_lr': [0.01],  # 0.001 was very bad, so we leave it out
    'num_hidden_layers': [4],
    'finetune_set': [-1],
    'pretrain_choice': [0, 1],
}
71
# Just useful for tests... minimal number of epochs.
# (Used when running a single job locally, by calling
# ./nist_sda.py test_jobman_entrypoint.)
DEFAULT_HP_NIST = {
    'finetuning_lr': 0.1,
    'pretraining_lr': 0.01,
    'pretraining_epochs_per_layer': 15,
    'max_finetuning_epochs': MAX_FINETUNING_EPOCHS,
    # 'max_finetuning_epochs': 1,
    'max_finetuning_epochs_P07': 7,
    'hidden_layers_sizes': 1000,
    'corruption_levels': 0.2,
    'minibatch_size': 100,
    # 'reduce_train_to': 2000,
    'decrease_lr': 1,
    'num_hidden_layers': 3,
    'finetune_set': 2,
    'pretrain_choice': 1,
}
89
90