Mercurial > ift6266
diff deep/stacked_dae/v_sylvain/nist_sda.py @ 234:c452e3a0a3b1
Changement de la base de données qui sera utilisée
author | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
---|---|
date | Sun, 14 Mar 2010 15:17:04 -0400 |
parents | 8a94a5c808cd |
children | ecb69e17950b |
line wrap: on
line diff
--- a/deep/stacked_dae/v_sylvain/nist_sda.py Sun Mar 14 15:07:17 2010 -0400 +++ b/deep/stacked_dae/v_sylvain/nist_sda.py Sun Mar 14 15:17:04 2010 -0400 @@ -37,8 +37,8 @@ TEST_CONFIG = False #NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all' -JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/fsavard_sda_v2' -EXPERIMENT_PATH = "ift6266.deep.stacked_dae.v2.nist_sda.jobman_entrypoint" +JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/sylvainpl_sda_vsylvain' +EXPERIMENT_PATH = "ift6266.deep.stacked_dae.v_sylvain.nist_sda.jobman_entrypoint" REDUCE_TRAIN_TO = None MAX_FINETUNING_EPOCHS = 1000 @@ -54,24 +54,24 @@ # combined with produit_cartesien_jobs so we get a list of all # possible combinations, each one resulting in a job inserted # in the jobman DB. -JOB_VALS = {'pretraining_lr': [0.1, 0.01],#, 0.001],#, 0.0001], - 'pretraining_epochs_per_layer': [10,20], - 'hidden_layers_sizes': [300,800], - 'corruption_levels': [0.1,0.2,0.3], +JOB_VALS = {'pretraining_lr': [0.1],#, 0.01],#, 0.001],#, 0.0001], + 'pretraining_epochs_per_layer': [10], + 'hidden_layers_sizes': [500], + 'corruption_levels': [0.1], 'minibatch_size': [20], 'max_finetuning_epochs':[MAX_FINETUNING_EPOCHS], - 'finetuning_lr':[0.1, 0.01], #0.001 was very bad, so we leave it out - 'num_hidden_layers':[2,3]} + 'finetuning_lr':[0.1], #0.001 was very bad, so we leave it out + 'num_hidden_layers':[1,1]} # Just useful for tests... minimal number of epochs DEFAULT_HP_NIST = DD({'finetuning_lr':0.1, 'pretraining_lr':0.1, 'pretraining_epochs_per_layer':2, 'max_finetuning_epochs':2, - 'hidden_layers_sizes':800, + 'hidden_layers_sizes':500, 'corruption_levels':0.2, 'minibatch_size':20, - 'reduce_train_to':10000, + #'reduce_train_to':10000, 'num_hidden_layers':1}) '''