comparison scripts/stacked_dae/mnist_sda.py @ 139:7d8366fb90bf

Added __init__.py files throughout the tree so the scripts can be used with jobman-style paths, and made quite a few changes in stacked_dae so that the work already done can be reused for tests where the pretraining is the same (illustrative sketches of both changes follow below).
author fsavard
date Mon, 22 Feb 2010 13:38:25 -0500
parents 5c79a2557f2f
children
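The __init__.py additions themselves do not appear in this file's diff; the minimal sketch below only illustrates the intent, assuming the packages involved are scripts/ and scripts/stacked_dae/ and that the experiment is addressed by a dotted module path. The import and call shown are an illustration run from the repository root, not part of the changeset.

# Minimal sketch, assuming __init__.py files were added like this:
#   scripts/__init__.py
#   scripts/stacked_dae/__init__.py
# so the entry point can be referenced by a dotted path such as
# scripts.stacked_dae.mnist_sda.sgd_optimization_mnist, the kind of
# path jobman can resolve when dispatching experiments.
from scripts.stacked_dae.mnist_sda import sgd_optimization_mnist
sgd_optimization_mnist(dataset='/u/savardf/datasets/mnist.pkl.gz')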
comparison
--- scripts/stacked_dae/mnist_sda.py	138:128507ac4edf
+++ scripts/stacked_dae/mnist_sda.py	139:7d8366fb90bf
@@ -7,11 +7,11 @@
 import theano
 import time
 import theano.tensor as T
 from theano.tensor.shared_randomstreams import RandomStreams
 
-from stacked_dae import sgd_optimization
+from sgd_optimization import SdaSgdOptimizer
 import cPickle, gzip
 from jobman import DD
 
 MNIST_LOCATION = '/u/savardf/datasets/mnist.pkl.gz'
 
@@ -29,14 +29,16 @@
 
     hyperparameters = DD({'finetuning_lr':learning_rate,
                           'pretraining_lr':pretrain_lr,
                           'pretraining_epochs_per_layer':pretraining_epochs,
                           'max_finetuning_epochs':training_epochs,
-                          'hidden_layers_sizes':[1000,1000,1000],
-                          'corruption_levels':[0.2,0.2,0.2],
+                          'hidden_layers_sizes':[100],
+                          'corruption_levels':[0.2],
                           'minibatch_size':20})
 
-    sgd_optimization(dataset, hyperparameters, n_ins, n_outs)
+    optimizer = SdaSgdOptimizer(dataset, hyperparameters, n_ins, n_outs)
+    optimizer.pretrain()
+    optimizer.finetune()
 
 if __name__ == '__main__':
-    sgd_optimization_mnist()
+    sgd_optimization_mnist(dataset=MNIST_LOCATION)
 
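As a usage note on the refactoring visible above: replacing the monolithic sgd_optimization() call with an optimizer object that exposes separate pretrain() and finetune() steps is what makes the pretraining work reusable across tests. The sketch below is an illustration only; the hyperparameter values, the MNIST input/output sizes (28*28, 10) and the assumption that the optimizer receives the already-loaded dataset tuples are not taken from this changeset beyond what the diff shows.

import cPickle, gzip
from jobman import DD
from sgd_optimization import SdaSgdOptimizer

# Load the (train, valid, test) tuples once; the path matches MNIST_LOCATION.
f = gzip.open('/u/savardf/datasets/mnist.pkl.gz')
dataset = cPickle.load(f)
f.close()

hyperparameters = DD({'finetuning_lr': 0.1,                # assumed value
                      'pretraining_lr': 0.1,               # assumed value
                      'pretraining_epochs_per_layer': 10,  # assumed value
                      'max_finetuning_epochs': 100,        # assumed value
                      'hidden_layers_sizes': [100],
                      'corruption_levels': [0.2],
                      'minibatch_size': 20})

# 28*28 inputs and 10 output classes for MNIST (assumed, not shown in the hunks).
optimizer = SdaSgdOptimizer(dataset, hyperparameters, 28*28, 10)
optimizer.pretrain()   # unsupervised pass over the denoising autoencoder stack
optimizer.finetune()   # supervised pass that reuses the pretrained layers

With the old function both phases were tied together in a single call; keeping them as separate methods presumably lets a test that only varies the finetuning settings avoid redoing identical pretraining.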