Mercurial > ift6266
comparison scripts/stacked_dae/mnist_sda.py @ 131:5c79a2557f2f
A bit of cleanup in the stacked DAE code, split into files in a new subdirectory.
author      savardf
date        Fri, 19 Feb 2010 08:43:10 -0500
parents
children    7d8366fb90bf
comparing 130:38929c29b602 with 131:5c79a2557f2f
#!/usr/bin/python
# coding: utf-8

# Parameterize call to sgd_optimization for MNIST

import numpy
import theano
import time
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from stacked_dae import sgd_optimization
import cPickle, gzip
from jobman import DD

MNIST_LOCATION = '/u/savardf/datasets/mnist.pkl.gz'

def sgd_optimization_mnist(learning_rate=0.1, pretraining_epochs=2,
                           pretrain_lr=0.1, training_epochs=5,
                           dataset='mnist.pkl.gz'):
    # Load the dataset
    f = gzip.open(dataset, 'rb')
    # this gives us train, valid, test (each with .x, .y)
    dataset = cPickle.load(f)
    f.close()

    n_ins = 28*28
    n_outs = 10

    hyperparameters = DD({'finetuning_lr': learning_rate,
                          'pretraining_lr': pretrain_lr,
                          'pretraining_epochs_per_layer': pretraining_epochs,
                          'max_finetuning_epochs': training_epochs,
                          'hidden_layers_sizes': [1000, 1000, 1000],
                          'corruption_levels': [0.2, 0.2, 0.2],
                          'minibatch_size': 20})

    sgd_optimization(dataset, hyperparameters, n_ins, n_outs)

if __name__ == '__main__':
    sgd_optimization_mnist()
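For context, a minimal usage sketch of calling this entry point with non-default hyperparameters; it assumes mnist_sda.py is importable as a module and that its stacked_dae and jobman dependencies are on the path. The values shown are illustrative only, not taken from this changeset.

# Hypothetical usage sketch; assumes mnist_sda.py and its dependencies
# (stacked_dae, jobman) are importable from the current directory.
from mnist_sda import sgd_optimization_mnist, MNIST_LOCATION

# Run with longer pretraining/finetuning than the quick defaults in the
# script, reading MNIST from the path hard-coded above. Values are
# illustrative, not tuned.
sgd_optimization_mnist(learning_rate=0.01,
                       pretraining_epochs=15,
                       pretrain_lr=0.001,
                       training_epochs=50,
                       dataset=MNIST_LOCATION)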