comparison deep/stacked_dae/old/mnist_sda.py @ 265:c8fe09a65039
Move the new stacked_dae code from v2 to the base 'stacked_dae' directory, and move the old code to the 'old' directory
author:   fsavard
date:     Fri, 19 Mar 2010 10:54:39 -0400
parents:  deep/stacked_dae/mnist_sda.py@3632e6258642
children: (none)
comparison: 243:3c54cb3713ef vs. 265:c8fe09a65039
#!/usr/bin/python
# coding: utf-8

# TODO: This probably doesn't work anymore, adapt to new code in sgd_opt
# Parameterize call to sgd_optimization for MNIST

import numpy
import theano
import time
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from sgd_optimization import SdaSgdOptimizer
import cPickle, gzip
from jobman import DD

MNIST_LOCATION = '/u/savardf/datasets/mnist.pkl.gz'

def sgd_optimization_mnist(learning_rate=0.1, pretraining_epochs = 2, \
                           pretrain_lr = 0.1, training_epochs = 5, \
                           dataset='mnist.pkl.gz'):
    # Load the dataset
    f = gzip.open(dataset,'rb')
    # this gives us train, valid, test (each with .x, .y)
    dataset = cPickle.load(f)
    f.close()

    n_ins = 28*28
    n_outs = 10

    hyperparameters = DD({'finetuning_lr':learning_rate,
                          'pretraining_lr':pretrain_lr,
                          'pretraining_epochs_per_layer':pretraining_epochs,
                          'max_finetuning_epochs':training_epochs,
                          'hidden_layers_sizes':[100],
                          'corruption_levels':[0.2],
                          'minibatch_size':20})

    optimizer = SdaSgdOptimizer(dataset, hyperparameters, n_ins, n_outs)
    optimizer.pretrain()
    optimizer.finetune()

if __name__ == '__main__':
    sgd_optimization_mnist(dataset=MNIST_LOCATION)
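
The list-valued entries in the DD above ('hidden_layers_sizes', 'corruption_levels') read as per-layer settings, one entry per hidden layer of the stacked denoising autoencoder. Below is a minimal sketch of a two-layer configuration under that assumption; the key names are copied from the listing, while the values are illustrative and not from the repository.

from jobman import DD

# Hypothetical two-layer variant of the hyperparameters above; the key
# names come from the listing, the values are illustrative only.
hyperparameters = DD({'finetuning_lr':0.1,
                      'pretraining_lr':0.1,
                      'pretraining_epochs_per_layer':2,
                      'max_finetuning_epochs':5,
                      'hidden_layers_sizes':[500, 500],  # two hidden layers
                      'corruption_levels':[0.1, 0.2],    # one noise level per layer
                      'minibatch_size':20})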
45 |