ift6266: comparison scripts/stacked_dae/stacked_dae.py @ 131:5c79a2557f2f
A bit of cleanup in the stacked DAE code, split into files in a new subdirectory.
author | savardf
date | Fri, 19 Feb 2010 08:43:10 -0500
parents |
children | 7d8366fb90bf
comparing 130:38929c29b602 with 131:5c79a2557f2f
#!/usr/bin/python
# coding: utf-8

import numpy
import theano
import time
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

class LogisticRegression(object):
    def __init__(self, input, n_in, n_out):
        # initialize the weights W as a zero matrix of shape (n_in, n_out)
        self.W = theano.shared(value=numpy.zeros((n_in, n_out),
                                                 dtype=theano.config.floatX))
        # initialize the biases b as a vector of n_out zeros
        self.b = theano.shared(value=numpy.zeros((n_out,),
                                                 dtype=theano.config.floatX))
        # compute the vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # compute the prediction as the class whose probability is maximal,
        # in symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # list of parameters for this layer
        self.params = [self.W, self.b]

    def negative_log_likelihood(self, y):
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        # check that y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type))

        # check that y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()

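# For reference: negative_log_likelihood above computes the mean negative
# log-likelihood of the targets under the model,
#     NLL = -(1/N) * sum_i log p(Y = y_i | x_i, W, b),
# where p(Y | x, W, b) = softmax(x.W + b); the indexing
# [T.arange(y.shape[0]), y] picks, for each row of p_y_given_x, the
# log-probability assigned to that example's target class.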

class SigmoidalLayer(object):
    def __init__(self, rng, input, n_in, n_out):
        self.input = input

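        # note: the +/- sqrt(6. / (n_in + n_out)) range used below is the
        # initialization heuristic of Glorot & Bengio (AISTATS 2010) for
        # keeping activations well scaled early in training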
        W_values = numpy.asarray(rng.uniform(
              low=-numpy.sqrt(6. / (n_in + n_out)),
              high=numpy.sqrt(6. / (n_in + n_out)),
              size=(n_in, n_out)), dtype=theano.config.floatX)
        self.W = theano.shared(value=W_values)

        b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values)

        self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]


class dA(object):
    def __init__(self, n_visible=784, n_hidden=500, corruption_level=0.1,
                 input=None, shared_W=None, shared_b=None):
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        # create a Theano random generator that gives symbolic random values
        theano_rng = RandomStreams()

        if shared_W is not None and shared_b is not None:
            self.W = shared_W
            self.b = shared_b
        else:
            # initial values for the weights and biases
            # note: W' is written as `W_prime` and b' as `b_prime`

            # W is initialized with `initial_W`, uniformly sampled between
            # -sqrt(6./(n_visible+n_hidden)) and sqrt(6./(n_visible+n_hidden));
            # the output of uniform is converted with asarray to dtype
            # theano.config.floatX so that the code is runnable on GPU
            initial_W = numpy.asarray(numpy.random.uniform(
                  low=-numpy.sqrt(6. / (n_hidden + n_visible)),
                  high=numpy.sqrt(6. / (n_hidden + n_visible)),
                  size=(n_visible, n_hidden)), dtype=theano.config.floatX)
            initial_b = numpy.zeros(n_hidden, dtype=theano.config.floatX)

            # theano shared variables for the weights and biases
            self.W = theano.shared(value=initial_W, name="W")
            self.b = theano.shared(value=initial_b, name="b")

        initial_b_prime = numpy.zeros(n_visible, dtype=theano.config.floatX)
        # tied weights, therefore W_prime is W transpose
        self.W_prime = self.W.T
        self.b_prime = theano.shared(value=initial_b_prime, name="b'")

        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name='input')
        else:
            self.x = input
        # Equation (1)
        # keep a fraction (1 - corruption_level) of the inputs the same and
        # zero out a randomly selected fraction corruption_level of them
        # note: the first argument of theano_rng.binomial is the shape (size)
        # of the random numbers it should produce, the second is the number
        # of trials, and the third is the probability of success of each trial
        #
        # this produces an array of 0s and 1s, where 1 occurs with
        # probability 1 - ``corruption_level`` and 0 with probability
        # ``corruption_level``
        self.tilde_x = theano_rng.binomial(self.x.shape, 1,
                                           1 - corruption_level) * self.x
        # Equation (2)
        # note: y is stored as an attribute of the class so that it can be
        # used later when stacking dAs
        self.y = T.nnet.sigmoid(T.dot(self.tilde_x, self.W) + self.b)
        # Equation (3)
        self.z = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
        # Equation (4)
        # note: we sum over the size of a datapoint; if we are using
        # minibatches, L will be a vector with one entry per example
        self.L = -T.sum(self.x * T.log(self.z)
                        + (1 - self.x) * T.log(1 - self.z), axis=1)
        # L is a vector where each element is the cross-entropy cost of the
        # reconstruction of the corresponding example of the minibatch; we
        # take the mean of these to get the cost of the minibatch
        self.cost = T.mean(self.L)

        self.params = [self.W, self.b, self.b_prime]

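# Summary of the equations referenced in the comments above (the standard
# denoising autoencoder formulation of Vincent et al., 2008):
#   (1) tilde_x ~ q(tilde_x | x)     corruption: randomly zero out inputs
#   (2) y = sigmoid(W tilde_x + b)   encoder
#   (3) z = sigmoid(W' y + b')       decoder, with tied weights W' = W^T
#   (4) L(x, z) = -sum_k [ x_k log(z_k) + (1 - x_k) log(1 - z_k) ]
# The cost is the mean of L over the examples in the minibatch.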
class SdA(object):
    def __init__(self, train_set_x, train_set_y, batch_size, n_ins,
                 hidden_layers_sizes, n_outs,
                 corruption_levels, rng, pretrain_lr, finetune_lr):

        self.layers = []
        self.pretrain_functions = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        if len(hidden_layers_sizes) < 1:
            raise Exception('You must have at least one hidden layer')

        # allocate symbolic variables for the data
        index = T.lscalar()      # index to a [mini]batch
        self.x = T.matrix('x')   # the data is presented as rasterized images
        self.y = T.ivector('y')  # the labels are presented as a 1D vector
                                 # of [int] labels

        for i in xrange(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if we are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.layers[-1].output

            layer = SigmoidalLayer(rng, layer_input, input_size,
                                   hidden_layers_sizes[i])
            # add the layer to the list of layers
            self.layers += [layer]
            self.params += layer.params

            # construct a denoising autoencoder that shares weights with
            # this layer
            dA_layer = dA(input_size, hidden_layers_sizes[i],
                          corruption_level=corruption_levels[i],
                          input=layer_input,
                          shared_W=layer.W, shared_b=layer.b)

            # construct a function that trains this dA:
            # compute the gradients of the dA parameters
            gparams = T.grad(dA_layer.cost, dA_layer.params)
            # compute the list of updates (one SGD step per parameter)
            updates = {}
            for param, gparam in zip(dA_layer.params, gparams):
                updates[param] = param - gparam * pretrain_lr

            # create a function that trains the dA
            update_fn = theano.function([index], dA_layer.cost,
                updates=updates,
                givens={
                    self.x: train_set_x[index * batch_size:
                                        (index + 1) * batch_size]})
            # collect this function into a list
            self.pretrain_functions += [update_fn]

        # we now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
                            input=self.layers[-1].output,
                            n_in=hidden_layers_sizes[-1], n_out=n_outs)

        self.params += self.logLayer.params
        # construct a function that implements one step of fine-tuning

        # compute the cost, defined as the negative log-likelihood
        cost = self.logLayer.negative_log_likelihood(self.y)
        # compute the gradients with respect to the model parameters
        gparams = T.grad(cost, self.params)
        # compute the list of updates
        updates = {}
        for param, gparam in zip(self.params, gparams):
            updates[param] = param - gparam * finetune_lr

        self.finetune = theano.function([index], cost,
            updates=updates,
            givens={
                self.x: train_set_x[index * batch_size:(index + 1) * batch_size],
                self.y: train_set_y[index * batch_size:(index + 1) * batch_size]})

        # the error rate (fraction of misclassified examples) on the
        # minibatch given by self.x and self.y, as a symbolic variable
        self.errors = self.logLayer.errors(self.y)

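# A minimal usage sketch (illustrative only: `train_set_x` / `train_set_y`
# are assumed to be theano shared variables holding the data, and all
# hyperparameter values below are placeholders):
#
#   rng = numpy.random.RandomState(1234)
#   sda = SdA(train_set_x, train_set_y, batch_size=20, n_ins=28*28,
#             hidden_layers_sizes=[500, 500], n_outs=10,
#             corruption_levels=[0.1, 0.1], rng=rng,
#             pretrain_lr=0.001, finetune_lr=0.1)
#   n_batches = train_set_x.value.shape[0] / 20
#
#   # greedy layer-wise pretraining: one function per dA, lowest layer first
#   for pretrain_fn in sda.pretrain_functions:
#       for epoch in xrange(15):
#           for batch_index in xrange(n_batches):
#               c = pretrain_fn(batch_index)
#
#   # supervised fine-tuning of the whole stack
#   for epoch in xrange(30):
#       for batch_index in xrange(n_batches):
#           cost = sda.finetune(batch_index)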
if __name__ == '__main__':
    import sys
    args = sys.argv[1:]

    if len(args) < 1:
        print "Options: mnist, jobman_add, load_nist, nist, pc"
        sys.exit(0)

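    # note: the entry points dispatched below (jobman_add,
    # sgd_optimization_mnist, load_nist_test, sgd_optimization_nist,
    # test_produit_croise_jobs) and the constant MNIST_LOCATION are not
    # defined in this file; presumably they live in the companion scripts
    # this code was split into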
    if args[0] == "jobman_add":
        jobman_add()
    elif args[0] == "mnist":
        sgd_optimization_mnist(dataset=MNIST_LOCATION)
    elif args[0] == "load_nist":
        load_nist_test()
    elif args[0] == "nist":
        sgd_optimization_nist()
    elif args[0] == "pc":
        test_produit_croise_jobs()