ift6266: comparison of deep/stacked_dae/stacked_dae.py @ 167:1f5937e9e530
More moves - transformations into data_generation, added "deep" folder
author: Dumitru Erhan <dumitru.erhan@gmail.com>
date: Fri, 26 Feb 2010 14:15:38 -0500
parents: scripts/stacked_dae/stacked_dae.py@7d8366fb90bf
children: b9ea8e2d071a
#!/usr/bin/python
# coding: utf-8

import numpy
import theano
import time
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import copy

from utils import update_locals

class LogisticRegression(object):
    def __init__(self, input, n_in, n_out):
        # initialize the weights W as a matrix of shape (n_in, n_out) filled with 0s
        self.W = theano.shared( value=numpy.zeros((n_in, n_out),
                                            dtype = theano.config.floatX) )
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared( value=numpy.zeros((n_out,),
                                            dtype = theano.config.floatX) )
        # compute the vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # compute the prediction as the class whose probability is maximal,
        # in symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # list of parameters for this layer
        self.params = [self.W, self.b]

    def negative_log_likelihood(self, y):
        # mean negative log-likelihood of the targets y under the model
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        # check that y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                            ('y', y.type, 'y_pred', self.y_pred.type))

        # check that y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()


class SigmoidalLayer(object):
    def __init__(self, rng, input, n_in, n_out):
        self.input = input

        W_values = numpy.asarray( rng.uniform( \
              low = -numpy.sqrt(6./(n_in+n_out)), \
              high = numpy.sqrt(6./(n_in+n_out)), \
              size = (n_in, n_out)), dtype = theano.config.floatX)
        self.W = theano.shared(value = W_values)

        b_values = numpy.zeros((n_out,), dtype = theano.config.floatX)
        self.b = theano.shared(value = b_values)

        self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]


class dA(object):
    def __init__(self, n_visible=784, n_hidden=500, corruption_level=0.1,
                 input=None, shared_W=None, shared_b=None):
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        # create a Theano random generator that gives symbolic random values
        theano_rng = RandomStreams()

        if shared_W is not None and shared_b is not None:
            self.W = shared_W
            self.b = shared_b
        else:
            # initial values for weights and biases
            # note : W' was written as `W_prime` and b' as `b_prime`

            # W is initialized with `initial_W`, which is uniformly sampled
            # between -6./sqrt(n_visible+n_hidden) and 6./sqrt(n_visible+n_hidden)
            # the output of uniform is converted using asarray to dtype
            # theano.config.floatX so that the code is runnable on GPU
            initial_W = numpy.asarray( numpy.random.uniform( \
                  low = -numpy.sqrt(6./(n_hidden+n_visible)), \
                  high = numpy.sqrt(6./(n_hidden+n_visible)), \
                  size = (n_visible, n_hidden)), dtype = theano.config.floatX)
            initial_b = numpy.zeros(n_hidden, dtype = theano.config.floatX)


            # theano shared variables for weights and biases
            self.W = theano.shared(value = initial_W, name = "W")
            self.b = theano.shared(value = initial_b, name = "b")


        initial_b_prime = numpy.zeros(n_visible)
        # tied weights, therefore W_prime is W transpose
        self.W_prime = self.W.T
        self.b_prime = theano.shared(value = initial_b_prime, name = "b'")

        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name = 'input')
        else:
            self.x = input
        # Equation (1)
        # keep 90% of the inputs the same and zero out a randomly selected
        # subset of 10% of the inputs
        # note : the first argument of theano_rng.binomial is the shape (size) of
        #        the random numbers that it should produce
        #        the second argument is the number of trials
        #        the third argument is the probability of success of any trial
        #
        #        this will produce an array of 0s and 1s, where a 1 appears with
        #        probability 1 - ``corruption_level`` and a 0 with probability
        #        ``corruption_level``
        self.tilde_x = theano_rng.binomial(self.x.shape, 1, 1 - corruption_level) * self.x
        # Equation (2)
        # note : y is stored as an attribute of the class so that it can be
        #        used later when stacking dAs.
        self.y = T.nnet.sigmoid(T.dot(self.tilde_x, self.W) + self.b)
        # Equation (3)
        self.z = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
        # Equation (4)
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per example
        #        in the minibatch
        self.L = - T.sum( self.x*T.log(self.z) + (1-self.x)*T.log(1-self.z), axis=1 )
        # note : L is now a vector, where each element is the cross-entropy
        #        cost of the reconstruction of the corresponding example of
        #        the minibatch. We need to compute the average of all these
        #        to get the cost of the minibatch
        self.cost = T.mean(self.L)

        self.params = [ self.W, self.b, self.b_prime ]


class SdA(object):
    def __init__(self, train_set_x, train_set_y, batch_size, n_ins,
                 hidden_layers_sizes, n_outs,
                 corruption_levels, rng, pretrain_lr, finetune_lr,
                 input_divider=1.0):
        # stash all constructor arguments as attributes of the same name
        update_locals(self, locals())

        self.layers = []
        self.pretrain_functions = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        self.input_divider = numpy.asarray(input_divider, dtype=theano.config.floatX)

        if len(hidden_layers_sizes) < 1:
            raise Exception('You must have at least one hidden layer')


        # allocate symbolic variables for the data
        index = T.lscalar()     # index to a [mini]batch
        self.x = T.matrix('x')  # the data is presented as rasterized images
        self.y = T.ivector('y') # the labels are presented as a 1D vector
                                # of [int] labels

        for i in xrange(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i-1]

            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.layers[-1].output

            layer = SigmoidalLayer(rng, layer_input, input_size,
                                   hidden_layers_sizes[i])
            # add the layer to the list of layers
            self.layers += [layer]
            self.params += layer.params

            # Construct a denoising autoencoder that shares weights with this
            # layer (note: only the first corruption level is used here, for
            # every layer)
            dA_layer = dA(input_size, hidden_layers_sizes[i],
                          corruption_level = corruption_levels[0],
                          input = layer_input,
                          shared_W = layer.W, shared_b = layer.b)

            # Construct a function that trains this dA
            # compute gradients of the layer parameters
            gparams = T.grad(dA_layer.cost, dA_layer.params)
            # compute the list of updates
            updates = {}
            for param, gparam in zip(dA_layer.params, gparams):
                updates[param] = param - gparam * pretrain_lr

            # create a function that trains the dA
            update_fn = theano.function([index], dA_layer.cost,
                updates = updates,
                givens = {
                    self.x : train_set_x[index*batch_size:(index+1)*batch_size] / self.input_divider})
            # collect this function into a list
            self.pretrain_functions += [update_fn]


        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
                            input = self.layers[-1].output,
                            n_in = hidden_layers_sizes[-1], n_out = n_outs)

        self.params += self.logLayer.params
        # construct a function that implements one step of finetuning

        # compute the cost, defined as the negative log likelihood
        cost = self.logLayer.negative_log_likelihood(self.y)
        # compute the gradients with respect to the model parameters
        gparams = T.grad(cost, self.params)
        # compute the list of updates
        updates = {}
        for param, gparam in zip(self.params, gparams):
            updates[param] = param - gparam*finetune_lr

        self.finetune = theano.function([index], cost,
            updates = updates,
            givens = {
                self.x : train_set_x[index*batch_size:(index+1)*batch_size] / self.input_divider,
                self.y : train_set_y[index*batch_size:(index+1)*batch_size]})

        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)

    @classmethod
    def copy_reusing_lower_layers(cls, obj, num_hidden_layers, new_finetuning_lr=None):
        assert num_hidden_layers <= obj.n_layers

        if not new_finetuning_lr:
            new_finetuning_lr = obj.finetune_lr

        new_sda = cls(train_set_x = obj.train_set_x,
                      train_set_y = obj.train_set_y,
                      batch_size = obj.batch_size,
                      n_ins = obj.n_ins,
                      hidden_layers_sizes = obj.hidden_layers_sizes[:num_hidden_layers],
                      n_outs = obj.n_outs,
                      corruption_levels = obj.corruption_levels[:num_hidden_layers],
                      rng = obj.rng,
                      pretrain_lr = obj.pretrain_lr,
                      finetune_lr = new_finetuning_lr,
                      input_divider = obj.input_divider)

        # new_sda.layers contains only the hidden layers (not the logistic
        # layer on top), so the reused layers can be copied over directly
        for i, layer in enumerate(new_sda.layers):
            original_layer = obj.layers[i]
            for p1, p2 in zip(layer.params, original_layer.params):
                p1.value = p2.value.copy()

        return new_sda

    def get_params_copy(self):
        return copy.deepcopy(self.params)

    def set_params_from_copy(self, copy):
        # We don't want to replace the shared variables themselves, as the
        # compiled functions hold pointers to them; we only want to replace
        # their values.
        for i, p in enumerate(self.params):
            p.value = copy[i].value

    def get_params_means(self):
        s = []
        for p in self.params:
            s.append(numpy.mean(p.value))
        return s

if __name__ == '__main__':
    import sys
    args = sys.argv[1:]
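# Illustrative usage sketch (not part of this revision): a rough idea of how
# the SdA class above might be driven. It assumes hypothetical variables
# `train_set_x` / `train_set_y` (Theano shared variables holding the training
# data) and `n_train_batches`; adapt to the actual experiment scripts.
#
#   rng = numpy.random.RandomState(1234)
#   sda = SdA(train_set_x = train_set_x, train_set_y = train_set_y,
#             batch_size = 20, n_ins = 28*28,
#             hidden_layers_sizes = [500, 500], n_outs = 10,
#             corruption_levels = [0.1, 0.1], rng = rng,
#             pretrain_lr = 0.01, finetune_lr = 0.1)
#
#   # pretrain each layer in turn with its unsupervised reconstruction cost
#   for pretrain_fn in sda.pretrain_functions:
#       for batch_index in xrange(n_train_batches):
#           pretrain_fn(batch_index)
#
#   # then fine-tune the whole stack with the supervised cost
#   for batch_index in xrange(n_train_batches):
#       sda.finetune(batch_index)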