conv_mlp/convolutional_mlp.py @ 146:33038ab4e799
Convolutional network
author | Jeremy Eustache <jeremy.eustache@voila.fr>
---|---
date | Wed, 24 Feb 2010 12:44:39 -0500
parents | 145:8ceaaf812891
children |
"""
This tutorial introduces the LeNet5 neural network architecture using Theano. LeNet5 is a
convolutional neural network, good for classifying images. This tutorial shows how to build the
architecture, and comes with all the hyper-parameters you need to reproduce the paper's MNIST
results.

The best results are obtained after X iterations of the main program loop, which takes ***
minutes on my workstation (an Intel Core i7, circa July 2009), and *** minutes on my GPU (an
NVIDIA GTX 285 graphics processor).

This implementation simplifies the model in the following ways:

 - LeNetConvPool doesn't implement location-specific gain and bias parameters
 - LeNetConvPool doesn't implement pooling by average; it implements pooling by max.
 - Digit classification is implemented with a logistic regression rather than an RBF network
 - LeNet5 did not use fully-connected convolutions at the second layer

References:
 - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner: Gradient-Based Learning Applied to Document
   Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
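
# Shape walk-through for the default 3-layer configuration built by evaluate_lenet5
# below (assuming its default arguments batch_size=20, n_kern0=20, n_kern1=50,
# filter_shape=5, and 32x32 single-channel NIST images):
#
#   input            : (20,  1, 32, 32)
#   layer0 conv 5x5  : (20, 20, 28, 28)  -> maxpool 2x2 -> (20, 20, 14, 14)
#   layer1 conv 5x5  : (20, 50, 10, 10)  -> maxpool 2x2 -> (20, 50,  5,  5)
#   flatten          : (20, 50*5*5) = (20, 1250)
#   layer2 sigmoidal : (20, 500)
#   layer3 softmax   : (20, 10)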

import numpy, theano, cPickle, gzip, time
import theano.tensor as T
import theano.sandbox.softsign
import pylearn.datasets.MNIST
from pylearn.io import filetensor as ft
from theano.sandbox import conv, downsample

class LeNetConvPoolLayer(object):

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2,2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape
        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)
        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)
        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """
        assert image_shape[1] == filter_shape[1]
        self.input = input

        # initialize weight values: the fan-in of each hidden neuron is
        # restricted by the size of the receptive fields.
        fan_in = numpy.prod(filter_shape[1:])
        W_values = numpy.asarray(rng.uniform(
                low=-numpy.sqrt(3./fan_in),
                high=numpy.sqrt(3./fan_in),
                size=filter_shape), dtype=theano.config.floatX)
        self.W = theano.shared(value=W_values)

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input, self.W,
                filter_shape=filter_shape, image_shape=image_shape)

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool2D(conv_out, poolsize, ignore_border=True)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will thus
        # be broadcasted across mini-batches and feature map width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

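# A minimal usage sketch for LeNetConvPoolLayer (assuming a 20-image minibatch of
# 32x32 single-channel images and 20 filters of size 5x5, as in evaluate_lenet5 below):
#
#   rng = numpy.random.RandomState(23455)
#   x = T.matrix('x')
#   layer0 = LeNetConvPoolLayer(rng, input=x.reshape((20, 1, 32, 32)),
#                               image_shape=(20, 1, 32, 32),
#                               filter_shape=(20, 1, 5, 5), poolsize=(2, 2))
#   # layer0.output is then a 4D tensor of shape (20, 20, 14, 14)
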
class SigmoidalLayer(object):
    def __init__(self, rng, input, n_in, n_out):
        """
        Typical hidden layer of an MLP: units are fully-connected and have a
        sigmoidal-type activation function (tanh is used below). Weight matrix W
        is of shape (n_in, n_out) and the bias vector b is of shape (n_out,).

        Hidden unit activation is given by: tanh(dot(input, W) + b)

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)
        :type n_in: int
        :param n_in: dimensionality of input
        :type n_out: int
        :param n_out: number of hidden units
        """
        self.input = input

        W_values = numpy.asarray(rng.uniform(
                low=-numpy.sqrt(6./(n_in+n_out)),
                high=numpy.sqrt(6./(n_in+n_out)),
                size=(n_in, n_out)), dtype=theano.config.floatX)
        self.W = theano.shared(value=W_values)

        b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values)

        self.output = T.tanh(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]

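# Remark on the initialization above: the weights are drawn uniformly from
# [-sqrt(6/(n_in+n_out)), sqrt(6/(n_in+n_out))], a common heuristic for tanh hidden
# layers that keeps the variance of activations roughly constant from layer to layer.
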
class LogisticRegression(object):
    """Multi-class Logistic Regression Class

    The logistic regression is fully described by a weight matrix :math:`W`
    and bias vector :math:`b`. Classification is done by projecting data
    points onto a set of hyperplanes, the distance to which is used to
    determine a class membership probability.
    """

    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)
        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie
        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie
        """

        # initialize the weights W as a matrix of zeros of shape (n_in, n_out)
        self.W = theano.shared(value=numpy.zeros((n_in, n_out),
                                                 dtype=theano.config.floatX))
        # initialize the biases b as a vector of n_out zeros
        self.b = theano.shared(value=numpy.zeros((n_out,),
                                                 dtype=theano.config.floatX))
        # compute the vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # compute prediction as the class whose probability is maximal, in
        # symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # list of parameters for this layer
        self.params = [self.W, self.b]

    def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.
        :param y: corresponds to a vector that gives for each example the
                  correct label
        Note: we use the mean instead of the sum so that
        the learning rate is less dependent on the batch size
        """
        # p_y_given_x has shape (n_examples, n_classes); indexing it with
        # [T.arange(y.shape[0]), y] picks, for each row, the log-probability
        # assigned to that example's correct label
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch; i.e. the zero-one
        loss over the size of the minibatch
        """
        # check if y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type))

        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()


def load_dataset(fname, batch=20):

    # directory that contains the NIST data
    # the following path will work if you are connected to a machine on the
    # DIRO network
    datapath = '/data/lisa/data/nist/by_class/'
    # the .ft file stores the NIST digits in an efficient format. The digits
    # are stored in an NxD matrix, where N is the number of images and D is the
    # number of pixels per image (32x32 = 1024). Each pixel of the image is a
    # value between 0 and 255, corresponding to a grey level. The values are
    # stored as uint8, i.e. as bytes.
    f = open(datapath+'digits/digits_train_data.ft')
    # Make sure you have enough memory to load the whole dataset into RAM.
    # Otherwise, use ft.arraylike, a class built specifically for files that
    # you do not want to load into RAM.
    d = ft.read(f)

    # NB: don't forget to divide the pixel values by 255. if you use the data
    # as input to a neural network and want inputs between 0 and 1.
    # digits_train_data.ft contains the images, digits_train_labels.ft contains
    # the labels
    f = open(datapath+'digits/digits_train_labels.ft')
    labels = ft.read(f)
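
    # Note on the normalization mentioned above (a sketch, not something this
    # function currently does): the raw uint8 pixels are passed through unchanged
    # below; to feed values in [0,1] to the network one would rescale them first, e.g.
    #
    #   d = d.astype(theano.config.floatX) / 255.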

    # Load the dataset
    #f = gzip.open(fname,'rb')
    #train_set, valid_set, test_set = cPickle.load(f)
    #f.close()

    # make minibatches of size 20
    batch_size = batch    # size of the minibatches

    # Dealing with the training set
    # get the list of training images (x) and their labels (y)
    (train_set_x, train_set_y) = (d[:4000,:], labels[:4000])
    # initialize the list of training minibatches with empty list
    train_batches = []
    for i in xrange(0, len(train_set_x), batch_size):
        # add to the list of minibatches the minibatch starting at
        # position i, ending at position i+batch_size
        # a minibatch is a pair; the first element of the pair is a list
        # of datapoints, the second element is the list of corresponding
        # labels
        train_batches = train_batches + \
            [(train_set_x[i:i+batch_size], train_set_y[i:i+batch_size])]

    #print train_batches[500]

    # Dealing with the validation set
    (valid_set_x, valid_set_y) = (d[4000:5000,:], labels[4000:5000])
    # initialize the list of validation minibatches
    valid_batches = []
    for i in xrange(0, len(valid_set_x), batch_size):
        valid_batches = valid_batches + \
            [(valid_set_x[i:i+batch_size], valid_set_y[i:i+batch_size])]

    # Dealing with the testing set
    (test_set_x, test_set_y) = (d[5000:6000,:], labels[5000:6000])
    # initialize the list of testing minibatches
    test_batches = []
    for i in xrange(0, len(test_set_x), batch_size):
        test_batches = test_batches + \
            [(test_set_x[i:i+batch_size], test_set_y[i:i+batch_size])]

    return train_batches, valid_batches, test_batches
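
# Usage sketch: train_batches, valid_batches, test_batches = load_dataset(fname, 20)
# Note that fname is currently unused (the gzip/cPickle branch above is commented
# out); the NIST .ft files under datapath are always the ones that get read.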


def evaluate_lenet5(learning_rate=0.1, n_iter=1, batch_size=20,
                    n_kern0=20, n_kern1=50, filter_shape=5, n_layer=3,
                    dataset='mnist.pkl.gz'):
    rng = numpy.random.RandomState(23455)

    print 'Before load dataset'
    train_batches, valid_batches, test_batches = load_dataset(dataset, batch_size)
    print 'After load dataset'

    ishape = (32,32)     # this is the size of NIST images
    n_kern2 = 80

    # allocate symbolic variables for the data
    x = T.matrix('x')  # rasterized images
    y = T.lvector()    # the labels are presented as 1D vector of [long int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################

    # Reshape the matrix of rasterized images of shape (batch_size, 32*32)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    layer0_input = x.reshape((batch_size, 1, 32, 32))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (32-5+1, 32-5+1) = (28, 28)
    # maxpooling reduces this further to (28/2, 28/2) = (14, 14)
    # 4D output tensor is thus of shape (20, 20, 14, 14)
    layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
            image_shape=(batch_size, 1, 32, 32),
            filter_shape=(n_kern0, 1, filter_shape, filter_shape), poolsize=(2,2))

    if(n_layer>2):

        # Construct the second convolutional pooling layer
        # filtering reduces the image size to (14-5+1, 14-5+1) = (10, 10)
        # maxpooling reduces this further to (10/2, 10/2) = (5, 5)
        # 4D output tensor is thus of shape (20, 50, 5, 5)
        fshape = (32-filter_shape+1)/2
        layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
                image_shape=(batch_size, n_kern0, fshape, fshape),
                filter_shape=(n_kern1, n_kern0, filter_shape, filter_shape), poolsize=(2,2))

    else:

        fshape = (32-filter_shape+1)/2
        layer1_input = layer0.output.flatten(2)
        # construct a fully-connected sigmoidal layer
        layer1 = SigmoidalLayer(rng, input=layer1_input, n_in=n_kern0*fshape*fshape, n_out=500)

        layer2 = LogisticRegression(input=layer1.output, n_in=500, n_out=10)
        cost = layer2.negative_log_likelihood(y)
        test_model = theano.function([x,y], layer2.errors(y))
        params = layer2.params + layer1.params + layer0.params


    if(n_layer>3):

        fshape = (32-filter_shape+1)/2
        fshape2 = (fshape-filter_shape+1)/2
        fshape3 = (fshape2-filter_shape+1)/2
        layer2 = LeNetConvPoolLayer(rng, input=layer1.output,
                image_shape=(batch_size, n_kern1, fshape2, fshape2),
                filter_shape=(n_kern2, n_kern1, filter_shape, filter_shape), poolsize=(2,2))

        layer3_input = layer2.output.flatten(2)

        layer3 = SigmoidalLayer(rng, input=layer3_input,
                                n_in=n_kern2*fshape3*fshape3, n_out=500)

        layer4 = LogisticRegression(input=layer3.output, n_in=500, n_out=10)

        cost = layer4.negative_log_likelihood(y)

        test_model = theano.function([x,y], layer4.errors(y))

        params = layer4.params + layer3.params + layer2.params + layer1.params + layer0.params

    elif(n_layer>2):

        fshape = (32-filter_shape+1)/2
        fshape2 = (fshape-filter_shape+1)/2

        # the SigmoidalLayer being fully-connected, it operates on 2D matrices of
        # shape (batch_size, num_pixels) (i.e. a matrix of rasterized images).
        # This will generate a matrix of shape (20, n_kern1*fshape2*fshape2),
        # i.e. (20, 50*5*5) = (20, 1250) with the default parameters
        layer2_input = layer1.output.flatten(2)

        # construct a fully-connected sigmoidal layer
        layer2 = SigmoidalLayer(rng, input=layer2_input,
                                n_in=n_kern1*fshape2*fshape2, n_out=500)

        # classify the values of the fully-connected sigmoidal layer
        layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)

        # the cost we minimize during training is the NLL of the model
        cost = layer3.negative_log_likelihood(y)

        # create a function to compute the mistakes that are made by the model
        test_model = theano.function([x,y], layer3.errors(y))

        # create a list of all model parameters to be fit by gradient descent
        params = layer3.params + layer2.params + layer1.params + layer0.params


    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by SGD.
    # Since this model has many parameters, it would be tedious to manually
    # create an update rule for each model parameter. We thus create the updates
    # dictionary by automatically looping over all (params[i], grads[i]) pairs.
    updates = {}
    for param_i, grad_i in zip(params, grads):
        updates[param_i] = param_i - learning_rate * grad_i
    train_model = theano.function([x, y], cost, updates=updates)


    ###############
    # TRAIN MODEL #
    ###############

    n_minibatches = len(train_batches)

    # early-stopping parameters
    patience = 10000                      # look at this many minibatches regardless
    patience_increase = 2                 # wait this much longer when a new best is
                                          # found
    improvement_threshold = 0.995         # a relative improvement of this much is
                                          # considered significant
    validation_frequency = n_minibatches  # go through this many
                                          # minibatches before checking the network
                                          # on the validation set; in this case we
                                          # check every epoch

    best_params = None
    best_validation_loss = float('inf')
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

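    # Note: with the default n_iter=1 and 4000 training examples in minibatches of 20,
    # the loop below runs for only 200 iterations, far below patience=10000, so the
    # patience-based early stopping only comes into play when n_iter is increased.
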
    # have a maximum of `n_iter` iterations through the entire dataset
    for iter in xrange(n_iter * n_minibatches):

        # get epoch and minibatch index
        epoch = iter / n_minibatches
        minibatch_index = iter % n_minibatches

        # get the minibatch corresponding to `iter` modulo
        # `len(train_batches)`
        x, y = train_batches[minibatch_index]

        if iter % 100 == 0:
            print 'training @ iter = ', iter
        cost_ij = train_model(x, y)

        if (iter+1) % validation_frequency == 0:

            # compute zero-one loss on validation set
            this_validation_loss = 0.
            for x, y in valid_batches:
                # sum up the errors for each minibatch
                this_validation_loss += test_model(x, y)

            # get the average by dividing by the number of minibatches
            this_validation_loss /= len(valid_batches)
            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                  (epoch, minibatch_index+1, n_minibatches, \
                   this_validation_loss*100.))

            # if we got the best validation score until now
            if this_validation_loss < best_validation_loss:

                # improve patience if loss improvement is good enough
                if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                    patience = max(patience, iter * patience_increase)

                # save best validation score and iteration number
                best_validation_loss = this_validation_loss
                best_iter = iter

                # test it on the test set
                test_score = 0.
                for x, y in test_batches:
                    test_score += test_model(x, y)
                test_score /= len(test_batches)
                print((' epoch %i, minibatch %i/%i, test error of best '
                       'model %f %%') %
                      (epoch, minibatch_index+1, n_minibatches,
                       test_score*100.))

        if patience <= iter:
            break

    end_time = time.clock()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '\
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter, test_score*100.))
    print('The code ran for %f minutes' % ((end_time-start_time)/60.))

    return (best_validation_loss * 100., test_score*100., (end_time-start_time)/60., best_iter)

if __name__ == '__main__':
    evaluate_lenet5()

def experiment(state, channel):
    print 'start experiment'
    (best_validation_loss, test_score, minutes_trained, iter) = evaluate_lenet5(
        state.learning_rate, state.n_iter, state.batch_size,
        state.n_kern0, state.n_kern1, state.filter_shape, state.n_layer)
    print 'end experiment'

    state.best_validation_loss = best_validation_loss
    state.test_score = test_score
    state.minutes_trained = minutes_trained
    state.iter = iter

    return channel.COMPLETE
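
# Remark: experiment(state, channel) follows the state/channel convention of the
# lab's job-scheduling tool (presumably jobman): hyper-parameters are read from
# `state`, results are written back into it, and channel.COMPLETE signals a
# finished job to the scheduler.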