comparison code_tutoriel/convolutional_mlp.py @ 165:4bc5eeec6394

Updating the tutorial code to the latest revisions.

author:   Dumitru Erhan <dumitru.erhan@gmail.com>
date:     Fri, 26 Feb 2010 13:55:27 -0500
parents:
children:
comparing 164:e3de934a98b6 with 165:4bc5eeec6394
1 """ | |
2 This tutorial introduces the LeNet5 neural network architecture using Theano. LeNet5 is a | |
3 convolutional neural network, good for classifying images. This tutorial shows how to build the | |
4 architecture, and comes with all the hyper-parameters you need to reproduce the paper's MNIST | |
5 results. | |
6 | |
7 | |
8 This implementation simplifies the model in the following ways: | |
9 | |
10 - LeNetConvPool doesn't implement location-specific gain and bias parameters | |
11 - LeNetConvPool doesn't implement pooling by average, it implements pooling by max. | |
12 - Digit classification is implemented with a logistic regression rather than an RBF network | |
 - LeNet5 did not use fully-connected convolutions at the second layer

References:
 - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner: Gradient-Based Learning Applied to Document
   Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""

import numpy, time, cPickle, gzip

import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv

from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer


class LeNetConvPoolLayer(object):
    """Pool Layer of a convolutional network """

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2,2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # initialize weights to temporary values until we know the shape of the output feature
        # maps
        W_values = numpy.zeros(filter_shape, dtype=theano.config.floatX)
        self.W = theano.shared(value=W_values)

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=input, filters=self.W,
                filter_shape=filter_shape, image_shape=image_shape)

        # there are "num input feature maps * filter height * filter width" inputs
        # to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" / pooling size
        fan_out = filter_shape[0] * numpy.prod(filter_shape[2:]) / numpy.prod(poolsize)
        # replace weight values with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
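        # e.g. for the first layer below, filter_shape=(20,1,5,5) and poolsize=(2,2):
        # fan_in = 1*5*5 = 25, fan_out = 20*5*5/4 = 125, so W_bound = sqrt(6/150) = 0.2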
        self.W.value = numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX)

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out,
                ds=poolsize, ignore_border=True)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will thus
        # be broadcasted across mini-batches and feature map width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
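        # e.g. with nkerns[0]=20 filters, self.b has shape (20,); dimshuffle turns it
        # into shape (1,20,1,1), which broadcasts against the (batch_size,20,height,width)
        # pooled output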

        # store parameters of this layer
        self.params = [self.W, self.b]



def evaluate_lenet5(learning_rate=0.1, n_epochs=200, dataset='mnist.pkl.gz', nkerns=[20,50]):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training/testing (MNIST here)

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]


    batch_size = 500    # size of the minibatch

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.value.shape[0] / batch_size
    n_valid_batches = valid_set_x.value.shape[0] / batch_size
    n_test_batches = test_set_x.value.shape[0] / batch_size
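    # e.g. for mnist.pkl.gz this gives 50000/500 = 100 training batches and
    # 10000/500 = 20 validation and test batches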

    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')      # the data is presented as rasterized images
    y = T.ivector('y')     # the labels are presented as 1D vector of
                           # [int] labels


    ishape = (28,28)       # this is the size of MNIST images

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size,28*28)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    layer0_input = x.reshape((batch_size,1,28,28))
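    # e.g. a (500, 784) matrix of flattened images becomes a (500,1,28,28) tensor:
    # one single-channel 28x28 map per image in the minibatch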

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (28-5+1,28-5+1)=(24,24)
    # maxpooling reduces this further to (24/2,24/2) = (12,12)
    # 4D output tensor is thus of shape (batch_size,nkerns[0],12,12)
    layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
            image_shape=(batch_size,1,28,28),
            filter_shape=(nkerns[0],1,5,5), poolsize=(2,2))

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (12-5+1,12-5+1)=(8,8)
    # maxpooling reduces this further to (8/2,8/2) = (4,4)
    # 4D output tensor is thus of shape (batch_size,nkerns[1],4,4)
    layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
            image_shape=(batch_size,nkerns[0],12,12),
            filter_shape=(nkerns[1],nkerns[0],5,5), poolsize=(2,2))

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size,num_pixels) (i.e. matrix of rasterized images).
    # This will generate a matrix of shape (batch_size,nkerns[1]*4*4) = (500,800)
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[1]*4*4,
                         n_out=500, activation=T.tanh)

    # classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)
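    # i.e. cost = -mean over the minibatch of log P(Y = y_i | x_i, theta),
    # as computed by LogisticRegression.negative_log_likelihood in logistic_sgd.py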

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function([index], layer3.errors(y),
            givens={
                x: test_set_x[index*batch_size:(index+1)*batch_size],
                y: test_set_y[index*batch_size:(index+1)*batch_size]})

    validate_model = theano.function([index], layer3.errors(y),
            givens={
                x: valid_set_x[index*batch_size:(index+1)*batch_size],
                y: valid_set_y[index*batch_size:(index+1)*batch_size]})

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by SGD
    # Since this model has many parameters, it would be tedious to manually
    # create an update rule for each model parameter. We thus create the updates
    # dictionary by automatically looping over all (params[i],grads[i]) pairs.
    updates = {}
    for param_i, grad_i in zip(params, grads):
        updates[param_i] = param_i - learning_rate * grad_i

    train_model = theano.function([index], cost, updates=updates,
            givens={
                x: train_set_x[index*batch_size:(index+1)*batch_size],
                y: train_set_y[index*batch_size:(index+1)*batch_size]})


    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 10000    # look at this many examples regardless
    patience_increase = 2    # wait this much longer when a new best is
                             # found
    improvement_threshold = 0.995    # a relative improvement of this much is
                                     # considered significant
    validation_frequency = min(n_train_batches, patience/2)
                                # go through this many
                                # minibatches before checking the network
                                # on the validation set; in this case we
                                # check every epoch
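    # e.g. with 100 training batches and patience = 10000, validation_frequency =
    # min(100, 5000) = 100, so validation runs after every 100th minibatch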

    best_params = None
    best_validation_loss = float('inf')
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter+1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                      (epoch, minibatch_index+1, n_train_batches, \
                       this_validation_loss*100.))


                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                           improvement_threshold:
                        patience = max(patience, iter * patience_increase)
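                        # e.g. a sufficient improvement at iter = 3000 leaves patience
                        # at max(10000, 6000) = 10000; one at iter = 6000 raises it
                        # to max(10000, 12000) = 12000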

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [test_model(i) for i in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)
                    print((' epoch %i, minibatch %i/%i, test error of best '
                           'model %f %%') %
                          (epoch, minibatch_index+1, n_train_batches,
                           test_score*100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '\
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter, test_score*100.))
    print('The code ran for %f minutes' % ((end_time-start_time)/60.))

if __name__ == '__main__':
    evaluate_lenet5()

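# Entry point presumably used by an external job scheduler (a jobman-style setup is
# assumed here): `state` carries the hyper-parameters and `channel` the status channel.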
def experiment(state, channel):
    evaluate_lenet5(state.learning_rate, dataset=state.dataset)