comparison deep/autoencoder/DA_training.py @ 246:2024368a8d3d
merge
author   | Xavier Glorot <glorotxa@iro.umontreal.ca>
date     | Tue, 16 Mar 2010 12:14:10 -0400
parents  | e12702b88a2d
children |
245:0de14b2034c6 | 246:2024368a8d3d
91 | 91 |
92 # create a Theano random generator that gives symbolic random values | 92 # create a Theano random generator that gives symbolic random values |
93 theano_rng = RandomStreams() | 93 theano_rng = RandomStreams() |
94 # create a numpy random generator | 94 # create a numpy random generator |
95 numpy_rng = numpy.random.RandomState() | 95 numpy_rng = numpy.random.RandomState() |
96 | 96 |
| 97 # print the parameter of the DA |
| 98 if True : |
| 99 print 'input size = %d' %n_visible |
| 100 print 'hidden size = %d' %n_hidden |
| 101 print 'complexity = %2.2f' %complexity |
97 | 102 |
98 # initial values for weights and biases | 103 # initial values for weights and biases |
99 # note : W' was written as `W_prime` and b' as `b_prime` | 104 # note : W' was written as `W_prime` and b' as `b_prime` |
100 | 105 |
101 # W is initialized with `initial_W` which is uniformely sampled | 106 # W is initialized with `initial_W` which is uniformely sampled |
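The inserted block (new lines 97-101) prints the dA hyper-parameters, and the same merge starts passing `complexity` into the `dA` constructor in the hunks below. For context, here is a minimal sketch of how the symbolic `RandomStreams` created at lines 92-95 is typically used to corrupt the input of a denoising autoencoder, with `complexity` playing the role of the corruption level; the `binomial` call (recent Theano API) and the variable names are assumptions, not code taken from this file.

    import theano
    import theano.tensor as T
    from theano.tensor.shared_randomstreams import RandomStreams

    theano_rng = RandomStreams()
    x = T.dmatrix('x')        # a minibatch of flattened images
    complexity = 0.3          # corruption level: fraction of inputs zeroed out

    # keep each input unit with probability (1 - complexity), zero it otherwise
    mask = theano_rng.binomial(size=x.shape, n=1, p=1 - complexity,
                               dtype=theano.config.floatX)
    tilde_x = mask * x        # corrupted input fed to the hidden layer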
248 y = T.lvector() # the labels are presented as 1D vector of | 253 y = T.lvector() # the labels are presented as 1D vector of |
249 # [long int] labels | 254 # [long int] labels |
250 | 255 |
251 # construct the denoising autoencoder class | 256 # construct the denoising autoencoder class |
252 n_ins = 32*32 | 257 n_ins = 32*32 |
253 encoder = dA(n_ins, n_code_layer, input = x.reshape((batch_size,n_ins))) | 258 encoder = dA(n_ins, n_code_layer, complexity, input = x.reshape((batch_size,n_ins))) |
254 | 259 |
255 # Train autoencoder | 260 # Train autoencoder |
256 | 261 |
257 # compute gradients of the layer parameters | 262 # compute gradients of the layer parameters |
258 gW = T.grad(encoder.cost, encoder.W) | 263 gW = T.grad(encoder.cost, encoder.W) |
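The only change in this hunk is the extra `complexity` argument to the `dA` constructor; the `T.grad` call itself is unchanged. As context for what usually follows such gradient lines, here is a self-contained sketch of a tiny tied-weight autoencoder whose parameters are updated by plain SGD built from `T.grad` and compiled with `theano.function`. The cost, the parameter names and the learning rate are assumptions, not the contents of the real `dA` class.

    import numpy
    import theano
    import theano.tensor as T

    rng = numpy.random.RandomState(1234)
    n_ins, n_hidden = 32 * 32, 500          # sizes matching the NIST case above

    # shared parameters of a one-hidden-layer, tied-weight autoencoder
    W = theano.shared(numpy.asarray(rng.uniform(-0.1, 0.1, (n_ins, n_hidden)),
                                    dtype=theano.config.floatX), name='W')
    b = theano.shared(numpy.zeros(n_hidden, dtype=theano.config.floatX), name='b')
    b_prime = theano.shared(numpy.zeros(n_ins, dtype=theano.config.floatX),
                            name='b_prime')

    x = T.matrix('x')
    y = T.nnet.sigmoid(T.dot(x, W) + b)          # code layer
    z = T.nnet.sigmoid(T.dot(y, W.T) + b_prime)  # reconstruction (W' = W.T)
    cost = T.mean(-T.sum(x * T.log(z) + (1 - x) * T.log(1 - z), axis=1))

    # one gradient per parameter, then plain SGD updates
    learning_rate = 0.01
    params = [W, b, b_prime]
    gparams = T.grad(cost, params)
    updates = [(p, p - learning_rate * g) for p, g in zip(params, gparams)]
    train_step = theano.function([x], cost, updates=updates)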
361 'model %f ') % | 366 'model %f ') % |
362 (epoch, minibatch_index+1, n_minibatches, | 367 (epoch, minibatch_index+1, n_minibatches, |
363 test_score)) | 368 test_score)) |
364 | 369 |
365 if patience <= iter : | 370 if patience <= iter : |
366 print('iter (%i) is superior than patience(%i). break', iter, patience) | 371 print('iter (%i) is superior than patience(%i). break', (iter, patience)) |
367 break | 372 break |
368 | 373 |
369 | 374 |
370 | 375 |
371 end_time = time.clock() | 376 end_time = time.clock() |
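This hunk is the tail of the training loop: the test-score report, the `patience <= iter` early-stopping test (whose print arguments are regrouped in this merge), and the final `time.clock()`. Below is a self-contained sketch of the patience-based early-stopping pattern this comes from; the thresholds and the `train_step` / `validate` / `test` stand-ins are assumptions made only so the loop runs on its own.

    import random
    random.seed(0)

    # stand-ins for the real compiled Theano functions
    def train_step(index): pass
    def validate(): return random.random()
    def test(): return random.random()

    n_epochs, n_minibatches = 50, 100
    patience = 1000               # look at at least this many minibatches
    patience_increase = 2         # grow the window when a clear improvement appears
    improvement_threshold = 0.995

    best_validation_loss = float('inf')
    test_score = 0.
    done_looping = False

    for epoch in range(1, n_epochs + 1):
        if done_looping:
            break
        for minibatch_index in range(n_minibatches):
            iter = (epoch - 1) * n_minibatches + minibatch_index
            train_step(minibatch_index)

            this_validation_loss = validate()
            if this_validation_loss < best_validation_loss * improvement_threshold:
                patience = max(patience, iter * patience_increase)
                best_validation_loss = this_validation_loss
                test_score = test()

            if patience <= iter:
                # '%' interpolation; passing a tuple as a second print argument,
                # as in the diff, would print the tuple itself instead
                print('iter (%i) is superior than patience (%i). break'
                      % (iter, patience))
                done_looping = True
                break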
449 y = T.lvector() # the labels are presented as 1D vector of | 454 y = T.lvector() # the labels are presented as 1D vector of |
450 # [long int] labels | 455 # [long int] labels |
451 | 456 |
452 # construct the denoising autoencoder class | 457 # construct the denoising autoencoder class |
453 n_ins = 28*28 | 458 n_ins = 28*28 |
454 encoder = dA(n_ins, n_code_layer, input = x.reshape((batch_size,n_ins))) | 459 encoder = dA(n_ins, n_code_layer, complexity, input = x.reshape((batch_size,n_ins))) |
455 | 460 |
456 # Train autoencoder | 461 # Train autoencoder |
457 | 462 |
458 # compute gradients of the layer parameters | 463 # compute gradients of the layer parameters |
459 gW = T.grad(encoder.cost, encoder.W) | 464 gW = T.grad(encoder.cost, encoder.W) |