changeset 207:43af74a348ac
Merge branches from main repo.
author    Arnaud Bergeron <abergeron@gmail.com>
date      Thu, 04 Mar 2010 20:43:21 -0500
parents   e12702b88a2d (diff) 10a801240bfc (current diff)
children  d982dfa583df
files     deep/autoencoder/DA_training.py
diffstat  1 files changed, 9 insertions(+), 4 deletions(-)
--- a/deep/autoencoder/DA_training.py	Thu Mar 04 08:21:43 2010 -0500
+++ b/deep/autoencoder/DA_training.py	Thu Mar 04 20:43:21 2010 -0500
@@ -93,7 +93,12 @@
         theano_rng = RandomStreams()
         # create a numpy random generator
         numpy_rng = numpy.random.RandomState()
-
+
+        # print the parameter of the DA
+        if True :
+            print 'input size = %d' %n_visible
+            print 'hidden size = %d' %n_hidden
+            print 'complexity = %2.2f' %complexity
 
         # initial values for weights and biases
         # note : W' was written as `W_prime` and b' as `b_prime`
@@ -250,7 +255,7 @@
 
     # construct the denoising autoencoder class
     n_ins = 32*32
-    encoder = dA(n_ins, n_code_layer, input = x.reshape((batch_size,n_ins)))
+    encoder = dA(n_ins, n_code_layer, complexity, input = x.reshape((batch_size,n_ins)))
 
     # Train autoencoder
 
@@ -363,7 +368,7 @@
                        test_score))
 
             if patience <= iter :
-                print('iter (%i) is superior than patience(%i). break', iter, patience)
+                print('iter (%i) is superior than patience(%i). break', (iter, patience))
                 break
 
@@ -451,7 +456,7 @@
 
     # construct the denoising autoencoder class
     n_ins = 28*28
-    encoder = dA(n_ins, n_code_layer, input = x.reshape((batch_size,n_ins)))
+    encoder = dA(n_ins, n_code_layer, complexity, input = x.reshape((batch_size,n_ins)))
 
     # Train autoencoder
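
An aside on the patience hunk (commentary on the diff, not part of the changeset): this code base uses Python 2 print statements, and under Python 2 the form print('fmt', args) does not interpolate %i placeholders; the parentheses build a tuple and the tuple is printed verbatim, both before and after this change. A minimal sketch of the difference, with hypothetical values standing in for iter and patience:

    # Python 2 semantics; the values are hypothetical, for illustration only.
    iter, patience = 120, 100

    # Form used in the changeset: `print` is a statement here, so this
    # renders the tuple itself, i.e.
    #   ('iter (%i) is superior than patience(%i). break', (120, 100))
    print('iter (%i) is superior than patience(%i). break', (iter, patience))

    # Form that actually interpolates the counters, via the % operator:
    print 'iter (%i) is superior than patience(%i). break' % (iter, patience)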