Mercurial > pylearn
annotate sandbox/denoising_aa.py @ 419:43d9aa93934e
added other_ops.py to nnet_ops; added basic tests, no docs.
author: James Bergstra <bergstrj@iro.umontreal.ca>
date: Mon, 14 Jul 2008 16:48:02 -0400
parents: cf22ebfc90eb
children: (none)
"""
A denoising auto-encoder

@warning: You should not use this interface. It is not complete and is not functional.
Instead, use::
    ssh://projects@lgcm.iro.umontreal.ca/repos/denoising_aa
"""

import theano
from theano.formula import *
from learner import *
from theano import tensor as t
from nnet_ops import *
import math
import sys    # used below by __call__ (sys.maxint)
import copy   # used below by __call__ (copy.copy)
from misc import *
from misc_theano import *
from theano.tensor_random import binomial

def hiding_corruption_formula(seed, average_fraction_hidden):
    """
    Return a formula for the corruption process, in which a random
    subset of the input numbers are hidden (mapped to 0).

    @param seed: seed of the random generator
    @type seed: anything that numpy.random.RandomState accepts

    @param average_fraction_hidden: the probability with which each
                                    input number is hidden (set to 0).
    @type average_fraction_hidden: 0 <= real number <= 1
    """
    class HidingCorruptionFormula(Formulas):
        x = t.matrix()
        # keep each input with probability 1 - average_fraction_hidden
        corrupted_x = x * binomial(seed, x, 1, 1 - average_fraction_hidden)

    return HidingCorruptionFormula()
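
# A minimal illustrative sketch (not part of the original pipeline; the helper
# name and the use of plain numpy are assumptions): the same "hiding" corruption
# in numpy, zeroing each entry with probability average_fraction_hidden.
def _numpy_hiding_corruption_sketch(x, average_fraction_hidden, seed=0):
    import numpy
    rng = numpy.random.RandomState(seed)
    # mask entries are 1 with probability 1 - average_fraction_hidden
    mask = rng.binomial(1, 1.0 - average_fraction_hidden, size=x.shape)
    return x * mask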

def squash_affine_formula(squash_function=sigmoid):
    """
    Simply does: squash_function(b + xW)
    By convention, parameter names are prefixed with _.
    """
    class SquashAffineFormula(Formulas):
        x = t.matrix() # of dimensions minibatch_size x n_inputs
        _b = t.row() # of dimensions 1 x n_outputs
        _W = t.matrix() # of dimensions n_inputs x n_outputs
        a = _b + t.dot(x, _W) # of dimensions minibatch_size x n_outputs
        y = squash_function(a)
    return SquashAffineFormula()
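
# A minimal illustrative sketch (hypothetical helper, plain numpy assumed): the
# affine-plus-squash step above, squash_function(b + xW), with a logistic sigmoid.
def _numpy_squash_affine_sketch(x, b, W):
    import numpy
    a = b + numpy.dot(x, W)             # minibatch_size x n_outputs
    return 1.0 / (1.0 + numpy.exp(-a))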

def gradient_descent_update_formula():
    class GradientDescentUpdateFormula(Formula):
        param = t.matrix()
        learning_rate = t.scalar()
        cost = t.column() # cost of each example in a minibatch
        param_update = t.add_inplace(param, -learning_rate*t.sgrad(cost))
    return GradientDescentUpdateFormula()
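
# A minimal illustrative sketch (hypothetical helper): the in-place gradient
# descent update expressed above, param <- param - learning_rate * grad, with
# the gradient supplied directly rather than derived from a cost formula.
def _numpy_sgd_update_sketch(param, grad, learning_rate=0.1):
    param -= learning_rate * grad   # in-place, mirrors t.add_inplace above
    return param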

def probabilistic_classifier_loss_formula():
    class ProbabilisticClassifierLossFormula(Formulas):
        a = t.matrix() # of dimensions minibatch_size x n_classes, pre-softmax output
        target_class = t.ivector() # dimension (minibatch_size)
        nll, probability_predictions = crossentropy_softmax_1hot(a, target_class) # defined in nnet_ops.py
    return ProbabilisticClassifierLossFormula()
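
# A minimal illustrative sketch (hypothetical helper, not the nnet_ops
# implementation): per-example negative log-likelihood of a softmax over
# pre-softmax activations `a` (minibatch_size x n_classes) for integer targets.
def _numpy_crossentropy_softmax_1hot_sketch(a, target_class):
    import numpy
    a = a - a.max(axis=1)[:, numpy.newaxis]                               # for numerical stability
    log_softmax = a - numpy.log(numpy.exp(a).sum(axis=1))[:, numpy.newaxis]
    return -log_softmax[numpy.arange(a.shape[0]), target_class]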

def binomial_cross_entropy_formula():
    class BinomialCrossEntropyFormula(Formulas):
        a = t.matrix() # pre-sigmoid activations, minibatch_size x dim
        p = sigmoid(a) # model prediction
        q = t.matrix() # target binomial probabilities, minibatch_size x dim
        # using the identity softplus(a) - softplus(-a) = a,
        # we obtain that q log(p) + (1-q) log(1-p) = q a - softplus(a)
        nll = -t.sum(q*a - softplus(a))
        # next line was missing... hope it's all correct above
    return BinomialCrossEntropyFormula()
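
# A minimal illustrative check of the identity used above (hypothetical helper,
# plain numpy assumed): with p = sigmoid(a), log p = -softplus(-a) and
# log(1-p) = -softplus(a), so q*log(p) + (1-q)*log(1-p) = q*a - softplus(a).
def _numpy_binomial_nll_sketch(a, q):
    import numpy
    softplus_a = numpy.log1p(numpy.exp(a))   # not numerically hardened for large a
    return -(q * a - softplus_a).sum()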

def squash_affine_autoencoder_formula(hidden_squash=t.tanh,
                                      reconstruction_squash=sigmoid,
                                      share_weights=True,
                                      reconstruction_nll_formula=binomial_cross_entropy_formula(),
                                      update_formula=gradient_descent_update_formula):
    if share_weights:
        autoencoder = squash_affine_formula(hidden_squash).rename(a='code_a') + \
                      squash_affine_formula(reconstruction_squash).rename(x='hidden', y='reconstruction', _b='_c') + \
                      reconstruction_nll_formula
    else:
        autoencoder = squash_affine_formula(hidden_squash).rename(a='code_a', _W='_W1') + \
                      squash_affine_formula(reconstruction_squash).rename(x='hidden', y='reconstruction', _b='_c', _W='_W2') + \
                      reconstruction_nll_formula
    autoencoder = autoencoder + [update_formula().rename(cost='nll',
                                                         param=p)
                                 for p in autoencoder.get_all('_.*')]
    return autoencoder


# @todo: try other corruption formulae. The above is the default one.
# not quite used in the ICML paper... (had a fixed number of 0s).

class DenoisingAutoEncoder(LearningAlgorithm):

    def __init__(self, n_inputs, n_hidden_per_layer,
                 learning_rate=0.1,
                 max_n_epochs=100,
                 L1_regularizer=0,
                 init_range=1.,
                 corruption_formula=hiding_corruption_formula(),
                 autoencoder=squash_affine_autoencoder_formula(),
                 minibatch_size=None, linker="c|py"):
        for name, val in locals().items():
            if val is not self: setattr(self, name, val)
        self.denoising_autoencoder_formula = corruption_formula + autoencoder.rename(x='corrupted_x')

    def __call__(self, training_set=None):
        """ Allocate and optionally train a model

        @todo: enable passing in training and valid sets, instead of cutting one set in 80/20
        """
        model = DenoisingAutoEncoderModel(self)
        if training_set:
            print 'DenoisingAutoEncoder(): what do I do if training_set????'
            # copied from old mlp_factory_approach:
            if len(training_set) == sys.maxint:
                raise NotImplementedError('Learning from infinite streams is not supported')
            nval = int(self.validation_portion * len(training_set))
            nmin = len(training_set) - nval
            assert nmin >= 0
            minset = training_set[:nmin] #real training set for minimizing loss
            valset = training_set[nmin:] #validation set for early stopping
            best = model
            for stp in self.early_stopper():
                model.update(
                    minset.minibatches(['input', 'target'], minibatch_size=min(32,
                                       len(training_set))))
                #print 'mlp.__call__(), we did an update'
                if stp.set_score:
                    stp.score = model(valset, ['loss_01'])
                    if (stp.score < stp.best_score):
                        best = copy.copy(model)
            model = best
            # end of the copy from mlp_factory_approach

        return model


    def compile(self, inputs, outputs):
        return theano.function(inputs, outputs, unpack_single=False, linker=self.linker)

class DenoisingAutoEncoderModel(LearnerModel):
    def __init__(self, learning_algorithm, params):
        self.learning_algorithm = learning_algorithm
        self.params = params
        v = learning_algorithm.v
        self.update_fn = learning_algorithm.compile(learning_algorithm.denoising_autoencoder_formula.inputs,
                                                    learning_algorithm.denoising_autoencoder_formula.outputs)

    def update(self, training_set, train_stats_collector=None):

        print 'dont update you crazy frog!'

    # old stuff

    # self._learning_rate = t.scalar('learning_rate') # this is the symbol
    # self.L1_regularizer = L1_regularizer
    # self._L1_regularizer = t.scalar('L1_regularizer')
    # self._input = t.matrix('input') # n_examples x n_inputs
    # self._W = t.matrix('W')
    # self._b = t.row('b')
    # self._c = t.row('b')
    # self._regularization_term = self._L1_regularizer * t.sum(t.abs(self._W))
    # self._corrupted_input = corruption_process(self._input)
    # self._hidden = t.tanh(self._b + t.dot(self._input, self._W.T))
    # self._reconstruction_activations =self._c+t.dot(self._hidden,self._W)
    # self._nll,self._output = crossentropy_softmax_1hot(Print("output_activations")(self._output_activations),self._target_vector)
    # self._output_class = t.argmax(self._output,1)
    # self._class_error = t.neq(self._output_class,self._target_vector)
    # self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
    # OnlineGradientTLearner.__init__(self)

    # def attributeNames(self):
    #     return ["parameters","b1","W2","b2","W2", "L2_regularizer","regularization_term"]

    # def parameterAttributes(self):
    #     return ["b1","W1", "b2", "W2"]

    # def updateMinibatchInputFields(self):
    #     return ["input","target"]

    # def updateEndOutputAttributes(self):
    #     return ["regularization_term"]

    # def lossAttribute(self):
    #     return "minibatch_criterion"

    # def defaultOutputFields(self, input_fields):
    #     output_fields = ["output", "output_class",]
    #     if "target" in input_fields:
    #         output_fields += ["class_error", "nll"]
    #     return output_fields

    # def allocate(self,minibatch):
    #     minibatch_n_inputs = minibatch["input"].shape[1]
    #     if not self._n_inputs:
    #         self._n_inputs = minibatch_n_inputs
    #         self.b1 = numpy.zeros((1,self._n_hidden))
    #         self.b2 = numpy.zeros((1,self._n_outputs))
    #         self.forget()
    #     elif self._n_inputs!=minibatch_n_inputs:
    #         # if the input changes dimension on the fly, we resize and forget everything
    #         self.forget()

    # def forget(self):
    #     if self._n_inputs:
    #         r = self._init_range/math.sqrt(self._n_inputs)
    #         self.W1 = numpy.random.uniform(low=-r,high=r,
    #                                        size=(self._n_hidden,self._n_inputs))
    #         r = self._init_range/math.sqrt(self._n_hidden)
    #         self.W2 = numpy.random.uniform(low=-r,high=r,
    #                                        size=(self._n_outputs,self._n_hidden))
    #         self.b1[:]=0
    #         self.b2[:]=0
    #         self._n_epochs=0

    # def isLastEpoch(self):
    #     self._n_epochs +=1
    #     return self._n_epochs>=self._max_n_epochs
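
# A minimal end-to-end illustrative sketch (hypothetical helper, plain numpy,
# tied weights assumed): one training step of the denoising auto-encoder the
# formulas above describe: corrupt the input, encode with tanh, decode with a
# sigmoid, score with the binomial cross-entropy against the clean input, and
# apply a gradient-descent update in place on W, b, c.
def _numpy_denoising_autoencoder_step_sketch(x, W, b, c,
                                             learning_rate=0.1,
                                             average_fraction_hidden=0.25,
                                             seed=0):
    import numpy
    rng = numpy.random.RandomState(seed)
    mask = rng.binomial(1, 1.0 - average_fraction_hidden, size=x.shape)
    corrupted_x = x * mask                                  # hiding corruption
    hidden = numpy.tanh(b + numpy.dot(corrupted_x, W))      # code, minibatch_size x n_hidden
    recon_a = c + numpy.dot(hidden, W.T)                    # pre-sigmoid reconstruction
    recon = 1.0 / (1.0 + numpy.exp(-recon_a))
    nll = -(x * recon_a - numpy.log1p(numpy.exp(recon_a))).sum()
    # backprop (W receives contributions from both the encoder and the tied decoder)
    d_recon_a = recon - x
    d_hidden_a = numpy.dot(d_recon_a, W) * (1.0 - hidden ** 2)
    dW = numpy.dot(corrupted_x.T, d_hidden_a) + numpy.dot(d_recon_a.T, hidden)
    W -= learning_rate * dW
    b -= learning_rate * d_hidden_a.sum(axis=0)
    c -= learning_rate * d_recon_a.sum(axis=0)
    return nll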