comparison denoising_aa.py @ 218:df3fae88ab46
small debugging
author    Thierry Bertin-Mahieux <bertinmt@iro.umontreal.ca>
date      Fri, 23 May 2008 12:22:54 -0400
parents   bd728c83faff
children  9e96fe8b955c
217:44dd9b6448c5 | 218:df3fae88ab46
29 | 29 |
30 return HidingCorruptionFormula() | 30 return HidingCorruptionFormula() |
31 | 31 |
32 def squash_affine_formula(squash_function=sigmoid): | 32 def squash_affine_formula(squash_function=sigmoid): |
33 """ | 33 """ |
| 34 Simply does: squash_function(b + xW) |
34 By convention prefix the parameters by _ | 35 By convention prefix the parameters by _ |
35 """ | 36 """ |
36 class SquashAffineFormula(Formulas): | 37 class SquashAffineFormula(Formulas): |
37 x = t.matrix() # of dimensions minibatch_size x n_inputs | 38 x = t.matrix() # of dimensions minibatch_size x n_inputs |
38 _b = t.row() # of dimensions 1 x n_outputs | 39 _b = t.row() # of dimensions 1 x n_outputs |
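Note: only part of SquashAffineFormula is visible in this hunk. As a rough NumPy sketch (not the pylearn Formulas API; names and shapes follow the comments above), the computation it declares, squash_function(b + xW) over a minibatch, is:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    # shapes per the comments: x is minibatch_size x n_inputs,
    # _b is 1 x n_outputs (broadcast over the minibatch), W is n_inputs x n_outputs
    minibatch_size, n_inputs, n_outputs = 4, 3, 2
    rng = np.random.RandomState(0)
    x = rng.randn(minibatch_size, n_inputs)
    W = rng.randn(n_inputs, n_outputs)
    b = np.zeros((1, n_outputs))

    y = sigmoid(b + np.dot(x, W))   # squash_function(b + xW)
    assert y.shape == (minibatch_size, n_outputs)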
51 | 52 |
52 def probabilistic_classifier_loss_formula(): | 53 def probabilistic_classifier_loss_formula(): |
53 class ProbabilisticClassifierLossFormula(Formulas): | 54 class ProbabilisticClassifierLossFormula(Formulas): |
54 a = t.matrix() # of dimensions minibatch_size x n_classes, pre-softmax output | 55 a = t.matrix() # of dimensions minibatch_size x n_classes, pre-softmax output |
55 target_class = t.ivector() # dimension (minibatch_size) | 56 target_class = t.ivector() # dimension (minibatch_size) |
56 nll, probability_predictions = crossentropy_softmax_1hot(a, target_class) | 57 nll, probability_predictions = crossentropy_softmax_1hot(a, target_class) # defined in nnet_ops.py |
57 return ProbabilisticClassifierLossFormula() | 58 return ProbabilisticClassifierLossFormula() |
58 | 59 |
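Note: per the new comment, crossentropy_softmax_1hot comes from nnet_ops.py and fuses the softmax with the negative log-likelihood of integer class targets. A minimal NumPy sketch of the quantities it returns (per-example NLL and the softmax predictions; the real op also supplies stable gradients):

    import numpy as np

    def crossentropy_softmax_1hot_np(a, target_class):
        # a: minibatch_size x n_classes, pre-softmax; target_class: int vector
        a_shifted = a - a.max(axis=1, keepdims=True)   # numerical stability
        log_z = np.log(np.exp(a_shifted).sum(axis=1, keepdims=True))
        log_softmax = a_shifted - log_z
        probability_predictions = np.exp(log_softmax)
        rows = np.arange(a.shape[0])
        nll = -log_softmax[rows, target_class]         # one value per example
        return nll, probability_predictions

    nll, probs = crossentropy_softmax_1hot_np(np.array([[2.0, 1.0, 0.1]]),
                                              np.array([0]))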
59 def binomial_cross_entropy_formula(): | 60 def binomial_cross_entropy_formula(): |
60 class BinomialCrossEntropyFormula(Formulas): | 61 class BinomialCrossEntropyFormula(Formulas): |
61 a = t.matrix() # pre-sigmoid activations, minibatch_size x dim | 62 a = t.matrix() # pre-sigmoid activations, minibatch_size x dim |
62 p = sigmoid(a) # model prediction | 63 p = sigmoid(a) # model prediction |
63 q = t.matrix() # target binomial probabilities, minibatch_size x dim | 64 q = t.matrix() # target binomial probabilities, minibatch_size x dim |
64 # using the identity softplus(a) - softplus(-a) = a, | 65 # using the identity softplus(a) - softplus(-a) = a, |
65 # we obtain that q log(p) + (1-q) log(1-p) = q a - softplus(a) | 66 # we obtain that q log(p) + (1-q) log(1-p) = q a - softplus(a) |
66 nll = -t.sum(q*a - softplus(a)) | 67 nll = -t.sum(q*a - softplus(a)) |
| 68 # next line was missing... hope it's all correct above |
| 69 return BinomialCrossEntropyFormula() |
67 | 70 |
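Note: the identity used above is easy to verify numerically, and verifying it also confirms that the nll line must use softplus(a) rather than softplus(-a). With p = sigmoid(a), q log(p) + (1-q) log(1-p) = q*a - softplus(a):

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def softplus(z):
        return np.log1p(np.exp(z))

    rng = np.random.RandomState(0)
    a = rng.randn(5, 3)              # pre-sigmoid activations
    q = rng.uniform(size=(5, 3))     # target binomial probabilities
    p = sigmoid(a)

    direct = -(q * np.log(p) + (1 - q) * np.log(1 - p))
    via_identity = -(q * a - softplus(a))   # softplus(a), not softplus(-a)
    assert np.allclose(direct, via_identity)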
68 def squash_affine_autoencoder_formula(hidden_squash=t.tanh, | 71 def squash_affine_autoencoder_formula(hidden_squash=t.tanh, |
69 reconstruction_squash=sigmoid, | 72 reconstruction_squash=sigmoid, |
70 share_weights=True, | 73 share_weights=True, |
71 reconstruction_nll_formula=binomial_cross_entropy_formula(), | 74 reconstruction_nll_formula=binomial_cross_entropy_formula(), |
100 for name,val in locals().items(): | 103 for name,val in locals().items(): |
101 if val is not self: setattr(self,name,val) | 104 if val is not self: setattr(self,name,val) |
102 self.denoising_autoencoder_formula = corruption_formula + autoencoder.rename(x='corrupted_x') | 105 self.denoising_autoencoder_formula = corruption_formula + autoencoder.rename(x='corrupted_x') |
103 | 106 |
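Note: the composition above feeds the corruption formula's output into the autoencoder by renaming the autoencoder's input x to corrupted_x; the reconstruction loss is still measured against the clean x. An illustrative NumPy forward pass under the defaults visible above (hidden_squash=t.tanh, reconstruction_squash=sigmoid, share_weights=True); the helper names here are hypothetical, not pylearn's:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def denoising_forward(x, W, b, c, corruption_level, rng):
        # 'hiding' corruption: randomly zero a fraction of the inputs
        mask = rng.uniform(size=x.shape) >= corruption_level
        corrupted_x = x * mask
        h = np.tanh(b + np.dot(corrupted_x, W))   # hidden_squash
        recon = sigmoid(c + np.dot(h, W.T))       # tied weights (share_weights)
        return corrupted_x, h, recon

    rng = np.random.RandomState(0)
    x = rng.uniform(size=(4, 8))
    W = 0.1 * rng.randn(8, 5)
    _, _, recon = denoising_forward(x, W, np.zeros(5), np.zeros(8), 0.3, rng)
    # the reconstruction NLL compares recon to the clean x, not corrupted_x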
104 def __call__(self, training_set=None): | 107 def __call__(self, training_set=None): |
108 """ Allocate and optionnaly train a model""" | |
105 model = DenoisingAutoEncoderModel(self) | 109 model = DenoisingAutoEncoderModel(self) |
106 if training_set: | 110 if training_set: |
107 print 'what do I do if training set????' | 111 print 'DenoisingAutoEncoder(): what do I do if training_set????' |
| 112 # copied from mlp_factory_approach: |
| 113 if len(training_set) == sys.maxint: |
| 114 raise NotImplementedError('Learning from infinite streams is not supported') |
| 115 nval = int(self.validation_portion * len(training_set)) |
| 116 nmin = len(training_set) - nval |
| 117 assert nmin >= 0 |
| 118 minset = training_set[:nmin] # real training set for minimizing loss |
| 119 valset = training_set[nmin:] # validation set for early stopping |
| 120 best = model |
| 121 for stp in self.early_stopper(): |
| 122 model.update( |
| 123 minset.minibatches([input, target], minibatch_size=min(32, |
| 124 len(training_set)))) |
| 125 #print 'mlp.__call__(), we did an update' |
| 126 if stp.set_score: |
| 127 stp.score = model(valset, ['loss_01']) |
| 128 if (stp.score < stp.best_score): |
| 129 best = copy.copy(model) |
| 130 model = best |
| 131 # end of the copy from mlp_factory_approach |
| 132 |
| 133 return model |
| 134 |
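Note: the block copied from mlp_factory_approach is hold-out early stopping: split off a validation portion, minimize on the rest, and keep a snapshot of the best-scoring model. A self-contained sketch of the pattern (the early_stopper protocol, update(), and scoring API are assumptions read off the copied code):

    import copy

    def train_with_early_stopping(model, examples, validation_portion,
                                  early_stopper, score):
        nval = int(validation_portion * len(examples))
        nmin = len(examples) - nval
        minset, valset = examples[:nmin], examples[nmin:]   # train/val split
        best, best_score = model, float('inf')
        for _ in early_stopper():       # yields until the criterion says stop
            model.update(minset)        # one round of loss minimization
            s = score(model, valset)    # e.g. 0-1 loss on the held-out set
            if s < best_score:
                best, best_score = copy.copy(model), s      # snapshot best
        return best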
108 | 135 |
109 def compile(self, inputs, outputs): | 136 def compile(self, inputs, outputs): |
110 return theano.function(inputs,outputs,unpack_single=False,linker=self.linker) | 137 return theano.function(inputs,outputs,unpack_single=False,linker=self.linker) |
111 | 138 |
112 class DenoisingAutoEncoderModel(LearnerModel): | 139 class DenoisingAutoEncoderModel(LearnerModel): |