# HG changeset patch
# User SylvainPL
# Date 1271958184 14400
# Node ID 14b28e43ce4e4bcde310ef3007c1a85cb3c649fd
# Parent 793e89fcdab770f6eb87edcc18f16bd171106ec1
Correction d'un bug dans le pre-train du SDA cause par tanh

diff -r 793e89fcdab7 -r 14b28e43ce4e deep/stacked_dae/v_sylvain/stacked_dae.py
--- a/deep/stacked_dae/v_sylvain/stacked_dae.py	Thu Apr 22 13:18:12 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/stacked_dae.py	Thu Apr 22 13:43:04 2010 -0400
@@ -88,7 +88,7 @@
         b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
         self.b = theano.shared(value= b_values)
 
-        self.output = (T.tanh(T.dot(input, self.W) + self.b) + 1) /2
+        self.output = (T.tanh(T.dot(input, self.W) + self.b) + 1.0)/2.0
         # ( *+ 1) /2 is because tanh goes from -1 to 1 and sigmoid goes from 0 to 1
         # I want to use tanh, but the image has to stay the same. The correction is necessary.
         self.params = [self.W, self.b]
@@ -185,10 +185,10 @@
 
         #Or use a Tanh everything is always between 0 and 1, the range is
         #changed so it remain the same as when sigmoid is used
-        self.y = (T.tanh(T.dot(self.tilde_x, self.W ) + self.b)+1.0)/2.0
+        self.y = (T.tanh(T.dot(self.tilde_x, self.W ) + self.b)+1.0)/2.0
         z_a = T.dot(self.y, self.W_prime) + self.b_prime
-        self.z = (T.tanh(z_a + self.b_prime)+1.0) / 2.0
+        self.z = (T.tanh(z_a )+1.0) / 2.0
 
         #To ensure to do not have a log(0) operation
         if self.z <= 0:
             self.z = 0.000001