changeset 363:14b28e43ce4e

Fix a bug in the SDA pre-training caused by tanh
author SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca>
date Thu, 22 Apr 2010 13:43:04 -0400
parents 793e89fcdab7
children 64fa85d68923
files deep/stacked_dae/v_sylvain/stacked_dae.py
diffstat 1 files changed, 3 insertions(+), 3 deletions(-)
--- a/deep/stacked_dae/v_sylvain/stacked_dae.py	Thu Apr 22 13:18:12 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/stacked_dae.py	Thu Apr 22 13:43:04 2010 -0400
@@ -88,7 +88,7 @@
         b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
         self.b = theano.shared(value= b_values)
 
-        self.output = (T.tanh(T.dot(input, self.W) + self.b) + 1) /2
+        self.output = (T.tanh(T.dot(input, self.W) + self.b) + 1.0)/2.0
         # (tanh(...) + 1) / 2 because tanh ranges over (-1, 1) while sigmoid
         # ranges over (0, 1); rescaling keeps the output image the same
         self.params = [self.W, self.b]
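
(Not part of the changeset: the rescaled tanh above is exactly a sigmoid with a doubled argument, (tanh(a) + 1)/2 == 1/(1 + exp(-2a)), which is why the layer's output keeps the same (0, 1) image as the sigmoid it replaces. A minimal numpy check:)

    # Numerical check that (tanh(a) + 1) / 2 == sigmoid(2a)
    import numpy

    a = numpy.linspace(-5.0, 5.0, 101)
    rescaled = (numpy.tanh(a) + 1.0) / 2.0
    sigmoid_2a = 1.0 / (1.0 + numpy.exp(-2.0 * a))
    assert numpy.allclose(rescaled, sigmoid_2a)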
@@ -185,10 +185,10 @@
     
     #Or use a tanh: the output is rescaled so it always stays between
     #0 and 1, the same range as when a sigmoid is used
-    self.y   = (T.tanh(T.dot(self.tilde_x, self.W      ) + self.b)+1.0)/2.0
+    self.y   = (T.tanh(T.dot(self.tilde_x, self.W ) + self.b)+1.0)/2.0
     
     z_a = T.dot(self.y, self.W_prime) + self.b_prime
-    self.z =  (T.tanh(z_a + self.b_prime)+1.0) / 2.0
+    self.z =  (T.tanh(z_a )+1.0) / 2.0
     #Keep self.z away from 0 so the cost never computes log(0); self.z
     #is symbolic, so a Python `if` cannot test its value at graph build
     self.z = T.maximum(self.z, 0.000001)
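
(The substantive fix in the second hunk: z_a already contains self.b_prime, so the pre-fix reconstruction T.tanh(z_a + self.b_prime) added the decoder bias twice. A minimal sketch of the corrected pre-training pass, not part of the changeset; it assumes a Theano-style API, names mirror the diff, and the corruption that produces tilde_x is omitted:)

    # Sketch of the corrected denoising pass (assumptions noted above)
    import theano.tensor as T

    def reconstruct(tilde_x, W, b, W_prime, b_prime):
        # Encoder: rescaled tanh keeps the hidden code in (0, 1)
        y = (T.tanh(T.dot(tilde_x, W) + b) + 1.0) / 2.0
        # Decoder: b_prime enters exactly once, inside z_a
        z_a = T.dot(y, W_prime) + b_prime
        z = (T.tanh(z_a) + 1.0) / 2.0
        # Keep z away from 0 so a cross-entropy cost never sees log(0)
        return T.maximum(z, 0.000001)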