diff deep/stacked_dae/v2/stacked_dae.py @ 228:851e7ad4a143

Fixed an error in the modified cost formula in stacked_dae, and removed timers from sgd_optimization
author fsavard
date Fri, 12 Mar 2010 10:47:36 -0500
parents acae439d6572
children 02eb98d051fe
--- a/deep/stacked_dae/v2/stacked_dae.py	Fri Mar 12 10:31:10 2010 -0500
+++ b/deep/stacked_dae/v2/stacked_dae.py	Fri Mar 12 10:47:36 2010 -0500
@@ -133,7 +133,7 @@
     #         used later when stacking dAs. 
     self.y   = T.nnet.sigmoid(T.dot(self.tilde_x, self.W      ) + self.b)
     # Equation (3)
-    self.z   = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
+    #self.z   = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
     # Equation (4)
     # note : we sum over the size of a datapoint; if we are using minibatches,
     #        L will  be a vector, with one entry per example in minibatch
@@ -142,9 +142,9 @@
 
     # bypassing z to avoid running into log(0)
     z_a = T.dot(self.y, self.W_prime) + self.b_prime
-    log_sigmoid = T.log(1) - T.log(1+T.exp(-z_a))
+    log_sigmoid = T.log(1.) - T.log(1.+T.exp(-z_a))
     # log(1-sigmoid(z_a))
-    log_1_sigmoid = -self.x - T.log(1+T.exp(-z_a))
+    log_1_sigmoid = -z_a - T.log(1.+T.exp(-z_a))
     self.L = -T.sum( self.x * (log_sigmoid) \
                     + (1.0-self.x) * (log_1_sigmoid), axis=1 )
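
For reference, the identities this change relies on are log(sigmoid(z_a)) = -log(1 + exp(-z_a)) and log(1 - sigmoid(z_a)) = -z_a - log(1 + exp(-z_a)); computing the cross-entropy directly from the pre-activation z_a keeps the cost finite even when sigmoid(z_a) rounds to exactly 0 or 1 in floating point. A minimal NumPy sketch (illustrative only, not part of the changeset) comparing the naive cost to the stabilized one:

    import numpy as np

    def sigmoid(z_a):
        return 1. / (1. + np.exp(-z_a))

    def naive_cost(x, z_a):
        # direct form: log(z) or log(1-z) hits log(0) once sigmoid saturates
        z = sigmoid(z_a)
        return -np.sum(x * np.log(z) + (1. - x) * np.log(1. - z), axis=1)

    def stable_cost(x, z_a):
        # log(sigmoid(z_a))   = -log(1 + exp(-z_a))
        # log(1-sigmoid(z_a)) = -z_a - log(1 + exp(-z_a))
        log_sigmoid = -np.log(1. + np.exp(-z_a))
        log_1_sigmoid = -z_a - np.log(1. + np.exp(-z_a))
        return -np.sum(x * log_sigmoid + (1. - x) * log_1_sigmoid, axis=1)

    x = np.array([[0., 1.]])
    z_a = np.array([[40., -40.]])    # sigmoid(40.) rounds to exactly 1.0
    print(naive_cost(x, z_a))        # [inf] -- log(1 - 1.0) diverges
    print(stable_cost(x, z_a))       # [80.] -- finite

Note that np.exp(-z_a) can itself overflow for large negative z_a; computing the log(1 + exp(-z_a)) term as np.logaddexp(0., -z_a) (softplus, available in Theano as T.nnet.softplus) avoids that failure mode as well.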