ift6266: changeset 368:d391ad815d89
Fixed a bug in the log-likelihood function for use with sigmoid output non-linearities
| author   | SylvainPL <sylvain.pannetier.lebeuf@umontreal.ca> |
|----------|---------------------------------------------------|
| date     | Fri, 23 Apr 2010 12:12:03 -0400                   |
| parents  | f24b10e43a6f                                      |
| children | d81284e13d77                                      |
| files    | deep/stacked_dae/v_sylvain/stacked_dae.py         |
| diffstat | 1 files changed, 11 insertions(+), 6 deletions(-) |
```diff
--- a/deep/stacked_dae/v_sylvain/stacked_dae.py Fri Apr 23 11:39:55 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/stacked_dae.py Fri Apr 23 12:12:03 2010 -0400
@@ -28,8 +28,8 @@
         self.b = theano.shared( value=numpy.zeros((n_out,), dtype = theano.config.floatX) )
 
         # compute vector of class-membership. This is a sigmoid instead of
-        #a softmax to be able to classify as nothing later
-##        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
+        #a softmax to be able later to classify as nothing
+##        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b) #row-wise
         self.p_y_given_x = T.nnet.sigmoid(T.dot(input, self.W)+self.b)
 
         # compute prediction as class whose probability is maximal in
@@ -41,7 +41,13 @@
 
 
     def negative_log_likelihood(self, y):
-        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+##        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y]+T.sum(T.log(1-self.p_y_given_x), axis=1)-T.log(1-self.p_y_given_x)[T.arange(y.shape[0]),y])
+
+
+##    def kullback_leibler(self,y):
+##        return -T.mean(T.log(1/float(self.p_y_given_x))[T.arange(y.shape[0]),y])
+
 
     def errors(self, y):
         # check if y has same dimension of y_pred
@@ -187,15 +193,14 @@
         #changed so it remain the same as when sigmoid is used
         self.y = (T.tanh(T.dot(self.tilde_x, self.W ) + self.b)+1.0)/2.0
 
-        z_a = T.dot(self.y, self.W_prime) + self.b_prime
-        self.z = (T.tanh(z_a )+1.0) / 2.0
+        self.z = (T.tanh(T.dot(self.y, self.W_prime) + self.b_prime)+1.0) / 2.0
 
         #To ensure to do not have a log(0) operation
         if self.z <= 0:
             self.z = 0.000001
         if self.z >= 1:
             self.z = 0.999999
 
-        self.L = - T.sum( self.x*T.log(self.z) + (1-self.x)*T.log(1-self.z), axis=1 )
+        self.L = - T.sum( self.x*T.log(self.z) + (1.0-self.x)*T.log(1.0-self.z), axis=1 )
 
         self.cost = T.mean(self.L)
```
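The rewritten `negative_log_likelihood` treats the sigmoid outputs as independent per-class probabilities: for an integer target `y`, the log-likelihood is `log p[y]` plus `log(1 - p[j])` over every other class `j`, and the new return line computes this in the rearranged form `log p[y] + sum_j log(1 - p[j]) - log(1 - p[y])`, which vectorizes as one row-wise sum. A minimal NumPy check of that identity (not part of the changeset; the shapes and variable names below are illustrative):

```python
import numpy

rng = numpy.random.RandomState(0)
n_examples, n_classes = 4, 10
p = rng.uniform(0.01, 0.99, size=(n_examples, n_classes))  # stand-in for p_y_given_x
y = rng.randint(0, n_classes, size=n_examples)             # integer class labels
rows = numpy.arange(n_examples)

# Direct form: log-prob of the true class, plus log(1-p) of every other class.
direct = numpy.log(p[rows, y]) + numpy.array(
    [numpy.log(1.0 - p[i, numpy.arange(n_classes) != y[i]]).sum() for i in rows])

# Rearranged form used by the new return line in the diff.
rearranged = (numpy.log(p)[rows, y]
              + numpy.log(1.0 - p).sum(axis=1)
              - numpy.log(1.0 - p)[rows, y])

assert numpy.allclose(direct, rearranged)
nll = -rearranged.mean()  # mirrors -T.mean(...) in negative_log_likelihood
```

Because the sigmoid outputs are independent rather than normalized like a softmax, all of them can be small at the same time, which is what the source comment means by being able "later to classify as nothing".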
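One caveat worth flagging, as an observation rather than something this changeset addresses: the `if self.z <= 0:` guard compares a symbolic Theano expression once at graph-construction time, so it cannot clamp values element-wise while the graph runs. A sketch of the usual symbolic alternative, assuming the same 1e-6 bounds as the diff (hypothetical replacement, not the committed code):

```python
import theano.tensor as T

x = T.matrix('x')                       # input, also the reconstruction target
z = T.nnet.sigmoid(x)                   # stand-in for the decoder output self.z
# Clamp element-wise inside the graph so T.log never sees exactly 0 or 1;
# a Python-level `if` on a symbolic variable cannot do this per element.
z_safe = T.clip(z, 0.000001, 0.999999)
L = -T.sum(x * T.log(z_safe) + (1.0 - x) * T.log(1.0 - z_safe), axis=1)
```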