changeset 764:f02dc24dad8f
Normalized tanh to be equivalent to sigmoid in DAAig
author      Xavier Glorot <glorotxa@iro.umontreal.ca>
date        Wed, 03 Jun 2009 14:25:56 -0400
parents     f353c9a99f95
children    c95a56f055aa
files       pylearn/algorithms/sandbox/DAA_inputs_groups.py
diffstat    1 files changed, 3 insertions(+), 3 deletions(-)
--- a/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Wed Jun 03 13:54:31 2009 -0400
+++ b/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Wed Jun 03 14:25:56 2009 -0400
@@ -33,7 +33,7 @@
     return theano.tensor.nnet.sigmoid(x)
 
 def tanh_act(x):
-    return theano.tensor.tanh(x)
+    return theano.tensor.tanh(x/2.0)
 
 # costs utils:---------------------------------------------------
 
@@ -45,8 +45,8 @@
     return -T.mean(T.sum(XE, axis=sum_axis),axis=mean_axis)
 
 def tanh_cross_entropy(target, output_act, mean_axis, sum_axis):
-    XE =-(target+1)/2.0 * T.log(1 + T.exp(-2 * output_act)) + \
-        (1 - (target+1)/2.0) * (- T.log(1 + T.exp(2 * output_act)))
+    XE =-(target+1)/2.0 * T.log(1 + T.exp(- output_act)) + \
+        (1 - (target+1)/2.0) * (- T.log(1 + T.exp(output_act)))
     return -T.mean(T.sum(XE, axis=sum_axis),axis=mean_axis)
 
 def cross_entropy(target, output_act, act, mean_axis=0, sum_axis=1):
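
Why the two hunks change together: with the activation normalized to tanh(x/2), the unit's output satisfies (tanh(x/2) + 1)/2 = sigmoid(x), so the cross-entropy written on the pre-activation loses its factor of 2 in the exponentials (exp(-2*output_act) becomes exp(-output_act)), making the tanh path on {-1, 1} targets equivalent to the sigmoid path on {0, 1} targets. Below is a minimal NumPy sketch of that identity and of the patched cost term; the helper names (sigmoid_xe, tanh_xe) are illustrative stand-ins, not functions from the pylearn module.

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def tanh_act(x):
        # normalized activation from this changeset: tanh(x/2) = 2*sigmoid(x) - 1
        return np.tanh(x / 2.0)

    def sigmoid_xe(target01, x):
        # per-unit log-likelihood of a sigmoid unit, target01 in {0, 1}, x = pre-activation:
        # t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))
        return -target01 * np.log1p(np.exp(-x)) - (1.0 - target01) * np.log1p(np.exp(x))

    def tanh_xe(target_pm1, x):
        # per-unit term from the patched tanh_cross_entropy, target_pm1 in {-1, 1}
        t = (target_pm1 + 1.0) / 2.0
        return -t * np.log1p(np.exp(-x)) + (1.0 - t) * (-np.log1p(np.exp(x)))

    x = np.linspace(-5.0, 5.0, 11)
    # the normalized tanh output rescaled to [0, 1] is exactly the sigmoid output
    assert np.allclose((tanh_act(x) + 1.0) / 2.0, sigmoid(x))
    # and the tanh cost term on {-1, 1} targets matches the sigmoid one on {0, 1} targets
    assert np.allclose(tanh_xe(1.0, x), sigmoid_xe(1.0, x))
    assert np.allclose(tanh_xe(-1.0, x), sigmoid_xe(0.0, x))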