pylearn: changeset 828:9945cd79fe79 (merge)

author:   Xavier Glorot <glorotxa@iro.umontreal.ca>
date:     Wed, 30 Sep 2009 17:15:01 -0400
parents:  5bbfaf01395d (diff), 43e726898cf9 (current diff)
children: 3f44379177b2, 0f66973e4f95
files:    pylearn/algorithms/sandbox/DAA_inputs_groups.py
diffstat: 1 file changed, 6 insertions(+), 2 deletions(-)
--- a/pylearn/algorithms/sandbox/DAA_inputs_groups.py  Wed Sep 16 19:18:29 2009 -0400
+++ b/pylearn/algorithms/sandbox/DAA_inputs_groups.py  Wed Sep 30 17:15:01 2009 -0400
@@ -100,6 +100,9 @@
 def softsign_act(x):
     return x/(1.0 + T.abs_(x))
 
+def arsinh_act(x):
+    return T.log(x+T.sqrt(1+x*x))
+
 # costs utils:---------------------------------------------------
 # in order to fix numerical instability of the cost and gradient calculation for the cross entropy we calculate it
 # with the following functions direclty from the activation:
@@ -128,6 +131,7 @@
     XS = T.xlogx.xlogx((target+1)/2.0) + T.xlogx.xlogx(1-(target+1)/2.0)
     return -T.mean(T.sum(XE-XS, axis=sum_axis),axis=mean_axis)
 
+
 def cross_entropy(target, output_act, act, mean_axis=0, sum_axis=1):
     if act == 'sigmoid_act':
         return sigmoid_cross_entropy(target, output_act, mean_axis, sum_axis)
@@ -205,7 +209,7 @@
         self.corruption_pattern = corruption_pattern
         self.blockgrad = blockgrad
 
-        assert hid_fn in ('sigmoid_act','tanh_act','softsign_act','tanh2_act')
+        assert hid_fn in ('sigmoid_act','tanh_act','softsign_act','tanh2_act','arsinh_act')
         self.hid_fn = eval(hid_fn)
 
         assert rec_fn in ('sigmoid_act','tanh_act','softsign_act','tanh2_act')
@@ -542,7 +546,7 @@
         self.corruption_pattern = corruption_pattern
         self.blockgrad = blockgrad
 
-        assert act_reg in ('sigmoid_act','tanh_act','softsign_act','tanh2_act')
+        assert act_reg in ('sigmoid_act','tanh_act','softsign_act','tanh2_act','arsinh_act')
         self.act_reg = eval(act_reg)
 
         print '\t**** StackedDAAig.__init__ ****'
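For context, the activation added here is the inverse hyperbolic sine, arsinh(x) = log(x + sqrt(1 + x^2)); the changeset whitelists 'arsinh_act' for hid_fn and act_reg but leaves the rec_fn assertion unchanged. Below is a minimal sketch, not part of the changeset, that checks the closed form against numpy.arcsinh; NumPy stands in for theano.tensor purely so the snippet runs standalone.

    import numpy as np

    def arsinh_act(x):
        # Same closed form as the Theano version added in this changeset;
        # np stands in for theano.tensor (an assumption made so this
        # check runs without Theano).
        return np.log(x + np.sqrt(1 + x * x))

    # Verify the closed form agrees with NumPy's built-in arcsinh
    # on arbitrary test points.
    x = np.linspace(-5.0, 5.0, 101)
    assert np.allclose(arsinh_act(x), np.arcsinh(x))

    # The derivative d/dx arsinh(x) = 1/sqrt(1 + x^2) decays only
    # polynomially, so the unit does not saturate the way sigmoid or
    # tanh do for large |x|.
    print(np.abs(arsinh_act(x) - np.arcsinh(x)).max())

Like tanh and softsign, arsinh is zero-centered and odd; unlike them it is unbounded, growing logarithmically for large |x|.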