# HG changeset patch # User Xavier Glorot # Date 1252100255 14400 # Node ID f1a29c772210ba7c35b96c1efbbe950b28fd77f0 # Parent 2333cd78f5745918a85ed41b4bdcd2a325d28f79 add a tanh2 activation function DAA_input_groups diff -r 2333cd78f574 -r f1a29c772210 pylearn/algorithms/sandbox/DAA_inputs_groups.py --- a/pylearn/algorithms/sandbox/DAA_inputs_groups.py Wed Sep 02 18:28:37 2009 -0400 +++ b/pylearn/algorithms/sandbox/DAA_inputs_groups.py Fri Sep 04 17:37:35 2009 -0400 @@ -93,6 +93,10 @@ def tanh_act(x): return theano.tensor.tanh(x/2.0) +#divide per 2 is a bad idea with many layers... we lose the std of U*x +def tanh2_act(x): + return theano.tensor.tanh(x) + def softsign_act(x): return x/(1.0 + T.abs_(x)) @@ -112,6 +116,12 @@ XS = T.xlogx.xlogx((target+1)/2.0) + T.xlogx.xlogx(1-(target+1)/2.0) return -T.mean(T.sum(XE-XS, axis=sum_axis),axis=mean_axis) +def tanh2_cross_entropy(target, output_act, mean_axis, sum_axis): + XE = (target+1)/2.0 * (- T.log(1 + T.exp(- 2*output_act))) + \ + (1 - (target+1)/2.0) * (- T.log(1 + T.exp( 2*output_act))) + XS = T.xlogx.xlogx((target+1)/2.0) + T.xlogx.xlogx(1-(target+1)/2.0) + return -T.mean(T.sum(XE-XS, axis=sum_axis),axis=mean_axis) + def softsign_cross_entropy(target, output_act, mean_axis, sum_axis): newact = ((output_act/(1.0 + T.abs_(output_act)))+1)/2.0 XE = (target+1)/2.0 * T.log(newact) + (1 - (target+1)/2.0) * T.log(1 - newact) @@ -125,6 +135,8 @@ return tanh_cross_entropy(target, output_act, mean_axis, sum_axis) if act == 'softsign_act': return softsign_cross_entropy(target, output_act, mean_axis, sum_axis) + if act == 'tanh2_act': + return tanh2_cross_entropy(target, output_act, mean_axis, sum_axis) assert False def quadratic(target, output, act, mean_axis = 0): @@ -193,10 +205,10 @@ self.corruption_pattern = corruption_pattern self.blockgrad = blockgrad - assert hid_fn in ('sigmoid_act','tanh_act','softsign_act') + assert hid_fn in ('sigmoid_act','tanh_act','softsign_act','tanh2_act') self.hid_fn = eval(hid_fn) - assert 
rec_fn in ('sigmoid_act','tanh_act','softsign_act') + assert rec_fn in ('sigmoid_act','tanh_act','softsign_act','tanh2_act') self.rec_fn = eval(rec_fn) self.rec_name = rec_fn @@ -530,7 +542,7 @@ self.corruption_pattern = corruption_pattern self.blockgrad = blockgrad - assert act_reg in ('sigmoid_act','tanh_act','softsign_act') + assert act_reg in ('sigmoid_act','tanh_act','softsign_act','tanh2_act') self.act_reg = eval(act_reg) print '\t**** StackedDAAig.__init__ ****' @@ -909,7 +921,7 @@ else: if typeup == 'total': if layer == 'all': - cost[-1] = inst.totalupdate[-2](*data[-1]) + cost[-1] = inst.totalupdate[-1](*data[-1]) else: cost[layer] = inst.totalupdate[layer](*data[layer]) else: