changeset 1308:d5e536338b69

5 activation functions added to formulas
author Razvan Pascanu <r.pascanu@gmail.com>
date Tue, 05 Oct 2010 09:57:35 -0400
parents bc41fd23db25
children e5b7a7913329
files pylearn/formulas/activations.py
diffstat 1 files changed, 106 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pylearn/formulas/activations.py	Tue Oct 05 09:57:35 2010 -0400
@@ -0,0 +1,106 @@
+"""
+Activation function for artificial neural units. 
+
+"""
+
+__authors__   = "Razvan Pascanu, .."
+__copyright__ = "(c) 2010, Universite de Montreal"
+__license__   = "3-clause BSD License"
+__contact__   = "Razvan Pascanu <r.pascanu@gmail.com>"
+
+import theano
+import theano.tensor as T
+
+import tags
+
+@tags.tags('activation','softsign')
+def softsign_act(input):
+    """
+    Returns a symbolic variable that computes the softsign of ``input``.
+    
+    .. math::
+                f(input) = \frac{input}{1.0 + |input|}
+
+    :type input:  tensor-like
+    :param input: input tensor to which softsign should be applied
+    :rtype:       Theano variable
+    :return:      tensor obtained after applying the softsign function
+
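+    A minimal usage sketch (illustrative names; assumes a working Theano
+    install):
+
+    >>> x = theano.tensor.dvector('x')
+    >>> f = theano.function([x], softsign_act(x))
+    >>> y = f([0., 1., -1.])    # y is approximately [0., 0.5, -0.5]
+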
+    """
+    return input/(1.0 + T.abs_(input))
+
+@tags.tags('activation','softsign','abs')
+def abssoftsign_act(input):
+    """
+    Returns a symbolic variable that computes the absolute value of the
+    softsign function on the input tensor ``input``.
+
+    .. math::
+                f(input) = \left| \frac{input}{1.0 + |input|} \right|
+
+    :type input:  tensor-like
+    :param input: input tensor to which softsign should be applied
+    :rtype:       Theano variable
+    :return:      tensor obtained by taking the absolute value of softsign 
+                  of the input
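+
+    A minimal usage sketch (illustrative names; assumes Theano):
+
+    >>> x = theano.tensor.dvector('x')
+    >>> f = theano.function([x], abssoftsign_act(x))
+    >>> y = f([1., -1.])    # y is approximately [0.5, 0.5]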
+    """
+    return T.abs_(input)/(1.0 + T.abs_(input))
+
+
+@tags.tags('activation','rectifier')
+def rectifier_act(input):
+    """
+    Returns a symbolic variable that computes the value of the ``input`` if
+    and only if it is positive, 0 otherwise.
+
+    .. math::
+                f(input) = \left\lbrace \begin{array}{l}
+                            input \quad \text{ if } input > 0 \\
+                            0     \quad \text{ else }
+                         \end{array}
+                         \right
+
+    :type input:  tensor-like
+    :param input: input tensor to which the rectifier activation function 
+                  will be applied
+    :rtype:       Theano variable
+    :return:      non-negative tensor that equals the input where the input
+                  is positive, and 0 elsewhere
+
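+    A minimal usage sketch (illustrative names; assumes Theano):
+
+    >>> x = theano.tensor.dvector('x')
+    >>> f = theano.function([x], rectifier_act(x))
+    >>> y = f([-2., 0., 3.])    # y is [0., 0., 3.]
+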
+    """
+    return input*(input>=0)
+
+@tags.tags('activation','softplus')
+def softplus_act(input):
+    """
+    Returns a symbolic variable that computes the softplus of ``input``.
+    Note (TODO): rescale in order to have a steady-state regime close to 0
+    at initialization.
+
+    .. math::
+                f(input) = \ln \left( 1 + e^{input} \right)
+
+    :type input:  tensor-like
+    :param input: input tensor to which the softplus should be applied
+    :rtype:       Theano variable
+    :return:      tensor obtained by applying softplus to the input
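+
+    A minimal usage sketch (illustrative names; assumes Theano):
+
+    >>> x = theano.tensor.dvector('x')
+    >>> f = theano.function([x], softplus_act(x))
+    >>> y = f([0.])    # y is approximately [0.6931], i.e. ln(2)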
+    """
+    return theano.tensor.nnet.softplus(input)
+
+@tags.tags('activation','abs')
+def abs_act(input):
+    """
+    Returns the symbolic variable that represents the absolute value of
+    ``input``.
+
+    .. math::
+                f(input) = |input|
+
+    :type input:  tensor-like
+    :param input: input tensor
+    :rtype:       Theano variable
+    :return:      tensor that represents the absolute value of the input
+
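+    A minimal usage sketch (illustrative names; assumes Theano):
+
+    >>> x = theano.tensor.dvector('x')
+    >>> f = theano.function([x], abs_act(x))
+    >>> y = f([-1.5, 2.])    # y is [1.5, 2.]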
+
+    """
+    return theano.tensor.abs_(input)
+
+