# HG changeset patch
# User Olivier Delalleau
# Date 1286484544 14400
# Node ID 9f1f51a5939f5140dd3b115b1a7909dda25f59f5
# Parent 3234913a3642b5e3074394e23aff3d4faf754901
# Parent f21693eecec74c86be6bacefbfba0286221429dd
Merged

diff -r 3234913a3642 -r 9f1f51a5939f doc/formulas.txt
--- a/doc/formulas.txt Thu Oct 07 16:48:21 2010 -0400
+++ b/doc/formulas.txt Thu Oct 07 16:49:04 2010 -0400
@@ -5,13 +5,18 @@
 ==========
 
 .. taglist::
 
+pylearn.formulas.activations
+----------------------------
+.. automodule:: pylearn.formulas.activations
+    :members:
+
 pylearn.formulas.costs
------------------------
+----------------------
 .. automodule:: pylearn.formulas.costs
     :members:
 
 pylearn.formulas.noise
------------------------
+----------------------
 .. automodule:: pylearn.formulas.noise
     :members:

diff -r 3234913a3642 -r 9f1f51a5939f pylearn/formulas/activations.py
--- a/pylearn/formulas/activations.py Thu Oct 07 16:48:21 2010 -0400
+++ b/pylearn/formulas/activations.py Thu Oct 07 16:49:04 2010 -0400
@@ -13,13 +13,161 @@
 
 import tags
+
+
+@tags.tags('activation', 'unary',
+           'sigmoid', 'logistic',
+           'non-negative', 'increasing')
+def sigmoid(x):
+    """
+    Return a symbolic variable representing the sigmoid (logistic)
+    function of the input x.
+
+    .. math::
+        \\textrm{sigmoid}(x) = \\frac{1}{1 + e^{-x}}
+
+    The image of :math:`\\textrm{sigmoid}(x)` is the open interval (0,
+    1), *in theory*. *In practice*, due to rounding errors in floating
+    point representations, :math:`\\textrm{sigmoid}(x)` will lie in the
+    closed range [0, 1].
+
+    :param x: tensor-like (a Theano variable with type theano.Tensor,
+              or a value that can be converted to one) :math:`\in
+              \mathbb{R}^n`
+
+    :return: a Theano variable with the same shape as the input, where
+             the sigmoid function is applied to each element of the
+             input x.
+    """
+    return theano.tensor.nnet.sigmoid(x)
+
+
+
+@tags.tags('activation', 'unary',
+           'tanh', 'hyperbolic tangent',
+           'odd', 'increasing')
+def tanh(x):
+    """
+    Return a symbolic variable representing the tanh (hyperbolic
+    tangent) of the input x.
+
+    .. math::
+        \\textrm{tanh}(x) = \\frac{e^{2x} - 1}{e^{2x} + 1}
+
+    The image of :math:`\\textrm{tanh}(x)` is the open interval (-1,
+    1), *in theory*. *In practice*, due to rounding errors in floating
+    point representations, :math:`\\textrm{tanh}(x)` will lie in the
+    closed range [-1, 1].
+
+    :param x: tensor-like (a Theano variable with type theano.Tensor,
+              or a value that can be converted to one) :math:`\in
+              \mathbb{R}^n`
+
+    :return: a Theano variable with the same shape as the input, where
+             the tanh function is applied to each element of the input
+             x.
+    """
+    return theano.tensor.tanh(x)
+
+
+
+@tags.tags('activation', 'unary',
+           'tanh', 'hyperbolic tangent', 'normalized',
+           'odd', 'increasing')
+def tanh_normalized(x):
+    """
+    Return a symbolic variable representing a normalized tanh
+    (hyperbolic tangent) of the input x.
+    TODO: where does 1.759 come from? why is it normalized like that?
+
+    .. math::
+        \\textrm{tanh\_normalized}(x) = 1.759\\textrm{ tanh}\left(\\frac{2x}{3}\\right)
+
+    The image of :math:`\\textrm{tanh\_normalized}(x)` is the open
+    interval (-1.759, 1.759), *in theory*. *In practice*, due to
+    rounding errors in floating point representations,
+    :math:`\\textrm{tanh\_normalized}(x)` will lie in the approximate
+    closed range [-1.759, 1.759]. The exact bound depends on the
+    precision of the floating point representation.
+
+    :param x: tensor-like (a Theano variable with type theano.Tensor,
+              or a value that can be converted to one) :math:`\in
+              \mathbb{R}^n`
+
+    :return: a Theano variable with the same shape as the input, where
+             the tanh\_normalized function is applied to each element of
+             the input x.
+    """
+    return 1.759*theano.tensor.tanh(0.6666*x)
+
+
+
+@tags.tags('activation', 'unary',
+           'abs_tanh', 'abs', 'tanh', 'hyperbolic tangent',
+           'non-negative', 'even')
+def abs_tanh(x):
+    """
+    Return a symbolic variable representing the absolute value of the
+    hyperbolic tangent of x.
+
+    .. math::
+        \\textrm{abs\_tanh}(x) = |\\textrm{tanh}(x)|
+
+    The image of :math:`\\textrm{abs\_tanh}(x)` is the interval [0, 1),
+    *in theory*. *In practice*, due to rounding errors in floating
+    point representations, :math:`\\textrm{abs\_tanh}(x)` will lie in
+    the range [0, 1].
+
+    :param x: tensor-like (a Theano variable with type theano.Tensor,
+              or a value that can be converted to one) :math:`\in
+              \mathbb{R}^n`
+
+    :return: a Theano variable with the same shape as the input, where
+             the abs_tanh function is applied to each element of the
+             input x.
+    """
+    return theano.tensor.abs_(theano.tensor.tanh(x))
+
+
+
+@tags.tags('activation', 'unary',
+           'abs_tanh', 'abs', 'tanh', 'hyperbolic tangent', 'normalized',
+           'non-negative', 'even')
+def abs_tanh_normalized(x):
+    """
+    Return a symbolic variable representing the absolute value of a
+    normalized tanh (hyperbolic tangent) of the input x.
+    TODO: where does 1.759 come from? why is it normalized like that?
+
+    .. math::
+        \\textrm{abs\_tanh\_normalized}(x) = \left|1.759\\textrm{ tanh}\left(\\frac{2x}{3}\\right)\\right|
+
+    The image of :math:`\\textrm{abs\_tanh\_normalized}(x)` is the range
+    [0, 1.759), *in theory*. *In practice*, due to rounding errors in
+    floating point representations,
+    :math:`\\textrm{abs\_tanh\_normalized}(x)` will lie in the
+    approximate closed range [0, 1.759]. The exact upper bound
+    depends on the precision of the floating point representation.
+
+    :param x: tensor-like (a Theano variable with type theano.Tensor,
+              or a value that can be converted to one) :math:`\in
+              \mathbb{R}^n`
+
+    :return: a Theano variable with the same shape as the input, where
+             the abs_tanh_normalized function is applied to each
+             element of the input x.
+    """
+    return theano.tensor.abs_(1.759*theano.tensor.tanh(0.6666*x))
+
+
+
 @tags.tags('activation','softsign')
 def softsign_act(input):
     """
     Returns a symbolic variable that computes the softsign of ``input``.
 
     .. math::
-        f(input) = \frac{input}{1.0 + |input|}
+        f(input) = \\frac{input}{1.0 + |input|}
 
     :type input: tensor-like
     :param input: input tensor to which softsign should be applied
@@ -36,7 +184,7 @@
     softsign function on the input tensor ``input``.
 
     .. math::
-        f(input) = \left| \frac{input}{1.0 +|input|} \right|
+        f(input) = \left| \\frac{input}{1.0 + |input|} \\right|
 
     :type input: tensor-like
     :param input: input tensor to which softsign should be applied
@@ -54,11 +202,11 @@
     and only if it is positive, 0 otherwise.
 
     .. math::
-        f(input) = \left\lbrace \begin{array}{l}
-                     input \quad \text{ if } input > 0 \\
-                     0 \quad \text{ else }
+        f(input) = \left \lbrace \\begin{array}{l}
+                     input \quad \\text{ if } input > 0 \\\\
+                     0 \quad \\text{ else }
                    \end{array}
-                  \right
+                  \\right \}
 
     :type input: tensor-like
     :param input: input tensor to which the rectifier activation function
@@ -78,7 +226,7 @@
     at initialization.
 
    .. math::
-        f(input) = ln \left( 1 + e^{input} \right)
+        f(input) = ln \left( 1 + e^{input} \\right)
 
     :type input: tensor-like
     :param input: input tensor to which the softplus should be applied
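
The snippet below is an editor's usage sketch, not part of the changeset: it assumes Theano and pylearn are installed and importable under the module path shown in the automodule directive above, and it only exercises two of the formulas added by this patch (sigmoid and tanh_normalized) to show how they plug into an ordinary Theano graph:

    import numpy
    import theano
    import theano.tensor as T

    from pylearn.formulas import activations

    # Symbolic input; the formulas are elementwise, so any tensor shape works.
    x = T.matrix('x')

    # Build symbolic expressions from the activation formulas added above.
    y_sigmoid = activations.sigmoid(x)
    y_tanh_norm = activations.tanh_normalized(x)

    # Compile a function that evaluates both expressions.
    f = theano.function([x], [y_sigmoid, y_tanh_norm])

    s, t = f(numpy.array([[-2.0, 0.0, 2.0]]))
    # s lies in [0, 1] and t in roughly [-1.759, 1.759],
    # matching the ranges documented in the docstrings.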