changeset 659:85436cda77ba
Added a cost to sandbox, building out Poisson regressor
author    Joseph Turian <turian@gmail.com>
date      Mon, 09 Mar 2009 00:25:46 -0400
parents   6d927441a38f
children  12b1b09ffd2b
files     pylearn/algorithms/sandbox/__init__.py pylearn/algorithms/sandbox/cost.py pylearn/algorithms/sandbox/test_cost.py pylearn/sandbox/test_speed.py
diffstat  3 files changed, 88 insertions(+), 1 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pylearn/algorithms/sandbox/cost.py	Mon Mar 09 00:25:46 2009 -0400
@@ -0,0 +1,61 @@
+"""
+Cost functions.
+
+@note: All of these functions return one cost per example, so it is your
+job to perform a tensor.sum over the individual example losses.
+"""
+
+from theano import tensor, scalar
+import numpy
+
+class LogFactorial(scalar.UnaryScalarOp):
+    """
+    Compute log x!.
+    @todo: Rewrite so that it uses INTs not FLOATs.
+    @todo: Move this to Theano.
+    @todo: This function is slow; we probably want to cache the values.
+    """
+    @staticmethod
+    def st_impl(x):
+        if not isinstance(x, int):
+            raise TypeError('type(x) = %s, must be int' % type(x))
+        if x == 0:
+            return 0.0
+        # log x! = log 1 + log 2 + ... + log x
+        v = 0.0
+        for i in range(2, x + 1):
+            v += numpy.log(i)
+        return v
+    def impl(self, x):
+        return LogFactorial.st_impl(x)
+    def grad(self, (x,), (gz,)):
+        raise NotImplementedError('gradient not defined over discrete values')
+#        return [gz * (1 + scalar.log(x))]
+#    def c_code(self, node, name, (x,), (z,), sub):
+#        if node.inputs[0].type in [scalar.float32, scalar.float64]:
+#            return """%(z)s =
+#                %(x)s == 0.0
+#                ? 0.0
+#                : %(x)s * log(%(x)s);""" % locals()
+#        raise NotImplementedError('only floating point is implemented')
+scalar_logfactorial = LogFactorial(scalar.upgrade_to_float, name='scalar_logfactorial')
+logfactorial = tensor.Elemwise(scalar_logfactorial, name='logfactorial')
+
+
+def nlpoisson(target, output, beta_scale=1, axis=0):
+    """
+    The negative log Poisson regression probability.
+    From Ranzato and Szummer (2008).
+
+    Output should be of the form weight*code+bias, i.e. unsquashed.
+
+    There is a beta term that is proportional to document length. We
+    are not sure what beta_scale the authors use; we default to 1,
+    but this value might be inappropriate.
+
+    Axis is the axis along which we sum the target values to obtain
+    the document length.
+    @bug: This axis may be wrong!!
+    """
+    beta = beta_scale * tensor.sum(target, axis=axis)
+    return beta * tensor.exp(output) - tensor.dot(target, output) + logfactorial(target)
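For reference, the expression returned by nlpoisson is the standard Poisson negative log-likelihood with rate lambda = beta * exp(output), minus a term that is constant with respect to the model output. A sketch of the algebra, where log n! is what LogFactorial computes:

    -\log P(n \mid \lambda) = \lambda - n \log \lambda + \log n!
    \lambda = \beta e^{\mathrm{output}} \;\Rightarrow\;
    -\log P(n) = \beta e^{\mathrm{output}} - n \cdot \mathrm{output} + \log n! - n \log \beta

The code keeps the first three terms; the dropped n \log \beta term depends only on the targets, not on the model output, so it does not affect optimization.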
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pylearn/algorithms/sandbox/test_cost.py	Mon Mar 09 00:25:46 2009 -0400
@@ -0,0 +1,25 @@
+import pylearn.algorithms.sandbox.cost as cost
+
+import unittest
+import theano as T
+import theano.tensor as TT
+import numpy
+
+class T_logfactorial(unittest.TestCase):
+    def test(self):
+        x = TT.as_tensor(range(10))
+        o = cost.logfactorial(x)
+        f = T.function([], o)
+        self.failUnless(numpy.all(numpy.abs(f() - numpy.asarray([0., 0., 0.69314718, 1.79175947, 3.17805383, 4.78749174, 6.57925121, 8.52516136, 10.60460290, 12.80182748])) < 1e-5))
+
+    def test_float(self):
+        """
+        This should fail because we can't use floats in logfactorial.
+        """
+        x = TT.as_tensor([0.5, 2.7])
+        o = cost.logfactorial(x)
+        f = T.function([], o)
+        self.failUnlessRaises(TypeError, f)
+
+if __name__ == '__main__':
+    unittest.main()
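A minimal usage sketch for the new cost, assuming the theano.function and tensor constructors exercised in the tests above; the variable names and shapes are hypothetical, since the axis/shape conventions of nlpoisson are still marked @bug:

import theano
from theano import tensor
import pylearn.algorithms.sandbox.cost as cost

# Hypothetical symbolic inputs: integer word counts (LogFactorial
# requires ints) and the unsquashed model output weight*code+bias.
target = tensor.lvector('target')
output = tensor.dvector('output')

per_example = cost.nlpoisson(target, output)
total = tensor.sum(per_example)  # per the module @note, the caller does the sum
f = theano.function([target, output], total)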