Mercurial > pylearn
view pylearn/gd/tests/test_sgd.py @ 952:5f80351bc762
Moving sgd to a new 'gd' pylearn module, where it should be joined by TONGA
and Hessian-Free.
author | James Bergstra <bergstrj@iro.umontreal.ca> |
---|---|
date | Thu, 19 Aug 2010 11:53:19 -0400 |
parents | pylearn/algorithms/tests/test_sgd.py@be420f1836bb |
children |
line wrap: on
line source
"""Tests for pylearn.gd.sgd.StochasticGradientDescent.

Each test minimizes the scalar cost (1 - x*y)**2 with respect to y at
input x == 3.0, whose unique minimum is y == 1/3.
"""
import theano
from theano.compile.debugmode import DebugMode
from pylearn.gd import sgd

# DebugMode is far too slow for these 100-iteration optimization loops;
# fall back to FAST_RUN when it happens to be the default mode.
mode = theano.compile.mode.get_default_mode()
if isinstance(mode, DebugMode):
    mode = 'FAST_RUN'


def test_sgd0():
    """SGD with a fixed numeric stepsize converges to the minimizer y = 1/3."""
    x = theano.tensor.dscalar('x')
    y = theano.tensor.dscalar('y')

    M = sgd.StochasticGradientDescent([x], (1.0 - x * y)**2, [y],
                                      stepsize=0.01)
    M.y = y
    m = M.make(mode=mode)
    m.y = 5.0
    # `range` instead of Python-2-only `xrange`: identical iteration here,
    # and keeps the test runnable under Python 3.
    for i in range(100):
        c = m.step_cost(3.0)
        # print c[0], m.y
    assert c < 1.0e-5
    assert abs(m.y - (1.0 / 3)) < 1.0e-4


def test_sgd_stepsize_variable():
    """SGD with a symbolic stepsize converges, and a zero stepsize freezes y."""
    x = theano.tensor.dscalar('x')
    y = theano.tensor.dscalar('y')
    lr = theano.tensor.dscalar('lr')

    M = sgd.StochasticGradientDescent([x], (1.0 - x * y)**2, [y],
                                      stepsize=lr)
    M.y = y
    M.lr = lr
    m = M.make(mode=mode)
    m.y = 5.0
    m.lr = 0.01
    for i in range(100):
        c = m.step_cost(3.0)
        # print c, m.y
    assert c < 1.0e-5
    assert abs(m.y - (1.0 / 3)) < 1.0e-4

    # Test that changing the lr has impact: with lr == 0 the parameter
    # must not move at all.
    m.y = 5.0
    m.lr = 0.0
    for i in range(10):
        c = m.step_cost(3.0)
        # print c, m.y
    assert m.y == 5.0


def test_sgd_stepsize_none():
    """With no stepsize argument, stepsize defaults to None and must be set
    on the compiled module before stepping."""
    x = theano.tensor.dscalar('x')
    y = theano.tensor.dscalar('y')

    M = sgd.StochasticGradientDescent([x], (1.0 - x * y)**2, [y])
    M.y = y
    m = M.make(mode=mode)
    m.y = 5.0
    # No default learning rate exists: the constructor leaves stepsize as
    # None, so the caller must assign one before calling step_cost.
    assert m.stepsize is None
    m.stepsize = 0.01
    for i in range(100):
        c = m.step_cost(3.0)
        # print c, m.y
    assert c < 1.0e-5
    assert abs(m.y - (1.0 / 3)) < 1.0e-4


if __name__ == '__main__':
    # Run all three tests, not just the first one.
    test_sgd0()
    test_sgd_stepsize_variable()
    test_sgd_stepsize_none()