Mercurial > pylearn
changeset 924:be420f1836bb
make tests faster in debug mode
author | Frederic Bastien <nouiz@nouiz.org> |
---|---|
date | Tue, 30 Mar 2010 13:46:08 -0400 |
parents | 06cc2b0c06ea |
children | 6be337c30928 |
files | pylearn/algorithms/tests/test_exponential_mean.py pylearn/algorithms/tests/test_sgd.py |
diffstat | 2 files changed, 13 insertions(+), 7 deletions(-) [+] |
line wrap: on
line diff
--- a/pylearn/algorithms/tests/test_exponential_mean.py	Tue Mar 30 12:09:11 2010 -0400
+++ b/pylearn/algorithms/tests/test_exponential_mean.py	Tue Mar 30 13:46:08 2010 -0400
@@ -1,4 +1,5 @@
 import theano, numpy
+from theano.compile.debugmode import DebugMode
 from pylearn.algorithms import exponential_mean
 
 def test_mean():
@@ -50,9 +51,9 @@
     assert i > rows_to_test
 
 def test_dynamic_normalizer():
-    m = theano.compile.mode.get_defautl_mode()
-    if isinstance(m,DebugMode):
-        m='FAST_RUN'
+    mode = theano.compile.mode.get_default_mode()
+    if isinstance(mode,DebugMode):
+        mode = 'FAST_RUN'
 
     x = theano.tensor.dvector()
 
     rows_to_test = 100
@@ -79,7 +80,7 @@
     M.f = theano.Method([x], [D.output, M.dn_mean.curval, M.dn_var.curval, M.x_mean.curval]
             , updates)
 
-    m = M.make(mode=m)
+    m = M.make(mode=mode)
     m.dn.initialize()
     m.dn_mean.initialize()
     m.dn_var.initialize()
--- a/pylearn/algorithms/tests/test_sgd.py	Tue Mar 30 12:09:11 2010 -0400
+++ b/pylearn/algorithms/tests/test_sgd.py	Tue Mar 30 13:46:08 2010 -0400
@@ -1,6 +1,11 @@
 import theano
+from theano.compile.debugmode import DebugMode
 from pylearn.algorithms import sgd
 
+mode = theano.compile.mode.get_default_mode()
+if isinstance(mode,DebugMode):
+    mode = 'FAST_RUN'
+
 def test_sgd0():
 
     x = theano.tensor.dscalar('x')
@@ -8,7 +13,7 @@
     M = sgd.StochasticGradientDescent([x], (1.0 - x * y)**2, [y], stepsize=0.01)
     M.y = y
 
-    m = M.make()
+    m = M.make(mode=mode)
     m.y = 5.0
     for i in xrange(100):
         c = m.step_cost(3.0)
@@ -26,7 +31,7 @@
     M = sgd.StochasticGradientDescent([x], (1.0 - x * y)**2, [y], stepsize=lr)
     M.y = y
     M.lr = lr
 
-    m = M.make()
+    m = M.make(mode=mode)
     m.y = 5.0
     m.lr = 0.01
     for i in xrange(100):
@@ -54,7 +59,7 @@
     M = sgd.StochasticGradientDescent([x], (1.0 - x * y)**2, [y])
     M.y = y
 
-    m = M.make()
+    m = M.make(mode=mode)
     m.y = 5.0
     #there should be a learning rate here by default
     assert m.stepsize is None