pylearn changeset 909:8e3f1d852ab1
Merge
| author | fsavard |
|---|---|
| date | Thu, 18 Mar 2010 11:34:29 -0400 |
| parents | 9472d234db2e (current diff), 6d4f98f86514 (diff) |
| children | 8837535006f1 |
| diffstat | 4 files changed, 16 insertions(+), 14 deletions(-) |
--- a/pylearn/algorithms/tests/test_daa.py Thu Mar 18 11:33:49 2010 -0400
+++ b/pylearn/algorithms/tests/test_daa.py Thu Mar 18 11:34:29 2010 -0400
@@ -6,12 +6,14 @@
 import time

 import pylearn.algorithms.logistic_regression
-from theano.compile.mode import default_mode
-
-def test_train_daa(mode = default_mode):
+from theano import config
+from pylearn.algorithms.stacker import Stacker
+from pylearn.algorithms.daa import SigmoidXEDenoisingAA
+from pylearn.algorithms.regressor import BinRegressor

+def test_train_daa(mode = config.mode):
     ndaa = 3
-    daa = models.Stacker([(models.SigmoidXEDenoisingAA, 'hidden')] * ndaa + [(models.BinRegressor, 'output')],
+    daa = Stacker([(SigmoidXEDenoisingAA, 'hidden')] * ndaa + [(BinRegressor, 'output')],
                   regularize = False)

     model = daa.make([4, 20, 20, 20, 1],
@@ -39,7 +41,7 @@

 def test_train_daa2(mode = theano.Mode('c|py', 'fast_run')):
     ndaa = 3
-    daa = models.Stacker([(models.SigmoidXEDenoisingAA, 'hidden')] * ndaa + [(pylearn.algorithms.logistic_regression.Module_Nclass, 'pred')],
+    daa = Stacker([(SigmoidXEDenoisingAA, 'hidden')] * ndaa + [(pylearn.algorithms.logistic_regression.Module_Nclass, 'pred')],
                   regularize = False)

     model = daa.make([4] + [20] * ndaa + [10],
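Note on the test_daa.py change: the test's default compilation mode now comes from theano.config.mode rather than the removed theano.compile.mode.default_mode. A minimal sketch of that lookup pattern, with a made-up toy function; only theano.config.mode and theano.function are Theano API here, the rest is illustrative:

    import theano
    import theano.tensor as T
    from theano import config

    def compile_doubler(mode=config.mode):
        # config.mode is the globally configured compilation mode
        # (a string such as 'FAST_RUN', or a Mode instance)
        x = T.dmatrix('x')
        return theano.function([x], 2 * x, mode=mode)

    f = compile_doubler()                 # uses the configured default mode
    g = compile_doubler('FAST_COMPILE')   # or override it explicitly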
--- a/pylearn/sandbox/test_scan_inputs_groups.py Thu Mar 18 11:33:49 2010 -0400
+++ b/pylearn/sandbox/test_scan_inputs_groups.py Thu Mar 18 11:34:29 2010 -0400
@@ -9,6 +9,7 @@
 import theano.tensor as T
 from pylearn.sandbox.scan_inputs_groups import FillMissing
 import theano.compile.mode as mode_module
+import theano

 class TestFillMissing(unittest.TestCase):
     def setUp(self):
@@ -16,9 +17,9 @@
         #we need to desactivate the check for NaN value as we have them in input
         #TODO: Make an option to don't check NaN value in input only, bug check in output.
-        m=mode_module.default_mode
-        if m=="DEBUG_MODE":
-            m=copy.copy(mode_module.predefined_modes[m])
+        m=mode_module.get_default_mode()
+        if isinstance(m,theano.compile.debugmode.DebugMode):
+            m=copy.copy(m)
         m.check_isfinite=False
         self.mode = m
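The setUp() fix above reflects the newer mode API: get_default_mode() returns a Mode object rather than a string, so a DebugMode is detected with isinstance and copied before its NaN check is disabled. A self-contained sketch of that pattern, assuming nothing beyond the Theano calls visible in the diff (the helper name is made up):

    import copy
    import theano
    import theano.compile.mode as mode_module

    def default_mode_without_isfinite_check():
        # get_default_mode() returns the configured Mode instance
        m = mode_module.get_default_mode()
        if isinstance(m, theano.compile.debugmode.DebugMode):
            # copy so the global default is left untouched, then let
            # NaN/inf values pass through DebugMode's finiteness checks
            m = copy.copy(m)
            m.check_isfinite = False
        return m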
--- a/pylearn/shared/layers/kording2004.py Thu Mar 18 11:33:49 2010 -0400
+++ b/pylearn/shared/layers/kording2004.py Thu Mar 18 11:34:29 2010 -0400
@@ -1,7 +1,6 @@
 import numpy
 import theano.tensor
-from hpu.theano_outgoing import mean, var, cov
-
+from theano.tensor.basic import mean
 from pylearn.shared.layers.exponential_mean import ExponentialMean # exponential_mean.py

 import logging
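kording2004.py now takes mean from Theano itself; the dropped hpu.theano_outgoing helpers var and cov are no longer imported here. A quick illustration of the replacement symbol in isolation, with arbitrary variable names:

    import numpy
    import theano
    import theano.tensor as T
    from theano.tensor.basic import mean   # same object as T.mean

    x = T.dmatrix('x')
    f = theano.function([x], mean(x, axis=0))   # symbolic column means
    print(f(numpy.ones((3, 4))))                # [ 1.  1.  1.  1.]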
--- a/pylearn/shared/layers/tests/test_kouh2008.py Thu Mar 18 11:33:49 2010 -0400
+++ b/pylearn/shared/layers/tests/test_kouh2008.py Thu Mar 18 11:34:29 2010 -0400
@@ -9,9 +9,9 @@
     n_out = 10
     n_terms = 3
     rng = numpy.random.RandomState(23455)
-    layer = Kouh2008.new_filters(rng, tensor.dmatrix(), n_in, n_out, n_terms, dtype='float64')
+    layer = Kouh2008.new_filters_expbounds(rng, tensor.dmatrix(), n_in, n_out, n_terms, dtype='float64')
     assert layer.output.dtype =='float64'
-    layer = Kouh2008.new_filters(rng, tensor.fmatrix(), n_in, n_out, n_terms, dtype='float32')
+    layer = Kouh2008.new_filters_expbounds(rng, tensor.fmatrix(), n_in, n_out, n_terms, dtype='float32')
     assert layer.output.dtype =='float32'

 def run_w_random(bsize=10, n_iter=200, n_in = 1024, n_out = 100, n_terms=2, dtype='float64'):
@@ -19,7 +19,7 @@
     y = tensor.lvector()
     rng = numpy.random.RandomState(23455)
-    layer = Kouh2008.new_filters(rng, x, n_in, n_out, n_terms, dtype='float64')
+    layer = Kouh2008.new_filters_expbounds(rng, x, n_in, n_out, n_terms, dtype='float64')
     out = LogisticRegression.new(layer.output, n_out, 2)
     cost = out.nll(y).sum()
@@ -52,7 +52,7 @@
     y = tensor.lvector()
     rng = numpy.random.RandomState(23455)
-    layer = Kouh2008.new_filters(rng, x, n_in, n_out, n_terms, dtype='float64')
+    layer = Kouh2008.new_filters_expbounds(rng, x, n_in, n_out, n_terms, dtype='float64')
     out = LogisticRegression.new(layer.output, n_out, 2)
     cost = out.nll(y).sum()
     #joint optimization except for one of the linear filters