pylearn: changeset 818:f4729745bb58
backporting to 2.4
author   | dumitru@deepnets.mtv.corp.google.com
date     | Wed, 02 Sep 2009 14:22:02 -0700
parents  | c61d775f4f95
children | 7dfecf11cbf4
files    | pylearn/algorithms/logistic_regression.py pylearn/algorithms/regressor.py pylearn/algorithms/stacker.py pylearn/algorithms/tests/test_daa.py pylearn/datasets/MNIST.py pylearn/datasets/config.py pylearn/datasets/smallNorb.py pylearn/io/filetensor.py
diffstat | 8 files changed, 145 insertions(+), 28 deletions(-)
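The change repeated across all eight files below is a Python 2.4 backport: the conditional expression "x if cond else y" was added in Python 2.5 (PEP 308) and is a SyntaxError on 2.4, so every occurrence is rewritten as a plain if/else statement, with the original one-liner kept in a "#backport" comment. (A filtering list comprehension such as [p for p in seq if cond] is already valid on 2.4; only the ternary form is new in 2.5.) A minimal sketch of the pattern, with illustrative names rather than pylearn's own:

    import theano.tensor as T

    w = None

    # Python 2.5+ only -- SyntaxError on Python 2.4:
    #w_var = w if w is not None else T.dmatrix()

    # Python 2.4-compatible form used throughout this changeset:
    if w is not None:
        w_var = w
    else:
        w_var = T.dmatrix()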
--- a/pylearn/algorithms/logistic_regression.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/algorithms/logistic_regression.py	Wed Sep 02 14:22:02 2009 -0700
@@ -33,18 +33,57 @@
         self.n_in = n_in
         self.n_out = n_out

-        self.input = input if input is not None else T.matrix()
-        self.target = target if target is not None else T.lvector()
+        if input is not None:
+            self.input = input
+        else:
+            self.input = T.matrix()

-        self.w = w if w is not None else (T.dmatrix())
-        self.b = b if b is not None else (T.dvector())
+        if target is not None:
+            self.target = target
+        else:
+            self.target = T.lvector()
+
+        #backport
+        #self.input = input if input is not None else T.matrix()
+        #self.target = target if target is not None else T.lvector()
+
+        if w is not None:
+            self.w = w
+        else:
+            self.w = (T.dmatrix())
+        if b is not None:
+            self.b = b
+        else:
+            self.b = (T.dvector())
+
+        #backport
+        #self.w = w if w is not None else (T.dmatrix())
+        #self.b = b if b is not None else (T.dvector())
+
+        self.params = []
+        for p in [self.w, self.b]:
+            if p.owner is None:
+                self.params += [p]
+
+        #backport
         #the params of the model are the ones we fit to the data
-        self.params = [p for p in [self.w, self.b] if p.owner is None]
+        #self.params = [p for p in [self.w, self.b] if p.owner is None]

+        if l2 is not None:
+            self.l2 = l2
+        else:
+            self.l2 = (T.dscalar())
+
+        if l1 is not None:
+            self.l1 = l1
+        else:
+            self.l1 = (T.dscalar())
+
+        #backport
         #the hyper-parameters of the model are not fit to the data
-        self.l2 = l2 if l2 is not None else (T.dscalar())
-        self.l1 = l1 if l1 is not None else (T.dscalar())
+        #self.l2 = l2 if l2 is not None else (T.dscalar())
+        #self.l1 = l1 if l1 is not None else (T.dscalar())

         #here we actually build the model
         self.linear_output = T.dot(self.input, self.w) + self.b
@@ -163,14 +202,46 @@
     def __init__(self, input=None, targ=None, w=None, b=None, lr=None, regularize=False):
         super(LogReg2, self).__init__() #boilerplate

-        self.input = (input) if input is not None else T.matrix('input')
-        self.targ = (targ) if targ is not None else T.lcol()
+        if input is not None:
+            self.input = (input)
+        else:
+            self.input = T.matrix('input')
+
+        if targ is not None:
+            self.targ = (targ)
+        else:
+            self.targ = T.lcol()
+
+        #self.input = (input) if input is not None else T.matrix('input')
+        #self.targ = (targ) if targ is not None else T.lcol()
+
+        if w is not None:
+            self.w = (w)
+        else:
+            self.w = (T.dmatrix())

-        self.w = (w) if w is not None else (T.dmatrix())
-        self.b = (b) if b is not None else (T.dvector())
-        self.lr = (lr) if lr is not None else (T.dscalar())
+        if b is not None:
+            self.b = (b)
+        else:
+            self.b = (T.dvector())
+
+        if lr is not None:
+            self.lr = (lr)
+        else:
+            self.lr = (T.scalar())

-        self.params = [p for p in [self.w, self.b] if p.owner is None]
+        #backport
+        #self.w = (w) if w is not None else (T.dmatrix())
+        #self.b = (b) if b is not None else (T.dvector())
+        #self.lr = (lr) if lr is not None else (T.dscalar())
+
+        self.params = []
+        for p in [self.w, self.b]:
+            if p.owner is None:
+                self.params += [p]
+
+        #backport
+        #self.params = [p for p in [self.w, self.b] if p.owner is None]

         output = nnet.sigmoid(T.dot(self.x, self.w) + self.b)
         xent = -self.targ * T.log(output) - (1.0 - self.targ) * T.log(1.0 - output)
@@ -251,11 +322,23 @@
     def __init__(self, n_in=None, n_out=None, w=None, b=None):
         super(LogRegNew, self).__init__() #boilerplate

+        if w is not None:
+            self.w = w
+        else:
+            self.w = (T.dmatrix())
+
+        if b is not None:
+            self.b = b
+        else:
+            self.b = (T.dvector())
+
         self.n_in = n_in
         self.n_out = n_out

-        self.w = w if w is not None else (T.dmatrix())
-        self.b = b if b is not None else (T.dvector())
+        #backport
+        #self.w = w if w is not None else (T.dmatrix())
+        #self.b = b if b is not None else (T.dvector())

     def _instance_initialize(self, obj):
         obj.w = N.zeros((self.n_in, self.n_out))
--- a/pylearn/algorithms/regressor.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/algorithms/regressor.py	Wed Sep 02 14:22:02 2009 -0700
@@ -13,8 +13,18 @@
         self.regularize = regularize

         # ACQUIRE/MAKE INPUT AND TARGET
-        self.input = input if input else T.matrix('input')
-        self.target = target if target else T.matrix('target')
+        if input:
+            self.input = input
+        else:
+            self.input = T.matrix('input')
+
+        if target:
+            self.target = target
+        else:
+            self.target = T.matrix('target')
+        #backport
+        #self.input = input if input else T.matrix('input')
+        #self.target = target if target else T.matrix('target')

         # HYPER-PARAMETERS
         self.lr = T.scalar()
--- a/pylearn/algorithms/stacker.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/algorithms/stacker.py	Wed Sep 02 14:22:02 2009 -0700
@@ -69,7 +69,14 @@
             if isinstance(method, theano.Method) and not hasattr(self, name):
                 if not isinstance(method.inputs, (list,dict)):
                     method.inputs = [method.inputs]
-                inputs = [self.input if x is ll.input else x for x in method.inputs]
+                inputs = []
+                for x in method.inputs:
+                    if x is ll.input:
+                        inputs += [self.input]
+                    else:
+                        inputs += [x]
+                #backport
+                #inputs = [self.input if x is ll.input else x for x in method.inputs]
                 m = theano.Method(inputs, method.outputs, method.updates)
                 setattr(self, name, m)
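The same 2.5-only conditional expression can also appear inside a list comprehension, as it did here; the backport expands it into an explicit accumulation loop. A runnable sketch of that sub-pattern, with illustrative names:

    xs = [1, None, 3]
    default = 0

    # Python 2.5+ only -- ternary inside a list comprehension:
    #outputs = [default if x is None else x for x in xs]

    # Python 2.4-compatible loop, mirroring the stacker.py change:
    outputs = []
    for x in xs:
        if x is None:
            outputs += [default]
        else:
            outputs += [x]

    assert outputs == [1, 0, 3]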
--- a/pylearn/algorithms/tests/test_daa.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/algorithms/tests/test_daa.py	Wed Sep 02 14:22:02 2009 -0700
@@ -6,8 +6,9 @@
 import time

 import pylearn.algorithms.logistic_regression
+from theano.compile.mode import default_mode

-def test_train_daa(mode = theano.Mode('c|py', 'fast_run')):
+def test_train_daa(mode = default_mode):

     ndaa = 3
     daa = models.Stacker([(models.SigmoidXEDenoisingAA, 'hidden')] * ndaa + [(models.BinRegressor, 'output')],
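This hunk also swaps a hard-coded compilation mode for the library default: instead of constructing theano.Mode('c|py', 'fast_run') in the default argument, the test imports whatever default the installed Theano exports (a default_mode name in theano.compile.mode, in the Theano revision this changeset targets). Since default arguments are evaluated once at definition time, the baked-in value is now the shared library default rather than a configuration fixed by the test itself. The idiom in plain Python, with illustrative names:

    DEFAULT_MODE = 'fast_run'          # stand-in for the library-wide default

    def run_test(mode=DEFAULT_MODE):   # old form baked a hand-built config in here
        return mode

    assert run_test() == 'fast_run'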
--- a/pylearn/datasets/MNIST.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/datasets/MNIST.py	Wed Sep 02 14:22:02 2009 -0700
@@ -6,9 +6,9 @@
 import os
 import numpy

-from ..io.pmat import PMat
-from .config import data_root # config
-from .dataset import Dataset
+from pylearn.io.pmat import PMat
+from pylearn.datasets.config import data_root # config
+from pylearn.datasets.dataset import Dataset

 def head(n=10, path=None):
     """Load the first MNIST examples.
@@ -18,7 +18,8 @@
     is the label of the i'th row of x.

     """
-    path = os.path.join(data_root(), 'mnist','mnist_all.pmat') if path is None else path
+    if path is None:
+        path = os.path.join(data_root(), 'mnist','mnist_all.pmat')

     dat = PMat(fname=path)
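The import rewrite above is also part of the 2.4 backport: explicit relative imports were introduced in Python 2.5 by PEP 328 and do not parse on 2.4, so the module switches to absolute imports rooted at the pylearn package. Side by side, quoting the diff itself:

    # Python 2.5+ explicit relative import (PEP 328) -- SyntaxError on 2.4:
    #from ..io.pmat import PMat

    # Python 2.4-compatible absolute import, as used in this changeset:
    from pylearn.io.pmat import PMat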
--- a/pylearn/datasets/config.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/datasets/config.py	Wed Sep 02 14:22:02 2009 -0700
@@ -11,7 +11,11 @@
     if os.getenv(key) is None:
         print >> sys.stderr, "WARNING: Environment variable", key,
         print >> sys.stderr, "is not set. Using default of", default
-    return default if os.getenv(key) is None else os.getenv(key)
+    if os.getenv(key) is None:
+        return default
+    else:
+        return os.getenv(key)
+    #return default if os.getenv(key) is None else os.getenv(key)

 def data_root():
     return env_get('PYLEARN_DATA_ROOT', os.getenv('HOME')+'/data', 'DBPATH')
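For reference, env_get resolves an environment variable and falls back to a default (with a warning on stderr) when it is unset; data_root is the usual entry point. A sketch of its use, with an illustrative path:

    import os
    os.environ['PYLEARN_DATA_ROOT'] = '/tmp/pylearn_data'   # example value only

    from pylearn.datasets.config import data_root
    print data_root()                                        # -> /tmp/pylearn_data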
--- a/pylearn/datasets/smallNorb.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/datasets/smallNorb.py	Wed Sep 02 14:22:02 2009 -0700
@@ -1,7 +1,7 @@
 import os
 import numpy
-from ..io.filetensor import read
-from .config import data_root
+from pylearn.io.filetensor import read
+from pylearn.datasets.config import data_root

 #Path = '/u/bergstrj/pub/data/smallnorb'
 #Path = '/home/fringant2/lisa/louradoj/data/smallnorb'
--- a/pylearn/io/filetensor.py	Thu Jul 30 18:09:17 2009 -0400
+++ b/pylearn/io/filetensor.py	Wed Sep 02 14:22:02 2009 -0700
@@ -129,8 +129,19 @@
         self.magic_t, self.elsize, self.ndim, self.dim, self.dim_size = _read_header(f,debug)
         self.f_start = f.tell()

-        self.readshape = tuple(self.dim[self.ndim-rank:]) if rank <= self.ndim else tuple(self.dim)
-        padding = tuple() if rank <= self.ndim else (1,) * (rank - self.ndim)
+        if rank <= self.ndim:
+            self.readshape = tuple(self.dim[self.ndim-rank:])
+        else:
+            self.readshape = tuple(self.dim)
+
+        #self.readshape = tuple(self.dim[self.ndim-rank:]) if rank <= self.ndim else tuple(self.dim)
+
+        if rank <= self.ndim:
+            padding = tuple()
+        else:
+            padding = (1,) * (rank - self.ndim)
+
+        #padding = tuple() if rank <= self.ndim else (1,) * (rank - self.ndim)
         self.returnshape = padding + self.readshape
         self.readsize = _prod(self.readshape)
         if debug: print 'READ PARAM', self.readshape, self.returnshape, self.readsize
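The two branches above decide how much of the stored tensor shape to read and how many singleton dimensions to prepend when the caller requests a higher rank than the file provides. A standalone mirror of that logic, for illustration only (the dimension values are made up):

    def read_shapes(dim, rank):
        # mirrors the readshape/padding logic in filetensor.py
        ndim = len(dim)
        if rank <= ndim:
            readshape = tuple(dim[ndim - rank:])
            padding = ()
        else:
            readshape = tuple(dim)
            padding = (1,) * (rank - ndim)
        return padding + readshape    # == returnshape

    assert read_shapes((100, 28, 28), 2) == (28, 28)
    assert read_shapes((100, 28, 28), 4) == (1, 100, 28, 28)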