Mercurial > pylearn
changeset 499:a419edf4e06c
removed unpicklable nested classes in logistic regression
author | James Bergstra <bergstrj@iro.umontreal.ca> |
---|---|
date | Tue, 28 Oct 2008 12:57:49 -0400 |
parents | 2be795cc5c3a |
children | 3c60c2db0319 |
files | algorithms/logistic_regression.py |
diffstat | 1 files changed, 17 insertions(+), 21 deletions(-) [+] |
line wrap: on
line diff
--- a/algorithms/logistic_regression.py	Tue Oct 28 12:25:15 2008 -0400
+++ b/algorithms/logistic_regression.py	Tue Oct 28 12:57:49 2008 -0400
@@ -7,21 +7,22 @@
 import numpy as N
 
+class LogRegInstanceType(module.FancyModuleInstance):
+    def initialize(self, n_in, n_out=1, rng=N.random):
+        #self.component is the LogisticRegressionTemplate instance that built this guy.
+
+        self.w = N.zeros((n_in, n_out))
+        self.b = N.zeros(n_out)
+        self.lr = 0.01
+        self.__hide__ = ['params']
+
 class Module_Nclass(module.FancyModule):
-    class InstanceType(module.FancyModuleInstance):
-        def initialize(self, n_in, n_out, rng=N.random):
-            #self.component is the LogisticRegressionTemplate instance that built this guy.
+    InstanceType = LogRegInstanceType
 
-            self.w = N.zeros((n_in, n_out))
-            self.b = N.zeros(n_out)
-            self.lr = 0.01
-            self.__hide__ = ['params']
-
-    def __init__(self, input=None, targ=None, w=None, b=None, lr=None, regularize=False):
+    def __init__(self, x=None, targ=None, w=None, b=None, lr=None, regularize=False):
         super(Module_Nclass, self).__init__() #boilerplate
 
-        self.input = input if input is not None else T.matrix('input')
+        self.x = x if x is not None else T.matrix('input')
         self.targ = targ if targ is not None else T.lvector()
 
         self.w = w if w is not None else module.Member(T.dmatrix())
@@ -31,12 +32,16 @@
         self.params = [p for p in [self.w, self.b] if p.owner is None]
 
         xent, output = nnet.crossentropy_softmax_1hot(
-                T.dot(self.input, self.w) + self.b, self.targ)
+                T.dot(self.x, self.w) + self.b, self.targ)
         sum_xent = T.sum(xent)
 
         self.output = output
         self.sum_xent = sum_xent
+
+        #compatibility with current implementation of stacker/daa or something
+        #TODO: remove this, make a wrapper
         self.cost = sum_xent
+        self.input = self.x
 
         #define the apply method
         self.pred = T.argmax(T.dot(self.input, self.w) + self.b, axis=1)
@@ -49,14 +54,7 @@
                 updates = dict((p, p - self.lr * g) for p, g in zip(self.params, gparams)))
 
 class Module(module.FancyModule):
-    class InstanceType(module.FancyModuleInstance):
-        def initialize(self, n_in):
-            #self.component is the LogisticRegressionTemplate instance that built this guy.
-
-            self.w = N.random.randn(n_in,1)
-            self.b = N.random.randn(1)
-            self.lr = 0.01
-            self.__hide__ = ['params']
+    InstanceType = LogRegInstanceType
 
     def __init__(self, input=None, targ=None, w=None, b=None, lr=None, regularize=False):
         super(Module, self).__init__() #boilerplate
@@ -89,8 +87,6 @@
         self.update = module.Method([self.input, self.targ], sum_xent,
                 updates = dict((p, p - self.lr * g) for p, g in zip(self.params, gparams)))
 
-
-
 class Learner(object):
     """TODO: Encapsulate the algorithm for finding an optimal regularization coefficient"""
     pass