pylearn: diff algorithms/logistic_regression.py @ 503:c7ce66b4e8f4
Extensions to algorithms, and some cleanup (by defining a linear_output result).
| author   | Joseph Turian <turian@gmail.com> |
|----------|----------------------------------|
| date     | Wed, 29 Oct 2008 03:29:18 -0400  |
| parents  | 17945defd813                     |
| children | b267a8000f92                     |
```diff
--- a/algorithms/logistic_regression.py	Wed Oct 29 02:08:56 2008 -0400
+++ b/algorithms/logistic_regression.py	Wed Oct 29 03:29:18 2008 -0400
@@ -18,6 +18,8 @@
         self.b = N.zeros(n_out)
         self.lr = 0.01
         self.__hide__ = ['params']
+        self.input_dimension = n_in
+        self.output_dimension = n_out
 
 class Module_Nclass(module.FancyModule):
     InstanceType = LogRegInstanceType
@@ -34,22 +36,35 @@
         self.params = [p for p in [self.w, self.b] if p.owner is None]
 
-        xent, output = nnet.crossentropy_softmax_1hot(
-                T.dot(self.x, self.w) + self.b, self.targ)
+        linear_output = T.dot(self.x, self.w) + self.b
+
+        (xent, softmax, max_pr, argmax) = nnet.crossentropy_softmax_max_and_argmax_1hot(
+                linear_output, self.targ)
         sum_xent = T.sum(xent)
 
-        self.output = output
+        self.softmax = softmax
+        self.argmax = argmax
+        self.max_pr = max_pr
         self.sum_xent = sum_xent
 
+        # Softmax being computed directly.
+        softmax_unsupervised = nnet.softmax(linear_output)
+        self.softmax_unsupervised = softmax_unsupervised
+
         #compatibility with current implementation of stacker/daa or something
         #TODO: remove this, make a wrapper
-        self.cost = sum_xent
+        self.cost = self.sum_xent
         self.input = self.x
+        # TODO: I want to make output = linear_output.
+        self.output = self.softmax_unsupervised
 
         #define the apply method
-        self.pred = T.argmax(T.dot(self.input, self.w) + self.b, axis=1)
+        self.pred = T.argmax(linear_output, axis=1)
         self.apply = module.Method([self.input], self.pred)
 
+        self.validate = module.Method([self.input, self.targ], [self.cost, self.argmax, self.max_pr])
+        self.softmax_output = module.Method([self.input], self.softmax_unsupervised)
+
         if self.params:
             gparams = T.grad(sum_xent, self.params)
```
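For readers skimming the patch: the new `crossentropy_softmax_max_and_argmax_1hot` call returns four values, all derived from the same `linear_output` the change factors out. Below is a minimal NumPy sketch of what those four quantities are, written for this note; the function name is hypothetical and this is not Theano's actual implementation (in particular, the max-subtraction for numerical stability is an assumption).

```python
import numpy as np

def softmax_max_argmax_xent_1hot(x, w, b, targ):
    # Pre-softmax activations: the `linear_output` the patch defines.
    linear_output = np.dot(x, w) + b
    # Softmax over classes; subtracting the row max is an assumed
    # stabilization, not necessarily what Theano's op does internally.
    z = linear_output - linear_output.max(axis=1, keepdims=True)
    e = np.exp(z)
    softmax = e / e.sum(axis=1, keepdims=True)   # class probabilities
    argmax = softmax.argmax(axis=1)              # predicted class per example
    max_pr = softmax.max(axis=1)                 # probability of that prediction
    rows = np.arange(x.shape[0])
    xent = -np.log(softmax[rows, targ])          # 1-hot cross-entropy per example
    return xent, softmax, max_pr, argmax

# Tiny demo with random data (5 examples, 3 inputs, 4 classes).
xent, softmax, max_pr, argmax = softmax_max_argmax_xent_1hot(
    np.random.randn(5, 3), np.random.randn(3, 4),
    np.zeros(4), np.array([0, 1, 2, 3, 0]))
```

Note how the two new compiled methods split these outputs: `validate` takes both inputs and targets and returns `[cost, argmax, max_pr]`, so a caller can check classification error and the model's confidence in one call, while `softmax_output` exposes the probabilities from inputs alone, without requiring targets.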