# HG changeset patch
# User Olivier Breuleux
# Date 1231804030 18000
# Node ID 605ab704abc3f1ad1d1612d69efced6f3e08dfe2
# Parent  7ddb286162f66656aa6ca983ece7c38e5d264697
# Parent  8d0b73c7d76803d85c876eb2167bac798bed5767
merge

diff -r 7ddb286162f6 -r 605ab704abc3 pylearn/algorithms/aa.py
--- a/pylearn/algorithms/aa.py	Mon Jan 12 18:46:46 2009 -0500
+++ b/pylearn/algorithms/aa.py	Mon Jan 12 18:47:10 2009 -0500
@@ -4,7 +4,7 @@
 from theano.tensor import nnet as NN
 import numpy as N
 
-class AutoEncoder(theano.FancyModule):
+class AutoEncoder(theano.Module):
 
     def __init__(self, input = None, regularize = True, tie_weights = True):
         super(AutoEncoder, self).__init__()
@@ -64,7 +64,7 @@
 
     def _instance_initialize(self, obj, input_size = None, hidden_size = None, seed = None, **init):
         if (input_size is None) ^ (hidden_size is None):
-            raise ValueError("Must specify hidden_size and target_size or neither.")
+            raise ValueError("Must specify hidden_size and input_size or neither.")
         super(AutoEncoder, self)._instance_initialize(obj, **init)
         if seed is not None:
             R = N.random.RandomState(seed)
diff -r 7ddb286162f6 -r 605ab704abc3 pylearn/algorithms/cost.py
--- a/pylearn/algorithms/cost.py	Mon Jan 12 18:46:46 2009 -0500
+++ b/pylearn/algorithms/cost.py	Mon Jan 12 18:47:10 2009 -0500
@@ -10,7 +10,6 @@
 """
 
 import theano.tensor as T
-from xlogx import xlogx
 
 def quadratic(target, output, axis=1):
     return T.mean(T.sqr(target - output), axis=axis)
@@ -28,5 +27,5 @@
     different shapes then the result will be garbled.
     """
     return -(target * T.log(output) + (1 - target) * T.log(1 - output)) \
-        + (xlogx(target) + xlogx(1 - target))
+        + (T.xlogx(target) + T.xlogx(1 - target))
     # return cross_entropy(target, output, axis) - cross_entropy(target, target, axis)
diff -r 7ddb286162f6 -r 605ab704abc3 pylearn/algorithms/logistic_regression.py
--- a/pylearn/algorithms/logistic_regression.py	Mon Jan 12 18:46:46 2009 -0500
+++ b/pylearn/algorithms/logistic_regression.py	Mon Jan 12 18:47:10 2009 -0500
@@ -48,7 +48,7 @@
         else:
             # TODO: when above is fixed, remove this hack (need an argmax
             # which is independent of targets)
-            self.argmax_standalone = T.argmax(self.linear_output);
+            self.argmax_standalone = T.argmax(self.linear_output)
         (self._xent, self.softmax, self._max_pr, self.argmax) =\
                 nnet.crossentropy_softmax_max_and_argmax_1hot(
                 self.linear_output, self.target)