# HG changeset patch
# User James Bergstra
# Date 1281546965 14400
# Node ID 0181459b53a129701a0ec29c4be910595e884824
# Parent  fdd648c7c58309353a29be9beeb21367b4b875cf
# Parent  a75bf0aca18f9ae34c29c02723d17512bfb812b8
merge

diff -r a75bf0aca18f -r 0181459b53a1 pylearn/shared/layers/sgd.py
--- a/pylearn/shared/layers/sgd.py	Mon Jun 21 15:47:30 2010 -0400
+++ b/pylearn/shared/layers/sgd.py	Wed Aug 11 13:16:05 2010 -0400
@@ -20,27 +20,17 @@
         """
         :param stepsize: the step to take in (negative) gradient direction
         :type stepsize: None, scalar value, or scalar TensorVariable
-
-        :param updates: extra symbolic updates to make when evating either step or step_cost
-            (these override the gradients if necessary)
-        :type updates: dict Variable -> Variable
-        :param auxout: auxiliary outputs, list containing output symbols to
-            compute at the same time as cost (for efficiency)
-        :param methods: Should this module define the step and step_cost methods?
         """
         if len(inputs) != len(gradients):
             raise ValueError('inputs list and gradients list must have same len')
 
         self.inputs = inputs
-        self.params = params
-        self.updates = updates = []
-        self.outputs = outputs = []
-
-        for i, g in zip(inputs, gradients):
-            o = i - stepsize * g
-            outputs.append(o)
-            if hasattr(i, 'value'): # this is true for shared variables, false for most things.
-                updates.append((i, o))
+        self.gradients = gradients
+        self.params = params # contains either nothing or the learning rate
+        self.outputs = [i - stepsize*g for (i,g) in zip(inputs, gradients)]
+        self.updates = [(input, self.outputs[i])
+                for (i,input) in enumerate(self.inputs)
+                if hasattr(input, 'value')] # true for shared variables
 
     @classmethod
     def new(cls, inputs, cost, stepsize, dtype=None):
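
The refactored __init__ body above boils down to a plain SGD step: each input moves against its gradient by stepsize, and every input that is a shared variable additionally yields a (variable, new_value) update pair. Below is a minimal standalone sketch of that construction, not part of the patch. It assumes Theano is installed; the names x, w, cost, and stepsize are illustrative, and the isinstance check stands in for the patch's hasattr(input, 'value') test, which identified shared variables in the Theano of that era.

# Sketch of the update construction used in the new __init__ body (illustrative only).
import numpy
import theano
import theano.tensor as tensor

x = tensor.vector('x')                       # ordinary symbolic input
w = theano.shared(numpy.zeros(3), name='w')  # shared variable: will receive an update pair
cost = (tensor.dot(x, w) - 1.0) ** 2
stepsize = 0.1

inputs = [w]
gradients = [tensor.grad(cost, w)]

# Same pattern as the patched code: outputs are the stepped values, updates are
# (shared_variable, new_value) pairs for those inputs that are shared variables.
outputs = [i - stepsize * g for (i, g) in zip(inputs, gradients)]
updates = [(inp, outputs[i]) for (i, inp) in enumerate(inputs)
           if isinstance(inp, theano.compile.SharedVariable)]

step = theano.function([x], cost, updates=updates)
print(step(numpy.ones(3, dtype=x.dtype)))  # each call moves w one step down the gradient

In the patched class these two lists become self.outputs and self.updates, which the new() classmethod can then hand to a compiled function in the same way.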