changeset 943:0181459b53a1
merge
| author   | James Bergstra <bergstrj@iro.umontreal.ca>        |
|----------|---------------------------------------------------|
| date     | Wed, 11 Aug 2010 13:16:05 -0400                   |
| parents  | fdd648c7c583 (diff), a75bf0aca18f (current diff)  |
| children | 1529c84e460f                                      |
| files    | pylearn/shared/layers/sgd.py                      |
| diffstat | 1 files changed, 6 insertions(+), 16 deletions(-) |
--- a/pylearn/shared/layers/sgd.py	Mon Jun 21 15:47:30 2010 -0400
+++ b/pylearn/shared/layers/sgd.py	Wed Aug 11 13:16:05 2010 -0400
@@ -20,27 +20,17 @@
         """
         :param stepsize: the step to take in (negative) gradient direction
         :type stepsize: None, scalar value, or scalar TensorVariable
-
-        :param updates: extra symbolic updates to make when evating either step or step_cost
-            (these override the gradients if necessary)
-        :type updates: dict Variable -> Variable
-        :param auxout: auxiliary outputs, list containing output symbols to
-            compute at the same time as cost (for efficiency)
-        :param methods: Should this module define the step and step_cost methods?
         """
         if len(inputs) != len(gradients):
             raise ValueError('inputs list and gradients list must have same len')
         self.inputs = inputs
-        self.params = params
-        self.updates = updates = []
-        self.outputs = outputs = []
-
-        for i, g in zip(inputs, gradients):
-            o = i - stepsize * g
-            outputs.append(o)
-            if hasattr(i, 'value'): # this is true for shared variables, false for most things.
-                updates.append((i, o))
+        self.gradients = gradients
+        self.params = params  # contains either nothing or the learning rate
+        self.outputs = [i - stepsize*g for (i,g) in zip(inputs, gradients)]
+        self.updates = [(input, self.outputs[i])
+                for (i,input) in enumerate(self.inputs)
+                if hasattr(input, 'value')] # true for shared variables
 
     @classmethod
     def new(cls, inputs, cost, stepsize, dtype=None):
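The patch replaces the explicit loop with two list comprehensions: one building the symbolic SGD outputs `input - stepsize*gradient`, and one collecting `(input, output)` update pairs only for inputs that are shared variables, since only shared variables can be updated in place by `theano.function`. Below is a minimal standalone sketch of that logic; the helper name `make_sgd_updates` and the toy squared-error cost are illustrative assumptions, not part of pylearn.

```python
import numpy
import theano
import theano.tensor as T

def make_sgd_updates(inputs, gradients, stepsize):
    """Build one gradient-step output per input, plus (input, output)
    update pairs for the inputs that are shared variables."""
    if len(inputs) != len(gradients):
        raise ValueError('inputs list and gradients list must have same len')
    outputs = [inp - stepsize * g for (inp, g) in zip(inputs, gradients)]
    # The patch detects shared variables with hasattr(input, 'value'), which
    # matches the 2010-era Theano API; this sketch uses an isinstance check
    # so it also behaves the same way on later Theano releases.
    updates = [(inp, outputs[idx])
               for (idx, inp) in enumerate(inputs)
               if isinstance(inp, theano.compile.SharedVariable)]
    return outputs, updates

# Usage: one SGD step on a squared-error cost for a shared weight vector.
x = T.vector('x')
W = theano.shared(numpy.zeros(3), name='W')
cost = T.sum((T.dot(W, x) - 1.0) ** 2)
outputs, updates = make_sgd_updates([W], [T.grad(cost, W)], stepsize=0.1)
step = theano.function([x], cost, updates=updates)
```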