changeset 942:fdd648c7c583
shared/layers/sgd removed cruft
author:    James Bergstra <bergstrj@iro.umontreal.ca>
date:      Fri, 16 Jul 2010 14:20:48 -0400
parents:   57ac1af9c17c
children:  0181459b53a1
files:     pylearn/shared/layers/sgd.py
diffstat:  1 files changed, 6 insertions(+), 16 deletions(-)
--- a/pylearn/shared/layers/sgd.py	Tue Nov 17 15:20:40 2009 -0500
+++ b/pylearn/shared/layers/sgd.py	Fri Jul 16 14:20:48 2010 -0400
@@ -20,27 +20,17 @@
         """
         :param stepsize: the step to take in (negative) gradient direction
         :type stepsize: None, scalar value, or scalar TensorVariable
-
-        :param updates: extra symbolic updates to make when evating either step or step_cost
-            (these override the gradients if necessary)
-        :type updates: dict Variable -> Variable
-        :param auxout: auxiliary outputs, list containing output symbols to
-            compute at the same time as cost (for efficiency)
-        :param methods: Should this module define the step and step_cost methods?
         """
         if len(inputs) != len(gradients):
             raise ValueError('inputs list and gradients list must have same len')
         self.inputs = inputs
-        self.params = params
-        self.updates = updates = []
-        self.outputs = outputs = []
-
-        for i, g in zip(inputs, gradients):
-            o = i - stepsize * g
-            outputs.append(o)
-            if hasattr(i, 'value'): # this is true for shared variables, false for most things.
-                updates.append((i, o))
+        self.gradients = gradients
+        self.params = params # contains either nothing or the learning rate
+        self.outputs = [i - stepsize*g for (i,g) in zip(inputs, gradients)]
+        self.updates = [(input, self.outputs[i])
+                for (i,input) in enumerate(self.inputs)
+                if hasattr(input, 'value')] # true for shared variables
 
     @classmethod
     def new(cls, inputs, cost, stepsize, dtype=None):
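
For context, a minimal sketch of the update pattern the rewritten code builds. This is not part of the changeset: it uses plain Theano, and the names w, x, stepsize and the quadratic cost are made up for illustration. The pattern is the same as in the diff: each output is input - stepsize * gradient, and only shared variables (the objects the hasattr(input, 'value') test detects) get an (input, output) update pair that Theano applies in place on each call.

    # Sketch only: plain Theano, hypothetical cost and variable names.
    import numpy
    import theano
    import theano.tensor as T

    stepsize = 0.1
    w = theano.shared(numpy.zeros(3), name='w')   # shared variable -> gets an update pair
    x = T.dvector('x')                            # ordinary symbolic input -> no update pair

    cost = ((w - x) ** 2).sum()                   # made-up quadratic cost, for illustration
    gw = T.grad(cost, w)

    new_w = w - stepsize * gw                     # the descent step, like self.outputs above
    updates = [(w, new_w)]                        # pair each shared variable with its new value

    step = theano.function([x], cost, updates=updates)
    print(step(numpy.ones(3)))                    # each call moves w one step toward x
    print(w.get_value())

The point of the hasattr(input, 'value') check in the diff is to separate shared variables, whose stored value can be overwritten through an updates pair, from ordinary symbolic inputs, for which only the new output expression is returned.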