pylearn changeset 999:c6d08a760960
added sgd_updates to gd/sgd.py. Modified mcRBM to use it.
| author | James Bergstra <bergstrj@iro.umontreal.ca> |
|---|---|
| date | Tue, 24 Aug 2010 17:01:09 -0400 |
| parents | 8ba8b08e0442 |
| children | d4a14c6c36e0 |
| files | pylearn/algorithms/mcRBM.py, pylearn/gd/sgd.py |
| diffstat | 2 files changed, 22 insertions(+), 11 deletions(-) |
```diff
--- a/pylearn/algorithms/mcRBM.py	Tue Aug 24 16:51:53 2010 -0400
+++ b/pylearn/algorithms/mcRBM.py	Tue Aug 24 17:01:09 2010 -0400
@@ -203,6 +203,7 @@
 import pylearn
 from pylearn.sampling.hmc import HMC_sampler
 from pylearn.io import image_tiling
+from pylearn.gd.sgd import sgd_updates
 
 #TODO: This should be in the datasets folder
 import pylearn.datasets.config
@@ -215,15 +216,6 @@
 
 #TODO: This should be in the nnet part of the library
-def sgd_updates(params, grads, lr):
-    try:
-        float(lr)
-        lr = [lr for p in params]
-    except TypeError:
-        pass
-    updates = [(p, p - plr * gp) for (plr, p, gp) in zip(lr, params, grads)]
-    return updates
-
 def hidden_cov_units_preactivation_given_v(rbm, v, small=0.5):
     """Return argument to the sigmoid that would give mean of covariance hid units
@@ -453,7 +445,7 @@
         sgd_ups = sgd_updates(
                 rbm.params,
                 grads,
-                lr=[2*s_lr, .2*s_lr, .02*s_lr, .1*s_lr, .02*s_lr ])
+                stepsizes=[2*s_lr, .2*s_lr, .02*s_lr, .1*s_lr, .02*s_lr ])
         learn_fn = function([batch_idx, s_lr, s_l1_penalty],
                 outputs=[
                     grads[0].norm(2),
```
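For context on the keyword change above: `sgd_updates` now takes one stepsize per parameter (or a single scalar). Below is a minimal, self-contained sketch of that calling pattern; the toy cost, the parameter names `W` and `b`, and the stepsize ratios are illustrative assumptions, not code from mcRBM.py.

```python
import numpy
import theano
import theano.tensor as TT
from pylearn.gd.sgd import sgd_updates  # the function added by this changeset

# Toy model: two shared parameters that we want updated at different rates.
W = theano.shared(numpy.zeros((5, 3)), name='W')
b = theano.shared(numpy.zeros(3), name='b')

x = TT.matrix('x')
cost = ((TT.dot(x, W) + b) ** 2).sum()
grads = TT.grad(cost, [W, b])

# One stepsize expression per parameter, mirroring the stepsizes=[...] call above.
s_lr = TT.scalar('s_lr')
updates = sgd_updates([W, b], grads, stepsizes=[2 * s_lr, .2 * s_lr])

# theano.function applies each (param, new_value) pair after every call.
train = theano.function([x, s_lr], cost, updates=updates)
```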
```diff
--- a/pylearn/gd/sgd.py	Tue Aug 24 16:51:53 2010 -0400
+++ b/pylearn/gd/sgd.py	Tue Aug 24 17:01:09 2010 -0400
@@ -1,8 +1,27 @@
-"""A stochastic gradient descent minimizer. (Possibly the simplest minimizer.)
+"""A stochastic gradient descent minimizer.
 """
 
 import theano
 
+def sgd_updates(params, grads, stepsizes):
+    """Return a list of (pairs) that can be used as updates in theano.function to implement
+    stochastic gradient descent.
+
+    :param params: variables to adjust in order to minimize some cost
+    :type params: a list of variables (theano.function will require shared variables)
+    :param grads: the gradient on each param (with respect to some cost)
+    :type grads: list of theano expressions
+    :param stepsizes: step by this amount times the negative gradient on each iteration
+    :type stepsizes: [symbolic] scalar or list of one [symbolic] scalar per param
+    """
+    try:
+        iter(stepsizes)
+    except:
+        stepsizes = [stepsizes for p in params]
+    updates = [(p, p - step * gp) for (step, p, gp) in zip(stepsizes, params, grads)]
+    return updates
+
+
 class StochasticGradientDescent(theano.Module):
     """Fixed stepsize gradient descent
```
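As a usage note, the `iter()` check above is what lets a single stepsize (numeric or symbolic scalar) stand in for the whole per-parameter list. A minimal sketch under that assumption; the quadratic toy cost and the parameter name `p` are made up for illustration:

```python
import numpy
import theano
import theano.tensor as TT
from pylearn.gd.sgd import sgd_updates

# A single shared parameter and a toy cost minimized at p == 3.
p = theano.shared(numpy.asarray(5.0), name='p')
cost = (p - 3.0) ** 2
g = TT.grad(cost, p)

# A scalar stepsize is not iterable, so sgd_updates broadcasts it to [0.1].
step = theano.function([], cost, updates=sgd_updates([p], [g], 0.1))

for _ in range(100):
    step()
print(p.get_value())  # close to 3.0 after repeated updates
```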