Mercurial > pylearn
changeset 756:8447bc9bb2d4
merge
author | Pascal Lamblin <lamblinp@iro.umontreal.ca> |
---|---|
date | Tue, 02 Jun 2009 21:15:41 -0400 |
parents | 6a703c5f2391 (current diff) 390d8c5a1fee (diff) |
children | 61a3608d5767 1e0fa60bfacd |
files | |
diffstat | 4 files changed, 34 insertions(+), 15 deletions(-) [+] |
line wrap: on
line diff
--- a/pylearn/algorithms/exponential_mean.py	Tue Jun 02 21:15:21 2009 -0400
+++ b/pylearn/algorithms/exponential_mean.py	Tue Jun 02 21:15:41 2009 -0400
@@ -14,6 +14,12 @@
 
     :math:`self.curval = (1.0 - (1.0/max_denom)) * self.old_curval + (1.0/max_denom) * x`
 
+
+    The symbolic buffer containing the running mean is called `old_curval`. (This has a value
+    in the ModuleInstance).
+
+    The symbolic variable for the updated running mean is called `curval`.
+
     """
 
     max_denom = None
--- a/pylearn/algorithms/sgd.py	Tue Jun 02 21:15:21 2009 -0400
+++ b/pylearn/algorithms/sgd.py	Tue Jun 02 21:15:41 2009 -0400
@@ -4,10 +4,16 @@
 import theano
 
 class StochasticGradientDescent(theano.Module):
-    """Fixed stepsize gradient descent"""
+    """Fixed stepsize gradient descent
+
+    Methods for gradient descent are:
+    - step(arg_vals) which returns None and updates the params
+    - step_cost(arg_vals) which returns the cost value, and updates the params
+
+    """
     def __init__(self, args, cost, params,
             gradients=None, stepsize=None,
-            updates=None, auxout=None):
+            updates=None, auxout=None, methods=True):
         """
         :param stepsize: the step to take in (negative) gradient direction
         :type stepsize: None, scalar value, or scalar TensorVariable
@@ -15,8 +21,9 @@
         :param updates: extra symbolic updates to make when evating either step or step_cost
         (these override the gradients if necessary)
         :type updatess: dict Variable -> Variable
-        :type auxout: auxiliary outputs, list containing output symbols to
+        :param auxout: auxiliary outputs, list containing output symbols to
           compute at the same time as cost (for efficiency)
+        :param methods: Should this module define the step and step_cost methods?
         """
         super(StochasticGradientDescent, self).__init__()
         self.stepsize_init = None
@@ -38,13 +45,19 @@
         if updates is not None:
             self._updates.update(updates)
 
-        auxout = auxout if auxout else []
-        self.step = theano.Method(
-                args, auxout,
-                updates=self._updates)
-        self.step_cost = theano.Method(
-                args, [cost]+auxout,
-                updates=self._updates)
+        if methods:
+            if auxout is None:
+                self.step = theano.Method(args, [], updates=self._updates)
+                self.step_cost = theano.Method(args, cost, updates=self._updates)
+            else:
+                # step cost always returns a list if auxout
+                self.step = theano.Method(
+                        args, [] + auxout,
+                        updates=self._updates)
+                self.step_cost = theano.Method(
+                        args, [cost]+auxout,
+                        updates=self._updates)
+
 
     updates = property(lambda self: self._updates.copy())
--- a/pylearn/external/wrap_libsvm.py	Tue Jun 02 21:15:21 2009 -0400
+++ b/pylearn/external/wrap_libsvm.py	Tue Jun 02 21:15:41 2009 -0400
@@ -66,7 +66,7 @@
 
 svm_problem = libsvm.svm_problem
 svm_parameter = libsvm.svm_parameter
-RBF = libsvm.svm_RBF
+RBF = libsvm.RBF
 
 
 ####################################
@@ -159,7 +159,7 @@
 def train_rbf_model(train_X, train_Y, C, gamma):
     param = libsvm.svm_parameter(C=C, kernel_type=libsvm.RBF, gamma=gamma)
     problem = libsvm.svm_problem(train_Y, train_X)
-    model libsvm.svm_model(problem, param)
+    model = svm_model(problem, param)
 
     #save_filename = state.save_filename
     #model.save(save_filename)
@@ -181,7 +181,7 @@
             train_set=None,
             svm_param=dict(kernel='RBF', C=C, gamma=g),
             save_filename='model_RBF_C%f_G%f.libsvm')
-        for C in C_grid,
+        for C in C_grid
         for g in gamma_grid]
 
     # will return quickly if jobs have already run
--- a/pylearn/io/image_tiling.py	Tue Jun 02 21:15:21 2009 -0400
+++ b/pylearn/io/image_tiling.py	Tue Jun 02 21:15:41 2009 -0400
@@ -5,10 +5,10 @@
 import numpy
 from PIL import Image
 
-def scale_to_unit_interval(ndar):
+def scale_to_unit_interval(ndar,eps=1e-8):
     ndar = ndar.copy()
     ndar -= ndar.min()
-    ndar *= 1.0 / ndar.max()
+    ndar *= 1.0 / (ndar.max()+eps)
     return ndar
 
 def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0,0),