# HG changeset patch
# User bergstrj@iro.umontreal.ca
# Date 1206591556 14400
# Node ID 60b164a0d84ae46b88ad59f42c6d2e99f2489c1f
# Parent  759d17112b2389d669cc70bbcf101f5bee131aa1
# Parent  5ede27026e05aba55213589b00795f7091b4ee07
merged

diff -r 759d17112b23 -r 60b164a0d84a gradient_learner.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/gradient_learner.py	Thu Mar 27 00:19:16 2008 -0400
@@ -0,0 +1,50 @@
+
+from learner import *
+from tensor import *
+import gradient
+from compile import Function
+from gradient_based_optimizer import *
+
+class GradientLearner(Learner):
+    """
+    Base class for gradient-based optimization of a training criterion
+    that can consist of two parts: an additive part over examples, and
+    an example-independent part (usually called the regularizer).
+    The user provides a Theano formula that maps the fields of a training example
+    and parameters to output fields (for the use function), one of which must be a cost
+    that is the training criterion to be minimized. Subclasses implement
+    a training strategy that uses the function to compute gradients and
+    to compute outputs in the update method.
+    The inputs, parameters, and outputs are lists of Theano tensors,
+    while the example_wise_cost and regularization_term are Theano tensors.
+    The user can specify a regularization coefficient that multiplies the regularization term.
+    The training algorithm looks for parameters that minimize
+        regularization_coefficient * regularization_term(parameters)
+        + sum_{inputs in training_set} example_wise_cost(inputs, parameters)
+    i.e. the regularization_term should not depend on the inputs, only on the parameters.
+    The learned function can map a subset of inputs to a subset of outputs (as long as the input subset
+    includes all the inputs required in the Theano expression for the selected outputs).
+    It is assumed that all the inputs are provided in the training set, but
+    not necessarily when using the learned function.
+ """ + def __init__(self, inputs, parameters, outputs, example_wise_cost, regularization_term, + gradient_based_optimizer=StochasticGradientDescent(), regularization_coefficient = astensor(1.0)): + self.inputs = inputs + self.outputs = outputs + self.parameters = parameters + self.example_wise_cost = example_wise_cost + self.regularization_term = regularization_term + self.gradient_based_optimizer = gradient_based_optimizer + self.regularization_coefficient = regularization_coefficient + self.parameters_example_wise_gradient = gradient.grad(example_wise_cost, parameters) + self.parameters_regularization_gradient = gradient.grad(self.regularization_coefficient * regularization, parameters) + if example_wise_cost not in outputs: + outputs.append(example_wise_cost) + if regularization_term not in outputs: + outputs.append(regularization_term) + self.example_wise_gradient_fn = Function(inputs + parameters, + [self.parameters_example_wise_gradient + self.parameters_regularization_gradient]) + self.use_functions = {frozenset([input.name for input in inputs]) : Function(inputs, outputs)} + + def update(self,training_set): + diff -r 759d17112b23 -r 60b164a0d84a learner.py --- a/learner.py Wed Mar 26 21:05:14 2008 -0400 +++ b/learner.py Thu Mar 27 00:19:16 2008 -0400 @@ -1,6 +1,5 @@ from dataset import * -from statscollector import * class Learner(object): """Base class for learning algorithms, provides an interface @@ -24,15 +23,17 @@ """ raise NotImplementedError - def update(self,training_set): + def update(self,training_set,train_stats_collector=None): """ Continue training a learner, with the evidence provided by the given training set. Hence update can be called multiple times. This is particularly useful in the on-line setting or the sequential (Bayesian or not) settings. The result is a function that can be applied on data, with the same semantics of the Learner.use method. + The user may optionally provide a training StatsCollector that is used to record + some statistics of the outputs computed during training. """ - return self.use + return self.use # default behavior is 'non-adaptive', i.e. update does not do anything def __call__(self,training_set):