view gradient_learner.py @ 19:57f4015e2e09

Iterators extend LookupList
author bergstrj@iro.umontreal.ca
date Thu, 27 Mar 2008 01:59:44 -0400


from learner import *
from tensor import *
import gradient
from compile import Function
from gradient_based_optimizer import *

class GradientLearner(Learner):
    """
    Base class for gradient-based optimization of a training criterion
    that can consist in two parts, an additive part over examples, and
    an example-independent part (usually called the regularizer).
    The user provides a Theano formula that maps the fields of a training example
    and parameters to output fields (for the use function), one of which must be a cost
    that is the training criterion to be minimized. Subclasses implement
    a training strategy that uses the function to compute gradients and
    to compute outputs in the update method.
    The inputs, parameters, and outputs are lists of Theano tensors,
    while the example_wise_cost and regularization_term are Theano tensors.
    The user can specify a regularization coefficient that multiplies the regularization term.
    The training algorithm looks for parameters that minimize
       regularization_coefficienet * regularization_term(parameters) +
       sum_{inputs in training_set} example_wise_cost(inputs,parameters)
    i.e. the regularization_term should not depend on the inputs, only on the parameters.
    The learned function can map a subset of inputs to a subset of outputs (as long as the inputs subset
    includes all the inputs required in the Theano expression for the selected outputs).
    It is assumed that all the inputs are provided in the training set, but
    not necessarily when using the learned function.
    """
    def __init__(self, inputs, parameters, outputs, example_wise_cost, regularization_term,
                 gradient_based_optimizer=StochasticGradientDescent(), regularization_coefficient=astensor(1.0)):
        self.inputs = inputs
        self.outputs = outputs
        self.parameters = parameters
        self.example_wise_cost = example_wise_cost
        self.regularization_term = regularization_term
        self.gradient_based_optimizer = gradient_based_optimizer
        self.regularization_coefficient = regularization_coefficient
        # Gradients of the per-example cost and of the (scaled) regularizer
        # with respect to each parameter.
        self.parameters_example_wise_gradient = gradient.grad(example_wise_cost, parameters)
        self.parameters_regularization_gradient = gradient.grad(self.regularization_coefficient * regularization_term, parameters)
        # Make sure the cost and the regularizer are among the compiled outputs.
        if example_wise_cost not in outputs:
            outputs.append(example_wise_cost)
        if regularization_term not in outputs:
            outputs.append(regularization_term)
        # The total gradient w.r.t. each parameter is the sum of the example-wise
        # cost gradient and the regularization gradient for that parameter.
        self.example_wise_gradient_fn = Function(
            inputs + parameters,
            [g_cost + g_reg for g_cost, g_reg in zip(self.parameters_example_wise_gradient,
                                                     self.parameters_regularization_gradient)])
        # Cache one compiled "use" function, keyed by the set of provided input names.
        self.use_functions = {frozenset([input.name for input in inputs]): Function(inputs, outputs)}
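        # Hedged usage note (not part of the original source): after construction,
        # example_wise_gradient_fn maps concrete input and parameter values to the
        # per-parameter gradient values, e.g. (the value names are hypothetical)
        #
        #   grads = learner.example_wise_gradient_fn(x_val, y_val, w_val, b_val)
        #
        # while use_functions caches a compiled Function, keyed by the frozenset of
        # input names it was compiled for, that maps the inputs to the requested
        # outputs (plus the cost and regularizer appended above).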

    def update(self, training_set):