Mercurial > pylearn
comparison: gradient_learner.py @ changeset 13:633453635d51
commit message: "Starting to work on gradient_based_learner.py"
author: bengioy@bengiomac.local
date: Wed, 26 Mar 2008 21:38:08 -0400
parents: (none)
children: 5ede27026e05
comparison (equal / deleted / inserted / replaced): 12:ff4e551490f1 -> 13:633453635d51
2 from learner import * | |
3 from tensor import * | |
4 import gradient | |
5 from compile import Function | |
6 from gradient_based_optimizer import * | |
7 | |
class GradientLearner(Learner):
    """
    Generic Learner for gradient-based optimization of a training criterion
    that can consist in two parts, an additive part over examples, and
    an example-independent part (usually called the regularizer).

    The user provides a Theano formula that maps the fields of a training example
    and parameters to output fields (for the use function), one of which must be a cost
    that is the training criterion to be minimized. The user also provides
    a GradientBasedOptimizer that implements the optimization strategy.
    The inputs, parameters, and outputs are lists of Theano tensors,
    while the example_wise_cost and regularization_term are Theano tensors.
    The user can specify a regularization coefficient that multiplies the
    regularization term. The training algorithm looks for parameters that minimize

        regularization_coefficient * regularization_term(parameters) +
        sum_{inputs in training_set} example_wise_cost(inputs, parameters)

    i.e. the regularization_term should not depend on the inputs, only on the
    parameters. The learned function can map a subset of inputs to a subset of
    outputs (as long as the inputs subset includes all the inputs required in
    the Theano expression for the selected outputs).
    """
    def __init__(self, inputs, parameters, outputs, example_wise_cost, regularization_term,
                 gradient_based_optimizer=None, regularization_coefficient=None):
        """
        :param inputs: list of Theano tensors, the fields of a training example.
        :param parameters: list of Theano tensors to be optimized.
        :param outputs: list of Theano tensors computed by the use function.
        :param example_wise_cost: Theano tensor, per-example part of the criterion.
        :param regularization_term: Theano tensor, input-independent part of the criterion.
        :param gradient_based_optimizer: optimization strategy; defaults to a fresh
            StochasticGradientDescent() per instance.
        :param regularization_coefficient: Theano scalar multiplying the
            regularization term; defaults to astensor(1.0).
        """
        # Build defaults here rather than in the signature: default-argument
        # expressions are evaluated once at def time, so the original code
        # shared a single optimizer object across every GradientLearner.
        if gradient_based_optimizer is None:
            gradient_based_optimizer = StochasticGradientDescent()
        if regularization_coefficient is None:
            regularization_coefficient = astensor(1.0)
        self.inputs = inputs
        self.outputs = outputs
        self.parameters = parameters
        self.example_wise_cost = example_wise_cost
        self.regularization_term = regularization_term
        self.gradient_based_optimizer = gradient_based_optimizer
        self.regularization_coefficient = regularization_coefficient
        # Symbolic gradients of the two criterion components w.r.t. the parameters.
        self.parameters_example_wise_gradient = gradient.grad(example_wise_cost, parameters)
        # BUG FIX: the original referenced the undefined name `regularization`
        # (NameError); the intended tensor is the `regularization_term` parameter.
        self.parameters_regularization_gradient = gradient.grad(
            self.regularization_coefficient * regularization_term, parameters)

    # TODO: implement update(self, training_set) — stubbed out in the original.