Mercurial > pylearn
annotate mlp.py @ 259:621faba17c60
created 'dummytests', tests that checks consistency of new weird datasets, where we can't compare with actual values in a matrix, for instance. Useful as a first debugging when creating a dataset
author | Thierry Bertin-Mahieux <bertinmt@iro.umontreal.ca> |
---|---|
date | Tue, 03 Jun 2008 16:41:55 -0400 |
parents | d1359de1ea13 |
children |
rev | line source |
---|---|
132
f6505ec32dc3
Updated documentation slightly
Joseph Turian <turian@gmail.com>
parents:
129
diff
changeset
|
1 """ |
f6505ec32dc3
Updated documentation slightly
Joseph Turian <turian@gmail.com>
parents:
129
diff
changeset
|
2 A straightforward classical feedforward |
f6505ec32dc3
Updated documentation slightly
Joseph Turian <turian@gmail.com>
parents:
129
diff
changeset
|
3 one-hidden-layer neural net, with L2 regularization. |
f6505ec32dc3
Updated documentation slightly
Joseph Turian <turian@gmail.com>
parents:
129
diff
changeset
|
4 This is one of the simplest example of L{Learner}, and illustrates |
f6505ec32dc3
Updated documentation slightly
Joseph Turian <turian@gmail.com>
parents:
129
diff
changeset
|
5 the use of theano. |
f6505ec32dc3
Updated documentation slightly
Joseph Turian <turian@gmail.com>
parents:
129
diff
changeset
|
6 """ |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
7 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
8 from learner import * |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
9 from theano import tensor as t |
118
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
10 from nnet_ops import * |
133 | 11 import math |
175 | 12 from misc import * |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
13 |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
def function(inputs, outputs, linker='c&py'):
    """Compile a theano function mapping *inputs* to *outputs*.

    ``unpack_single=False`` forces the compiled function to always return a
    list, even for a single output, so callers can index results uniformly.
    """
    return theano.function(inputs, outputs, unpack_single=False, linker=linker)
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
16 |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
def randshape(*shape):
    """Return a uniform random array of the given shape, scaled into
    the small interval (-0.0005, 0.0005) — used for weight initialization."""
    noise = numpy.random.rand(*shape) - 0.5
    return noise * 0.001
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
18 |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
19 class ManualNNet(object): |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
20 def __init__(self, ninputs, nhid, nclass, lr, nepochs, |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
21 linker='c&yp', |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
22 hidden_layer=None): |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
23 class Vars: |
187
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
24 def __init__(self, lr, l2coef=0.0): |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
25 lr = t.constant(lr) |
187
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
26 l2coef = t.constant(l2coef) |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
27 input = t.matrix('input') # n_examples x n_inputs |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
28 target = t.ivector('target') # n_examples x 1 |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
29 W2 = t.matrix('W2') |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
30 b2 = t.vector('b2') |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
31 |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
32 if hidden_layer: |
187
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
33 hid, hid_params, hid_ivals, hid_regularization = hidden_layer(input) |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
34 else: |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
35 W1 = t.matrix('W1') |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
36 b1 = t.vector('b1') |
187
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
37 hid = t.tanh(b1 + t.dot(input, W1)) |
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
38 hid_params = [W1, b1] |
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
39 hid_regularization = l2coef * t.sum(W1*W1) |
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
40 hid_ivals = [randshape(ninputs, nhid), randshape(nhid)] |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
41 |
187
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
42 params = [W2, b2] + hid_params |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
43 ivals = [randshape(nhid, nclass), randshape(nclass)]\ |
187
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
44 + hid_ivals |
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
45 nll, predictions = crossentropy_softmax_1hot( b2 + t.dot(hid, W2), target) |
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
46 regularization = l2coef * t.sum(W2*W2) + hid_regularization |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
47 output_class = t.argmax(predictions,1) |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
48 loss_01 = t.neq(output_class, target) |
187
ebbb0e749565
added mlp_factory_approach
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
186
diff
changeset
|
49 g_params = t.grad(nll + regularization, params) |
186
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
50 new_params = [t.sub_inplace(p, lr * gp) for p,gp in zip(params, g_params)] |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
51 self.__dict__.update(locals()); del self.self |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
52 self.nhid = nhid |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
53 self.nclass = nclass |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
54 self.nepochs = nepochs |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
55 self.v = Vars(lr) |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
56 self.params = None |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
57 |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
58 def update(self, trainset): |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
59 params = self.v.ivals |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
60 update_fn = function( |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
61 [self.v.input, self.v.target] + self.v.params, |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
62 [self.v.nll] + self.v.new_params) |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
63 for i in xrange(self.nepochs): |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
64 for input, target in trainset.minibatches(['input', 'target'], |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
65 minibatch_size=min(32, len(trainset))): |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
66 dummy = update_fn(input, target[:,0], *params) |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
67 if 0: print dummy[0] #the nll |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
68 return self.use |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
69 __call__ = update |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
70 |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
71 def use(self, dset, |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
72 output_fieldnames=['output_class'], |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
73 test_stats_collector=None, |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
74 copy_inputs=False, |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
75 put_stats_in_output_dataset=True, |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
76 output_attributes=[]): |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
77 inputs = [self.v.input, self.v.target] + self.v.params |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
78 fn = function(inputs, [getattr(self.v, name) for name in output_fieldnames]) |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
79 target = dset.fields()['target'] if ('target' in dset.fields()) else numpy.zeros((1,1),dtype='int64') |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
80 return ApplyFunctionDataSet(dset, |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
81 lambda input, target: fn(input, target[:,0], *self.v.ivals), |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
82 output_fieldnames) |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
83 |
562f308873f0
added ManualNNet
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
183
diff
changeset
|
84 |
129
4c2280edcaf5
Fixed typos in learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
126
diff
changeset
|
85 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner): |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
86 """ |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
87 Implement a straightforward classical feedforward |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
88 one-hidden-layer neural net, with L2 regularization. |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
89 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
90 The predictor parameters are obtained by minibatch/online gradient descent. |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
91 Training can proceed sequentially (with multiple calls to update with |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
92 different disjoint subsets of the training sets). |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
93 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
94 Hyper-parameters: |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
95 - L2_regularizer |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
96 - learning_rate |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
97 - n_hidden |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
98 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
99 For each (input_t,output_t) pair in a minibatch,:: |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
100 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
101 output_activations_t = b2+W2*tanh(b1+W1*input_t) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
102 output_t = softmax(output_activations_t) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
103 output_class_t = argmax(output_activations_t) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
104 class_error_t = 1_{output_class_t != target_t} |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
105 nll_t = -log(output_t[target_t]) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
106 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
107 and the training criterion is:: |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
108 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
109 loss = L2_regularizer*(||W1||^2 + ||W2||^2) + sum_t nll_t |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
110 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
111 The parameters are [b1,W1,b2,W2] and are obtained by minimizing the loss by |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
112 stochastic minibatch gradient descent:: |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
113 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
114 parameters[i] -= learning_rate * dloss/dparameters[i] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
115 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
116 The fields and attributes expected and produced by use and update are the following: |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
117 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
118 - Input and output fields (example-wise quantities): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
119 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
120 - 'input' (always expected by use and update) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
121 - 'target' (optionally expected by use and always by update) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
122 - 'output' (optionally produced by use) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
123 - 'output_class' (optionally produced by use) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
124 - 'class_error' (optionally produced by use) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
125 - 'nll' (optionally produced by use) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
126 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
127 - optional attributes (optionally expected as input_dataset attributes) |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
128 (warning, this may be dangerous, the 'use' method will use those provided in the |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
129 input_dataset rather than those learned during 'update'; currently no support |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
130 for providing these to update): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
131 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
132 - 'L2_regularizer' |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
133 - 'b1' |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
134 - 'W1' |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
135 - 'b2' |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
136 - 'W2' |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
137 - 'parameters' = [b1, W1, b2, W2] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
138 - 'regularization_term' |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
139 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
140 """ |
183
25d0a0c713da
did some debugging of test_mlp
Olivier Breuleux <breuleuo@iro.umontreal.ca>
parents:
182
diff
changeset
|
141 def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None,linker='c|py'): |
133 | 142 self._n_inputs = n_inputs |
121 | 143 self._n_outputs = n_classes |
144 self._n_hidden = n_hidden | |
145 self._init_range = init_range | |
133 | 146 self._max_n_epochs = max_n_epochs |
147 self._minibatch_size = minibatch_size | |
121 | 148 self.learning_rate = learning_rate # this is the float |
134
3f4e5c9bdc5e
Fixes to ApplyFunctionDataSet and other things to make learner and mlp work
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
133
diff
changeset
|
149 self.L2_regularizer = L2_regularizer |
121 | 150 self._learning_rate = t.scalar('learning_rate') # this is the symbol |
151 self._input = t.matrix('input') # n_examples x n_inputs | |
183
25d0a0c713da
did some debugging of test_mlp
Olivier Breuleux <breuleuo@iro.umontreal.ca>
parents:
182
diff
changeset
|
152 self._target = t.lmatrix('target') # n_examples x 1 |
134
3f4e5c9bdc5e
Fixes to ApplyFunctionDataSet and other things to make learner and mlp work
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
133
diff
changeset
|
153 self._target_vector = self._target[:,0] |
121 | 154 self._L2_regularizer = t.scalar('L2_regularizer') |
155 self._W1 = t.matrix('W1') | |
156 self._W2 = t.matrix('W2') | |
157 self._b1 = t.row('b1') | |
158 self._b2 = t.row('b2') | |
126 | 159 self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2)) |
121 | 160 self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T) |
180
2698c0feeb54
mlp seems to work!
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
179
diff
changeset
|
161 self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target_vector) |
155
ae5651a3696b
new argmax calling convention
James Bergstra <bergstrj@iro.umontreal.ca>
parents:
134
diff
changeset
|
162 self._output_class = t.argmax(self._output,1) |
134
3f4e5c9bdc5e
Fixes to ApplyFunctionDataSet and other things to make learner and mlp work
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
133
diff
changeset
|
163 self._class_error = t.neq(self._output_class,self._target_vector) |
121 | 164 self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0] |
183
25d0a0c713da
did some debugging of test_mlp
Olivier Breuleux <breuleuo@iro.umontreal.ca>
parents:
182
diff
changeset
|
165 OnlineGradientTLearner.__init__(self, linker = linker) |
121 | 166 |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
167 def attributeNames(self): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
168 return ["parameters","b1","W2","b2","W2", "L2_regularizer","regularization_term"] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
169 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
170 def parameterAttributes(self): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
171 return ["b1","W1", "b2", "W2"] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
172 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
173 def updateMinibatchInputFields(self): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
174 return ["input","target"] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
175 |
180
2698c0feeb54
mlp seems to work!
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
179
diff
changeset
|
176 def updateMinibatchInputAttributes(self): |
2698c0feeb54
mlp seems to work!
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
179
diff
changeset
|
177 return OnlineGradientTLearner.updateMinibatchInputAttributes(self)+["L2_regularizer"] |
2698c0feeb54
mlp seems to work!
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
179
diff
changeset
|
178 |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
179 def updateEndOutputAttributes(self): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
180 return ["regularization_term"] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
181 |
118
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
182 def lossAttribute(self): |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
183 return "minibatch_criterion" |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
184 |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
185 def defaultOutputFields(self, input_fields): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
186 output_fields = ["output", "output_class",] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
187 if "target" in input_fields: |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
188 output_fields += ["class_error", "nll"] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
189 return output_fields |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
190 |
182
4afb41e61fcf
strange bug in linker obtained by 'python test_mlp.py'
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
180
diff
changeset
|
191 def updateMinibatch(self,minibatch): |
4afb41e61fcf
strange bug in linker obtained by 'python test_mlp.py'
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
180
diff
changeset
|
192 MinibatchUpdatesTLearner.updateMinibatch(self,minibatch) |
183
25d0a0c713da
did some debugging of test_mlp
Olivier Breuleux <breuleuo@iro.umontreal.ca>
parents:
182
diff
changeset
|
193 #print self.nll |
182
4afb41e61fcf
strange bug in linker obtained by 'python test_mlp.py'
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
180
diff
changeset
|
194 |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
195 def allocate(self,minibatch): |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
196 minibatch_n_inputs = minibatch["input"].shape[1] |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
197 if not self._n_inputs: |
118
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
198 self._n_inputs = minibatch_n_inputs |
134
3f4e5c9bdc5e
Fixes to ApplyFunctionDataSet and other things to make learner and mlp work
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
133
diff
changeset
|
199 self.b1 = numpy.zeros((1,self._n_hidden)) |
3f4e5c9bdc5e
Fixes to ApplyFunctionDataSet and other things to make learner and mlp work
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
133
diff
changeset
|
200 self.b2 = numpy.zeros((1,self._n_outputs)) |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
201 self.forget() |
118
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
202 elif self._n_inputs!=minibatch_n_inputs: |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
203 # if the input changes dimension on the fly, we resize and forget everything |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
204 self.forget() |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
205 |
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
206 def forget(self): |
118
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
207 if self._n_inputs: |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
208 r = self._init_range/math.sqrt(self._n_inputs) |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
209 self.W1 = numpy.random.uniform(low=-r,high=r, |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
210 size=(self._n_hidden,self._n_inputs)) |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
211 r = self._init_range/math.sqrt(self._n_hidden) |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
212 self.W2 = numpy.random.uniform(low=-r,high=r, |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
213 size=(self._n_outputs,self._n_hidden)) |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
214 self.b1[:]=0 |
d0a1bd0378c6
Finished draft of OneHiddenLayerNNetClassifier to debut learner.py
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
111
diff
changeset
|
215 self.b2[:]=0 |
133 | 216 self._n_epochs=0 |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
217 |
133 | 218 def isLastEpoch(self): |
219 self._n_epochs +=1 | |
220 return self._n_epochs>=self._max_n_epochs | |
111
88257dfedf8c
Added another work in progress, for mlp's
bengioy@bengiomac.local
parents:
diff
changeset
|
221 |
180
2698c0feeb54
mlp seems to work!
Yoshua Bengio <bengioy@iro.umontreal.ca>
parents:
179
diff
changeset
|
222 def debug_updateMinibatch(self,minibatch): |
178 | 223 # make sure all required fields are allocated and initialized |
224 self.allocate(minibatch) | |
225 input_attributes = self.names2attributes(self.updateMinibatchInputAttributes()) | |
226 input_fields = minibatch(*self.updateMinibatchInputFields()) | |
227 print 'input attributes', input_attributes | |
228 print 'input fields', input_fields | |
229 results = self.update_minibatch_function(*(input_attributes+input_fields)) | |
230 print 'output attributes', self.updateMinibatchOutputAttributes() | |
231 print 'results', results | |
232 self.setAttributes(self.updateMinibatchOutputAttributes(), | |
233 results) | |
234 | |
235 if 0: | |
236 print 'n0', self.names2OpResults(self.updateMinibatchOutputAttributes()+ self.updateMinibatchInputFields()) | |
237 print 'n1', self.names2OpResults(self.updateMinibatchOutputAttributes()) | |
238 print 'n2', self.names2OpResults(self.updateEndInputAttributes()) | |
239 print 'n3', self.names2OpResults(self.updateEndOutputAttributes()) | |
240 |