comparison mlp.py @ 178:4090779e39a9

merged
author James Bergstra <bergstrj@iro.umontreal.ca>
date Tue, 13 May 2008 15:12:20 -0400
parents ae5651a3696b
children 9911d2cc3c01
comparison of revisions 177:69759976b3ac and 178:4090779e39a9
(legend: ' ' equal, '+' inserted, '-' deleted)
 from learner import *
 from theano import tensor as t
 from nnet_ops import *
 import math
+
+def sum_l2_cost(*params):
+    p = params[0]
+    rval = t.sum(p * p)
+    for p in params[1:]:
+        rval = rval + t.sum(p * p)
+    return rval
+
+def activation(w, b, v, c, x):
+    return t.dot(t.tanh(t.dot(x, w) + b), v) + c
+def nll(w, b, v, c, x, y):
+    return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[0]
+def output(w, b, v, c, x, y):
+    return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[1]
+
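Note: the inserted helpers construct Theano graph expressions rather than computing values directly. sum_l2_cost accumulates the squared L2 norm of every parameter, and activation is the affine-tanh-affine map of a one-hidden-layer network; nll and output pass a prediction expression (presumably defined elsewhere in mlp.py) through crossentropy_softmax_1hot, which the code indexes at [0] for the loss and [1] for the softmax output. Below is a plain-NumPy sketch of the same quantities; the names and shapes are illustrative assumptions, not part of the patch:

 import numpy as np

 def sum_l2_cost_np(*params):
     # sum of squared entries over every parameter array
     return sum((p * p).sum() for p in params)

 def activation_np(w, b, v, c, x):
     # hidden layer tanh(x.w + b), then affine output layer (v, c)
     return np.dot(np.tanh(np.dot(x, w) + b), v) + c

 rng = np.random.RandomState(0)
 x = rng.randn(5, 4)                    # minibatch: 5 examples, 4 inputs (assumed shapes)
 w, b = rng.randn(4, 3), np.zeros(3)    # input-to-hidden weights and biases
 v, c = rng.randn(3, 2), np.zeros(2)    # hidden-to-output weights and biases
 scores = activation_np(w, b, v, c, x)  # (5, 2) unnormalized class scores
 penalty = sum_l2_cost_np(w, b, v, c)   # scalar L2 regularization term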
 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):
     """
     Implement a straightforward classical feedforward
...
     - 'W2'
     - 'parameters' = [b1, W1, b2, W2]
     - 'regularization_term'

     """
-
     def __init__(self, n_hidden, n_classes, learning_rate, max_n_epochs, L2_regularizer=0, init_range=1., n_inputs=None, minibatch_size=None):
         self._n_inputs = n_inputs
         self._n_outputs = n_classes
         self._n_hidden = n_hidden
         self._init_range = init_range
...

     def isLastEpoch(self):
         self._n_epochs += 1
         return self._n_epochs >= self._max_n_epochs

+    def updateMinibatch(self, minibatch):
+        # make sure all required fields are allocated and initialized
+        self.allocate(minibatch)
+        input_attributes = self.names2attributes(self.updateMinibatchInputAttributes())
+        input_fields = minibatch(*self.updateMinibatchInputFields())
+        print 'input attributes', input_attributes
+        print 'input fields', input_fields
+        results = self.update_minibatch_function(*(input_attributes + input_fields))
+        print 'output attributes', self.updateMinibatchOutputAttributes()
+        print 'results', results
+        self.setAttributes(self.updateMinibatchOutputAttributes(),
+                           results)
+
+        if 0:
+            print 'n0', self.names2OpResults(self.updateMinibatchOutputAttributes() + self.updateMinibatchInputFields())
+            print 'n1', self.names2OpResults(self.updateMinibatchOutputAttributes())
+            print 'n2', self.names2OpResults(self.updateEndInputAttributes())
+            print 'n3', self.names2OpResults(self.updateEndOutputAttributes())
+
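Note: updateMinibatch follows the TLearner calling convention visible above: the current attribute values (e.g. [b1, W1, b2, W2], per the class docstring) and the minibatch fields are concatenated and passed positionally to the compiled update_minibatch_function, whose outputs are written back via setAttributes in the order given by updateMinibatchOutputAttributes(). The following minimal NumPy sketch shows that data flow, with a hand-written SGD step standing in for the compiled Theano function; the shapes and the learning rate are illustrative assumptions:

 import numpy as np

 def update_minibatch_function(b1, W1, b2, W2, x, y):
     # Hypothetical stand-in for the compiled Theano function: one SGD step
     # on the minibatch NLL, returning new parameters in attribute order.
     lr = 0.1
     h = np.tanh(np.dot(x, W1) + b1)               # hidden activations
     s = np.dot(h, W2) + b2                        # class scores
     p = np.exp(s - s.max(axis=1, keepdims=True))
     p /= p.sum(axis=1, keepdims=True)             # softmax probabilities
     d = p.copy()
     d[np.arange(len(y)), y] -= 1                  # dNLL/dscores
     gW2, gb2 = np.dot(h.T, d), d.sum(axis=0)
     dh = np.dot(d, W2.T) * (1 - h * h)            # backprop through tanh
     gW1, gb1 = np.dot(x.T, dh), dh.sum(axis=0)
     return b1 - lr * gb1, W1 - lr * gW1, b2 - lr * gb2, W2 - lr * gW2

 rng = np.random.RandomState(0)
 b1, W1 = np.zeros(3), rng.randn(4, 3)             # illustrative shapes
 b2, W2 = np.zeros(2), rng.randn(3, 2)
 x_batch, y_batch = rng.randn(5, 4), rng.randint(0, 2, 5)

 input_attributes = [b1, W1, b2, W2]               # cf. names2attributes(...)
 input_fields = [x_batch, y_batch]                 # cf. minibatch(*updateMinibatchInputFields())
 results = update_minibatch_function(*(input_attributes + input_fields))
 b1, W1, b2, W2 = results                          # cf. setAttributes(..., results)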
 class MLP(MinibatchUpdatesTLearner):
     """
     Implement a feedforward multi-layer perceptron, with or without L1 and/or L2 regularization.

     The predictor parameters are obtained by minibatch/online gradient descent.