comparison: mlp.py @ 179:9911d2cc3c01 (merged)

| author | James Bergstra <bergstrj@iro.umontreal.ca> |
|---|---|
| date | Tue, 13 May 2008 15:14:04 -0400 |
| parents | 4090779e39a9 e9a95e19e6f8 |
| children | 2698c0feeb54 |
| 178:4090779e39a9 (first parent) | 179:9911d2cc3c01 |
|---|---|
| 7 | 7 |
| `8 from learner import *` | `8 from learner import *` |
| `9 from theano import tensor as t` | `9 from theano import tensor as t` |
| `10 from nnet_ops import *` | `10 from nnet_ops import *` |
| `11 import math` | `11 import math` |
| 12 | `12 from misc import *` |
| `13 def sum_l2_cost(*params):` | |
| `14 p = params[0]` | |
| `15 rval = t.sum(p*p)` | |
| `16 for p in params[1:]:` | |
| `17 rval = rval + t.sum(p*p)` | |
| `18 return rval` | |
| 19 | |
| `20 def activation(w, b, v, c, x):` | |
| `21 return t.dot(t.tanh(t.dot(x, w) + b), v) + c` | |
| `22 def nll(w, b, v, c, x, y):` | |
| `23 return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[0]` | |
| `24 def output(w, b, v, c, x, y):` | |
| `25 return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[1]` | |
| 26 | |
| 27 | |
| 28 | 13 |
| `29 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):` | `14 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):` |
| `30 """` | `15 """` |
| `31 Implement a straightforward classicial feedforward` | `16 Implement a straightforward classicial feedforward` |
| `32 one-hidden-layer neural net, with L2 regularization.` | `17 one-hidden-layer neural net, with L2 regularization.` |
| … | … |
| `100 self._W2 = t.matrix('W2')` | `85 self._W2 = t.matrix('W2')` |
| `101 self._b1 = t.row('b1')` | `86 self._b1 = t.row('b1')` |
| `102 self._b2 = t.row('b2')` | `87 self._b2 = t.row('b2')` |
| `103 self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2))` | `88 self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2))` |
| `104 self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)` | `89 self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)` |
| `105 self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target_vector)` | `90 self._nll,self._output = crossentropy_softmax_1hot(Print("output_activations")(self._output_activations),self._target_vector)` |
| `106 self._output_class = t.argmax(self._output,1)` | `91 self._output_class = t.argmax(self._output,1)` |
| `107 self._class_error = t.neq(self._output_class,self._target_vector)` | `92 self._class_error = t.neq(self._output_class,self._target_vector)` |
| `108 self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]` | `93 self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]` |
| `109 OnlineGradientTLearner.__init__(self)` | `94 OnlineGradientTLearner.__init__(self)` |
| 110 | 95 |
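Taken together, the `__init__` expressions in the right-hand column build a graph with a tanh hidden layer, a softmax output, a per-example negative log-likelihood, and an L2 penalty on both weight matrices divided by the minibatch size. The sketch below rebuilds that graph with stock Theano ops; using `T.nnet.softmax` plus explicit NLL indexing in place of pylearn's `crossentropy_softmax_1hot` from `nnet_ops`, and the variable names, are assumptions made for illustration rather than the repository's own code.

```python
# A minimal sketch of the graph assembled above, using stock Theano ops.
# Names and the softmax/NLL formulation are assumptions for illustration.
import theano
import theano.tensor as T

x = T.matrix('input')        # one example per row
y = T.lvector('target')      # integer class labels
W1, b1 = T.matrix('W1'), T.row('b1')   # hidden-layer weights and bias
W2, b2 = T.matrix('W2'), T.row('b2')   # output-layer weights and bias
L2_regularizer = T.scalar('L2_regularizer')

hidden = T.tanh(b1 + T.dot(x, W1.T))                  # tanh hidden layer
output_activations = b2 + T.dot(hidden, W2.T)
p_y_given_x = T.nnet.softmax(output_activations)      # class probabilities
nll = -T.log(p_y_given_x)[T.arange(y.shape[0]), y]    # per-example NLL
regularization_term = L2_regularizer * (T.sum(W1 * W1) + T.sum(W2 * W2))
minibatch_criterion = nll + regularization_term / T.shape(x)[0]

output_class = T.argmax(p_y_given_x, axis=1)          # predicted class
class_error = T.neq(output_class, y)                  # 0/1 error indicator

# Usage: compile the criterion into a callable (parameters are passed as
# explicit inputs here only to keep the sketch self-contained).
criterion_fn = theano.function(
    [x, y, W1, b1, W2, b2, L2_regularizer], minibatch_criterion)
```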
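The only behavioural change inside the class in this changeset is on new line 90, where the output activations are wrapped in `Print("output_activations")(...)` so that their values are printed each time the compiled graph is evaluated; the `Print` name presumably comes in through the new `from misc import *`. Below is a minimal sketch of the same debugging idiom using Theano's own `theano.printing.Print`; treating the two as equivalent is an assumption, not a statement about pylearn's `misc` module.

```python
# Sketch of the Print debugging idiom, assuming the Print used above behaves
# like theano.printing.Print (an identity op with a printing side effect).
import theano
import theano.tensor as T
from theano.printing import Print

a = T.matrix('a')
b = T.matrix('b')
activations = T.dot(a, b)

# Wrapping a variable in Print leaves the graph's result unchanged but prints
# the runtime value every time this node is evaluated.
activations = Print('output_activations')(activations)

f = theano.function([a, b], activations)
f([[1.0, 2.0]], [[3.0], [4.0]])  # prints the value of `activations` ([[11.]]) and returns it
```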