Mercurial > pylearn
diff mlp.py @ 179:9911d2cc3c01
merged
author      James Bergstra <bergstrj@iro.umontreal.ca>
date        Tue, 13 May 2008 15:14:04 -0400
parents     4090779e39a9 e9a95e19e6f8
children    2698c0feeb54
--- a/mlp.py	Tue May 13 15:12:20 2008 -0400
+++ b/mlp.py	Tue May 13 15:14:04 2008 -0400
@@ -9,22 +9,7 @@
 from theano import tensor as t
 from nnet_ops import *
 import math
-
-def sum_l2_cost(*params):
-    p = params[0]
-    rval = t.sum(p*p)
-    for p in params[1:]:
-        rval = rval + t.sum(p*p)
-    return rval
-
-def activation(w, b, v, c, x):
-    return t.dot(t.tanh(t.dot(x, w) + b), v) + c
-def nll(w, b, v, c, x, y):
-    return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[0]
-def output(w, b, v, c, x, y):
-    return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[1]
-
-
+from misc import *
 
 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):
     """
@@ -102,7 +87,7 @@
         self._b2 = t.row('b2')
         self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2))
         self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
-        self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target_vector)
+        self._nll,self._output = crossentropy_softmax_1hot(Print("output_activations")(self._output_activations),self._target_vector)
         self._output_class = t.argmax(self._output,1)
         self._class_error = t.neq(self._output_class,self._target_vector)
         self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
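
The only functional change in the second hunk is wrapping self._output_activations in a Print op before it reaches crossentropy_softmax_1hot, so the activations are dumped every time the graph is evaluated. Below is a minimal sketch of that debugging pattern, assuming the modern Theano API (theano.printing.Print, theano.function, T.nnet.softmax) rather than the 2008-era pylearn modules used in this changeset; the variables x, W, and f are purely illustrative.

    import numpy
    import theano
    import theano.tensor as T
    from theano.printing import Print

    # Symbolic inputs: a minibatch of examples and a weight matrix (illustrative only).
    x = T.matrix('x')
    W = T.matrix('W')

    # Wrapping a variable in Print leaves the computation unchanged, but the
    # runtime value (labelled with the given message) is printed whenever the
    # compiled function evaluates it.
    activations = T.dot(x, W)
    activations = Print("output_activations")(activations)

    f = theano.function([x, W], T.nnet.softmax(activations))
    f(numpy.ones((2, 3)), numpy.ones((3, 4)))  # prints the labelled activations, then returns the softmax

Because Print is an identity op in the graph, it can be dropped again (as a later revision presumably would) without affecting the computed nll or output.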