changeset 155:ae5651a3696b

new argmax calling convention
author James Bergstra <bergstrj@iro.umontreal.ca>
date Mon, 12 May 2008 16:16:32 -0400
parents 39bb21348fdf
children cc8b032417db
files mlp.py
diffstat 1 files changed, 1 insertions(+), 1 deletions(-)
line diff
--- a/mlp.py	Mon May 12 15:51:43 2008 -0400
+++ b/mlp.py	Mon May 12 16:16:32 2008 -0400
@@ -89,7 +89,7 @@
         self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2))
         self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
         self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target_vector)
-        self._output_class, self._max_output = t.argmax(self._output,1)
+        self._output_class = t.argmax(self._output,1)
         self._class_error = t.neq(self._output_class,self._target_vector)
         self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
         OnlineGradientTLearner.__init__(self)
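For context, a minimal sketch of the call-site change, assuming `t` is Theano's tensor module as used in mlp.py (the import and the variable names below are illustrative, not taken from the file):

    import theano.tensor as t                    # assumed binding of `t`

    output = t.dmatrix('output')                 # per-class scores, one row per example
    target_vector = t.lvector('target_vector')   # integer class labels

    # Old convention (the removed line): argmax returned two results,
    # unpacked as the class index and the maximum value.
    #   output_class, max_output = t.argmax(output, 1)

    # New convention: argmax returns only the index of the maximum along axis 1.
    output_class = t.argmax(output, 1)

    # If the maximum value itself is still needed, it can be taken separately.
    max_output = t.max(output, 1)

    class_error = t.neq(output_class, target_vector)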