import theano


class Print(theano.Op):
    """Identity Op that prints its input's value every time it is evaluated.

    Wrap a symbolic variable, e.g. ``Print("label")(x)``, to get a new
    variable that behaves exactly like ``x`` but emits ``label <value>``
    on stdout each time the compiled function computes it. Useful for
    debugging intermediate values inside a Theano graph.
    """

    def __init__(self, message=""):
        # Text printed before the value on every evaluation.
        self.message = message
        # Declare output 0 as a view of input 0: the Op performs no copy,
        # so Theano knows the output aliases the input's storage.
        self.view_map = {0: [0]}

    def make_node(self, xin):
        # The output has exactly the same type as the input (identity Op).
        # NOTE(review): `make_result()` is the 2008-era Theano API; later
        # versions spell this `xin.type()` — confirm against the pinned
        # Theano version before porting.
        xout = xin.type.make_result()
        return theano.Apply(op=self, inputs=[xin], outputs=[xout])

    def perform(self, node, inputs, output_storage):
        """Pass the value through unchanged, printing it as a side effect."""
        xin, = inputs
        xout, = output_storage
        # Alias rather than copy, consistent with self.view_map above.
        xout[0] = xin
        # Fix: use the print() function (valid in Python 3; the original
        # Python-2-only `print a, b` statement is a syntax error there).
        print(self.message, xin)

    def grad(self, inputs, output_gradients):
        # Identity Op: the gradient flows through unchanged.
        # (Parameter renamed from `input`, which shadowed the builtin.)
        return output_gradients