Mercurial > pylearn
diff mlp.py @ 183:25d0a0c713da
did some debugging of test_mlp
author:   Olivier Breuleux <breuleuo@iro.umontreal.ca>
date:     Tue, 13 May 2008 18:30:08 -0400
parents:  4afb41e61fcf
children: 562f308873f0
line wrap: on
line diff
--- a/mlp.py	Tue May 13 17:00:53 2008 -0400
+++ b/mlp.py	Tue May 13 18:30:08 2008 -0400
@@ -67,7 +67,7 @@
        - 'regularization_term'
     """
-    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None):
+    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None,linker='c|py'):
         self._n_inputs = n_inputs
         self._n_outputs = n_classes
         self._n_hidden = n_hidden
@@ -78,7 +78,7 @@
         self.L2_regularizer = L2_regularizer
         self._learning_rate = t.scalar('learning_rate') # this is the symbol
         self._input = t.matrix('input') # n_examples x n_inputs
-        self._target = t.imatrix('target') # n_examples x 1
+        self._target = t.lmatrix('target') # n_examples x 1
         self._target_vector = self._target[:,0]
         self._L2_regularizer = t.scalar('L2_regularizer')
         self._W1 = t.matrix('W1')
@@ -91,7 +91,7 @@
         self._output_class = t.argmax(self._output,1)
         self._class_error = t.neq(self._output_class,self._target_vector)
         self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
-        OnlineGradientTLearner.__init__(self)
+        OnlineGradientTLearner.__init__(self, linker = linker)

     def attributeNames(self):
         return ["parameters","b1","W2","b2","W2", "L2_regularizer","regularization_term"]
@@ -119,7 +119,7 @@

     def updateMinibatch(self,minibatch):
         MinibatchUpdatesTLearner.updateMinibatch(self,minibatch)
-        print self.nll
+        #print self.nll

     def allocate(self,minibatch):
         minibatch_n_inputs = minibatch["input"].shape[1]