pylearn: changeset 183:25d0a0c713da
did some debugging of test_mlp
author | Olivier Breuleux <breuleuo@iro.umontreal.ca>
---|---
date | Tue, 13 May 2008 18:30:08 -0400
parents | 4afb41e61fcf
children | 9a2aecc57a79
files | mlp.py test_mlp.py
diffstat | 2 files changed, 53 insertions(+), 5 deletions(-)
```diff
--- a/mlp.py	Tue May 13 17:00:53 2008 -0400
+++ b/mlp.py	Tue May 13 18:30:08 2008 -0400
@@ -67,7 +67,7 @@
         - 'regularization_term'
     """
 
-    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None):
+    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None,linker='c|py'):
         self._n_inputs = n_inputs
         self._n_outputs = n_classes
         self._n_hidden = n_hidden
@@ -78,7 +78,7 @@
         self.L2_regularizer = L2_regularizer
         self._learning_rate = t.scalar('learning_rate') # this is the symbol
         self._input = t.matrix('input') # n_examples x n_inputs
-        self._target = t.imatrix('target') # n_examples x 1
+        self._target = t.lmatrix('target') # n_examples x 1
         self._target_vector = self._target[:,0]
         self._L2_regularizer = t.scalar('L2_regularizer')
         self._W1 = t.matrix('W1')
@@ -91,7 +91,7 @@
         self._output_class = t.argmax(self._output,1)
         self._class_error = t.neq(self._output_class,self._target_vector)
         self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
-        OnlineGradientTLearner.__init__(self)
+        OnlineGradientTLearner.__init__(self, linker = linker)
 
     def attributeNames(self):
         return ["parameters","b1","W2","b2","W2", "L2_regularizer","regularization_term"]
@@ -119,7 +119,7 @@
 
     def updateMinibatch(self,minibatch):
         MinibatchUpdatesTLearner.updateMinibatch(self,minibatch)
-        print self.nll
+        #print self.nll
 
     def allocate(self,minibatch):
         minibatch_n_inputs = minibatch["input"].shape[1]
```
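For context on the `imatrix` → `lmatrix` change: `imatrix` declares an int32 matrix while `lmatrix` declares int64, and `numpy.array` of plain Python integers defaults to int64 on 64-bit hosts, so target arrays built the way test_mlp.py builds them only type-check against `lmatrix`. A minimal sketch of the distinction, assuming the `theano.tensor` dtype constructors behave as in later Theano releases:

```python
import numpy
import theano.tensor as t

# imatrix declares an int32 matrix; lmatrix declares int64, which is
# numpy's default integer dtype on 64-bit hosts.
target = t.lmatrix('target')           # n_examples x 1, dtype int64
labels = numpy.array([[0], [1], [1]])  # plain ints -> int64 on 64-bit hosts
assert str(labels.dtype) == 'int64'    # holds on 64-bit platforms
```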
```diff
--- a/test_mlp.py	Tue May 13 17:00:53 2008 -0400
+++ b/test_mlp.py	Tue May 13 18:30:08 2008 -0400
@@ -2,8 +2,56 @@
 from mlp import *
 import dataset
 
+
+from functools import partial
+def separator(debugger, i, node, *ths):
+    print "==================="
+
+def what(debugger, i, node, *ths):
+    print "#%i" % i, node
+
+def parents(debugger, i, node, *ths):
+    print [input.step for input in node.inputs]
+
+def input_shapes(debugger, i, node, *ths):
+    print "input shapes: ",
+    for r in node.inputs:
+        if hasattr(r.value, 'shape'):
+            print r.value.shape,
+        else:
+            print "no_shape",
+    print
+
+def input_types(debugger, i, node, *ths):
+    print "input types: ",
+    for r in node.inputs:
+        print r.type,
+    print
+
+def output_shapes(debugger, i, node, *ths):
+    print "output shapes:",
+    for r in node.outputs:
+        if hasattr(r.value, 'shape'):
+            print r.value.shape,
+        else:
+            print "no_shape",
+    print
+
+def output_types(debugger, i, node, *ths):
+    print "output types:",
+    for r in node.outputs:
+        print r.type,
+    print
+
+
 def test0():
-    nnet = OneHiddenLayerNNetClassifier(10,2,.001,1000)
+    linker = 'c|py'
+    #linker = partial(theano.gof.DebugLinker, linkers = [theano.gof.OpWiseCLinker],
+    #                 debug_pre = [separator, what, parents, input_types, input_shapes],
+    #                 debug_post = [output_shapes, output_types],
+    #                 compare_fn = lambda x, y: numpy.all(x == y))
+
+    nnet = OneHiddenLayerNNetClassifier(10,2,.001,1000, linker = linker)
     training_set = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
                                                      [0, 1, 1],
                                                      [1, 0, 1],
```
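The commented-out block above is the debugging machinery this changeset adds: wrapping `theano.gof.DebugLinker` around `theano.gof.OpWiseCLinker` with the callbacks defined at the top of the file. A sketch of the activated form, assuming only the names that appear in the diff (the new `linker` keyword on `OneHiddenLayerNNetClassifier` and the 2008-era `theano.gof` linker classes):

```python
# Sketch: test0() with the DebugLinker path enabled instead of 'c|py'.
# The DebugLinker keyword arguments are taken verbatim from the
# commented-out lines above; separator, what, parents, input_types,
# input_shapes, output_shapes and output_types are the callbacks
# defined at the top of test_mlp.py.
import numpy
import theano
from functools import partial

linker = partial(theano.gof.DebugLinker,
                 linkers = [theano.gof.OpWiseCLinker],
                 debug_pre = [separator, what, parents, input_types, input_shapes],
                 debug_post = [output_shapes, output_types],
                 compare_fn = lambda x, y: numpy.all(x == y))

nnet = OneHiddenLayerNNetClassifier(10, 2, .001, 1000, linker = linker)
```

The `debug_pre` callbacks presumably fire before each node executes and the `debug_post` callbacks after, so a run prints, per op, the input types and shapes followed by the output shapes and types, which is how a type mismatch like the `imatrix`/`lmatrix` one above would surface.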