pylearn: diff mlp_factory_approach.py @ 211:bd728c83faff
In __get__, there was a problem when i.stop was None (i being the slice); added one line replacing None with len(self).
| author | Thierry Bertin-Mahieux <bertinmt@iro.umontreal.ca> |
| --- | --- |
| date | Wed, 21 May 2008 17:39:30 -0400 |
| parents | bf320808919f |
| children | 9b57ea8c767f |
```diff
--- a/mlp_factory_approach.py	Sat May 17 00:01:47 2008 -0400
+++ b/mlp_factory_approach.py	Wed May 21 17:39:30 2008 -0400
@@ -6,6 +6,7 @@
 from tlearn import dataset, nnet_ops, stopper
+
 def _randshape(*shape):
     return (numpy.random.rand(*shape) -0.5) * 0.001
@@ -31,7 +32,8 @@
         params = self.params
         #TODO: why should we have to unpack target like this?
         for input, target in input_target:
-            self.update_fn(input, target[:,0], *params)
+            rval= self.update_fn(input, target[:,0], *params)
+            print rval[0]
 
     def __call__(self, testset, fieldnames=['output_class']):
         """Apply this model (as a function) to new data"""
@@ -102,7 +104,7 @@
             # prefer caching in _Model.__call__
             return theano.function(inputs, outputs, unpack_single=False,
                     linker=self.linker)
 
-    def __call__(self, trainset=None, iparams=None):
+    def __call__(self, trainset=None, iparams=None, input='input', target='target'):
         """Allocate and optionally train a model"""
         if iparams is None:
             iparams = [_randshape(self.nhid, self.nclass), _randshape(self.nclass)]\
@@ -119,8 +121,9 @@
         best = rval
         for stp in self.early_stopper():
             rval.update(
-                trainset.minibatches(['input', 'target'], minibatch_size=min(32,
+                minset.minibatches([input, target], minibatch_size=min(32,
                     len(trainset))))
+            print 'mlp.__call__(), we did an update'
             if stp.set_score:
                 stp.score = rval(valset, ['loss_01'])
                 if (stp.score < stp.best_score):
@@ -154,7 +157,7 @@
             , linker='c&py'
             , early_stopper = lambda:stopper.NStages(100,1))
 
-    model1 = learn_algo(training_set1)
+    model1 = learn_algo(training_set1,input='input',target='target')
 
     model2 = learn_algo(training_set2)
```
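The slice-handling fix named in the commit message has no hunk in this file; the `__get__` it mentions is presumably a `__getitem__`-style accessor elsewhere in the repository. A minimal sketch of the described pattern, using a hypothetical container class, might look like:

```python
class HypotheticalDataset(object):
    """Illustration only: a sequence whose __getitem__ accepts slices."""

    def __init__(self, examples):
        self.examples = list(examples)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        if isinstance(i, slice):
            # The problem: i.stop is None for open-ended slices such as
            # d[2:], which breaks any arithmetic done on the upper bound.
            if i.stop is None:
                # The one-line fix from the commit message:
                # replace None by len(self).
                i = slice(i.start, len(self), i.step)
            return [self.examples[j] for j in range(*i.indices(len(self)))]
        return self.examples[i]
```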
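The other functional change parameterizes the minibatch field names in `__call__` instead of hard-coding `'input'` and `'target'`; the last hunk updates one call site to pass the defaults explicitly. For a dataset whose fields carry different names (the names below are hypothetical), the call would presumably be:

```python
# Hypothetical field names; the new keyword arguments tell the trainer
# which dataset fields to draw minibatches from.
model3 = learn_algo(training_set3, input='x', target='y')
```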