# HG changeset patch
# User delallea@opale.iro.umontreal.ca
# Date 1211917749 14400
# Node ID c047238e5b3fcbd3a8c43830cf87c6ef00cc2bac
# Parent  38beb81f4e8be838a2ec7b36eeb4754ffb049951
Fixed by James

diff -r 38beb81f4e8b -r c047238e5b3f mlp_factory_approach.py
--- a/mlp_factory_approach.py	Tue May 27 13:46:03 2008 -0400
+++ b/mlp_factory_approach.py	Tue May 27 15:49:09 2008 -0400
@@ -46,7 +46,7 @@
         #TODO: why should we have to unpack target like this?
         # tbm : creates problem...
         for input, target in input_target:
-            rval= self.update_fn(input, target[:,0], *params)
+            rval= self.update_fn(input, target, *params)
             #print rval[0]
 
     def __call__(self, testset, fieldnames=['output_class'],input='input',target='target'):
@@ -136,7 +136,7 @@
         for stp in self.early_stopper():
             rval.update(
                 minset.minibatches([input, target], minibatch_size=min(32,
-                    len(trainset))))
+                    len(minset))))
             #print 'mlp.__call__(), we did an update'
             if stp.set_score:
                 stp.score = rval(valset, ['loss_01'])
@@ -171,7 +171,7 @@
             , linker='c&py'
             , early_stopper = lambda:stopper.NStages(100,1))
 
-    model1 = learn_algo(training_set1,input='input',target='target')
+    model1 = learn_algo(training_set1)
 
     model2 = learn_algo(training_set2)
 