pylearn: changeset 232:c047238e5b3f
Fixed by James
author   | delallea@opale.iro.umontreal.ca
date     | Tue, 27 May 2008 15:49:09 -0400
parents  | 38beb81f4e8b
children | 9504940ef5ef ddb88a8e9fd2
files    | mlp_factory_approach.py
diffstat | 1 files changed, 3 insertions(+), 3 deletions(-)
--- a/mlp_factory_approach.py	Tue May 27 13:46:03 2008 -0400
+++ b/mlp_factory_approach.py	Tue May 27 15:49:09 2008 -0400
@@ -46,7 +46,7 @@
             #TODO: why should we have to unpack target like this?
             # tbm : creates problem...
             for input, target in input_target:
-                rval= self.update_fn(input, target[:,0], *params)
+                rval= self.update_fn(input, target, *params)
                 #print rval[0]
 
         def __call__(self, testset, fieldnames=['output_class'],input='input',target='target'):
@@ -136,7 +136,7 @@
         for stp in self.early_stopper():
             rval.update(
                 minset.minibatches([input, target], minibatch_size=min(32,
-                    len(trainset))))
+                    len(minset))))
             #print 'mlp.__call__(), we did an update'
             if stp.set_score:
                 stp.score = rval(valset, ['loss_01'])
@@ -171,7 +171,7 @@
                 , linker='c&py'
                 , early_stopper = lambda:stopper.NStages(100,1))
 
-        model1 = learn_algo(training_set1,input='input',target='target')
+        model1 = learn_algo(training_set1)
         model2 = learn_algo(training_set2)
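
The changeset touches three spots: the update loop now passes the whole target array to self.update_fn instead of unpacking its first column with target[:,0]; the minibatch size in __call__ is clamped with len(minset), the dataset actually being minibatched, rather than len(trainset); and the test code calls learn_algo(training_set1) without the input/target keyword arguments. Below is a minimal sketch of the first two fixes; update_fn, the arrays, and the loop are hypothetical stand-ins, not pylearn's API.

import numpy as np

def update_fn(inputs, targets):
    """Toy update step: just report the shapes the training loop would see."""
    return inputs.shape, targets.shape

if __name__ == '__main__':
    minset = np.random.randn(10, 5)           # the set actually being minibatched
    trainset = np.random.randn(1000, 5)       # a different, larger set
    targets = np.random.randint(0, 2, size=(len(minset), 1))

    # Fix in __call__: clamp the minibatch size with len(minset), the set that is
    # actually sliced.  Clamping with len(trainset) would allow a minibatch size
    # larger than minset itself.
    minibatch_size = min(32, len(minset))

    for start in range(0, len(minset), minibatch_size):
        batch_x = minset[start:start + minibatch_size]
        batch_y = targets[start:start + minibatch_size]
        # Fix in update(): pass the target array as-is rather than unpacking its
        # first column with target[:, 0].
        print(update_fn(batch_x, batch_y))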