# HG changeset patch
# User James Bergstra
# Date 1284399746 14400
# Node ID b422cbaddc52470a83da184e820c9f42ef7df01c
# Parent 153cf820a97503bfc3d539fe9b9f738edf1e4370
v2planning - minor edits to use_cases

diff -r 153cf820a975 -r b422cbaddc52 doc/v2_planning/use_cases.txt
--- a/doc/v2_planning/use_cases.txt	Mon Sep 13 13:41:53 2010 -0400
+++ b/doc/v2_planning/use_cases.txt	Mon Sep 13 13:42:26 2010 -0400
@@ -66,6 +66,7 @@
         classification_accuracy(
             examples=MNIST.validation_dataset,
             function=as_classifier('learner_obj'))),
+
     step_fn = vm_lambda(('learner_obj',),
         sgd_step_fn(
             parameters = vm_getattr('learner_obj', 'params'),
@@ -113,7 +114,7 @@
     initial_model=alloc_model('param1', 'param2'),
     burnin=100,
     score_fn = vm_lambda(('learner_obj',),
-        graph=classification_error(
+        classification_error(
             function=as_classifier('learner_obj'),
             dataset=MNIST.subset(validation_set))),
     step_fn = vm_lambda(('learner_obj',),
@@ -145,7 +146,7 @@
 extending the symbolic program, and calling the extended function.
 
     vm.call(
-        [pylearn.min(model.weights) for model in trained_models],
+        [pylearn.min(pylearn_getattr(model, 'weights')) for model in trained_models],
         param1=1, param2=2)
 
 If this is run after the previous calls:
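
Note on the final hunk: `model.weights` becomes `pylearn_getattr(model, 'weights')`
so that the attribute lookup itself stays inside the symbolic program, rather than
being an eager Python attribute access on a handle that has no concrete weights yet.
Below is a minimal sketch of what such a deferred-getattr node could look like;
`SymbolicGetattr` and this toy `pylearn_getattr` are illustrative assumptions, not
the planned pylearn API.

    # Illustrative sketch only: a deferred-getattr graph node. It records
    # the attribute access instead of performing it, so the VM can resolve
    # it later when the extended program is actually evaluated.

    class SymbolicGetattr(object):
        """Graph node deferring obj.name until the VM evaluates it."""
        def __init__(self, obj, name):
            self.obj = obj      # symbolic expression yielding the object
            self.name = name    # attribute to fetch at evaluation time

    def pylearn_getattr(obj, name):
        # Build a graph node rather than touching the attribute now, so
        # `obj` can be a symbolic handle with no concrete value attached.
        return SymbolicGetattr(obj, name)

Keeping the lookup symbolic is what lets `vm.call` extend the program and resolve
`weights` against whatever model each run actually produced.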