changeset 785:12f587e37ee3

* fixed test_stacker.py
* required updating stacker by: removing kits, reworking it so that it works with the "new" version of BinRegressor, and creating new methods instead of using .dup() (sketched below the header)
author desjagui@atchoum.iro.umontreal.ca
date Mon, 29 Jun 2009 11:55:29 -0400
parents ba65e95d1221
children 0eb53b967ee7
files pylearn/algorithms/stacker.py pylearn/algorithms/tests/test_stacker.py
diffstat 2 files changed, 19 insertions(+), 15 deletions(-)
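For orientation, a minimal driver for the reworked Stacker; this is a sketch only, assuming the models_stacker / models_reg import aliases used by test_stacker.py, and with x standing in for illustrative input data (any extra keyword arguments to make() are elided):

    class StackBinRegressor(models_reg.BinRegressor):
        # the "new" BinRegressor needs build_extensions() called after construction
        def __init__(self, input = None, target = None, regularize = True):
            super(StackBinRegressor, self).__init__(input, target, regularize)
            self.build_extensions()

    # each (submodule, outname) pair names the layer attribute that feeds the
    # next layer; Stacker now fetches it with getattr(layer, outname)
    reg = models_stacker.Stacker([(StackBinRegressor, 'output'),
                                  (StackBinRegressor, 'output')],
                                 regularize = False)
    # nunits per layer and lr, matching _instance_initialize(obj, nunits, lr, seed)
    model = reg.make([100, 200, 1], lr = 0.01)
    # self.compute is a theano.Method from the stack's input to its final output
    out = model.compute(x)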
--- a/pylearn/algorithms/stacker.py	Mon Jun 29 09:49:28 2009 -0400
+++ b/pylearn/algorithms/stacker.py	Mon Jun 29 11:55:29 2009 -0400
@@ -25,7 +25,7 @@
         for i, (submodule, outname) in enumerate(submodules):
             layer = submodule(current, regularize = regularize)
             layers.append(layer)
-            current = layer[outname]
+            current = getattr(layer, outname)
         self.layers = layers
 
         self.input = self.layers[0].input
@@ -35,16 +35,14 @@
         local_update = []
         global_update = []
         to_update = []
-        all_kits = []
         for layer, (submodule, outname) in zip(layers, submodules):
             u = layer.update
             u.resolve_all()
             to_update += u.updates.keys()
-            all_kits += u.kits
             # the input is the whole deep model's input instead of the layer's own
             # input (which is previous_layer[outname])
             inputs = [self.input] + u.inputs[1:]
-            method = theano.Method(inputs, u.outputs, u.updates, u.kits)
+            method = theano.Method(inputs, u.outputs, u.updates)
             local_update.append(method)
             global_update.append(
                 theano.Method(inputs,
@@ -52,9 +50,8 @@
                               # we update the params of the previous layers too but wrt
                               # this layer's cost
                               dict((param, param - layer.lr * T.grad(layer.cost, param))
-                                   for param in to_update),
-                              list(all_kits)))
-            representation.append(theano.Method(self.input, layer[outname]))
+                                   for param in to_update)))
+            representation.append(theano.Method(self.input, getattr(layer,outname)))
 
 #           @todo: Add diagnostics
 #             self.diagnose_from_input = Method([self.input], self.layers[0].diagnose.outputs + self.layers[1].diagnose.outputs ...
@@ -64,12 +61,16 @@
         self.representation = representation
         self.update = self.global_update[-1]
         self.compute = theano.Method(self.input, self.output)
+
+        # Copy each theano.Method of the last layer (usually ll.classify) onto self,
+        # rewiring its input to refer to the global "model" input (sketched after this diff).
         ll = self.layers[-1]
-        for name, method in ll.components_map():
+        for name, method in ll.__dict__['local_attr'].iteritems():
             if isinstance(method, theano.Method) and not hasattr(self, name):
-                m = method.dup()
-                m.resolve_all()
-                m.inputs = [self.input if x is ll.input else x for x in m.inputs]
+                if not isinstance(method.inputs, (list,dict)):
+                    method.inputs = [method.inputs]
+                inputs = [self.input if x is ll.input else x for x in method.inputs]
+                m = theano.Method(inputs, method.outputs, method.updates)
                 setattr(self, name, m)
 
     def _instance_initialize(self, obj, nunits = None, lr = 0.01, seed = None, **kwargs):
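As a usage sketch of the rewiring above (illustrative only: 'classify' stands for whichever theano.Method the last layer exposes, per the ll.classify mention in the comment, x is raw input data, and reg is the Stacker built as in test_stacker.py below):

    model = reg.make([100, 200, 1], lr = 0.01, seed = 1)
    # the copied method now takes the whole stack's input and propagates it
    # through every layer, instead of taking the last layer's own (hidden) input
    pred = model.classify(x)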
--- a/pylearn/algorithms/tests/test_stacker.py	Mon Jun 29 09:49:28 2009 -0400
+++ b/pylearn/algorithms/tests/test_stacker.py	Mon Jun 29 11:55:29 2009 -0400
@@ -5,12 +5,15 @@
 import numpy
 import time
 
+class StackBinRegressor(models_reg.BinRegressor):
+    def __init__(self, input = None, target = None, regularize = True):
+        super(StackBinRegressor, self).__init__(input, target, regularize)
+        self.build_extensions()
 
 def test_train(mode = theano.Mode('c|py', 'fast_run')):
-
-    reg = models_stacker.Stacker([(models_reg.BinRegressor, 'output'),
-        (models_reg.BinRegressor, 'output')],
-        regularize = False)
+    reg = models_stacker.Stacker([(StackBinRegressor, 'output'),
+                                  (StackBinRegressor, 'output')],
+                                  regularize = False)
     #print reg.global_update[1].pretty(mode = mode.excluding('inplace'))
 
     model = reg.make([100, 200, 1],