changeset 581:01e04bf878e2

Removed some code that is no longer needed now that the bug is fixed. I will add a test to the module later.
author Frederic Bastien <bastienf@iro.umontreal.ca>
date Fri, 05 Dec 2008 15:56:42 -0500
parents 1972bc9bea6d
children 8991bb6c39cc
files pylearn/algorithms/rnn.py pylearn/algorithms/sgd.py
diffstat 2 files changed, 8 insertions(+), 17 deletions(-)
--- a/pylearn/algorithms/rnn.py	Fri Dec 05 15:38:04 2008 -0500
+++ b/pylearn/algorithms/rnn.py	Fri Dec 05 15:56:42 2008 -0500
@@ -214,22 +214,22 @@
     LAG = 4
     y[LAG:] = x[:-LAG, 0:n_out]
 
-    minimizer_fn1 = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = False)
-    minimizer_fn2 = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = True)
+    minimizer_fn1 = make_minimizer('sgd', stepsize = 0.001)
+    minimizer_fn2 = make_minimizer('sgd', stepsize = 0.001)
     rnn_module1 = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn1)
     rnn_module2 = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn2)
     rnn1 = rnn_module2.make(mode='FAST_RUN')
     rnn2 = rnn_module1.make(mode='FAST_COMPILE')
-    topo1=rnn1.minimizer.step_cost.maker.env.toposort()
-    topo2=rnn2.minimizer.step_cost.maker.env.toposort()
     if 0:
+        topo1=rnn1.minimizer.step_cost.maker.env.toposort()
+        topo2=rnn2.minimizer.step_cost.maker.env.toposort()
         for i in range(len(topo1)):
             print '1',i, topo1[i]
             print '2',i, topo2[i]
 
 
 
-    niter=3
+    niter=50
     for i in xrange(niter):
         rnn1.minimizer.step(x, y)
         rnn2.minimizer.step(x, y)
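With the WEIRD_STUFF flag gone, the two minimizer factories in the test above are built identically, so the only remaining difference between rnn1 and rnn2 is the compilation mode. Below is a minimal sketch of the simplified driver, using only calls that appear in this changeset (make_minimizer, ExampleRNN, make, minimizer.step); n_vis, n_hid, n_out, x and y are the test's own placeholders, not definitions introduced here.

    # Sketch only: assumes the 2008-era pylearn API used above.
    minimizer_fn = make_minimizer('sgd', stepsize=0.001)   # no WEIRD_STUFF flag
    rnn_module = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn)
    rnn = rnn_module.make(mode='FAST_RUN')
    for i in xrange(50):                                   # niter raised from 3 to 50
        rnn.minimizer.step(x, y)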
--- a/pylearn/algorithms/sgd.py	Fri Dec 05 15:38:04 2008 -0500
+++ b/pylearn/algorithms/sgd.py	Fri Dec 05 15:56:42 2008 -0500
@@ -8,13 +8,12 @@
 
 class StochasticGradientDescent(module.FancyModule):
     """Fixed stepsize gradient descent"""
-    def __init__(self, args, cost, params, gradients=None, stepsize=None, WEIRD_STUFF=True):
+    def __init__(self, args, cost, params, gradients=None, stepsize=None):
         """
         :param stepsize: the step to take in (negative) gradient direction
         :type stepsize: None, scalar value, or scalar TensorResult
         """
         super(StochasticGradientDescent, self).__init__()
-        self.WEIRD_STUFF = WEIRD_STUFF
         self.stepsize_init = None
 
         if stepsize is None:
@@ -22,12 +21,7 @@
         elif isinstance(stepsize, T.TensorResult):
             self.stepsize = stepsize
         else:
-            if self.WEIRD_STUFF:
-                #TODO: why is this necessary? why does the else clause not work?
-                self.stepsize = module.Member(T.dscalar())
-                self.stepsize_init = stepsize
-            else:
-                self.stepsize = module.Member(T.value(stepsize))
+            self.stepsize = module.Member(T.value(stepsize))
 
         if self.stepsize.ndim != 0:
             raise ValueError('stepsize must be a scalar', stepsize)
@@ -44,10 +38,7 @@
                 args, cost,
                 updates=self.updates)
     def _instance_initialize(self, obj):
-        if self.WEIRD_STUFF:
-            obj.stepsize = self.stepsize_init
-        else:
-            pass
+        pass
 
 
 @minimizer_factory('sgd')
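After this cleanup the constructor resolves stepsize in two visible ways: a symbolic T.TensorResult is used as-is, and a plain number is wrapped once as module.Member(T.value(stepsize)), so _instance_initialize no longer has to copy a Python value into a dscalar at instance time. The following sketch mirrors that dispatch, assuming the 2008-era names above (T.TensorResult, T.value, module.Member) behave as the diff implies; resolve_stepsize is a hypothetical helper, not part of sgd.py, and the `stepsize is None` branch is elided in the hunk so it is omitted here as well.

    # Hypothetical helper mirroring the simplified dispatch above;
    # not part of sgd.py. The `stepsize is None` branch is not
    # shown in the hunk, so it is left out here too.
    def resolve_stepsize(stepsize):
        if isinstance(stepsize, T.TensorResult):
            return stepsize  # already a symbolic scalar: use as-is
        # plain number: bake it into the graph as a value once,
        # so no per-instance initialization is needed any more
        return module.Member(T.value(stepsize))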