changeset 583:e06d5ef74e6f

Automated merge with ssh://projects@lgcm.iro.umontreal.ca/hg/pylearn_refactor
author Frederic Bastien <bastienf@iro.umontreal.ca>
date Fri, 05 Dec 2008 15:58:43 -0500
parents a83f62555c57 (current diff) 8991bb6c39cc (diff)
children 4e25f4e90c1e
files pylearn/algorithms/rnn.py pylearn/algorithms/sgd.py pylearn/algorithms/tests/test_logistic_regression.py
diffstat 3 files changed, 10 insertions(+), 19 deletions(-)
--- a/pylearn/algorithms/rnn.py	Thu Dec 04 17:07:29 2008 -0500
+++ b/pylearn/algorithms/rnn.py	Fri Dec 05 15:58:43 2008 -0500
@@ -214,22 +214,22 @@
     LAG = 4
     y[LAG:] = x[:-LAG, 0:n_out]
 
-    minimizer_fn1 = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = False)
-    minimizer_fn2 = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = True)
+    minimizer_fn1 = make_minimizer('sgd', stepsize = 0.001)
+    minimizer_fn2 = make_minimizer('sgd', stepsize = 0.001)
     rnn_module1 = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn1)
     rnn_module2 = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn2)
     rnn1 = rnn_module2.make(mode='FAST_RUN')
     rnn2 = rnn_module1.make(mode='FAST_COMPILE')
-    topo1=rnn1.minimizer.step_cost.maker.env.toposort()
-    topo2=rnn2.minimizer.step_cost.maker.env.toposort()
     if 0:
+        topo1=rnn1.minimizer.step_cost.maker.env.toposort()
+        topo2=rnn2.minimizer.step_cost.maker.env.toposort()
         for i in range(len(topo1)):
             print '1',i, topo1[i]
             print '2',i, topo2[i]
 
 
 
-    niter=3
+    niter=50
     for i in xrange(niter):
         rnn1.minimizer.step(x, y)
         rnn2.minimizer.step(x, y)
@@ -243,5 +243,5 @@
 if __name__ == '__main__':
 #    from theano.tests import main
 #    main(__file__)
-#    test_example_rnn()
+    test_example_rnn()
     test_WEIRD_STUFF()
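
The rnn.py half of the merge drops the WEIRD_STUFF flag from both make_minimizer calls, moves the toposort debugging dump inside the disabled `if 0:` block, raises the training loop from 3 to 50 iterations, and re-enables test_example_rnn(). For context, a plain-NumPy sketch of the lag-prediction targets the test builds; the shapes n_steps, n_vis and n_out are hypothetical stand-ins, only LAG = 4 and the slice pattern come from the test itself:

    import numpy as np

    # Hypothetical shapes; only LAG and the slicing come from the test.
    n_steps, n_vis, n_out = 100, 5, 3
    rng = np.random.RandomState(23)
    x = rng.randn(n_steps, n_vis)
    y = np.zeros((n_steps, n_out))
    LAG = 4
    y[LAG:] = x[:-LAG, 0:n_out]   # target at step t is input at step t - LAG
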
--- a/pylearn/algorithms/sgd.py	Thu Dec 04 17:07:29 2008 -0500
+++ b/pylearn/algorithms/sgd.py	Fri Dec 05 15:58:43 2008 -0500
@@ -8,13 +8,12 @@
 
 class StochasticGradientDescent(module.FancyModule):
     """Fixed stepsize gradient descent"""
-    def __init__(self, args, cost, params, gradients=None, stepsize=None, WEIRD_STUFF=True):
+    def __init__(self, args, cost, params, gradients=None, stepsize=None):
         """
         :param stepsize: the step to take in (negative) gradient direction
         :type stepsize: None, scalar value, or scalar TensorResult
         """
         super(StochasticGradientDescent, self).__init__()
-        self.WEIRD_STUFF = WEIRD_STUFF
         self.stepsize_init = None
 
         if stepsize is None:
@@ -22,12 +21,7 @@
         elif isinstance(stepsize, T.TensorResult):
             self.stepsize = stepsize
         else:
-            if self.WEIRD_STUFF:
-                #TODO: why is this necessary? why does the else clause not work?
-                self.stepsize = module.Member(T.dscalar())
-                self.stepsize_init = stepsize
-            else:
-                self.stepsize = module.Member(T.value(stepsize))
+            self.stepsize = module.Member(T.value(stepsize))
 
         if self.stepsize.ndim != 0:
             raise ValueError('stepsize must be a scalar', stepsize)
@@ -44,10 +38,7 @@
                 args, cost,
                 updates=self.updates)
     def _instance_initialize(self, obj):
-        if self.WEIRD_STUFF:
-            obj.stepsize = self.stepsize_init
-        else:
-            pass
+        pass
 
 
 @minimizer_factory('sgd')
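
With the WEIRD_STUFF escape hatch removed, a plain scalar stepsize is always wrapped the same way, as module.Member(T.value(stepsize)), and _instance_initialize no longer has to copy a separate stepsize_init onto the instance. The update this module compiles is ordinary fixed-stepsize gradient descent; a self-contained NumPy sketch of that rule (the quadratic cost here is a hypothetical stand-in for whatever cost the caller passes in):

    import numpy as np

    # Fixed-stepsize rule: p <- p - stepsize * dcost/dp, constant throughout.
    def sgd_step(params, grads, stepsize=0.001):
        return [p - stepsize * g for p, g in zip(params, grads)]

    w = np.array([3.0, -2.0])
    for _ in range(50):             # niter=50, matching the updated rnn test
        grad = 2.0 * w              # gradient of the stand-in cost w . w
        [w] = sgd_step([w], [grad])
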
--- a/pylearn/algorithms/tests/test_logistic_regression.py	Thu Dec 04 17:07:29 2008 -0500
+++ b/pylearn/algorithms/tests/test_logistic_regression.py	Fri Dec 05 15:58:43 2008 -0500
@@ -1,4 +1,4 @@
-from logistic_regression import *
+from pylearn.algorithms.logistic_regression import *
 import sys, time
 
 if __name__ == '__main__':
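
The test fix replaces a bare top-level import with the package-qualified one, so the module resolves through the pylearn package on sys.path instead of depending on the tests directory being the working directory. A minimal illustration of the difference (running either line requires a pylearn checkout on sys.path):

    # Fragile: only resolves when pylearn/algorithms/tests itself is on
    # sys.path, e.g. when the test is launched from its own directory.
    from logistic_regression import *

    # Robust: resolves through the pylearn package regardless of the
    # working directory the test runner uses.
    from pylearn.algorithms.logistic_regression import *
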