changeset 575:cf19655ec48b

Automated merge with ssh://projects@lgcm.iro.umontreal.ca/hg/pylearn_refactor
author Frederic Bastien <bastienf@iro.umontreal.ca>
date Thu, 04 Dec 2008 11:03:38 -0500
parents 9f5891cd4048 (current diff) 220044be9fd8 (diff)
children df2e2c7ba4ac 1972bc9bea6d
files pylearn/algorithms/rnn.py pylearn/algorithms/sgd.py
diffstat 2 files changed, 31 insertions(+), 20 deletions(-)
--- a/pylearn/algorithms/rnn.py	Wed Dec 03 23:23:03 2008 -0500
+++ b/pylearn/algorithms/rnn.py	Thu Dec 04 11:03:38 2008 -0500
@@ -3,7 +3,7 @@
 from theano import Op, Apply, tensor as T, Module, Member, Method, Mode, compile
 from theano.gof import OpSub, TopoOptimizer
 
-from .minimizer import make_minimizer # minimizer
+from minimizer import make_minimizer # minimizer
 from theano.printing import Print
 import sgd #until Olivier's module-import thing works better
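
The import hunk above switches from the explicit relative form to Python 2's implicit relative import, presumably so the file can also be executed directly (this changeset adds an if __name__ == '__main__': block at the bottom of the file). A sketch of the difference, assuming Python 2 semantics without from __future__ import absolute_import:

# Implicit relative import (Python 2 default): also resolves minimizer.py
# from the same directory when the file is run as a script.
from minimizer import make_minimizer

# Explicit relative import: only valid when the module is loaded as part of
# its package (pylearn.algorithms); fails under `python rnn.py` with
# "ValueError: Attempted relative import in non-package".
# from .minimizer import make_minimizer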
 
@@ -160,17 +160,17 @@
         obj.u = rng.randn(n_hid, n_out) * 0.01
         obj.c = N.zeros(n_out)
         obj.minimizer.initialize()
-    def __eq__(self, other):
+    def _instance__eq__(self, other):
         if not isinstance(other.component, ExampleRNN):
             raise NotImplementedError
         # we compare the members.
-        if self.n_vis != other.n_vis or slef.n_hid != other.n_hid or self.n_out != other.n_out:
-            return False
+#        if self.n_vis != other.n_vis or self.n_hid != other.n_hid or self.n_out != other.n_out:
+#            return False
         if (N.abs(self.z0-other.z0)<1e-8).all() and (N.abs(self.v-other.v)<1e-8).all() and (N.abs(self.b-other.b)<1e-8).all() and (N.abs(self.w-other.w)<1e-8).all() and (N.abs(self.u-other.u)<1e-8).all() and (N.abs(self.c-other.c)<1e-8).all():
             return True
         return False
 
-    def __hash__(self):
+    def _instance__hash__(self):
         raise NotImplementedError
 
 def test_example_rnn():
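
The _instance__eq__ above compares the two instances member by member, testing each weight array elementwise against a 1e-8 tolerance. A minimal standalone sketch of that pattern (the params_equal helper and its dict-of-arrays layout are illustrative, not pylearn API):

import numpy as N

def params_equal(a, b, tol=1e-8):
    # a and b map member names (z0, v, b, w, u, c) to numpy arrays;
    # equal only if every pair of arrays agrees elementwise within tol
    if set(a) != set(b):
        return False
    return all((N.abs(a[k] - b[k]) < tol).all() for k in a)

N.allclose(a[k], b[k], rtol=0, atol=tol) would express the same elementwise check in a single call.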
@@ -214,23 +214,34 @@
     LAG = 4
     y[LAG:] = x[:-LAG, 0:n_out]
 
-    minimizer_fn = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = False)
-    rnn_module = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn)
+    minimizer_fn1 = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = False)
+    minimizer_fn2 = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = True)
+    rnn_module1 = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn1)
+    rnn_module2 = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn2)
+    rnn1 = rnn_module2.make(mode='FAST_RUN')     # WEIRD_STUFF=True variant
+    rnn2 = rnn_module1.make(mode='FAST_COMPILE') # WEIRD_STUFF=False variant
+    topo1 = rnn1.minimizer.step_cost.maker.env.toposort()
+    topo2 = rnn2.minimizer.step_cost.maker.env.toposort()
+    if 0:  # debug: print the two optimized graphs side by side
+        for i in range(len(topo1)):
+            print '1',i, topo1[i]
+            print '2',i, topo2[i]
 
-    rnn1 = rnn_module.make(mode='FAST_RUN')
+
 
-    rng1 = N.random.RandomState(7722342)
-
-    niter=15
+    niter = 3
     for i in xrange(niter):
-        rnn1.minimizer.step_cost(x, y)
+        rnn1.minimizer.step(x, y)
+        rnn2.minimizer.step(x, y)
 
-    minimizer_fn = make_minimizer('sgd', stepsize = 0.001, WEIRD_STUFF = True)
+    #    assert rnn1.n_vis != rnn2.n_vis or rnn1.n_hid != rnn2.n_hid or rnn1.n_out != rnn2.n_out
+        assert (N.abs(rnn1.z0-rnn2.z0)<1e-8).all()
+        assert (N.abs(rnn1.v-rnn2.v)<1e-8).all() and (N.abs(rnn1.b-rnn2.b)<1e-8).all() and (N.abs(rnn1.w-rnn2.w)<1e-8).all() and (N.abs(rnn1.u-rnn2.u)<1e-8).all() and (N.abs(rnn1.c-rnn2.c)<1e-8).all()
 
-    rnn_module = ExampleRNN(n_vis, n_hid, n_out, minimizer_fn)
-    rnn2 = rnn_module.make(mode='FAST_RUN')
+    #    assert b
 
-    for i in xrange(niter):
-        rnn2.minimizer.step_cost(x, y)
-
-    assert rnn1 == rnn2
+if __name__ == '__main__':
+#    from theano.tests import main
+#    main(__file__)
+#    test_example_rnn()
+    test_WEIRD_STUFF()
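
The rewritten test drives the WEIRD_STUFF=False and WEIRD_STUFF=True minimizers in lockstep and asserts after every step that their parameters still agree to within 1e-8. The same testing pattern in a self-contained sketch (the linear model and the two update variants are invented stand-ins for the two minimizers, not pylearn code):

import numpy as N

def step_sub(w, g, lr):
    return w - lr * g   # functional update

def step_inplace(w, g, lr):
    w = w.copy()
    w -= lr * g         # in-place update on a copy
    return w

def test_lockstep():
    rng = N.random.RandomState(7722342)
    x = rng.randn(10, 4)
    y = rng.randn(10, 2)
    w1 = w2 = rng.randn(4, 2) * 0.01
    for i in range(3):
        # gradient of 0.5*||x.w - y||**2 with respect to w
        g1 = N.dot(x.T, N.dot(x, w1) - y)
        g2 = N.dot(x.T, N.dot(x, w2) - y)
        w1 = step_sub(w1, g1, 0.001)
        w2 = step_inplace(w2, g2, 0.001)
        # the two supposedly-equivalent variants must stay numerically identical
        assert (N.abs(w1 - w2) < 1e-8).all()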
--- a/pylearn/algorithms/sgd.py	Wed Dec 03 23:23:03 2008 -0500
+++ b/pylearn/algorithms/sgd.py	Thu Dec 04 11:03:38 2008 -0500
@@ -4,7 +4,7 @@
 from theano.compile import module
 from theano import tensor as T
 
-from .minimizer import minimizer_factory
+from minimizer import minimizer_factory
 
 class StochasticGradientDescent(module.FancyModule):
     """Fixed stepsize gradient descent"""