changeset 784:ba65e95d1221

removed manual calls to theano.Member and theano.External, as these are deprecated in Theano.
author Frederic Bastien <bastienf@iro.umontreal.ca>
date Mon, 29 Jun 2009 09:49:28 -0400
parents f84e6f301a18
children 12f587e37ee3
files pylearn/algorithms/aa.py pylearn/algorithms/daa.py pylearn/algorithms/rbm.py pylearn/algorithms/regressor.py pylearn/algorithms/rnn.py
diffstat 5 files changed, 29 insertions(+), 29 deletions(-)
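The pattern applied throughout this changeset: symbolic variables are assigned
directly as module attributes instead of being wrapped in theano.Member /
theano.External. A minimal sketch of the new style, assuming a Theano of this
era is installed (T.scalar, T.matrix, and T.vector are ordinary
symbolic-variable constructors):

    import theano.tensor as T

    # hyper-parameter and parameters as plain symbolic variables;
    # previously each was wrapped as theano.Member(...)
    lr = T.scalar()
    w1 = T.matrix()
    b1 = T.vector()

    # an input no longer needs theano.External(...)
    input = T.matrix('input')
    hidden_activation = T.dot(input, w1) + b1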
--- a/pylearn/algorithms/aa.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/aa.py	Mon Jun 29 09:49:28 2009 -0400
@@ -16,19 +16,19 @@
         # ACQUIRE/MAKE INPUT
         if not input:
             input = T.matrix('input')
-        self.input = theano.External(input)
+        self.input = input
 
         # HYPER-PARAMETERS
-        self.lr = theano.Member(T.scalar())
+        self.lr = T.scalar()
 
         # PARAMETERS
-        self.w1 = theano.Member(T.matrix())
+        self.w1 = T.matrix()
         if not tie_weights:
-            self.w2 = theano.Member(T.matrix())
+            self.w2 = T.matrix()
         else:
             self.w2 = self.w1.T
-        self.b1 = theano.Member(T.vector())
-        self.b2 = theano.Member(T.vector())
+        self.b1 = T.vector()
+        self.b2 = T.vector()
 
         # HIDDEN LAYER
         self.hidden_activation = T.dot(input, self.w1) + self.b1
@@ -97,7 +97,7 @@
         return T.sum(self.reconstruction_costs)
 
     def build_regularization(self):
-        self.l2_coef = theano.Member(T.scalar())
+        self.l2_coef = T.scalar()
         if self.tie_weights:
             return self.l2_coef * T.sum(self.w1 * self.w1)
         else:
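For reference, the tied-weights branch of build_regularization computes an L2
penalty on the single weight matrix. A hedged sketch of that branch (the
untied branch is cut off by the hunk above; presumably it adds the
corresponding w2 term):

    import theano.tensor as T

    l2_coef = T.scalar()
    w1 = T.matrix()
    # tied weights: only w1 exists, so the penalty is l2_coef * ||w1||^2
    penalty = l2_coef * T.sum(w1 * w1)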
--- a/pylearn/algorithms/daa.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/daa.py	Mon Jun 29 09:49:28 2009 -0400
@@ -49,19 +49,19 @@
         # ACQUIRE/MAKE INPUT
         if not input:
             input = T.matrix('input')
-        self.input = theano.External(input)
+        self.input = input
 
         # HYPER-PARAMETERS
-        self.lr = theano.Member(T.scalar())
+        self.lr = T.scalar()
 
         # PARAMETERS
-        self.w1 = theano.Member(T.matrix())
+        self.w1 = T.matrix()
         if not tie_weights:
-            self.w2 = theano.Member(T.matrix())
+            self.w2 = T.matrix()
         else:
             self.w2 = self.w1.T
-        self.b1 = theano.Member(T.vector())
-        self.b2 = theano.Member(T.vector())
+        self.b1 = T.vector()
+        self.b2 = T.vector()
 
 
         # REGULARIZATION COST
@@ -162,7 +162,7 @@
     """
 
     def build_corrupted_input(self):
-        self.noise_level = theano.Member(T.scalar())
+        self.noise_level = T.scalar()
         return self.random.binomial(T.shape(self.input), 1, 1 - self.noise_level) * self.input
 
     def hid_activation_function(self, activation):
@@ -175,7 +175,7 @@
         return self.reconstruction_cost_function(self.input, output)
 
     def build_regularization(self):
-        self.l2_coef = theano.Member(T.scalar())
+        self.l2_coef = T.scalar()
         if self.tie_weights:
             return self.l2_coef * T.sum(self.w1 * self.w1)
         else:
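build_corrupted_input above masks each input entry with a Bernoulli draw. A
sketch of that logic as a standalone helper, assuming `random` is the module's
symbolic random generator, whose binomial takes a shape, a number of trials,
and a success probability, exactly as called above:

    import theano.tensor as T

    def corrupt(random, input, noise_level):
        # keep each entry with probability 1 - noise_level, zero it otherwise
        mask = random.binomial(T.shape(input), 1, 1 - noise_level)
        return mask * input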
--- a/pylearn/algorithms/rbm.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/rbm.py	Mon Jun 29 09:49:28 2009 -0400
@@ -29,9 +29,9 @@
         # symbolic theano stuff
         # what about multidimensional inputs/outputs? do they have to be
         # flattened or should we use tensors instead?
-        self.w = w if w is not None else module.Member(T.dmatrix())
-        self.visb = visb if visb is not None else module.Member(T.dvector())
-        self.hidb = hidb if hidb is not None else module.Member(T.dvector())
+        self.w = w if w is not None else T.dmatrix()
+        self.visb = visb if visb is not None else T.dvector()
+        self.hidb = hidb if hidb is not None else T.dvector()
         self.seed = seed
        
         # 1-step Markov chain
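The RBM change drops module.Member from the "use the caller's variable or make
a fresh one" idiom. A sketch of the resulting pattern (make_rbm_params is a
hypothetical name, not part of the module):

    import theano.tensor as T

    def make_rbm_params(w=None, visb=None, hidb=None):
        # accept pre-built symbolic variables, or create fresh ones
        w = w if w is not None else T.dmatrix()
        visb = visb if visb is not None else T.dvector()
        hidb = hidb if hidb is not None else T.dvector()
        return w, visb, hidb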
--- a/pylearn/algorithms/regressor.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/regressor.py	Mon Jun 29 09:49:28 2009 -0400
@@ -13,15 +13,15 @@
         self.regularize = regularize
 
         # ACQUIRE/MAKE INPUT AND TARGET
-        self.input = theano.External(input) if input else T.matrix('input')
-        self.target = theano.External(target) if target else T.matrix('target')
+        self.input = input if input else T.matrix('input')
+        self.target = target if target else T.matrix('target')
 
         # HYPER-PARAMETERS
-        self.lr = theano.Member(T.scalar())
+        self.lr = T.scalar()
 
         # PARAMETERS
-        self.w = theano.Member(T.matrix())
-        self.b = theano.Member(T.vector())
+        self.w = T.matrix()
+        self.b = T.vector()
 
         # OUTPUT
         self.output_activation = T.dot(self.input, self.w) + self.b
@@ -96,7 +96,7 @@
         return T.mean(self.regression_costs)
 
     def build_regularization(self):
-        self.l2_coef = theano.Member(T.scalar())
+        self.l2_coef = T.scalar()
         return self.l2_coef * T.sum(self.w * self.w)
 
     def _instance_initialize(self, obj, input_size = None, output_size = 1, seed = None, **init):
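The same idea applies to the regressor's input and target: a caller-supplied
variable is used as-is, otherwise a fresh matrix is created, with no
theano.External wrapping. A sketch using an explicit None check (the module
code above relies on the variables' truthiness instead; make_io is a
hypothetical helper name):

    import theano.tensor as T

    def make_io(input=None, target=None):
        # mirror the regressor's input/target setup
        input = input if input is not None else T.matrix('input')
        target = target if target is not None else T.matrix('target')
        return input, target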
--- a/pylearn/algorithms/rnn.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/rnn.py	Mon Jun 29 09:49:28 2009 -0400
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 import numpy as N
-from theano import Op, Apply, tensor as T, Module, Member, Method, Mode, compile
+from theano import Op, Apply, tensor as T, Module, Method, Mode, compile
 from theano.gof import OpSub, TopoOptimizer
 
 from minimizer import make_minimizer # minimizer
@@ -121,15 +121,15 @@
         self.n_out = n_out
 
         #affine transformation x -> latent space
-        self.v, self.b = Member(T.dmatrix()), Member(T.dvector())
+        self.v, self.b = T.dmatrix(), T.dvector()
         input_transform = affine(self.v, self.b)
 
         #recurrent weight matrix in latent space
-        self.z0 = Member(T.dvector())
-        self.w = Member(T.dmatrix())
+        self.z0 = T.dvector()
+        self.w = T.dmatrix()
 
         #affine transformation latent -> output space
-        self.u, self.c = Member(T.dmatrix()), Member(T.dvector())
+        self.u, self.c = T.dmatrix(), T.dvector()
         output_transform = affine(self.u, self.c)
 
         self.params = [self.v, self.b, self.w, self.u, self.c]
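The RNN builds its input and output maps from an affine helper that is not
shown in this changeset. A hypothetical sketch consistent with how it is
called above, where affine(v, b) returns a callable:

    import theano.tensor as T

    def affine(v, b):
        # hypothetical: return the affine map x -> dot(x, v) + b
        return lambda x: T.dot(x, v) + b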