# HG changeset patch
# User Frederic Bastien
# Date 1246283368 14400
# Node ID ba65e95d122191244963aa38cb45ef4e05641a26
# Parent  f84e6f301a18670fc8aaade8dff64ffad4281156
Removed manual calls to Member and External, as these are deprecated in Theano.

diff -r f84e6f301a18 -r ba65e95d1221 pylearn/algorithms/aa.py
--- a/pylearn/algorithms/aa.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/aa.py	Mon Jun 29 09:49:28 2009 -0400
@@ -16,19 +16,19 @@
         # ACQUIRE/MAKE INPUT
         if not input:
             input = T.matrix('input')
-        self.input = theano.External(input)
+        self.input = input
 
         # HYPER-PARAMETERS
-        self.lr = theano.Member(T.scalar())
+        self.lr = T.scalar()
 
         # PARAMETERS
-        self.w1 = theano.Member(T.matrix())
+        self.w1 = T.matrix()
         if not tie_weights:
-            self.w2 = theano.Member(T.matrix())
+            self.w2 = T.matrix()
         else:
             self.w2 = self.w1.T
-        self.b1 = theano.Member(T.vector())
-        self.b2 = theano.Member(T.vector())
+        self.b1 = T.vector()
+        self.b2 = T.vector()
 
         # HIDDEN LAYER
         self.hidden_activation = T.dot(input, self.w1) + self.b1
@@ -97,7 +97,7 @@
         return T.sum(self.reconstruction_costs)
 
     def build_regularization(self):
-        self.l2_coef = theano.Member(T.scalar())
+        self.l2_coef = T.scalar()
         if self.tie_weights:
             return self.l2_coef * T.sum(self.w1 * self.w1)
         else:
diff -r f84e6f301a18 -r ba65e95d1221 pylearn/algorithms/daa.py
--- a/pylearn/algorithms/daa.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/daa.py	Mon Jun 29 09:49:28 2009 -0400
@@ -49,19 +49,19 @@
         # ACQUIRE/MAKE INPUT
         if not input:
             input = T.matrix('input')
-        self.input = theano.External(input)
+        self.input = input
 
         # HYPER-PARAMETERS
-        self.lr = theano.Member(T.scalar())
+        self.lr = T.scalar()
 
         # PARAMETERS
-        self.w1 = theano.Member(T.matrix())
+        self.w1 = T.matrix()
         if not tie_weights:
-            self.w2 = theano.Member(T.matrix())
+            self.w2 = T.matrix()
         else:
             self.w2 = self.w1.T
-        self.b1 = theano.Member(T.vector())
-        self.b2 = theano.Member(T.vector())
+        self.b1 = T.vector()
+        self.b2 = T.vector()
 
 
         # REGULARIZATION COST
@@ -162,7 +162,7 @@
     """
 
     def build_corrupted_input(self):
-        self.noise_level = theano.Member(T.scalar())
+        self.noise_level = T.scalar()
         return self.random.binomial(T.shape(self.input), 1, 1 - self.noise_level) * self.input
 
     def hid_activation_function(self, activation):
@@ -175,7 +175,7 @@
         return self.reconstruction_cost_function(self.input, output)
 
     def build_regularization(self):
-        self.l2_coef = theano.Member(T.scalar())
+        self.l2_coef = T.scalar()
         if self.tie_weights:
             return self.l2_coef * T.sum(self.w1 * self.w1)
         else:
diff -r f84e6f301a18 -r ba65e95d1221 pylearn/algorithms/rbm.py
--- a/pylearn/algorithms/rbm.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/rbm.py	Mon Jun 29 09:49:28 2009 -0400
@@ -29,9 +29,9 @@
         # symbolic theano stuff
         # what about multidimensional inputs/outputs ? do they have to be
         # flattened or should we used tensors instead ?
-        self.w = w if w is not None else module.Member(T.dmatrix())
-        self.visb = visb if visb is not None else module.Member(T.dvector())
-        self.hidb = hidb if hidb is not None else module.Member(T.dvector())
+        self.w = w if w is not None else T.dmatrix()
+        self.visb = visb if visb is not None else T.dvector()
+        self.hidb = hidb if hidb is not None else T.dvector()
         self.seed = seed;
 
         # 1-step Markov chain
diff -r f84e6f301a18 -r ba65e95d1221 pylearn/algorithms/regressor.py
--- a/pylearn/algorithms/regressor.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/regressor.py	Mon Jun 29 09:49:28 2009 -0400
@@ -13,15 +13,15 @@
         self.regularize = regularize
 
         # ACQUIRE/MAKE INPUT AND TARGET
-        self.input = theano.External(input) if input else T.matrix('input')
-        self.target = theano.External(target) if target else T.matrix('target')
+        self.input = input if input else T.matrix('input')
+        self.target = target if target else T.matrix('target')
 
         # HYPER-PARAMETERS
-        self.lr = theano.Member(T.scalar())
+        self.lr = T.scalar()
 
         # PARAMETERS
-        self.w = theano.Member(T.matrix())
-        self.b = theano.Member(T.vector())
+        self.w = T.matrix()
+        self.b = T.vector()
 
         # OUTPUT
         self.output_activation = T.dot(self.input, self.w) + self.b
@@ -96,7 +96,7 @@
         return T.mean(self.regression_costs)
 
     def build_regularization(self):
-        self.l2_coef = theano.Member(T.scalar())
+        self.l2_coef = T.scalar()
         return self.l2_coef * T.sum(self.w * self.w)
 
     def _instance_initialize(self, obj, input_size = None, output_size = 1, seed = None, **init):
diff -r f84e6f301a18 -r ba65e95d1221 pylearn/algorithms/rnn.py
--- a/pylearn/algorithms/rnn.py	Fri Jun 26 16:44:55 2009 -0400
+++ b/pylearn/algorithms/rnn.py	Mon Jun 29 09:49:28 2009 -0400
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 import numpy as N
-from theano import Op, Apply, tensor as T, Module, Member, Method, Mode, compile
+from theano import Op, Apply, tensor as T, Module, Method, Mode, compile
 from theano.gof import OpSub, TopoOptimizer
 
 from minimizer import make_minimizer # minimizer
@@ -121,15 +121,15 @@
         self.n_out = n_out
 
         #affine transformatoin x -> latent space
-        self.v, self.b = Member(T.dmatrix()), Member(T.dvector())
+        self.v, self.b = T.dmatrix(), T.dvector()
         input_transform = affine(self.v, self.b)
 
         #recurrent weight matrix in latent space
-        self.z0 = Member(T.dvector())
-        self.w = Member(T.dmatrix())
+        self.z0 = T.dvector()
+        self.w = T.dmatrix()
 
         #affine transformation latent -> output space
-        self.u, self.c = Member(T.dmatrix()), Member(T.dvector())
+        self.u, self.c = T.dmatrix(), T.dvector()
         output_transform = affine(self.u, self.c)
 
         self.params = [self.v, self.b, self.w, self.u, self.c]
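
The pattern this patch applies is the same in every file: attributes of a theano.Module no longer need explicit wrapping in theano.Member (parameters, hyper-parameters) or theano.External (inputs); a bare tensor Variable assigned to the Module is classified automatically. Below is a minimal sketch of the before/after on a toy module. ScaleModule, its scale method, and the usage lines are hypothetical names for illustration only, and the code assumes the 2009-era theano.Module API used in the patched files (Module was removed from later Theano releases, so this will not run on modern versions).

import theano
from theano import tensor as T

class ScaleModule(theano.Module):
    # Hypothetical toy module illustrating the migration in this patch.
    def __init__(self):
        super(ScaleModule, self).__init__()
        # Deprecated style removed by this patch:
        #   self.coef = theano.Member(T.scalar())
        #   self.input = theano.External(T.matrix('input'))
        # Style after the patch: assign the Variables directly and let
        # Module infer which are members and which are inputs.
        self.coef = T.scalar()
        self.input = T.matrix('input')
        # A callable entry point: scales the input by the stored coefficient.
        self.scale = theano.Method([self.input], self.input * self.coef)

# Hypothetical usage, assuming Module's make() compiles the module and
# exposes members as settable attributes:
#   mod = ScaleModule().make()
#   mod.coef = 2.0
#   print(mod.scale([[1, 2], [3, 4]]))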