changeset 121:2ca8dccba270

debugging mlp.py
author Yoshua Bengio <bengioy@iro.umontreal.ca>
date Wed, 07 May 2008 16:08:18 -0400
parents 5fa46297191b
children 9c4f522526bf 4efe6d36c061
files mlp.py nnet_ops.py test_mlp.py
diffstat 3 files changed, 33 insertions(+), 26 deletions(-)
--- a/mlp.py	Wed May 07 15:46:24 2008 -0400
+++ b/mlp.py	Wed May 07 16:08:18 2008 -0400
@@ -1,7 +1,6 @@
 
 from learner import *
 from theano import tensor as t
-from theano.scalar import as_scalar
 from nnet_ops import *
 
 # this is one of the simplest examples of a learner, and illustrates
@@ -65,6 +64,27 @@
 
     """
 
+    def __init__(self,n_hidden,n_classes,learning_rate,init_range=1.):
+        self._n_outputs = n_classes
+        self._n_hidden = n_hidden
+        self._init_range = init_range
+        self.learning_rate = learning_rate # this is the float
+        self._learning_rate = t.scalar('learning_rate') # this is the symbol
+        self._input = t.matrix('input') # n_examples x n_inputs
+        self._target = t.matrix('target','int32') # n_examples x n_outputs
+        self._L2_regularizer = t.scalar('L2_regularizer')
+        self._W1 = t.matrix('W1')
+        self._W2 = t.matrix('W2')
+        self._b1 = t.row('b1')
+        self._b2 = t.row('b2')
+        self._regularization_term = self._L2_regularizer * (t.dot(self._W1,self._W1) + t.dot(self._W2,self._W2))
+        self._output_activations = self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
+        self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target)
+        self._output_class = t.argmax(self._output,1)
+        self._class_error = self._output_class != self._target
+        self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
+        MinibatchUpdatesTLearner.__init__(self)
+            
     def attributeNames(self):
         return ["parameters","b1","W2","b2","W2", "L2_regularizer","regularization_term"]
 
@@ -95,28 +115,6 @@
             output_fields += ["class_error", "nll"]
         return output_fields
         
-    def __init__(self,n_hidden,n_classes,learning_rate,init_range=1.):
-        self._n_outputs = n_classes
-        self._n_hidden = n_hidden
-        self._init_range = init_range
-        self.learning_rate = learning_rate # this is the float
-        self._learning_rate = t.scalar('learning_rate') # this is the symbol
-        self._input = t.matrix('input') # n_examples x n_inputs
-        self._target = t.matrix('target') # n_examples x n_outputs
-        self._L2_regularizer = as_scalar(0.,'L2_regularizer')
-        self._W1 = t.matrix('W1')
-        self._W2 = t.matrix('W2')
-        self._b1 = t.row('b1')
-        self._b2 = t.row('b2')
-        self._regularizer = self._L2_regularizer * (t.dot(self._W1,self._W1) + t.dot(self._W2,self._W2))
-        self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
-        self._output = t.softmax(self._output_activations)
-        self._output_class = t.argmax(self._output,1)
-        self._class_error = self._output_class != self._target
-        self._nll,self._output = crossentropy_softmax_1hot(self._output_activation,self._target)
-        self._minibatch_criterion = self._nll + self._regularizer / t.shape(self._input)[0]
-        MinibatchUpdatesTLearner.__init__(self)
-            
     def allocate(self,minibatch):
         minibatch_n_inputs  = minibatch["input"].shape[1]
         if not self._n_inputs:
@@ -234,7 +232,7 @@
     def __init__(self):
         self._input = t.matrix('input') # n_examples x n_inputs
         self._target = t.matrix('target') # n_examples x n_outputs
-        self._L2_regularizer = as_scalar(0.,'L2_regularizer')
+        self._L2_regularizer = t.scalar('L2_regularizer')
         self._theta = t.matrix('theta')
         self._W = self._theta[:,1:] 
         self._b = self._theta[:,0]
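
Taken together, the mlp.py changes move __init__ ahead of the accessor methods, take nll and output directly from crossentropy_softmax_1hot (fixing the _output_activation typo in the old copy), and replace the removed theano.scalar.as_scalar with t.scalar in both constructors. For reference, a plain NumPy sketch of the criterion the new __init__ expresses symbolically; this is a reading of the intent, not the committed code: t.dot(W,W) only type-checks for square matrices, so the L2 term is taken here as a sum of squared weights, and the target as the 1-d vector of class indices that crossentropy_softmax_1hot expects:

    import numpy as np

    def minibatch_criterion(x, y_idx, W1, b1, W2, b2, L2_regularizer):
        # hidden layer: tanh(b1 + x W1^T), matching _output_activations above
        h = np.tanh(b1 + np.dot(x, W1.T))
        # output pre-activations: b2 + h W2^T
        a = b2 + np.dot(h, W2.T)
        # row-wise softmax, shifted by the row max for numerical stability
        e = np.exp(a - a.max(axis=1, keepdims=True))
        output = e / e.sum(axis=1, keepdims=True)
        # per-example negative log-likelihood of the correct class
        nll = -np.log(output[np.arange(x.shape[0]), y_idx])
        # L2 penalty, divided by the minibatch size as in _minibatch_criterion
        regularization_term = L2_regularizer * ((W1 ** 2).sum() + (W2 ** 2).sum())
        return nll + regularization_term / x.shape[0]
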
--- a/nnet_ops.py	Wed May 07 15:46:24 2008 -0400
+++ b/nnet_ops.py	Wed May 07 16:08:18 2008 -0400
@@ -99,10 +99,10 @@
             raise ValueError('x must be 2-d tensor of floats')
         if b.type.ndim != 1 \
                 or b.type.dtype not in ['float32', 'float64']:
-            raise ValueError('x must be 1-d tensor of floats')
+            raise ValueError('b must be 1-d tensor of floats')
         if y_idx.type.ndim != 1 \
                 or y_idx.type.dtype not in ['int32', 'int64']:
-            raise ValueError('x must be 1-d tensor of ints')
+            raise ValueError('y_idx must be 1-d tensor of ints')
 
 #       TODO: Is this correct? It used to be y, not y_idx
         nll = tensor.Tensor(x.type.dtype, 
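
With the messages corrected, each check now names the input it actually rejects. A minimal sketch of inputs that satisfy all three checks; the two-argument call and its two outputs follow the usage in mlp.py above, and ivector is assumed as shorthand for a 1-d int32 vector:

    from theano import tensor
    from nnet_ops import crossentropy_softmax_1hot

    # n_examples x n_classes float activations; one integer class index
    # per example, as the corrected 'y_idx' message requires
    activations = tensor.matrix('activations')
    y_idx = tensor.ivector('y_idx')
    nll, output = crossentropy_softmax_1hot(activations, y_idx)
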
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test_mlp.py	Wed May 07 16:08:18 2008 -0400
@@ -0,0 +1,9 @@
+
+from mlp import *
+
+def test0():
+    nnet = OneHiddenLayerNNetClassifier(10,3,.1)
+
+
+test0()
+
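
test0 only exercises construction. A hedged follow-up test, sketched under the assumption that Theano variables keep the name passed to t.scalar, could pin down the attributes the relocated __init__ is expected to define:

    from mlp import OneHiddenLayerNNetClassifier

    def test1():
        # same arguments as test0: 10 hidden units, 3 classes, learning rate .1
        nnet = OneHiddenLayerNNetClassifier(10, 3, .1)
        # the constructor keeps both the float and its symbolic counterpart
        assert nnet.learning_rate == .1
        # assumes the symbolic scalar retains its constructor name
        assert nnet._learning_rate.name == 'learning_rate'
        # the training criterion must have been assembled
        assert nnet._minibatch_criterion is not None

    test1()
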