changeset 299:eded3cb54930

small bug fixed
author Thierry Bertin-Mahieux <bertinmt@iro.umontreal.ca>
date Fri, 06 Jun 2008 17:58:45 -0400
parents 5987415496df
children 7c5e5356cb11
files _test_dataset.py denoising_aa.py mlp_factory_approach.py
diffstat 3 files changed, 13 insertions(+), 6 deletions(-)
--- a/_test_dataset.py	Fri Jun 06 17:55:14 2008 -0400
+++ b/_test_dataset.py	Fri Jun 06 17:58:45 2008 -0400
@@ -480,7 +480,7 @@
     
     def test_ApplyFunctionDataSet(self):
         ds = T_Exotic1.DataSet()
-        dsa = ApplyFunctionDataSet(ds,lambda x,y,z: ([x[-1]],[y*10],[int(z)]),['input','target','name'],minibatch_mode=False) #broken!!!!!!
+        dsa = ApplyFunctionDataSet(ds,lambda x,y,z: (x[-1],y*10,int(z)),['input','target','name'],minibatch_mode=False) #broken!!!!!!
         for k in range(len(dsa)):
             res = dsa[k]
             self.failUnless(ds[k]('input')[0][-1] == res('input')[0] , 'problem in first applied function')
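
The change above drops the singleton-list wrapping: with minibatch_mode=False the applied function maps one example's field values directly to one example's field values. A minimal sketch of that per-example contract, using a stand-in helper rather than the real ApplyFunctionDataSet (apply_per_example and the sample data are illustrative, not pylearn API):

    # Illustration only: mimics the per-example contract the fixed lambda follows,
    # not the actual ApplyFunctionDataSet implementation.
    def apply_per_example(examples, fn, field_names):
        # examples: list of (input, target, name) tuples
        # fn: maps one example's field values to new field values
        results = []
        for x, y, z in examples:
            results.append(dict(zip(field_names, fn(x, y, z))))
        return results

    examples = [([1, 2, 3], 4, '7'), ([5, 6], 2, '9')]
    out = apply_per_example(examples,
                            lambda x, y, z: (x[-1], y * 10, int(z)),
                            ['input', 'target', 'name'])
    assert out[0] == {'input': 3, 'target': 40, 'name': 7}
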
--- a/denoising_aa.py	Fri Jun 06 17:55:14 2008 -0400
+++ b/denoising_aa.py	Fri Jun 06 17:58:45 2008 -0400
@@ -106,11 +106,14 @@
         self.denoising_autoencoder_formula = corruption_formula + autoencoder.rename(x='corrupted_x')
         
     def __call__(self, training_set=None):
-        """ Allocate and optionnaly train a model"""
+        """ Allocate and optionnaly train a model
+
+        @TODO enables passing in training and valid sets, instead of cutting one set in 80/20
+        """
         model = DenoisingAutoEncoderModel(self)
         if training_set:
             print 'DenoisingAutoEncoder(): what do I do if training_set????'
-            # copied from mlp_factory_approach:
+            # copied from old mlp_factory_approach:
             if len(trainset) == sys.maxint:
                 raise NotImplementedError('Learning from infinite streams is not supported')
             nval = int(self.validation_portion * len(trainset))
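
The new @TODO refers to the cut computed just below (nval = int(self.validation_portion * len(trainset))). A rough sketch of that 80/20 behaviour and of the proposed alternative of accepting an explicit validation set; split_train_valid and the 0.2 default are assumptions for illustration, not this module's API:

    # Sketch of the split the @TODO wants to make optional.
    def split_train_valid(trainset, validation_portion=0.2, validset=None):
        if validset is not None:
            # proposed behaviour: the caller supplies its own validation set
            return trainset, validset
        nval = int(validation_portion * len(trainset))
        # last nval examples become the validation set, the rest stay for training
        return trainset[:-nval], trainset[-nval:]

    train, valid = split_train_valid(list(range(100)))
    assert len(train) == 80 and len(valid) == 20
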
--- a/mlp_factory_approach.py	Fri Jun 06 17:55:14 2008 -0400
+++ b/mlp_factory_approach.py	Fri Jun 06 17:58:45 2008 -0400
@@ -4,7 +4,8 @@
 import theano
 from theano import tensor as T
 
-from pylearn import dataset, nnet_ops, stopper, LookupList, filetensor
+import dataset, nnet_ops, stopper, filetensor
+from lookup_list import LookupList
 
 
 class AbstractFunction (Exception): pass
@@ -54,7 +55,9 @@
             return d[key]
 
         def update_minibatch(self, minibatch):
-            #assert isinstance(minibatch, LookupList) # why false???
+            if not isinstance(minibatch, LookupList):
+                print type(minibatch)
+            assert isinstance(minibatch, LookupList)
             self.update_fn(minibatch['input'], minibatch['target'], *self.params)
 
         def update(self, dataset, 
@@ -216,6 +219,7 @@
         l2coef = T.constant(l2coef_val)
         input = T.matrix() # n_examples x n_inputs
         target = T.ivector() # len: n_examples
+        #target = T.matrix()
         W2, b2 = T.matrix(), T.vector()
 
         W1, b1 = T.matrix(), T.vector()
@@ -224,7 +228,7 @@
 
         params = [W1, b1, W2, b2] 
         activations = b2 + T.dot(hid, W2)
-        nll, predictions = nnet_ops.crossentropy_softmax_1hot(activations, target)
+        nll, predictions = nnet_ops.crossentropy_softmax_1hot(activations, target )
         regularization = l2coef * T.sum(W2*W2) + hid_regularization
         output_class = T.argmax(activations,1)
         loss_01 = T.neq(output_class, target)
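
The integer target vector (T.ivector(), rather than the commented-out one-hot T.matrix()) is what gets fed to crossentropy_softmax_1hot together with the activations. A numpy sketch of the quantity this part of the graph computes, written from the standard softmax/negative-log-likelihood definitions rather than from the op's actual implementation:

    import numpy as np

    def softmax_nll(activations, target):
        # activations: (n_examples, n_classes); target: one integer class per example
        a = activations - activations.max(axis=1, keepdims=True)  # subtract max for numerical stability
        probs = np.exp(a) / np.exp(a).sum(axis=1, keepdims=True)
        nll = -np.log(probs[np.arange(len(target)), target])      # per-example negative log-likelihood
        return nll, probs

    acts = np.array([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])
    target = np.array([0, 2])
    nll, probs = softmax_nll(acts, target)
    assert np.allclose(probs.sum(axis=1), 1.0)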