changeset 435:d8129a09ffb1

bug fix in output: offset labels by the selected class subset (digits/upper/lower)
author Guillaume Sicard <guitch21@gmail.com>
date Mon, 03 May 2010 06:11:30 -0400
parents 310c730516af
children 0ca069550abd
files baseline/mlp/ratio_classes/mlp_nist_ratio.py
diffstat 1 files changed, 37 insertions(+), 19 deletions(-)
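
The change amounts to remapping the 62-class NIST labels onto the reduced target range before they reach the model. The following standalone sketch is not code from the repository (the helper name remap_labels is made up for illustration); it mirrors the class_offset logic added in the diff below, assuming the NIST label layout of digits 0-9, uppercase 10-35 and lowercase 36-61:

import numpy

def remap_labels(y, main_class):
    # hypothetical helper: mirrors the class_offset logic added in the diff below
    if main_class == "u":        # uppercase letters occupy labels 10..35
        class_offset = 10
    elif main_class == "l":      # lowercase letters occupy labels 36..61
        class_offset = 36
    else:                        # digits ("d") already start at 0
        class_offset = 0
    return y - class_offset

# example: lowercase labels 36, 37 and 61 become targets 0, 1 and 25
y = numpy.array([36, 37, 61])
print remap_labels(y, "l")       # -> [ 0  1 25]
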
--- a/baseline/mlp/ratio_classes/mlp_nist_ratio.py	Mon May 03 03:08:34 2010 -0400
+++ b/baseline/mlp/ratio_classes/mlp_nist_ratio.py	Mon May 03 06:11:30 2010 -0400
@@ -24,8 +24,7 @@
 """
 __docformat__ = 'restructedtext en'
 
-import ift6266
-from scripts import setup_batches
+import setup_batches
 import pdb
 import numpy
 
@@ -50,7 +49,7 @@
 
 
 
-    def __init__(self, input, n_in, n_hidden, n_out,learning_rate):
+    def __init__(self, input, n_in, n_hidden, n_out,learning_rate, test_subclass):
         """Initialize the parameters for the multilayer perceptron
 
         :param input: symbolic variable that describes the input of the 
@@ -113,12 +112,20 @@
 
         # compute prediction as class whose probability is maximal in 
         # symbolic form
-        self.y_pred = T.argmax( self.p_y_given_x, axis =1)
-        self.y_pred_num = T.argmax( self.p_y_given_x[0:9], axis =1)
+        #self.y_pred = T.argmax( self.p_y_given_x, axis =1)
+        #self.y_pred_num = T.argmax( self.p_y_given_x[0:9], axis =1)
         
-        
-        
-        
+        self.test_subclass = test_subclass
+
+        #if (self.test_subclass == "u"):
+        #  self.y_pred = T.argmax( self.p_y_given_x[10:35], axis =1) + 10
+        #elif (self.test_subclass == "l"):
+        #  self.y_pred = T.argmax( self.p_y_given_x[35:], axis =1) + 35
+        #elif (self.test_subclass == "d"):
+        #  self.y_pred = T.argmax( self.p_y_given_x[0:9], axis =1)
+        #else:
+        self.y_pred = T.argmax( self.p_y_given_x, axis =1)
+
         # L1 norm ; one regularization option is to enforce L1 norm to 
         # be small 
         self.L1     = abs(self.W1).sum() + abs(self.W2).sum()
@@ -178,9 +185,9 @@
                         nb_max_exemples=1000000,\
                         batch_size=20,\
                         nb_hidden = 500,\
-                        nb_targets = 62,\
+                        nb_targets = 26,\
 			tau=1e6,\
-			main_class="d",\
+			main_class="l",\
 			start_ratio=1,\
 			end_ratio=1):
    
@@ -216,8 +223,9 @@
     classifier = MLP( input=x.reshape((batch_size,32*32)),\
                         n_in=32*32,\
                         n_hidden=nb_hidden,\
-                        n_out=nb_targets,
-                        learning_rate=learning_rate)
+                        n_out=nb_targets,\
+                        learning_rate=learning_rate,\
+                        test_subclass=main_class)
                         
                         
    
@@ -285,7 +293,13 @@
     n_iter=max(1,n_iter) # run at least once on short debug call
     time_n=0 #in unit of exemples
     
-    
+    if main_class == "u":
+        class_offset = 10
+    elif main_class == "l":
+        class_offset = 36
+    else:
+        class_offset = 0
+
    
     if verbose == True:
         print 'looping at most %d times through the data set' %n_iter
@@ -302,6 +316,9 @@
         # get the minibatches corresponding to `iter` modulo
         # `len(train_batches)`
         x,y = train_batches[ minibatch_index ]
+
+        y = y - class_offset
+
         # convert to float
         x_float = x/255.0
         cost_ij = train_model(x_float,y)
@@ -312,6 +329,7 @@
             this_validation_loss = 0.
             for x,y in validation_batches:
                 # sum up the errors for each minibatch
+                y = y - class_offset
                 x_float = x/255.0
                 this_validation_loss += test_model(x_float,y)
             # get the average by dividing with the number of minibatches
@@ -323,6 +341,7 @@
             this_train_loss=0
             for x,y in train_batches:
                 # sum up the errors for each minibatch
+                y = y - class_offset
                 x_float = x/255.0
                 this_train_loss += test_model(x_float,y)
             # get the average by dividing with the number of minibatches
@@ -355,6 +374,7 @@
                 # test it on the test set
                 test_score = 0.
                 for x,y in test_batches:
+                    y = y - class_offset
                     x_float=x/255.0
                     test_score += test_model(x_float,y)
                 test_score /= len(test_batches)
@@ -381,6 +401,7 @@
                 #calculation before aborting
                 patience = iter+validation_frequency+1
                 for x,y in test_batches:
+                    y = y - class_offset
                     x_float=x/255.0
                     test_score += test_model(x_float,y)
                 test_score /= len(test_batches)
@@ -421,13 +442,10 @@
 
 def jobman_mlp_full_nist(state,channel):
     (train_error,validation_error,test_error,nb_exemples,time)=mlp_full_nist(learning_rate=state.learning_rate,\
-                                                                nb_max_exemples=state.nb_max_exemples,\
                                                                 nb_hidden=state.nb_hidden,\
-                                                                adaptive_lr=state.adaptive_lr,\
-								tau=state.tau,\
-								main_class=state.main_class,\
-								start_ratio=state.start_ratio,\
-								end_ratio=state.end_ratio)
+                                                                main_class=state.main_class,\
+                                                                start_ratio=state.ratio,\
+                                                                end_ratio=state.ratio)
     state.train_error=train_error
     state.validation_error=validation_error
     state.test_error=test_error