diff baseline/mlp/mlp_nist.py @ 404:1509b9bba4cc

added digit/char error
author xaviermuller
date Wed, 28 Apr 2010 11:45:14 -0400
parents 60a4432b8071
children 195f95c3d461
--- a/baseline/mlp/mlp_nist.py	Wed Apr 28 11:30:37 2010 -0400
+++ b/baseline/mlp/mlp_nist.py	Wed Apr 28 11:45:14 2010 -0400
@@ -169,10 +169,9 @@
     
 
 
-    # allocate symbolic variables for the data
-    x = T.fmatrix()  # the data is presented as rasterized images
-    y = T.lvector()  # the labels are presented as 1D vector of 
-                          # [long int] labels
+    
+
+    
 
     # load the data set and create an mlp based on the dimensions of the model
     model=numpy.load(model_name)
@@ -180,11 +179,23 @@
     W2=model['W2']
     b1=model['b1']
     b2=model['b2']
-    nb_hidden=b1.shape[0]
-    input_dim=W1.shape[0]
-    nb_targets=b2.shape[0]
-    learning_rate=0.1
-
+    
+    total_error_count=0.0      # errors over all 62 classes
+    total_exemple_count=0.0
+    
+    nb_error_count=0.0         # digit (0-9) errors
+    nb_exemple_count=0.0
+    
+    char_error_count=0.0       # character errors, counted ignoring case
+    char_exemple_count=0.0
+    
+    min_error_count=0.0        # presumably lower-case (minuscule) counters; unused in this revision
+    min_exemple_count=0.0
+    
+    maj_error_count=0.0        # presumably upper-case (majuscule) counters; unused in this revision
+    maj_exemple_count=0.0
+    
+    
 
     if data_set==0:
         dataset=datasets.nist_all()
@@ -192,42 +203,51 @@
         dataset=datasets.nist_P07()
 
 
-    classifier = MLP( input=x,\
-                        n_in=input_dim,\
-                        n_hidden=nb_hidden,\
-                        n_out=nb_targets,
-                        learning_rate=learning_rate)
-
-
-    #overwrite classifier weights with weights from the saved model
-    classifier.W1.value=W1
-    classifier.W2.value=W2
-    classifier.b1.value=b1
-    classifier.b2.value=b2
-
-
-    cost = classifier.negative_log_likelihood(y) \
-         + 0.0 * classifier.L1 \
-         + 0.0 * classifier.L2_sqr 
-
-    # compiling a theano function that computes the mistakes that are made by 
-    # the model on a minibatch
-    test_model = theano.function([x,y], classifier.errors(y))
-
-
 
     #get the test error
     #use a batch size of 1 so we can get the sub-class error
     #without messing with matrices (will be upgraded later)
     test_score=0
     temp=0
-    for xt,yt in dataset.test(20):
-        test_score += test_model(xt,yt)
-        temp = temp+1
-    test_score /= temp
+    for xt,yt in dataset.test(1):
+        
+        total_exemple_count = total_exemple_count +1
+        #compute the pre-activation of the hidden layer
+        a0=numpy.dot(numpy.transpose(W1),numpy.transpose(xt[0])) + b1
+        #apply the tanh non-linearity to the hidden layer
+        a0_out=numpy.tanh(a0)
+        
+        #compute the pre-activation of the output layer
+        a1= numpy.dot(numpy.transpose(W2),a0_out) + b2
+        #apply the softmax non-linearity to the output layer
+        a1_exp = numpy.exp(a1)
+        sum_a1=numpy.sum(a1_exp)
+        a1_out=a1_exp/sum_a1
+        
+        predicted_class=numpy.argmax(a1_out)
+        wanted_class=yt[0]
+        if(predicted_class!=wanted_class):
+            total_error_count = total_error_count +1
+            
+        #digit error: restrict the argmax to the 10 digit classes
+        if(wanted_class<10):
+            nb_exemple_count=nb_exemple_count + 1
+            predicted_class=numpy.argmax(a1_out[0:10])
+            if(predicted_class!=wanted_class):
+                nb_error_count = nb_error_count +1
+                
+        if(wanted_class>9): #character error: a prediction off by exactly 26 (case confusion) is not counted
+            char_exemple_count=char_exemple_count + 1
+            predicted_class=numpy.argmax(a1_out[10:62])+10
+            if((predicted_class!=wanted_class) and ((predicted_class+26)!=wanted_class) and ((predicted_class-26)!=wanted_class)):
+               char_error_count = char_error_count +1
+            
+            
 
-
-    return test_score*100
+    print('total error = %f' % ((total_error_count/total_exemple_count)*100.0))
+    print('digit error = %f' % ((nb_error_count/nb_exemple_count)*100.0))
+    print('char error = %f' % ((char_error_count/char_exemple_count)*100.0))
+    return (total_error_count/total_exemple_count)*100.0
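
A note on the model file: numpy.load followed by indexing by name (model['W1'], model['b1'], ...) implies the model is an .npz archive, e.g. one written by numpy.savez. A minimal sketch of producing and reloading such a file; the file name and shapes here are purely illustrative, not taken from the changeset:

    import numpy

    # hypothetical shapes: 1024 inputs, 500 hidden units, 62 NIST classes
    W1 = numpy.zeros((1024, 500)); b1 = numpy.zeros(500)
    W2 = numpy.zeros((500, 62));   b2 = numpy.zeros(62)

    numpy.savez('mlp_model.npz', W1=W1, W2=W2, b1=b1, b2=b2)
    model = numpy.load('mlp_model.npz')
    assert model['W1'].shape == (1024, 500)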
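The new evaluation loop computes the forward pass by hand in numpy rather than through a compiled Theano function: a tanh hidden layer followed by a softmax output, one example at a time. A standalone sketch of the same computation, assuming the shapes implied by the dot products in the patch (W1 is n_in x n_hidden, W2 is n_hidden x n_out, x is one flattened image); subtracting the max before exponentiating is an added numerical-stability step that the patch itself skips:

    import numpy

    def mlp_forward(x, W1, b1, W2, b2):
        # hidden layer: h = tanh(W1^T x + b1)
        h = numpy.tanh(numpy.dot(numpy.transpose(W1), x) + b1)
        # output layer: softmax(W2^T h + b2)
        a = numpy.dot(numpy.transpose(W2), h) + b2
        e = numpy.exp(a - numpy.max(a))   # stability shift, not in the original
        return e / numpy.sum(e)

    # predicted class for one example, as in the loop above:
    # predicted_class = numpy.argmax(mlp_forward(xt[0], W1, b1, W2, b2))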
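The per-class bookkeeping amounts to three 0/1 scores per example: a total error over all 62 classes, a digit error with the argmax restricted to classes 0-9, and a character error over classes 10-61 in which a prediction exactly 26 away from the target (a case confusion) is forgiven. A hedged sketch of that rule, reusing the hypothetical mlp_forward above:

    import numpy

    def score_example(probs, wanted_class):
        # 1 if the unrestricted argmax over all 62 classes misses the target
        total_err = int(numpy.argmax(probs) != wanted_class)
        digit_err = char_err = None
        if wanted_class < 10:
            # digit error: argmax restricted to the 10 digit classes
            digit_err = int(numpy.argmax(probs[0:10]) != wanted_class)
        else:
            # char error: argmax over the 52 letter classes, case ignored
            pred = numpy.argmax(probs[10:62]) + 10
            char_err = int(pred not in (wanted_class, wanted_class - 26, wanted_class + 26))
        return total_err, digit_err, char_err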