changeset 366:64fa85d68923

undoing unwanted changes to setup_batches.py
author humel
date Thu, 22 Apr 2010 20:06:11 -0400
parents 22919039f7ab (current diff) 14b28e43ce4e (diff)
children f24b10e43a6f
files deep/crbm/mnist_config.py.example deep/crbm/utils.py
diffstat 3 files changed, 262 insertions(+), 3 deletions(-) [+]
line wrap: on
line diff
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 22 19:57:05 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 22 20:06:11 2010 -0400
@@ -377,6 +377,147 @@
         train_losses2 = [test_model(x,y) for x,y in iter2]
         train_score2 = numpy.mean(train_losses2)
         print "Training error is: " + str(train_score2)
+    
+    # Show each test image with the model's prediction vs. the true label; on errors, ask the user whether the error is understandable
+    def see_error(self, dataset):
+        import pylab
+        # Theano function mapping an (x, y) pair to the model's predicted class
+        test_model = \
+            theano.function(
+                [self.classifier.x,self.classifier.y], self.classifier.logLayer.y_pred)
+        user = []  # per-error 1/0 votes: was the error "normal" (humanly understandable)?
+        nb_total = 0     # total number of examples seen
+        nb_error = 0   # total number of misclassified examples
+        for x,y in dataset.test(1):
+            nb_total += 1
+            pred = self.translate(test_model(x,y))
+            rep =  self.translate(y)
+            error = pred != rep
+            print 'prediction: ' + str(pred) +'\t answer: ' + str(rep) + '\t right: ' + str(not(error))
+            pylab.imshow(x.reshape((32,32)))
+            pylab.draw()
+            if error:
+                nb_error += 1
+                user.append(int(raw_input("1 = The error is normal, 0 = The error is not normal : ")))
+                print '\t\t character is hard to distinguish: ' + str(user[-1])
+            else:
+                time.sleep(3)  # pause so the displayed image can be seen before moving on
+        print '\n Over the '+str(nb_total)+' exemples, there is '+str(nb_error)+' errors. \nThe percentage of errors is'+ str(float(nb_error)/float(nb_total))
+        print 'The percentage of errors done by the model that an human will also do: ' + str(numpy.mean(user))
+        
+            
+            
+            
+    # Map a numeric class label (0-61) to its character: 0-9 digits, 10-35 -> 'A'-'Z', 36-61 -> 'a'-'z'
+    def translate(self,y):
+        
+        if y <= 9:
+            return y[0]
+        elif y == 10:
+            return 'A'
+        elif y == 11:
+            return 'B'
+        elif y == 12:
+            return 'C'
+        elif y == 13:
+            return 'D'
+        elif y == 14:
+            return 'E'
+        elif y == 15:
+            return 'F'
+        elif y == 16:
+            return 'G'
+        elif y == 17:
+            return 'H'
+        elif y == 18:
+            return 'I'
+        elif y == 19:
+            return 'J'
+        elif y == 20:
+            return 'K'
+        elif y == 21:
+            return 'L'
+        elif y == 22:
+            return 'M'
+        elif y == 23:
+            return 'N'
+        elif y == 24:
+            return 'O'
+        elif y == 25:
+            return 'P'
+        elif y == 26:
+            return 'Q'
+        elif y == 27:
+            return 'R'
+        elif y == 28:
+            return 'S'
+        elif y == 29:  # was a duplicated "y == 28", which made 'T' unreachable and left label 29 unmapped
+            return 'T'
+        elif y == 30:
+            return 'U'
+        elif y == 31:
+            return 'V'
+        elif y == 32:
+            return 'W'
+        elif y == 33:
+            return 'X'
+        elif y == 34:
+            return 'Y'
+        elif y == 35:
+            return 'Z'
+            
+        elif y == 36:
+            return 'a'
+        elif y == 37:
+            return 'b'
+        elif y == 38:
+            return 'c'
+        elif y == 39:
+            return 'd'
+        elif y == 40:
+            return 'e'
+        elif y == 41:
+            return 'f'
+        elif y == 42:
+            return 'g'
+        elif y == 43:
+            return 'h'
+        elif y == 44:
+            return 'i'
+        elif y == 45:
+            return 'j'
+        elif y == 46:
+            return 'k'
+        elif y == 47:
+            return 'l'
+        elif y == 48:
+            return 'm'
+        elif y == 49:
+            return 'n'
+        elif y == 50:
+            return 'o'
+        elif y == 51:
+            return 'p'
+        elif y == 52:
+            return 'q'
+        elif y == 53:
+            return 'r'
+        elif y == 54:
+            return 's'
+        elif y == 55:
+            return 't'
+        elif y == 56:
+            return 'u'
+        elif y == 57:
+            return 'v'
+        elif y == 58:
+            return 'w'
+        elif y == 59:
+            return 'x'
+        elif y == 60:
+            return 'y'
+        elif y == 61:
+            return 'z'    
 
 
 
--- a/deep/stacked_dae/v_sylvain/stacked_dae.py	Thu Apr 22 19:57:05 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/stacked_dae.py	Thu Apr 22 20:06:11 2010 -0400
@@ -88,7 +88,7 @@
         b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
         self.b = theano.shared(value= b_values)
 
-        self.output = (T.tanh(T.dot(input, self.W) + self.b) + 1) /2
+        self.output = (T.tanh(T.dot(input, self.W) + self.b) + 1.0)/2.0
         # ( *+ 1) /2  is because tanh goes from -1 to 1 and sigmoid goes from 0 to 1
         # I want to use tanh, but the image has to stay the same. The correction is necessary.
         self.params = [self.W, self.b]
@@ -185,10 +185,10 @@
     
     #Or use a Tanh everything is always between 0 and 1, the range is 
     #changed so it remain the same as when sigmoid is used
-    self.y   = (T.tanh(T.dot(self.tilde_x, self.W      ) + self.b)+1.0)/2.0
+    self.y   = (T.tanh(T.dot(self.tilde_x, self.W ) + self.b)+1.0)/2.0
     
     z_a = T.dot(self.y, self.W_prime) + self.b_prime
-    self.z =  (T.tanh(z_a + self.b_prime)+1.0) / 2.0
+    self.z =  (T.tanh(z_a )+1.0) / 2.0
     #To ensure to do not have a log(0) operation
     if self.z <= 0:
         self.z = 0.000001
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/deep/stacked_dae/v_sylvain/voir_erreurs.py	Thu Apr 22 20:06:11 2010 -0400
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# coding: utf-8
+
+import ift6266
+import pylearn
+
+import numpy 
+import theano
+import time
+
+import pylearn.version
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+
+import copy
+import sys
+import os
+import os.path
+
+from jobman import DD
+import jobman, jobman.sql
+from pylearn.io import filetensor
+
+from utils import produit_cartesien_jobs
+from copy import copy
+
+from sgd_optimization import SdaSgdOptimizer
+
+#from ift6266.utils.scalar_series import *
+from ift6266.utils.seriestables import *
+import tables
+
+from ift6266 import datasets
+from config import *
+
+'''
+Function called by jobman upon launching each job
+Its path is the one given when inserting jobs: see EXPERIMENT_PATH
+'''
+def jobman_entrypoint(state, channel):
+    # record mercurial versions of each package
+    pylearn.version.record_versions(state,[theano,ift6266,pylearn])
+    # TODO: remove this, bad for number of simultaneous requests on DB
+    channel.save()
+
+    # For test runs, we don't want to use the whole dataset so
+    # reduce it to fewer elements if asked to.
+    rtt = None
+    if state.has_key('reduce_train_to'):
+        rtt = state['reduce_train_to']
+    elif REDUCE_TRAIN_TO:
+        rtt = REDUCE_TRAIN_TO
+ 
+    n_ins = 32*32
+    n_outs = 62 # 10 digits + 26*2 (lower, capitals)
+     
+    examples_per_epoch = NIST_ALL_TRAIN_SIZE
+
+    PATH = PATH_P07  # directory holding the saved finetuned parameter files (from config)
+    maximum_exemples=int(100) # maximum number of test examples shown to the user
+
+
+
+    print "Creating optimizer with state, ", state
+
+    optimizer = SdaSgdOptimizer(dataset=datasets.nist_all(), 
+                                    hyperparameters=state, \
+                                    n_ins=n_ins, n_outs=n_outs,\
+                                    examples_per_epoch=examples_per_epoch, \
+                                    max_minibatches=rtt)	
+
+
+    
+    
+    print 'The model is created'
+    if os.path.exists(PATH+'params_finetune_NIST.txt'):
+        print ('\n finetune = NIST ')
+        optimizer.reload_parameters(PATH+'params_finetune_NIST.txt')
+        print "For" + str(maximum_exemples) + "over the NIST test set: "
+        optimizer.see_error(datasets.nist_all(maxsize=maximum_exemples))
+        
+    
+    if os.path.exists(PATH+'params_finetune_P07.txt'):
+        print ('\n finetune = P07 ')
+        optimizer.reload_parameters(PATH+'params_finetune_P07.txt')
+        print "For" + str(maximum_exemples) + "over the P07 test set: "
+        optimizer.see_error(datasets.nist_P07(maxsize=maximum_exemples))
+
+    
+    if os.path.exists(PATH+'params_finetune_NIST_then_P07.txt'):
+        print ('\n finetune = NIST then P07')
+        optimizer.reload_parameters(PATH+'params_finetune_NIST_then_P07.txt')
+        print "For" + str(maximum_exemples) + "over the NIST test set: "
+        optimizer.see_error(datasets.nist_all(maxsize=maximum_exemples))
+        print "For" + str(maximum_exemples) + "over the P07 test set: "
+        optimizer.see_error(datasets.nist_P07(maxsize=maximum_exemples))
+    
+    if os.path.exists(PATH+'params_finetune_P07_then_NIST.txt'):
+        print ('\n finetune = P07 then NIST')
+        optimizer.reload_parameters(PATH+'params_finetune_P07_then_NIST.txt')
+        print "For" + str(maximum_exemples) + "over the P07 test set: "
+        optimizer.see_error(datasets.nist_P07(maxsize=maximum_exemples))
+        print "For" + str(maximum_exemples) + "over the NIST test set: "
+        optimizer.see_error(datasets.nist_all(maxsize=maximum_exemples))
+    
+    channel.save()
+
+    return channel.COMPLETE
+
+
+
+if __name__ == '__main__':
+
+    # Standalone run: stub out the jobman channel and use the default NIST hyperparameters
+    chanmock = DD({'COMPLETE':0,'save':(lambda:None)})
+    jobman_entrypoint(DD(DEFAULT_HP_NIST), chanmock)
+
+