changeset 144:c958941c1b9d

merge
author XavierMuller
date Tue, 23 Feb 2010 18:16:55 -0500
parents f341a4efb44a (current diff) bb26c12bb9f6 (diff)
children 8ceaaf812891
files
diffstat 23 files changed, 1850 insertions(+), 212 deletions(-)
line diff
--- a/pycaptcha/Captcha/Visual/Text.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/pycaptcha/Captcha/Visual/Text.py	Tue Feb 23 18:16:55 2010 -0500
@@ -18,7 +18,7 @@
        If any of the given files are directories, all *.ttf found
        in that directory will be added.
        """
-    extensions = [".ttf"]
+    extensions = [".ttf", ".TTF"]
     basePath = "fonts"
 
# argument variables to modify in order to set the path to the fonts.
@@ -39,7 +39,7 @@
         return (fileName, size)
 
 # Predefined font factories
-defaultFontFactory = FontFactory(25, "vera", "others")
+defaultFontFactory = FontFactory(25, "allfonts")
 #defaultFontFactory = FontFactory((30, 40), "vera")
 
 class TextLayer(Visual.Layer):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pycaptcha/Captcha/data/fonts/allfonts	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,1 @@
+/Tmp/allfonts
\ No newline at end of file
--- a/pycaptcha/Captcha/data/words/characters	Tue Feb 23 18:08:11 2010 -0500
+++ b/pycaptcha/Captcha/data/words/characters	Tue Feb 23 18:16:55 2010 -0500
@@ -1,26 +1,62 @@
-q
-w
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+A
+B
+C
+D
+E
+F
+G
+H
+I
+J
+K
+L
+M
+N
+O
+P
+Q
+R
+S
+T
+U
+V
+W
+X
+Y
+Z
+a
+b
+c
+d
 e
-r
-t
-y
-u
-i
-o
-p
-a
-s
-d
 f
 g
 h
+i
 j
 k
 l
-z
-x
-c
+m
+n
+o
+p
+q
+r
+s
+t
+u
 v
-b
-n
-m
+w
+x
+y
+z
--- a/pycaptcha/Facade.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/pycaptcha/Facade.py	Tue Feb 23 18:16:55 2010 -0500
@@ -30,4 +30,4 @@
        return a
 
     else :
-        return (a, g,solutions)
+        return (a, g.solutions)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/fonts_test.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+
+import os
+import ImageFont, ImageDraw, Image
+
+dir1 =  "/data/lisa/data/ift6266h10/allfonts/"
+#dir1 = "/Tmp/allfonts/"
+
+img = Image.new("L", (132,132))
+draw = ImageDraw.Draw(img)
+L = [chr(ord('0')+x) for x in range(10)] + [chr(ord('A')+x) for x in range(26)] + [chr(ord('a')+x) for x in range(26)]
+
+for f in os.listdir(dir1):
+    try:
+        font = ImageFont.truetype(dir1+f, 25)
+        for l in L:
+            draw.text((60,60), l, font=font, fill="white")
+    except:
+        print dir1+f
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/imgbg_test.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,15 @@
+#!/usr/bin/python
+
+import Image, cPickle
+
+f=open('/Tmp/image_net/filelist.pkl')
+image_files = cPickle.load(f)
+f.close()
+
+for i in range(len(image_files)):
+    filename = '/Tmp/image_net/' + image_files[i]
+    try:
+        image = Image.open(filename).convert('L')
+    except:
+        print filename
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/launch_generate100.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+import os
+dir1 = "/data/lisa/data/ift6266h10/"
+
+for i,s in enumerate(['valid','test']):
+    for c in [0.3,0.5,0.7,1]:
+        l = str(c).replace('.','')
+        os.system("dbidispatch --condor --os=fc9 --machine=brams0c.iro.umontreal.ca ./run_pipeline.sh -o %sdata/P%s_%s_data.ft -p %sdata/P%s_%s_params -x %sdata/P%s_%s_labels.ft -f %s%s_data.ft -l %s%s_labels.ft -c %socr_%s_data.ft -d %socr_%s_labels.ft -m 0.3 -z 0.1 -a 0.1 -b 0.25 -g 0.25 -s %d" % (dir1, l, s, dir1, l, s, dir1, l, s, dir1, s, dir1, s, dir1, s, dir1, s, [20000,80000][i]))
+
+for i in range(100):
+    os.system("dbidispatch --condor --os=fc9 --machine=brams0c.iro.umontreal.ca ./run_pipeline.sh -o %sdata/P07_train%d_data.ft -p %sdata/P07_train%d_params -x %sdata/P07_train%d_labels.ft -f %strain_data.ft -l %strain_labels.ft -c %socr_train_data.ft -d %socr_train_labels.ft -m 0.7 -z 0.1 -a 0.1 -b 0.25 -g 0.25 -s 819200" % (dir1, i, dir1, i, dir1, i, dir1, dir1, dir1, dir1))
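For reference, the first iteration of the first loop (i=0, s='valid', c=0.3, hence l='03') expands the format string above into a command along these lines, with dir1 as defined in the script:

    dbidispatch --condor --os=fc9 --machine=brams0c.iro.umontreal.ca ./run_pipeline.sh -o /data/lisa/data/ift6266h10/data/P03_valid_data.ft -p /data/lisa/data/ift6266h10/data/P03_valid_params -x /data/lisa/data/ift6266h10/data/P03_valid_labels.ft -f /data/lisa/data/ift6266h10/valid_data.ft -l /data/lisa/data/ift6266h10/valid_labels.ft -c /data/lisa/data/ift6266h10/ocr_valid_data.ft -d /data/lisa/data/ift6266h10/ocr_valid_labels.ft -m 0.3 -z 0.1 -a 0.1 -b 0.25 -g 0.25 -s 20000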
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/ocr_divide.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+'''
+creation of the OCR train, valid and test sets
+the valid set is trainorig[:20000]
+the test set is trainorig[20000:40000]
+the train set is trainorig[40000:]
+trainorig is already shuffled
+'''
+
+from pylearn.io import filetensor as ft
+import numpy, os
+
+dir1 = '/data/lisa/data/ocr_breuel/filetensor/'
+dir2 = "/data/lisa/data/ift6266h10/"
+
+f = open(dir1 + 'unlv-corrected-2010-02-01-shuffled.ft')
+d = ft.read(f)
+f = open(dir2 + "ocr_valid_data.ft", 'wb')
+ft.write(f, d[:20000])
+f = open(dir2 + "ocr_test_data.ft", 'wb')
+ft.write(f, d[20000:40000])
+f = open(dir2 + "ocr_train_data.ft", 'wb')
+ft.write(f, d[40000:])
+
+f = open(dir1 + 'unlv-corrected-2010-02-01-labels-shuffled.ft')
+d = ft.read(f)
+f = open(dir2 + "ocr_valid_labels.ft", 'wb')
+ft.write(f, d[:20000])
+f = open(dir2 + "ocr_test_labels.ft", 'wb')
+ft.write(f, d[20000:40000])
+f = open(dir2 + "ocr_train_labels.ft", 'wb')
+ft.write(f, d[40000:])
+
+for i in ["train", "valid", "test"]:
+    os.chmod(dir2 + "ocr_" + i + "_data.ft", 0744)
+    os.chmod(dir2 + "ocr_" + i + "_labels.ft", 0744)
+
+
+
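A quick sanity check one could run once the split above has been written (a sketch, assuming the files were produced as intended; ft is the same pylearn filetensor module the script uses):

    from pylearn.io import filetensor as ft
    dir2 = "/data/lisa/data/ift6266h10/"
    for name in ["valid", "test", "train"]:
        d = ft.read(open(dir2 + "ocr_" + name + "_data.ft"))
        l = ft.read(open(dir2 + "ocr_" + name + "_labels.ft"))
        print name, len(d), len(l)   # valid and test should each hold 20000 examples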
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/mnist_sda.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,44 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# Parameterize call to sgd_optimization for MNIST
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+
+from sgd_optimization import SdaSgdOptimizer
+import cPickle, gzip
+from jobman import DD
+
+MNIST_LOCATION = '/u/savardf/datasets/mnist.pkl.gz'
+
+def sgd_optimization_mnist(learning_rate=0.1, pretraining_epochs = 2, \
+                            pretrain_lr = 0.1, training_epochs = 5, \
+                            dataset='mnist.pkl.gz'):
+    # Load the dataset 
+    f = gzip.open(dataset,'rb')
+    # this gives us train, valid, test (each with .x, .y)
+    dataset = cPickle.load(f)
+    f.close()
+
+    n_ins = 28*28
+    n_outs = 10
+
+    hyperparameters = DD({'finetuning_lr':learning_rate,
+                       'pretraining_lr':pretrain_lr,
+                       'pretraining_epochs_per_layer':pretraining_epochs,
+                       'max_finetuning_epochs':training_epochs,
+                       'hidden_layers_sizes':[100],
+                       'corruption_levels':[0.2],
+                       'minibatch_size':20})
+
+    optimizer = SdaSgdOptimizer(dataset, hyperparameters, n_ins, n_outs)
+    optimizer.pretrain()
+    optimizer.finetune()
+
+if __name__ == '__main__':
+    sgd_optimization_mnist(dataset=MNIST_LOCATION)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/nist_sda.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# coding: utf-8
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+import copy
+
+import sys
+import os.path
+
+from sgd_optimization import SdaSgdOptimizer
+
+from jobman import DD
+import jobman, jobman.sql
+from pylearn.io import filetensor
+
+from utils import produit_croise_jobs
+
+TEST_CONFIG = False
+
+NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
+
+JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_db/'
+REDUCE_TRAIN_TO = None
+MAX_FINETUNING_EPOCHS = 1000
+if TEST_CONFIG:
+    JOBDB = 'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db/'
+    REDUCE_TRAIN_TO = 1000
+    MAX_FINETUNING_EPOCHS = 2
+
+JOBDB_JOBS = JOBDB + 'fsavard_sda1_jobs'
+JOBDB_RESULTS = JOBDB + 'fsavard_sda1_results'
+EXPERIMENT_PATH = "ift6266.scripts.stacked_dae.nist_sda.jobman_entrypoint"
+
+# There used to be
+# 'finetuning_lr': [0.00001, 0.0001, 0.001, 0.01, 0.1]
+# and
+#  'num_hidden_layers':[1,2,3]
+# but this is now handled by a special mechanism in SgdOptimizer
+# to reuse intermediate results (for the same training of lower layers,
+# we can test many finetuning_lr)
+JOB_VALS = {'pretraining_lr': [0.1, 0.01, 0.001],#, 0.0001],
+        'pretraining_epochs_per_layer': [10,20],
+        'hidden_layers_sizes': [300,800],
+        'corruption_levels': [0.1,0.2],
+        'minibatch_size': [20],
+        'max_finetuning_epochs':[MAX_FINETUNING_EPOCHS]}
+FINETUNING_LR_VALS = [0.1, 0.01, 0.001]#, 0.0001]
+NUM_HIDDEN_LAYERS_VALS = [1,2,3]
+
+# Just useful for tests... minimal number of epochs
+DEFAULT_HP_NIST = DD({'finetuning_lr':0.01,
+                       'pretraining_lr':0.01,
+                       'pretraining_epochs_per_layer':1,
+                       'max_finetuning_epochs':1,
+                       'hidden_layers_sizes':[1000],
+                       'corruption_levels':[0.2],
+                       'minibatch_size':20})
+
+def jobman_entrypoint(state, channel):
+    state = copy.copy(state)
+
+    print "Will load NIST"
+    nist = NIST(20)
+    print "NIST loaded"
+
+    rtt = None
+    if state.has_key('reduce_train_to'):
+        rtt = state['reduce_train_to']
+    elif REDUCE_TRAIN_TO:
+        rtt = REDUCE_TRAIN_TO
+
+    if rtt:
+        print "Reducing training set to ", rtt, " examples"
+        nist.reduce_train_set(rtt)
+
+    train,valid,test = nist.get_tvt()
+    dataset = (train,valid,test)
+
+    n_ins = 32*32
+    n_outs = 62 # 10 digits, 26*2 (lower, capitals)
+
+    db = jobman.sql.db(JOBDB_RESULTS)
+    optimizer = SdaSgdOptimizer(dataset, state, n_ins, n_outs,\
+                    input_divider=255.0, job_tree=True, results_db=db, \
+                    experiment=EXPERIMENT_PATH, \
+                    finetuning_lr_to_try=FINETUNING_LR_VALS, \
+                    num_hidden_layers_to_try=NUM_HIDDEN_LAYERS_VALS)
+    optimizer.train()
+
+    return channel.COMPLETE
+
+def estimate_pretraining_time(job):
+    job = DD(job)
+    # time spent on pretraining estimated as O(n^2) where n = num hidden units
+    # no need to multiply by num_hidden_layers, as results from num=1
+    # are reused for num=2 or 3, so in the end we get the same time
+    # as if we were training a single layer 3 times
+    # constants:
+    # - 20 mins to pretrain a layer with 1000 units (per 1 epoch)
+    # - 12 mins to finetune (per 1 epoch)
+    # basically the job_tree trick gives us a 5 times speedup on the
+    # pretraining time due to reusing for finetuning_lr
+    # and gives us a second x2 speedup for reusing previous layers
+    # to explore num_hidden_layers
+    return (job.pretraining_epochs_per_layer * 20 / (1000.0*1000) \
+            * job.hidden_layers_sizes * job.hidden_layers_sizes)
+
+def estimate_total_time():
+    jobs = produit_croise_jobs(JOB_VALS)
+    sumtime = 0.0
+    sum_without = 0.0
+    for job in jobs:
+        sumtime += estimate_pretraining_time(job)
+        # 12 mins per epoch * 30 epochs
+        # 5 finetuning_lr per pretraining combination
+    sum_without = (12*20*len(jobs) + sumtime*2) * len(FINETUNING_LR_VALS)
+    sumtime += len(FINETUNING_LR_VALS) * len(jobs) * 12 * 20
+    print "num jobs=", len(jobs)
+    print "estimate", sumtime/60, " hours"
+    print "estimate without tree optimization", sum_without/60, "ratio", sumtime / sum_without
+
+def jobman_insert_nist():
+    jobs = produit_croise_jobs(JOB_VALS)
+
+    db = jobman.sql.db(JOBDB_JOBS)
+    for job in jobs:
+        job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
+        jobman.sql.insert_dict(job, db)
+
+    print "inserted"
+
+class NIST:
+    def __init__(self, minibatch_size, basepath=None, reduce_train_to=None):
+        global NIST_ALL_LOCATION
+
+        self.minibatch_size = minibatch_size
+        self.basepath = basepath and basepath or NIST_ALL_LOCATION
+
+        self.set_filenames()
+
+        # arrays of 2 elements: .x, .y
+        self.train = [None, None]
+        self.test = [None, None]
+
+        self.load_train_test()
+
+        self.valid = [[], []]
+        self.split_train_valid()
+        if reduce_train_to:
+            self.reduce_train_set(reduce_train_to)
+
+    def get_tvt(self):
+        return self.train, self.valid, self.test
+
+    def set_filenames(self):
+        self.train_files = ['all_train_data.ft',
+                                'all_train_labels.ft']
+
+        self.test_files = ['all_test_data.ft',
+                            'all_test_labels.ft']
+
+    def load_train_test(self):
+        self.load_data_labels(self.train_files, self.train)
+        self.load_data_labels(self.test_files, self.test)
+
+    def load_data_labels(self, filenames, pair):
+        for i, fn in enumerate(filenames):
+            f = open(os.path.join(self.basepath, fn))
+            pair[i] = filetensor.read(f)
+            f.close()
+
+    def reduce_train_set(self, max):
+        self.train[0] = self.train[0][:max]
+        self.train[1] = self.train[1][:max]
+
+        if max < len(self.test[0]):
+            for ar in (self.test, self.valid):
+                ar[0] = ar[0][:max]
+                ar[1] = ar[1][:max]
+
+    def split_train_valid(self):
+        test_len = len(self.test[0])
+        
+        new_train_x = self.train[0][:-test_len]
+        new_train_y = self.train[1][:-test_len]
+
+        self.valid[0] = self.train[0][-test_len:]
+        self.valid[1] = self.train[1][-test_len:]
+
+        self.train[0] = new_train_x
+        self.train[1] = new_train_y
+
+def test_load_nist():
+    print "Will load NIST"
+
+    import time
+    t1 = time.time()
+    nist = NIST(20)
+    t2 = time.time()
+
+    print "NIST loaded. time delta = ", t2-t1
+
+    tr,v,te = nist.get_tvt()
+
+    print "Lengths: ", len(tr[0]), len(v[0]), len(te[0])
+
+    raw_input("Press any key")
+
+# hp for hyperparameters
+def sgd_optimization_nist(hp=None, dataset_dir='/data/lisa/data/nist'):
+    global DEFAULT_HP_NIST
+    hp = hp and hp or DEFAULT_HP_NIST
+
+    print "Will load NIST"
+
+    import time
+    t1 = time.time()
+    nist = NIST(20, reduce_train_to=100)
+    t2 = time.time()
+
+    print "NIST loaded. time delta = ", t2-t1
+
+    train,valid,test = nist.get_tvt()
+    dataset = (train,valid,test)
+
+    print train[0][15]
+    print type(train[0][1])
+
+
+    print "Lengths train, valid, test: ", len(train[0]), len(valid[0]), len(test[0])
+
+    n_ins = 32*32
+    n_outs = 62 # 10 digits, 26*2 (lower, capitals)
+
+    optimizer = SdaSgdOptimizer(dataset, hp, n_ins, n_outs, input_divider=255.0)
+    optimizer.train()
+
+if __name__ == '__main__':
+
+    import sys
+
+    args = sys.argv[1:]
+
+    if len(args) > 0 and args[0] == 'load_nist':
+        test_load_nist()
+
+    elif len(args) > 0 and args[0] == 'jobman_insert':
+        jobman_insert_nist()
+    elif len(args) > 0 and args[0] == 'test_job_tree':
+        # don't forget to comment out sql.inserts and make reduce_train_to=100
+        print "TESTING JOB TREE"
+        chanmock = DD({'COMPLETE':0})
+        hp = copy.copy(DEFAULT_HP_NIST)
+        hp.update({'reduce_train_to':100})
+        jobman_entrypoint(hp, chanmock)
+    elif len(args) > 0 and args[0] == 'estimate':
+        estimate_total_time()
+    else:
+        sgd_optimization_nist()
+
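As a rough worked example of estimate_pretraining_time above (a sketch using the constants stated in its own comments): a job with pretraining_epochs_per_layer=20 and hidden_layers_sizes=800 costs

    epochs_per_layer = 20
    n_hidden = 800
    minutes = epochs_per_layer * 20 / (1000.0 * 1000) * n_hidden * n_hidden
    print minutes   # 256.0 minutes of pretraining, i.e. roughly 4.3 hours per job

which is why the job-tree reuse of lower layers and finetuning_lr values described in the comments matters once the whole JOB_VALS grid is explored.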
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/sgd_optimization.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# Generic SdA optimization loop, adapted from the deeplearning.net tutorial
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+import copy
+import sys
+
+from jobman import DD
+import jobman, jobman.sql
+
+from stacked_dae import SdA
+
+def shared_dataset(data_xy):
+    data_x, data_y = data_xy
+    #shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
+    #shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
+    #shared_y = T.cast(shared_y, 'int32')
+    shared_x = theano.shared(data_x)
+    shared_y = theano.shared(data_y)
+    return shared_x, shared_y
+
+class SdaSgdOptimizer:
+    def __init__(self, dataset, hyperparameters, n_ins, n_outs, input_divider=1.0,\
+                job_tree=False, results_db=None,\
+                experiment="",\
+                num_hidden_layers_to_try=[1,2,3], \
+                finetuning_lr_to_try=[0.1, 0.01, 0.001, 0.0001, 0.00001]):
+
+        self.dataset = dataset
+        self.hp = copy.copy(hyperparameters)
+        self.n_ins = n_ins
+        self.n_outs = n_outs
+        self.input_divider = numpy.asarray(input_divider, dtype=theano.config.floatX)
+
+        self.job_tree = job_tree
+        self.results_db = results_db
+        self.experiment = experiment
+        if self.job_tree:
+            assert(not results_db is None)
+            # these hp should not be there, so we insert default values
+            # we use 3 hidden layers as we'll iterate through 1,2,3
+            self.hp.finetuning_lr = 0.1 # dummy value, will be replaced anyway
+            cl = self.hp.corruption_levels
+            nh = self.hp.hidden_layers_sizes
+            self.hp.corruption_levels = [cl,cl,cl]
+            self.hp.hidden_layers_sizes = [nh,nh,nh]
+            
+        self.num_hidden_layers_to_try = num_hidden_layers_to_try
+        self.finetuning_lr_to_try = finetuning_lr_to_try
+
+        self.printout_frequency = 1000
+
+        self.rng = numpy.random.RandomState(1234)
+
+        self.init_datasets()
+        self.init_classifier()
+     
+    def init_datasets(self):
+        print "init_datasets"
+        train_set, valid_set, test_set = self.dataset
+        self.test_set_x, self.test_set_y = shared_dataset(test_set)
+        self.valid_set_x, self.valid_set_y = shared_dataset(valid_set)
+        self.train_set_x, self.train_set_y = shared_dataset(train_set)
+
+        # compute number of minibatches for training, validation and testing
+        self.n_train_batches = self.train_set_x.value.shape[0] / self.hp.minibatch_size
+        self.n_valid_batches = self.valid_set_x.value.shape[0] / self.hp.minibatch_size
+        self.n_test_batches  = self.test_set_x.value.shape[0]  / self.hp.minibatch_size
+
+    def init_classifier(self):
+        print "Constructing classifier"
+        # construct the stacked denoising autoencoder class
+        self.classifier = SdA( \
+                          train_set_x= self.train_set_x, \
+                          train_set_y = self.train_set_y,\
+                          batch_size = self.hp.minibatch_size, \
+                          n_ins= self.n_ins, \
+                          hidden_layers_sizes = self.hp.hidden_layers_sizes, \
+                          n_outs = self.n_outs, \
+                          corruption_levels = self.hp.corruption_levels,\
+                          rng = self.rng,\
+                          pretrain_lr = self.hp.pretraining_lr, \
+                          finetune_lr = self.hp.finetuning_lr,\
+                          input_divider = self.input_divider )
+
+    def train(self):
+        self.pretrain()
+        if not self.job_tree:
+            # if job_tree is True, finetuning was already performed
+            self.finetune()
+
+    def pretrain(self):
+        print "STARTING PRETRAINING"
+
+        printout_acc = 0.0
+        last_error = 0.0
+
+        start_time = time.clock()  
+        ## Pre-train layer-wise 
+        for i in xrange(self.classifier.n_layers):
+            # go through pretraining epochs 
+            for epoch in xrange(self.hp.pretraining_epochs_per_layer):
+                # go through the training set
+                for batch_index in xrange(self.n_train_batches):
+                    c = self.classifier.pretrain_functions[i](batch_index)
+
+                    printout_acc += c / self.printout_frequency
+                    if (batch_index+1) % self.printout_frequency == 0:
+                        print batch_index, "reconstruction cost avg=", printout_acc
+                        last_error = printout_acc
+                        printout_acc = 0.0
+                        
+                print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
+
+            self.job_splitter(i+1, time.clock()-start_time, last_error)
+     
+        end_time = time.clock()
+
+        print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
+
+    # Save time by reusing intermediate results
+    def job_splitter(self, current_pretraining_layer, pretraining_time, last_error):
+
+        state_copy = None
+        original_classifier = None
+
+        if self.job_tree and current_pretraining_layer in self.num_hidden_layers_to_try:
+            for lr in self.finetuning_lr_to_try:
+                sys.stdout.flush()
+                sys.stderr.flush()
+
+                state_copy = copy.copy(self.hp)
+
+                self.hp.update({'num_hidden_layers':current_pretraining_layer, \
+                            'finetuning_lr':lr,\
+                            'pretraining_time':pretraining_time,\
+                            'last_reconstruction_error':last_error})
+
+                original_classifier = self.classifier
+                print "ORIGINAL CLASSIFIER MEANS",original_classifier.get_params_means()
+                self.classifier = SdA.copy_reusing_lower_layers(original_classifier, current_pretraining_layer, new_finetuning_lr=lr)
+                
+                self.finetune()
+            
+                self.insert_finished_job()
+
+                print "NEW CLASSIFIER MEANS AFTERWARDS",self.classifier.get_params_means()
+                print "ORIGINAL CLASSIFIER MEANS AFTERWARDS",original_classifier.get_params_means()
+                self.classifier = original_classifier
+                self.hp = state_copy
+
+    def insert_finished_job(self):
+        job = copy.copy(self.hp)
+        job[jobman.sql.STATUS] = jobman.sql.DONE
+        job[jobman.sql.EXPERIMENT] = self.experiment
+
+        # don't try to store arrays in db
+        job['hidden_layers_sizes'] = job.hidden_layers_sizes[0]
+        job['corruption_levels'] = job.corruption_levels[0]
+
+        print "Will insert finished job", job
+        jobman.sql.insert_dict(jobman.flatten(job), self.results_db)
+
+    def finetune(self):
+        print "STARTING FINETUNING"
+
+        index   = T.lscalar()    # index to a [mini]batch 
+        minibatch_size = self.hp.minibatch_size
+
+        # create a function to compute the mistakes that are made by the model
+        # on the validation set, or testing set
+        test_model = theano.function([index], self.classifier.errors,
+                 givens = {
+                   self.classifier.x: self.test_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                   self.classifier.y: self.test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
+
+        validate_model = theano.function([index], self.classifier.errors,
+                givens = {
+                   self.classifier.x: self.valid_set_x[index*minibatch_size:(index+1)*minibatch_size] / self.input_divider,
+                   self.classifier.y: self.valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
+
+
+        # early-stopping parameters
+        patience              = 10000 # look at this many examples regardless
+        patience_increase     = 2.    # wait this much longer when a new best is 
+                                      # found
+        improvement_threshold = 0.995 # a relative improvement of this much is 
+                                      # considered significant
+        validation_frequency  = min(self.n_train_batches, patience/2)
+                                      # go through this many 
+                                      # minibatches before checking the network 
+                                      # on the validation set; in this case we 
+                                      # check every epoch 
+
+        best_params          = None
+        best_validation_loss = float('inf')
+        test_score           = 0.
+        start_time = time.clock()
+
+        done_looping = False
+        epoch = 0
+
+        printout_acc = 0.0
+
+        if not self.hp.has_key('max_finetuning_epochs'):
+            self.hp.max_finetuning_epochs = 1000
+
+        while (epoch < self.hp.max_finetuning_epochs) and (not done_looping):
+            epoch = epoch + 1
+            for minibatch_index in xrange(self.n_train_batches):
+
+                cost_ij = self.classifier.finetune(minibatch_index)
+                iter    = epoch * self.n_train_batches + minibatch_index
+
+                printout_acc += cost_ij / float(self.printout_frequency * minibatch_size)
+                if (iter+1) % self.printout_frequency == 0:
+                    print iter, "cost avg=", printout_acc
+                    printout_acc = 0.0
+
+                if (iter+1) % validation_frequency == 0: 
+                    
+                    validation_losses = [validate_model(i) for i in xrange(self.n_valid_batches)]
+                    this_validation_loss = numpy.mean(validation_losses)
+                    print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+                           (epoch, minibatch_index+1, self.n_train_batches, \
+                            this_validation_loss*100.))
+
+
+                    # if we got the best validation score until now
+                    if this_validation_loss < best_validation_loss:
+
+                        #improve patience if loss improvement is good enough
+                        if this_validation_loss < best_validation_loss *  \
+                               improvement_threshold :
+                            patience = max(patience, iter * patience_increase)
+
+                        # save best validation score and iteration number
+                        best_validation_loss = this_validation_loss
+                        best_iter = iter
+
+                        # test it on the test set
+                        test_losses = [test_model(i) for i in xrange(self.n_test_batches)]
+                        test_score = numpy.mean(test_losses)
+                        print(('     epoch %i, minibatch %i/%i, test error of best '
+                              'model %f %%') % 
+                                     (epoch, minibatch_index+1, self.n_train_batches,
+                                      test_score*100.))
+
+
+            if patience <= iter :
+                done_looping = True
+                break
+
+        end_time = time.clock()
+        self.hp.update({'finetuning_time':end_time-start_time,\
+                    'best_validation_error':best_validation_loss,\
+                    'test_score':test_score,
+                    'num_finetuning_epochs':epoch})
+        print(('Optimization complete with best validation score of %f %%,'
+               'with test performance %f %%') %  
+                     (best_validation_loss * 100., test_score*100.))
+        print ('The finetuning ran for %f minutes' % ((end_time-start_time)/60.))
+
+
+
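The finetuning loop above uses the patience-based early stopping adapted from the deeplearning.net tutorial. A minimal standalone sketch of that logic, with made-up validation errors, could look like:

    patience = 3
    patience_increase = 2.
    improvement_threshold = 0.995
    validation_errors = [0.30, 0.25, 0.24, 0.239, 0.2389, 0.2389]   # made-up numbers
    best = float('inf')
    for it, err in enumerate(validation_errors):
        if err < best:
            if err < best * improvement_threshold:
                patience = max(patience, it * patience_increase)
            best = err
        if patience <= it:
            break
    print best   # stops once no significant improvement has been seen for a while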
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/stacked_convolutional_dae.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,415 @@
+import numpy
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+import theano.sandbox.softsign
+
+from theano.tensor.signal import downsample
+from theano.tensor.nnet import conv 
+import gzip
+import cPickle
+ 
+ 
+class LogisticRegression(object):
+ 
+    def __init__(self, input, n_in, n_out):
+ 
+        self.W = theano.shared( value=numpy.zeros((n_in,n_out),
+                                            dtype = theano.config.floatX) )
+
+        self.b = theano.shared( value=numpy.zeros((n_out,),
+                                            dtype = theano.config.floatX) )
+
+        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
+        
+
+        self.y_pred=T.argmax(self.p_y_given_x, axis=1)
+ 
+        self.params = [self.W, self.b]
+ 
+    def negative_log_likelihood(self, y):
+        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+ 
+    def MSE(self, y):
+        return -T.mean(abs((self.p_y_given_x)[T.arange(y.shape[0]),y]-y)**2)
+
+    def errors(self, y):
+        if y.ndim != self.y_pred.ndim:
+            raise TypeError('y should have the same shape as self.y_pred',
+                ('y', y.type, 'y_pred', self.y_pred.type))
+ 
+
+        if y.dtype.startswith('int'):
+            return T.mean(T.neq(self.y_pred, y))
+        else:
+            raise NotImplementedError()
+ 
+ 
+class SigmoidalLayer(object):
+    def __init__(self, rng, input, n_in, n_out):
+
+        self.input = input
+ 
+        W_values = numpy.asarray( rng.uniform( \
+              low = -numpy.sqrt(6./(n_in+n_out)), \
+              high = numpy.sqrt(6./(n_in+n_out)), \
+              size = (n_in, n_out)), dtype = theano.config.floatX)
+        self.W = theano.shared(value = W_values)
+ 
+        b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
+        self.b = theano.shared(value= b_values)
+ 
+        self.output = T.tanh(T.dot(input, self.W) + self.b)
+        self.params = [self.W, self.b]
+ 
+class dA_conv(object):
+ 
+  def __init__(self, corruption_level = 0.1, input = None, shared_W = None,\
+                   shared_b = None, filter_shape = None, image_shape = None, poolsize = (2,2)):
+
+    theano_rng = RandomStreams()
+    
+    fan_in = numpy.prod(filter_shape[1:])
+    fan_out = filter_shape[0] * numpy.prod(filter_shape[2:])
+
+    center = theano.shared(value = 1, name="center")
+    scale = theano.shared(value = 2, name="scale")
+
+    if shared_W != None and shared_b != None :
+        self.W = shared_W
+        self.b = shared_b
+    else:
+        initial_W = numpy.asarray( numpy.random.uniform( \
+              low = -numpy.sqrt(6./(fan_in+fan_out)), \
+              high = numpy.sqrt(6./(fan_in+fan_out)), \
+              size = filter_shape), dtype = theano.config.floatX)
+        initial_b = numpy.zeros((filter_shape[0],), dtype= theano.config.floatX)
+    
+    
+        self.W = theano.shared(value = initial_W, name = "W")
+        self.b = theano.shared(value = initial_b, name = "b")
+    
+ 
+    initial_b_prime= numpy.zeros((filter_shape[1],))
+        
+    self.W_prime=T.dtensor4('W_prime')
+
+    self.b_prime = theano.shared(value = initial_b_prime, name = "b_prime")
+ 
+    self.x = input
+
+    self.tilde_x = theano_rng.binomial( self.x.shape, 1, 1 - corruption_level) * self.x
+
+    conv1_out = conv.conv2d(self.tilde_x, self.W, \
+                             filter_shape=filter_shape, \
+                                image_shape=image_shape, border_mode='valid')
+
+    
+    self.y = T.tanh(conv1_out + self.b.dimshuffle('x', 0, 'x', 'x'))
+
+    
+    da_filter_shape = [ filter_shape[1], filter_shape[0], filter_shape[2],\
+                       filter_shape[3] ]
+    da_image_shape = [ image_shape[0],filter_shape[0],image_shape[2]-filter_shape[2]+1, \
+                         image_shape[3]-filter_shape[3]+1 ]
+    initial_W_prime =  numpy.asarray( numpy.random.uniform( \
+              low = -numpy.sqrt(6./(fan_in+fan_out)), \
+              high = numpy.sqrt(6./(fan_in+fan_out)), \
+              size = da_filter_shape), dtype = theano.config.floatX)
+    self.W_prime = theano.shared(value = initial_W_prime, name = "W_prime")
+
+    #import pdb;pdb.set_trace()
+
+    conv2_out = conv.conv2d(self.y, self.W_prime, \
+                               filter_shape = da_filter_shape, image_shape = da_image_shape ,\
+                                border_mode='full')
+
+    self.z =  (T.tanh(conv2_out + self.b_prime.dimshuffle('x', 0, 'x', 'x'))+center) / scale
+
+    scaled_x = (self.x + center) / scale
+
+    self.L = - T.sum( scaled_x*T.log(self.z) + (1-scaled_x)*T.log(1-self.z), axis=1 )
+
+    self.cost = T.mean(self.L)
+
+    self.params = [ self.W, self.b, self.b_prime ] 
+ 
+ 
+
+class LeNetConvPoolLayer(object):
+    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2,2)):
+        assert image_shape[1]==filter_shape[1]
+        self.input = input
+  
+        W_values = numpy.zeros(filter_shape, dtype=theano.config.floatX)
+        self.W = theano.shared(value = W_values)
+ 
+        b_values = numpy.zeros((filter_shape[0],), dtype= theano.config.floatX)
+        self.b = theano.shared(value= b_values)
+ 
+        conv_out = conv.conv2d(input, self.W,
+                filter_shape=filter_shape, image_shape=image_shape)
+ 
+
+        fan_in = numpy.prod(filter_shape[1:])
+        fan_out = filter_shape[0] * numpy.prod(filter_shape[2:]) / numpy.prod(poolsize)
+
+        W_bound = numpy.sqrt(6./(fan_in + fan_out))
+        self.W.value = numpy.asarray(
+                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
+                dtype = theano.config.floatX)
+  
+
+        pooled_out = downsample.max_pool2D(conv_out, poolsize, ignore_border=True)
+ 
+        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
+        self.params = [self.W, self.b]
+ 
+
+class SdA():
+    def __init__(self, input, n_ins_conv, n_ins_mlp, train_set_x, train_set_y, batch_size, \
+                     conv_hidden_layers_sizes, mlp_hidden_layers_sizes, corruption_levels, \
+                     rng, n_out, pretrain_lr, finetune_lr):
+
+        self.layers = []
+        self.pretrain_functions = []
+        self.params = []
+        self.conv_n_layers = len(conv_hidden_layers_sizes)
+        self.mlp_n_layers = len(mlp_hidden_layers_sizes)
+         
+        index = T.lscalar() # index to a [mini]batch
+        self.x = T.dmatrix('x') # the data is presented as rasterized images
+        self.y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
+        
+ 
+        
+        for i in xrange( self.conv_n_layers ):
+
+            filter_shape=conv_hidden_layers_sizes[i][0]
+            image_shape=conv_hidden_layers_sizes[i][1]
+            max_poolsize=conv_hidden_layers_sizes[i][2]
+                
+            if i == 0 :
+                layer_input=self.x.reshape((batch_size,1,28,28))
+            else:
+                layer_input=self.layers[-1].output
+
+            layer = LeNetConvPoolLayer(rng, input=layer_input, \
+                                image_shape=image_shape, \
+                                filter_shape=filter_shape,poolsize=max_poolsize)
+            print 'Convolutional layer '+str(i+1)+' created'
+                
+            self.layers += [layer]
+            self.params += layer.params
+                
+            da_layer = dA_conv(corruption_level = corruption_levels[0],\
+                                  input = layer_input, \
+                                  shared_W = layer.W, shared_b = layer.b,\
+                                  filter_shape = filter_shape , image_shape = image_shape )
+                
+                
+            gparams = T.grad(da_layer.cost, da_layer.params)
+                
+            updates = {}
+            for param, gparam in zip(da_layer.params, gparams):
+                    updates[param] = param - gparam * pretrain_lr
+                    
+                
+            update_fn = theano.function([index], da_layer.cost, \
+                                        updates = updates,
+                                        givens = {
+                    self.x : train_set_x[index*batch_size:(index+1)*batch_size]} )
+             
+            self.pretrain_functions += [update_fn]
+
+        for i in xrange( self.mlp_n_layers ): 
+            if i == 0 :
+                input_size = n_ins_mlp
+            else:
+                input_size = mlp_hidden_layers_sizes[i-1]
+
+            if i == 0 :
+                if len( self.layers ) == 0 :
+                    layer_input=self.x
+                else :
+                    layer_input = self.layers[-1].output.flatten(2)
+            else:
+                layer_input = self.layers[-1].output
+     
+            layer = SigmoidalLayer(rng, layer_input, input_size,
+                                        mlp_hidden_layers_sizes[i] )
+              
+            self.layers += [layer]
+            self.params += layer.params
+            
+
+            print 'MLP layer '+str(i+1)+' created'
+            
+        self.logLayer = LogisticRegression(input=self.layers[-1].output, \
+                                                     n_in=mlp_hidden_layers_sizes[-1], n_out=n_out)
+        self.params += self.logLayer.params
+
+        cost = self.logLayer.negative_log_likelihood(self.y)
+
+        gparams = T.grad(cost, self.params)
+        updates = {}
+
+        for param,gparam in zip(self.params, gparams):
+            updates[param] = param - gparam*finetune_lr
+            
+        self.finetune = theano.function([index], cost,
+                updates = updates,
+                givens = {
+                  self.x : train_set_x[index*batch_size:(index+1)*batch_size],
+                  self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
+ 
+
+        self.errors = self.logLayer.errors(self.y)
+ 
+ 
+ 
+def sgd_optimization_mnist( learning_rate=0.1, pretraining_epochs = 2, \
+                            pretrain_lr = 0.01, training_epochs = 1000, \
+                            dataset='mnist.pkl.gz'):
+
+    f = gzip.open(dataset,'rb')
+    train_set, valid_set, test_set = cPickle.load(f)
+    f.close()
+ 
+ 
+    def shared_dataset(data_xy):
+        data_x, data_y = data_xy
+        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
+        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
+        return shared_x, T.cast(shared_y, 'int32')
+ 
+
+    test_set_x, test_set_y = shared_dataset(test_set)
+    valid_set_x, valid_set_y = shared_dataset(valid_set)
+    train_set_x, train_set_y = shared_dataset(train_set)
+ 
+    batch_size = 500 # size of the minibatch
+ 
+
+    n_train_batches = train_set_x.value.shape[0] / batch_size
+    n_valid_batches = valid_set_x.value.shape[0] / batch_size
+    n_test_batches = test_set_x.value.shape[0] / batch_size
+ 
+    # allocate symbolic variables for the data
+    index = T.lscalar() # index to a [mini]batch
+    x = T.matrix('x') # the data is presented as rasterized images
+    y = T.ivector('y') # the labels are presented as 1d vector of
+                           # [int] labels
+    layer0_input = x.reshape((batch_size,1,28,28))
+    
+
+    # Setup the convolutional layers with their DAs(add as many as you want)
+    corruption_levels = [ 0.2, 0.2, 0.2]
+    rng = numpy.random.RandomState(1234)
+    ker1=2
+    ker2=2
+    conv_layers=[]
+    conv_layers.append([[ker1,1,5,5], [batch_size,1,28,28], [2,2] ])
+    conv_layers.append([[ker2,ker1,5,5], [batch_size,ker1,12,12], [2,2] ])
+
+    # Setup the MLP layers of the network
+    mlp_layers=[500]
+  
+    network = SdA(input = layer0_input, n_ins_conv = 28*28, n_ins_mlp = ker2*4*4, \
+                      train_set_x = train_set_x, train_set_y = train_set_y, batch_size = batch_size,
+                      conv_hidden_layers_sizes = conv_layers,  \
+                      mlp_hidden_layers_sizes = mlp_layers, \
+                      corruption_levels = corruption_levels , n_out = 10, \
+                      rng = rng , pretrain_lr = pretrain_lr , finetune_lr = learning_rate )
+
+    test_model = theano.function([index], network.errors,
+             givens = {
+                network.x: test_set_x[index*batch_size:(index+1)*batch_size],
+                network.y: test_set_y[index*batch_size:(index+1)*batch_size]})
+ 
+    validate_model = theano.function([index], network.errors,
+           givens = {
+                network.x: valid_set_x[index*batch_size:(index+1)*batch_size],
+                network.y: valid_set_y[index*batch_size:(index+1)*batch_size]})
+
+
+
+    start_time = time.clock()
+    for i in xrange(len(network.layers)-len(mlp_layers)):
+        for epoch in xrange(pretraining_epochs):
+            for batch_index in xrange(n_train_batches):
+                c = network.pretrain_functions[i](batch_index)
+            print 'pre-training convolution layer %i, epoch %d, cost '%(i,epoch),c
+
+    patience = 10000 # look at this many examples regardless
+    patience_increase = 2. # wait this much longer when a new best is
+                                  # found
+    improvement_threshold = 0.995 # a relative improvement of this much is considered significant
+
+    validation_frequency = min(n_train_batches, patience/2)
+ 
+ 
+    best_params = None
+    best_validation_loss = float('inf')
+    test_score = 0.
+    start_time = time.clock()
+ 
+    done_looping = False
+    epoch = 0
+ 
+    while (epoch < training_epochs) and (not done_looping):
+      epoch = epoch + 1
+      for minibatch_index in xrange(n_train_batches):
+ 
+        cost_ij = network.finetune(minibatch_index)
+        iter = epoch * n_train_batches + minibatch_index
+ 
+        if (iter+1) % validation_frequency == 0:
+            
+            validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
+            this_validation_loss = numpy.mean(validation_losses)
+            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+                   (epoch, minibatch_index+1, n_train_batches, \
+                    this_validation_loss*100.))
+ 
+ 
+            # if we got the best validation score until now
+            if this_validation_loss < best_validation_loss:
+ 
+                #improve patience if loss improvement is good enough
+                if this_validation_loss < best_validation_loss * \
+                       improvement_threshold :
+                    patience = max(patience, iter * patience_increase)
+ 
+                # save best validation score and iteration number
+                best_validation_loss = this_validation_loss
+                best_iter = iter
+ 
+                # test it on the test set
+                test_losses = [test_model(i) for i in xrange(n_test_batches)]
+                test_score = numpy.mean(test_losses)
+                print((' epoch %i, minibatch %i/%i, test error of best '
+                      'model %f %%') %
+                             (epoch, minibatch_index+1, n_train_batches,
+                              test_score*100.))
+ 
+ 
+        if patience <= iter :
+                done_looping = True
+                break
+ 
+    end_time = time.clock()
+    print(('Optimization complete with best validation score of %f %%,'
+           'with test performance %f %%') %
+                 (best_validation_loss * 100., test_score*100.))
+    print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
+ 
+ 
+ 
+ 
+ 
+ 
+if __name__ == '__main__':
+    sgd_optimization_mnist()
+ 
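The value n_ins_mlp = ker2*4*4 passed to SdA in the script above follows from the usual 'valid' convolution plus 2x2 max-pooling shape arithmetic on 28x28 MNIST images; a small sketch of that bookkeeping:

    size = 28
    for ksize, pool in [(5, 2), (5, 2)]:      # two conv (5x5) + max-pool (2x2) stages
        size = (size - ksize + 1) // pool     # 28 -> 12 -> 4
    print size * size                         # 16 units per feature map, times ker2 maps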
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/stacked_dae.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+# coding: utf-8
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+import copy
+
+from utils import update_locals
+
+class LogisticRegression(object):
+    def __init__(self, input, n_in, n_out):
+        # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
+        self.W = theano.shared( value=numpy.zeros((n_in,n_out),
+                                            dtype = theano.config.floatX) )
+        # initialize the biases b as a vector of n_out 0s
+        self.b = theano.shared( value=numpy.zeros((n_out,), 
+                                            dtype = theano.config.floatX) )
+        # compute vector of class-membership probabilities in symbolic form
+        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
+        
+        # compute prediction as class whose probability is maximal in 
+        # symbolic form
+        self.y_pred=T.argmax(self.p_y_given_x, axis=1)
+
+        # list of parameters for this layer
+        self.params = [self.W, self.b]
+
+    def negative_log_likelihood(self, y):
+       return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+
+    def errors(self, y):
+        # check if y has same dimension of y_pred 
+        if y.ndim != self.y_pred.ndim:
+            raise TypeError('y should have the same shape as self.y_pred', 
+                ('y', y.type, 'y_pred', self.y_pred.type))
+
+        # check if y is of the correct datatype        
+        if y.dtype.startswith('int'):
+            # the T.neq operator returns a vector of 0s and 1s, where 1
+            # represents a mistake in prediction
+            return T.mean(T.neq(self.y_pred, y))
+        else:
+            raise NotImplementedError()
+
+
+class SigmoidalLayer(object):
+    def __init__(self, rng, input, n_in, n_out):
+        self.input = input
+
+        W_values = numpy.asarray( rng.uniform( \
+              low = -numpy.sqrt(6./(n_in+n_out)), \
+              high = numpy.sqrt(6./(n_in+n_out)), \
+              size = (n_in, n_out)), dtype = theano.config.floatX)
+        self.W = theano.shared(value = W_values)
+
+        b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
+        self.b = theano.shared(value= b_values)
+
+        self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
+        self.params = [self.W, self.b]
+
+
+
+class dA(object):
+  def __init__(self, n_visible= 784, n_hidden= 500, corruption_level = 0.1,\
+               input = None, shared_W = None, shared_b = None):
+    self.n_visible = n_visible
+    self.n_hidden  = n_hidden
+    
+    # create a Theano random generator that gives symbolic random values
+    theano_rng = RandomStreams()
+    
+    if shared_W != None and shared_b != None : 
+        self.W = shared_W
+        self.b = shared_b
+    else:
+        # initial values for weights and biases
+        # note : W' was written as `W_prime` and b' as `b_prime`
+
+        # W is initialized with `initial_W` which is uniformly sampled
+        # from -6./sqrt(n_visible+n_hidden) and 6./sqrt(n_hidden+n_visible)
+        # the output of uniform is converted using asarray to dtype
+        # theano.config.floatX so that the code is runnable on GPU
+        initial_W = numpy.asarray( numpy.random.uniform( \
+              low = -numpy.sqrt(6./(n_hidden+n_visible)), \
+              high = numpy.sqrt(6./(n_hidden+n_visible)), \
+              size = (n_visible, n_hidden)), dtype = theano.config.floatX)
+        initial_b       = numpy.zeros(n_hidden, dtype = theano.config.floatX)
+    
+    
+        # theano shared variables for weights and biases
+        self.W       = theano.shared(value = initial_W,       name = "W")
+        self.b       = theano.shared(value = initial_b,       name = "b")
+    
+ 
+    initial_b_prime= numpy.zeros(n_visible)
+    # tied weights, therefore W_prime is W transpose
+    self.W_prime = self.W.T 
+    self.b_prime = theano.shared(value = initial_b_prime, name = "b'")
+
+    # if no input is given, generate a variable representing the input
+    if input == None : 
+        # we use a matrix because we expect a minibatch of several examples,
+        # each example being a row
+        self.x = T.dmatrix(name = 'input') 
+    else:
+        self.x = input
+    # Equation (1)
+    # keep 90% of the inputs the same and zero-out randomly selected subset of 10% of the inputs
+    # note : first argument of theano.rng.binomial is the shape(size) of 
+    #        random numbers that it should produce
+    #        second argument is the number of trials 
+    #        third argument is the probability of success of any trial
+    #
+    #        this will produce an array of 0s and 1s where 1 has a 
+    #        probability of 1 - ``corruption_level`` and 0 with
+    #        ``corruption_level``
+    self.tilde_x  = theano_rng.binomial( self.x.shape,  1,  1 - corruption_level) * self.x
+    # Equation (2)
+    # note  : y is stored as an attribute of the class so that it can be 
+    #         used later when stacking dAs. 
+    self.y   = T.nnet.sigmoid(T.dot(self.tilde_x, self.W      ) + self.b)
+    # Equation (3)
+    self.z   = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
+    # Equation (4)
+    # note : we sum over the size of a datapoint; if we are using minibatches,
+    #        L will  be a vector, with one entry per example in minibatch
+    self.L = - T.sum( self.x*T.log(self.z) + (1-self.x)*T.log(1-self.z), axis=1 ) 
+    # note : L is now a vector, where each element is the cross-entropy cost 
+    #        of the reconstruction of the corresponding example of the 
+    #        minibatch. We need to compute the average of all these to get 
+    #        the cost of the minibatch
+    self.cost = T.mean(self.L)
+
+    self.params = [ self.W, self.b, self.b_prime ]
+
+
+
+
+class SdA(object):
+    def __init__(self, train_set_x, train_set_y, batch_size, n_ins, 
+                 hidden_layers_sizes, n_outs, 
+                 corruption_levels, rng, pretrain_lr, finetune_lr, input_divider=1.0):
+        update_locals(self, locals())      
+ 
+        self.layers             = []
+        self.pretrain_functions = []
+        self.params             = []
+        self.n_layers           = len(hidden_layers_sizes)
+
+        self.input_divider = numpy.asarray(input_divider, dtype=theano.config.floatX)
+
+        if len(hidden_layers_sizes) < 1 :
+            raise Exception('You must have at least one hidden layer')
+
+
+        # allocate symbolic variables for the data
+        index   = T.lscalar()    # index to a [mini]batch 
+        self.x  = T.matrix('x')  # the data is presented as rasterized images
+        self.y  = T.ivector('y') # the labels are presented as 1D vector of 
+                                 # [int] labels
+
+        for i in xrange( self.n_layers ):
+            # construct the sigmoidal layer
+
+            # the size of the input is either the number of hidden units of 
+            # the layer below or the input size if we are on the first layer
+            if i == 0 :
+                input_size = n_ins
+            else:
+                input_size = hidden_layers_sizes[i-1]
+
+            # the input to this layer is either the activation of the hidden
+            # layer below or the input of the SdA if you are on the first
+            # layer
+            if i == 0 : 
+                layer_input = self.x
+            else:
+                layer_input = self.layers[-1].output
+
+            layer = SigmoidalLayer(rng, layer_input, input_size, 
+                                   hidden_layers_sizes[i] )
+            # add the layer to the list of layers
+            self.layers += [layer]
+            self.params += layer.params
+        
+            # Construct a denoising autoencoder that shares weights with this
+            # layer
+            dA_layer = dA(input_size, hidden_layers_sizes[i], \
+                          corruption_level = corruption_levels[0],\
+                          input = layer_input, \
+                          shared_W = layer.W, shared_b = layer.b)
+        
+            # Construct a function that trains this dA
+            # compute gradients of layer parameters
+            gparams = T.grad(dA_layer.cost, dA_layer.params)
+            # compute the list of updates
+            updates = {}
+            for param, gparam in zip(dA_layer.params, gparams):
+                updates[param] = param - gparam * pretrain_lr
+            
+            # create a function that trains the dA
+            update_fn = theano.function([index], dA_layer.cost, \
+                  updates = updates,
+                  givens = { 
+                     self.x : train_set_x[index*batch_size:(index+1)*batch_size] / self.input_divider})
+            # collect this function into a list
+            self.pretrain_functions += [update_fn]
+
+        
+        # We now need to add a logistic layer on top of the MLP
+        self.logLayer = LogisticRegression(\
+                         input = self.layers[-1].output,\
+                         n_in = hidden_layers_sizes[-1], n_out = n_outs)
+
+        self.params += self.logLayer.params
+        # construct a function that implements one step of finetuning
+
+        # compute the cost, defined as the negative log likelihood 
+        cost = self.logLayer.negative_log_likelihood(self.y)
+        # compute the gradients with respect to the model parameters
+        gparams = T.grad(cost, self.params)
+        # compute list of updates
+        updates = {}
+        for param,gparam in zip(self.params, gparams):
+            updates[param] = param - gparam*finetune_lr
+            
+        self.finetune = theano.function([index], cost, 
+                updates = updates,
+                givens = {
+                  self.x : train_set_x[index*batch_size:(index+1)*batch_size]/self.input_divider,
+                  self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
+
+        # symbolic variable that points to the number of errors made on the
+        # minibatch given by self.x and self.y
+
+        self.errors = self.logLayer.errors(self.y)
+
+    @classmethod
+    def copy_reusing_lower_layers(cls, obj, num_hidden_layers, new_finetuning_lr=None):
+        assert(num_hidden_layers <= obj.n_layers)
+
+        if not new_finetuning_lr:
+            new_finetuning_lr = obj.finetune_lr
+
+        new_sda = cls(train_set_x= obj.train_set_x, \
+                      train_set_y = obj.train_set_y,\
+                      batch_size = obj.batch_size, \
+                      n_ins= obj.n_ins, \
+                      hidden_layers_sizes = obj.hidden_layers_sizes[:num_hidden_layers], \
+                      n_outs = obj.n_outs, \
+                      corruption_levels = obj.corruption_levels[:num_hidden_layers],\
+                      rng = obj.rng,\
+                      pretrain_lr = obj.pretrain_lr, \
+                      finetune_lr = new_finetuning_lr, \
+                      input_divider = obj.input_divider )
+
+        # note: new_sda.layers actually contains only the hidden layers
+        for i, layer in enumerate(new_sda.layers):
+            original_layer = obj.layers[i]
+            for p1,p2 in zip(layer.params, original_layer.params):
+                p1.value = p2.value.copy()
+
+        return new_sda
+
+    def get_params_copy(self):
+        return copy.deepcopy(self.params)
+
+    def set_params_from_copy(self, copy):
+        # We don't want to replace the var, as the functions have pointers in there
+        # We only want to replace values.
+        for i, p in enumerate(self.params):
+            p.value = copy[i].value
+
+    def get_params_means(self):
+        s = []
+        for p in self.params:
+            s.append(numpy.mean(p.value))
+        return s
+
+if __name__ == '__main__':
+    import sys
+    args = sys.argv[1:]
+
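A minimal numpy-only sketch of the dA equations (1)-(4) commented above (illustrative, outside Theano; shapes follow the defaults n_visible=784, n_hidden=500):

    import numpy
    rng = numpy.random.RandomState(0)
    x = rng.rand(5, 784)                                   # a tiny minibatch
    W = rng.uniform(-0.1, 0.1, (784, 500))
    b, b_prime = numpy.zeros(500), numpy.zeros(784)
    sigmoid = lambda a: 1. / (1. + numpy.exp(-a))
    tilde_x = rng.binomial(1, 0.9, x.shape) * x            # (1) corrupt 10% of the inputs
    y = sigmoid(numpy.dot(tilde_x, W) + b)                 # (2) encode
    z = sigmoid(numpy.dot(y, W.T) + b_prime)               # (3) decode with tied weights
    L = -numpy.sum(x*numpy.log(z) + (1-x)*numpy.log(1-z), axis=1)   # (4) cross-entropy
    print numpy.mean(L)                                    # minibatch reconstruction cost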
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/utils.py	Tue Feb 23 18:16:55 2010 -0500
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+
+from jobman import DD
+
+# from pylearn codebase
+def update_locals(obj, dct):
+    if 'self' in dct:
+        del dct['self']
+    obj.__dict__.update(dct)
+
+def produit_croise_jobs(val_dict):
+    job_list = [DD()]
+    all_keys = val_dict.keys()
+
+    for key in all_keys:
+        possible_values = val_dict[key]
+        new_job_list = []
+        for val in possible_values:
+            for job in job_list:
+                to_insert = job.copy()
+                to_insert.update({key: val})
+                new_job_list.append(to_insert)
+        job_list = new_job_list
+
+    return job_list
+
+def test_produit_croise_jobs():
+    vals = {'a': [1,2], 'b': [3,4,5]}
+    print produit_croise_jobs(vals)
+
+
+# taken from http://stackoverflow.com/questions/276052/how-to-get-current-cpu-and-ram-usage-in-python
+"""Simple module for getting amount of memory used by a specified user's
+processes on a UNIX system.
+It uses UNIX ps utility to get the memory usage for a specified username and
+pipe it to awk for summing up per application memory usage and return the total.
+Python's Popen() from subprocess module is used for spawning ps and awk.
+
+"""
+
+import subprocess
+
+class MemoryMonitor(object):
+
+    def __init__(self, username):
+        """Create new MemoryMonitor instance."""
+        self.username = username
+
+    def usage(self):
+        """Return int containing memory used by user's processes."""
+        self.process = subprocess.Popen("ps -u %s -o rss | awk '{sum+=$1} END {print sum}'" % self.username,
+                                        shell=True,
+                                        stdout=subprocess.PIPE,
+                                        )
+        self.stdout_list = self.process.communicate()[0].split('\n')
+        return int(self.stdout_list[0])
+
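
Short usage sketches for the two helpers above; the parameter grid and the username are made up for illustration:

grid = {'learning_rate': [0.01, 0.001], 'corruption_level': [0.1, 0.3]}
for job in produit_croise_jobs(grid):
    print job
# prints 4 jobs, one per combination of values,
# e.g. {'learning_rate': 0.01, 'corruption_level': 0.1}

monitor = MemoryMonitor('someuser')   # hypothetical username
print monitor.usage()                 # summed RSS (in kB) of that user's processes
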
--- a/transformations/BruitGauss.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/BruitGauss.py	Tue Feb 23 18:16:55 2010 -0500
@@ -35,39 +35,39 @@
         self.regenerate_parameters(complexity)
         
     def get_settings_names(self):
-        return ['nb_chng','sigma_gauss','grandeur','effectuer']
+        return ['nb_chng','sigma_gauss','grandeur']
 
     def regenerate_parameters(self, complexity):
-        self.nb_chng=3+int(numpy.random.rand()*self.nb_chngmax*complexity)
+        self.effectuer = numpy.random.binomial(1,0.25)    ##### 25% chance of applying the noise #####
+
         
-        if float(complexity) > 0:
+        if self.effectuer and complexity > 0:
+            self.nb_chng=3+int(numpy.random.rand()*self.nb_chngmax*complexity)
             self.sigma_gauss=2.0 + numpy.random.rand()*self.sigmamax*complexity
             self.grandeur=12+int(numpy.random.rand()*self.grandeurmax*complexity)
-            self.effectuer =numpy.random.binomial(1,0.25)    ##### On a 25% de faire un bruit #####
+            #build the Gaussian kernel
+            self.gauss=numpy.zeros((self.grandeur,self.grandeur))
+            x0 = y0 = self.grandeur/2.0
+            for i in xrange(self.grandeur):
+                for j in xrange(self.grandeur):
+                    self.gauss[i,j]=numpy.exp(-((i-x0)**2 + (j-y0)**2) / self.sigma_gauss**2)
+            #build the averaging window
+            self.moy=numpy.zeros((self.grandeur,self.grandeur))
+            x0 = y0 = self.grandeur/2
+            for i in xrange(0,self.grandeur):
+                for j in xrange(0,self.grandeur):
+                    self.moy[i,j]=((numpy.sqrt(2*(self.grandeur/2.0)**2) -\
+                                 numpy.sqrt(numpy.abs(i-self.grandeur/2.0)**2+numpy.abs(j-self.grandeur/2.0)**2))/numpy.sqrt((self.grandeur/2.0)**2))**5
         else:
-            self.effectuer = 0
             self.sigma_gauss = 1 # eviter division par 0
             self.grandeur=1
-        #Un peu de paranoia ici, mais on ne sait jamais
+            self.nb_chng = 0
+            self.effectuer = 0
         
-        #creation du noyau gaussien
-        self.gauss=numpy.zeros((self.grandeur,self.grandeur))
-        x0 = y0 = self.grandeur/2.0
-        for i in xrange(self.grandeur):
-            for j in xrange(self.grandeur):
-                self.gauss[i,j]=numpy.exp(-((i-x0)**2 + (j-y0)**2) / self.sigma_gauss**2)
-        #creation de la fenetre de moyennage
-        self.moy=numpy.zeros((self.grandeur,self.grandeur))
-        x0 = y0 = self.grandeur/2
-        for i in xrange(0,self.grandeur):
-            for j in xrange(0,self.grandeur):
-                self.moy[i,j]=((numpy.sqrt(2*(self.grandeur/2.0)**2) - numpy.sqrt(numpy.abs(i-self.grandeur/2.0)**2+numpy.abs(j-self.grandeur/2.0)**2))/\
-                                numpy.sqrt((self.grandeur/2.0)**2))**5
-
         return self._get_current_parameters()
 
     def _get_current_parameters(self):
-        return [self.nb_chng,self.sigma_gauss,self.grandeur,self.effectuer]
+        return [self.nb_chng,self.sigma_gauss,self.grandeur]
 
     
     def transform_image(self, image):
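
The double loop above fills self.gauss with exp(-((i-x0)**2 + (j-y0)**2) / sigma**2); the same kernel written with numpy broadcasting, as a standalone sketch (the function name is illustrative, this is not part of the patch):

import numpy

def noyau_gaussien(grandeur, sigma_gauss):
    # Vectorized equivalent of the kernel loop in regenerate_parameters.
    x0 = y0 = grandeur / 2.0
    i = numpy.arange(grandeur).reshape(-1, 1)
    j = numpy.arange(grandeur).reshape(1, -1)
    return numpy.exp(-((i - x0) ** 2 + (j - y0) ** 2) / sigma_gauss ** 2)
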
--- a/transformations/Occlusion.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/Occlusion.py	Tue Feb 23 18:16:55 2010 -0500
@@ -11,7 +11,7 @@
 Le fichier /data/lisa/data/ift6266h10/echantillon_occlusion.ft 
 (sur le reseau DIRO) est necessaire.
 
-Il y a 20% de chance d'avoir une occlusion quelconque.
+There is a 30% chance of applying some occlusion.
 
 Sylvain Pannetier Lebeuf dans le cadre de IFT6266, hiver 2010
 
@@ -61,10 +61,10 @@
         return ['haut','bas','gauche','droite','x_arrivee','y_arrivee','endroit','rajout','appliquer']
 
     def regenerate_parameters(self, complexity):
-        self.haut=min(15,int(numpy.abs(numpy.random.normal(int(7*complexity),2))))
-        self.bas=min(15,int(numpy.abs(numpy.random.normal(int(7*complexity),2))))
-        self.gauche=min(15,int(numpy.abs(numpy.random.normal(int(7*complexity),2))))
-        self.droite=min(15,int(numpy.abs(numpy.random.normal(int(7*complexity),2))))
+        self.haut=min(15,int(numpy.abs(numpy.random.normal(int(8*complexity),2))))
+        self.bas=min(15,int(numpy.abs(numpy.random.normal(int(8*complexity),2))))
+        self.gauche=min(15,int(numpy.abs(numpy.random.normal(int(8*complexity),2))))
+        self.droite=min(15,int(numpy.abs(numpy.random.normal(int(8*complexity),2))))
         if self.haut+self.bas+self.gauche+self.droite==0:   #Tres improbable
             self.haut=1
             self.bas=1
@@ -75,8 +75,8 @@
         self.x_arrivee=int(numpy.abs(numpy.random.normal(0,2))) #Complexity n'entre pas en jeu, pas besoin
         self.y_arrivee=int(numpy.random.normal(0,3)) 
         
-        self.rajout=numpy.random.randint(0,self.longueur)  #les bouts de quelle lettre
-        self.appliquer=numpy.random.binomial(1,0.2)    #####  20 % du temps, on met une occlusion #####
+        self.rajout=numpy.random.randint(0,self.longueur-1)  #which letter the pieces come from
+        self.appliquer=numpy.random.binomial(1,0.4)    #####  40% of the time, an occlusion is applied #####
         
         if complexity == 0: #On ne fait rien dans ce cas
             self.applique=0
@@ -151,4 +151,4 @@
 if __name__ == '__main__':
     import pylab
     import scipy
-    _test(0.5)
\ No newline at end of file
+    _test(0.5)
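
For reference, a standalone sketch of the new border-size draw used for haut/bas/gauche/droite (the helper name is made up; the distribution matches the lines above):

import numpy

def sample_border(complexity):
    # |N(8*complexity, 2)| truncated to [0, 15], as in regenerate_parameters.
    return min(15, int(numpy.abs(numpy.random.normal(int(8 * complexity), 2))))

print [sample_border(0.7) for _ in xrange(4)]   # e.g. [5, 7, 3, 6]
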
--- a/transformations/PoivreSel.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/PoivreSel.py	Tue Feb 23 18:16:55 2010 -0500
@@ -23,7 +23,7 @@
 class PoivreSel():
     
     def __init__(self):
-        self.proportion_bruit=0.1 #Le pourcentage des pixels qui seront bruites
+        self.proportion_bruit=0.08 #Fraction of the pixels that will be noised
         self.nb_chng=10 #Le nombre de pixels changes. Seulement pour fin de calcul
         self.effectuer=1    #Vaut 1 si on effectue et 0 sinon.
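
The transform itself is outside this hunk; purely as an illustration of an 8% salt-and-pepper corruption (this sketch is an assumption, not the module's actual code):

import numpy

def poivre_sel_sketch(image, proportion_bruit=0.08):
    # Set a random fraction of the pixels to pure black (0) or pure white (1).
    out = image.copy()
    mask = numpy.random.rand(*image.shape) < proportion_bruit
    out[mask] = numpy.random.randint(0, 2, size=mask.sum())
    return out
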
         
--- a/transformations/Rature.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/Rature.py	Tue Feb 23 18:16:55 2010 -0500
@@ -2,15 +2,12 @@
 # coding: utf-8
 
 '''
-Ajout de rature sur le caractère. La rature peut etre horizontale, verticale 
-(dans ces deux cas, l'amplacement de la bande est aleatoire) ou sur la diagonale
-(et anti-diagonale).
+Adds a scratch ("rature") over the character. The scratch is actually a "1" that is
+rotated and then applied on top of the character. An enlargement and then two
+erosions are applied to the "1" so that it is no longer recognizable.
+There may be more than one scratch!
 
-La largeur de la bande ainsi que sa clarté sont definies a l'aide de complexity
-et d'une composante aleatoire.
-clarte: 0=blanc et 1=noir
-
-Il y a 15% d'effectuer une rature
+There is a 15% chance of applying a scratch.
 
 Ce fichier prend pour acquis que les images sont donnees une a la fois
 sous forme de numpy.array de 1024 (32 x 32) valeurs entre 0 et 1.
@@ -19,95 +16,203 @@
 
 '''
 
-import numpy
+import numpy, Image, random
+import scipy.ndimage.morphology
+from pylearn.io import filetensor as ft
 
 
 class Rature():
    
     def __init__(self):
-        self.largeur=2  #Largeur de la bande
-        self.deplacement=0  #Deplacement par rapport au milieu
-        self.orientation=0  #0=horizontal, 1=vertical, 2=oblique
-        self.clarte=0.5 #Clarte de la ligne appliquee
-        self.faire=1  #Si ==1, on applique une rature
+        self.angle=0 #Rotation angle in degrees (between 0 and 180)
+        self.numero=0 #Index of the "1" chosen from the bank of 1s
+        self.gauche=-1   #Index of the leftmost column containing the 1
+        self.droite=-1
+        self.haut=-1
+        self.bas=-1
+        self.faire=1    #1=apply the scratch, 0=do nothing
+
+        self.crop_haut=0
+        self.crop_gauche=0  #These two values are between 0 and 31 and define
+                            #where the crop is taken in the image of the 1
+
+        self.largeur_bande=-1    #Width of the band
+        self.smooth=-1   #Side length of the square matrix used for the erosion
+        self.nb_ratures=-1   #Number of scratches applied
+        self.fini=0 #1=done adding all the layers, 0=not done
+        self.complexity=0   #Keep the complexity in memory in case several layers are needed
+
+        f3 = open('/data/lisa/data/ift6266h10/un_rature.ft')   #Must be on the DIRO network.
+        #f3 = open('/home/sylvain/Dropbox/Msc/IFT6266/donnees/un_rature.ft')
+        #Otherwise the path has to be fixed
+        w=ft.read(f3)
+        f3.close()
+        self.d=(w.astype('float'))/255
+
+        self.patch=self.d[0].reshape((32,32)) #The scratch patch that will be applied on the image
 
     def get_settings_names(self):
-        return ['orientation','deplacement','clarte','faire']
+        return ['angle','numero','faire','crop_haut','crop_gauche','largeur_bande','smooth','nb_ratures']
 
-    def regenerate_parameters(self, complexity):
-        #Il faut choisir parmis vertical, horizontal et diagonal.
-        #La methode n'est pas exacte, mais un peu plus rapide que generer un int.
-        #Complexity n'a rien a voir avec ce choix
+    def regenerate_parameters(self, complexity,next_rature = False):
         
-        choix=numpy.random.random()
         
-        if choix <0.34:
-            self.orientation=0
-        elif choix <0.67:
-            self.orientation=1
-        else:
-            self.orientation=2
+        self.numero=random.randint(0,4999)  #These bounds are inclusive!
+        self.fini=0
+        self.complexity=complexity
+            
+        if float(complexity) > 0:
+            
+            self.gauche=self.droite=self.haut=self.bas=-1   #Reset everything to -1
+            
+            self.angle=int(numpy.random.normal(90,100*complexity))
+
+            self.faire=numpy.random.binomial(1,0.15)    ##### 15% chance of applying a scratch #####
+            if next_rature:
+                self.faire = 1
+            #self.faire=1 #For testing only
             
-        if float(complexity) > 0:    
-            self.largeur=min(32,max(1,int(numpy.ceil(complexity*5)*numpy.random.normal(1,float(complexity)/2))))
-            self.clarte=min(1,max(0,complexity*numpy.random.normal(1,float(complexity)/2)))
-            self.faire=numpy.random.binomial(1,0.15)    ##### 15% d'effectuer une rature #####
+            self.crop_haut=random.randint(0,17)
+            self.crop_gauche=random.randint(0,17)
+            if complexity <= 0.25 :
+                self.smooth=6
+            elif complexity <= 0.5:
+                self.smooth=5
+            elif complexity <= 0.75:
+                self.smooth=4
+            else:
+                self.smooth=3
+            
+            p = numpy.random.rand()
+            if p < 0.5:
+                self.nb_ratures= 1
+            else:
+                if p < 0.8:
+                    self.nb_ratures = 2
+                else:
+                    self.nb_ratures = 3
+            
+            #Build the scratch "patch" that will be applied on the image
+            if self.faire == 1:
+                self.get_size()
+                self.get_image_rot()    #Build the "patch"
+            
         else:
-            self.largeur=0
-            self.clarte=0
-            self.faire=0    #On ne fait rien !!!
+            self.faire=0    #Do nothing if complexity=0 !!
         
         return self._get_current_parameters()
+    
+    
+    def get_image_rot(self):
+        image2=(self.d[self.numero].reshape((32,32))[self.haut:self.bas,self.gauche:self.droite])
+        
+        im = Image.fromarray(numpy.asarray(image2*255,dtype='uint8'))
+        
+        #The rotation and the resize are high quality so that the image stays sharp
+        im2 = im.rotate(self.angle,Image.BICUBIC,expand=False)
+        im3=im2.resize((50,50),Image.ANTIALIAS)
+        
+        grosse=numpy.asarray(numpy.asarray(im3)/255.0,dtype='float32')
+        crop=grosse[self.haut:self.haut+32,self.gauche:self.gauche+32]
+        
+        self.get_patch(crop)
+        
+    def get_patch(self,crop):
+        smoothing = numpy.ones((self.smooth,self.smooth))
+        #Two erosions are applied to get a nice result: not too wide and
+        #not too thin
+        trans=scipy.ndimage.morphology.grey_erosion\
+                    (crop,size=smoothing.shape,structure=smoothing,mode='wrap')
+        trans1=scipy.ndimage.morphology.grey_erosion\
+                    (trans,size=smoothing.shape,structure=smoothing,mode='wrap')
+        
+               
+        patch_img=Image.fromarray(numpy.asarray(trans1*255,dtype='uint8'))
+        
+        patch_img2=patch_img.crop((4,4,28,28)).resize((32,32))  #To counter the border effects!
+        
+        trans2=numpy.asarray(numpy.asarray(patch_img2)/255.0,dtype='float32')
+            
+            
+        #Bring everything back between 0 and 1
+        trans2=trans2-trans2.min() #Make everything positive again
+        trans2=trans2/trans2.max()
+
+        #The scratch is more likely to be at the bottom or slanted up towards 10 o'clock
+        if random.random() <= 0.5:  #Flip the matrix vertically in that case
+            for i in xrange(0,32):
+                self.patch[i,:]=trans2[31-i,:]
+        else:
+            self.patch=trans2
+        
+    
+    
+    
+    def get_size(self):
+        image=self.d[self.numero].reshape((32,32))
+        
+        #haut: first non-empty row from the top
+        for i in xrange(0,32):
+            for j in xrange(0,32):
+                if(image[i,j]) != 0:
+                    if self.haut == -1:
+                        self.haut=i
+                        break
+            if self.haut > -1:
+                break
+        
+        #bas: first non-empty row from the bottom
+        for i in xrange(31,-1,-1):
+            for j in xrange(0,32):
+                if(image[i,j]) != 0:
+                    if self.bas == -1:
+                        self.bas=i
+                        break
+            if self.bas > -1:
+                break
+            
+        #gauche: first non-empty column from the left
+        for i in xrange(0,32):
+            for j in xrange(0,32):
+                if(image[j,i]) != 0:
+                    if self.gauche == -1:
+                        self.gauche=i
+                        break
+            if self.gauche > -1:
+                break
+            
+        #droite: first non-empty column from the right
+        for i in xrange(31,-1,-1):
+            for j in xrange(0,32):
+                if(image[j,i]) != 0:
+                    if self.droite == -1:
+                        self.droite=i
+                        break
+            if self.droite > -1:
+                break
+                
 
     def _get_current_parameters(self):
-        return [self.orientation,self.largeur,self.clarte,self.faire]
+        return [self.angle,self.numero,self.faire,self.crop_haut,self.crop_gauche,self.largeur_bande,self.smooth,self.nb_ratures]
 
     def transform_image(self, image):
-        if self.faire == 0:
+        if self.faire == 0: #Do nothing!!
             return image
         
-        if self.orientation == 0:
-            return self._horizontal(image)
-        elif self.orientation == 1:
-            return self._vertical(image)
-        else:
-            return self._oblique(image)
-        
-    def _horizontal(self,image):
-        self.deplacement=numpy.random.normal(0,5)
-        #On s'assure de rester dans l'image
-        if self.deplacement < -16:  #Si on recule trop
-            self.deplacement = -16
-        if self.deplacement+self.largeur > 16: #Si on avance trop
-            self.deplacement=16-self.largeur
-        for i in xrange(0,self.largeur):
-            for j in xrange(0,32):
-                image[i+15+self.deplacement,j]=min(1,max(image[i+15+self.deplacement,j],self.clarte))
-        return image
-    
-    def _vertical(self,image):
-        self.deplacement=numpy.random.normal(0,5)
-        #On s'assure de rester dans l'image
-        if self.deplacement < -16:  #Si on recule trop
-            self.deplacement = -16
-        if self.deplacement+self.largeur > 16: #Si on avance trop
-            self.deplacement=16-self.largeur
-        for i in xrange(0,self.largeur):
-            for j in xrange(0,32):
-                image[j,i+15+self.deplacement]=min(1,max(image[j,i+15+self.deplacement],self.clarte))
-        return image
-    
-    def _oblique(self,image):
-        decision=numpy.random.random()
-        D=numpy.zeros((32,32)) #La matrice qui sera additionnee
-        for i in xrange(int(-numpy.floor(self.largeur/2)),int(numpy.ceil((self.largeur+1)/2))):
-            D+=numpy.eye(32,32,i)
-        if decision<0.5: #On met tout sur l'anti-diagonale
-            D = D[:,::-1]
-        D*=self.clarte
+        if self.fini == 0:   #If layers still need to be added
+            patch_temp=self.patch
+            for w in xrange(1,self.nb_ratures):
+                self.regenerate_parameters(self.complexity,1)
+                for i in xrange(0,32):
+                    for j in xrange(0,32):
+                        patch_temp[i,j]=max(patch_temp[i,j],self.patch[i,j])
+            self.fini=1
+            self.patch=patch_temp
+            
         for i in xrange(0,32):
             for j in xrange(0,32):
-                image[i,j]=min(1,max(image[i,j],D[i,j])) 
+                image[i,j]=max(image[i,j],self.patch[i,j])
+        self.patch*=0   #Reset the patch to zero (not strictly necessary)
         return image
 
 
@@ -116,27 +221,29 @@
 def _load_image():
     f = open('/home/sylvain/Dropbox/Msc/IFT6266/donnees/lower_test_data.ft')  #Le jeu de donnees est en local. 
     d = ft.read(f)
-    w=numpy.asarray(d[1])
+    w=numpy.asarray(d[0:1000])
     return (w/255.0).astype('float')
 
 def _test(complexite):
     img=_load_image()
     transfo = Rature()
-    pylab.imshow(img.reshape((32,32)))
-    pylab.show()
-    print transfo.get_settings_names()
-    print transfo.regenerate_parameters(complexite)
-    img=img.reshape((32,32))
-    
-    img_trans=transfo.transform_image(img)
-    
-    pylab.imshow(img_trans.reshape((32,32)))
-    pylab.show()
+    for i in xrange(0,10):
+        img2=img[random.randint(0,999)]  #random.randint is inclusive on both ends
+        pylab.imshow(img2.reshape((32,32)))
+        pylab.show()
+        print transfo.get_settings_names()
+        print transfo.regenerate_parameters(complexite)
+        img2=img2.reshape((32,32))
+        
+        img2_trans=transfo.transform_image(img2)
+        
+        pylab.imshow(img2_trans.reshape((32,32)))
+        pylab.show()
     
 
 if __name__ == '__main__':
     from pylearn.io import filetensor as ft
     import pylab
-    _test(0.8)
+    _test(1)
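
A condensed sketch of the patch construction described in the new docstring (rotate a "1" from the bank, enlarge it, erode it twice, then normalize); the crop offsets are simplified and the function name is illustrative:

import numpy, Image
import scipy.ndimage.morphology

def rature_patch_sketch(un, angle=75, smooth=4):
    # 'un' is a 32x32 float array in [0,1] containing one "1" from the bank.
    im = Image.fromarray(numpy.asarray(un * 255, dtype='uint8'))
    im = im.rotate(angle, Image.BICUBIC, expand=False).resize((50, 50), Image.ANTIALIAS)
    crop = (numpy.asarray(im, dtype='float32') / 255.0)[0:32, 0:32]
    kernel = numpy.ones((smooth, smooth))
    # Two erosions, as in get_patch, so the stroke is no longer recognizable as a 1.
    for _ in xrange(2):
        crop = scipy.ndimage.morphology.grey_erosion(
            crop, size=kernel.shape, structure=kernel, mode='wrap')
    crop = crop - crop.min()   # bring back to [0, 1]
    return crop / crop.max()
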
 
 
--- a/transformations/affine_transform.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/affine_transform.py	Tue Feb 23 18:16:55 2010 -0500
@@ -19,11 +19,11 @@
         self.rng = numpy.random.RandomState()
         self.complexity = complexity
         params = self.rng.uniform(size=6) -.5
-        self.a = 1. + params[0]*.4*complexity
-        self.b = 0. + params[1]*.4*complexity
+        self.a = 1. + params[0]*.6*complexity
+        self.b = 0. + params[1]*.6*complexity
         self.c = params[2]*8.*complexity
-        self.d = 0. + params[3]*.4*complexity
-        self.e = 1. + params[4]*.4*complexity
+        self.d = 0. + params[3]*.6*complexity
+        self.e = 1. + params[4]*.6*complexity
         self.f = params[5]*8.*complexity
 
     
@@ -44,12 +44,12 @@
  
         self.complexity = complexity
         params = self.rng.uniform(size=6) -.5
-        self.a = 1. + params[0]*.4*complexity
-        self.b = 0. + params[1]*.4*complexity
-        self.c = params[2]*8.*complexity
-        self.d = 0. + params[3]*.4*complexity
-        self.e = 1. + params[4]*.4*complexity
-        self.f = params[5]*8.*complexity
+        self.a = 1. + params[0]*.8*complexity
+        self.b = 0. + params[1]*.8*complexity
+        self.c = params[2]*9.*complexity
+        self.d = 0. + params[3]*.8*complexity
+        self.e = 1. + params[4]*.8*complexity
+        self.f = params[5]*9.*complexity
         return self._get_current_parameters()
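
The six coefficients a-f form a 2x3 affine map; transform_image is not part of this hunk, so the following is only a sketch of how such coefficients could be applied with PIL (the resampling mode is an assumption):

import numpy, Image

def apply_affine_sketch(image, a, b, c, d, e, f, size=32):
    # PIL's AFFINE transform maps each output pixel (x, y) to input (a*x + b*y + c, d*x + e*y + f).
    im = Image.fromarray(numpy.asarray(image * 255, dtype='uint8'))
    out = im.transform((size, size), Image.AFFINE, (a, b, c, d, e, f), resample=Image.BILINEAR)
    return numpy.asarray(out, dtype='float32') / 255.0
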
 
       
--- a/transformations/pipeline.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/pipeline.py	Tue Feb 23 18:16:55 2010 -0500
@@ -55,6 +55,7 @@
 from add_background_image import AddBackground
 from affine_transform import AffineTransformation
 from ttf2jpg import ttf2jpg
+from pycaptcha.Facade import generateCaptcha
 
 if DEBUG:
     from visualizer import Visualizer
@@ -102,7 +103,7 @@
 
         self.res_data = numpy.empty((total, num_px), dtype=numpy.uint8)
         # +1 to store complexity
-        self.params = numpy.empty((total, self.num_params_stored+1))
+        self.params = numpy.empty((total, self.num_params_stored+len(self.modules)))
         self.res_labels = numpy.empty(total, dtype=numpy.int32)
 
     def run(self, img_iterator, complexity_iterator):
@@ -113,20 +114,26 @@
 
         for img_no, (img, label) in enumerate(img_iterator):
             sys.stdout.flush()
-            complexity = complexity_iterator.next()
-
+            
             global_idx = img_no
 
             img = img.reshape(img_size)
 
-            param_idx = 1
-            # store complexity along with other params
-            self.params[global_idx, 0] = complexity
+            param_idx = 0
+            mod_idx = 0
             for mod in self.modules:
                 # This used to be done _per batch_,
-                # ie. out of the "for img" loop                   
+                # ie. out of the "for img" loop
+                complexity = complexity_iterator.next()
+                #better to sample a complexity for each transformation in order to have more variability,
+                #otherwise many images similar to the source are generated (i.e. when complexity is close to 0, 1/8 of the time)
+                #we need to save the complexity of each transformation; the sum of these complexities is a good
+                #indicator of the overall complexity
+                self.params[global_idx, mod_idx] = complexity
+                mod_idx += 1
+                 
                 p = mod.regenerate_parameters(complexity)
-                self.params[global_idx, param_idx:param_idx+len(p)] = p
+                self.params[global_idx, param_idx+len(self.modules):param_idx+len(p)+len(self.modules)] = p
                 param_idx += len(p)
 
                 img = mod.transform_image(img)
@@ -213,13 +220,15 @@
         ocr_img = ft.read(nist.ocr_data)
         ocr_labels = ft.read(nist.ocr_labels)
     ttf = ttf2jpg()
+    L = [chr(ord('0')+x) for x in range(10)] + [chr(ord('A')+x) for x in range(26)] + [chr(ord('a')+x) for x in range(26)]
 
     for i in xrange(num_img):
         r = numpy.random.rand()
         if r <= prob_font:
             yield ttf.generate_image()
-        elif r <= prob_font + prob_captcha:
-            pass #get captcha
+        elif r <=prob_font + prob_captcha:
+            (arr, charac) = generateCaptcha(0,1)
+            yield arr.astype(numpy.float32)/255, L.index(charac[0])
         elif r <= prob_font + prob_captcha + prob_ocr:
             j = numpy.random.randint(len(ocr_labels))
             yield ocr_img[j].astype(numpy.float32)/255, ocr_labels[j]
@@ -259,7 +268,7 @@
     -d, --ocrlabel-file: path to filetensor (.ft) labels file (OCR labels)
     -a, --prob-font: probability of using a raw font image
     -b, --prob-captcha: probability of using a captcha image
-    -e, --prob-ocr: probability of using an ocr image
+    -g, --prob-ocr: probability of using an ocr image
     '''
 
 # See run_pipeline.py
@@ -291,7 +300,8 @@
     reload_mode = False
 
     try:
-        opts, args = getopt.getopt(get_argv(), "rm:z:o:p:x:s:f:l:c:d:a:b:e:", ["reload","max-complexity=", "probability-zero=", "output-file=", "params-output-file=", "labels-output-file=", "stop-after=", "data-file=", "label-file=", "ocr-file=", "ocrlabel-file=", "prob-font=", "prob-captcha=", "prob-ocr="])
+        opts, args = getopt.getopt(get_argv(), "rm:z:o:p:x:s:f:l:c:d:a:b:g:", ["reload","max-complexity=", "probability-zero=", "output-file=", "params-output-file=", "labels-output-file=", 
+"stop-after=", "data-file=", "label-file=", "ocr-file=", "ocrlabel-file=", "prob-font=", "prob-captcha=", "prob-ocr="])
     except getopt.GetoptError, err:
         # print help information and exit:
         print str(err) # will print something like "option -a not recognized"
@@ -328,7 +338,7 @@
             prob_font = float(a)
         elif o in ('-b', "--prob-captcha"):
             prob_captcha = float(a)
-        elif o in ('-e', "--prob-ocr"):
+        elif o in ('-g', "--prob-ocr"):
             prob_ocr = float(a)
         else:
             assert False, "unhandled option"
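
With this change each row of self.params now starts with one sampled complexity per module, followed by the concatenated module parameters. A sketch of how a saved row could be read back (names are illustrative, and it assumes each module stores exactly len(get_settings_names()) parameters):

def unpack_params_row(params_row, modules):
    # First len(modules) entries: the per-module complexities.
    complexities = params_row[:len(modules)]
    # Remaining entries: each module's parameters, in the order the modules were applied.
    settings = {}
    offset = len(modules)
    for mod in modules:
        n = len(mod.get_settings_names())
        settings[mod.__class__.__name__] = params_row[offset:offset + n]
        offset += n
    return complexities, settings
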
--- a/transformations/testtransformations.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/testtransformations.py	Tue Feb 23 18:16:55 2010 -0500
@@ -1,12 +1,15 @@
 #!/usr/bin/env python
 
 
+
 from pylearn.io import filetensor as ft
 import copy
 import pygame
 import time
 import numpy as N
 
+from ttf2jpg import ttf2jpg
+
 #from gimpfu import *
 
 
@@ -28,24 +31,77 @@
 MODULE_INSTANCES = [Slant(),Thick(),AffineTransformation(),LocalElasticDistorter(),GIMP1(),Rature(),Occlusion(), PermutPixel(),DistorsionGauss(),AddBackground(), PoivreSel(), BruitGauss(), Contrast()]
 
 ###---------------------complexity associated to each of them
-complexity = [0.6,0.6,0.6,0.6,0.6,0.3,0.3,0.5,0.5,0.5,0.3,0.3,0.5]
-complexity = [0.5]*len(MODULE_INSTANCES)
+complexity = 0.7
+#complexity = [0.5]*len(MODULE_INSTANCES)
 #complexity = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]
+n=100
+
+def createimage(path,d):
+    for i in range(n):
+        screen.fill(0)
+        a=d[i,:]
+        off1=4*32
+        off2=0
+        for u in range(n):
+            b=N.asarray(N.reshape(a,(32,32)))
+            c=N.asarray([N.reshape(a*255.0,(32,32))]*3).T
+            new=pygame.surfarray.make_surface(c)
+            new=pygame.transform.scale2x(new)
+            new=pygame.transform.scale2x(new)
+            #new.set_palette(anglcolorpalette)
+            screen.blit(new,(0,0))
+            exemple.blit(new,(0,0))
+            
+            offset = 4*32
+            offset2 = 0
+            ct = 0
+            ctmp =  N.random.rand()*complexity
+            print u
+            for j in MODULE_INSTANCES:
+                #max dilation
+                #ctmp = N.random.rand()*complexity[ct]
+                ctmp = N.random.rand()*complexity 
+                #print j.get_settings_names(), j.regenerate_parameters(ctmp)
+                th=j.regenerate_parameters(ctmp)
+                
+                b=j.transform_image(b)
+                c=N.asarray([b*255]*3).T
+                new=pygame.surfarray.make_surface(c)
+                new=pygame.transform.scale2x(new)
+                new=pygame.transform.scale2x(new)
+                if u==0:
+                    #new.set_palette(anglcolorpalette)
+                    screen.blit(new,(offset,offset2))
+                    font = pygame.font.SysFont('liberationserif',18)
+                    text = font.render('%s '%(int(ctmp*100.0)/100.0) + j.__module__,0,(255,255,255),(0,0,0))
+                    #if  j.__module__ == 'Rature':
+                    #     text = font.render('%s,%s'%(th[-1],int(ctmp*100.0)/100.0) + j.__module__,0,(255,255,255),(0,0,0))
+                    screen.blit(text,(offset,offset2+4*32))
+                    if ct == len(MODULE_INSTANCES)/2-1:
+                        offset = 0
+                        offset2 = 4*32+20
+                    else:
+                        offset += 4*32
+                    ct+=1
+            exemple.blit(new,(off1,off2))
+            if off1 != 9*4*32:
+                off1+=4*32
+            else:
+                off1=0
+                off2+=4*32
+        pygame.image.save(exemple,path+'/perimages/%s.PNG'%i)
+        pygame.image.save(screen,path+'/exemples/%s.PNG'%i)
+ 
 
 
 
 nbmodule = len(MODULE_INSTANCES)
 
-datapath = '/data/lisa/data/nist/by_class/'
-f = open(datapath+'lower/lower_train_data.ft')
-d = ft.read(f)
-
-d = d[0:1000,:]/255.0
-
 pygame.surfarray.use_arraytype('numpy')
 
 #pygame.display.init()
 screen = pygame.Surface((4*(nbmodule+1)/2*32,2*(4*32+20)),depth=32)
+exemple = pygame.Surface((N.ceil(N.sqrt(n))*4*32,N.ceil(N.sqrt(n))*4*32),depth=32)
 
 anglcolorpalette=[(x,x,x) for x in xrange(0,256)]
 #pygame.Surface.set_palette(anglcolorpalette)
@@ -53,43 +109,48 @@
 
 pygame.font.init()
 
-for i in range(1000):
-    a=d[i,:]
-    b=N.asarray(N.reshape(a,(32,32)))
-    c=N.asarray([N.reshape(a*255.0,(32,32))]*3).T
-    new=pygame.surfarray.make_surface(c)
-    new=pygame.transform.scale2x(new)
-    new=pygame.transform.scale2x(new)
-    #new.set_palette(anglcolorpalette)
-    screen.blit(new,(0,0))
-    
-    offset = 4*32
-    offset2 = 0
-    ct = 0
-    for j in MODULE_INSTANCES:
-        #max dilation
-             
-        #random
-        print j.get_settings_names(), j.regenerate_parameters(N.random.rand()*complexity[ct])
+d = N.zeros((n,1024))
+
+datapath = '/data/lisa/data/ocr_breuel/filetensor/unlv-corrected-2010-02-01-shuffled.ft'
+f = open(datapath)
+d = ft.read(f)
+d = d[0:n,:]/255.0
+createimage('/u/glorotxa/transf/OCR',d)
+
+
+
+datapath = '/data/lisa/data/nist/by_class/'
+f = open(datapath+'digits_reshuffled/digits_reshuffled_train_data.ft')
+d = ft.read(f)
+d = d[0:n,:]/255.0
+createimage('/u/glorotxa/transf/NIST_digits',d)
+
+
 
-        b=j.transform_image(b)
-        c=N.asarray([b*255]*3).T
-        
-        new=pygame.surfarray.make_surface(c)
-        new=pygame.transform.scale2x(new)
-        new=pygame.transform.scale2x(new)
-        #new.set_palette(anglcolorpalette)
-        screen.blit(new,(offset,offset2))
-        font = pygame.font.SysFont('liberationserif',18)
-        text = font.render(j.__module__,0,(255,255,255),(0,0,0))
-        screen.blit(text,(offset,offset2+4*32))
-        if ct == len(MODULE_INSTANCES)/2-1:
-            offset = 0
-            offset2 = 4*32+20
-        else:
-            offset += 4*32
-        ct+=1
-    pygame.image.save(screen,'/u/glorotxa/exemples/%s.PNG'%i)
-    #raw_input('Press Enter')
+datapath = '/data/lisa/data/nist/by_class/'
+f = open(datapath+'upper/upper_train_data.ft')
+d = ft.read(f)
+d = d[0:n,:]/255.0
+createimage('/u/glorotxa/transf/NIST_upper',d)
+
+from Facade import *
+
+for i in range(n):
+    d[i,:]=N.asarray(N.reshape(generateCaptcha(0.8,0),(1,1024))/255.0,dtype='float32')
+
+createimage('/u/glorotxa/transf/capcha',d)
+
+
+for i in range(n):
+    myttf2jpg = ttf2jpg()
+    d[i,:]=N.reshape(myttf2jpg.generate_image()[0],(1,1024))
+createimage('/u/glorotxa/transf/fonts',d)
+
+datapath = '/data/lisa/data/nist/by_class/'
+f = open(datapath+'lower/lower_train_data.ft')
+d = ft.read(f)
+d = d[0:n,:]/255.0
+createimage('/u/glorotxa/transf/NIST_lower',d)
+
 
 #pygame.display.quit()
--- a/transformations/thick.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/thick.py	Tue Feb 23 18:16:55 2010 -0500
@@ -21,7 +21,7 @@
         #---------- private attributes
         self.__nx__ = 32 #xdim of the images
         self.__ny__ = 32 #ydim of the images
-        self.__erodemax__ = 9 #nb of index max of erode structuring elements
+        self.__erodemax__ = 5 #nb of index max of erode structuring elements
         self.__dilatemax__ = 9 #nb of index max of dilation structuring elements
         self.__structuring_elements__ = [N.asarray([[1,1]]),N.asarray([[1],[1]]),\
                                         N.asarray([[1,1],[1,1]]),N.asarray([[0,1,0],[1,1,1],[0,1,0]]),\
--- a/transformations/ttf2jpg.py	Tue Feb 23 18:08:11 2010 -0500
+++ b/transformations/ttf2jpg.py	Tue Feb 23 18:16:55 2010 -0500
@@ -15,7 +15,7 @@
     def __init__(self, font_file = ''):
         self.w = 32
         self.h = 32
-        self.font_dir = '/data/lisa/data/ift6266h10/fonts/windows7/'
+        self.font_dir = '/Tmp/allfonts/'
         self.font_file = font_file
         self.image_dir = './images/'
         self.pattern = '*.ttf'
@@ -26,6 +26,8 @@
             self.char_list.append(chr(ord('A') + i) )
         for i in range(0,26):
             self.char_list.append(chr(ord('a') + i) )
+        files = os.listdir(self.font_dir)
+        self.font_files = fnmatch.filter(files, '*.ttf') + fnmatch.filter(files, '*.TTF')
 
     # get font name
     def get_settings_names(self):
@@ -42,10 +44,8 @@
 
     # set a random font for character generation
     def set_random_font(self):
-        files = os.listdir(self.font_dir)
-        font_files = fnmatch.filter(files, self.pattern)
-        i = random.randint(0, len(font_files) - 1)
-        self.font_file = self.font_dir + font_files[i]
+        i = random.randint(0, len(self.font_files) - 1)
+        self.font_file = self.font_dir + self.font_files[i]
 
     # return a picture array of "text" with font "font_file"
     def create_image(self, text):