changeset 131:5c79a2557f2f

A bit of cleanup in the stacked DAE code, split into files in a new subdirectory.
author savardf
date Fri, 19 Feb 2010 08:43:10 -0500
parents 38929c29b602
children 25b7c1f20949
files scripts/stacked_dae.py scripts/stacked_dae/mnist_sda.py scripts/stacked_dae/nist_sda.py scripts/stacked_dae/sgd_optimization.py scripts/stacked_dae/stacked_dae.py scripts/stacked_dae/utils.py
diffstat 6 files changed, 637 insertions(+), 456 deletions(-)
--- a/scripts/stacked_dae.py	Thu Feb 18 14:44:23 2010 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,456 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8
-
-import numpy 
-import theano
-import time
-import theano.tensor as T
-from theano.tensor.shared_randomstreams import RandomStreams
-import os.path
-
-import gzip
-import cPickle
-
-MNIST_LOCATION = '/u/savardf/datasets/mnist.pkl.gz'
-
-# from pylearn codebase
-def update_locals(obj, dct):
-    if 'self' in dct:
-        del dct['self']
-    obj.__dict__.update(dct)
-
-class LogisticRegression(object):
-    def __init__(self, input, n_in, n_out):
-        # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
-        self.W = theano.shared( value=numpy.zeros((n_in,n_out),
-                                            dtype = theano.config.floatX) )
-        # initialize the baises b as a vector of n_out 0s
-        self.b = theano.shared( value=numpy.zeros((n_out,), 
-                                            dtype = theano.config.floatX) )
-        # compute vector of class-membership probabilities in symbolic form
-        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
-        
-        # compute prediction as class whose probability is maximal in 
-        # symbolic form
-        self.y_pred=T.argmax(self.p_y_given_x, axis=1)
-
-        # list of parameters for this layer
-        self.params = [self.W, self.b]
-
-    def negative_log_likelihood(self, y):
-       return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
-
-    def errors(self, y):
-        # check if y has same dimension of y_pred 
-        if y.ndim != self.y_pred.ndim:
-            raise TypeError('y should have the same shape as self.y_pred', 
-                ('y', target.type, 'y_pred', self.y_pred.type))
-
-        # check if y is of the correct datatype        
-        if y.dtype.startswith('int'):
-            # the T.neq operator returns a vector of 0s and 1s, where 1
-            # represents a mistake in prediction
-            return T.mean(T.neq(self.y_pred, y))
-        else:
-            raise NotImplementedError()
-
-
-class SigmoidalLayer(object):
-    def __init__(self, rng, input, n_in, n_out):
-        self.input = input
-
-        W_values = numpy.asarray( rng.uniform( \
-              low = -numpy.sqrt(6./(n_in+n_out)), \
-              high = numpy.sqrt(6./(n_in+n_out)), \
-              size = (n_in, n_out)), dtype = theano.config.floatX)
-        self.W = theano.shared(value = W_values)
-
-        b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
-        self.b = theano.shared(value= b_values)
-
-        self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
-        self.params = [self.W, self.b]
-
-
-
-class dA(object):
-  def __init__(self, n_visible= 784, n_hidden= 500, corruption_level = 0.1,\
-               input = None, shared_W = None, shared_b = None):
-    self.n_visible = n_visible
-    self.n_hidden  = n_hidden
-    
-    # create a Theano random generator that gives symbolic random values
-    theano_rng = RandomStreams()
-    
-    if shared_W != None and shared_b != None : 
-        self.W = shared_W
-        self.b = shared_b
-    else:
-        # initial values for weights and biases
-        # note : W' was written as `W_prime` and b' as `b_prime`
-
-        # W is initialized with `initial_W` which is uniformely sampled
-        # from -6./sqrt(n_visible+n_hidden) and 6./sqrt(n_hidden+n_visible)
-        # the output of uniform if converted using asarray to dtype 
-        # theano.config.floatX so that the code is runable on GPU
-        initial_W = numpy.asarray( numpy.random.uniform( \
-              low = -numpy.sqrt(6./(n_hidden+n_visible)), \
-              high = numpy.sqrt(6./(n_hidden+n_visible)), \
-              size = (n_visible, n_hidden)), dtype = theano.config.floatX)
-        initial_b       = numpy.zeros(n_hidden, dtype = theano.config.floatX)
-    
-    
-        # theano shared variables for weights and biases
-        self.W       = theano.shared(value = initial_W,       name = "W")
-        self.b       = theano.shared(value = initial_b,       name = "b")
-    
- 
-    initial_b_prime= numpy.zeros(n_visible)
-    # tied weights, therefore W_prime is W transpose
-    self.W_prime = self.W.T 
-    self.b_prime = theano.shared(value = initial_b_prime, name = "b'")
-
-    # if no input is given, generate a variable representing the input
-    if input == None : 
-        # we use a matrix because we expect a minibatch of several examples,
-        # each example being a row
-        self.x = T.dmatrix(name = 'input') 
-    else:
-        self.x = input
-    # Equation (1)
-    # keep 90% of the inputs the same and zero-out randomly selected subset of 10% of the inputs
-    # note : first argument of theano.rng.binomial is the shape(size) of 
-    #        random numbers that it should produce
-    #        second argument is the number of trials 
-    #        third argument is the probability of success of any trial
-    #
-    #        this will produce an array of 0s and 1s where 1 has a 
-    #        probability of 1 - ``corruption_level`` and 0 with
-    #        ``corruption_level``
-    self.tilde_x  = theano_rng.binomial( self.x.shape,  1,  1 - corruption_level) * self.x
-    # Equation (2)
-    # note  : y is stored as an attribute of the class so that it can be 
-    #         used later when stacking dAs. 
-    self.y   = T.nnet.sigmoid(T.dot(self.tilde_x, self.W      ) + self.b)
-    # Equation (3)
-    self.z   = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
-    # Equation (4)
-    # note : we sum over the size of a datapoint; if we are using minibatches,
-    #        L will  be a vector, with one entry per example in minibatch
-    self.L = - T.sum( self.x*T.log(self.z) + (1-self.x)*T.log(1-self.z), axis=1 ) 
-    # note : L is now a vector, where each element is the cross-entropy cost 
-    #        of the reconstruction of the corresponding example of the 
-    #        minibatch. We need to compute the average of all these to get 
-    #        the cost of the minibatch
-    self.cost = T.mean(self.L)
-
-    self.params = [ self.W, self.b, self.b_prime ]
-
-
-
-
-class SdA(object):
-    def __init__(self, train_set_x, train_set_y, batch_size, n_ins, 
-                 hidden_layers_sizes, n_outs, 
-                 corruption_levels, rng, pretrain_lr, finetune_lr):
-       
-        self.layers             = []
-        self.pretrain_functions = []
-        self.params             = []
-        self.n_layers           = len(hidden_layers_sizes)
-
-        if len(hidden_layers_sizes) < 1 :
-            raiseException (' You must have at least one hidden layer ')
-
-
-        # allocate symbolic variables for the data
-        index   = T.lscalar()    # index to a [mini]batch 
-        self.x  = T.matrix('x')  # the data is presented as rasterized images
-        self.y  = T.ivector('y') # the labels are presented as 1D vector of 
-                                 # [int] labels
-
-        for i in xrange( self.n_layers ):
-            # construct the sigmoidal layer
-
-            # the size of the input is either the number of hidden units of 
-            # the layer below or the input size if we are on the first layer
-            if i == 0 :
-                input_size = n_ins
-            else:
-                input_size = hidden_layers_sizes[i-1]
-
-            # the input to this layer is either the activation of the hidden
-            # layer below or the input of the SdA if you are on the first
-            # layer
-            if i == 0 : 
-                layer_input = self.x
-            else:
-                layer_input = self.layers[-1].output
-
-            layer = SigmoidalLayer(rng, layer_input, input_size, 
-                                   hidden_layers_sizes[i] )
-            # add the layer to the 
-            self.layers += [layer]
-            self.params += layer.params
-        
-            # Construct a denoising autoencoder that shared weights with this
-            # layer
-            dA_layer = dA(input_size, hidden_layers_sizes[i], \
-                          corruption_level = corruption_levels[0],\
-                          input = layer_input, \
-                          shared_W = layer.W, shared_b = layer.b)
-        
-            # Construct a function that trains this dA
-            # compute gradients of layer parameters
-            gparams = T.grad(dA_layer.cost, dA_layer.params)
-            # compute the list of updates
-            updates = {}
-            for param, gparam in zip(dA_layer.params, gparams):
-                updates[param] = param - gparam * pretrain_lr
-            
-            # create a function that trains the dA
-            update_fn = theano.function([index], dA_layer.cost, \
-                  updates = updates,
-                  givens = { 
-                     self.x : train_set_x[index*batch_size:(index+1)*batch_size]})
-            # collect this function into a list
-            self.pretrain_functions += [update_fn]
-
-        
-        # We now need to add a logistic layer on top of the MLP
-        self.logLayer = LogisticRegression(\
-                         input = self.layers[-1].output,\
-                         n_in = hidden_layers_sizes[-1], n_out = n_outs)
-
-        self.params += self.logLayer.params
-        # construct a function that implements one step of finetunining
-
-        # compute the cost, defined as the negative log likelihood 
-        cost = self.logLayer.negative_log_likelihood(self.y)
-        # compute the gradients with respect to the model parameters
-        gparams = T.grad(cost, self.params)
-        # compute list of updates
-        updates = {}
-        for param,gparam in zip(self.params, gparams):
-            updates[param] = param - gparam*finetune_lr
-            
-        self.finetune = theano.function([index], cost, 
-                updates = updates,
-                givens = {
-                  self.x : train_set_x[index*batch_size:(index+1)*batch_size],
-                  self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
-
-        # symbolic variable that points to the number of errors made on the
-        # minibatch given by self.x and self.y
-
-        self.errors = self.logLayer.errors(self.y)
-
-class Hyperparameters:
-    def __init__(self, dict):
-        self.__dict__.update(dict)
-
-def sgd_optimization_mnist(learning_rate=0.1, pretraining_epochs = 2, \
-                            pretrain_lr = 0.1, training_epochs = 5, \
-                            dataset='mnist.pkl.gz'):
-    # Load the dataset 
-    f = gzip.open(dataset,'rb')
-    # this gives us train, valid, test (each with .x, .y)
-    dataset = cPickle.load(f)
-    f.close()
-
-    n_ins = 28*28
-    n_outs = 10
-
-    hyperparameters = Hyperparameters({'finetuning_lr':learning_rate,
-                       'pretraining_lr':pretrain_lr,
-                       'pretraining_epochs_per_layer':pretraining_epochs,
-                       'max_finetuning_epochs':training_epochs,
-                       'hidden_layers_sizes':[1000,1000,1000],
-                       'corruption_levels':[0.2,0.2,0.2],
-                       'minibatch_size':20})
-
-    sgd_optimization(dataset, hyperparameters, n_ins, n_outs)
-
-class NIST:
-    def __init__(self, minibatch_size, basepath=='/data/lisa/data/nist/by_class/all'):
-        self.minibatch_size = minibatch_size
-        self.basepath = basepath
-
-        self.train = [None, None]
-        self.test = [None, None]
-
-        self.load_train_test()
-
-        self.valid = [None, None]
-        self.split_train_valid()
-
-    def set_filenames(self):
-        self.train_files = ['all_train_data.ft',
-                                'all_train_labels.ft']
-
-        self.test_files = ['all_test_data.ft',
-                            'all_test_labels.ft']
-
-    def load_train_test(self):
-        self.load_data_labels(self.train_files, self.train)
-        self.load_data_labels(self.test_files, self.test)
-
-    def load_data_labels(self, filenames, pair):
-        for i, fn in enumerate(filenames):
-            f = open(fn)
-            pair[i] = ft.read(os.path.join(self.base_path, fn))
-            f.close()
-
-    def split_train_valid(self):
-        test_len = len(self.test[0])
-        
-        new_train_x = self.train[0][:-test_len]
-        new_train_y = self.train[1][:-test_len]
-
-        self.valid[0] = self.train[0][-test_len:]
-        self.valid[1] = self.train[1][-test_len:]
-
-        self.train[0] = new_train_x
-        self.train[1] = new_train_y
-
-def sgd_optimization_nist(dataset_dir='/data/lisa/data/nist'):
-    pass
-
-def sgd_optimization(dataset, hyperparameters, n_ins, n_outs):
-    hp = hyperparameters
-
-    train_set, valid_set, test_set = dataset
-
-    def shared_dataset(data_xy):
-        data_x, data_y = data_xy
-        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
-        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
-        return shared_x, T.cast(shared_y, 'int32')
-
-    test_set_x, test_set_y = shared_dataset(test_set)
-    valid_set_x, valid_set_y = shared_dataset(valid_set)
-    train_set_x, train_set_y = shared_dataset(train_set)
-
-    # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.value.shape[0] / hp.minibatch_size
-    n_valid_batches = valid_set_x.value.shape[0] / hp.minibatch_size
-    n_test_batches  = test_set_x.value.shape[0]  / hp.minibatch_size
-
-    # allocate symbolic variables for the data
-    index   = T.lscalar()    # index to a [mini]batch 
- 
-    # construct the stacked denoising autoencoder class
-    classifier = SdA( train_set_x=train_set_x, train_set_y = train_set_y,\
-                      batch_size = hp.minibatch_size, n_ins= n_ins, \
-                      hidden_layers_sizes = hp.hidden_layers_sizes, n_outs=10, \
-                      corruption_levels = hp.corruption_levels,\
-                      rng = numpy.random.RandomState(1234),\
-                      pretrain_lr = hp.pretraining_lr, finetune_lr = hp.finetuning_lr )
-
-
-    start_time = time.clock()  
-    ## Pre-train layer-wise 
-    for i in xrange(classifier.n_layers):
-        # go through pretraining epochs 
-        for epoch in xrange(hp.pretraining_epochs_per_layer):
-            # go through the training set
-            for batch_index in xrange(n_train_batches):
-                c = classifier.pretrain_functions[i](batch_index)
-            print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
- 
-    end_time = time.clock()
-
-    print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
-    # Fine-tune the entire model
-
-    minibatch_size = hp.minibatch_size
-
-    # create a function to compute the mistakes that are made by the model
-    # on the validation set, or testing set
-    test_model = theano.function([index], classifier.errors,
-             givens = {
-               classifier.x: test_set_x[index*minibatch_size:(index+1)*minibatch_size],
-               classifier.y: test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
-
-    validate_model = theano.function([index], classifier.errors,
-            givens = {
-               classifier.x: valid_set_x[index*minibatch_size:(index+1)*minibatch_size],
-               classifier.y: valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
-
-
-    # early-stopping parameters
-    patience              = 10000 # look as this many examples regardless
-    patience_increase     = 2.    # wait this much longer when a new best is 
-                                  # found
-    improvement_threshold = 0.995 # a relative improvement of this much is 
-                                  # considered significant
-    validation_frequency  = min(n_train_batches, patience/2)
-                                  # go through this many 
-                                  # minibatche before checking the network 
-                                  # on the validation set; in this case we 
-                                  # check every epoch 
-
-    best_params          = None
-    best_validation_loss = float('inf')
-    test_score           = 0.
-    start_time = time.clock()
-
-    done_looping = False
-    epoch = 0
-
-    while (epoch < hp.max_finetuning_epochs) and (not done_looping):
-      epoch = epoch + 1
-      for minibatch_index in xrange(n_train_batches):
-
-        cost_ij = classifier.finetune(minibatch_index)
-        iter    = epoch * n_train_batches + minibatch_index
-
-        if (iter+1) % validation_frequency == 0: 
-            
-            validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
-            this_validation_loss = numpy.mean(validation_losses)
-            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
-                   (epoch, minibatch_index+1, n_train_batches, \
-                    this_validation_loss*100.))
-
-
-            # if we got the best validation score until now
-            if this_validation_loss < best_validation_loss:
-
-                #improve patience if loss improvement is good enough
-                if this_validation_loss < best_validation_loss *  \
-                       improvement_threshold :
-                    patience = max(patience, iter * patience_increase)
-
-                # save best validation score and iteration number
-                best_validation_loss = this_validation_loss
-                best_iter = iter
-
-                # test it on the test set
-                test_losses = [test_model(i) for i in xrange(n_test_batches)]
-                test_score = numpy.mean(test_losses)
-                print(('     epoch %i, minibatch %i/%i, test error of best '
-                      'model %f %%') % 
-                             (epoch, minibatch_index+1, n_train_batches,
-                              test_score*100.))
-
-
-        if patience <= iter :
-                done_looping = True
-                break
-
-    end_time = time.clock()
-    print(('Optimization complete with best validation score of %f %%,'
-           'with test performance %f %%') %  
-                 (best_validation_loss * 100., test_score*100.))
-    print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
-
-
-if __name__ == '__main__':
-    import sys
-    args = sys.argv[1:]
-    if len(args) > 0 and args[0] == "jobman_add":
-        jobman_add()
-    else:
-        sgd_optimization_mnist(dataset=MNIST_LOCATION)
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/mnist_sda.py	Fri Feb 19 08:43:10 2010 -0500
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# Parameterize call to sgd_optimization for MNIST
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+
+from sgd_optimization import sgd_optimization
+import cPickle, gzip
+from jobman import DD
+
+MNIST_LOCATION = '/u/savardf/datasets/mnist.pkl.gz'
+
+def sgd_optimization_mnist(learning_rate=0.1, pretraining_epochs = 2, \
+                            pretrain_lr = 0.1, training_epochs = 5, \
+                            dataset='mnist.pkl.gz'):
+    # Load the dataset 
+    f = gzip.open(dataset,'rb')
+    # this gives us (train, valid, test), each an (x, y) pair
+    dataset = cPickle.load(f)
+    f.close()
+
+    n_ins = 28*28
+    n_outs = 10
+
+    hyperparameters = DD({'finetuning_lr':learning_rate,
+                       'pretraining_lr':pretrain_lr,
+                       'pretraining_epochs_per_layer':pretraining_epochs,
+                       'max_finetuning_epochs':training_epochs,
+                       'hidden_layers_sizes':[1000,1000,1000],
+                       'corruption_levels':[0.2,0.2,0.2],
+                       'minibatch_size':20})
+
+    sgd_optimization(dataset, hyperparameters, n_ins, n_outs)
+
+if __name__ == '__main__':
+    sgd_optimization_mnist(dataset=MNIST_LOCATION)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/nist_sda.py	Fri Feb 19 08:43:10 2010 -0500
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# coding: utf-8
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+
+import os.path
+
+from sgd_optimization import sgd_optimization
+
+from jobman import DD
+from pylearn.io import filetensor
+
+from utils import produit_croise_jobs
+
+NIST_ALL_LOCATION = '/data/lisa/data/nist/by_class/all'
+
+# Just useful for tests... minimal number of epochs
+DEFAULT_HP_NIST = DD({'finetuning_lr':0.1,
+                       'pretraining_lr':0.1,
+                       'pretraining_epochs_per_layer':1,
+                       'max_finetuning_epochs':1,
+                       'hidden_layers_sizes':[1000,1000],
+                       'corruption_levels':[0.2,0.2],
+                       'minibatch_size':20})
+
+def jobman_entrypoint_nist(state, channel):
+    sgd_optimization_nist(state)
+
+def jobman_insert_nist():
+    vals = {'finetuning_lr': [0.00001, 0.0001, 0.001, 0.01, 0.1],
+            'pretraining_lr': [0.00001, 0.0001, 0.001, 0.01, 0.1],
+            'pretraining_epochs_per_layer': [2,5,20],
+            'hidden_layer_sizes': [100,300,1000],
+            'num_hidden_layers':[1,2,3],
+            'corruption_levels': [0.1,0.2,0.4],
+            'minibatch_size': [5,20,100]}
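+    # note: this grid amounts to 5*5*3*3*3*3*3 = 6075 job combinations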
+
+    jobs = produit_croise_jobs(vals)
+
+    for job in jobs:
+        insert_job(job)
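+    # note: insert_job is not defined in this module yet; presumably it will
+    # wrap jobman's SQL insertion of each generated DD into the experiment
+    # database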
+
+
+class NIST:
+    def __init__(self, minibatch_size, basepath=None):
+        global NIST_ALL_LOCATION
+
+        self.minibatch_size = minibatch_size
+        self.basepath = basepath or NIST_ALL_LOCATION
+
+        self.set_filenames()
+
+        # arrays of 2 elements: .x, .y
+        self.train = [None, None]
+        self.test = [None, None]
+
+        self.load_train_test()
+
+        self.valid = [[], []]
+        #self.split_train_valid()
+
+
+    def get_tvt(self):
+        return self.train, self.valid, self.test
+
+    def set_filenames(self):
+        self.train_files = ['all_train_data.ft',
+                                'all_train_labels.ft']
+
+        self.test_files = ['all_test_data.ft',
+                            'all_test_labels.ft']
+
+    def load_train_test(self):
+        self.load_data_labels(self.train_files, self.train)
+        self.load_data_labels(self.test_files, self.test)
+
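+    # the .ft files are in pylearn's filetensor binary format;
+    # filetensor.read loads each one fully into memory as a numpy array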
+    def load_data_labels(self, filenames, pair):
+        for i, fn in enumerate(filenames):
+            f = open(os.path.join(self.basepath, fn))
+            pair[i] = filetensor.read(f)
+            f.close()
+
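+    # carve a validation set off the end of the training set, the same
+    # size as the test set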
+    def split_train_valid(self):
+        test_len = len(self.test[0])
+        
+        new_train_x = self.train[0][:-test_len]
+        new_train_y = self.train[1][:-test_len]
+
+        self.valid[0] = self.train[0][-test_len:]
+        self.valid[1] = self.train[1][-test_len:]
+
+        self.train[0] = new_train_x
+        self.train[1] = new_train_y
+
+def test_load_nist():
+    print "Will load NIST"
+
+    import time
+    t1 = time.time()
+    nist = NIST(20)
+    t2 = time.time()
+
+    print "NIST loaded. time delta = ", t2-t1
+
+    tr,v,te = nist.get_tvt()
+
+    print "Lengths: ", len(tr[0]), len(v[0]), len(te[0])
+
+    raw_input("Press any key")
+
+# hp for hyperparameters
+def sgd_optimization_nist(hp=None, dataset_dir='/data/lisa/data/nist'):
+    global DEFAULT_HP_NIST
+    hp = hp or DEFAULT_HP_NIST
+
+    print "Will load NIST"
+
+    import time
+    t1 = time.time()
+    nist = NIST(20)
+    t2 = time.time()
+
+    print "NIST loaded. time delta = ", t2-t1
+
+    train,valid,test = nist.get_tvt()
+    dataset = (train,valid,test)
+
+    print "Lengths train, valid, test: ", len(train[0]), len(valid[0]), len(test[0])
+
+    n_ins = 32*32
+    n_outs = 62 # 10 digits + 26*2 letters (lowercase and uppercase)
+
+    sgd_optimization(dataset, hp, n_ins, n_outs)
+
+if __name__ == '__main__':
+
+    import sys
+
+    args = sys.argv[1:]
+
+    if len(args) > 0 and args[0] == 'load_nist':
+        test_load_nist()
+
+    else:
+        sgd_optimization_nist()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/sgd_optimization.py	Fri Feb 19 08:43:10 2010 -0500
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# Generic SdA optimization loop, adapted slightly from the deeplearning.net tutorial
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+
+from jobman import DD
+
+from stacked_dae import SdA
+
+def sgd_optimization(dataset, hyperparameters, n_ins, n_outs):
+    hp = hyperparameters
+
+    printout_frequency = 1000
+
+    train_set, valid_set, test_set = dataset
+
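+    # note: the data is stored as floatX so the shared variables can live on
+    #       the GPU; the labels are then cast (symbolically) to int32 where
+    #       integer indices are required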
+    def shared_dataset(data_xy):
+        data_x, data_y = data_xy
+        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
+        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
+        return shared_x, T.cast(shared_y, 'int32')
+
+    test_set_x, test_set_y = shared_dataset(test_set)
+    valid_set_x, valid_set_y = shared_dataset(valid_set)
+    train_set_x, train_set_y = shared_dataset(train_set)
+
+    # compute number of minibatches for training, validation and testing
+    n_train_batches = train_set_x.value.shape[0] / hp.minibatch_size
+    n_valid_batches = valid_set_x.value.shape[0] / hp.minibatch_size
+    n_test_batches  = test_set_x.value.shape[0]  / hp.minibatch_size
+
+    # allocate symbolic variables for the data
+    index   = T.lscalar()    # index to a [mini]batch 
+ 
+    # construct the stacked denoising autoencoder class
+    classifier = SdA( train_set_x=train_set_x, train_set_y = train_set_y,\
+                      batch_size = hp.minibatch_size, n_ins= n_ins, \
+                      hidden_layers_sizes = hp.hidden_layers_sizes, n_outs=n_outs, \
+                      corruption_levels = hp.corruption_levels,\
+                      rng = numpy.random.RandomState(1234),\
+                      pretrain_lr = hp.pretraining_lr, finetune_lr = hp.finetuning_lr )
+
+    printout_acc = 0.0
+
+    start_time = time.clock()  
+    ## Pre-train layer-wise 
+    for i in xrange(classifier.n_layers):
+        # go through pretraining epochs 
+        for epoch in xrange(hp.pretraining_epochs_per_layer):
+            # go through the training set
+            for batch_index in xrange(n_train_batches):
+                c = classifier.pretrain_functions[i](batch_index)
+
+                print c
+
+                printout_acc += c / printout_frequency
+                if (batch_index+1) % printout_frequency == 0:
+                    print batch_index, "reconstruction cost avg=", printout_acc
+                    printout_acc = 0.0
+                    
+            print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
+ 
+    end_time = time.clock()
+
+    print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
+    # Fine-tune the entire model
+
+    minibatch_size = hp.minibatch_size
+
+    # create a function to compute the mistakes that are made by the model
+    # on the validation set, or testing set
+    test_model = theano.function([index], classifier.errors,
+             givens = {
+               classifier.x: test_set_x[index*minibatch_size:(index+1)*minibatch_size],
+               classifier.y: test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
+
+    validate_model = theano.function([index], classifier.errors,
+            givens = {
+               classifier.x: valid_set_x[index*minibatch_size:(index+1)*minibatch_size],
+               classifier.y: valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
+
+
+    # early-stopping parameters
+    patience              = 10000 # look at this many examples regardless
+    patience_increase     = 2.    # wait this much longer when a new best is 
+                                  # found
+    improvement_threshold = 0.995 # a relative improvement of this much is 
+                                  # considered significant
+    validation_frequency  = min(n_train_batches, patience/2)
+                                  # go through this many
+                                  # minibatches before checking the network
+                                  # on the validation set; in this case we
+                                  # check every epoch
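+                                  # e.g. with minibatch_size=20 and a
+                                  # (hypothetical) 50000-example training
+                                  # set, n_train_batches=2500 < patience/2,
+                                  # so we would validate once per epoch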
+
+    best_params          = None
+    best_validation_loss = float('inf')
+    test_score           = 0.
+    start_time = time.clock()
+
+    done_looping = False
+    epoch = 0
+
+    printout_acc = 0.0
+
+    print "----- START FINETUNING -----"
+
+    while (epoch < hp.max_finetuning_epochs) and (not done_looping):
+      epoch = epoch + 1
+      for minibatch_index in xrange(n_train_batches):
+
+        cost_ij = classifier.finetune(minibatch_index)
+        iter    = epoch * n_train_batches + minibatch_index
+
+        printout_acc += cost_ij / float(printout_frequency * minibatch_size)
+        if (iter+1) % printout_frequency == 0:
+            print iter, "cost avg=", printout_acc
+            printout_acc = 0.0
+
+        if (iter+1) % validation_frequency == 0: 
+            
+            validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
+            this_validation_loss = numpy.mean(validation_losses)
+            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+                   (epoch, minibatch_index+1, n_train_batches, \
+                    this_validation_loss*100.))
+
+
+            # if we got the best validation score until now
+            if this_validation_loss < best_validation_loss:
+
+                #improve patience if loss improvement is good enough
+                if this_validation_loss < best_validation_loss *  \
+                       improvement_threshold :
+                    patience = max(patience, iter * patience_increase)
+
+                # save best validation score and iteration number
+                best_validation_loss = this_validation_loss
+                best_iter = iter
+
+                # test it on the test set
+                test_losses = [test_model(i) for i in xrange(n_test_batches)]
+                test_score = numpy.mean(test_losses)
+                print(('     epoch %i, minibatch %i/%i, test error of best '
+                      'model %f %%') % 
+                             (epoch, minibatch_index+1, n_train_batches,
+                              test_score*100.))
+
+
+        if patience <= iter :
+                done_looping = True
+                break
+
+    end_time = time.clock()
+    print(('Optimization complete with best validation score of %f %%, '
+           'with test performance %f %%') %
+                 (best_validation_loss * 100., test_score*100.))
+    print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/stacked_dae.py	Fri Feb 19 08:43:10 2010 -0500
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# coding: utf-8
+
+import numpy 
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+
+class LogisticRegression(object):
+    def __init__(self, input, n_in, n_out):
+        # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
+        self.W = theano.shared( value=numpy.zeros((n_in,n_out),
+                                            dtype = theano.config.floatX) )
+        # initialize the biases b as a vector of n_out 0s
+        self.b = theano.shared( value=numpy.zeros((n_out,), 
+                                            dtype = theano.config.floatX) )
+        # compute vector of class-membership probabilities in symbolic form
+        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
+        
+        # compute prediction as class whose probability is maximal in 
+        # symbolic form
+        self.y_pred=T.argmax(self.p_y_given_x, axis=1)
+
+        # list of parameters for this layer
+        self.params = [self.W, self.b]
+
+    def negative_log_likelihood(self, y):
+       return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+
+    def errors(self, y):
+        # check if y has same dimension of y_pred 
+        if y.ndim != self.y_pred.ndim:
+            raise TypeError('y should have the same shape as self.y_pred', 
+                ('y', y.type, 'y_pred', self.y_pred.type))
+
+        # check if y is of the correct datatype        
+        if y.dtype.startswith('int'):
+            # the T.neq operator returns a vector of 0s and 1s, where 1
+            # represents a mistake in prediction
+            return T.mean(T.neq(self.y_pred, y))
+        else:
+            raise NotImplementedError()
+
+
+class SigmoidalLayer(object):
+    def __init__(self, rng, input, n_in, n_out):
+        self.input = input
+
+        W_values = numpy.asarray( rng.uniform( \
+              low = -numpy.sqrt(6./(n_in+n_out)), \
+              high = numpy.sqrt(6./(n_in+n_out)), \
+              size = (n_in, n_out)), dtype = theano.config.floatX)
+        self.W = theano.shared(value = W_values)
+
+        b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
+        self.b = theano.shared(value= b_values)
+
+        self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
+        self.params = [self.W, self.b]
+
+
+
+class dA(object):
+  def __init__(self, n_visible= 784, n_hidden= 500, corruption_level = 0.1,\
+               input = None, shared_W = None, shared_b = None):
+    self.n_visible = n_visible
+    self.n_hidden  = n_hidden
+    
+    # create a Theano random generator that gives symbolic random values
+    theano_rng = RandomStreams()
+    
+    if shared_W != None and shared_b != None : 
+        self.W = shared_W
+        self.b = shared_b
+    else:
+        # initial values for weights and biases
+        # note : W' was written as `W_prime` and b' as `b_prime`
+
+        # W is initialized with `initial_W`, which is uniformly sampled
+        # from -6./sqrt(n_visible+n_hidden) to 6./sqrt(n_hidden+n_visible);
+        # the output of uniform is converted using asarray to dtype
+        # theano.config.floatX so that the code is runnable on GPU
+        initial_W = numpy.asarray( numpy.random.uniform( \
+              low = -numpy.sqrt(6./(n_hidden+n_visible)), \
+              high = numpy.sqrt(6./(n_hidden+n_visible)), \
+              size = (n_visible, n_hidden)), dtype = theano.config.floatX)
+        initial_b       = numpy.zeros(n_hidden, dtype = theano.config.floatX)
+    
+    
+        # theano shared variables for weights and biases
+        self.W       = theano.shared(value = initial_W,       name = "W")
+        self.b       = theano.shared(value = initial_b,       name = "b")
+    
+ 
+    initial_b_prime= numpy.zeros(n_visible)
+    # tied weights, therefore W_prime is W transpose
+    self.W_prime = self.W.T 
+    self.b_prime = theano.shared(value = initial_b_prime, name = "b'")
+
+    # if no input is given, generate a variable representing the input
+    if input == None : 
+        # we use a matrix because we expect a minibatch of several examples,
+        # each example being a row
+        self.x = T.dmatrix(name = 'input') 
+    else:
+        self.x = input
+    # Equation (1)
+    # keep 90% of the inputs the same and zero out a randomly selected 10% of the inputs
+    # note : first argument of theano.rng.binomial is the shape(size) of 
+    #        random numbers that it should produce
+    #        second argument is the number of trials 
+    #        third argument is the probability of success of any trial
+    #
+    #        this will produce an array of 0s and 1s where 1 has a 
+    #        probability of 1 - ``corruption_level`` and 0 with
+    #        ``corruption_level``
+    self.tilde_x  = theano_rng.binomial( self.x.shape,  1,  1 - corruption_level) * self.x
+    # Equation (2)
+    # note  : y is stored as an attribute of the class so that it can be 
+    #         used later when stacking dAs. 
+    self.y   = T.nnet.sigmoid(T.dot(self.tilde_x, self.W      ) + self.b)
+    # Equation (3)
+    self.z   = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
+    # Equation (4)
+    # note : we sum over the size of a datapoint; if we are using minibatches,
+    #        L will  be a vector, with one entry per example in minibatch
+    self.L = - T.sum( self.x*T.log(self.z) + (1-self.x)*T.log(1-self.z), axis=1 ) 
+    # note : L is now a vector, where each element is the cross-entropy cost 
+    #        of the reconstruction of the corresponding example of the 
+    #        minibatch. We need to compute the average of all these to get 
+    #        the cost of the minibatch
+    self.cost = T.mean(self.L)
+
+    self.params = [ self.W, self.b, self.b_prime ]
+
+
+
+
+class SdA(object):
+    def __init__(self, train_set_x, train_set_y, batch_size, n_ins, 
+                 hidden_layers_sizes, n_outs, 
+                 corruption_levels, rng, pretrain_lr, finetune_lr):
+       
+        self.layers             = []
+        self.pretrain_functions = []
+        self.params             = []
+        self.n_layers           = len(hidden_layers_sizes)
+
+        if len(hidden_layers_sizes) < 1 :
+            raise Exception('You must have at least one hidden layer')
+
+
+        # allocate symbolic variables for the data
+        index   = T.lscalar()    # index to a [mini]batch 
+        self.x  = T.matrix('x')  # the data is presented as rasterized images
+        self.y  = T.ivector('y') # the labels are presented as 1D vector of 
+                                 # [int] labels
+
+        for i in xrange( self.n_layers ):
+            # construct the sigmoidal layer
+
+            # the size of the input is either the number of hidden units of 
+            # the layer below or the input size if we are on the first layer
+            if i == 0 :
+                input_size = n_ins
+            else:
+                input_size = hidden_layers_sizes[i-1]
+
+            # the input to this layer is either the activation of the hidden
+            # layer below or the input of the SdA if you are on the first
+            # layer
+            if i == 0 : 
+                layer_input = self.x
+            else:
+                layer_input = self.layers[-1].output
+
+            layer = SigmoidalLayer(rng, layer_input, input_size, 
+                                   hidden_layers_sizes[i] )
+            # add the layer to the list of layers
+            self.layers += [layer]
+            self.params += layer.params
+        
+            # Construct a denoising autoencoder that shares weights with this
+            # layer
+            dA_layer = dA(input_size, hidden_layers_sizes[i], \
+                          corruption_level = corruption_levels[i],\
+                          input = layer_input, \
+                          shared_W = layer.W, shared_b = layer.b)
+        
+            # Construct a function that trains this dA
+            # compute gradients of layer parameters
+            gparams = T.grad(dA_layer.cost, dA_layer.params)
+            # compute the list of updates
+            updates = {}
+            for param, gparam in zip(dA_layer.params, gparams):
+                updates[param] = param - gparam * pretrain_lr
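+                # i.e. plain SGD on the reconstruction cost:
+                # param <- param - pretrain_lr * d(cost)/d(param)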
+            
+            # create a function that trains the dA
+            update_fn = theano.function([index], dA_layer.cost, \
+                  updates = updates,
+                  givens = { 
+                     self.x : train_set_x[index*batch_size:(index+1)*batch_size]})
+            # collect this function into a list
+            self.pretrain_functions += [update_fn]
+
+        
+        # We now need to add a logistic layer on top of the MLP
+        self.logLayer = LogisticRegression(\
+                         input = self.layers[-1].output,\
+                         n_in = hidden_layers_sizes[-1], n_out = n_outs)
+
+        self.params += self.logLayer.params
+        # construct a function that implements one step of finetuning
+
+        # compute the cost, defined as the negative log likelihood 
+        cost = self.logLayer.negative_log_likelihood(self.y)
+        # compute the gradients with respect to the model parameters
+        gparams = T.grad(cost, self.params)
+        # compute list of updates
+        updates = {}
+        for param,gparam in zip(self.params, gparams):
+            updates[param] = param - gparam*finetune_lr
+            
+        self.finetune = theano.function([index], cost, 
+                updates = updates,
+                givens = {
+                  self.x : train_set_x[index*batch_size:(index+1)*batch_size],
+                  self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
+
+        # symbolic variable that points to the number of errors made on the
+        # minibatch given by self.x and self.y
+
+        self.errors = self.logLayer.errors(self.y)
+
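+# note: the entry points referenced below now live in mnist_sda.py,
+# nist_sda.py and utils.py; this __main__ block is a leftover from before
+# the split and will not run from this module as-is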
+if __name__ == '__main__':
+    import sys
+    args = sys.argv[1:]
+
+    if len(args) < 1:
+        print "Options: mnist, jobman_add, load_nist, nist, pc"
+        sys.exit(0)
+
+    if args[0] == "jobman_add":
+        jobman_add()
+    elif args[0] == "mnist":
+        sgd_optimization_mnist(dataset=MNIST_LOCATION)
+    elif args[0] == "load_nist":
+        load_nist_test()
+    elif args[0] == "nist":
+        sgd_optimization_nist()
+    elif args[0] == "pc":
+        test_produit_croise_jobs()
+
+    
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/stacked_dae/utils.py	Fri Feb 19 08:43:10 2010 -0500
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+
+from jobman import DD
+
+def produit_croise_jobs(val_dict):
+    job_list = [DD()]
+    all_keys = val_dict.keys()
+
+    for key in all_keys:
+        possible_values = val_dict[key]
+        new_job_list = []
+        for val in possible_values:
+            for job in job_list:
+                to_insert = job.copy()
+                to_insert.update({key: val})
+                new_job_list.append(to_insert)
+        job_list = new_job_list
+
+    return job_list
+
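+# For example, produit_croise_jobs({'a': [1,2], 'b': [3,4]}) returns the four
+# combinations {'a': 1, 'b': 3}, {'a': 2, 'b': 3}, {'a': 1, 'b': 4} and
+# {'a': 2, 'b': 4} as DD objects (ordering depends on dict key iteration).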
+def test_produit_croise_jobs():
+    vals = {'a': [1,2], 'b': [3,4,5]}
+    print produit_croise_jobs(vals)
+
+