changeset 169:d37c944133c3

directory name change
author Dumitru Erhan <dumitru.erhan@gmail.com>
date Fri, 26 Feb 2010 14:24:11 -0500
parents 5e0e5f1860ec
children 89a725d332ae
files baseline/conv_mlp/convolutional_mlp.conf baseline/conv_mlp/convolutional_mlp.py baseline/log_reg/log_reg.py baseline/mlp/mlp_nist.py baseline_algorithms/conv_mlp/convolutional_mlp.conf baseline_algorithms/conv_mlp/convolutional_mlp.py baseline_algorithms/log_reg/log_reg.py baseline_algorithms/mlp/mlp_nist.py
diffstat 8 files changed, 1375 insertions(+), 1375 deletions(-) [+]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/baseline/conv_mlp/convolutional_mlp.conf	Fri Feb 26 14:24:11 2010 -0500
@@ -0,0 +1,7 @@
+learning_rate=0.01
+n_iter=1
+batch_size=20
+n_kern0=20
+n_kern1=50
+filter_shape=5
+n_layer=3
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/baseline/conv_mlp/convolutional_mlp.py	Fri Feb 26 14:24:11 2010 -0500
@@ -0,0 +1,472 @@
+"""
+This tutorial introduces the LeNet5 neural network architecture using Theano.  LeNet5 is a
+convolutional neural network, good for classifying images. This tutorial shows how to build the
+architecture, and comes with all the hyper-parameters you need to reproduce the paper's MNIST
+results.
+
+The best results are obtained after X iterations of the main program loop, which takes ***
+minutes on my workstation (an Intel Core i7, circa July 2009), and *** minutes on my GPU (an
+NVIDIA GTX 285 graphics processor).
+
+This implementation simplifies the model in the following ways:
+
+ - LeNetConvPool doesn't implement location-specific gain and bias parameters
+ - LeNetConvPool doesn't implement pooling by average; it implements pooling by max.
+ - Digit classification is implemented with a logistic regression rather than an RBF network
+ - LeNet5 did not use fully-connected convolutions at the second layer; this implementation does
+
+References:
+ - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner: Gradient-Based Learning Applied to Document
+   Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
+   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
+"""
+
+import numpy, theano, cPickle, gzip, time
+import theano.tensor as T
+import theano.sandbox.softsign
+import pylearn.datasets.MNIST
+from pylearn.io import filetensor as ft
+from theano.sandbox import conv, downsample
+
+class LeNetConvPoolLayer(object):
+
+    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2,2)):
+        """
+        Allocate a LeNetConvPoolLayer with shared variable internal parameters.
+        :type rng: numpy.random.RandomState
+        :param rng: a random number generator used to initialize weights
+        :type input: theano.tensor.dtensor4
+        :param input: symbolic image tensor, of shape image_shape
+        :type filter_shape: tuple or list of length 4
+        :param filter_shape: (number of filters, num input feature maps,
+                              filter height, filter width)
+        :type image_shape: tuple or list of length 4
+        :param image_shape: (batch size, num input feature maps,
+                             image height, image width)
+        :type poolsize: tuple or list of length 2
+        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
+        """
+        assert image_shape[1]==filter_shape[1]
+        self.input = input
+   
+        # initialize weight values: the fan-in of each hidden neuron is
+        # restricted by the size of the receptive fields.
+        fan_in =  numpy.prod(filter_shape[1:])
+        W_values = numpy.asarray( rng.uniform( \
+              low = -numpy.sqrt(3./fan_in), \
+              high = numpy.sqrt(3./fan_in), \
+              size = filter_shape), dtype = theano.config.floatX)
+        self.W = theano.shared(value = W_values)
+
+        # the bias is a 1D tensor -- one bias per output feature map
+        b_values = numpy.zeros((filter_shape[0],), dtype= theano.config.floatX)
+        self.b = theano.shared(value= b_values)
+
+        # convolve input feature maps with filters
+        conv_out = conv.conv2d(input, self.W, 
+                filter_shape=filter_shape, image_shape=image_shape)
+
+        # downsample each feature map individually, using maxpooling
+        pooled_out = downsample.max_pool2D(conv_out, poolsize, ignore_border=True)
+
+        # add the bias term. Since the bias is a vector (1D array), we first
+        # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will thus
+        # be broadcasted across mini-batches and feature map width & height
+        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
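+        # Illustrative sketch (comments only, not executed) of what the
+        # dimshuffle broadcast above does, assuming the default shapes
+        # batch_size=20, n_kern0=20 and a 14x14 pooled map:
+        #   pooled = numpy.zeros((20, 20, 14, 14))  # (batch, maps, height, width)
+        #   b      = numpy.zeros(20)                # one bias per feature map
+        #   out    = numpy.tanh(pooled + b.reshape(1, 20, 1, 1))
+        #   out.shape == (20, 20, 14, 14)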
+
+        # store parameters of this layer
+        self.params = [self.W, self.b]
+
+
+class SigmoidalLayer(object):
+    def __init__(self, rng, input, n_in, n_out):
+        """
+        Typical hidden layer of a MLP: units are fully-connected and have
+        sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
+        and the bias vector b is of shape (n_out,).
+        
+        Hidden unit activation is given by: sigmoid(dot(input,W) + b)
+
+        :type rng: numpy.random.RandomState
+        :param rng: a random number generator used to initialize weights
+        :type input: theano.tensor.dmatrix
+        :param input: a symbolic tensor of shape (n_examples, n_in)
+        :type n_in: int
+        :param n_in: dimensionality of input
+        :type n_out: int
+        :param n_out: number of hidden units
+        """
+        self.input = input
+
+        W_values = numpy.asarray( rng.uniform( \
+              low = -numpy.sqrt(6./(n_in+n_out)), \
+              high = numpy.sqrt(6./(n_in+n_out)), \
+              size = (n_in, n_out)), dtype = theano.config.floatX)
+        self.W = theano.shared(value = W_values)
+
+        b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
+        self.b = theano.shared(value= b_values)
+
+        self.output = T.tanh(T.dot(input, self.W) + self.b)
+        self.params = [self.W, self.b]
+
+
+class LogisticRegression(object):
+    """Multi-class Logistic Regression Class
+
+    The logistic regression is fully described by a weight matrix :math:`W` 
+    and bias vector :math:`b`. Classification is done by projecting data 
+    points onto a set of hyperplanes, the distance to which is used to 
+    determine a class membership probability. 
+    """
+
+    def __init__(self, input, n_in, n_out):
+        """ Initialize the parameters of the logistic regression
+        :param input: symbolic variable that describes the input of the 
+                      architecture (one minibatch)
+        :type n_in: int
+        :param n_in: number of input units, the dimension of the space in 
+                     which the datapoints lie
+        :type n_out: int
+        :param n_out: number of output units, the dimension of the space in 
+                      which the labels lie
+        """ 
+
+        # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
+        self.W = theano.shared( value=numpy.zeros((n_in,n_out),
+                                            dtype = theano.config.floatX) )
+        # initialize the biases b as a vector of n_out 0s
+        self.b = theano.shared( value=numpy.zeros((n_out,), 
+                                            dtype = theano.config.floatX) )
+        # compute vector of class-membership probabilities in symbolic form
+        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
+        
+        # compute prediction as class whose probability is maximal in 
+        # symbolic form
+        self.y_pred=T.argmax(self.p_y_given_x, axis=1)
+
+        # list of parameters for this layer
+        self.params = [self.W, self.b]
+
+    def negative_log_likelihood(self, y):
+        """Return the mean of the negative log-likelihood of the prediction
+        of this model under a given target distribution.
+        :param y: corresponds to a vector that gives for each example the
+                  correct label
+        Note: we use the mean instead of the sum so that
+        the learning rate is less dependent on the batch size
+        """
+        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+
+    def errors(self, y):
+        """Return a float representing the number of errors in the minibatch 
+        over the total number of examples of the minibatch; zero-one
+        loss over the size of the minibatch
+        """
+        # check if y has same dimension of y_pred 
+        if y.ndim != self.y_pred.ndim:
+            raise TypeError('y should have the same shape as self.y_pred', 
+                ('y', y.type, 'y_pred', self.y_pred.type))
+
+        # check if y is of the correct datatype        
+        if y.dtype.startswith('int'):
+            # the T.neq operator returns a vector of 0s and 1s, where 1
+            # represents a mistake in prediction
+            return T.mean(T.neq(self.y_pred, y))
+        else:
+            raise NotImplementedError()
+
+
+def load_dataset(fname,batch=20):
+
+    # directory that contains the NIST data
+    # the following path will work if you are connected to a machine
+    # on the DIRO network
+    datapath = '/data/lisa/data/nist/by_class/'
+    # the .ft file contains the NIST digits in an efficient format. The digits
+    # are stored in an NxD matrix, where N is the number of images and D is
+    # the number of pixels per image (32x32 = 1024). Each pixel of the image is
+    # a value between 0 and 255, corresponding to a grey level. The values are
+    # stored as uint8, i.e. as bytes.
+    f = open(datapath+'digits/digits_train_data.ft')
+    # Make sure you have enough memory to load the whole dataset into RAM.
+    # Otherwise, use ft.arraylike, a class built specifically for files
+    # that you do not want to load into RAM.
+    d = ft.read(f)
+
+    # NB: do not forget to divide the pixel values by 255. if you use the data
+    # as inputs to a neural network and you want inputs between 0 and 1.
+    # digits_train_data.ft contains the images, digits_train_labels.ft contains
+    # the labels
+    f = open(datapath+'digits/digits_train_labels.ft')
+    labels = ft.read(f)
+
+
+    # Load the dataset 
+    #f = gzip.open(fname,'rb')
+    #train_set, valid_set, test_set = cPickle.load(f)
+    #f.close()
+
+    # make minibatches of size 20 
+    batch_size = batch   # size of the minibatch
+
+    # Dealing with the training set
+    # get the list of training images (x) and their labels (y)
+    (train_set_x, train_set_y) = (d[:4000,:],labels[:4000])
+    # initialize the list of training minibatches with empty list
+    train_batches = []
+    for i in xrange(0, len(train_set_x), batch_size):
+        # add to the list of minibatches the minibatch starting at 
+        # position i, ending at position i+batch_size
+        # a minibatch is a pair ; the first element of the pair is a list 
+        # of datapoints, the second element is the list of corresponding 
+        # labels
+        train_batches = train_batches + \
+               [(train_set_x[i:i+batch_size], train_set_y[i:i+batch_size])]
+
+    #print train_batches[500]
+
+    # Dealing with the validation set
+    (valid_set_x, valid_set_y) = (d[4000:5000,:],labels[4000:5000])
+    # initialize the list of validation minibatches 
+    valid_batches = []
+    for i in xrange(0, len(valid_set_x), batch_size):
+        valid_batches = valid_batches + \
+               [(valid_set_x[i:i+batch_size], valid_set_y[i:i+batch_size])]
+
+    # Dealing with the testing set
+    (test_set_x, test_set_y) = (d[5000:6000,:],labels[5000:6000])
+    # initialize the list of testing minibatches 
+    test_batches = []
+    for i in xrange(0, len(test_set_x), batch_size):
+        test_batches = test_batches + \
+              [(test_set_x[i:i+batch_size], test_set_y[i:i+batch_size])]
+
+    return train_batches, valid_batches, test_batches
+
+
+def evaluate_lenet5(learning_rate=0.1, n_iter=1, batch_size=20, n_kern0=20,n_kern1=50,filter_shape=5,n_layer=3, dataset='mnist.pkl.gz'):
+    rng = numpy.random.RandomState(23455)
+
+    print 'Before load dataset'
+    train_batches, valid_batches, test_batches = load_dataset(dataset,batch_size)
+    print 'After load dataset'
+
+    ishape = (32,32)     # this is the size of NIST images
+    n_kern2=80
+
+    # allocate symbolic variables for the data
+    x = T.matrix('x')  # rasterized images
+    y = T.lvector()  # the labels are presented as 1D vector of [long int] labels
+
+
+    ######################
+    # BUILD ACTUAL MODEL #
+    ######################
+
+    # Reshape matrix of rasterized images of shape (batch_size,32*32)
+    # to a 4D tensor, compatible with our LeNetConvPoolLayer
+    layer0_input = x.reshape((batch_size,1,32,32))
+
+    # Construct the first convolutional pooling layer:
+    # filtering reduces the image size to (32-5+1,32-5+1)=(28,28)
+    # maxpooling reduces this further to (28/2,28/2) = (14,14)
+    # 4D output tensor is thus of shape (20,20,14,14)
+    layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
+            image_shape=(batch_size,1,32,32), 
+            filter_shape=(n_kern0,1,filter_shape,filter_shape), poolsize=(2,2))
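+    # Hedged sanity check of the shape arithmetic (assuming the default
+    # filter_shape=5 and poolsize=(2,2)): each conv+pool stage maps a side
+    # length s to (s - filter_shape + 1)/2 with integer division, so
+    # 32 -> 14 -> 5; this is the fshape/fshape2 computation used below.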
+
+    if(n_layer>2):
+
+	# Construct the second convolutional pooling layer
+	# filtering reduces the image size to (14-5+1,14-5+1)=(10,10)
+	# maxpooling reduces this further to (10/2,10/2) = (5,5)
+	# 4D output tensor is thus of shape (20,50,5,5)
+	fshape=(32-filter_shape+1)/2
+	layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
+		image_shape=(batch_size,n_kern0,fshape,fshape), 
+		filter_shape=(n_kern1,n_kern0,filter_shape,filter_shape), poolsize=(2,2))
+
+    else:
+
+	fshape=(32-filter_shape+1)/2
+	layer1_input = layer0.output.flatten(2)
+		# construct a fully-connected sigmoidal layer
+	layer1 = SigmoidalLayer(rng, input=layer1_input,n_in=n_kern0*fshape*fshape, n_out=500)
+
+	layer2 = LogisticRegression(input=layer1.output, n_in=500, n_out=10)
+	cost = layer2.negative_log_likelihood(y)
+	test_model = theano.function([x,y], layer2.errors(y))
+	params = layer2.params+ layer1.params + layer0.params
+
+
+    if(n_layer>3):
+
+	fshape=(32-filter_shape+1)/2
+	fshape2=(fshape-filter_shape+1)/2
+	fshape3=(fshape2-filter_shape+1)/2
+	layer2 = LeNetConvPoolLayer(rng, input=layer1.output,
+		image_shape=(batch_size,n_kern1,fshape2,fshape2), 
+		filter_shape=(n_kern2,n_kern1,filter_shape,filter_shape), poolsize=(2,2))
+
+	layer3_input = layer2.output.flatten(2)
+
+	layer3 = SigmoidalLayer(rng, input=layer3_input, 
+					n_in=n_kern2*fshape3*fshape3, n_out=500)
+
+  
+	layer4 = LogisticRegression(input=layer3.output, n_in=500, n_out=10)
+
+	cost = layer4.negative_log_likelihood(y)
+
+	test_model = theano.function([x,y], layer4.errors(y))
+
+	params = layer4.params+ layer3.params+ layer2.params+ layer1.params + layer0.params
+
+ 
+    elif(n_layer>2):
+
+	fshape=(32-filter_shape+1)/2
+	fshape2=(fshape-filter_shape+1)/2
+
+	# the SigmoidalLayer being fully-connected, it operates on 2D matrices of
+	# shape (batch_size,num_pixels) (i.e matrix of rasterized images).
+	# This will generate a matrix of shape (batch_size, n_kern1*fshape2*fshape2),
+	# e.g. (20, 50*5*5) = (20, 1250) with the default settings
+	layer2_input = layer1.output.flatten(2)
+
+	# construct a fully-connected sigmoidal layer
+	layer2 = SigmoidalLayer(rng, input=layer2_input, 
+					n_in=n_kern1*fshape2*fshape2, n_out=500)
+
+  
+	# classify the values of the fully-connected sigmoidal layer
+	layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
+
+	# the cost we minimize during training is the NLL of the model
+	cost = layer3.negative_log_likelihood(y)
+
+	# create a function to compute the mistakes that are made by the model
+	test_model = theano.function([x,y], layer3.errors(y))
+
+	# create a list of all model parameters to be fit by gradient descent
+	params = layer3.params+ layer2.params+ layer1.params + layer0.params
+    	
+      
+  
+		
+    
+    # create a list of gradients for all model parameters
+    grads = T.grad(cost, params)
+
+    # train_model is a function that updates the model parameters by SGD
+    # Since this model has many parameters, it would be tedious to manually
+    # create an update rule for each model parameter. We thus create the updates
+    # dictionary by automatically looping over all (params[i],grads[i])  pairs.
+    updates = {}
+    for param_i, grad_i in zip(params, grads):
+        updates[param_i] = param_i - learning_rate * grad_i
+    train_model = theano.function([x, y], cost, updates=updates)
+
+
+    ###############
+    # TRAIN MODEL #
+    ###############
+
+    n_minibatches        = len(train_batches) 
+
+    # early-stopping parameters
+    patience              = 10000 # look at this many examples regardless
+    patience_increase     = 2     # wait this much longer when a new best is 
+                                  # found
+    improvement_threshold = 0.995 # a relative improvement of this much is 
+                                  # considered significant
+    validation_frequency  = n_minibatches  # go through this many 
+                                  # minibatches before checking the network 
+                                  # on the validation set; in this case we 
+                                  # check every epoch 
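+    # In rough pseudo-code, the early-stopping rule implemented by the loop
+    # below (a paraphrase of the code, not extra logic) is:
+    #   if this_validation_loss < best_validation_loss * improvement_threshold:
+    #       patience = max(patience, iter * patience_increase)
+    #   stop as soon as patience <= iter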
+
+    best_params          = None
+    best_validation_loss = float('inf')
+    best_iter            = 0
+    test_score           = 0.
+    start_time = time.clock()
+
+    # have a maximum of `n_iter` iterations through the entire dataset
+    for iter in xrange(n_iter * n_minibatches):
+
+        # get epoch and minibatch index
+        epoch           = iter / n_minibatches
+        minibatch_index =  iter % n_minibatches
+
+        # get the minibatches corresponding to `iter` modulo
+        # `len(train_batches)`
+        x,y = train_batches[ minibatch_index ]
+	
+        if iter %100 == 0:
+            print 'training @ iter = ', iter
+        cost_ij = train_model(x,y)
+
+        if (iter+1) % validation_frequency == 0: 
+
+            # compute zero-one loss on validation set 
+            this_validation_loss = 0.
+            for x,y in valid_batches:
+                # sum up the errors for each minibatch
+                this_validation_loss += test_model(x,y)
+
+            # get the average by dividing with the number of minibatches
+            this_validation_loss /= len(valid_batches)
+            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+                   (epoch, minibatch_index+1, n_minibatches, \
+                    this_validation_loss*100.))
+
+
+            # if we got the best validation score until now
+            if this_validation_loss < best_validation_loss:
+
+                #improve patience if loss improvement is good enough
+                if this_validation_loss < best_validation_loss *  \
+                       improvement_threshold :
+                    patience = max(patience, iter * patience_increase)
+
+                # save best validation score and iteration number
+                best_validation_loss = this_validation_loss
+                best_iter = iter
+
+                # test it on the test set
+                test_score = 0.
+                for x,y in test_batches:
+                    test_score += test_model(x,y)
+                test_score /= len(test_batches)
+                print(('     epoch %i, minibatch %i/%i, test error of best '
+                      'model %f %%') % 
+                             (epoch, minibatch_index+1, n_minibatches,
+                              test_score*100.))
+
+        if patience <= iter :
+            break
+
+    end_time = time.clock()
+    print('Optimization complete.')
+    print('Best validation score of %f %% obtained at iteration %i,'\
+          'with test performance %f %%' %  
+          (best_validation_loss * 100., best_iter, test_score*100.))
+    print('The code ran for %f minutes' % ((end_time-start_time)/60.))
+
+    return (best_validation_loss * 100., test_score*100., (end_time-start_time)/60., best_iter)
+
+if __name__ == '__main__':
+    evaluate_lenet5()
+
+def experiment(state, channel):
+    print 'start experiment'
+    (best_validation_loss, test_score, minutes_trained, iter) = evaluate_lenet5(state.learning_rate, state.n_iter, state.batch_size, state.n_kern0, state.n_kern1, state.filter_shape, state.n_layer)
+    print 'end experiment'
+    
+    state.best_validation_loss = best_validation_loss
+    state.test_score = test_score
+    state.minutes_trained = minutes_trained
+    state.iter = iter
+
+    return channel.COMPLETE
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/baseline/log_reg/log_reg.py	Fri Feb 26 14:24:11 2010 -0500
@@ -0,0 +1,437 @@
+"""
+This tutorial introduces logistic regression using Theano and stochastic 
+gradient descent.  
+
+Logistic regression is a probabilistic, linear classifier. It is parametrized
+by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
+done by projecting data points onto a set of hyperplanes, the distance to
+which is used to determine a class membership probability. 
+
+Mathematically, this can be written as:
+
+.. math::
+  P(Y=i|x, W,b) &= softmax_i(W x + b) \\
+                &= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
+
+
+The model's prediction is then obtained by taking the argmax of
+the vector whose i'th element is P(Y=i|x).
+
+.. math::
+
+  y_{pred} = argmax_i P(Y=i|x,W,b)
+
+
+This tutorial presents a stochastic gradient descent optimization method 
+suitable for large datasets, and a conjugate gradient optimization method 
+that is suitable for smaller datasets.
+
+
+References:
+
+    - textbooks: "Pattern Recognition and Machine Learning" - 
+                 Christopher M. Bishop, section 4.3.2
+
+"""
+__docformat__ = 'restructuredtext en'
+
+import numpy, time, cPickle, gzip
+
+import theano
+import theano.tensor as T
+
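+# A minimal numpy sketch (illustrative values only) of the prediction rule in
+# the module docstring, P(Y=i|x) = softmax_i(W x + b), y_pred = argmax_i P(Y=i|x):
+#
+#   x = numpy.random.rand(784); W = numpy.zeros((784, 10)); b = numpy.zeros(10)
+#   a = numpy.dot(x, W) + b
+#   p = numpy.exp(a) / numpy.exp(a).sum()   # class-membership probabilities
+#   y_pred = p.argmax()                     # predicted class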
+
+class LogisticRegression(object):
+    """Multi-class Logistic Regression Class
+
+    The logistic regression is fully described by a weight matrix :math:`W` 
+    and bias vector :math:`b`. Classification is done by projecting data 
+    points onto a set of hyperplanes, the distance to which is used to 
+    determine a class membership probability. 
+    """
+
+
+    def __init__( self, input, n_in, n_out ):
+        """ Initialize the parameters of the logistic regression
+
+        :type input: theano.tensor.TensorType
+        :param input: symbolic variable that describes the input of the 
+                      architecture (one minibatch)
+        
+        :type n_in: int
+        :param n_in: number of input units, the dimension of the space in 
+                     which the datapoints lie
+
+        :type n_out: int
+        :param n_out: number of output units, the dimension of the space in 
+                      which the labels lie
+
+        """ 
+
+        # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
+        self.W = theano.shared( value = numpy.zeros(( n_in, n_out ), dtype = theano.config.floatX ),
+                                name = 'W')
+        # initialize the biases b as a vector of n_out 0s
+        self.b = theano.shared( value = numpy.zeros(( n_out, ), dtype = theano.config.floatX ),
+                               name = 'b')
+
+
+        # compute vector of class-membership probabilities in symbolic form
+        self.p_y_given_x = T.nnet.softmax( T.dot( input, self.W ) + self.b )
+
+        # compute prediction as class whose probability is maximal in 
+        # symbolic form
+        self.y_pred=T.argmax( self.p_y_given_x, axis =1 )
+
+        # parameters of the model
+        self.params = [ self.W, self.b ]
+
+
+    def negative_log_likelihood( self, y ):
+        """Return the mean of the negative log-likelihood of the prediction
+        of this model under a given target distribution.
+
+        .. math::
+
+            \ell (\theta=\{W,b\}, \mathcal{D}) = - \frac{1}{|\mathcal{D}|}
+            \sum_{i=0}^{|\mathcal{D}|-1} \log(P(Y=y^{(i)}|x^{(i)}, W,b))
+
+        :type y: theano.tensor.TensorType
+        :param y: corresponds to a vector that gives for each example the
+                  correct label
+
+        Note: we use the mean instead of the sum so that
+              the learning rate is less dependent on the batch size
+        """
+        # y.shape[0] is (symbolically) the number of rows in y, i.e., number of examples (call it n) in the minibatch
+        # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
+        # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class 
+        # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]]
+        # and T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v,
+        # i.e., the mean log-likelihood across the minibatch.
+        return -T.mean( T.log( self.p_y_given_x )[ T.arange( y.shape[0] ), y ] )
+
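+    # A small worked example (made-up numbers) of the indexing used above: with
+    # a minibatch of n=3 examples, LP = T.log(p_y_given_x) is a 3 x n_out matrix
+    # and y = [2, 0, 5]; LP[T.arange(3), y] picks out [LP[0,2], LP[1,0], LP[2,5]],
+    # and the returned cost is minus the mean of those three log-probabilities.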
+
+    def errors( self, y ):
+        """Return a float representing the number of errors in the minibatch 
+        over the total number of examples of the minibatch; zero-one
+        loss over the size of the minibatch
+
+        :type y: theano.tensor.TensorType
+        :param y: corresponds to a vector that gives for each example the 
+                  correct label
+        """
+
+        # check if y has same dimension of y_pred 
+        if y.ndim != self.y_pred.ndim:
+            raise TypeError( 'y should have the same shape as self.y_pred', 
+                ( 'y', y.type, 'y_pred', self.y_pred.type ) )
+        # check if y is of the correct datatype        
+        if y.dtype.startswith('int'):
+            # the T.neq operator returns a vector of 0s and 1s, where 1
+            # represents a mistake in prediction
+            return T.mean( T.neq( self.y_pred, y ) )
+        else:
+            raise NotImplementedError()
+        
+def shared_dataset( data_xy ):
+        """ Function that loads the dataset into shared variables
+        
+        The reason we store our dataset in shared variables is to allow 
+        Theano to copy it into the GPU memory (when code is run on GPU). 
+        Since copying data into the GPU is slow, copying a minibatch every time
+        one is needed (the default behaviour if the data is not in a shared 
+        variable) would lead to a large decrease in performance.
+        """
+        data_x, data_y = data_xy
+        shared_x = theano.shared( numpy.asarray( data_x, dtype = theano.config.floatX ) )
+        shared_y = theano.shared( numpy.asarray( data_y, dtype = theano.config.floatX ) )
+        # When storing data on the GPU it has to be stored as floats
+        # therefore we will store the labels as ``floatX`` as well
+        # (``shared_y`` does exactly that). But during our computations
+        # we need them as ints (we use labels as index, and if they are 
+        # floats it doesn't make sense) therefore instead of returning 
+        # ``shared_y`` we will have to cast it to int. This little hack
+        # lets us get around this issue
+        return shared_x, T.cast( shared_y, 'int32' )
+
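+# Minimal usage sketch of shared_dataset (the arrays below are illustrative
+# assumptions, not read from the MNIST file):
+#
+#   data_x = numpy.random.rand(100, 28 * 28)
+#   data_y = numpy.random.randint(0, 10, size=100)
+#   shared_x, shared_y = shared_dataset((data_x, data_y))
+#   # shared_x holds floatX data; shared_y is a symbolic int32 cast of the labels
+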
+def load_data_pkl_gz( dataset ):
+    ''' Loads the dataset
+
+    :type dataset: string
+    :param dataset: the path to the dataset (here MNIST)
+    '''
+
+    #--------------------------------------------------------------------------------------------------------------------
+    # Load Data
+    #--------------------------------------------------------------------------------------------------------------------
+
+
+    print '... loading data'
+
+    # Load the dataset 
+    f = gzip.open(dataset,'rb')
+    train_set, valid_set, test_set = cPickle.load(f)
+    f.close()
+    
+    test_set_x,  test_set_y  = shared_dataset( test_set )
+    valid_set_x, valid_set_y = shared_dataset( valid_set )
+    train_set_x, train_set_y = shared_dataset( train_set )
+
+    rval = [ ( train_set_x, train_set_y ), ( valid_set_x,valid_set_y ), ( test_set_x, test_set_y ) ]
+    return rval
+
+##def load_data_ft(      verbose = False,\
+##                                    data_path = '/data/lisa/data/nist/by_class/'\
+##                                    train_data = 'all/all_train_data.ft',\
+##                                    train_labels = 'all/all_train_labels.ft',\
+##                                    test_data = 'all/all_test_data.ft',\
+##                                    test_labels = 'all/all_test_labels.ft'):
+##   
+##    train_data_file = open(data_path + train_data)
+##    train_labels_file = open(data_path + train_labels)
+##    test_labels_file = open(data_path + test_data)
+##    test_data_file = open(data_path + test_labels)
+##    
+##    raw_train_data = ft.read( train_data_file)
+##    raw_train_labels = ft.read(train_labels_file)
+##    raw_test_data = ft.read( test_labels_file)
+##    raw_test_labels = ft.read( test_data_file)
+##    
+##    f.close()
+##    g.close()
+##    i.close()
+##    h.close()
+##    
+##    
+##    test_set_x,  test_set_y  = shared_dataset(test_set)
+##    valid_set_x, valid_set_y = shared_dataset(valid_set)
+##    train_set_x, train_set_y = shared_dataset(train_set)
+##
+##    rval = [(train_set_x, train_set_y), (valid_set_x,valid_set_y), (test_set_x, test_set_y)]
+##    return rval
+##    #create a validation set the same size as the test size
+##    #use the end of the training array for this purpose
+##    #discard the last remaining so we get a %batch_size number
+##    test_size=len(raw_test_labels)
+##    test_size = int(test_size/batch_size)
+##    test_size*=batch_size
+##    train_size = len(raw_train_data)
+##    train_size = int(train_size/batch_size)
+##    train_size*=batch_size
+##    validation_size =test_size 
+##    offset = train_size-test_size
+##    if verbose == True:
+##        print 'train size = %d' %train_size
+##        print 'test size = %d' %test_size
+##        print 'valid size = %d' %validation_size
+##        print 'offset = %d' %offset
+##    
+##    
+
+#--------------------------------------------------------------------------------------------------------------------
+# MAIN
+#--------------------------------------------------------------------------------------------------------------------
+
+def log_reg( learning_rate = 0.13, nb_max_examples =1000000, batch_size = 50, \
+                    dataset_name = 'mnist.pkl.gz', image_size = 28 * 28, nb_class = 10,  \
+                    patience = 5000, patience_increase = 2, improvement_threshold = 0.995):
+    
+    """
+    Demonstrate stochastic gradient descent optimization of a log-linear 
+    model
+
+    This is demonstrated on MNIST.
+    
+    :type learning_rate: float
+    :param learning_rate: learning rate used (factor for the stochastic 
+                          gradient)
+
+    :type nb_max_examples: int
+    :param nb_max_examples: maximal number of examples to use for training
+                            (used to derive the number of epochs)
+    
+    :type batch_size: int  
+    :param batch_size:  size of the minibatch
+
+    :type dataset_name: string
+    :param dataset_name: the path of the MNIST dataset file from 
+                         http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
+                        
+    :type image_size: int
+    :param image_size: size of the input image in pixels (width * height)
+    
+    :type nb_class: int
+    :param nb_class: number of classes
+    
+    :type patience: int
+    :param patience: look at this many examples regardless
+    
+    :type patience_increase: int
+    :param patience_increase: wait this much longer when a new best is found
+    
+    :type improvement_threshold: float
+    :param improvement_threshold: a relative improvement of this much is considered significant
+
+
+    """
+    datasets = load_data_pkl_gz( dataset_name )
+
+    train_set_x, train_set_y = datasets[0]
+    valid_set_x, valid_set_y = datasets[1]
+    test_set_x , test_set_y   = datasets[2]
+
+    # compute number of minibatches for training, validation and testing
+    n_train_batches = train_set_x.value.shape[0] / batch_size
+    n_valid_batches = valid_set_x.value.shape[0] / batch_size
+    n_test_batches  = test_set_x.value.shape[0]  / batch_size
+
+    #--------------------------------------------------------------------------------------------------------------------
+    # Build actual model
+    #--------------------------------------------------------------------------------------------------------------------
+    
+    print '... building the model'
+
+    # allocate symbolic variables for the data
+    index = T.lscalar( )    # index to a [mini]batch 
+    x        = T.matrix('x')  # the data is presented as rasterized images
+    y        = T.ivector('y') # the labels are presented as 1D vector of 
+                           # [int] labels
+
+    # construct the logistic regression class
+    
+    classifier = LogisticRegression( input = x, n_in = image_size, n_out = nb_class )
+
+    # the cost we minimize during training is the negative log likelihood of 
+    # the model in symbolic format
+    cost = classifier.negative_log_likelihood( y ) 
+
+    # compiling a Theano function that computes the mistakes that are made by 
+    # the model on a minibatch
+    test_model = theano.function( inputs = [ index ], 
+            outputs = classifier.errors( y ),
+            givens = {
+                x:test_set_x[ index * batch_size: ( index + 1 ) * batch_size ],
+                y:test_set_y[ index * batch_size: ( index + 1 ) * batch_size ] } )
+
+    validate_model = theano.function( inputs = [ index ], 
+            outputs = classifier.errors( y ),
+            givens = {
+                x:valid_set_x[ index * batch_size: ( index + 1 ) * batch_size ],
+                y:valid_set_y[ index * batch_size: ( index + 1 ) * batch_size ] } )
+
+    # compute the gradient of cost with respect to theta = ( W, b ) 
+    g_W = T.grad( cost = cost, wrt = classifier.W )
+    g_b  = T.grad( cost = cost, wrt = classifier.b )
+
+    # specify how to update the parameters of the model as a dictionary
+    updates = { classifier.W: classifier.W - learning_rate * g_W,\
+                         classifier.b: classifier.b  - learning_rate * g_b}
+
+    # compiling a Theano function `train_model` that returns the cost, but in 
+    # the same time updates the parameter of the model based on the rules 
+    # defined in `updates`
+    train_model = theano.function( inputs = [ index ], 
+            outputs = cost, 
+            updates = updates,
+            givens = {
+                x: train_set_x[ index * batch_size: ( index + 1 ) * batch_size ],
+                y: train_set_y[ index * batch_size: ( index + 1 ) * batch_size ] } )
+
+    #--------------------------------------------------------------------------------------------------------------------
+    # Train model
+    #--------------------------------------------------------------------------------------------------------------------
+   
+    print '... training the model'
+    # early-stopping parameters
+    patience              = 5000  # look at this many examples regardless
+    patience_increase     = 2     # wait this much longer when a new best is 
+                                  # found
+    improvement_threshold = 0.995 # a relative improvement of this much is 
+                                  # considered significant
+    validation_frequency  = min( n_train_batches, patience * 0.5 )  
+                                  # go through this many 
+                                  # minibatches before checking the network 
+                                  # on the validation set; in this case we 
+                                  # check every epoch 
+
+    best_params             = None
+    best_validation_loss = float('inf')
+    test_score                 = 0.
+    start_time                  = time.clock()
+
+    done_looping = False 
+    n_epochs       = nb_max_examples / train_set_x.value.shape[0]
+    epoch             = 0  
+    
+    while ( epoch < n_epochs ) and ( not done_looping ):
+        
+      epoch = epoch + 1
+      for minibatch_index in xrange( n_train_batches ):
+
+        minibatch_avg_cost = train_model( minibatch_index )
+        # iteration number
+        iter = epoch * n_train_batches + minibatch_index
+
+        if ( iter + 1 ) % validation_frequency == 0: 
+            # compute zero-one loss on validation set 
+            validation_losses     = [ validate_model( i ) for i in xrange( n_valid_batches ) ]
+            this_validation_loss = numpy.mean( validation_losses )
+
+            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+                 ( epoch, minibatch_index + 1,n_train_batches, \
+                  this_validation_loss*100. ) )
+
+
+            # if we got the best validation score until now
+            if this_validation_loss < best_validation_loss:
+                #improve patience if loss improvement is good enough
+                if this_validation_loss < best_validation_loss *  \
+                       improvement_threshold :
+                    patience = max( patience, iter * patience_increase )
+
+                best_validation_loss = this_validation_loss
+                # test it on the test set
+
+                test_losses = [test_model(i) for i in xrange(n_test_batches)]
+                test_score  = numpy.mean(test_losses)
+
+                print(('     epoch %i, minibatch %i/%i, test error of best ' 
+                       'model %f %%') % \
+                  (epoch, minibatch_index+1, n_train_batches,test_score*100.))
+
+        if patience <= iter :
+                done_looping = True
+                break
+
+    end_time = time.clock()
+    print(('Optimization complete with best validation score of %f %%,'
+           'with test performance %f %%') %  
+                 ( best_validation_loss * 100., test_score * 100.))
+    print ('The code ran for %f minutes' % ((end_time-start_time) / 60.))
+    
+ ######   return validation_error, test_error, nb_exemples, time
+
+if __name__ == '__main__':
+    log_reg()
+    
+ 
+def jobman_log_reg(state, channel):
+    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate,\
+                                                                                        nb_max_examples = state.nb_max_examples,\
+                                                                                                    batch_size  = state.batch_size,\
+                                                                                                dataset_name = state.dataset_name, \
+                                                                                                    image_size = state.image_size,  \
+                                                                                                       nb_class  = state.nb_class )
+
+    state.validation_error = validation_error
+    state.test_error = test_error
+    state.nb_exemples = nb_exemples
+    state.time = time
+    return channel.COMPLETE
+                                                                
+                                      
+    
+    
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/baseline/mlp/mlp_nist.py	Fri Feb 26 14:24:11 2010 -0500
@@ -0,0 +1,459 @@
+"""
+This tutorial introduces the multilayer perceptron using Theano.  
+
+ A multilayer perceptron is a logistic regressor where
+instead of feeding the input to the logistic regression you insert an
+intermediate layer, called the hidden layer, that has a nonlinear
+activation function (usually tanh or sigmoid). One can use many such
+hidden layers, making the architecture deep. The tutorial will also tackle
+the problem of NIST character classification.
+
+.. math::
+
+    f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
+
+References:
+
+    - textbooks: "Pattern Recognition and Machine Learning" - 
+                 Christopher M. Bishop, section 5
+
+TODO: recommended preprocessing, lr ranges, regularization ranges (explain 
+      to do lr first, then add regularization)
+
+"""
+__docformat__ = 'restructuredtext en'
+
+import pdb
+import numpy
+import pylab
+import theano
+import theano.tensor as T
+import time 
+import theano.tensor.nnet
+import pylearn
+from pylearn.io import filetensor as ft
+
+data_path = '/data/lisa/data/nist/by_class/'
+
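+# A hedged numpy sketch of the forward pass in the docstring formula
+# f(x) = G(b^{(2)} + W^{(2)} s(b^{(1)} + W^{(1)} x)); shapes are illustrative
+# assumptions (a batch of 20 NIST images, 500 hidden units, 62 classes):
+#
+#   x  = numpy.random.rand(20, 32 * 32)
+#   W1 = numpy.zeros((32 * 32, 500)); b1 = numpy.zeros(500)
+#   W2 = numpy.zeros((500, 62));      b2 = numpy.zeros(62)
+#   h  = numpy.tanh(numpy.dot(x, W1) + b1)                       # s(.) = tanh
+#   a  = numpy.dot(h, W2) + b2
+#   p  = numpy.exp(a) / numpy.exp(a).sum(axis=1, keepdims=True)  # G(.) = softmax
+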
+class MLP(object):
+    """Multi-Layer Perceptron Class
+
+    A multilayer perceptron is a feedforward artificial neural network model 
+    that has one layer or more of hidden units and nonlinear activations. 
+    Intermediate layers usually have tanh or the sigmoid function as their
+    activation function, while the top layer is a softmax layer. 
+    """
+
+
+
+    def __init__(self, input, n_in, n_hidden, n_out,learning_rate):
+        """Initialize the parameters for the multilayer perceptron
+
+        :param input: symbolic variable that describes the input of the 
+        architecture (one minibatch)
+
+        :param n_in: number of input units, the dimension of the space in 
+        which the datapoints lie
+
+        :param n_hidden: number of hidden units 
+
+        :param n_out: number of output units, the dimension of the space in 
+        which the labels lie
+
+        """
+
+        # initialize the parameters theta = (W1,b1,W2,b2) ; note that this 
+        # example contains only one hidden layer, but one can have as many 
+        # layers as he/she wishes, making the network deeper. The only 
+        # problem with making the network deep this way is that during learning, 
+        # backpropagation may be unable to move the network from the starting
+        # point towards a good solution; this is where pre-training helps, giving a good 
+        # starting point for backpropagation, but more about this in the 
+        # other tutorials
+        
+        # `W1` is initialized with `W1_values` which is uniformly sampled
+        # between -sqrt(6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden));
+        # the output of uniform is converted using asarray to dtype 
+        # theano.config.floatX so that the code is runnable on GPU
+        W1_values = numpy.asarray( numpy.random.uniform( \
+              low = -numpy.sqrt(6./(n_in+n_hidden)), \
+              high = numpy.sqrt(6./(n_in+n_hidden)), \
+              size = (n_in, n_hidden)), dtype = theano.config.floatX)
+        # `W2` is initialized with `W2_values` which is uniformly sampled 
+        # between -sqrt(6./(n_hidden+n_out)) and sqrt(6./(n_hidden+n_out));
+        # the output of uniform is converted using asarray to dtype 
+        # theano.config.floatX so that the code is runnable on GPU
+        W2_values = numpy.asarray( numpy.random.uniform( 
+              low = -numpy.sqrt(6./(n_hidden+n_out)), \
+              high= numpy.sqrt(6./(n_hidden+n_out)),\
+              size= (n_hidden, n_out)), dtype = theano.config.floatX)
+
+        self.W1 = theano.shared( value = W1_values )
+        self.b1 = theano.shared( value = numpy.zeros((n_hidden,), 
+                                                dtype= theano.config.floatX))
+        self.W2 = theano.shared( value = W2_values )
+        self.b2 = theano.shared( value = numpy.zeros((n_out,), 
+                                                dtype= theano.config.floatX))
+
+        #include the learning rate in the classifier so
+        #we can modify it on the fly when we want
+        lr_value=learning_rate
+        self.lr=theano.shared(value=lr_value)
+        # symbolic expression computing the values of the hidden layer
+        self.hidden = T.tanh(T.dot(input, self.W1)+ self.b1)
+        
+        
+
+        # symbolic expression computing the values of the top layer 
+        self.p_y_given_x= T.nnet.softmax(T.dot(self.hidden, self.W2)+self.b2)
+
+        # compute prediction as class whose probability is maximal in 
+        # symbolic form
+        self.y_pred = T.argmax( self.p_y_given_x, axis =1)
+        self.y_pred_num = T.argmax( self.p_y_given_x[0:9], axis =1)
+        
+        
+        
+        
+        # L1 norm ; one regularization option is to enforce L1 norm to 
+        # be small 
+        self.L1     = abs(self.W1).sum() + abs(self.W2).sum()
+
+        # square of L2 norm ; one regularization option is to enforce 
+        # square of L2 norm to be small
+        self.L2_sqr = (self.W1**2).sum() + (self.W2**2).sum()
+
+
+
+    def negative_log_likelihood(self, y):
+        """Return the mean of the negative log-likelihood of the prediction
+        of this model under a given target distribution.
+
+        .. math::
+
+            \ell (\theta=\{W,b\}, \mathcal{D}) = - \frac{1}{|\mathcal{D}|}
+            \sum_{i=0}^{|\mathcal{D}|-1} \log(P(Y=y^{(i)}|x^{(i)}, W,b))
+
+
+        :param y: corresponds to a vector that gives for each example the
+                  correct label
+        """
+        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+
+
+
+
+    def errors(self, y):
+        """Return a float representing the number of errors in the minibatch 
+        over the total number of examples of the minibatch 
+        """
+
+        # check if y has same dimension of y_pred 
+        if y.ndim != self.y_pred.ndim:
+            raise TypeError('y should have the same shape as self.y_pred', 
+                ('y', y.type, 'y_pred', self.y_pred.type))
+        # check if y is of the correct datatype        
+        if y.dtype.startswith('int'):
+            # the T.neq operator returns a vector of 0s and 1s, where 1
+            # represents a mistake in prediction
+            return T.mean(T.neq(self.y_pred, y))
+        else:
+            raise NotImplementedError()
+
+
+def mlp_full_nist(      verbose = False,\
+                        adaptive_lr = 0,\
+                        train_data = 'all/all_train_data.ft',\
+                        train_labels = 'all/all_train_labels.ft',\
+                        test_data = 'all/all_test_data.ft',\
+                        test_labels = 'all/all_test_labels.ft',\
+                        learning_rate=0.01,\
+                        L1_reg = 0.00,\
+                        L2_reg = 0.0001,\
+                        nb_max_exemples=1000000,\
+                        batch_size=20,\
+                        nb_hidden = 500,\
+                        nb_targets = 62):
+   
+    
+    configuration = [learning_rate,nb_max_exemples,nb_hidden,adaptive_lr]
+    
+    total_validation_error_list = []
+    total_train_error_list = []
+    learning_rate_list=[]
+    best_training_error=float('inf');
+    
+    
+   
+    f = open(data_path+train_data)
+    g= open(data_path+train_labels)
+    h = open(data_path+test_data)
+    i= open(data_path+test_labels)
+    
+    raw_train_data = ft.read(f)
+    raw_train_labels = ft.read(g)
+    raw_test_data = ft.read(h)
+    raw_test_labels = ft.read(i)
+    
+    f.close()
+    g.close()
+    i.close()
+    h.close()
+    #create a validation set the same size as the test size
+    #use the end of the training array for this purpose
+    #discard the last remaining so we get a %batch_size number
+    test_size=len(raw_test_labels)
+    test_size = int(test_size/batch_size)
+    test_size*=batch_size
+    train_size = len(raw_train_data)
+    train_size = int(train_size/batch_size)
+    train_size*=batch_size
+    validation_size =test_size 
+    offset = train_size-test_size
+    if verbose == True:
+        print 'train size = %d' %train_size
+        print 'test size = %d' %test_size
+        print 'valid size = %d' %validation_size
+        print 'offset = %d' %offset
+    
+    
+    train_set = (raw_train_data,raw_train_labels)
+    train_batches = []
+    for i in xrange(0, train_size-test_size, batch_size):
+        train_batches = train_batches + \
+            [(raw_train_data[i:i+batch_size], raw_train_labels[i:i+batch_size])]
+            
+    test_batches = []
+    for i in xrange(0, test_size, batch_size):
+        test_batches = test_batches + \
+            [(raw_test_data[i:i+batch_size], raw_test_labels[i:i+batch_size])]
+    
+    validation_batches = []
+    for i in xrange(0, test_size, batch_size):
+        validation_batches = validation_batches + \
+            [(raw_train_data[offset+i:offset+i+batch_size], raw_train_labels[offset+i:offset+i+batch_size])]
+
+
+    ishape     = (32,32) # this is the size of NIST images
+
+    # allocate symbolic variables for the data
+    x = T.fmatrix()  # the data is presented as rasterized images
+    y = T.lvector()  # the labels are presented as 1D vector of 
+                          # [long int] labels
+
+    if verbose==True:
+        print 'finished parsing the data'
+    # construct the logistic regression class
+    classifier = MLP( input=x.reshape((batch_size,32*32)),\
+                        n_in=32*32,\
+                        n_hidden=nb_hidden,\
+                        n_out=nb_targets,
+                        learning_rate=learning_rate)
+                        
+                        
+   
+
+    # the cost we minimize during training is the negative log likelihood of 
+    # the model plus the regularization terms (L1 and L2); cost is expressed
+    # here symbolically
+    cost = classifier.negative_log_likelihood(y) \
+         + L1_reg * classifier.L1 \
+         + L2_reg * classifier.L2_sqr 
+
+    # compiling a theano function that computes the mistakes that are made by 
+    # the model on a minibatch
+    test_model = theano.function([x,y], classifier.errors(y))
+
+    # compute the gradient of cost with respect to theta = (W1, b1, W2, b2) 
+    g_W1 = T.grad(cost, classifier.W1)
+    g_b1 = T.grad(cost, classifier.b1)
+    g_W2 = T.grad(cost, classifier.W2)
+    g_b2 = T.grad(cost, classifier.b2)
+
+    # specify how to update the parameters of the model as a dictionary
+    updates = \
+        { classifier.W1: classifier.W1 - classifier.lr*g_W1 \
+        , classifier.b1: classifier.b1 - classifier.lr*g_b1 \
+        , classifier.W2: classifier.W2 - classifier.lr*g_W2 \
+        , classifier.b2: classifier.b2 - classifier.lr*g_b2 }
+
+    # compiling a theano function `train_model` that returns the cost, but in 
+    # the same time updates the parameter of the model based on the rules 
+    # defined in `updates`
+    train_model = theano.function([x, y], cost, updates = updates )
+    n_minibatches        = len(train_batches)
+
+   
+   
+    
+   
+   
+    # conditions for stopping the adaptation:
+    # 1) we have reached nb_max_exemples (this is rounded up to be a multiple of the train size)
+    # 2) validation error is going up twice in a row (probable overfitting)
+
+    # This means we no longer stop on slow convergence, as low learning rates stopped
+    # too fast.
+
+    # no longer relevant
+    patience              =nb_max_exemples/batch_size
+    patience_increase     = 2     # wait this much longer when a new best is 
+                                  # found
+    improvement_threshold = 0.995 # a relative improvement of this much is 
+                                  # considered significant
+    validation_frequency = n_minibatches/4
+   
+     
+
+   
+    best_params          = None
+    best_validation_loss = float('inf')
+    best_iter            = 0
+    test_score           = 0.
+    start_time = time.clock()
+    n_iter = nb_max_exemples/batch_size  # max number of times we are allowed to run through all examples
+    n_iter = n_iter/n_minibatches + 1 #round up
+    n_iter=max(1,n_iter) # run at least once on short debug call
+    
+   
+    if verbose == True:
+        print 'looping at most %d times through the data set' %n_iter
+    for iter in xrange(n_iter* n_minibatches):
+
+        # get epoch and minibatch index
+        epoch           = iter / n_minibatches
+        minibatch_index =  iter % n_minibatches
+        
+      
+        
+        # get the minibatches corresponding to `iter` modulo
+        # `len(train_batches)`
+        x,y = train_batches[ minibatch_index ]
+        # convert to float
+        x_float = x/255.0
+        cost_ij = train_model(x_float,y)
+
+        if (iter+1) % validation_frequency == 0: 
+            # compute zero-one loss on validation set 
+            
+            this_validation_loss = 0.
+            for x,y in validation_batches:
+                # sum up the errors for each minibatch
+                x_float = x/255.0
+                this_validation_loss += test_model(x_float,y)
+            # get the average by dividing with the number of minibatches
+            this_validation_loss /= len(validation_batches)
+            #save the validation loss
+            total_validation_error_list.append(this_validation_loss)
+            
+            #get the training error rate
+            this_train_loss=0
+            for x,y in train_batches:
+                # sum up the errors for each minibatch
+                x_float = x/255.0
+                this_train_loss += test_model(x_float,y)
+            # get the average by dividing with the number of minibatches
+            this_train_loss /= len(train_batches)
+            #save the training loss
+            total_train_error_list.append(this_train_loss)
+            if(this_train_loss<best_training_error):
+                best_training_error=this_train_loss
+                
+            if verbose == True:
+                print('epoch %i, minibatch %i/%i, validation error %f %%, training error %f %%' % \
+                    (epoch, minibatch_index+1, n_minibatches, \
+                        this_validation_loss*100.,this_train_loss*100))
+                        
+                        
+            #save the learning rate
+            learning_rate_list.append(classifier.lr.value)
+
+
+            # if we got the best validation score until now
+            if this_validation_loss < best_validation_loss:
+                # save best validation score and iteration number
+                best_validation_loss = this_validation_loss
+                best_iter = iter
+                # reset patience if we are going down again
+                # so we continue exploring
+                patience=nb_max_exemples/batch_size
+                # test it on the test set
+                test_score = 0.
+                for x,y in test_batches:
+                    x_float=x/255.0
+                    test_score += test_model(x_float,y)
+                test_score /= len(test_batches)
+                if verbose == True:
+                    print(('     epoch %i, minibatch %i/%i, test error of best '
+                        'model %f %%') % 
+                                (epoch, minibatch_index+1, n_minibatches,
+                                test_score*100.))
+                                
+            # if the validation error is going up, we are overfitting (or oscillating)
+            # stop converging but run at least to next validation
+            # to check for overfitting or oscillation
+            # the saved weights of the model will be a bit off in that case
+            elif this_validation_loss >= best_validation_loss:
+                #calculate the test error at this point and exit
+                # test it on the test set
+                # however, if adaptive_lr is true, try reducing the lr to
+                # get us out of an oscillation
+                if adaptive_lr==1:
+                    classifier.lr.value=classifier.lr.value/2.0
+
+                test_score = 0.
+                #cap the patience so we are allowed one more validation error
+                #calculation before aborting
+                patience = iter+validation_frequency+1
+                for x,y in test_batches:
+                    x_float=x/255.0
+                    test_score += test_model(x_float,y)
+                test_score /= len(test_batches)
+                if verbose == True:
+                    print ' validation error is going up, possibly stopping soon'
+                    print(('     epoch %i, minibatch %i/%i, test error of best '
+                        'model %f %%') % 
+                                (epoch, minibatch_index+1, n_minibatches,
+                                test_score*100.))
+                                
+                
+
+
+        if iter>patience:
+            print 'we have diverged'
+            break
+
+
+    end_time = time.clock()
+    if verbose == True:
+        print(('Optimization complete. Best validation score of %f %% '
+            'obtained at iteration %i, with test performance %f %%') %  
+                    (best_validation_loss * 100., best_iter, test_score*100.))
+        print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
+        print iter
+        
+    #save the model weights and the training curves
+    #(numpy.savez appends .npz to these file names; a loading sketch follows after this function)
+    numpy.savez('model.npy', config=configuration, W1=classifier.W1.value,W2=classifier.W2.value, b1=classifier.b1.value,b2=classifier.b2.value)
+    numpy.savez('results.npy',config=configuration,total_train_error_list=total_train_error_list,total_validation_error_list=total_validation_error_list,\
+    learning_rate_list=learning_rate_list)
+    
+    return (best_training_error*100.0,best_validation_loss * 100.,test_score*100.,best_iter*batch_size,(end_time-start_time)/60)
+
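+# The validation block above halves the learning rate whenever the validation
+# error goes up (when adaptive_lr is on) and resets the patience budget on
+# every new best.  The helper below is a minimal standalone sketch of that
+# schedule only: it is not called by mlp_full_nist, its names are illustrative,
+# and patience is counted here in validation checks rather than in examples.
+def sketch_adaptive_lr_schedule(validation_errors, lr=0.01, patience=10):
+    best = float('inf')
+    remaining = patience
+    for err in validation_errors:
+        if err < best:
+            best = err
+            remaining = patience     # new best: restore the full patience budget
+        else:
+            lr = lr / 2.0            # error went up: halve the learning rate
+            remaining -= 1           # and burn one unit of patience
+        if remaining <= 0:
+            break                    # budget exhausted, as in the loop above
+    return lr, remaining
+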
+
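+# mlp_full_nist saves the weights and the training curves with numpy.savez
+# above; numpy.savez appends a .npz extension when the name does not already
+# end in one, so the curves land in 'results.npy.npz' on disk.  A minimal
+# sketch of reading them back (the file name is assumed to match that call):
+def sketch_load_results(path='results.npy.npz'):
+    results = numpy.load(path)
+    # every keyword passed to numpy.savez comes back as an array under its key
+    config           = results['config']
+    train_curve      = results['total_train_error_list']
+    validation_curve = results['total_validation_error_list']
+    lr_curve         = results['learning_rate_list']
+    return config, train_curve, validation_curve, lr_curve
+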
+if __name__ == '__main__':
+    mlp_full_nist()
+
+def jobman_mlp_full_nist(state,channel):
+    (train_error,validation_error,test_error,nb_exemples,time)=mlp_full_nist(learning_rate=state.learning_rate,\
+                                                                nb_max_exemples=state.nb_max_exemples,\
+                                                                nb_hidden=state.nb_hidden,\
+                                                                adaptive_lr=state.adaptive_lr)
+    state.train_error=train_error
+    state.validation_error=validation_error
+    state.test_error=test_error
+    state.nb_exemples=nb_exemples
+    state.time=time
+    return channel.COMPLETE
+                                                                
+                                                                
\ No newline at end of file
--- a/baseline_algorithms/conv_mlp/convolutional_mlp.conf	Fri Feb 26 14:23:47 2010 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-learning_rate=0.01
-n_iter=1
-batch_size=20
-n_kern0=20
-n_kern1=50
-filter_shape=5
-n_layer=3
\ No newline at end of file
--- a/baseline_algorithms/conv_mlp/convolutional_mlp.py	Fri Feb 26 14:23:47 2010 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,472 +0,0 @@
-"""
-This tutorial introduces the LeNet5 neural network architecture using Theano.  LeNet5 is a
-convolutional neural network, good for classifying images. This tutorial shows how to build the
-architecture, and comes with all the hyper-parameters you need to reproduce the paper's MNIST
-results.
-
-The best results are obtained after X iterations of the main program loop, which takes ***
-minutes on my workstation (an Intel Core i7, circa July 2009), and *** minutes on my GPU (an
-NVIDIA GTX 285 graphics processor).
-
-This implementation simplifies the model in the following ways:
-
- - LeNetConvPool doesn't implement location-specific gain and bias parameters
- - LeNetConvPool doesn't implement pooling by average, it implements pooling by max.
- - Digit classification is implemented with a logistic regression rather than an RBF network
- - LeNet5 was not fully-connected convolutions at second layer
-
-References:
- - Y. LeCun, L. Bottou, Y. Bengio and P. Haffner: Gradient-Based Learning Applied to Document
-   Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
-   http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
-"""
-
-import numpy, theano, cPickle, gzip, time
-import theano.tensor as T
-import theano.sandbox.softsign
-import pylearn.datasets.MNIST
-from pylearn.io import filetensor as ft
-from theano.sandbox import conv, downsample
-
-class LeNetConvPoolLayer(object):
-
-    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2,2)):
-        """
-        Allocate a LeNetConvPoolLayer with shared variable internal parameters.
-        :type rng: numpy.random.RandomState
-        :param rng: a random number generator used to initialize weights
-        :type input: theano.tensor.dtensor4
-        :param input: symbolic image tensor, of shape image_shape
-        :type filter_shape: tuple or list of length 4
-        :param filter_shape: (number of filters, num input feature maps,
-                              filter height,filter width)
-        :type image_shape: tuple or list of length 4
-        :param image_shape: (batch size, num input feature maps,
-                             image height, image width)
-        :type poolsize: tuple or list of length 2
-        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
-        """
-        assert image_shape[1]==filter_shape[1]
-        self.input = input
-   
-        # initialize weight values: the fan-in of each hidden neuron is
-        # restricted by the size of the receptive fields.
-        fan_in =  numpy.prod(filter_shape[1:])
-        W_values = numpy.asarray( rng.uniform( \
-              low = -numpy.sqrt(3./fan_in), \
-              high = numpy.sqrt(3./fan_in), \
-              size = filter_shape), dtype = theano.config.floatX)
-        self.W = theano.shared(value = W_values)
-
-        # the bias is a 1D tensor -- one bias per output feature map
-        b_values = numpy.zeros((filter_shape[0],), dtype= theano.config.floatX)
-        self.b = theano.shared(value= b_values)
-
-        # convolve input feature maps with filters
-        conv_out = conv.conv2d(input, self.W, 
-                filter_shape=filter_shape, image_shape=image_shape)
-
-        # downsample each feature map individually, using maxpooling
-        pooled_out = downsample.max_pool2D(conv_out, poolsize, ignore_border=True)
-
-        # add the bias term. Since the bias is a vector (1D array), we first
-        # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will thus
-        # be broadcasted across mini-batches and feature map width & height
-        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
-
-        # store parameters of this layer
-        self.params = [self.W, self.b]
-
-
-class SigmoidalLayer(object):
-    def __init__(self, rng, input, n_in, n_out):
-        """
-        Typical hidden layer of a MLP: units are fully-connected and have
-        sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
-        and the bias vector b is of shape (n_out,).
-        
-        Hidden unit activation is given by: tanh(dot(input,W) + b)
-
-        :type rng: numpy.random.RandomState
-        :param rng: a random number generator used to initialize weights
-        :type input: theano.tensor.dmatrix
-        :param input: a symbolic tensor of shape (n_examples, n_in)
-        :type n_in: int
-        :param n_in: dimensionality of input
-        :type n_out: int
-        :param n_out: number of hidden units
-        """
-        self.input = input
-
-        W_values = numpy.asarray( rng.uniform( \
-              low = -numpy.sqrt(6./(n_in+n_out)), \
-              high = numpy.sqrt(6./(n_in+n_out)), \
-              size = (n_in, n_out)), dtype = theano.config.floatX)
-        self.W = theano.shared(value = W_values)
-
-        b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
-        self.b = theano.shared(value= b_values)
-
-        self.output = T.tanh(T.dot(input, self.W) + self.b)
-        self.params = [self.W, self.b]
-
-
-class LogisticRegression(object):
-    """Multi-class Logistic Regression Class
-
-    The logistic regression is fully described by a weight matrix :math:`W` 
-    and bias vector :math:`b`. Classification is done by projecting data 
-    points onto a set of hyperplanes, the distance to which is used to 
-    determine a class membership probability. 
-    """
-
-    def __init__(self, input, n_in, n_out):
-        """ Initialize the parameters of the logistic regression
-        :param input: symbolic variable that describes the input of the 
-                      architecture (one minibatch)
-        :type n_in: int
-        :param n_in: number of input units, the dimension of the space in 
-                     which the datapoints lie
-        :type n_out: int
-        :param n_out: number of output units, the dimension of the space in 
-                      which the labels lie
-        """ 
-
-        # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
-        self.W = theano.shared( value=numpy.zeros((n_in,n_out),
-                                            dtype = theano.config.floatX) )
-        # initialize the baises b as a vector of n_out 0s
-        self.b = theano.shared( value=numpy.zeros((n_out,), 
-                                            dtype = theano.config.floatX) )
-        # compute vector of class-membership probabilities in symbolic form
-        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
-        
-        # compute prediction as class whose probability is maximal in 
-        # symbolic form
-        self.y_pred=T.argmax(self.p_y_given_x, axis=1)
-
-        # list of parameters for this layer
-        self.params = [self.W, self.b]
-
-    def negative_log_likelihood(self, y):
-        """Return the mean of the negative log-likelihood of the prediction
-        of this model under a given target distribution.
-        :param y: corresponds to a vector that gives for each example the
-                  correct label
-        Note: we use the mean instead of the sum so that
-        the learning rate is less dependent on the batch size
-        """
-        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
-
-    def errors(self, y):
-        """Return a float representing the number of errors in the minibatch 
-        over the total number of examples of the minibatch ; zero one
-        loss over the size of the minibatch
-        """
-        # check if y has same dimension of y_pred 
-        if y.ndim != self.y_pred.ndim:
-            raise TypeError('y should have the same shape as self.y_pred', 
-                ('y', target.type, 'y_pred', self.y_pred.type))
-
-        # check if y is of the correct datatype        
-        if y.dtype.startswith('int'):
-            # the T.neq operator returns a vector of 0s and 1s, where 1
-            # represents a mistake in prediction
-            return T.mean(T.neq(self.y_pred, y))
-        else:
-            raise NotImplementedError()
-
-
-def load_dataset(fname,batch=20):
-
-    # directory that contains the NIST data
-    # the following path will work if you are connected to a machine
-    # on the DIRO network
-    datapath = '/data/lisa/data/nist/by_class/'
-    # the .ft file contains the NIST digits in an efficient format. The digits
-    # are stored in an NxD matrix, where N is the number of images and D is
-    # the number of pixels per image (32x32 = 1024). Each pixel of the image is
-    # a value between 0 and 255, corresponding to a grey level. The values are
-    # stored as uint8, i.e. as bytes.
-    f = open(datapath+'digits/digits_train_data.ft')
-    # Make sure you have enough memory to load the whole dataset into RAM.
-    # Otherwise, use ft.arraylike, a class built specifically for files that
-    # you do not want to load into RAM.
-    d = ft.read(f)
-
-    # NB: do not forget to divide the pixel values by 255. if you ever use the
-    # data as inputs to a neural network and you want inputs between 0 and 1.
-    # digits_train_data.ft contains the images, digits_train_labels.ft contains
-    # the labels
-    f = open(datapath+'digits/digits_train_labels.ft')
-    labels = ft.read(f)
-
-
-    # Load the dataset 
-    #f = gzip.open(fname,'rb')
-    #train_set, valid_set, test_set = cPickle.load(f)
-    #f.close()
-
-    # make minibatches of size 20 
-    batch_size = batch   # size of the minibatch
-
-    # Dealing with the training set
-    # get the list of training images (x) and their labels (y)
-    (train_set_x, train_set_y) = (d[:4000,:],labels[:4000])
-    # initialize the list of training minibatches with empty list
-    train_batches = []
-    for i in xrange(0, len(train_set_x), batch_size):
-        # add to the list of minibatches the minibatch starting at 
-        # position i, ending at position i+batch_size
-        # a minibatch is a pair ; the first element of the pair is a list 
-        # of datapoints, the second element is the list of corresponding 
-        # labels
-        train_batches = train_batches + \
-               [(train_set_x[i:i+batch_size], train_set_y[i:i+batch_size])]
-
-    #print train_batches[500]
-
-    # Dealing with the validation set
-    (valid_set_x, valid_set_y) = (d[4000:5000,:],labels[4000:5000])
-    # initialize the list of validation minibatches 
-    valid_batches = []
-    for i in xrange(0, len(valid_set_x), batch_size):
-        valid_batches = valid_batches + \
-               [(valid_set_x[i:i+batch_size], valid_set_y[i:i+batch_size])]
-
-    # Dealing with the testing set
-    (test_set_x, test_set_y) = (d[5000:6000,:],labels[5000:6000])
-    # initialize the list of testing minibatches 
-    test_batches = []
-    for i in xrange(0, len(test_set_x), batch_size):
-        test_batches = test_batches + \
-              [(test_set_x[i:i+batch_size], test_set_y[i:i+batch_size])]
-
-    return train_batches, valid_batches, test_batches
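The three lists returned above all share the same layout: each element is an (x, y) pair holding batch_size image rows and their labels. A minimal sketch of consuming that structure, with random arrays standing in for the NIST data (shapes taken from the comments above):

import numpy

batch_size = 20
fake_x = numpy.random.randint(0, 256, size=(100, 1024)).astype('uint8')
fake_y = numpy.random.randint(0, 10, size=100)
fake_batches = [(fake_x[i:i + batch_size], fake_y[i:i + batch_size])
                for i in xrange(0, len(fake_x), batch_size)]

for x, y in fake_batches:
    x_float = x / 255.0      # scale the uint8 pixels to [0,1], as advised above
    assert x_float.shape == (batch_size, 1024) and y.shape == (batch_size,)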
-
-
-def evaluate_lenet5(learning_rate=0.1, n_iter=1, batch_size=20, n_kern0=20,n_kern1=50,filter_shape=5,n_layer=3, dataset='mnist.pkl.gz'):
-    rng = numpy.random.RandomState(23455)
-
-    print 'Before load dataset'
-    train_batches, valid_batches, test_batches = load_dataset(dataset,batch_size)
-    print 'After load dataset'
-
-    ishape = (32,32)     # this is the size of NIST images
-    n_kern2=80
-
-    # allocate symbolic variables for the data
-    x = T.matrix('x')  # rasterized images
-    y = T.lvector()  # the labels are presented as 1D vector of [long int] labels
-
-
-    ######################
-    # BUILD ACTUAL MODEL #
-    ######################
-
-    # Reshape matrix of rasterized images of shape (batch_size,28*28)
-    # to a 4D tensor, compatible with our LeNetConvPoolLayer
-    layer0_input = x.reshape((batch_size,1,32,32))
-
-    # Construct the first convolutional pooling layer:
-    # filtering reduces the image size to (32-5+1,32-5+1)=(28,28)
-    # maxpooling reduces this further to (28/2,28/2) = (14,14)
-    # 4D output tensor is thus of shape (20,20,14,14)
-    layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
-            image_shape=(batch_size,1,32,32), 
-            filter_shape=(n_kern0,1,filter_shape,filter_shape), poolsize=(2,2))
-
-    if(n_layer>2):
-
-	# Construct the second convolutional pooling layer
-	# filtering reduces the image size to (14-5+1,14-5+1)=(10,10)
-	# maxpooling reduces this further to (10/2,10/2) = (5,5)
-	# 4D output tensor is thus of shape (20,50,5,5)
-	fshape=(32-filter_shape+1)/2
-	layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
-		image_shape=(batch_size,n_kern0,fshape,fshape), 
-		filter_shape=(n_kern1,n_kern0,filter_shape,filter_shape), poolsize=(2,2))
-
-    else:
-
-	fshape=(32-filter_shape+1)/2
-	layer1_input = layer0.output.flatten(2)
-		# construct a fully-connected sigmoidal layer
-	layer1 = SigmoidalLayer(rng, input=layer1_input,n_in=n_kern0*fshape*fshape, n_out=500)
-
-	layer2 = LogisticRegression(input=layer1.output, n_in=500, n_out=10)
-	cost = layer2.negative_log_likelihood(y)
-	test_model = theano.function([x,y], layer2.errors(y))
-	params = layer2.params+ layer1.params + layer0.params
-
-
-    if(n_layer>3):
-
-	fshape=(32-filter_shape+1)/2
-	fshape2=(fshape-filter_shape+1)/2
-	fshape3=(fshape2-filter_shape+1)/2
-	layer2 = LeNetConvPoolLayer(rng, input=layer1.output,
-		image_shape=(batch_size,n_kern1,fshape2,fshape2), 
-		filter_shape=(n_kern2,n_kern1,filter_shape,filter_shape), poolsize=(2,2))
-
-	layer3_input = layer2.output.flatten(2)
-
-	layer3 = SigmoidalLayer(rng, input=layer3_input, 
-					n_in=n_kern2*fshape3*fshape3, n_out=500)
-
-  
-	layer4 = LogisticRegression(input=layer3.output, n_in=500, n_out=10)
-
-	cost = layer4.negative_log_likelihood(y)
-
-	test_model = theano.function([x,y], layer4.errors(y))
-
-	params = layer4.params+ layer3.params+ layer2.params+ layer1.params + layer0.params
-
- 
-    elif(n_layer>2):
-
-	fshape=(32-filter_shape+1)/2
-	fshape2=(fshape-filter_shape+1)/2
-
-	# the SigmoidalLayer being fully-connected, it operates on 2D matrices of
-	# shape (batch_size,num_pixels) (i.e matrix of rasterized images).
-	# This will generate a matrix of shape (20,32*4*4) = (20,512)
-	layer2_input = layer1.output.flatten(2)
-
-	# construct a fully-connected sigmoidal layer
-	layer2 = SigmoidalLayer(rng, input=layer2_input, 
-					n_in=n_kern1*fshape2*fshape2, n_out=500)
-
-  
-	# classify the values of the fully-connected sigmoidal layer
-	layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
-
-	# the cost we minimize during training is the NLL of the model
-	cost = layer3.negative_log_likelihood(y)
-
-	# create a function to compute the mistakes that are made by the model
-	test_model = theano.function([x,y], layer3.errors(y))
-
-	# create a list of all model parameters to be fit by gradient descent
-	params = layer3.params+ layer2.params+ layer1.params + layer0.params
-    	
-      
-  
-		
-    
-    # create a list of gradients for all model parameters
-    grads = T.grad(cost, params)
-
-    # train_model is a function that updates the model parameters by SGD
-    # Since this model has many parameters, it would be tedious to manually
-    # create an update rule for each model parameter. We thus create the updates
-    # dictionary by automatically looping over all (params[i],grads[i])  pairs.
-    updates = {}
-    for param_i, grad_i in zip(params, grads):
-        updates[param_i] = param_i - learning_rate * grad_i
-    train_model = theano.function([x, y], cost, updates=updates)
-
-
-    ###############
-    # TRAIN MODEL #
-    ###############
-
-    n_minibatches        = len(train_batches) 
-
-    # early-stopping parameters
-    patience              = 10000 # look as this many examples regardless
-    patience_increase     = 2     # wait this much longer when a new best is 
-                                  # found
-    improvement_threshold = 0.995 # a relative improvement of this much is 
-                                  # considered significant
-    validation_frequency  = n_minibatches  # go through this many 
-                                  # minibatches before checking the network 
-                                  # on the validation set; in this case we 
-                                  # check every epoch 
-
-    best_params          = None
-    best_validation_loss = float('inf')
-    best_iter            = 0
-    test_score           = 0.
-    start_time = time.clock()
-
-    # have a maximum of `n_iter` iterations through the entire dataset
-    for iter in xrange(n_iter * n_minibatches):
-
-        # get epoch and minibatch index
-        epoch           = iter / n_minibatches
-        minibatch_index =  iter % n_minibatches
-
-        # get the minibatches corresponding to `iter` modulo
-        # `len(train_batches)`
-        x,y = train_batches[ minibatch_index ]
-	
-        if iter %100 == 0:
-            print 'training @ iter = ', iter
-        cost_ij = train_model(x,y)
-
-        if (iter+1) % validation_frequency == 0: 
-
-            # compute zero-one loss on validation set 
-            this_validation_loss = 0.
-            for x,y in valid_batches:
-                # sum up the errors for each minibatch
-                this_validation_loss += test_model(x,y)
-
-            # get the average by dividing with the number of minibatches
-            this_validation_loss /= len(valid_batches)
-            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
-                   (epoch, minibatch_index+1, n_minibatches, \
-                    this_validation_loss*100.))
-
-
-            # if we got the best validation score until now
-            if this_validation_loss < best_validation_loss:
-
-                #improve patience if loss improvement is good enough
-                if this_validation_loss < best_validation_loss *  \
-                       improvement_threshold :
-                    patience = max(patience, iter * patience_increase)
-
-                # save best validation score and iteration number
-                best_validation_loss = this_validation_loss
-                best_iter = iter
-
-                # test it on the test set
-                test_score = 0.
-                for x,y in test_batches:
-                    test_score += test_model(x,y)
-                test_score /= len(test_batches)
-                print(('     epoch %i, minibatch %i/%i, test error of best '
-                      'model %f %%') % 
-                             (epoch, minibatch_index+1, n_minibatches,
-                              test_score*100.))
-
-        if patience <= iter :
-            break
-
-    end_time = time.clock()
-    print('Optimization complete.')
-    print('Best validation score of %f %% obtained at iteration %i,'\
-          'with test performance %f %%' %  
-          (best_validation_loss * 100., best_iter, test_score*100.))
-    print('The code ran for %f minutes' % ((end_time-start_time)/60.))
-
-    return (best_validation_loss * 100., test_score*100., (end_time-start_time)/60., best_iter)
-
-if __name__ == '__main__':
-    evaluate_lenet5()
-
-def experiment(state, channel):
-    print 'start experiment'
-    (best_validation_loss, test_score, minutes_trained, iter) = evaluate_lenet5(state.learning_rate, state.n_iter, state.batch_size, state.n_kern0, state.n_kern1, state.filter_shape, state.n_layer)
-    print 'end experiment'
-    
-    state.best_validation_loss = best_validation_loss
-    state.test_score = test_score
-    state.minutes_trained = minutes_trained
-    state.iter = iter
-
-    return channel.COMPLETE
--- a/baseline_algorithms/log_reg/log_reg.py	Fri Feb 26 14:23:47 2010 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,437 +0,0 @@
-"""
-This tutorial introduces logistic regression using Theano and stochastic 
-gradient descent.  
-
-Logistic regression is a probabilistic, linear classifier. It is parametrized
-by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
-done by projecting data points onto a set of hyperplanes, the distance to
-which is used to determine a class membership probability. 
-
-Mathematically, this can be written as:
-
-.. math::
-  P(Y=i|x, W,b) &= softmax_i(W x + b) \\
-                &= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
-
-
-The output of the model or prediction is then done by taking the argmax of 
-the vector whose i'th element is P(Y=i|x).
-
-.. math::
-
-  y_{pred} = argmax_i P(Y=i|x,W,b)
-
-
-This tutorial presents a stochastic gradient descent optimization method 
-suitable for large datasets, and a conjugate gradient optimization method 
-that is suitable for smaller datasets.
-
-
-References:
-
-    - textbooks: "Pattern Recognition and Machine Learning" - 
-                 Christopher M. Bishop, section 4.3.2
-
-"""
-__docformat__ = 'restructedtext en'
-
-import numpy, time, cPickle, gzip
-
-import theano
-import theano.tensor as T
-
-
-class LogisticRegression(object):
-    """Multi-class Logistic Regression Class
-
-    The logistic regression is fully described by a weight matrix :math:`W` 
-    and bias vector :math:`b`. Classification is done by projecting data 
-    points onto a set of hyperplanes, the distance to which is used to 
-    determine a class membership probability. 
-    """
-
-
-    def __init__( self, input, n_in, n_out ):
-        """ Initialize the parameters of the logistic regression
-
-        :type input: theano.tensor.TensorType
-        :param input: symbolic variable that describes the input of the 
-                      architecture (one minibatch)
-        
-        :type n_in: int
-        :param n_in: number of input units, the dimension of the space in 
-                     which the datapoints lie
-
-        :type n_out: int
-        :param n_out: number of output units, the dimension of the space in 
-                      which the labels lie
-
-        """ 
-
-        # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
-        self.W = theano.shared( value = numpy.zeros(( n_in, n_out ), dtype = theano.config.floatX ),
-                                name =' W')
-        # initialize the baises b as a vector of n_out 0s
-        self.b = theano.shared( value = numpy.zeros(( n_out, ), dtype = theano.config.floatX ),
-                               name = 'b')
-
-
-        # compute vector of class-membership probabilities in symbolic form
-        self.p_y_given_x = T.nnet.softmax( T.dot( input, self.W ) + self.b )
-
-        # compute prediction as class whose probability is maximal in 
-        # symbolic form
-        self.y_pred=T.argmax( self.p_y_given_x, axis =1 )
-
-        # parameters of the model
-        self.params = [ self.W, self.b ]
-
-
-    def negative_log_likelihood( self, y ):
-        """Return the mean of the negative log-likelihood of the prediction
-        of this model under a given target distribution.
-
-        .. math::
-
-            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
-            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
-                \ell (\theta=\{W,b\}, \mathcal{D})
-
-        :type y: theano.tensor.TensorType
-        :param y: corresponds to a vector that gives for each example the
-                  correct label
-
-        Note: we use the mean instead of the sum so that
-              the learning rate is less dependent on the batch size
-        """
-        # y.shape[0] is (symbolically) the number of rows in y, i.e., number of examples (call it n) in the minibatch
-        # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
-        # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class 
-        # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]]
-        # and T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v,
-        # i.e., the mean log-likelihood across the minibatch.
-        return -T.mean( T.log( self.p_y_given_x )[ T.arange( y.shape[0] ), y ] )
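The indexing trick spelled out in the comments above, LP[T.arange(y.shape[0]), y], is ordinary NumPy fancy indexing. A minimal sketch with made-up numbers (3 examples, 4 classes):

import numpy

LP = numpy.log(numpy.array([[0.7, 0.1, 0.1, 0.1],
                            [0.2, 0.5, 0.2, 0.1],
                            [0.1, 0.1, 0.2, 0.6]]))   # log-probabilities, one row per example
y = numpy.array([0, 1, 3])                            # correct class of each example
picked = LP[numpy.arange(y.shape[0]), y]              # [LP[0,0], LP[1,1], LP[2,3]]
nll = -picked.mean()                                  # the mean negative log-likelihood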
-
-
-    def errors( self, y ):
-        """Return a float representing the number of errors in the minibatch 
-        over the total number of examples of the minibatch ; zero one
-        loss over the size of the minibatch
-
-        :type y: theano.tensor.TensorType
-        :param y: corresponds to a vector that gives for each example the 
-                  correct label
-        """
-
-        # check if y has same dimension of y_pred 
-        if y.ndim != self.y_pred.ndim:
-            raise TypeError( 'y should have the same shape as self.y_pred', 
-                ( 'y', target.type, 'y_pred', self.y_pred.type ) )
-        # check if y is of the correct datatype        
-        if y.dtype.startswith('int'):
-            # the T.neq operator returns a vector of 0s and 1s, where 1
-            # represents a mistake in prediction
-            return T.mean( T.neq( self.y_pred, y ) )
-        else:
-            raise NotImplementedError()
-        
-def shared_dataset( data_xy ):
-        """ Function that loads the dataset into shared variables
-        
-        The reason we store our dataset in shared variables is to allow 
-        Theano to copy it into the GPU memory (when code is run on GPU). 
-        Since copying data into the GPU is slow, copying a minibatch every time
-        is needed (the default behaviour if the data is not in a shared 
-        variable) would lead to a large decrease in performance.
-        """
-        data_x, data_y = data_xy
-        shared_x = theano.shared( numpy.asarray( data_x, dtype = theano.config.floatX ) )
-        shared_y = theano.shared( numpy.asarray( data_y, dtype = theano.config.floatX ) )
-        # When storing data on the GPU it has to be stored as floats
-        # therefore we will store the labels as ``floatX`` as well
-        # (``shared_y`` does exactly that). But during our computations
-        # we need them as ints (we use labels as index, and if they are 
-        # floats it doesn't make sense) therefore instead of returning 
-        # ``shared_y`` we will have to cast it to int. This little hack
-        # lets us get around this issue
-        return shared_x, T.cast( shared_y, 'int32' )
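A minimal usage sketch for shared_dataset, on a tiny synthetic split instead of MNIST (the shapes and labels are made up; it only shows what the function hands back):

import numpy

toy_x = numpy.random.rand(6, 784)
toy_y = numpy.array([3, 1, 4, 1, 5, 9])
shared_x, casted_y = shared_dataset((toy_x, toy_y))
# shared_x is a Theano shared variable holding the features as floatX;
# casted_y is a symbolic int32 view of a shared variable holding the labels,
# so slices of it can be used directly in the `givens` dictionaries below.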
-
-def load_data_pkl_gz( dataset ):
-    ''' Loads the dataset
-
-    :type dataset: string
-    :param dataset: the path to the dataset (here MNIST)
-    '''
-
-    #--------------------------------------------------------------------------------------------------------------------
-    # Load Data
-    #--------------------------------------------------------------------------------------------------------------------
-
-
-    print '... loading data'
-
-    # Load the dataset 
-    f = gzip.open(dataset,'rb')
-    train_set, valid_set, test_set = cPickle.load(f)
-    f.close()
-    
-    test_set_x,  test_set_y  = shared_dataset( test_set )
-    valid_set_x, valid_set_y = shared_dataset( valid_set )
-    train_set_x, train_set_y = shared_dataset( train_set )
-
-    rval = [ ( train_set_x, train_set_y ), ( valid_set_x,valid_set_y ), ( test_set_x, test_set_y ) ]
-    return rval
-
-##def load_data_ft(      verbose = False,\
-##                                    data_path = '/data/lisa/data/nist/by_class/'\
-##                                    train_data = 'all/all_train_data.ft',\
-##                                    train_labels = 'all/all_train_labels.ft',\
-##                                    test_data = 'all/all_test_data.ft',\
-##                                    test_labels = 'all/all_test_labels.ft'):
-##   
-##    train_data_file = open(data_path + train_data)
-##    train_labels_file = open(data_path + train_labels)
-##    test_labels_file = open(data_path + test_data)
-##    test_data_file = open(data_path + test_labels)
-##    
-##    raw_train_data = ft.read( train_data_file)
-##    raw_train_labels = ft.read(train_labels_file)
-##    raw_test_data = ft.read( test_labels_file)
-##    raw_test_labels = ft.read( test_data_file)
-##    
-##    f.close()
-##    g.close()
-##    i.close()
-##    h.close()
-##    
-##    
-##    test_set_x,  test_set_y  = shared_dataset(test_set)
-##    valid_set_x, valid_set_y = shared_dataset(valid_set)
-##    train_set_x, train_set_y = shared_dataset(train_set)
-##
-##    rval = [(train_set_x, train_set_y), (valid_set_x,valid_set_y), (test_set_x, test_set_y)]
-##    return rval
-##    #create a validation set the same size as the test size
-##    #use the end of the training array for this purpose
-##    #discard the last remaining so we get a %batch_size number
-##    test_size=len(raw_test_labels)
-##    test_size = int(test_size/batch_size)
-##    test_size*=batch_size
-##    train_size = len(raw_train_data)
-##    train_size = int(train_size/batch_size)
-##    train_size*=batch_size
-##    validation_size =test_size 
-##    offset = train_size-test_size
-##    if verbose == True:
-##        print 'train size = %d' %train_size
-##        print 'test size = %d' %test_size
-##        print 'valid size = %d' %validation_size
-##        print 'offset = %d' %offset
-##    
-##    
-
-#--------------------------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------------------------
-
-def log_reg( learning_rate = 0.13, nb_max_examples =1000000, batch_size = 50, \
-                    dataset_name = 'mnist.pkl.gz', image_size = 28 * 28, nb_class = 10,  \
-                    patience = 5000, patience_increase = 2, improvement_threshold = 0.995):
-    
-    """
-    Demonstrate stochastic gradient descent optimization of a log-linear 
-    model
-
-    This is demonstrated on MNIST.
-    
-    :type learning_rate: float
-    :param learning_rate: learning rate used (factor for the stochastic 
-                          gradient)
-
-    :type nb_max_examples: int
-    :param nb_max_examples: maximal number of training examples to process
-                            (the number of epochs is derived from it)
-    
-    :type batch_size: int  
-    :param batch_size:  size of the minibatch
-
-    :type dataset_name: string
-    :param dataset_name: the path of the MNIST dataset file from 
-                         http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
-                        
-    :type image_size: int
-    :param image_size: size of the input image in pixels (width * height)
-    
-    :type nb_class: int
-    :param nb_class: number of classes
-    
-    :type patience: int
-    :param patience: look as this many examples regardless
-    
-    :type patience_increase: int
-    :param patience_increase: wait this much longer when a new best is found
-    
-    :type improvement_threshold: float
-    :param improvement_threshold: a relative improvement of this much is considered significant
-
-
-    """
-    datasets = load_data_pkl_gz( dataset_name )
-
-    train_set_x, train_set_y = datasets[0]
-    valid_set_x, valid_set_y = datasets[1]
-    test_set_x , test_set_y   = datasets[2]
-
-    # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.value.shape[0] / batch_size
-    n_valid_batches = valid_set_x.value.shape[0] / batch_size
-    n_test_batches  = test_set_x.value.shape[0]  / batch_size
-
-    #--------------------------------------------------------------------------------------------------------------------
-    # Build actual model
-    #--------------------------------------------------------------------------------------------------------------------
-    
-    print '... building the model'
-
-    # allocate symbolic variables for the data
-    index = T.lscalar( )    # index to a [mini]batch 
-    x        = T.matrix('x')  # the data is presented as rasterized images
-    y        = T.ivector('y') # the labels are presented as 1D vector of 
-                           # [int] labels
-
-    # construct the logistic regression class
-    
-    classifier = LogisticRegression( input = x, n_in = image_size, n_out = nb_class )
-
-    # the cost we minimize during training is the negative log likelihood of 
-    # the model in symbolic format
-    cost = classifier.negative_log_likelihood( y ) 
-
-    # compiling a Theano function that computes the mistakes that are made by 
-    # the model on a minibatch
-    test_model = theano.function( inputs = [ index ], 
-            outputs = classifier.errors( y ),
-            givens = {
-                x:test_set_x[ index * batch_size: ( index + 1 ) * batch_size ],
-                y:test_set_y[ index * batch_size: ( index + 1 ) * batch_size ] } )
-
-    validate_model = theano.function( inputs = [ index ], 
-            outputs = classifier.errors( y ),
-            givens = {
-                x:valid_set_x[ index * batch_size: ( index + 1 ) * batch_size ],
-                y:valid_set_y[ index * batch_size: ( index + 1 ) * batch_size ] } )
-
-    # compute the gradient of cost with respect to theta = ( W, b ) 
-    g_W = T.grad( cost = cost, wrt = classifier.W )
-    g_b  = T.grad( cost = cost, wrt = classifier.b )
-
-    # specify how to update the parameters of the model as a dictionary
-    updates = { classifier.W: classifier.W - learning_rate * g_W,\
-                         classifier.b: classifier.b  - learning_rate * g_b}
-
-    # compiling a Theano function `train_model` that returns the cost, but in 
-    # the same time updates the parameter of the model based on the rules 
-    # defined in `updates`
-    train_model = theano.function( inputs = [ index ], 
-            outputs = cost, 
-            updates = updates,
-            givens = {
-                x: train_set_x[ index * batch_size: ( index + 1 ) * batch_size ],
-                y: train_set_y[ index * batch_size: ( index + 1 ) * batch_size ] } )
-
-    #--------------------------------------------------------------------------------------------------------------------
-    # Train model
-    #--------------------------------------------------------------------------------------------------------------------
-   
-    print '... training the model'
-    # early-stopping parameters
-    patience              = 5000  # look as this many examples regardless
-    patience_increase     = 2     # wait this much longer when a new best is 
-                                  # found
-    improvement_threshold = 0.995 # a relative improvement of this much is 
-                                  # considered significant
-    validation_frequency  = min( n_train_batches, patience * 0.5 )  
-                                  # go through this many 
-                                  # minibatches before checking the network 
-                                  # on the validation set; in this case we 
-                                  # check every epoch 
-
-    best_params             = None
-    best_validation_loss = float('inf')
-    test_score                 = 0.
-    start_time                  = time.clock()
-
-    done_looping = False 
-    n_epochs       = nb_max_examples / train_set_x.value.shape[0]
-    epoch             = 0  
-    
-    while ( epoch < n_epochs ) and ( not done_looping ):
-        
-      epoch = epoch + 1
-      for minibatch_index in xrange( n_train_batches ):
-
-        minibatch_avg_cost = train_model( minibatch_index )
-        # iteration number
-        iter = epoch * n_train_batches + minibatch_index
-
-        if ( iter + 1 ) % validation_frequency == 0: 
-            # compute zero-one loss on validation set 
-            validation_losses     = [ validate_model( i ) for i in xrange( n_valid_batches ) ]
-            this_validation_loss = numpy.mean( validation_losses )
-
-            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
-                 ( epoch, minibatch_index + 1,n_train_batches, \
-                  this_validation_loss*100. ) )
-
-
-            # if we got the best validation score until now
-            if this_validation_loss < best_validation_loss:
-                #improve patience if loss improvement is good enough
-                if this_validation_loss < best_validation_loss *  \
-                       improvement_threshold :
-                    patience = max( patience, iter * patience_increase )
-
-                best_validation_loss = this_validation_loss
-                # test it on the test set
-
-                test_losses = [test_model(i) for i in xrange(n_test_batches)]
-                test_score  = numpy.mean(test_losses)
-
-                print(('     epoch %i, minibatch %i/%i, test error of best ' 
-                       'model %f %%') % \
-                  (epoch, minibatch_index+1, n_train_batches,test_score*100.))
-
-        if patience <= iter :
-                done_looping = True
-                break
-
-    end_time = time.clock()
-    print(('Optimization complete with best validation score of %f %%,'
-           'with test performance %f %%') %  
-                 ( best_validation_loss * 100., test_score * 100.))
-    print ('The code ran for %f minutes' % ((end_time-start_time) / 60.))
-    
- ######   return validation_error, test_error, nb_exemples, time
-
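The early-stopping comments inside log_reg above describe the classic patience rule: train for at least `patience` iterations, and whenever the validation error improves significantly, extend the budget to `patience_increase` times the current iteration. A minimal standalone sketch of just that bookkeeping, with the validation errors fed in as a plain list (the names mirror the code above; the function itself is illustrative):

def sketch_patience_rule(validation_errors, patience=5000, patience_increase=2,
                         improvement_threshold=0.995, validation_frequency=100):
    best = float('inf')
    it = 0
    for k, err in enumerate(validation_errors):
        it = (k + 1) * validation_frequency - 1      # iteration of this check
        if err < best * improvement_threshold:       # significant improvement:
            patience = max(patience, it * patience_increase)   # extend the budget
        if err < best:
            best = err
        if patience <= it:
            break                                    # budget exhausted: stop early
    return best, it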
-if __name__ == '__main__':
-    log_reg()
-    
- 
-def jobman_log_reg(state, channel):
-    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate,\
-                                                                                        nb_max_examples = state.nb_max_examples,\
-                                                                                                    batch_size  = state.batch_size,\
-                                                                                                dataset_name = state.dataset_name, \
-                                                                                                    image_size = state.image_size,  \
-                                                                                                       nb_class  = state.nb_class )
-
-    state.validation_error = validation_error
-    state.test_error = test_error
-    state.nb_exemples = nb_exemples
-    state.time = time
-    return channel.COMPLETE
-                                                                
-                                      
-    
-    
-
-
--- a/baseline_algorithms/mlp/mlp_nist.py	Fri Feb 26 14:23:47 2010 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,459 +0,0 @@
-"""
-This tutorial introduces the multilayer perceptron using Theano.  
-
- A multilayer perceptron is a logistic regressor where
-instead of feeding the input to the logistic regression you insert an
-intermediate layer, called the hidden layer, that has a nonlinear
-activation function (usually tanh or sigmoid). One can use many such
-hidden layers making the architecture deep. The tutorial will also tackle 
-the problem of MNIST digit classification.
-
-.. math::
-
-    f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
-
-References:
-
-    - textbooks: "Pattern Recognition and Machine Learning" - 
-                 Christopher M. Bishop, section 5
-
-TODO: recommended preprocessing, lr ranges, regularization ranges (explain 
-      to do lr first, then add regularization)
-
-"""
-__docformat__ = 'restructedtext en'
-
-import pdb
-import numpy
-import pylab
-import theano
-import theano.tensor as T
-import time 
-import theano.tensor.nnet
-import pylearn
-from pylearn.io import filetensor as ft
-
-data_path = '/data/lisa/data/nist/by_class/'
-
-class MLP(object):
-    """Multi-Layer Perceptron Class
-
-    A multilayer perceptron is a feedforward artificial neural network model 
-    that has one layer or more of hidden units and nonlinear activations. 
-    Intermediate layers usually have tanh or the sigmoid function as their
-    activation function, while the top layer is a softmax layer.
-    """
-
-
-
-    def __init__(self, input, n_in, n_hidden, n_out,learning_rate):
-        """Initialize the parameters for the multilayer perceptron
-
-        :param input: symbolic variable that describes the input of the 
-        architecture (one minibatch)
-
-        :param n_in: number of input units, the dimension of the space in 
-        which the datapoints lie
-
-        :param n_hidden: number of hidden units 
-
-        :param n_out: number of output units, the dimension of the space in 
-        which the labels lie
-
-        """
-
-        # initialize the parameters theta = (W1,b1,W2,b2) ; note that this 
-        # example contains only one hidden layer, but one can have as many 
-        # layers as he/she wishes, making the network deeper. The only
-        # problem with making the network deep this way is during learning:
-        # backpropagation may be unable to move the network far from its
-        # starting point towards a good solution; this is where pre-training
-        # helps, giving a good starting point for backpropagation, but more
-        # about this in the other tutorials
-        
-        # `W1` is initialized with `W1_values` which is uniformly sampled
-        # from -6./sqrt(n_in+n_hidden) to 6./sqrt(n_in+n_hidden)
-        # the output of uniform is converted using asarray to dtype
-        # theano.config.floatX so that the code is runnable on GPU
-        W1_values = numpy.asarray( numpy.random.uniform( \
-              low = -numpy.sqrt(6./(n_in+n_hidden)), \
-              high = numpy.sqrt(6./(n_in+n_hidden)), \
-              size = (n_in, n_hidden)), dtype = theano.config.floatX)
-        # `W2` is initialized with `W2_values` which is uniformly sampled
-        # from -6./sqrt(n_hidden+n_out) to 6./sqrt(n_hidden+n_out)
-        # the output of uniform is converted using asarray to dtype
-        # theano.config.floatX so that the code is runnable on GPU
-        W2_values = numpy.asarray( numpy.random.uniform( 
-              low = -numpy.sqrt(6./(n_hidden+n_out)), \
-              high= numpy.sqrt(6./(n_hidden+n_out)),\
-              size= (n_hidden, n_out)), dtype = theano.config.floatX)
-
-        self.W1 = theano.shared( value = W1_values )
-        self.b1 = theano.shared( value = numpy.zeros((n_hidden,), 
-                                                dtype= theano.config.floatX))
-        self.W2 = theano.shared( value = W2_values )
-        self.b2 = theano.shared( value = numpy.zeros((n_out,), 
-                                                dtype= theano.config.floatX))
-
-        #include the learning rate in the classifier so
-        #we can modify it on the fly when we want
-        lr_value=learning_rate
-        self.lr=theano.shared(value=lr_value)
-        # symbolic expression computing the values of the hidden layer
-        self.hidden = T.tanh(T.dot(input, self.W1)+ self.b1)
-        
-        
-
-        # symbolic expression computing the values of the top layer 
-        self.p_y_given_x= T.nnet.softmax(T.dot(self.hidden, self.W2)+self.b2)
-
-        # compute prediction as class whose probability is maximal in 
-        # symbolic form
-        self.y_pred = T.argmax( self.p_y_given_x, axis =1)
-        self.y_pred_num = T.argmax( self.p_y_given_x[0:9], axis =1)
-        
-        
-        
-        
-        # L1 norm ; one regularization option is to enforce L1 norm to 
-        # be small 
-        self.L1     = abs(self.W1).sum() + abs(self.W2).sum()
-
-        # square of L2 norm ; one regularization option is to enforce 
-        # square of L2 norm to be small
-        self.L2_sqr = (self.W1**2).sum() + (self.W2**2).sum()
-
-
-
-    def negative_log_likelihood(self, y):
-        """Return the mean of the negative log-likelihood of the prediction
-        of this model under a given target distribution.
-
-        .. math::
-
-            \frac{1}{|\mathcal{D}|}\mathcal{L} (\theta=\{W,b\}, \mathcal{D}) = 
-            \frac{1}{|\mathcal{D}|}\sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
-                \ell (\theta=\{W,b\}, \mathcal{D}) 
-
-
-        :param y: corresponds to a vector that gives for each example the
-        :correct label
-        """
-        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
-
-
-
-
-    def errors(self, y):
-        """Return a float representing the number of errors in the minibatch 
-        over the total number of examples of the minibatch 
-        """
-
-        # check if y has same dimension of y_pred 
-        if y.ndim != self.y_pred.ndim:
-            raise TypeError('y should have the same shape as self.y_pred', 
-                ('y', target.type, 'y_pred', self.y_pred.type))
-        # check if y is of the correct datatype        
-        if y.dtype.startswith('int'):
-            # the T.neq operator returns a vector of 0s and 1s, where 1
-            # represents a mistake in prediction
-            return T.mean(T.neq(self.y_pred, y))
-        else:
-            raise NotImplementedError()
-
-
-def mlp_full_nist(      verbose = False,\
-                        adaptive_lr = 0,\
-                        train_data = 'all/all_train_data.ft',\
-                        train_labels = 'all/all_train_labels.ft',\
-                        test_data = 'all/all_test_data.ft',\
-                        test_labels = 'all/all_test_labels.ft',\
-                        learning_rate=0.01,\
-                        L1_reg = 0.00,\
-                        L2_reg = 0.0001,\
-                        nb_max_exemples=1000000,\
-                        batch_size=20,\
-                        nb_hidden = 500,\
-                        nb_targets = 62):
-   
-    
-    configuration = [learning_rate,nb_max_exemples,nb_hidden,adaptive_lr]
-    
-    total_validation_error_list = []
-    total_train_error_list = []
-    learning_rate_list=[]
-    best_training_error=float('inf');
-    
-    
-   
-    f = open(data_path+train_data)
-    g= open(data_path+train_labels)
-    h = open(data_path+test_data)
-    i= open(data_path+test_labels)
-    
-    raw_train_data = ft.read(f)
-    raw_train_labels = ft.read(g)
-    raw_test_data = ft.read(h)
-    raw_test_labels = ft.read(i)
-    
-    f.close()
-    g.close()
-    i.close()
-    h.close()
-    #create a validation set the same size as the test size
-    #use the end of the training array for this purpose
-    #discard the last remaining so we get a %batch_size number
-    test_size=len(raw_test_labels)
-    test_size = int(test_size/batch_size)
-    test_size*=batch_size
-    train_size = len(raw_train_data)
-    train_size = int(train_size/batch_size)
-    train_size*=batch_size
-    validation_size =test_size 
-    offset = train_size-test_size
-    if verbose == True:
-        print 'train size = %d' %train_size
-        print 'test size = %d' %test_size
-        print 'valid size = %d' %validation_size
-        print 'offset = %d' %offset
-    
-    
-    train_set = (raw_train_data,raw_train_labels)
-    train_batches = []
-    for i in xrange(0, train_size-test_size, batch_size):
-        train_batches = train_batches + \
-            [(raw_train_data[i:i+batch_size], raw_train_labels[i:i+batch_size])]
-            
-    test_batches = []
-    for i in xrange(0, test_size, batch_size):
-        test_batches = test_batches + \
-            [(raw_test_data[i:i+batch_size], raw_test_labels[i:i+batch_size])]
-    
-    validation_batches = []
-    for i in xrange(0, test_size, batch_size):
-        validation_batches = validation_batches + \
-            [(raw_train_data[offset+i:offset+i+batch_size], raw_train_labels[offset+i:offset+i+batch_size])]
-
-
-    ishape     = (32,32) # this is the size of NIST images
-
-    # allocate symbolic variables for the data
-    x = T.fmatrix()  # the data is presented as rasterized images
-    y = T.lvector()  # the labels are presented as 1D vector of 
-                          # [long int] labels
-
-    if verbose==True:
-        print 'finished parsing the data'
-    # construct the logistic regression class
-    classifier = MLP( input=x.reshape((batch_size,32*32)),\
-                        n_in=32*32,\
-                        n_hidden=nb_hidden,\
-                        n_out=nb_targets,
-                        learning_rate=learning_rate)
-                        
-                        
-   
-
-    # the cost we minimize during training is the negative log likelihood of 
-    # the model plus the regularization terms (L1 and L2); cost is expressed
-    # here symbolically
-    cost = classifier.negative_log_likelihood(y) \
-         + L1_reg * classifier.L1 \
-         + L2_reg * classifier.L2_sqr 
-
-    # compiling a theano function that computes the mistakes that are made by 
-    # the model on a minibatch
-    test_model = theano.function([x,y], classifier.errors(y))
-
-    # compute the gradient of cost with respect to theta = (W1, b1, W2, b2) 
-    g_W1 = T.grad(cost, classifier.W1)
-    g_b1 = T.grad(cost, classifier.b1)
-    g_W2 = T.grad(cost, classifier.W2)
-    g_b2 = T.grad(cost, classifier.b2)
-
-    # specify how to update the parameters of the model as a dictionary
-    updates = \
-        { classifier.W1: classifier.W1 - classifier.lr*g_W1 \
-        , classifier.b1: classifier.b1 - classifier.lr*g_b1 \
-        , classifier.W2: classifier.W2 - classifier.lr*g_W2 \
-        , classifier.b2: classifier.b2 - classifier.lr*g_b2 }
-
-    # compiling a theano function `train_model` that returns the cost, but in 
-    # the same time updates the parameter of the model based on the rules 
-    # defined in `updates`
-    train_model = theano.function([x, y], cost, updates = updates )
-    n_minibatches        = len(train_batches)
-
-   
-   
-    
-   
-   
-   #conditions for stopping the adaptation:
-   #1) we have reached  nb_max_exemples (this is rounded up to be a multiple of the train size)
-   #2) validation error is going up twice in a row(probable overfitting)
-   
-   # This means we no longer stop on slow convergence as low learning rates stopped
-   # too fast. 
-   
-   # no longer relevant
-    patience              =nb_max_exemples/batch_size
-    patience_increase     = 2     # wait this much longer when a new best is 
-                                  # found
-    improvement_threshold = 0.995 # a relative improvement of this much is 
-                                  # considered significant
-    validation_frequency = n_minibatches/4
-   
-     
-
-   
-    best_params          = None
-    best_validation_loss = float('inf')
-    best_iter            = 0
-    test_score           = 0.
-    start_time = time.clock()
-    n_iter = nb_max_exemples/batch_size  # nb of max times we are allowed to run through all exemples
-    n_iter = n_iter/n_minibatches + 1 #round up
-    n_iter=max(1,n_iter) # run at least once on short debug call
-    
-   
-    if verbose == True:
-        print 'looping at most %d times through the data set' %n_iter
-    for iter in xrange(n_iter* n_minibatches):
-
-        # get epoch and minibatch index
-        epoch           = iter / n_minibatches
-        minibatch_index =  iter % n_minibatches
-        
-      
-        
-        # get the minibatches corresponding to `iter` modulo
-        # `len(train_batches)`
-        x,y = train_batches[ minibatch_index ]
-        # convert to float
-        x_float = x/255.0
-        cost_ij = train_model(x_float,y)
-
-        if (iter+1) % validation_frequency == 0: 
-            # compute zero-one loss on validation set 
-            
-            this_validation_loss = 0.
-            for x,y in validation_batches:
-                # sum up the errors for each minibatch
-                x_float = x/255.0
-                this_validation_loss += test_model(x_float,y)
-            # get the average by dividing with the number of minibatches
-            this_validation_loss /= len(validation_batches)
-            #save the validation loss
-            total_validation_error_list.append(this_validation_loss)
-            
-            #get the training error rate
-            this_train_loss=0
-            for x,y in train_batches:
-                # sum up the errors for each minibatch
-                x_float = x/255.0
-                this_train_loss += test_model(x_float,y)
-            # get the average by dividing with the number of minibatches
-            this_train_loss /= len(train_batches)
-            #save the training loss
-            total_train_error_list.append(this_train_loss)
-            if(this_train_loss<best_training_error):
-                best_training_error=this_train_loss
-                
-            if verbose == True:
-                print('epoch %i, minibatch %i/%i, validation error %f, training error %f %%' % \
-                    (epoch, minibatch_index+1, n_minibatches, \
-                        this_validation_loss*100.,this_train_loss*100))
-                        
-                        
-            #save the learning rate
-            learning_rate_list.append(classifier.lr.value)
-
-
-            # if we got the best validation score until now
-            if this_validation_loss < best_validation_loss:
-                # save best validation score and iteration number
-                best_validation_loss = this_validation_loss
-                best_iter = iter
-                # reset patience if we are going down again
-                # so we continue exploring
-                patience=nb_max_exemples/batch_size
-                # test it on the test set
-                test_score = 0.
-                for x,y in test_batches:
-                    x_float=x/255.0
-                    test_score += test_model(x_float,y)
-                test_score /= len(test_batches)
-                if verbose == True:
-                    print(('     epoch %i, minibatch %i/%i, test error of best '
-                        'model %f %%') % 
-                                (epoch, minibatch_index+1, n_minibatches,
-                                test_score*100.))
-                                
-            # if the validation error is going up, we are overfitting (or oscillating)
-            # stop converging but run at least to next validation
-            # to check overfitting or oscillation
-            # the saved weights of the model will be a bit off in that case
-            elif this_validation_loss >= best_validation_loss:
-                #calculate the test error at this point and exit
-                # test it on the test set
-                # however, if adaptive_lr is true, try reducing the lr to
-                # get us out of an oscillation
-                if adaptive_lr==1:
-                    classifier.lr.value=classifier.lr.value/2.0
-
-                test_score = 0.
-                #cap the patience so we are allowed one more validation error
-                #calculation before aborting
-                patience = iter+validation_frequency+1
-                for x,y in test_batches:
-                    x_float=x/255.0
-                    test_score += test_model(x_float,y)
-                test_score /= len(test_batches)
-                if verbose == True:
-                    print ' validation error is going up, possibly stopping soon'
-                    print(('     epoch %i, minibatch %i/%i, test error of best '
-                        'model %f %%') % 
-                                (epoch, minibatch_index+1, n_minibatches,
-                                test_score*100.))
-                                
-                
-
-
-        if iter>patience:
-            print 'we have diverged'
-            break
-
-
-    end_time = time.clock()
-    if verbose == True:
-        print(('Optimization complete. Best validation score of %f %% '
-            'obtained at iteration %i, with test performance %f %%') %  
-                    (best_validation_loss * 100., best_iter, test_score*100.))
-        print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
-        print iter
-        
-    #save the model and the weights
-    numpy.savez('model.npy', config=configuration, W1=classifier.W1.value,W2=classifier.W2.value, b1=classifier.b1.value,b2=classifier.b2.value)
-    numpy.savez('results.npy',config=configuration,total_train_error_list=total_train_error_list,total_validation_error_list=total_validation_error_list,\
-    learning_rate_list=learning_rate_list)
-    
-    return (best_training_error*100.0,best_validation_loss * 100.,test_score*100.,best_iter*batch_size,(end_time-start_time)/60)
-
-
-if __name__ == '__main__':
-    mlp_full_nist()
-
-def jobman_mlp_full_nist(state,channel):
-    (train_error,validation_error,test_error,nb_exemples,time)=mlp_full_nist(learning_rate=state.learning_rate,\
-                                                                nb_max_exemples=state.nb_max_exemples,\
-                                                                nb_hidden=state.nb_hidden,\
-                                                                adaptive_lr=state.adaptive_lr)
-    state.train_error=train_error
-    state.validation_error=validation_error
-    state.test_error=test_error
-    state.nb_exemples=nb_exemples
-    state.time=time
-    return channel.COMPLETE
-                                                                
-                                                                
\ No newline at end of file