changeset 119:4f37755d301b

Rewrote stacked_dae.py using the full dataset as shared variables (just restarted from scratch from the tutorial code), and prepared it to use NIST (not tested)
author fsavard
date Wed, 17 Feb 2010 17:06:54 -0500
parents 0d083964af4b
children 5e00ed18ae32
files scripts/stacked_dae.py
diffstat 1 files changed, 322 insertions(+), 288 deletions(-) [+]
line wrap: on
line diff
--- a/scripts/stacked_dae.py	Wed Feb 17 16:25:44 2010 -0500
+++ b/scripts/stacked_dae.py	Wed Feb 17 17:06:54 2010 -0500
@@ -1,29 +1,17 @@
 #!/usr/bin/python
 # coding: utf-8
 
-# Code for stacked denoising autoencoder
-# Tests with MNIST
-# TODO: adapt for NIST
-# Based almost entirely on deeplearning.net tutorial, modifications by
-# François Savard
+import numpy 
+import theano
+import time
+import theano.tensor as T
+from theano.tensor.shared_randomstreams import RandomStreams
+import os.path
+# assumed available in this codebase: LISA's filetensor reader, used by the
+# NIST loader below
+import pylearn.io.filetensor as ft
 
-# Base LogisticRegression, SigmoidalLayer, dA, SdA code taken
-# from the deeplearning.net tutorial. Refactored a bit.
-# Changes (mainly):
-# - splitted initialization in smaller methods
-# - removed the "givens" thing involving an index in the whole dataset
-#       (to allow flexibility in how data is inputted... not necessarily one big tensor)
-# - changed the "driver" a lot, altough for the moment the same logic is used
+import gzip
+import cPickle
 
-import time
-import theano
-import theano.tensor as T
-import theano.tensor.nnet
-from theano.tensor.shared_randomstreams import RandomStreams
-import numpy, numpy.random
-
-from pylearn.datasets import MNIST
-
+MNIST_LOCATION = '/u/savardf/datasets/mnist.pkl.gz'
 
 # from pylearn codebase
 def update_locals(obj, dct):
@@ -31,33 +19,33 @@
         del dct['self']
     obj.__dict__.update(dct)
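+# usage sketch (hypothetical class): calling update_locals(self, locals()) at
+# the top of __init__(self, n_in, n_out) sets self.n_in and self.n_out without
+# writing each assignment by hand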
 
-
 class LogisticRegression(object):
     def __init__(self, input, n_in, n_out):
         # initialize with 0 the weights W as a matrix of shape (n_in, n_out) 
-        self.W = theano.shared(value=numpy.zeros((n_in,n_out), dtype = theano.config.floatX),
-                                name='W')
+        self.W = theano.shared( value=numpy.zeros((n_in,n_out),
+                                            dtype = theano.config.floatX) )
         # initialize the baises b as a vector of n_out 0s
-        self.b = theano.shared(value=numpy.zeros((n_out,), dtype = theano.config.floatX),
-                               name='b')
-
+        self.b = theano.shared( value=numpy.zeros((n_out,), 
+                                            dtype = theano.config.floatX) )
         # compute vector of class-membership probabilities in symbolic form
         self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W)+self.b)
-
+        
         # compute prediction as class whose probability is maximal in 
         # symbolic form
         self.y_pred=T.argmax(self.p_y_given_x, axis=1)
 
+        # list of parameters for this layer
         self.params = [self.W, self.b]
 
     def negative_log_likelihood(self, y):
-        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
+        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
 
     def errors(self, y):
         # check if y has same dimension of y_pred 
         if y.ndim != self.y_pred.ndim:
             raise TypeError('y should have the same shape as self.y_pred', 
                 ('y', y.type, 'y_pred', self.y_pred.type))
+
         # check if y is of the correct datatype        
         if y.dtype.startswith('int'):
             # the T.neq operator returns a vector of 0s and 1s, where 1
@@ -84,87 +72,88 @@
         self.params = [self.W, self.b]
 
 
+
 class dA(object):
-    def __init__(self, n_visible= 784, n_hidden= 500, \
-              corruption_level = 0.1, input = None, \
-              shared_W = None, shared_b = None):
-        update_locals(self, locals())
-
-        self.init_randomizer()
-        self.init_params()
-        self.init_functions()
-
-    def init_randomizer(self):
-        # create a Theano random generator that gives symbolic random values
-        self.theano_rng = RandomStreams()
-        # create a numpy random generator
-        self.numpy_rng = numpy.random.RandomState()
+  def __init__(self, n_visible= 784, n_hidden= 500, corruption_level = 0.1,\
+               input = None, shared_W = None, shared_b = None):
+    self.n_visible = n_visible
+    self.n_hidden  = n_hidden
+    
+    # create a Theano random generator that gives symbolic random values
+    theano_rng = RandomStreams()
+    
+    if shared_W != None and shared_b != None : 
+        self.W = shared_W
+        self.b = shared_b
+    else:
+        # initial values for weights and biases
+        # note : W' was written as `W_prime` and b' as `b_prime`
 
-    def init_params(self):
-        if self.shared_W != None and self.shared_b != None :
-            self.W = self.shared_W
-            self.b = self.shared_b
-        else:
-            # initial values for weights and biases
-            # note : W' was written as `W_prime` and b' as `b_prime`
-
-            # W is initialized with `initial_W` which is uniformely sampled
-            # from -6./sqrt(n_visible+n_hidden) and 6./sqrt(n_hidden+n_visible)
-            # the output of uniform if converted using asarray to dtype
-            # theano.config.floatX so that the code is runable on GPU
-            initial_W = numpy.asarray( self.numpy_rng.uniform( \
-                 low  = -numpy.sqrt(6./(n_hidden+n_visible)), \
-                 high = numpy.sqrt(6./(n_hidden+n_visible)), \
-                 size = (n_visible, n_hidden)), dtype = theano.config.floatX)
-            initial_b = numpy.zeros(n_hidden)
-
-            # theano shared variables for weights and biases
-            self.W = theano.shared(value = initial_W, name = "W")
-            self.b = theano.shared(value = initial_b, name = "b")
+        # W is initialized with `initial_W`, which is uniformly sampled
+        # between -6./sqrt(n_visible+n_hidden) and 6./sqrt(n_hidden+n_visible);
+        # the output of uniform is converted using asarray to dtype
+        # theano.config.floatX so that the code is runnable on GPU
+        initial_W = numpy.asarray( numpy.random.uniform( \
+              low = -numpy.sqrt(6./(n_hidden+n_visible)), \
+              high = numpy.sqrt(6./(n_hidden+n_visible)), \
+              size = (n_visible, n_hidden)), dtype = theano.config.floatX)
+        initial_b       = numpy.zeros(n_hidden, dtype = theano.config.floatX)
+    
+    
+        # theano shared variables for weights and biases
+        self.W       = theano.shared(value = initial_W,       name = "W")
+        self.b       = theano.shared(value = initial_b,       name = "b")
+    
+ 
+    initial_b_prime = numpy.zeros(n_visible, dtype = theano.config.floatX)
+    # tied weights, therefore W_prime is W transpose
+    self.W_prime = self.W.T 
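+    # shape check: W is (n_visible, n_hidden), so W.T is (n_hidden, n_visible),
+    # mapping hidden codes back to reconstruction space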
+    self.b_prime = theano.shared(value = initial_b_prime, name = "b'")
 
-        initial_b_prime= numpy.zeros(self.n_visible)
-        # tied weights, therefore W_prime is W transpose
-        self.W_prime = self.W.T
-        self.b_prime = theano.shared(value = initial_b_prime, name = "b'")
-
-    def init_functions(self):
-        # if no input is given, generate a variable representing the input
-        if self.input == None :
-            # we use a matrix because we expect a minibatch of several examples,
-            # each example being a row
-            self.x = T.dmatrix(name = 'input')
-        else:
-            self.x = self.input
+    # if no input is given, generate a variable representing the input
+    if input == None : 
+        # we use a matrix because we expect a minibatch of several examples,
+        # each example being a row
+        self.x = T.dmatrix(name = 'input') 
+    else:
+        self.x = input
+    # Equation (1)
+    # keep a fraction (1 - corruption_level) of the inputs the same and zero out a randomly selected subset (10% with the default corruption_level)
+    # note : first argument of theano.rng.binomial is the shape(size) of 
+    #        random numbers that it should produce
+    #        second argument is the number of trials 
+    #        third argument is the probability of success of any trial
+    #
+    #        this will produce an array of 0s and 1s where 1 has a 
+    #        probability of 1 - ``corruption_level`` and 0 with
+    #        ``corruption_level``
+    self.tilde_x  = theano_rng.binomial( self.x.shape,  1,  1 - corruption_level) * self.x
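+    # worked example: with corruption_level = 0.1 and x = [0.5, 0.2, 0.9], a
+    # mask draw of [1, 0, 1] yields tilde_x = [0.5, 0.0, 0.9]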
+    # Equation (2)
+    # note  : y is stored as an attribute of the class so that it can be 
+    #         used later when stacking dAs. 
+    self.y   = T.nnet.sigmoid(T.dot(self.tilde_x, self.W      ) + self.b)
+    # Equation (3)
+    self.z   = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
+    # Equation (4)
+    # note : we sum over the size of a datapoint; if we are using minibatches,
+    #        L will  be a vector, with one entry per example in minibatch
+    self.L = - T.sum( self.x*T.log(self.z) + (1-self.x)*T.log(1-self.z), axis=1 ) 
+    # note : L is now a vector, where each element is the cross-entropy cost 
+    #        of the reconstruction of the corresponding example of the 
+    #        minibatch. We need to compute the average of all these to get 
+    #        the cost of the minibatch
+    self.cost = T.mean(self.L)
 
-        # keep 90% of the inputs the same and zero-out randomly selected subset of 
-        # 10% of the inputs
-        # note : first argument of theano.rng.binomial is the shape(size) of
-        #        random numbers that it should produce
-        #        second argument is the number of trials
-        #        third argument is the probability of success of any trial
-        #
-        #        this will produce an array of 0s and 1s where 1 has a
-        #        probability of 1 - ``corruption_level`` and 0 with
-        #        ``corruption_level``
-        self.tilde_x = self.theano_rng.binomial(self.x.shape, 1, 1-self.corruption_level) * self.x
-        # using tied weights
-        self.y = T.nnet.sigmoid(T.dot(self.tilde_x, self.W) + self.b)
-        self.z = T.nnet.sigmoid(T.dot(self.y, self.W_prime) + self.b_prime)
-        self.L = - T.sum( self.x*T.log(self.z) + (1-self.x)*T.log(1-self.z), axis=1 )
-        # note : L is now a vector, where each element is the cross-entropy cost
-        #        of the reconstruction of the corresponding example of the
-        #        minibatch. We need to compute the average of all these to get
-        #        the cost of the minibatch
-        self.cost = T.mean(self.L)
+    self.params = [ self.W, self.b, self.b_prime ]
+
+
+
 
-        self.params = [ self.W, self.b, self.b_prime ]
-
-class SdA():
-    def __init__(self, batch_size, n_ins,
-               hidden_layers_sizes, n_outs,
-               corruption_levels, rng, pretrain_lr, finetune_lr):
-        update_locals(self, locals())
-
+class SdA(object):
+    def __init__(self, train_set_x, train_set_y, batch_size, n_ins, 
+                 hidden_layers_sizes, n_outs, 
+                 corruption_levels, rng, pretrain_lr, finetune_lr):
+       
         self.layers             = []
         self.pretrain_functions = []
         self.params             = []
@@ -173,250 +162,295 @@
         if len(hidden_layers_sizes) < 1 :
             raise Exception('You must have at least one hidden layer')
 
-        # allocate symbolic variables for the data
-        self.x = T.matrix('x')  # the data is presented as rasterized images
-        self.y = T.ivector('y') # the labels are presented as 1D vector of
-                               # [int] labels
 
-        self.create_layers()
-        self.init_finetuning()
+        # allocate symbolic variables for the data
+        index   = T.lscalar()    # index to a [mini]batch 
+        self.x  = T.matrix('x')  # the data is presented as rasterized images
+        self.y  = T.ivector('y') # the labels are presented as 1D vector of 
+                                 # [int] labels
 
-    def create_layers(self):
         for i in xrange( self.n_layers ):
             # construct the sigmoidal layer
 
-            # the size of the input is either the number of hidden units of
+            # the size of the input is either the number of hidden units of 
             # the layer below or the input size if we are on the first layer
             if i == 0 :
-                input_size = self.n_ins
+                input_size = n_ins
             else:
-                input_size = self.hidden_layers_sizes[i-1]
+                input_size = hidden_layers_sizes[i-1]
 
             # the input to this layer is either the activation of the hidden
             # layer below or the input of the SdA if you are on the first
             # layer
-            if i == 0 :
+            if i == 0 : 
                 layer_input = self.x
             else:
                 layer_input = self.layers[-1].output
 
-            layer = SigmoidalLayer(self.rng, layer_input, input_size,
-                                   self.hidden_layers_sizes[i] )
-            # add the layer to the
+            layer = SigmoidalLayer(rng, layer_input, input_size, 
+                                   hidden_layers_sizes[i] )
+            # add the layer to the list of layers
             self.layers += [layer]
             self.params += layer.params
-
+        
             # Construct a denoising autoencoder that shared weights with this
             # layer
-            dA_layer = dA(input_size, self.hidden_layers_sizes[i], \
-                          corruption_level = self.corruption_levels[0],\
+            dA_layer = dA(input_size, hidden_layers_sizes[i], \
+                          corruption_level = corruption_levels[i],\
                           input = layer_input, \
                           shared_W = layer.W, shared_b = layer.b)
-
-            self.init_updates_for_layer(dA_layer)
+        
+            # Construct a function that trains this dA
+            # compute gradients of layer parameters
+            gparams = T.grad(dA_layer.cost, dA_layer.params)
+            # compute the list of updates
+            updates = {}
+            for param, gparam in zip(dA_layer.params, gparams):
+                updates[param] = param - gparam * pretrain_lr
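+                # i.e. one plain SGD step per layer parameter:
+                # param <- param - pretrain_lr * d(cost)/d(param)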
+            
+            # create a function that trains the dA
+            update_fn = theano.function([index], dA_layer.cost, \
+                  updates = updates,
+                  givens = { 
+                     self.x : train_set_x[index*batch_size:(index+1)*batch_size]})
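+            # the `givens` substitution replaces self.x in the graph by the
+            # index-th minibatch slice of the shared training data, so each
+            # call passes only an integer and the data can stay on the GPU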
+            # collect this function into a list
+            self.pretrain_functions += [update_fn]
 
-    def init_updates_for_layer(self, dA_layer):
-        # Construct a function that trains this dA
-        # compute gradients of layer parameters
-        gparams = T.grad(dA_layer.cost, dA_layer.params)
-        # compute the list of updates
-        updates = {}
-        for param, gparam in zip(dA_layer.params, gparams):
-            updates[param] = param - gparam * self.pretrain_lr
-
-        # create a function that trains the dA
-        update_fn = theano.function([self.x], dA_layer.cost, \
-              updates = updates)
-
-        # collect this function into a list
-        self.pretrain_functions += [update_fn]
-
-    def init_finetuning(self):
+        
         # We now need to add a logistic layer on top of the MLP
         self.logLayer = LogisticRegression(\
                          input = self.layers[-1].output,\
-                         n_in = self.hidden_layers_sizes[-1], n_out = self.n_outs)
+                         n_in = hidden_layers_sizes[-1], n_out = n_outs)
 
         self.params += self.logLayer.params
         # construct a function that implements one step of finetunining
 
-        # compute the cost, defined as the negative log likelihood
+        # compute the cost, defined as the negative log likelihood 
         cost = self.logLayer.negative_log_likelihood(self.y)
         # compute the gradients with respect to the model parameters
         gparams = T.grad(cost, self.params)
         # compute list of updates
         updates = {}
         for param,gparam in zip(self.params, gparams):
-            updates[param] = param - gparam*self.finetune_lr
-
-        self.finetune = theano.function([self.x, self.y], cost,
-                updates = updates)
+            updates[param] = param - gparam*finetune_lr
+            
+        self.finetune = theano.function([index], cost, 
+                updates = updates,
+                givens = {
+                  self.x : train_set_x[index*batch_size:(index+1)*batch_size],
+                  self.y : train_set_y[index*batch_size:(index+1)*batch_size]} )
 
         # symbolic variable that points to the number of errors made on the
         # minibatch given by self.x and self.y
 
         self.errors = self.logLayer.errors(self.y)
 
-class MnistIterators:
-    def __init__(self, minibatch_size):
-        self.minibatch_size = minibatch_size
+class Hyperparameters:
+    def __init__(self, dct):
+        self.__dict__.update(dct)
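+# usage sketch: hp = Hyperparameters({'minibatch_size': 20}) makes
+# hp.minibatch_size == 20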
 
-        self.mnist = MNIST.first_1k()
+def sgd_optimization_mnist(learning_rate=0.1, pretraining_epochs = 2, \
+                            pretrain_lr = 0.1, training_epochs = 5, \
+                            dataset='mnist.pkl.gz'):
+    # Load the dataset 
+    f = gzip.open(dataset,'rb')
+    # this gives us (train, valid, test), each an (x, y) pair of arrays
+    dataset = cPickle.load(f)
+    f.close()
+
+    n_ins = 28*28
+    n_outs = 10
+
+    hyperparameters = Hyperparameters({'finetuning_lr':learning_rate,
+                       'pretraining_lr':pretrain_lr,
+                       'pretraining_epochs_per_layer':pretraining_epochs,
+                       'max_finetuning_epochs':training_epochs,
+                       'hidden_layers_sizes':[1000,1000,1000],
+                       'corruption_levels':[0.2,0.2,0.2],
+                       'minibatch_size':20})
 
-        self.len_train = len(self.mnist.train.x)
-        self.len_valid = len(self.mnist.valid.x)
-        self.len_test = len(self.mnist.test.x)
+    sgd_optimization(dataset, hyperparameters, n_ins, n_outs)
+
+class NIST:
+    def __init__(self, minibatch_size, basepath='/data/lisa/data/nist/by_class/all'):
+        self.minibatch_size = minibatch_size
+        self.basepath = basepath
+
+        self.train = [None, None]
+        self.test = [None, None]
+
+        self.set_filenames()
+        self.load_train_test()
 
-    def train_x_batches(self):
-        idx = 0
-        while idx < len(self.mnist.train.x):
-            yield self.mnist.train.x[idx:idx+self.minibatch_size]
-            idx += self.minibatch_size
+        self.valid = [None, None]
+        self.split_train_valid()
+
+    def set_filenames(self):
+        self.train_files = ['all_train_data.ft',
+                                'all_train_labels.ft']
+
+        self.test_files = ['all_test_data.ft',
+                            'all_test_labels.ft']
+
+    def load_train_test(self):
+        self.load_data_labels(self.train_files, self.train)
+        self.load_data_labels(self.test_files, self.test)
 
-    def train_xy_batches(self):
-        idx = 0
-        while idx < len(self.mnist.train.x):
-            mb_x = self.mnist.train.x[idx:idx+self.minibatch_size]
-            mb_y = self.mnist.train.y[idx:idx+self.minibatch_size]
-            yield mb_x, mb_y
-            idx += self.minibatch_size
+    def load_data_labels(self, filenames, pair):
+        for i, fn in enumerate(filenames):
+            f = open(os.path.join(self.basepath, fn), 'rb')
+            pair[i] = ft.read(f)
+            f.close()
+
+    def split_train_valid(self):
+        test_len = len(self.test[0])
+        
+        new_train_x = self.train[0][:-test_len]
+        new_train_y = self.train[1][:-test_len]
+
+        self.valid[0] = self.train[0][-test_len:]
+        self.valid[1] = self.train[1][-test_len:]
+
+        self.train[0] = new_train_x
+        self.train[1] = new_train_y
+
+def sgd_optimization_nist(dataset_dir='/data/lisa/data/nist'):
+    pass
+
+def sgd_optimization(dataset, hyperparameters, n_ins, n_outs):
+    hp = hyperparameters
+
+    train_set, valid_set, test_set = dataset
 
-    def valid_xy_batches(self):
-        idx = 0
-        while idx < len(self.mnist.valid.x):
-            mb_x = self.mnist.valid.x[idx:idx+self.minibatch_size]
-            mb_y = self.mnist.valid.y[idx:idx+self.minibatch_size]
-            yield mb_x, mb_y
-            idx += self.minibatch_size
+    def shared_dataset(data_xy):
+        data_x, data_y = data_xy
+        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
+        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
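+        # note : labels are stored as floatX so the shared variable can be
+        #        placed on a GPU, then cast back to int32 below since they
+        #        are used as indices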
+        return shared_x, T.cast(shared_y, 'int32')
+
+    test_set_x, test_set_y = shared_dataset(test_set)
+    valid_set_x, valid_set_y = shared_dataset(valid_set)
+    train_set_x, train_set_y = shared_dataset(train_set)
+
+    # compute number of minibatches for training, validation and testing
+    n_train_batches = train_set_x.value.shape[0] / hp.minibatch_size
+    n_valid_batches = valid_set_x.value.shape[0] / hp.minibatch_size
+    n_test_batches  = test_set_x.value.shape[0]  / hp.minibatch_size
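+    # note : this is integer division, so e.g. 10010 examples with
+    #        minibatch_size=20 give 500 batches and the trailing 10
+    #        examples are never visited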
+
+    # allocate symbolic variables for the data
+    index   = T.lscalar()    # index to a [mini]batch 
+ 
+    # construct the stacked denoising autoencoder class
+    classifier = SdA( train_set_x=train_set_x, train_set_y = train_set_y,\
+                      batch_size = hp.minibatch_size, n_ins= n_ins, \
+                      hidden_layers_sizes = hp.hidden_layers_sizes, n_outs=n_outs, \
+                      corruption_levels = hp.corruption_levels,\
+                      rng = numpy.random.RandomState(1234),\
+                      pretrain_lr = hp.pretraining_lr, finetune_lr = hp.finetuning_lr )
 
 
-class MnistTrainingDriver:
-    def __init__(self, rng=numpy.random):
-        self.rng = rng
-
-        self.init_SdA()
+    start_time = time.clock()  
+    ## Pre-train layer-wise 
+    for i in xrange(classifier.n_layers):
+        # go through pretraining epochs 
+        for epoch in xrange(hp.pretraining_epochs_per_layer):
+            # go through the training set
+            for batch_index in xrange(n_train_batches):
+                c = classifier.pretrain_functions[i](batch_index)
+            print 'Pre-training layer %i, epoch %d, cost '%(i,epoch),c
+ 
+    end_time = time.clock()
 
-    def init_SdA(self):
-        # Hyperparam
-        hidden_layers_sizes = [1000, 1000, 1000]
-        n_outs = 10
-        corruption_levels = [0.2, 0.2, 0.2]
-        minibatch_size = 10
-        pretrain_lr = 0.001
-        finetune_lr = 0.001
+    print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
+    # Fine-tune the entire model
 
-        update_locals(self, locals())
-
-        self.mnist = MnistIterators(minibatch_size)
+    minibatch_size = hp.minibatch_size
 
-        # construct the stacked denoising autoencoder class
-        self.classifier = SdA( batch_size = minibatch_size, \
-                          n_ins=28*28, \
-                          hidden_layers_sizes = hidden_layers_sizes, \
-                          n_outs=n_outs, \
-                          corruption_levels = corruption_levels,\
-                          rng = self.rng,\
-                          pretrain_lr = pretrain_lr, \
-                          finetune_lr = finetune_lr) 
+    # create a function to compute the mistakes that are made by the model
+    # on the validation set, or testing set
+    test_model = theano.function([index], classifier.errors,
+             givens = {
+               classifier.x: test_set_x[index*minibatch_size:(index+1)*minibatch_size],
+               classifier.y: test_set_y[index*minibatch_size:(index+1)*minibatch_size]})
 
-    def compute_validation_error(self):
-        validation_error = 0.0
-
-        count = 0
-        for mb_x, mb_y in self.mnist.valid_xy_batches():
-            validation_error += self.classifier.errors(mb_x, mb_y)
-            count += 1
+    validate_model = theano.function([index], classifier.errors,
+            givens = {
+               classifier.x: valid_set_x[index*minibatch_size:(index+1)*minibatch_size],
+               classifier.y: valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})
 
-        return float(validation_error) / count
 
-    def pretrain(self):
-        pretraining_epochs = 20
-
-        for layer_idx, update_fn in enumerate(self.classifier.pretrain_functions):
-            for epoch in xrange(pretraining_epochs):
-                # go through the training set
-                cost_acc = 0.0
-                for i, mb_x in enumerate(self.mnist.train_x_batches()):
-                    cost_acc += update_fn(mb_x)
-                    
-                    if i % 100 == 0:
-                        print i, "avg err = ", cost_acc / 100.0
-                        cost_acc = 0.0
-                print 'Pre-training layer %d, epoch %d' % (layer_idx, epoch)
-
-    def finetune(self):
-        max_training_epochs = 1000
+    # early-stopping parameters
+    patience              = 10000 # look at this many examples regardless
+    patience_increase     = 2.    # wait this much longer when a new best is 
+                                  # found
+    improvement_threshold = 0.995 # a relative improvement of this much is 
+                                  # considered significant
+    validation_frequency  = min(n_train_batches, patience/2)
+                                  # go through this many 
+                                  # minibatches before checking the network 
+                                  # on the validation set; in this case we 
+                                  # check every epoch 
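+                                  # worked example: 50000 MNIST training
+                                  # examples with minibatches of 20 give
+                                  # 2500 batches, so min(2500, 5000) = 2500,
+                                  # i.e. one validation pass per epoch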
 
-        n_train_batches = self.mnist.len_train / self.minibatch_size
+    best_params          = None
+    best_validation_loss = float('inf')
+    test_score           = 0.
+    start_time = time.clock()
+
+    done_looping = False
+    epoch = 0
+
+    while (epoch < hp.max_finetuning_epochs) and (not done_looping):
+      epoch = epoch + 1
+      for minibatch_index in xrange(n_train_batches):
+
+        cost_ij = classifier.finetune(minibatch_index)
+        iter    = epoch * n_train_batches + minibatch_index
+
+        if (iter+1) % validation_frequency == 0: 
+            
+            validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
+            this_validation_loss = numpy.mean(validation_losses)
+            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+                   (epoch, minibatch_index+1, n_train_batches, \
+                    this_validation_loss*100.))
 
-        # early-stopping parameters
-        patience = 10000 # look as this many examples regardless
-        patience_increase = 2. # wait this much longer when a new best is
-                                      # found
-        improvement_threshold = 0.995 # a relative improvement of this much is
-                                      # considered significant
-        validation_frequency = min(n_train_batches, patience/2)
-                                      # go through this many
-                                      # minibatche before checking the network
-                                      # on the validation set; in this case we
-                                      # check every epoch
-     
-     
-        # TODO: use this
-        best_params = None
-        best_validation_loss = float('inf')
-        test_score = 0.
-        start_time = time.clock()
-     
-        done_looping = False
-        epoch = 0
-     
-        while (epoch < max_training_epochs) and (not done_looping):
-            epoch = epoch + 1
-            for minibatch_index, (mb_x, mb_y) in enumerate(self.mnist.train_xy_batches()):
-                cost_ij = classifier.finetune(mb_x, mb_y)
-                iter = epoch * n_train_batches + minibatch_index
-         
-                if (iter+1) % validation_frequency == 0:
-                    this_validation_loss = self.compute_validation_error()
-                    print('epoch %i, minibatch %i/%i, validation error %f %%' % \
-                           (epoch, minibatch_index+1, n_train_batches, \
-                            this_validation_loss*100.))
-         
-                    # if we got the best validation score until now
-                    if this_validation_loss < best_validation_loss:
-         
-                        #improve patience if loss improvement is good enough
-                        if this_validation_loss < best_validation_loss * \
-                               improvement_threshold :
-                            patience = max(patience, iter * patience_increase)
-                            print "Improving patience"
-         
-                        # save best validation score and iteration number
-                        best_validation_loss = this_validation_loss
-                        best_iter = iter
-         
-                        # test it on the test set
-                        #test_losses = [test_model(i) for i in xrange(n_test_batches)]
-                        #test_score = numpy.mean(test_losses)
-                        #print((' epoch %i, minibatch %i/%i, test error of best '
-                        #      'model %f %%') %
-                        #             (epoch, minibatch_index+1, n_train_batches,
-                        #              test_score*100.))
-     
-     
-            if patience <= iter :
+
+            # if we got the best validation score until now
+            if this_validation_loss < best_validation_loss:
+
+                #improve patience if loss improvement is good enough
+                if this_validation_loss < best_validation_loss *  \
+                       improvement_threshold :
+                    patience = max(patience, iter * patience_increase)
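+                    # e.g. a significant improvement at iter 7000 raises
+                    # patience to max(10000, 14000) = 14000 minibatches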
+
+                # save best validation score and iteration number
+                best_validation_loss = this_validation_loss
+                best_iter = iter
+
+                # test it on the test set
+                test_losses = [test_model(i) for i in xrange(n_test_batches)]
+                test_score = numpy.mean(test_losses)
+                print(('     epoch %i, minibatch %i/%i, test error of best '
+                      'model %f %%') % 
+                             (epoch, minibatch_index+1, n_train_batches,
+                              test_score*100.))
+
+
+        if patience <= iter :
                 done_looping = True
                 break
 
-def train():
-    driver = MnistTrainingDriver()
-    start_time = time.clock()
-    driver.pretrain()
-    print "PRETRAINING DONE. STARTING FINETUNING."
-    driver.finetune()
     end_time = time.clock()
+    print(('Optimization complete with best validation score of %f %%,'
+           ' with test performance %f %%') %  
+                 (best_validation_loss * 100., test_score*100.))
+    print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
+
 
 if __name__ == '__main__':
-    train()
+    import sys
+    args = sys.argv[1:]
+    if len(args) > 0 and args[0] == "jobman_add":
+        jobman_add() # note: not yet defined anywhere in this file
+    else:
+        sgd_optimization_mnist(dataset=MNIST_LOCATION)