#!/usr/bin/python
# coding: utf-8

# Generic SdA optimization loop, adapted slightly from the deeplearning.net tutorial

import numpy 
import theano
import time
import theano.tensor as T

from jobman import DD

from stacked_dae import SdA

def sgd_optimization(dataset, hyperparameters, n_ins, n_outs):
    hp = hyperparameters

    printout_frequency = 1000

    train_set, valid_set, test_set = dataset

    def shared_dataset(data_xy):
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX))
        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX))
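        # labels are stored as floatX so the shared variable can live on the
        # GPU, then cast back to int32 below for use as class indices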
        return shared_x, T.cast(shared_y, 'int32')

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / hp.minibatch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / hp.minibatch_size
    n_test_batches  = test_set_x.get_value(borrow=True).shape[0]  / hp.minibatch_size
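    # note: integer division silently drops any final, incomplete minibatch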

    # allocate symbolic variables for the data
    index   = T.lscalar()    # index to a [mini]batch 
 
    # construct the stacked denoising autoencoder class
    classifier = SdA(train_set_x=train_set_x, train_set_y=train_set_y,
                     batch_size=hp.minibatch_size, n_ins=n_ins,
                     hidden_layers_sizes=hp.hidden_layers_sizes, n_outs=n_outs,
                     corruption_levels=hp.corruption_levels,
                     rng=numpy.random.RandomState(1234),
                     pretrain_lr=hp.pretraining_lr, finetune_lr=hp.finetuning_lr)
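    # the SdA constructor is expected to compile one pretraining function per
    # hidden layer (classifier.pretrain_functions) and a finetuning function
    # (classifier.finetune); both are used below (see stacked_dae.py)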

    printout_acc = 0.0

    start_time = time.clock()  
    ## Pre-train layer-wise 
    for i in xrange(classifier.n_layers):
        # go through pretraining epochs 
        for epoch in xrange(hp.pretraining_epochs_per_layer):
            # go through the training set
            for batch_index in xrange(n_train_batches):
                c = classifier.pretrain_functions[i](batch_index)

                # accumulate the average reconstruction cost over the last
                # printout_frequency minibatches and report it periodically
                printout_acc += c / printout_frequency
                if (batch_index+1) % printout_frequency == 0:
                    print batch_index, "reconstruction cost avg=", printout_acc
                    printout_acc = 0.0

            print 'Pre-training layer %i, epoch %d, cost '%(i,epoch), c
 
    end_time = time.clock()

    print ('Pretraining took %f minutes' %((end_time-start_time)/60.))
    # Fine-tune the entire model

    minibatch_size = hp.minibatch_size

    # create a function to compute the mistakes that are made by the model
    # on the validation set, or testing set
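    # the givens substitutions slice the shared datasets by minibatch index,
    # so each call only passes the index and the data itself can stay on the GPU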
    test_model = theano.function([index], classifier.errors,
             givens = {
               classifier.x: test_set_x[index*minibatch_size:(index+1)*minibatch_size],
               classifier.y: test_set_y[index*minibatch_size:(index+1)*minibatch_size]})

    validate_model = theano.function([index], classifier.errors,
            givens = {
               classifier.x: valid_set_x[index*minibatch_size:(index+1)*minibatch_size],
               classifier.y: valid_set_y[index*minibatch_size:(index+1)*minibatch_size]})


    # early-stopping parameters
    patience              = 10000 # look at this many examples regardless
    patience_increase     = 2.    # wait this much longer when a new best
                                  # is found
    improvement_threshold = 0.995 # a relative improvement of this much is
                                  # considered significant
    validation_frequency  = min(n_train_batches, patience/2)
                                  # go through this many minibatches before
                                  # checking the network on the validation
                                  # set; in this case we check every epoch
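    # for example (hypothetical numbers): with n_train_batches = 2500 we
    # validate every 2500 minibatches, i.e. once per epoch; training stops
    # once iter reaches 10000 unless a validation loss below
    # 0.995 * best_validation_loss raises patience to 2 * iter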

    best_params          = None
    best_validation_loss = float('inf')
    test_score           = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    printout_acc = 0.0

    print "----- START FINETUNING -----"

    while (epoch < hp.max_finetuning_epochs) and (not done_looping):
      epoch = epoch + 1
      for minibatch_index in xrange(n_train_batches):

        cost_ij = classifier.finetune(minibatch_index)
        # epoch has already been incremented, so subtract 1 to count
        # minibatches from zero
        iter    = (epoch - 1) * n_train_batches + minibatch_index

        printout_acc += cost_ij / float(printout_frequency * minibatch_size)
        if (iter+1) % printout_frequency == 0:
            print iter, "cost avg=", printout_acc
            printout_acc = 0.0

        if (iter+1) % validation_frequency == 0:
            validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
            this_validation_loss = numpy.mean(validation_losses)
            print('epoch %i, minibatch %i/%i, validation error %f %%' %
                  (epoch, minibatch_index+1, n_train_batches,
                   this_validation_loss*100.))

            # if we got the best validation score until now
            if this_validation_loss < best_validation_loss:

                #improve patience if loss improvement is good enough
                if this_validation_loss < best_validation_loss *  \
                       improvement_threshold :
                    patience = max(patience, iter * patience_increase)

                # save best validation score and iteration number
                best_validation_loss = this_validation_loss
                best_iter = iter

                # test it on the test set
                test_losses = [test_model(i) for i in xrange(n_test_batches)]
                test_score = numpy.mean(test_losses)
                print(('     epoch %i, minibatch %i/%i, test error of best '
                      'model %f %%') % 
                             (epoch, minibatch_index+1, n_train_batches,
                              test_score*100.))


        if patience <= iter:
            done_looping = True
            break

    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%, '
           'with test performance %f %%') %
          (best_validation_loss * 100., test_score*100.))
    print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
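

# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). It relies on jobman's
# DD container (a dict subclass allowing attribute access, imported above) and
# a hypothetical load_dataset() helper returning (train, valid, test) tuples
# of (x, y) numpy arrays, e.g. the MNIST pickle used in the deeplearning.net
# tutorials. The hyperparameter values are illustrative only.
if __name__ == '__main__':
    hp_example = DD(
        minibatch_size               = 20,
        pretraining_lr               = 0.001,
        finetuning_lr                = 0.1,
        pretraining_epochs_per_layer = 15,
        max_finetuning_epochs        = 1000,
        hidden_layers_sizes          = [1000, 1000, 1000],
        corruption_levels            = [0.1, 0.2, 0.3])
    # dataset = load_dataset('mnist.pkl.gz')  # hypothetical loader
    # sgd_optimization(dataset, hp_example, n_ins=28*28, n_outs=10)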