diff baseline/conv_mlp/convolutional_mlp.py @ 270:d41fe003fade

Convolutional network with the correct dataset
author Jeremy Eustache <jeremy.eustache@voila.fr>
date Sat, 20 Mar 2010 15:49:55 -0400
parents a491d3600a77
children
--- a/baseline/conv_mlp/convolutional_mlp.py	Sat Mar 20 10:19:11 2010 -0400
+++ b/baseline/conv_mlp/convolutional_mlp.py	Sat Mar 20 15:49:55 2010 -0400
@@ -24,9 +24,12 @@
 import numpy, theano, cPickle, gzip, time
 import theano.tensor as T
 import theano.sandbox.softsign
+import sys
 import pylearn.datasets.MNIST
 from pylearn.io import filetensor as ft
 from theano.sandbox import conv, downsample
+
+from ift6266 import datasets
 import theano,pylearn.version,ift6266
 
 class LeNetConvPoolLayer(object):
@@ -178,81 +181,16 @@
             raise NotImplementedError()
 
 
-def load_dataset(fname,batch=20):
-
-    # directory that contains the NIST data
-    # the following path will work if you are connected to a machine
-    # on the DIRO network
-    datapath = '/data/lisa/data/nist/by_class/'
-    # the .ft file contains the NIST digits in an efficient format. The digits
-    # are stored in an NxD matrix, where N is the number of images and D is the
-    # number of pixels per image (32x32 = 1024). Each pixel is a value between
-    # 0 and 255, corresponding to a grey level. The values are stored as uint8,
-    # i.e. as bytes.
-    f = open(datapath+'digits/digits_train_data.ft')
-    # Make sure you have enough memory to load the full dataset into memory.
-    # Otherwise, use ft.arraylike, a class built specifically for files that
-    # you do not want to load into RAM.
-    d = ft.read(f)
-
-    # NB: do not forget to divide the pixel values by 255. if you use the data
-    # as inputs to a neural network and want inputs between 0 and 1.
-    # digits_train_data.ft contains the images, digits_train_labels.ft contains
-    # the labels
-    f = open(datapath+'digits/digits_train_labels.ft')
-    labels = ft.read(f)
-
-
-    # Load the dataset 
-    #f = gzip.open(fname,'rb')
-    #train_set, valid_set, test_set = cPickle.load(f)
-    #f.close()
-
-    # make minibatches of size 20 
-    batch_size = batch   # size of the minibatch
-
-    # Dealing with the training set
-    # get the list of training images (x) and their labels (y)
-    (train_set_x, train_set_y) = (d[:200000,:],labels[:200000])
-    # initialize the list of training minibatches with empty list
-    train_batches = []
-    for i in xrange(0, len(train_set_x), batch_size):
-        # add to the list of minibatches the minibatch starting at 
-        # position i, ending at position i+batch_size
-        # a minibatch is a pair ; the first element of the pair is a list 
-        # of datapoints, the second element is the list of corresponding 
-        # labels
-        train_batches = train_batches + \
-               [(train_set_x[i:i+batch_size], train_set_y[i:i+batch_size])]
-
-    #print train_batches[500]
-
-    # Dealing with the validation set
-    (valid_set_x, valid_set_y) = (d[200000:270000,:],labels[200000:270000])
-    # initialize the list of validation minibatches 
-    valid_batches = []
-    for i in xrange(0, len(valid_set_x), batch_size):
-        valid_batches = valid_batches + \
-               [(valid_set_x[i:i+batch_size], valid_set_y[i:i+batch_size])]
-
-    # Dealing with the testing set
-    (test_set_x, test_set_y) = (d[270000:340000,:],labels[270000:340000])
-    # initialize the list of testing minibatches 
-    test_batches = []
-    for i in xrange(0, len(test_set_x), batch_size):
-        test_batches = test_batches + \
-              [(test_set_x[i:i+batch_size], test_set_y[i:i+batch_size])]
-
-
-    return train_batches, valid_batches, test_batches
-
-
-def evaluate_lenet5(learning_rate=0.1, n_iter=200, batch_size=20, n_kern0=20, n_kern1=50, n_layer=3, filter_shape0=5, filter_shape1=5, dataset='mnist.pkl.gz'):
+def evaluate_lenet5(learning_rate=0.1, n_iter=200, batch_size=20, n_kern0=20, n_kern1=50, n_layer=3, filter_shape0=5, filter_shape1=5, sigmoide_size=500, dataset='mnist.pkl.gz'):
     rng = numpy.random.RandomState(23455)
 
     print 'Before load dataset'
-    train_batches, valid_batches, test_batches = load_dataset(dataset,batch_size)
+    dataset = datasets.nist_digits
+    train_batches = dataset.train(batch_size)
+    valid_batches = dataset.valid(batch_size)
+    test_batches = dataset.test(batch_size)
+    #print valid_batches.shape
+    #print test_batches.shape
     print 'After load dataset'
 
     ishape = (32,32)     # this is the size of NIST images
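
The hand-rolled load_dataset above is replaced by the shared ift6266.datasets module. A minimal sketch of how the new interface is consumed, assuming (as the hunk above does) that datasets.nist_digits exposes train/valid/test methods yielding (x, y) minibatch pairs:

    from ift6266 import datasets

    batch_size = 20
    dataset = datasets.nist_digits           # NIST digits, 32x32 images flattened to 1024 pixels
    for x, y in dataset.train(batch_size):
        if x.shape[0] != batch_size:         # the last minibatch may be smaller; skip it
            continue
        # x: (batch_size, 1024) pixel array, y: (batch_size,) label vector
        pass
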
@@ -305,9 +243,9 @@
 	fshape0=(32-filter_shape0+1)/2
 	layer1_input = layer0.output.flatten(2)
 		# construct a fully-connected sigmoidal layer
-	layer1 = SigmoidalLayer(rng, input=layer1_input,n_in=n_kern0*fshape0*fshape0, n_out=500)
+	layer1 = SigmoidalLayer(rng, input=layer1_input,n_in=n_kern0*fshape0*fshape0, n_out=sigmoide_size)
 
-	layer2 = LogisticRegression(input=layer1.output, n_in=500, n_out=10)
+	layer2 = LogisticRegression(input=layer1.output, n_in=sigmoide_size, n_out=10)
 	cost = layer2.negative_log_likelihood(y)
 	test_model = theano.function([x,y], layer2.errors(y))
 	params = layer2.params+ layer1.params + layer0.params
@@ -335,10 +273,10 @@
 	layer4_input = layer3.output.flatten(2)
 
 	layer4 = SigmoidalLayer(rng, input=layer4_input, 
-					n_in=n_kern3*fshape3*fshape3, n_out=500)
+					n_in=n_kern3*fshape3*fshape3, n_out=sigmoide_size)
 
   
-	layer5 = LogisticRegression(input=layer4.output, n_in=500, n_out=10)
+	layer5 = LogisticRegression(input=layer4.output, n_in=sigmoide_size, n_out=10)
 
 	cost = layer5.negative_log_likelihood(y)
 
@@ -354,10 +292,10 @@
 	layer3_input = layer2.output.flatten(2)
 
 	layer3 = SigmoidalLayer(rng, input=layer3_input, 
-					n_in=n_kern2*fshape2*fshape2, n_out=500)
+					n_in=n_kern2*fshape2*fshape2, n_out=sigmoide_size)
 
   
-	layer4 = LogisticRegression(input=layer3.output, n_in=500, n_out=10)
+	layer4 = LogisticRegression(input=layer3.output, n_in=sigmoide_size, n_out=10)
 
 	cost = layer4.negative_log_likelihood(y)
 
@@ -378,11 +316,11 @@
 
 	# construct a fully-connected sigmoidal layer
 	layer2 = SigmoidalLayer(rng, input=layer2_input, 
-					n_in=n_kern1*fshape1*fshape1, n_out=500)
+					n_in=n_kern1*fshape1*fshape1, n_out=sigmoide_size)
 
   
 	# classify the values of the fully-connected sigmoidal layer
-	layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
+	layer3 = LogisticRegression(input=layer2.output, n_in=sigmoide_size, n_out=10)
 
 	# the cost we minimize during training is the NLL of the model
 	cost = layer3.negative_log_likelihood(y)
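
The four hunks above thread the new sigmoide_size parameter through every n_layer branch in place of the hard-coded 500 hidden units. An illustrative call with the defaults from the new signature (note that the dataset argument is now overridden inside the function by datasets.nist_digits):

    evaluate_lenet5(learning_rate=0.1, n_iter=200, batch_size=20,
                    n_kern0=20, n_kern1=50, n_layer=3,
                    filter_shape0=5, filter_shape1=5, sigmoide_size=500)
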
@@ -414,7 +352,28 @@
     # TRAIN MODEL #
     ###############
 
-    n_minibatches        = len(train_batches) 
+    #n_minibatches        = len(train_batches) 
+    n_minibatches=0
+    n_valid=0
+    n_test=0
+    for x, y in dataset.train(batch_size):
+	if x.shape[0] == batch_size:
+	    n_minibatches+=1
+    n_minibatches*=batch_size
+    print n_minibatches
+
+    for x, y in dataset.valid(batch_size):
+	if x.shape[0] == batch_size:
+	    n_valid+=1
+    n_valid*=batch_size
+    print n_valid
+
+    for x, y in dataset.test(batch_size):
+	if x.shape[0] == batch_size:
+	    n_test+=1
+    n_test*=batch_size
+    print n_test
+  
 
     # early-stopping parameters
     patience              = 10000 # look at this many examples regardless
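
The counting loops in the hunk above walk each split once and keep only full-size minibatches; after the *= batch_size step, n_minibatches, n_valid and n_test therefore hold the number of examples covered by full minibatches rather than minibatch counts. A hypothetical helper (not part of the commit) that would factor out the repetition:

    def count_full_examples(batches, batch_size):
        # number of examples covered by minibatches of exactly batch_size elements
        n_full = sum(1 for x, y in batches if x.shape[0] == batch_size)
        return n_full * batch_size

    # n_minibatches = count_full_examples(dataset.train(batch_size), batch_size)
    # n_valid       = count_full_examples(dataset.valid(batch_size), batch_size)
    # n_test        = count_full_examples(dataset.test(batch_size),  batch_size)
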
@@ -433,60 +392,65 @@
     test_score           = 0.
     start_time = time.clock()
 
-    # have a maximum of `n_iter` iterations through the entire dataset
-    for iter in xrange(n_iter * n_minibatches):
-
-        # get epoch and minibatch index
-        epoch           = iter / n_minibatches
-        minibatch_index =  iter % n_minibatches
 
-        # get the minibatches corresponding to `iter` modulo
-        # `len(train_batches)`
-        x,y = train_batches[ minibatch_index ]
-	
-        if iter %100 == 0:
-            print 'training @ iter = ', iter
-        cost_ij = train_model(x,y)
-
-        if (iter+1) % validation_frequency == 0: 
+    # have a maximum of `n_iter` iterations through the entire dataset
+    iter=0
+    for epoch in xrange(n_iter):
+	for x, y in train_batches:
+	    if x.shape[0] != batch_size:
+		continue
+	    iter+=1
 
-            # compute zero-one loss on validation set 
-            this_validation_loss = 0.
-            for x,y in valid_batches:
-                # sum up the errors for each minibatch
-                this_validation_loss += test_model(x,y)
-
-            # get the average by dividing with the number of minibatches
-            this_validation_loss /= len(valid_batches)
-            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
-                   (epoch, minibatch_index+1, n_minibatches, \
-                    this_validation_loss*100.))
+	    # get epoch and minibatch index
+	    #epoch           = iter / n_minibatches
+	    minibatch_index =  iter % n_minibatches
+	    
+	    if iter %100 == 0:
+		print 'training @ iter = ', iter
+	    cost_ij = train_model(x,y)
 
 
-            # if we got the best validation score until now
-            if this_validation_loss < best_validation_loss:
+	# compute zero-one loss on validation set 
+	this_validation_loss = 0.
+	for x,y in valid_batches:
+	    if x.shape[0] != batch_size:
+		continue
+	    # sum up the errors for each minibatch
+	    this_validation_loss += test_model(x,y)
 
-                #improve patience if loss improvement is good enough
-                if this_validation_loss < best_validation_loss *  \
-                       improvement_threshold :
-                    patience = max(patience, iter * patience_increase)
+	# get the average by dividing with the number of minibatches
+	this_validation_loss /= n_valid
+	print('epoch %i, minibatch %i/%i, validation error %f %%' % \
+	      (epoch, minibatch_index+1, n_minibatches, \
+		this_validation_loss*100.))
 
-                # save best validation score and iteration number
-                best_validation_loss = this_validation_loss
-                best_iter = iter
+
+	# if we got the best validation score until now
+	if this_validation_loss < best_validation_loss:
 
-                # test it on the test set
-                test_score = 0.
-                for x,y in test_batches:
-                    test_score += test_model(x,y)
-                test_score /= len(test_batches)
-                print(('     epoch %i, minibatch %i/%i, test error of best '
-                      'model %f %%') % 
-                             (epoch, minibatch_index+1, n_minibatches,
-                              test_score*100.))
+	    #improve patience if loss improvement is good enough
+	    if this_validation_loss < best_validation_loss *  \
+		  improvement_threshold :
+		patience = max(patience, iter * patience_increase)
+
+	    # save best validation score and iteration number
+	    best_validation_loss = this_validation_loss
+	    best_iter = iter
 
-        if patience <= iter :
-            break
+	    # test it on the test set
+	    test_score = 0.
+	    for x,y in test_batches:
+		if x.shape[0] != batch_size:
+		    continue
+		test_score += test_model(x,y)
+	    test_score /= n_test
+	    print(('     epoch %i, minibatch %i/%i, test error of best '
+		  'model %f %%') % 
+			(epoch, minibatch_index+1, n_minibatches,
+			  test_score*100.))
+
+	if patience <= iter :
+	    break
 
     end_time = time.clock()
     print('Optimization complete.')
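
The rewritten training loop above replaces the flat for iter in xrange(n_iter * n_minibatches) schedule with an explicit epoch loop, skips undersized minibatches, and runs the validation pass once per epoch instead of every validation_frequency iterations. A self-contained sketch of that control flow (a hypothetical helper, not the committed code; the test-set evaluation on a new best is omitted for brevity):

    def train_with_early_stopping(train_batches, valid_batches, batch_size,
                                  train_model, test_model, n_iter, n_valid,
                                  patience, patience_increase, improvement_threshold):
        # assumes train_batches / valid_batches can be iterated once per epoch,
        # as the committed loop does
        iter = 0
        best_validation_loss = float('inf')
        best_iter = 0
        for epoch in xrange(n_iter):
            for x, y in train_batches:
                if x.shape[0] != batch_size:      # skip the ragged final minibatch
                    continue
                iter += 1
                train_model(x, y)
            # one validation pass per epoch
            this_validation_loss = sum(test_model(x, y) for x, y in valid_batches
                                       if x.shape[0] == batch_size) / n_valid
            if this_validation_loss < best_validation_loss:
                # widen the patience window when the improvement is large enough
                if this_validation_loss < best_validation_loss * improvement_threshold:
                    patience = max(patience, iter * patience_increase)
                best_validation_loss, best_iter = this_validation_loss, iter
            if patience <= iter:
                break
        return best_validation_loss, best_iter
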
@@ -502,8 +466,10 @@
 
 def experiment(state, channel):
     print 'start experiment'
-    (best_validation_loss, test_score, minutes_trained, iter) = evaluate_lenet5(state.learning_rate, state.n_iter, state.batch_size, state.n_kern0, state.n_kern1, state.n_layer, state.filter_shape0, state.filter_shape1)
+    (best_validation_loss, test_score, minutes_trained, iter) = evaluate_lenet5(state.learning_rate, state.n_iter, state.batch_size, state.n_kern0, state.n_kern1, state.n_layer, state.filter_shape0, state.filter_shape1,state.sigmoide_size)
     print 'end experiment'
+
+    pylearn.version.record_versions(state,[theano,ift6266,pylearn])
     
     state.best_validation_loss = best_validation_loss
     state.test_score = test_score
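
experiment() now forwards state.sigmoide_size and records the library versions via pylearn.version.record_versions. A hypothetical driver (the state/channel pair would normally be supplied by a job scheduler such as jobman; the DummyState stand-in below is only for illustration and may not satisfy everything record_versions expects):

    class DummyState(dict):
        # attribute-style access so experiment() can read hyperparameters and write results
        __getattr__ = dict.__getitem__
        __setattr__ = dict.__setitem__

    state = DummyState(learning_rate=0.1, n_iter=200, batch_size=20,
                       n_kern0=20, n_kern1=50, n_layer=3,
                       filter_shape0=5, filter_shape1=5, sigmoide_size=500)
    experiment(state, channel=None)
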