diff baseline/mlp/mlp_nist.py @ 212:e390b0454515

added classic learning-rate time decay and Python code to calculate the error from a saved model
author xaviermuller
date Wed, 10 Mar 2010 16:17:59 -0500
parents d37c944133c3
children 9b6e0af062af
--- a/baseline/mlp/mlp_nist.py	Wed Mar 10 09:56:02 2010 -0500
+++ b/baseline/mlp/mlp_nist.py	Wed Mar 10 16:17:59 2010 -0500
@@ -31,6 +31,7 @@
 import time 
 import theano.tensor.nnet
 import pylearn
+import theano,pylearn.version
 from pylearn.io import filetensor as ft
 
 data_path = '/data/lisa/data/nist/by_class/'
@@ -174,17 +175,22 @@
                         nb_max_exemples=1000000,\
                         batch_size=20,\
                         nb_hidden = 500,\
-                        nb_targets = 62):
+                        nb_targets = 62,\
+                        tau=1e6):
    
     
     configuration = [learning_rate,nb_max_exemples,nb_hidden,adaptive_lr]
     
+    # save the initial learning rate; the classic time decay (adaptive_lr==2) scales it down as training progresses
+    initial_lr = learning_rate
+    
     total_validation_error_list = []
     total_train_error_list = []
     learning_rate_list=[]
     best_training_error=float('inf');
     
     
+    
    
     f = open(data_path+train_data)
     g= open(data_path+train_labels)
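
With this change, the classic time decay is enabled by calling mlp_full_nist with adaptive_lr=2 and a decay constant tau, given in units of training examples. A usage sketch with illustrative values, assuming the module is importable as mlp_nist and the NIST filetensor data is present under data_path:

from mlp_nist import mlp_full_nist

# illustrative hyperparameters; tau is the decay constant in examples seen
(train_error, validation_error, test_error,
 nb_exemples, elapsed) = mlp_full_nist(learning_rate=0.1,
                                       nb_max_exemples=1000000,
                                       nb_hidden=500,
                                       adaptive_lr=2,  # 2 selects the classic 1/t decay
                                       tau=1e6)
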
@@ -315,6 +321,8 @@
     n_iter = nb_max_exemples/batch_size  # nb of max times we are allowed to run through all exemples
     n_iter = n_iter/n_minibatches + 1 #round up
     n_iter=max(1,n_iter) # run at least once on short debug call
+    time_n = 0 # number of training examples seen so far (drives the lr decay)
+    
     
    
     if verbose == True:
@@ -325,6 +333,9 @@
         epoch           = iter / n_minibatches
         minibatch_index =  iter % n_minibatches
         
+
+        if adaptive_lr==2:
+            classifier.lr.value = tau*initial_lr/(tau+time_n) # classic 1/t decay
       
         
         # get the minibatches corresponding to `iter` modulo
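
The assignment above implements the classic 1/t schedule: the effective rate is tau*initial_lr/(tau + time_n), so it equals initial_lr at time_n == 0 and half of initial_lr once time_n reaches tau examples. A minimal standalone sketch of the same formula, not part of the patch, with names mirroring the diff:

def decayed_lr(initial_lr, tau, time_n):
    # lr(t) = tau * lr0 / (tau + t): starts at lr0, halves at t == tau,
    # and keeps shrinking as more training examples are seen
    return tau * initial_lr / (tau + time_n)

lr0, tau = 0.1, 1e6
for t in (0, 250000, 1000000, 10000000):
    print('time_n=%8i  lr=%f' % (t, decayed_lr(lr0, tau, t)))
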
@@ -364,6 +375,8 @@
                 print('epoch %i, minibatch %i/%i, validation error %f, training error %f %%' % \
                     (epoch, minibatch_index+1, n_minibatches, \
                         this_validation_loss*100.,this_train_loss*100))
+                print 'learning rate = %f' % classifier.lr.value
+                print 'time = %i' % time_n
                         
                         
             #save the learning rate
@@ -425,6 +438,7 @@
             break
 
 
+        time_n = time_n + batch_size # advance the decay clock by one minibatch
     end_time = time.clock()
     if verbose == True:
         print(('Optimization complete. Best validation score of %f %% '
@@ -448,7 +462,8 @@
     (train_error,validation_error,test_error,nb_exemples,time)=mlp_full_nist(learning_rate=state.learning_rate,\
                                                                 nb_max_exemples=state.nb_max_exemples,\
                                                                 nb_hidden=state.nb_hidden,\
-                                                                adaptive_lr=state.adaptive_lr)
+                                                                adaptive_lr=state.adaptive_lr,\
+                                                                tau=state.tau)
     state.train_error=train_error
     state.validation_error=validation_error
     state.test_error=test_error
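
The entry point pulls every hyperparameter from the jobman state object, so experiment configurations now need a tau field alongside the existing ones. A hypothetical minimal stand-in for that state, assuming only attribute access is required:

class State(object):
    # hypothetical stand-in for the jobman state read above
    learning_rate = 0.1
    nb_max_exemples = 1000000
    nb_hidden = 500
    adaptive_lr = 2  # 2 selects the classic time decay
    tau = 1e6        # decay constant, in training examples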