changeset 355:76b7182dd32e

added support for pnist in the iterator; corrected a print bug in mlp
author xaviermuller
date Wed, 21 Apr 2010 15:07:09 -0400
parents ffc06af1c543
children b0741ea3ff6f
files baseline/mlp/mlp_nist.py
diffstat 1 files changed, 34 insertions(+), 26 deletions(-)
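Note on the "print bug" half of this changeset: progress messages were previously gated behind verbose == 1, and what did print could sit in stdout's buffer when output was redirected to a log file. The patch below prints unconditionally and follows each print with sys.stdout.flush(). A minimal sketch of that pattern, in the same Python 2 dialect as the patched file (the log() helper is hypothetical, not part of the patch):

    import sys

    def log(msg):
        # Hypothetical helper illustrating the pattern the patch adopts:
        # print unconditionally, then flush, so progress lines reach a
        # redirected log file right away instead of waiting for the
        # stdio buffer to fill.
        print msg
        sys.stdout.flush()

    log('starting training')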
--- a/baseline/mlp/mlp_nist.py	Wed Apr 21 14:54:54 2010 -0400
+++ b/baseline/mlp/mlp_nist.py	Wed Apr 21 15:07:09 2010 -0400
@@ -23,6 +23,7 @@
 """
 __docformat__ = 'restructedtext en'
 
+import sys
 import pdb
 import numpy
 import pylab
@@ -372,8 +373,9 @@
     
     
     
-    if verbose == 1:
-        print 'starting training'
+
+    print 'starting training'
+    sys.stdout.flush()
     while(minibatch_index*batch_size<nb_max_exemples):
         
         for x, y in dataset.train(batch_size):
@@ -391,9 +393,7 @@
                 learning_rate_list.append(classifier.lr.value)
                 divergence_flag_list.append(divergence_flag)
 
-                #save temp results to check during training
-                numpy.savez('temp_results.npy',config=configuration,total_validation_error_list=total_validation_error_list,\
-                learning_rate_list=learning_rate_list, divergence_flag_list=divergence_flag_list)
+
                 
                 # compute the validation error
                 this_validation_loss = 0.
@@ -406,10 +406,15 @@
                 this_validation_loss /= temp
                 #save the validation loss
                 total_validation_error_list.append(this_validation_loss)
-                if verbose == 1:
-                    print(('epoch %i, minibatch %i, learning rate %f current validation error %f ') % 
-                                (epoch, minibatch_index+1,classifier.lr.value,
-                                this_validation_loss*100.))
+
+                print(('epoch %i, minibatch %i, learning rate %f current validation error %f ') %
+                    (epoch, minibatch_index+1, classifier.lr.value,
+                    this_validation_loss*100.))
+                sys.stdout.flush()
+
+                #save temp results to check during training
+                numpy.savez('temp_results.npy', config=configuration, total_validation_error_list=total_validation_error_list,\
+                learning_rate_list=learning_rate_list, divergence_flag_list=divergence_flag_list)
     
                 # if we got the best validation score until now
                 if this_validation_loss < best_validation_loss:
@@ -431,11 +436,12 @@
                         test_score += test_model(xt,yt)
                         temp = temp+1
                     test_score /= temp
-                    if verbose == 1:
-                        print(('epoch %i, minibatch %i, test error of best '
-                            'model %f %%') % 
-                                    (epoch, minibatch_index+1,
-                                    test_score*100.))
+
+                    print(('epoch %i, minibatch %i, test error of best '
+                        'model %f %%') %
+                            (epoch, minibatch_index+1,
+                            test_score*100.))
+                    sys.stdout.flush()
                                     
                 # if the validation error is going up, we are overfitting (or oscillating)
                 # check if we are allowed to continue and if we will adjust the learning rate
@@ -461,12 +467,13 @@
                         test_score += test_model(xt,yt)
                         temp=temp+1
                     test_score /= temp
-                    if verbose == 1:
-                        print ' validation error is going up, possibly stopping soon'
-                        print(('     epoch %i, minibatch %i, test error of best '
-                            'model %f %%') % 
-                                    (epoch, minibatch_index+1,
-                                    test_score*100.))
+
+                    print ' validation error is going up, possibly stopping soon'
+                    print(('     epoch %i, minibatch %i, test error of best '
+                        'model %f %%') % 
+                                (epoch, minibatch_index+1,
+                                test_score*100.))
+                    sys.stdout.flush()
                                     
                     
     
@@ -491,12 +498,13 @@
         # we have finished looping through the training set
         epoch = epoch+1
     end_time = time.clock()
-    if verbose == 1:
-        print(('Optimization complete. Best validation score of %f %% '
-            'obtained at iteration %i, with test performance %f %%') %  
-                    (best_validation_loss * 100., best_iter, test_score*100.))
-        print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
-        print minibatch_index
+
+    print(('Optimization complete. Best validation score of %f %% '
+        'obtained at iteration %i, with test performance %f %%') %  
+                (best_validation_loss * 100., best_iter, test_score*100.))
+    print ('The code ran for %f minutes' % ((end_time-start_time)/60.))
+    print minibatch_index
+    sys.stdout.flush()
         
     #save the model and the weights
     numpy.savez('model.npy', config=configuration, W1=classifier.W1.value,W2=classifier.W2.value, b1=classifier.b1.value,b2=classifier.b2.value)
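A caveat when retrieving the checkpoints written above: numpy.savez appends '.npz' to a filename that does not already end in it, so the file requested as 'temp_results.npy' actually lands on disk as 'temp_results.npy.npz' (and 'model.npy' as 'model.npy.npz'). A minimal loading sketch under that assumption:

    import numpy

    # numpy.savez added '.npz' to the requested name, so the checkpoint
    # must be loaded under the doubled extension it got on disk.
    results = numpy.load('temp_results.npy.npz')
    print results['total_validation_error_list']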