# HG changeset patch
# User Myriam Cote
# Date 1268659372 14400
# Node ID 7be1f086a89e04dbf35d8f42afb6bbaa0969fe5d
# Parent ecb69e17950b11e7c8a36791c423619721dcce0d
added __init__.py to allow module loading of baseline

diff -r ecb69e17950b -r 7be1f086a89e baseline/log_reg/log_reg.py
--- a/baseline/log_reg/log_reg.py	Sun Mar 14 20:25:12 2010 -0400
+++ b/baseline/log_reg/log_reg.py	Mon Mar 15 09:22:52 2010 -0400
@@ -145,6 +145,7 @@
              dataset=datasets.nist_digits, image_size = 32 * 32, nb_class = 10, \
              patience = 5000, patience_increase = 2, improvement_threshold = 0.995):
+    #28 * 28 = 784
 
     """
     Demonstrate stochastic gradient descent optimization of a log-linear model
 
@@ -296,20 +297,24 @@
                  ( best_validation_loss * 100., test_score * 100.))
     print ('The code ran for %f minutes' % ((end_time-start_time) / 60.))
 
-    ######  return validation_error, test_error, nb_exemples, time
+    return best_validation_loss, test_score, iter*batch_size, (end_time-start_time) / 60.
 
 if __name__ == '__main__':
     log_reg()
 
 def jobman_log_reg(state, channel):
-    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate,\
-                                                                 nb_max_examples = state.nb_max_examples,\
-                                                                 batch_size = state.batch_size,\
-                                                                 dataset_name = state.dataset_name, \
+    print state
+    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate, \
+                                                                 nb_max_examples = state.nb_max_examples, \
+                                                                 batch_size = state.batch_size,\
+                                                                 dataset_name = state.dataset_name, \
                                                                  image_size = state.image_size, \
-                                                                 nb_class = state.nb_class )
-
+                                                                 nb_class = state.nb_class, \
+                                                                 patience = state.patience, \
+                                                                 patience_increase = state.patience_increase, \
+                                                                 improvement_threshold = state.improvement_threshold )
+    print state
 
     state.validation_error = validation_error
     state.test_error = test_error
     state.nb_exemples = nb_exemples
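
A minimal sketch of how the updated jobman_log_reg entry point could be exercised by hand, given the package imports the new __init__.py enables. DummyState, the placeholder hyperparameter values, and the None channel are illustrative assumptions only (channel is unused in the lines shown above); this is not part of the patch or of jobman's API:

    # Hypothetical smoke test for jobman_log_reg; assumes baseline/log_reg/
    # is importable as a package thanks to the added __init__.py files.
    from baseline.log_reg.log_reg import jobman_log_reg

    class DummyState(dict):
        # Stand-in for jobman's attribute-style state object.
        def __getattr__(self, name):
            return self[name]
        def __setattr__(self, name, value):
            self[name] = value

    # Placeholder values, chosen only to fill every field the wrapper reads.
    state = DummyState(learning_rate=0.13, nb_max_examples=1000000,
                       batch_size=600, dataset_name='nist_digits',
                       image_size=32 * 32, nb_class=10, patience=5000,
                       patience_increase=2, improvement_threshold=0.995)
    jobman_log_reg(state, channel=None)
    print state.validation_error, state.test_error, state.nb_exemples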