ift6266: changeset 236:7be1f086a89e
added __init__.py to allow module loading of baseline
| author   | Myriam Cote <cotemyri@iro.umontreal.ca> |
|----------|-----------------------------------------|
| date     | Mon, 15 Mar 2010 09:22:52 -0400 |
| parents  | ecb69e17950b |
| children | 9b6e0af062af |
| files    | baseline/log_reg/log_reg.py |
| diffstat | 1 files changed, 12 insertions(+), 7 deletions(-) |
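The commit message refers to Python's package mechanism: a directory becomes importable as a package only once it contains an `__init__.py` marker file. Below is a minimal sketch of the layout this implies; the exact set of marker files is an assumption, since only `baseline/log_reg/log_reg.py` appears in the diffstat.

```python
# Hypothetical layout after this changeset (assumed, not shown in the diff):
#
#   baseline/__init__.py           # empty marker: makes 'baseline' a package
#   baseline/log_reg/__init__.py   # same for the 'log_reg' subpackage
#   baseline/log_reg/log_reg.py    # the module modified below
#
# With the markers in place, code elsewhere in the repository can do:
from baseline.log_reg.log_reg import log_reg, jobman_log_reg
```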
--- a/baseline/log_reg/log_reg.py	Sun Mar 14 20:25:12 2010 -0400
+++ b/baseline/log_reg/log_reg.py	Mon Mar 15 09:22:52 2010 -0400
@@ -145,6 +145,7 @@
              dataset=datasets.nist_digits, image_size = 32 * 32, nb_class = 10, \
              patience = 5000, patience_increase = 2, improvement_threshold = 0.995):
+    #28 * 28 = 784
     """
     Demonstrate stochastic gradient descent optimization of a log-linear model
@@ -296,20 +297,24 @@
           ( best_validation_loss * 100., test_score * 100.))
     print ('The code ran for %f minutes' % ((end_time-start_time) / 60.))
-    ###### return validation_error, test_error, nb_exemples, time
+    return best_validation_loss, test_score, iter*batch_size, (end_time-start_time) / 60.
 
 if __name__ == '__main__':
     log_reg()
 
 def jobman_log_reg(state, channel):
-    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate,\
-                                                                 nb_max_examples = state.nb_max_examples,\
-                                                                 batch_size = state.batch_size,\
-                                                                 dataset_name = state.dataset_name, \
+    print state
+    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate, \
+                                                                 nb_max_examples = state.nb_max_examples, \
+                                                                 batch_size = state.batch_size,\
+                                                                 dataset_name = state.dataset_name, \
                                                                  image_size = state.image_size, \
-                                                                 nb_class = state.nb_class )
-
+                                                                 nb_class = state.nb_class, \
+                                                                 patience = state.patience, \
+                                                                 patience_increase = state.patience_increase, \
+                                                                 improvement_threshold = state.improvement_threshold )
+    print state
     state.validation_error = validation_error
     state.test_error = test_error
     state.nb_exemples = nb_exemples
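As a usage note on the change above: `log_reg` now returns its four statistics instead of the commented-out placeholder, and `jobman_log_reg` forwards the early-stopping parameters (`patience`, `patience_increase`, `improvement_threshold`) from the jobman state. Below is a minimal sketch of driving the hook by hand, in the file's Python 2 style; `DummyState`, its field values, and the `None` channel are hypothetical stand-ins for what jobman would supply, and they assume only the portion of `jobman_log_reg` shown in this diff.

```python
# Hypothetical driver for jobman_log_reg (Python 2, matching the file).
# Every value below is an illustrative assumption, not repository code.
from baseline.log_reg.log_reg import jobman_log_reg

class DummyState(object):
    """Attribute bag standing in for a jobman state object."""
    learning_rate = 0.13
    nb_max_examples = 1000000
    batch_size = 50
    dataset_name = 'nist_digits'
    image_size = 32 * 32
    nb_class = 10
    patience = 5000
    patience_increase = 2
    improvement_threshold = 0.995

state = DummyState()
# channel is unused in the lines shown in this diff; a real jobman run
# supplies a channel object here.
jobman_log_reg(state, None)

# The hook copies log_reg's return values back onto the state:
print state.validation_error, state.test_error, state.nb_exemples
```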