comparison baseline/log_reg/log_reg.py @ 236:7be1f086a89e

added __init__.py to allow module loading of baseline
author Myriam Cote <cotemyri@iro.umontreal.ca>
date Mon, 15 Mar 2010 09:22:52 -0400
parents 777f48ba30df
children c24020aa38ac
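
The description above refers to adding an __init__.py so that the baseline code can be loaded as a regular Python package. As a rough sketch only (the directory layout is inferred from the path of the file being compared, baseline/log_reg/log_reg.py, and the contents of the added __init__.py are not shown in this changeset), the import below is the kind of module loading the change enables:

# Assumed layout, inferred from the path of the compared file (hypothetical):
#
#   baseline/
#       __init__.py          # package marker; this changeset adds such a file (may be empty)
#       log_reg/
#           __init__.py
#           log_reg.py
#
# With the package markers in place, the trainer can be imported from
# anywhere on the Python path:
from baseline.log_reg.log_reg import log_reg, jobman_log_reg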
--- a/baseline/log_reg/log_reg.py (235:ecb69e17950b)
+++ b/baseline/log_reg/log_reg.py (236:7be1f086a89e)
@@ -143,10 +143,11 @@
 
 def log_reg( learning_rate = 0.13, nb_max_examples =1000000, batch_size = 50, \
              dataset=datasets.nist_digits, image_size = 32 * 32, nb_class = 10, \
              patience = 5000, patience_increase = 2, improvement_threshold = 0.995):
 
+    #28 * 28 = 784
     """
     Demonstrate stochastic gradient descent optimization of a log-linear
     model
 
     This is demonstrated on MNIST.
@@ -294,24 +295,28 @@
     print(('Optimization complete with best validation score of %f %%,'
            'with test performance %f %%') %
                  ( best_validation_loss * 100., test_score * 100.))
     print ('The code ran for %f minutes' % ((end_time-start_time) / 60.))
 
-    ###### return validation_error, test_error, nb_exemples, time
+    return best_validation_loss, test_score, iter*batch_size, (end_time-start_time) / 60.
 
 if __name__ == '__main__':
     log_reg()
 
 
 def jobman_log_reg(state, channel):
-    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate,\
-                                                                 nb_max_examples = state.nb_max_examples,\
-                                                                 batch_size = state.batch_size,\
-                                                                 dataset_name = state.dataset_name, \
-                                                                 image_size = state.image_size, \
-                                                                 nb_class = state.nb_class )
-
+    print state
+    (validation_error, test_error, nb_exemples, time) = log_reg( learning_rate = state.learning_rate, \
+                                                                 nb_max_examples = state.nb_max_examples, \
+                                                                 batch_size = state.batch_size,\
+                                                                 dataset_name = state.dataset_name, \
+                                                                 image_size = state.image_size, \
+                                                                 nb_class = state.nb_class, \
+                                                                 patience = state.patience, \
+                                                                 patience_increase = state.patience_increase, \
+                                                                 improvement_threshold = state.improvement_threshold )
+    print state
    state.validation_error = validation_error
    state.test_error = test_error
    state.nb_exemples = nb_exemples
    state.time = time
    return channel.COMPLETE
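
The substantive change in this revision is that log_reg now returns its results (best validation loss, test score, number of examples seen, and elapsed minutes) where the previous revision left the return commented out; jobman_log_reg then records those values on the jobman state and forwards the early-stopping parameters (patience, patience_increase, improvement_threshold). Below is a minimal sketch of consuming the new return value directly, assuming the package import shown earlier and the default NIST-digits dataset wired into the function signature; the argument values simply repeat the defaults from that signature:

from baseline.log_reg.log_reg import log_reg

# The four values mirror the new return statement at line 300 of the
# revised file: best validation loss, test score, examples seen
# (iterations * batch size), and wall-clock minutes.
best_valid, test_score, nb_examples, minutes = log_reg(learning_rate=0.13,
                                                       batch_size=50,
                                                       patience=5000,
                                                       patience_increase=2,
                                                       improvement_threshold=0.995)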