Mercurial > ift6266
changeset 336:a79db7cee035
Set up a decent decreasing learning rate for NIST
| author | sylvainpl |
| --- | --- |
| date | Thu, 15 Apr 2010 14:41:00 -0400 |
| parents | 5ddb1878dfbc |
| children | 8d116d4a7593 8cf52a1c8055 |
| files | deep/stacked_dae/v_sylvain/sgd_optimization.py |
| diffstat | 1 files changed, 3 insertions(+), 2 deletions(-) |
```diff
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 15 12:53:03 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 15 14:41:00 2010 -0400
@@ -204,7 +204,7 @@
         parameters_finetune=[]
         if ind_test == 21:
-            learning_rate = self.hp.finetuning_lr / 10.0
+            learning_rate = self.hp.finetuning_lr / 5.0
         else:
             learning_rate = self.hp.finetuning_lr  #The initial finetune lr
@@ -295,7 +295,8 @@
                     break
             if decrease == 1:
-                learning_rate /= 2 #divide the learning rate by 2 for each new epoch
+                if (ind_test == 21 & epoch % 100 == 0) | ind_test == 20:
+                    learning_rate /= 2 #divide the learning rate by 2 for each new epoch of P07 (or 100 of NIST)
             self.series['params'].append((epoch,), self.classifier.all_params)
```
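For context, below is a minimal sketch of the decay schedule this change appears to aim for: halve the fine-tuning learning rate every epoch on P07, but only every 100 epochs on NIST (assuming, per the in-line comment, that `ind_test == 20` marks the P07 run and `ind_test == 21` the NIST run). Note that `&` and `|` are Python's bitwise operators and bind more tightly than `==`, so the committed condition does not group the way the comment reads; the sketch uses `and`/`or` to express the apparent intent. The helper name `next_learning_rate` is hypothetical, not part of the repository's code.

```python
# Hypothetical helper sketching the intended schedule; not the repository's API.
def next_learning_rate(learning_rate, epoch, ind_test, decrease):
    """Halve the rate every epoch on P07 (ind_test == 20),
    or every 100 epochs on NIST (ind_test == 21)."""
    if decrease == 1:
        # 'and'/'or' make the grouping explicit; the committed line used
        # bitwise '&'/'|', which Python evaluates before '=='.
        if (ind_test == 21 and epoch % 100 == 0) or ind_test == 20:
            learning_rate /= 2
    return learning_rate

# Example: a NIST run starting at finetuning_lr / 5.0 keeps its rate until
# epoch 100, halves it there, then again at 200, 300, and so on.
lr = 0.1 / 5.0
for epoch in range(1, 301):
    lr = next_learning_rate(lr, epoch, ind_test=21, decrease=1)
```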