diff deep/stacked_dae/v_sylvain/sgd_optimization.py @ 336:a79db7cee035
Arranged so as to get a decent decreasing learning rate for NIST
| author | sylvainpl |
| --- | --- |
| date | Thu, 15 Apr 2010 14:41:00 -0400 |
| parents | c2331b8e4b89 |
| children | 625c0c3fcbdb |
--- a/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 15 12:53:03 2010 -0400
+++ b/deep/stacked_dae/v_sylvain/sgd_optimization.py	Thu Apr 15 14:41:00 2010 -0400
@@ -204,7 +204,7 @@
         parameters_finetune=[]
         if ind_test == 21:
-            learning_rate = self.hp.finetuning_lr / 10.0
+            learning_rate = self.hp.finetuning_lr / 5.0
         else:
             learning_rate = self.hp.finetuning_lr  #The initial finetune lr
@@ -295,7 +295,8 @@
                     break
             if decrease == 1:
-                learning_rate /= 2  #divide the learning rate by 2 for each new epoch
+                if (ind_test == 21 & epoch % 100 == 0) | ind_test == 20:
+                    learning_rate /= 2  #divide the learning rate by 2 for each new epoch of P07 (or 100 of NIST)
             self.series['params'].append((epoch,), self.classifier.all_params)
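
Note that the added condition uses the bitwise operators `&` and `|`. Because Python's comparison operators bind more loosely than `&`, `ind_test == 21 & epoch % 100 == 0` parses as a chained comparison against `21 & (epoch % 100)` rather than the conjunction the comment describes. A minimal sketch of the schedule the comment intends, using logical `and`/`or`, is shown below; the helper name `decayed_learning_rate` is hypothetical, and it assumes, as in the diff, that `ind_test == 20` is the P07 run and `ind_test == 21` the NIST run.

```python
def decayed_learning_rate(learning_rate, ind_test, epoch, decrease):
    """Sketch of the intended decay: halve the fine-tuning learning rate
    every epoch for P07 (ind_test == 20) and every 100 epochs for NIST
    (ind_test == 21). Illustrative only, not the committed code."""
    if decrease == 1:
        # Logical and/or so the comparisons group as the comment intends.
        if (ind_test == 21 and epoch % 100 == 0) or ind_test == 20:
            learning_rate /= 2
    return learning_rate
```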