Mercurial > pylearn
comparison pylearn/algorithms/mcRBM.py @ 1322:cdda4f98c2a2
mcRBM - added mask for updates to P matrix
author | James Bergstra <bergstrj@iro.umontreal.ca> |
---|---|
date | Sun, 10 Oct 2010 13:45:21 -0400 |
parents | 4fa2a32e8fde |
children | c8c30c675a4f |
comparison
equal
deleted
inserted
replaced
1321:ebcb76b38817 | 1322:cdda4f98c2a2 |
---|---|
652 else: | 652 else: |
653 p_lr = None | 653 p_lr = None |
654 rval = cls.alloc(rbm, visible_batch, batchsize, initial_lr_per_example, rng, l1_penalty, | 654 rval = cls.alloc(rbm, visible_batch, batchsize, initial_lr_per_example, rng, l1_penalty, |
655 l1_penalty_start, learn_rate_multipliers, lr_anneal_start, persistent_chains) | 655 l1_penalty_start, learn_rate_multipliers, lr_anneal_start, persistent_chains) |
656 | 656 |
657 rval.p_mask = sharedX((rbm.P.value!=0).astype('float32'), 'p_mask') | |
658 | |
657 rval.p_lr = p_lr | 659 rval.p_lr = p_lr |
658 rval.p_training_start=p_training_start | 660 rval.p_training_start=p_training_start |
659 rval.p_training_lr=p_training_lr | 661 rval.p_training_lr=p_training_lr |
660 return rval | 662 return rval |
661 | 663 |
793 | 795 |
794 if getattr(self,'p_lr', None): | 796 if getattr(self,'p_lr', None): |
795 ups[self.p_lr] = TT.switch(self.iter > self.p_training_start, | 797 ups[self.p_lr] = TT.switch(self.iter > self.p_training_start, |
796 self.p_training_lr, | 798 self.p_training_lr, |
797 0) | 799 0) |
798 ups[self.rbm.P] = TT.clip(ups[self.rbm.P], -5, 0) | 800 new_P = ups[self.rbm.P] * self.p_mask |
801 no_pos_P = TT.switch(new_P<0, new_P, 0) | |
802 ups[self.rbm.P] = - no_pos_P / no_pos_P.sum(axis=0) #normalize so that columns sum to 1 | 
799 | 803 |
800 return ups | 804 return ups |
801 | 805 |