# HG changeset patch
# User Olivier Delalleau
# Date 1243968043 14400
# Node ID 2a30f22ef7ffe4835e41fe139bb40e8078b40f92
# Parent 84d22b7d835aa2626b9a10f2f780fe62cc41454b
Fixed bug introduced in previous change meant to improve numerical stability

diff -r 84d22b7d835a -r 2a30f22ef7ff pylearn/algorithms/sandbox/DAA_inputs_groups.py
--- a/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Tue Jun 02 13:51:22 2009 -0400
+++ b/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Tue Jun 02 14:40:43 2009 -0400
@@ -195,13 +195,7 @@
         container.hidden = self.hid_fn(container.hidden_activation)
         self.define_propdown(container, idx_list , auxinput)
         container.rec = self.hid_fn(container.rec_activation)
-        if (self.ignore_missing is not None and self.input is not None and not
-                self.reconstruct_missing):
-            # Apply mask to gradient to ensure we do not backpropagate on the
-            # cost computed on missing inputs (that were replaced with zeros).
-            container.rec = mask_gradient(container.rec,
-                    self.input_missing_mask)
-
+
     def define_propup(self, container, input, idx_list, auxinput):
         if self.input is not None:
             container.hidden_activation = self.filter_up(input, self.wenc, self.benc)
@@ -226,7 +220,14 @@
                 container.rec_activation = rec_activation1
             else:
                 container.rec_activation = rec_activation2
-
+
+        if (self.ignore_missing is not None and self.input is not None and not
+                self.reconstruct_missing):
+            # Apply mask to gradient to ensure we do not backpropagate on the
+            # cost computed on missing inputs (that have been imputed).
+            container.rec_activation = mask_gradient(container.rec_activation,
+                    self.input_missing_mask)
+
     def filter_up(self, vis, w, b=None):
         out = T.dot(vis, w)
         return out + b if b else out
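
Note (not part of the patch): the diff moves the gradient masking from the post-nonlinearity reconstruction (container.rec) into define_propdown, so the mask is now applied to the pre-nonlinearity rec_activation, which the earlier numerical-stability change presumably made the cost depend on directly. The snippet below is only a hypothetical sketch of what a mask_gradient-style helper is meant to achieve, not pylearn's actual implementation: it uses theano.gradient.disconnected_grad, and the names mask_gradient_sketch and missing_mask (assumed to be 1 where an input was missing) are made up for illustration and may not match input_missing_mask's convention.

# Hypothetical sketch only: the forward value is unchanged, but the gradient
# is blocked on entries flagged as missing, so their reconstruction cost
# cannot backpropagate. Only mask_gradient(x, mask) appears in the patch.
import numpy
import theano
import theano.tensor as T
from theano.gradient import disconnected_grad

def mask_gradient_sketch(x, missing_mask):
    # missing_mask is assumed to be 1 where the input was missing, 0 elsewhere.
    observed = 1 - missing_mask
    # Same forward value as x, but no gradient flows through the missing part.
    return observed * x + missing_mask * disconnected_grad(x)

rec_activation = T.matrix('rec_activation')
missing_mask = T.matrix('missing_mask')
target = T.matrix('target')

masked = mask_gradient_sketch(rec_activation, missing_mask)
cost = ((masked - target) ** 2).sum()
grad = theano.function([rec_activation, missing_mask, target],
                       T.grad(cost, rec_activation))

floatX = theano.config.floatX
g = grad(numpy.ones((2, 3), dtype=floatX),
         numpy.array([[0, 1, 0], [1, 0, 0]], dtype=floatX),
         numpy.zeros((2, 3), dtype=floatX))
# g is non-zero only where missing_mask is 0, i.e. on observed entries.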