changeset 746:6117969dd37f
Added option to allow one to try and reconstruct originally missing inputs
author     Olivier Delalleau <delallea@iro>
date       Tue, 02 Jun 2009 11:31:50 -0400
parents    fc85ce33b518
children   5818682b5489 863e34a3d01e
files      pylearn/algorithms/sandbox/DAA_inputs_groups.py
diffstat   1 files changed, 11 insertions(+), 5 deletions(-)
--- a/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Tue Jun 02 10:14:00 2009 -0400
+++ b/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Tue Jun 02 11:31:50 2009 -0400
@@ -47,7 +47,7 @@
                  in_size=None, auxin_size= None, n_hid=1,
                  regularize = False, tie_weights = False, hid_fn = 'sigmoid_act',
                  reconstruction_cost_function=cost.cross_entropy, interface = True,
-                 ignore_missing=None,
+                 ignore_missing=None, reconstruct_missing=False,
                  **init):
         """
         :param regularize: WRITEME
@@ -67,6 +67,8 @@
             auxilary ones (that should never contain missing values). In fact,
             in the current implementation, auxiliary inputs cannot be used when
             this option is True.
+        :param reconstruct_missing: if True, then the reconstruction cost on
+            missing inputs will be backpropagated. Otherwise, it will not.
         :todo: Default noise level for all daa levels
         """
         print '\t\t**** DAAig.__init__ ****'
@@ -88,6 +90,7 @@
         self.reconstruction_cost_function = reconstruction_cost_function
         self.interface = interface
         self.ignore_missing = ignore_missing
+        self.reconstruct_missing = reconstruct_missing

         assert hid_fn in ('sigmoid_act','tanh_act','softsign_act')
         self.hid_fn = eval(hid_fn)
@@ -96,7 +99,7 @@
         self.input = input
         if self.ignore_missing is not None and self.input is not None:
             no_missing = FillMissing(self.ignore_missing)(self.input)
-            self.input = no_missing[0] # Missing values replaced by zeros.
+            self.input = no_missing[0] # With missing values replaced.
             self.input_missing_mask = no_missing[1] # Missingness pattern.
         else:
             self.input_missing_mask = None
@@ -155,7 +158,8 @@
         container.hidden = self.hid_fn(container.hidden_activation)
         self.define_propdown(container, idx_list , auxinput)
         container.rec = self.hid_fn(container.rec_activation)
-        if self.ignore_missing is not None and self.input is not None:
+        if (self.ignore_missing is not None and self.input is not None and not
+                self.reconstruct_missing):
             # Apply mask to gradient to ensure we do not backpropagate on the
             # cost computed on missing inputs (that were replaced with zeros).
             container.rec = mask_gradient(container.rec,
@@ -343,7 +347,7 @@
                  regularize = False, tie_weights = False, hid_fn = 'sigmoid_act',
                  reconstruction_cost_function=cost.cross_entropy, n_out = 2,
                  target = None, debugmethod = False, totalupdatebool=False,
-                 ignore_missing=None,
+                 ignore_missing=None, reconstruct_missing=False,
                  **init):

         super(StackedDAAig, self).__init__()
@@ -370,6 +374,7 @@
         self.debugmethod = debugmethod
         self.totalupdatebool = totalupdatebool
         self.ignore_missing = ignore_missing
+        self.reconstruct_missing = reconstruct_missing

         # init for model construction
         inputprec = input
@@ -435,7 +440,8 @@
                 param = [inputprec, self.auxinput[i-offset], in_sizeprec, auxin_size[i], self.n_hid[i],\
                         False, self.tie_weights, self.hid_fn, self.reconstruction_cost_function,False]
-                dict_params = dict(ignore_missing = self.ignore_missing)
+                dict_params = dict(ignore_missing = self.ignore_missing,
+                        reconstruct_missing = self.reconstruct_missing)
                 print '\tLayer init= ', i+1
                 self.daaig[i] = DAAig(*param, **dict_params)
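To illustrate the behaviour controlled by the new flag, here is a minimal, self-contained numpy sketch rather than the actual Theano-based pylearn code: the reconstruction error of originally missing inputs is dropped from the cost unless reconstruct_missing is True, mirroring the effect of the mask_gradient call in the diff above. The function name reconstruction_cost and the squared-error cost are illustrative stand-ins, not part of the module.

    import numpy as np

    def reconstruction_cost(x, rec, missing_mask, reconstruct_missing=False):
        # x            -- input with missing entries already filled in (e.g. zeros)
        # rec          -- reconstruction produced by the autoencoder
        # missing_mask -- 1 where the input was originally missing, 0 elsewhere
        err = (rec - x) ** 2  # squared error stands in for cost.cross_entropy
        if not reconstruct_missing:
            # Mirror mask_gradient: originally missing inputs contribute no cost,
            # so no gradient would be backpropagated through their reconstruction.
            err = err * (1 - missing_mask)
        return err.sum()

    # Example: two inputs, the second one originally missing (filled with 0).
    x = np.array([0.8, 0.0])
    rec = np.array([0.7, 0.4])
    mask = np.array([0.0, 1.0])
    print(reconstruction_cost(x, rec, mask))                             # -> 0.01
    print(reconstruction_cost(x, rec, mask, reconstruct_missing=True))   # -> 0.17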