changeset 792:961dc1a7921b
added a save and load mechanism and fixed a bug in DAA_inputs_groups
author   | Xavier Glorot <glorotxa@iro.umontreal.ca>
date     | Fri, 10 Jul 2009 18:14:36 -0400
parents  | 166a89917669
children | 4e70f509ec01
files    | pylearn/algorithms/sandbox/DAA_inputs_groups.py
diffstat | 1 files changed, 90 insertions(+), 13 deletions(-)
--- a/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Fri Jul 10 16:56:52 2009 -0400
+++ b/pylearn/algorithms/sandbox/DAA_inputs_groups.py	Fri Jul 10 18:14:36 2009 -0400
@@ -10,6 +10,25 @@
 from pylearn.algorithms.logistic_regression import LogRegN
+from pylearn.io import filetensor
+import os
+
+# saving loading utils--------------------------------------------
+def save_mat(fname, mat, save_dir=''):
+    assert isinstance(mat, numpy.ndarray)
+    print 'save ndarray to file: ', fname
+    file_handle = open(os.path.join(save_dir, fname), 'w')
+    filetensor.write(file_handle, mat)
+    file_handle.close()
+
+def load_mat(fname, save_dir=''):
+    print 'loading ndarray from file: ', fname
+    file_handle = open(os.path.join(save_dir,fname), 'r')
+    rval = filetensor.read(file_handle)
+    file_handle.close()
+    return rval
+
+
 # Initialize containers:
 class CreateContainer:
     pass
@@ -436,11 +455,11 @@
         auxin_size = auxin_size
         self.n_hid = listify(n_hid,depth)
         self.regularize = regularize
-        self.tie_weights = listify(tie_weights,depth)
-        self.hid_fn = listify(hid_fn,depth)
-        self.rec_fn = listify(rec_fn,depth)
-        self.reconstruction_cost_function = listify(reconstruction_cost_function,depth)
-        self.scale_cost = listify(scale_cost,depth)
+        tie_weights = listify(tie_weights,depth)
+        hid_fn = listify(hid_fn,depth)
+        rec_fn = listify(rec_fn,depth)
+        reconstruction_cost_function = listify(reconstruction_cost_function,depth)
+        scale_cost = listify(scale_cost,depth)
         self.n_out = n_out
         self.target = target if target is not None else T.lvector('target')
         self.debugmethod = debugmethod
@@ -457,11 +476,11 @@
         print '\tauxin_size = ', auxin_size
         print '\tn_hid = ', self.n_hid
         print '\tregularize = ', self.regularize
-        print '\ttie_weights = ', self.tie_weights
-        print '\thid_fn = ', self.hid_fn
-        print '\trec_fn = ', self.rec_fn
-        print '\treconstruction_cost_function = ', self.reconstruction_cost_function
-        print '\tscale_cost = ', self.scale_cost
+        print '\ttie_weights = ', tie_weights
+        print '\thid_fn = ', hid_fn
+        print '\trec_fn = ', rec_fn
+        print '\treconstruction_cost_function = ', reconstruction_cost_function
+        print '\tscale_cost = ', scale_cost
         print '\tn_out = ', self.n_out
 
         # init for model construction
@@ -516,9 +535,9 @@
         for i in range(self.depth):
             dict_params = dict(input = inputprec, in_size = in_sizeprec, auxin_size = auxin_size[i],
-                    n_hid = self.n_hid[i], regularize = False, tie_weights = self.tie_weights[i], hid_fn = self.hid_fn[i],
-                    rec_fn = self.rec_fn[i], reconstruction_cost_function = self.reconstruction_cost_function[i],
-                    scale_cost = self.scale_cost[i], interface = False, ignore_missing = self.ignore_missing,
+                    n_hid = self.n_hid[i], regularize = False, tie_weights = tie_weights[i], hid_fn = hid_fn[i],
+                    rec_fn = rec_fn[i], reconstruction_cost_function = reconstruction_cost_function[i],
+                    scale_cost = scale_cost[i], interface = False, ignore_missing = self.ignore_missing,
                     reconstruct_missing = self.reconstruct_missing,corruption_pattern = self.corruption_pattern)
             if auxin_size[i] is None:
                 offset +=1
@@ -671,3 +690,61 @@
                 low = -1/numpy.sqrt(inst.daaig[-2].n_hid), high = 1/numpy.sqrt(inst.daaig[-2].n_hid))
         inst.daaig[-1].l1 = 0
         inst.daaig[-1].l2 = reg_coef #only l2 norm for regularisation to be consitent with the unsup regularisation
+
+    def _instance_save(self,inst,save_dir=''):
+
+        for i in range(self.depth):
+            save_mat('benc%s.ft'%(i) ,inst.daaig[i].benc, save_dir)
+
+            if self.daaig[i].auxinput is not None:
+                for j in range(len(inst.daaig[i].wauxenc)):
+                    save_mat('wauxenc%s_%s.ft'%(i,j) ,inst.daaig[i].wauxenc[j], save_dir)
+                    save_mat('bauxdec%s_%s.ft'%(i,j) ,inst.daaig[i].bauxdec[j], save_dir)
+
+            if self.daaig[i].input is not None:
+                save_mat('wenc%s.ft'%(i) ,inst.daaig[i].wenc, save_dir)
+                save_mat('bdec%s.ft'%(i) ,inst.daaig[i].bdec, save_dir)
+
+            if not self.daaig[i].tie_weights:
+                if self.daaig[i].auxinput is not None:
+                    for j in range(len(inst.daaig[i].wauxdec)):
+                        save_mat('wauxdec%s_%s.ft'%(i,j) ,inst.daaig[i].wauxdec[j], save_dir)
+
+                if self.daaig[i].input is not None:
+                    save_mat('wdec%s.ft'%(i) ,inst.daaig[i].wdec, save_dir)
+        i=i+1
+        save_mat('wenc%s.ft'%(i) ,inst.daaig[i].w, save_dir)
+        save_mat('benc%s.ft'%(i) ,inst.daaig[i].b, save_dir)
+
+    def _instance_load(self,inst,save_dir='',coef = None, Sup_layer = None):
+
+        if coef is None:
+            coef = [1]*self.depth
+
+        for i in range(self.depth):
+            inst.daaig[i].benc = load_mat('benc%s.ft'%(i), save_dir)/coef[i]
+
+            if self.daaig[i].auxinput is not None:
+                for j in range(len(inst.daaig[i].wauxenc)):
+                    inst.daaig[i].wauxenc[j] = load_mat('wauxenc%s_%s.ft'%(i,j),save_dir)/coef[i]
+                    inst.daaig[i].bauxdec[j] = load_mat('bauxdec%s_%s.ft'%(i,j),save_dir)/coef[i]
+
+            if self.daaig[i].input is not None:
+                inst.daaig[i].wenc = load_mat('wenc%s.ft'%(i),save_dir)/coef[i]
+                inst.daaig[i].bdec = load_mat('bdec%s.ft'%(i),save_dir)/coef[i]
+
+            if not self.daaig[i].tie_weights:
+                if self.daaig[i].auxinput is not None:
+                    for j in range(len(inst.daaig[i].wauxdec)):
+                        inst.daaig[i].wauxdec[j] = load_mat('wauxdec%s_%s.ft'%(i,j),save_dir)/coef[i]
+
+                if self.daaig[i].input is not None:
+                    inst.daaig[i].wdec = load_mat('wdec%s.ft'%(i),save_dir)/coef[i]
+        i=i+1
+        if Sup_layer is None:
+            inst.daaig[i].w = load_mat('wenc%s.ft'%(i),save_dir)
+            inst.daaig[i].b = load_mat('benc%s.ft'%(i),save_dir)
+        else:
+            inst.daaig[i].w = load_mat('wenc%s.ft'%(Sup_layer),save_dir)
+            inst.daaig[i].b = load_mat('benc%s.ft'%(Sup_layer),save_dir)
+
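
For context, here is a minimal usage sketch of the helpers introduced by this changeset (not part of the diff). It assumes the revision above is applied and that `pylearn.io.filetensor` is importable; the file names, the `/tmp` paths, and the `model`/`inst` pair mentioned in the comments are hypothetical placeholders.

```python
# Hypothetical round-trip sketch for the save/load helpers added in this changeset.
# Assumes the changeset is applied; paths and file names below are illustrative only.
import numpy
from pylearn.algorithms.sandbox.DAA_inputs_groups import save_mat, load_mat

# Save a parameter matrix to a .ft (filetensor) file and read it back.
w = numpy.random.uniform(low=-1, high=1, size=(10, 5)).astype('float32')
save_mat('w_test.ft', w, save_dir='/tmp')        # writes /tmp/w_test.ft via filetensor.write
w_back = load_mat('w_test.ft', save_dir='/tmp')  # reads it back via filetensor.read
assert numpy.allclose(w, w_back)

# For a built StackedDAAig module and instance (here called `model` and `inst`,
# both hypothetical), the new methods dump or restore every layer's parameters:
#   model._instance_save(inst, save_dir='/tmp/daa_params/')
#   model._instance_load(inst, save_dir='/tmp/daa_params/', coef=None)
# On load, each unsupervised layer's parameters are divided by the matching entry
# of `coef` (defaults to 1 per layer); `Sup_layer`, when given, selects which saved
# layer's encoder weights initialise the final supervised layer instead of its own.
```
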