comparison mlp_factory_approach.py @ 211:bd728c83faff

in __get__: fixed a problem when i.stop was None (i being the slice) by adding one line that replaces None with len(self)
author Thierry Bertin-Mahieux <bertinmt@iro.umontreal.ca>
date Wed, 21 May 2008 17:39:30 -0400
parents bf320808919f
children 9b57ea8c767f
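
The message above describes a one-line fix to slice handling in a __get__ method. A minimal sketch of the described behavior, assuming a hypothetical sequence-like dataset class (not the actual code from this changeset):

    class SliceableDataset(object):
        def __init__(self, rows):
            self._rows = rows
        def __len__(self):
            return len(self._rows)
        def __getitem__(self, i):
            if isinstance(i, slice):
                stop = i.stop
                if stop is None:          # e.g. d[5:] arrives with stop == None
                    stop = len(self)      # the one-line fix described above
                return self._rows[i.start or 0:stop]
            return self._rows[i]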
comparing 210:ffd50efefb70 with 211:bd728c83faff
@@ -3,10 +3,11 @@
 
 import theano
 from theano import tensor as t
 
 from tlearn import dataset, nnet_ops, stopper
+
 
 def _randshape(*shape):
     return (numpy.random.rand(*shape) -0.5) * 0.001
 
 def _cache(d, key, valfn):
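
_randshape draws uniform values in (-0.0005, 0.0005), a small symmetric weight initialization around zero. A quick check of what it computes:

    import numpy
    w = (numpy.random.rand(2, 3) - 0.5) * 0.001   # what _randshape(2, 3) returns
    assert w.shape == (2, 3)
    assert abs(w).max() <= 0.0005                 # small weights centered on zero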
@@ -29,11 +30,12 @@
     def update(self, input_target):
         """Update this model from more training data."""
         params = self.params
         #TODO: why should we have to unpack target like this?
         for input, target in input_target:
-            self.update_fn(input, target[:,0], *params)
+            rval= self.update_fn(input, target[:,0], *params)
+            print rval[0]
 
     def __call__(self, testset, fieldnames=['output_class']):
         """Apply this model (as a function) to new data"""
         #TODO: cache fn between calls
         assert 'input' == testset.fieldNames()[0]
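
The TODO in update() asks why target must be unpacked; the minibatch targets evidently arrive as an (n,1) column matrix, and target[:,0] flattens it to the vector update_fn expects. A quick illustration:

    import numpy
    target = numpy.zeros((4, 1))   # targets arrive as an (n,1) column
    print target.shape             # (4, 1)
    print target[:,0].shape        # (4,) -- the flat vector passed to update_fn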
@@ -100,11 +102,11 @@
     def _fn(self, inputs, outputs):
         # Caching here would hamper multi-threaded apps
         # prefer caching in _Model.__call__
         return theano.function(inputs, outputs, unpack_single=False, linker=self.linker)
 
-    def __call__(self, trainset=None, iparams=None):
+    def __call__(self, trainset=None, iparams=None, input='input', target='target'):
         """Allocate and optionally train a model"""
         if iparams is None:
             iparams = [_randshape(self.nhid, self.nclass), _randshape(self.nclass)]\
                 + self.v.hid_ivals()
         rval = _Model(self, iparams)
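
Note the trade-off stated in _fn's comments: compiling a fresh theano.function on every call costs compilation time but avoids sharing compiled functions across threads; the comments suggest any caching belongs in _Model.__call__ instead.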
@@ -117,12 +119,13 @@
             minset = trainset[:nmin] #real training set for minimizing loss
             valset = trainset[nmin:] #validation set for early stopping
             best = rval
             for stp in self.early_stopper():
                 rval.update(
-                    trainset.minibatches(['input', 'target'], minibatch_size=min(32,
+                    minset.minibatches([input, target], minibatch_size=min(32,
                         len(trainset))))
+                print 'mlp.__call__(), we did an update'
                 if stp.set_score:
                     stp.score = rval(valset, ['loss_01'])
                     if (stp.score < stp.best_score):
                         best = copy.copy(rval)
             rval = best
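
The substitution of minset for trainset is the substantive fix in this hunk: the old code trained on all of trainset, including the valset tail used for early stopping, so validation data leaked into training. The loop itself is a standard early-stopping pattern; a distilled sketch, assuming the stopper yields step objects with set_score, score, and best_score attributes (as stopper.NStages below appears to provide):

    import copy

    def fit_with_early_stopping(model, early_stopper, minset, valset, input, target):
        # distilled from the loop above; all arguments come from the surrounding code
        best = model
        for stp in early_stopper():
            model.update(minset.minibatches([input, target], minibatch_size=32))
            if stp.set_score:                    # stopper requests a validation score
                stp.score = model(valset, ['loss_01'])
                if stp.score < stp.best_score:   # new best so far: snapshot it
                    best = copy.copy(model)
        return best                              # best snapshot, not the final state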
@@ -152,11 +155,11 @@
 
     learn_algo = MultiLayerPerceptron(2, 10, 2, .1
             , linker='c&py'
             , early_stopper = lambda:stopper.NStages(100,1))
 
-    model1 = learn_algo(training_set1)
+    model1 = learn_algo(training_set1,input='input',target='target')
 
     model2 = learn_algo(training_set2)
 
     n_match = 0
     for o1, o2 in zip(model1(test_data), model2(test_data)):
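
With the new keyword arguments, a model can be trained on a dataset whose fields are not literally named 'input' and 'target'; for example (hypothetical dataset and field names):

    model = learn_algo(my_trainset, input='x', target='y')   # my_trainset has fields 'x' and 'y'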