pylearn: annotate mlp_factory_approach.py @ 208:bf320808919f
back to James' version

author    Yoshua Bengio <bengioy@iro.umontreal.ca>
date      Fri, 16 May 2008 16:39:01 -0400
parents   c5a7105fa40b
children  bd728c83faff
import copy, sys
import numpy

import theano
from theano import tensor as t

from tlearn import dataset, nnet_ops, stopper

def _randshape(*shape):
    return (numpy.random.rand(*shape) - 0.5) * 0.001

def _cache(d, key, valfn):
    #valfn() is only evaluated if key isn't in dictionary d
    if key not in d:
        d[key] = valfn()
    return d[key]

class _Model(object):
    def __init__(self, algo, params):
        self.algo = algo
        self.params = params
        v = algo.v
        self.update_fn = algo._fn([v.input, v.target] + v.params, [v.nll] + v.new_params)
        self._fn_cache = {}

    def __copy__(self):
        return _Model(self.algo, [copy.copy(p) for p in self.params])

    def update(self, input_target):
        """Update this model from more training data."""
        params = self.params
        #TODO: why should we have to unpack target like this?
        for input, target in input_target:
            self.update_fn(input, target[:,0], *params)

    def __call__(self, testset, fieldnames=['output_class']):
        """Apply this model (as a function) to new data"""
        #TODO: cache fn between calls
        assert 'input' == testset.fieldNames()[0]
        assert len(testset.fieldNames()) <= 2
        v = self.algo.v
        outputs = [getattr(v, name) for name in fieldnames]
        inputs = [v.input] + ([v.target] if 'target' in testset else [])
        inputs.extend(v.params)
        theano_fn = _cache(self._fn_cache, (tuple(inputs), tuple(outputs)),
                lambda: self.algo._fn(inputs, outputs))
        lambda_fn = lambda *args: theano_fn(*(list(args) + self.params))
        return dataset.ApplyFunctionDataSet(testset, lambda_fn, fieldnames)

class AutonameVars(object):
    def __init__(self, dct):
        for key, val in dct.items():
            if type(key) is str and hasattr(val, 'name'):
                val.name = key
        self.__dict__.update(dct)

class MultiLayerPerceptron(object):

    def __init__(self, ninputs, nhid, nclass, lr,
            l2coef=0.0,
            linker='c&py',
            hidden_layer=None,
            early_stopper=None,
            validation_portion=0.2,
            V_extern=None):
        class V_intern(AutonameVars):
            def __init__(v_self, lr, l2coef, **kwargs):
                lr = t.constant(lr)
                l2coef = t.constant(l2coef)
                input = t.matrix()   # n_examples x n_inputs
                target = t.ivector() # len: n_examples
                W2, b2 = t.matrix(), t.vector()

                if hidden_layer:
                    hid, hid_params, hid_ivals, hid_regularization = hidden_layer(input)
                else:
                    W1, b1 = t.matrix(), t.vector()
                    hid = t.tanh(b1 + t.dot(input, W1))
                    hid_params = [W1, b1]
                    hid_regularization = l2coef * t.sum(W1*W1)
                    hid_ivals = lambda: [_randshape(ninputs, nhid), _randshape(nhid)]

                params = [W2, b2] + hid_params
                activations = b2 + t.dot(hid, W2)
                nll, predictions = nnet_ops.crossentropy_softmax_1hot(activations, target)
                regularization = l2coef * t.sum(W2*W2) + hid_regularization
                output_class = t.argmax(activations, 1)
                loss_01 = t.neq(output_class, target)
                g_params = t.grad(nll + regularization, params)
                new_params = [t.sub_inplace(p, lr * gp) for p, gp in zip(params, g_params)]
                self.__dict__.update(locals()); del self.self
                AutonameVars.__init__(v_self, locals())
        self.nhid = nhid
        self.nclass = nclass
        self.v = V_intern(**locals()) if V_extern is None else V_extern(**locals())
        self.linker = linker
        self.early_stopper = early_stopper if early_stopper is not None else lambda: stopper.NStages(10, 1)
        self.validation_portion = validation_portion

    def _fn(self, inputs, outputs):
        # Caching here would hamper multi-threaded apps
        # prefer caching in _Model.__call__
        return theano.function(inputs, outputs, unpack_single=False, linker=self.linker)

    def __call__(self, trainset=None, iparams=None):
        """Allocate and optionally train a model"""
        if iparams is None:
            iparams = [_randshape(self.nhid, self.nclass), _randshape(self.nclass)]\
                    + self.v.hid_ivals()
        rval = _Model(self, iparams)
        if trainset:
            if len(trainset) == sys.maxint:
                raise NotImplementedError('Learning from infinite streams is not supported')
            nval = int(self.validation_portion * len(trainset))
            nmin = len(trainset) - nval
            assert nmin >= 0
            minset = trainset[:nmin]  #real training set for minimizing loss
            valset = trainset[nmin:]  #validation set for early stopping
            best = rval
            for stp in self.early_stopper():
                rval.update(
                        trainset.minibatches(['input', 'target'], minibatch_size=min(32,
                            len(trainset))))
                if stp.set_score:
                    stp.score = rval(valset, ['loss_01'])
                    if (stp.score < stp.best_score):
                        best = copy.copy(rval)
            rval = best
        return rval


import unittest

class TestMLP(unittest.TestCase):
    def test0(self):

        training_set1 = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
                                                          [0, 1, 1],
                                                          [1, 0, 1],
                                                          [1, 1, 1]]),
                                             {'input': slice(2), 'target': 2})
        training_set2 = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
                                                          [0, 1, 1],
                                                          [1, 0, 0],
                                                          [1, 1, 1]]),
                                             {'input': slice(2), 'target': 2})
        test_data = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
                                                      [0, 1, 1],
                                                      [1, 0, 0],
                                                      [1, 1, 1]]),
                                         {'input': slice(2)})

        learn_algo = MultiLayerPerceptron(2, 10, 2, .1
                , linker='c&py'
                , early_stopper=lambda: stopper.NStages(100, 1))

        model1 = learn_algo(training_set1)

        model2 = learn_algo(training_set2)

        n_match = 0
        for o1, o2 in zip(model1(test_data), model2(test_data)):
            #print o1
            #print o2
            n_match += (o1 == o2)

        assert n_match == (numpy.sum(training_set1.fields()['target'] ==
                training_set2.fields()['target']))

if __name__ == '__main__':
    unittest.main()
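
Note: the hidden_layer argument is only exercised through its default tanh branch above. The following is a minimal sketch, not part of the revision shown, of how a custom callback could be passed in. It assumes the definitions above (t, _randshape, stopper, MultiLayerPerceptron, and the test datasets) are in scope; the name custom_hidden_layer is hypothetical, and the only constraint taken from the code is that the callback accept the symbolic input and return the same (hid, hid_params, hid_ivals, hid_regularization) tuple that the default branch builds.

def custom_hidden_layer(input, ninputs=2, nhid=10, l2coef=0.0):
    # Same structure as the default branch: a tanh layer with an L2 penalty
    # and a callable that returns initial values matching hid_params.
    W1, b1 = t.matrix(), t.vector()
    hid = t.tanh(b1 + t.dot(input, W1))
    hid_params = [W1, b1]
    hid_regularization = l2coef * t.sum(W1 * W1)
    hid_ivals = lambda: [_randshape(ninputs, nhid), _randshape(nhid)]
    return hid, hid_params, hid_ivals, hid_regularization

learn_algo = MultiLayerPerceptron(2, 10, 2, .1,
        hidden_layer=custom_hidden_layer,
        early_stopper=lambda: stopper.NStages(100, 1))
model = learn_algo(training_set1)   # training_set1 as in TestMLP.test0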