annotate mlp_factory_approach.py @ 190:aa7a3ecbcc90

progress toward early stopping
author James Bergstra <bergstrj@iro.umontreal.ca>
date Wed, 14 May 2008 16:24:10 -0400
parents 8f58abb943d4
children e816821c1e50

import copy
import numpy

import theano
import theano.tensor as t

import dataset
import nnet_ops

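# Weight-initialization helper: uniform random values in (-0.0005, 0.0005).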
def _randshape(*shape):
    return (numpy.random.rand(*shape) - 0.5) * 0.001

class NeuralNet(object):

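    # _Model pairs the shared symbolic graph (held by a NeuralNet instance)
    # with one concrete list of parameter values; NeuralNet.__call__ below
    # acts as the factory that creates and optionally trains these models.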
    class _Model(object):
        def __init__(self, nnet, params):
            self.nnet = nnet
            self.params = params

        def __copy__(self):
            return NeuralNet._Model(self.nnet, [copy.copy(p) for p in self.params])

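        # Training is a fixed budget of 100 passes of minibatch SGD for now;
        # `stopper` is the hook where early stopping is meant to plug in.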
        def update(self, trainset, stopper=None):
            """Update this model from more training data."""
            v = self.nnet.v
            params = self.params
            update_fn = self.nnet._fn([v.input, v.target] + v.params, [v.nll] + v.new_params)
            if stopper is not None:
                raise NotImplementedError()
            else:
                for i in xrange(100):
                    for input, target in trainset.minibatches(['input', 'target'],
                            minibatch_size=min(32, len(trainset))):
                        dummy = update_fn(input, target[:,0], *params)
                        if 0: print dummy[0]  # the nll

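        # Applying a trained model wraps the test set in an
        # ApplyFunctionDataSet that computes the requested output fields
        # (e.g. 'output_class') using the model's current parameters.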
        def __call__(self, testset,
                output_fieldnames=['output_class'],
                test_stats_collector=None,
                copy_inputs=False,
                put_stats_in_output_dataset=True,
                output_attributes=[]):
            """Apply this model (as a function) to new data"""
            v = self.nnet.v
            outputs = [getattr(self.nnet.v, name) for name in output_fieldnames]
            if 'target' in testset:
                fn = self.nnet._fn([v.input, v.target] + v.params, outputs)
                return dataset.ApplyFunctionDataSet(testset,
                        lambda input, target: fn(input, target[:,0], *self.params),
                        output_fieldnames)
            else:
                fn = self.nnet._fn([v.input] + v.params, outputs)
                return dataset.ApplyFunctionDataSet(testset,
                        lambda input: fn(input, *self.params),
                        output_fieldnames)

    def _fn(self, inputs, outputs):
        # It is possible for this function to implement function caching,
        # but that is not necessarily desirable:
        # - caching ruins the possibility of multi-threaded learning
        # - caching demands more efficiency in the face of resizing inputs
        # - caching makes it really hard to borrow references to function outputs
        return theano.function(inputs, outputs, unpack_single=False, linker=self.linker)

    def __init__(self, ninputs, nhid, nclass, lr, nepochs,
            l2coef=0.0,
            linker='c&py',
            hidden_layer=None):
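        # Vars builds the symbolic Theano graph: a tanh hidden layer (unless a
        # custom `hidden_layer` constructor is supplied), a softmax output
        # trained by negative log-likelihood with L2 regularization, and
        # in-place SGD parameter updates.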
        class Vars:
            def __init__(self, lr, l2coef):
                lr = t.constant(lr)
                l2coef = t.constant(l2coef)
                input = t.matrix('input')     # n_examples x n_inputs
                target = t.ivector('target')  # n_examples x 1
                W2 = t.matrix('W2')
                b2 = t.vector('b2')

                if hidden_layer:
                    hid, hid_params, hid_ivals, hid_regularization = hidden_layer(input)
                else:
                    W1 = t.matrix('W1')
                    b1 = t.vector('b1')
                    hid = t.tanh(b1 + t.dot(input, W1))
                    hid_params = [W1, b1]
                    hid_regularization = l2coef * t.sum(W1*W1)
                    hid_ivals = lambda: [_randshape(ninputs, nhid), _randshape(nhid)]

                params = [W2, b2] + hid_params
                activations = b2 + t.dot(hid, W2)
                nll, predictions = nnet_ops.crossentropy_softmax_1hot(activations, target)
                regularization = l2coef * t.sum(W2*W2) + hid_regularization
                output_class = t.argmax(activations, 1)
                loss_01 = t.neq(output_class, target)
                g_params = t.grad(nll + regularization, params)
                new_params = [t.sub_inplace(p, lr * gp) for p, gp in zip(params, g_params)]
                self.__dict__.update(locals()); del self.self
        self.nhid = nhid
        self.nclass = nclass
        self.nepochs = nepochs
        self.v = Vars(lr, l2coef)
        self.params = None
        self.linker = linker

    def __call__(self, trainset=None, iparams=None):
        if iparams is None:
            iparams = [_randshape(self.nhid, self.nclass), _randshape(self.nclass)] \
                    + self.v.hid_ivals()
        rval = NeuralNet._Model(self, iparams)
        if trainset:
            rval.update(trainset)
        return rval

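# The `stopper` argument of _Model.update() is still unimplemented in this
# revision. A minimal sketch of the kind of object it might accept is given
# below; the class name, `patience`, and `step()` are hypothetical and not
# part of this codebase.
class _ExampleStopper(object):
    """Stop after `patience` consecutive epochs without improvement."""
    def __init__(self, patience=10):
        self.patience = patience
        self.best = float('inf')
        self.bad_epochs = 0

    def step(self, validation_error):
        # Return True while training should continue.
        if validation_error < self.best:
            self.best = validation_error
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs < self.patience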
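# Minimal demo: train two models on datasets that differ in a single example
# and count how often their predictions agree on the same test inputs.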
if __name__ == '__main__':
    training_set1 = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
                                                      [0, 1, 1],
                                                      [1, 0, 1],
                                                      [1, 1, 1]]),
                                         {'input':slice(2),'target':2})
    training_set2 = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
                                                      [0, 1, 1],
                                                      [1, 0, 0],
                                                      [1, 1, 1]]),
                                         {'input':slice(2),'target':2})
    test_data = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
                                                  [0, 1, 1],
                                                  [1, 0, 0],
                                                  [1, 1, 1]]),
                                     {'input':slice(2)})

    learn_algo = NeuralNet(2, 10, 3, .1, 1000)

    model1 = learn_algo(training_set1)

    model2 = learn_algo(training_set2)

    n_match = 0
    for o1, o2 in zip(model1(test_data), model2(test_data)):
        n_match += (o1 == o2)

    print n_match, numpy.sum(training_set1.fields()['target'] ==
                             training_set2.fields()['target'])