pylearn changeset 189:8f58abb943d4
many changes to NeuralNet
| author   | James Bergstra <bergstrj@iro.umontreal.ca> |
| ---      | --- |
| date     | Wed, 14 May 2008 14:50:07 -0400 |
| parents  | f01ac276c6fb |
| children | aa7a3ecbcc90 |
| files    | mlp_factory_approach.py |
| diffstat | 1 files changed, 21 insertions(+), 12 deletions(-) |

```diff
--- a/mlp_factory_approach.py	Wed May 14 14:49:08 2008 -0400
+++ b/mlp_factory_approach.py	Wed May 14 14:50:07 2008 -0400
@@ -6,12 +6,10 @@
 def _randshape(*shape):
     return (numpy.random.rand(*shape) -0.5) * 0.001
 
-def _function(inputs, outputs, linker='c&py'):
-    return theano.function(inputs, outputs, unpack_single=False,linker=linker)
 
 class NeuralNet(object):
 
-    class Model(object):
+    class _Model(object):
         def __init__(self, nnet, params):
             self.nnet = nnet
             self.params = params
@@ -20,7 +18,7 @@
             """Update this model from more training data."""
             v = self.nnet.v
             params = self.params
-            update_fn = _function([v.input, v.target] + v.params, [v.nll] + v.new_params)
+            update_fn = self.nnet._fn([v.input, v.target] + v.params, [v.nll] + v.new_params)
             if stopper is not None:
                 raise NotImplementedError()
             else:
@@ -37,20 +35,29 @@
                 put_stats_in_output_dataset=True,
                 output_attributes=[]):
             """Apply this model (as a function) to new data"""
-            inputs = [self.nnet.v.input, self.nnet.v.target] + self.nnet.v.params
-            fn = _function(inputs, [getattr(self.nnet.v, name) for name in output_fieldnames])
-            if 'target' in testset.fields():
+            v = self.nnet.v
+            outputs = [getattr(self.nnet.v, name) for name in output_fieldnames]
+            if 'target' in testset:
+                fn = self.nnet._fn([v.input, v.target] + v.params, outputs)
                 return dataset.ApplyFunctionDataSet(testset,
                         lambda input, target: fn(input, target[:,0], *self.params),
                         output_fieldnames)
             else:
+                fn = self.nnet._fn([v.input] + v.params, outputs)
                 return dataset.ApplyFunctionDataSet(testset,
-                        lambda input: fn(input, numpy.zeros(1,dtype='int64'), *self.params),
+                        lambda input: fn(input, *self.params),
                         output_fieldnames)
 
+    def _fn(self, inputs, outputs):
+        #it is possible for this function to implement function caching
+        #... but not necessarily desirable.
+        #- caching ruins the possibility of multi-threaded learning
+        #- caching demands more efficiency in the face of resizing inputs
+        #- caching makes it really hard to borrow references to function outputs
+        return theano.function(inputs, outputs, unpack_single=False, linker=self.linker)
+
     def __init__(self, ninputs, nhid, nclass, lr, nepochs,
             l2coef=0.0,
-            linker='c&yp',
+            linker='c&py',
             hidden_layer=None):
         class Vars:
             def __init__(self, lr, l2coef):
@@ -72,9 +79,10 @@
                     hid_ivals = lambda : [_randshape(ninputs, nhid), _randshape(nhid)]
 
                 params = [W2, b2] + hid_params
-                nll, predictions = nnet_ops.crossentropy_softmax_1hot( b2 + t.dot(hid, W2), target)
+                activations = b2 + t.dot(hid, W2)
+                nll, predictions = nnet_ops.crossentropy_softmax_1hot(activations, target)
                 regularization = l2coef * t.sum(W2*W2) + hid_regularization
-                output_class = t.argmax(predictions,1)
+                output_class = t.argmax(activations,1)
                 loss_01 = t.neq(output_class, target)
                 g_params = t.grad(nll + regularization, params)
                 new_params = [t.sub_inplace(p, lr * gp) for p,gp in zip(params, g_params)]
@@ -84,12 +92,13 @@
         self.nepochs = nepochs
         self.v = Vars(lr, l2coef)
         self.params = None
+        self.linker = linker
 
     def __call__(self, trainset=None, iparams=None):
         if iparams is None:
             iparams = [_randshape(self.nhid, self.nclass), _randshape(self.nclass)]\
                     + self.v.hid_ivals()
-        rval = NeuralNet.Model(self, iparams)
+        rval = NeuralNet._Model(self, iparams)
         if trainset:
             rval.update(trainset)
         return rval
```
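For orientation, a minimal usage sketch of the refactored class follows. The `NeuralNet.__init__`, `__call__`, `_fn`, and `_Model.update` names and signatures come from the diff above; everything else (the import, the parameter values, and the `trainset` placeholder) is an assumption, since pylearn's `dataset` module is not part of this changeset.

```python
# Hypothetical usage sketch, not part of the changeset. Assumes the module in
# this diff is importable and that `trainset` is a pylearn dataset with
# 'input' and 'target' fields; constructing one is outside this diff's scope.
from mlp_factory_approach import NeuralNet

trainset = None  # placeholder: a real pylearn dataset would go here

nnet = NeuralNet(ninputs=784, nhid=50, nclass=10,
                 lr=0.01, nepochs=100,
                 l2coef=0.0,
                 linker='c&py')  # linker is now stored on the instance and
                                 # used by the new _fn() helper

# __call__ draws random initial parameters when iparams is None, wraps them in
# the now-private NeuralNet._Model, and trains immediately if trainset is truthy.
model = nnet(trainset)

# Equivalently, build an untrained model and fold data in later; each call to
# update() compiles a fresh theano function via nnet._fn(), which deliberately
# does no caching.
model = nnet()
# model.update(trainset)  # with a real dataset in place of the placeholder
```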