pylearn changeset 133:b4657441dd65
Corrected typos

author    Yoshua Bengio <bengioy@iro.umontreal.ca>
date      Fri, 09 May 2008 13:38:54 -0400
parents   f6505ec32dc3
children  3f4e5c9bdc5e
files     learner.py lookup_list.py mlp.py test_mlp.py
diffstat  4 files changed, 31 insertions(+), 11 deletions(-)
--- a/learner.py	Thu May 08 00:54:14 2008 -0400
+++ b/learner.py	Fri May 09 13:38:54 2008 -0400
@@ -47,7 +47,7 @@
         and return the learned function.
         """
         self.forget()
-        return self.update(learning_task,train_stats_collector)
+        return self.update(training_set,train_stats_collector)
 
     def use(self,input_dataset,output_fieldnames=None,
             test_stats_collector=None,copy_inputs=True,
@@ -254,7 +254,7 @@
         Private helper function that maps a list of attribute names to a list of
         corresponding Op Results (with the same name but with a '_' prefix).
         """
-        return [self.__getattribute__('_'+name).data for name in names]
+        return [self.__getattribute__('_'+name) for name in names]
 
 
 class MinibatchUpdatesTLearner(TLearner):
@@ -311,7 +311,8 @@
     def parameterAttributes(self):
         raise AbstractFunction()
 
-    def updateStart(self): pass
+    def updateStart(self,training_set):
+        pass
 
     def updateEnd(self):
         self.setAttributes(self.updateEndOutputAttributes(),
@@ -343,12 +344,15 @@
         """
         self.updateStart(training_set)
         stop=False
+        if hasattr(self,'_minibatch_size') and self._minibatch_size:
+            minibatch_size=self._minibatch_size
+        else:
+            minibatch_size=min(100,len(training_set))
         while not stop:
             if train_stats_collector:
                 train_stats_collector.forget() # restart stats collection at the beginning of each epoch
-            for minibatch in training_set.minibatches(self.training_set_input_fields,
-                                                      minibatch_size=self.minibatch_size):
-                self.update_minibatch(minibatch)
+            for minibatch in training_set.minibatches(minibatch_size=minibatch_size):
+                self.updateMinibatch(minibatch)
                 if train_stats_collector:
                     minibatch_set = minibatch.examples()
                     minibatch_set.setAttributes(self.attributeNames(),self.attributes())
@@ -390,7 +394,7 @@
         return self.parameterAttributes()
 
     def updateMinibatchOutputAttributes(self):
-        return ["_new"+name for name in self.parameterAttributes()]
+        return ["new_"+name for name in self.parameterAttributes()]
 
     def updateEndInputAttributes(self):
         return self.parameterAttributes()
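Note on the learner.py change: the update() loop now derives its minibatch size from an optional _minibatch_size attribute, falling back to min(100, len(training_set)). A minimal standalone sketch of that fallback pattern (the SketchLearner class here is hypothetical, for illustration only, not pylearn code):

# Hypothetical sketch of the minibatch-size fallback added to
# MinibatchUpdatesTLearner.update(); not part of pylearn itself.
class SketchLearner:
    def __init__(self, minibatch_size=None):
        self._minibatch_size = minibatch_size

    def chooseMinibatchSize(self, training_set):
        # Prefer an explicitly configured size; otherwise cap at 100
        # examples (or the whole dataset if it is smaller than that).
        if hasattr(self, '_minibatch_size') and self._minibatch_size:
            return self._minibatch_size
        return min(100, len(training_set))

print SketchLearner().chooseMinibatchSize(range(250))    # prints 100
print SketchLearner(20).chooseMinibatchSize(range(250))  # prints 20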
--- a/lookup_list.py	Thu May 08 00:54:14 2008 -0400
+++ b/lookup_list.py	Fri May 09 13:38:54 2008 -0400
@@ -95,10 +95,10 @@
     def __ne__(self, other):
         return not self.__eq__(other)
 
-    def __hash__():
+    def __hash__(self):
         raise NotImplementedError()
 
-    def __call__(*names):
+    def __call__(self,*names):
         """
         Return a list of values associated with the given names (which must
         all be keys of the lookup list).
         """
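Note on the lookup_list.py change: both methods were missing the explicit self argument that Python passes to every bound method, so a call like lookup_list_instance('x') would have failed (the instance would have been swallowed into *names). A minimal sketch of the corrected calling convention (the MiniLookup class is hypothetical, for illustration only):

# Hypothetical miniature of a lookup list, showing why __call__ needs
# an explicit 'self' parameter; not the actual pylearn LookupList.
class MiniLookup:
    def __init__(self, names, values):
        self._name2value = dict(zip(names, values))

    def __call__(self, *names):
        # With 'self' declared, *names receives only the lookup keys.
        return [self._name2value[name] for name in names]

example = MiniLookup(['input', 'target'], [[0, 0], 1])
print example('input', 'target')  # prints [[0, 0], 1]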
--- a/mlp.py	Thu May 08 00:54:14 2008 -0400
+++ b/mlp.py	Fri May 09 13:38:54 2008 -0400
@@ -8,6 +8,7 @@
 from learner import *
 from theano import tensor as t
 from nnet_ops import *
+import math
 
 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):
 
@@ -67,10 +68,13 @@
     """
 
-    def __init__(self,n_hidden,n_classes,learning_rate,init_range=1.):
+    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,init_range=1.,n_inputs=None,minibatch_size=None):
+        self._n_inputs = n_inputs
         self._n_outputs = n_classes
         self._n_hidden = n_hidden
         self._init_range = init_range
+        self._max_n_epochs = max_n_epochs
+        self._minibatch_size = minibatch_size
         self.learning_rate = learning_rate # this is the float
         self._learning_rate = t.scalar('learning_rate') # this is the symbol
         self._input = t.matrix('input') # n_examples x n_inputs
@@ -139,7 +143,11 @@
                                     size=(self._n_outputs,self._n_hidden))
         self.b1[:]=0
         self.b2[:]=0
+        self._n_epochs=0
 
+    def isLastEpoch(self):
+        self._n_epochs +=1
+        return self._n_epochs>=self._max_n_epochs
 
 class MLP(MinibatchUpdatesTLearner):
     """
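Note on the mlp.py change: the constructor now takes max_n_epochs, forget() resets an epoch counter, and isLastEpoch() increments that counter once per training epoch so the training loop stops when the limit is reached. Roughly, the interaction looks like this (a hypothetical sketch of the loop/criterion contract, not the actual pylearn classes):

# Hypothetical sketch of the epoch-counting stopping criterion and the
# 'while not stop' loop that consumes it.
class EpochCounter:
    def __init__(self, max_n_epochs):
        self._max_n_epochs = max_n_epochs
        self._n_epochs = 0          # what forget() resets in mlp.py

    def isLastEpoch(self):
        self._n_epochs += 1
        return self._n_epochs >= self._max_n_epochs

criterion = EpochCounter(3)
stop = False
while not stop:
    # ... one pass over the training minibatches would go here ...
    stop = criterion.isLastEpoch()
print criterion._n_epochs  # prints 3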
--- a/test_mlp.py	Thu May 08 00:54:14 2008 -0400
+++ b/test_mlp.py	Fri May 09 13:38:54 2008 -0400
@@ -1,9 +1,17 @@
 from mlp import *
+import dataset
 
 def test0():
-    nnet = OneHiddenLayerNNetClassifier(10,3,.1)
+    nnet = OneHiddenLayerNNetClassifier(10,3,.1,1000)
+    training_set = dataset.ArrayDataSet(numpy.array([[0, 0, 0],
+                                                     [0, 1, 1],
+                                                     [1, 0, 1],
+                                                     [1, 1, 1]]),
+                                        {'input':slice(2),'target':2})
+    fprop=nnet(training_set)
+    print fprop(training_set)
 
 test0()
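Note on the test_mlp.py change: the field specification {'input':slice(2),'target':2} maps dataset field names to columns of the array, with 'input' covering columns 0-1 and 'target' column 2. In plain numpy terms (an illustrative sketch of the column mapping, not the ArrayDataSet implementation):

# Illustration of the column mapping behind {'input':slice(2),'target':2};
# plain numpy indexing only.
import numpy
data = numpy.array([[0, 0, 0],
                    [0, 1, 1],
                    [1, 0, 1],
                    [1, 1, 1]])
print data[:, slice(2)]  # the 'input' field: first two columns
print data[:, 2]         # the 'target' field: the last column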