Mercurial > pylearn
diff mlp.py @ 133:b4657441dd65
Corrected typos
author | Yoshua Bengio <bengioy@iro.umontreal.ca> |
---|---|
date | Fri, 09 May 2008 13:38:54 -0400 |
parents | f6505ec32dc3 |
children | 3f4e5c9bdc5e |
line wrap: on
line diff
--- a/mlp.py	Thu May 08 00:54:14 2008 -0400
+++ b/mlp.py	Fri May 09 13:38:54 2008 -0400
@@ -8,6 +8,7 @@
 from learner import *
 from theano import tensor as t
 from nnet_ops import *
+import math

 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):

@@ -67,10 +68,13 @@
     """

-    def __init__(self,n_hidden,n_classes,learning_rate,init_range=1.):
+    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,init_range=1.,n_inputs=None,minibatch_size=None):
+        self._n_inputs = n_inputs
         self._n_outputs = n_classes
         self._n_hidden = n_hidden
         self._init_range = init_range
+        self._max_n_epochs = max_n_epochs
+        self._minibatch_size = minibatch_size
         self.learning_rate = learning_rate # this is the float
         self._learning_rate = t.scalar('learning_rate') # this is the symbol
         self._input = t.matrix('input') # n_examples x n_inputs
@@ -139,7 +143,11 @@
                                           size=(self._n_outputs,self._n_hidden))
         self.b1[:]=0
         self.b2[:]=0
+        self._n_epochs=0

+    def isLastEpoch(self):
+        self._n_epochs +=1
+        return self._n_epochs>=self._max_n_epochs

 class MLP(MinibatchUpdatesTLearner):
     """