comparison mlp.py @ 133:b4657441dd65

Corrected typos
author Yoshua Bengio <bengioy@iro.umontreal.ca>
date Fri, 09 May 2008 13:38:54 -0400
parents f6505ec32dc3
children 3f4e5c9bdc5e
comparison
equal deleted inserted replaced
132:f6505ec32dc3 133:b4657441dd65
6 """ 6 """
7 7
8 from learner import * 8 from learner import *
9 from theano import tensor as t 9 from theano import tensor as t
10 from nnet_ops import * 10 from nnet_ops import *
11 import math
11 12
12 13
13 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner): 14 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):
14 """ 15 """
15     Implement a straightforward classical feedforward  16     Implement a straightforward classical feedforward
65 - 'parameters' = [b1, W1, b2, W2] 66 - 'parameters' = [b1, W1, b2, W2]
66 - 'regularization_term' 67 - 'regularization_term'
67 68
68 """ 69 """
69 70
70 def __init__(self,n_hidden,n_classes,learning_rate,init_range=1.): 71 def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,init_range=1.,n_inputs=None,minibatch_size=None):
72 self._n_inputs = n_inputs
71 self._n_outputs = n_classes 73 self._n_outputs = n_classes
72 self._n_hidden = n_hidden 74 self._n_hidden = n_hidden
73 self._init_range = init_range 75 self._init_range = init_range
76 self._max_n_epochs = max_n_epochs
77 self._minibatch_size = minibatch_size
74 self.learning_rate = learning_rate # this is the float 78 self.learning_rate = learning_rate # this is the float
75 self._learning_rate = t.scalar('learning_rate') # this is the symbol 79 self._learning_rate = t.scalar('learning_rate') # this is the symbol
76 self._input = t.matrix('input') # n_examples x n_inputs 80 self._input = t.matrix('input') # n_examples x n_inputs
77 self._target = t.ivector('target') # n_examples x n_outputs 81 self._target = t.ivector('target') # n_examples x n_outputs
78 self._L2_regularizer = t.scalar('L2_regularizer') 82 self._L2_regularizer = t.scalar('L2_regularizer')
137 r = self._init_range/math.sqrt(self._n_hidden) 141 r = self._init_range/math.sqrt(self._n_hidden)
138 self.W2 = numpy.random.uniform(low=-r,high=r, 142 self.W2 = numpy.random.uniform(low=-r,high=r,
139 size=(self._n_outputs,self._n_hidden)) 143 size=(self._n_outputs,self._n_hidden))
140 self.b1[:]=0 144 self.b1[:]=0
141 self.b2[:]=0 145 self.b2[:]=0
142 146 self._n_epochs=0
147
148 def isLastEpoch(self):
149 self._n_epochs +=1
150 return self._n_epochs>=self._max_n_epochs
143 151
144 class MLP(MinibatchUpdatesTLearner): 152 class MLP(MinibatchUpdatesTLearner):
145 """ 153 """
146 Implement a feedforward multi-layer perceptron, with or without L1 and/or L2 regularization. 154 Implement a feedforward multi-layer perceptron, with or without L1 and/or L2 regularization.
147 155