comparison mlp.py @ 183:25d0a0c713da

did some debugging of test_mlp
author Olivier Breuleux <breuleuo@iro.umontreal.ca>
date Tue, 13 May 2008 18:30:08 -0400
parents 4afb41e61fcf
children 562f308873f0
comparison of 182:4afb41e61fcf with 183:25d0a0c713da
@@ -65,22 +65,22 @@
      - 'W2'
      - 'parameters' = [b1, W1, b2, W2]
      - 'regularization_term'
 
     """
-    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None):
+    def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None,linker='c|py'):
         self._n_inputs = n_inputs
         self._n_outputs = n_classes
         self._n_hidden = n_hidden
         self._init_range = init_range
         self._max_n_epochs = max_n_epochs
         self._minibatch_size = minibatch_size
         self.learning_rate = learning_rate # this is the float
         self.L2_regularizer = L2_regularizer
         self._learning_rate = t.scalar('learning_rate') # this is the symbol
         self._input = t.matrix('input') # n_examples x n_inputs
-        self._target = t.imatrix('target') # n_examples x 1
+        self._target = t.lmatrix('target') # n_examples x 1
         self._target_vector = self._target[:,0]
         self._L2_regularizer = t.scalar('L2_regularizer')
         self._W1 = t.matrix('W1')
         self._W2 = t.matrix('W2')
         self._b1 = t.row('b1')
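The change at line 81 relaxes the declared dtype of the targets: Theano's imatrix is an int32 matrix, while lmatrix is int64, the default integer dtype numpy produces on 64-bit platforms, so int64 label arrays no longer have to be downcast. A minimal sketch of the difference, using Theano's theano.function API; the variable names here are illustrative, not from the file:

    import numpy
    import theano
    import theano.tensor as t

    target = t.lmatrix('target')                 # int64, as in this revision
    f = theano.function([target], target[:, 0])
    f(numpy.array([[1], [0]]))                   # accepted: dtypes match
    # With t.imatrix (int32), the same int64 array is rejected, since
    # Theano refuses the unsafe int64 -> int32 downcast.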
@@ -89,11 +89,11 @@
         self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
         self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target_vector)
         self._output_class = t.argmax(self._output,1)
         self._class_error = t.neq(self._output_class,self._target_vector)
         self._minibatch_criterion = self._nll + self._regularization_term / t.shape(self._input)[0]
-        OnlineGradientTLearner.__init__(self)
+        OnlineGradientTLearner.__init__(self, linker = linker)
 
     def attributeNames(self):
         return ["parameters","b1","W2","b2","W2", "L2_regularizer","regularization_term"]
 
     def parameterAttributes(self):
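The new linker keyword (default 'c|py') introduced at line 70 is forwarded to OnlineGradientTLearner.__init__ at line 94, presumably selecting how Theano executes the compiled functions: 'c|py' tries each op's C implementation and falls back to Python, while 'py' forces the pure-Python linker, slower but easier to step through when debugging test_mlp. A hypothetical usage sketch; the class name and module path are assumed, since neither is visible in these hunks:

    from pylearn.mlp import OneHiddenLayerNNetClassifier  # path assumed

    # Pure-Python linker while debugging; drop the argument (or pass
    # 'c|py') to get the faster default backend.
    mlp = OneHiddenLayerNNetClassifier(n_hidden=10, n_classes=2,
                                       learning_rate=0.01, max_n_epochs=4,
                                       linker='py')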
@@ -117,11 +117,11 @@
         output_fields += ["class_error", "nll"]
         return output_fields
 
     def updateMinibatch(self,minibatch):
         MinibatchUpdatesTLearner.updateMinibatch(self,minibatch)
-        print self.nll
+        #print self.nll
 
     def allocate(self,minibatch):
         minibatch_n_inputs = minibatch["input"].shape[1]
         if not self._n_inputs:
             self._n_inputs = minibatch_n_inputs
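The per-minibatch trace at line 122 is commented out rather than deleted, so it can be re-enabled the next time test_mlp misbehaves. If that toggling becomes frequent, a guard avoids editing the source each time; this is a sketch only, and 'verbose' is a hypothetical attribute, not one the class defines:

    def updateMinibatch(self, minibatch):
        MinibatchUpdatesTLearner.updateMinibatch(self, minibatch)
        if getattr(self, 'verbose', False):   # 'verbose' is hypothetical
            print self.nll                    # per-minibatch training NLL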