# HG changeset patch
# User Yoshua Bengio
# Date 1210193886 14400
# Node ID f959ad58facc492787c281b135506fa8ba725d37
# Parent 7d8b3d6dd4e9edde583f30a9d8ee749c4942218d
# Parent 4efe6d36c061c2580c05d780888dd44bf0b75f81
Automated merge with ssh://p-omega1@lgcm.iro.umontreal.ca/tlearn

diff -r 7d8b3d6dd4e9 -r f959ad58facc learner.py
--- a/learner.py	Wed May 07 16:22:28 2008 -0400
+++ b/learner.py	Wed May 07 16:58:06 2008 -0400
@@ -56,7 +56,7 @@
         Optionally, if copy_inputs, the input fields (of the input_dataset)
         can be made visible in the output DataSet returned by this method.
         """
-        raise NotImplementedError
+        raise AbstractFunction()
 
     def attributeNames(self):
         """
@@ -228,9 +228,9 @@
         If a test_stats_collector is provided, then its attributes
         (test_stats_collector.AttributeNames()) are also copied into the
         output dataset attributes.
         """
-        minibatchwise_use_function = minibatchwise_use_functions(input_dataset.fieldNames(),
-                                                                 output_fieldnames,
-                                                                 test_stats_collector)
+        minibatchwise_use_function = self.minibatchwise_use_functions(input_dataset.fieldNames(),
+                                                                      output_fieldnames,
+                                                                      test_stats_collector)
         virtual_output_dataset = ApplyFunctionDataSet(input_dataset,
                                                       minibatchwise_use_function,
                                                       True,DataSet.numpy_vstack,
diff -r 7d8b3d6dd4e9 -r f959ad58facc mlp.py
--- a/mlp.py	Wed May 07 16:22:28 2008 -0400
+++ b/mlp.py	Wed May 07 16:58:06 2008 -0400
@@ -71,13 +71,13 @@
         self.learning_rate = learning_rate # this is the float
         self._learning_rate = t.scalar('learning_rate') # this is the symbol
         self._input = t.matrix('input') # n_examples x n_inputs
-        self._target = t.matrix('target','int32') # n_examples x n_outputs
+        self._target = t.ivector('target') # n_examples x n_outputs
         self._L2_regularizer = t.scalar('L2_regularizer')
         self._W1 = t.matrix('W1')
         self._W2 = t.matrix('W2')
         self._b1 = t.row('b1')
         self._b2 = t.row('b2')
-        self._regularization_term = self._L2_regularizer * (t.dot(self._W1,self._W1) + t.dot(self._W2,self._W2))
+        self._regularization_term = self._L2_regularizer * (t.sum(self._W1*self._W1) + t.sum(self._W2*self._W2))
         self._output_activations =self._b2+t.dot(t.tanh(self._b1+t.dot(self._input,self._W1.T)),self._W2.T)
         self._nll,self._output = crossentropy_softmax_1hot(self._output_activations,self._target)
         self._output_class = t.argmax(self._output,1)
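
Note on the mlp.py hunks (not part of the changeset above): the regularizer fix replaces t.dot(W, W) with t.sum(W*W) because an L2 penalty must be a scalar sum of squared weights, while dot of a matrix with itself is a matrix product (and is undefined for non-square weight matrices). The t.ivector('target') change similarly matches crossentropy_softmax_1hot, which takes integer class indices rather than a float matrix. A minimal NumPy sketch of the regularizer distinction, using a hypothetical 3x5 weight matrix:

# Sketch only; shapes are assumptions, not taken from the patch.
import numpy as np

rng = np.random.default_rng(0)
W1 = rng.standard_normal((3, 5))  # hypothetical n_hidden x n_inputs weights

# Correct L2 term: scalar sum of squared entries (squared Frobenius norm),
# matching the patched t.sum(self._W1*self._W1).
l2_term = np.sum(W1 * W1)
assert np.isclose(l2_term, np.linalg.norm(W1, 'fro') ** 2)

# The pre-patch form: a (3,5) x (3,5) matrix product is not even defined,
# and for square W it would yield a matrix, not the scalar a penalty needs.
try:
    np.dot(W1, W1)
except ValueError as err:
    print("dot(W1, W1) fails for non-square W1:", err)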