changeset 650:83e8fe9b1c82

factoring out classification from LogReg_New
author James Bergstra <bergstrj@iro.umontreal.ca>
date Wed, 04 Feb 2009 18:04:05 -0500
parents c433b9cf9d09
children d03b5d8e4bf6
files pylearn/algorithms/logistic_regression.py
diffstat 1 files changed, 37 insertions(+), 25 deletions(-)
--- a/pylearn/algorithms/logistic_regression.py	Wed Feb 04 15:56:20 2009 -0500
+++ b/pylearn/algorithms/logistic_regression.py	Wed Feb 04 18:04:05 2009 -0500
@@ -192,6 +192,33 @@
                                         updates = dict((p, p - self.lr * g) for p, g in zip(self.params, gparams)))
 
 
+class classification:  # this would go to a file called pylearn/algorithms/classification.py
+
+    @staticmethod
+    def xent(p, q):
+        """The cross-entropy between the prediction from `input`, and the true `target`.
+
+        This function returns a symbolic vector, with the cross-entropy for each row in
+        `input`.  
+        
+        Hint: To sum these costs into a scalar value, use "xent(input, target).sum()"
+        """
+        return p * tensor.log(q)
+
+    @staticmethod
+    def errors(prediction, target):
+        """The zero-one error of the prediction from `input`, with respect to the true `target`.
+
+        This function returns a symbolic vector, with the incorrectness of each prediction
+        (made row-wise from `input`).
+        
+        Hint: Count errors with "errors(prediction, target).sum()", and get the error-rate with
+        "errors(prediction, target).mean()"
+
+        """
+        return tensor.neq(tensor.argmax(prediction), target)
+
+
 class LogReg_New(module.FancyModule):
     """A symbolic module for performing multi-class logistic regression."""
 
@@ -208,6 +235,12 @@
 
         self.w = w if w is not None else module.Member(T.dmatrix())
         self.b = b if b is not None else module.Member(T.dvector())
+
+    def _instance_initialize(self, obj):
+        obj.w = N.zeros((self.n_in, self.n_out))
+        obj.b = N.zeros(self.n_out)
+        obj.__pp_hide__ = ['params']
+
 
     def l1(self):
         return abs(self.w).sum()
@@ -216,38 +249,17 @@
         return (self.w**2).sum()
 
     def activation(self, input):
-        return T.dot(self.input, self.w) + self.b
+        return theano.dot(input, self.w) + self.b
 
     def softmax(self, input):
         return nnet.softmax(self.activation(input))
 
     def argmax(self, input):
-        return T.max_and_argmax(self.linear_output(input))[1]
+        return tensor.argmax(self.activation(input), axis=1)
 
     def xent(self, input, target):
-        """The cross-entropy between the prediction from `input`, and the true `target`.
-
-        This function returns a symbolic vector, with the cross-entropy for each row in
-        `input`.  
-        
-        Hint: To sum these costs into a scalar value, use "xent(input, target).sum()"
-        """
-        return target * T.log(self.softmax(input))
+        return classification.xent(target, self.softmax(input))
 
     def errors(self, input, target):
-        """The zero-one error of the prediction from `input`, with respect to the true `target`.
-
-        This function returns a symbolic vector, with the incorrectness of each prediction
-        (made row-wise from `input`).
-        
-        Hint: Count errors with "errors(input, target).sum()", and get the error-rate with
-        "errors(input, target).mean()"
+        return classification.errors(self.softmax(input), target)
 
-        """
-        return T.neq(self.argmax(input), self.target)
-
-    def _instance_initialize(self, obj):
-        obj.w = N.zeros((self.n_in, self.n_out))
-        obj.b = N.zeros(self.n_out)
-        obj.__pp_hide__ = ['params']
-
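
Usage note (not part of the changeset): a minimal sketch of how the factored-out helpers behave after this change. It assumes the Theano-era API used in this file (theano.function, theano.tensor) and imports `classification` from logistic_regression.py, where this changeset leaves it; the module path and the array values are made-up examples for illustration.

import numpy
import theano
from theano import tensor

from pylearn.algorithms.logistic_regression import classification

p = tensor.dmatrix('p')   # one-hot target distributions, one row per example
q = tensor.dmatrix('q')   # predicted class probabilities (e.g. softmax output)
y = tensor.lvector('y')   # integer class labels, one per example

cost = classification.xent(p, q).sum()           # total cross-entropy over the batch
error_rate = classification.errors(q, y).mean()  # fraction of rows misclassified

f = theano.function([p, q, y], [cost, error_rate])

targets = numpy.array([[1., 0.], [1., 0.]])
predictions = numpy.array([[0.9, 0.1], [0.2, 0.8]])
labels = numpy.array([0, 0])
print f(targets, predictions, labels)   # Python 2 print, as in pylearn's era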