diff linear_regression.py @ 107:c4916445e025

Comments from Pascal V.
author Yoshua Bengio <bengioy@iro.umontreal.ca>
date Tue, 06 May 2008 19:54:43 -0400
parents c4726e19b8ec
children 8fa1ef2411a0
--- a/linear_regression.py	Tue May 06 10:53:28 2008 -0400
+++ b/linear_regression.py	Tue May 06 19:54:43 2008 -0400
@@ -6,7 +6,7 @@
 
 # this is one of the simplest examples of a learner, and illustrates
 # the use of theano
-class LinearRegression(Learner):
+class LinearRegression(OneShotTLearner):
     """
     Implement linear regression, with or without L2 regularization
     (the former is called Ridge Regression and the latter Ordinary Least Squares).
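
For reference, the closed-form solution that the XtX/XtY statistics in this class build toward is the ridge normal equation theta = (XtX + lambda*I)^-1 XtY (lambda = 0 gives OLS). A minimal NumPy sketch under that reading; ridge_fit and lam are illustrative names, not part of this file:

import numpy

def ridge_fit(X, Y, lam=0.0):
    # Prepend a column of ones so the solution carries the bias b in its
    # first row, mirroring theta[:,0]=b and theta[:,1:]=W below (transposed).
    Xe = numpy.hstack([numpy.ones((X.shape[0],1)), X])  # n_examples x (1+n_inputs)
    XtX = numpy.dot(Xe.T, Xe)
    XtY = numpy.dot(Xe.T, Y)
    XtX[1:,1:] += lam * numpy.eye(X.shape[1])           # penalize weights, not the bias
    return numpy.linalg.solve(XtX, XtY)                 # (1+n_inputs) x n_outputs
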
@@ -48,8 +48,6 @@
        - 'output' (optionally produced by use as an output dataset field)
        - 'squared_error' (optionally produced by use as an output dataset field, needs 'target') = example-wise squared error
 
-     - optional input attributes (optionally expected as input_dataset attributes)
-
      - optional attributes (optionally expected as input_dataset attributes)
        (warning, this may be dangerous, the 'use' method will use those provided in the 
        input_dataset rather than those learned during 'update'; currently no support
@@ -59,28 +57,29 @@
        - 'b' 
        - 'W' 
        - 'regularization_term' 
-       - 'XtX'
-       - 'XtY'
+
     """
 
     def attributeNames(self):
-        return ["lambda","b","W","regularization_term","XtX","XtY"]
+        return ["lambda","b","W","regularization_term"]
     
-# definitions specific to linear regression:
 
-
-    def global_inputs(self):
+    def __init__(self):
+        self.input = t.matrix('input') # n_examples x n_inputs
+        self.target = t.matrix('target') # n_examples x n_outputs
-        self.lambda = as_scalar(0.,'lambda')
+        self.lambda_ = as_scalar(0.,'lambda') # 'lambda' is a reserved word in Python, so the member gets a trailing underscore
         self.theta = t.matrix('theta')
         self.W = self.theta[:,1:] 
         self.b = self.theta[:,0]
         self.XtX = t.matrix('XtX')
         self.XtY = t.matrix('XtY')
-
-    def global_outputs(self):
-        self.regularizer = self.lambda * t.dot(self.W,self.W)
+        self.regularizer = self.lambda_ * t.sum(t.sqr(self.W)) # L2 penalty is the sum of squared weights; dot(W,W) was a matrix product
+        self.output = t.dot(self.input,self.W.T) + self.b # n_examples x n_outputs
+        self.squared_error = t.sum(t.sqr(self.target - self.output),axis=1) # example-wise squared error (assumed definition; the original assignment was left unfinished)
         self.loss = self.regularizer + t.sum(self.squared_error) # this only makes sense if the whole training set fits in memory in a minibatch
-        self.loss_function = Function([self.W,self.lambda,self.squared_error],[self.loss])
+        self.loss_function = Function([self.W,self.lambda_,self.squared_error],[self.loss])
+        self.extended_input = t.prepend_one_to_each_row(self.input) # defined here as well: the new_XtX/new_XtY expressions need it at construction time
+        self.new_XtX = self.XtX + t.dot(self.extended_input.T,self.extended_input)
+        self.new_XtY = self.XtY + t.dot(self.extended_input.T,self.target)
+        self.new_theta = t.solve(self.new_XtX,self.new_XtY) # solve with the updated statistics, not the stale XtX/XtY
 
     def initialize(self):
         self.XtX.resize((1+self.n_inputs,1+self.n_inputs))
@@ -90,14 +89,8 @@
-        numpy.diag(self.XtX.data)[1:]=self.lambda.data
+        for i in range(1,1+self.n_inputs): # numpy.diag() returns a copy, so the old assignment had no effect
+            self.XtX.data[i,i] = self.lambda_.data
         
     def updated_variables(self):
-        self.new_XtX = self.XtX + t.dot(self.extended_input.T,self.extended_input)
-        self.new_XtY = self.XtY + t.dot(self.extended_input.T,self.target)
-        self.new_theta = t.solve(self.XtX,self.XtY)
+        pass # the update equations moved into __init__ above
     
     def minibatch_wise_inputs(self):
-        self.input = t.matrix('input') # n_examples x n_inputs
-        self.target = t.matrix('target') # n_examples x n_outputs
-
+        pass # the input/target declarations moved into __init__ above
     def minibatch_wise_outputs(self):
         # self.input is a (n_examples, n_inputs) minibatch matrix
         self.extended_input = t.prepend_one_to_each_row(self.input)
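
The equations moved into __init__ implement an accumulate-then-solve scheme over minibatches. A hedged standalone sketch of the same idea in plain NumPy; StreamingRidge and update are illustrative names, not the Learner API:

import numpy

class StreamingRidge:
    # Accumulates the sufficient statistics XtX and XtY over minibatches,
    # then re-solves the normal equations, as new_XtX/new_XtY/new_theta do above.
    def __init__(self, n_inputs, n_outputs, lam=0.0):
        d = 1 + n_inputs                                 # extra column for the bias
        self.XtX = numpy.zeros((d,d))
        self.XtY = numpy.zeros((d,n_outputs))
        self.XtX[1:,1:] = lam * numpy.eye(n_inputs)      # as in initialize(): lambda on the diagonal

    def update(self, X, Y):
        Xe = numpy.hstack([numpy.ones((X.shape[0],1)), X])   # prepend_one_to_each_row
        self.XtX += numpy.dot(Xe.T, Xe)                  # new_XtX
        self.XtY += numpy.dot(Xe.T, Y)                   # new_XtY
        self.theta = numpy.linalg.solve(self.XtX, self.XtY) # new_theta
        return self.theta

After any number of update() calls, W = theta[1:].T and b = theta[0], matching the slicing of theta in __init__ up to transposition.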