changeset 394:f2d112dc53be

merge
author Joseph Turian <turian@gmail.com>
date Tue, 08 Jul 2008 18:46:37 -0400
parents 36baeb7125a4 (current diff) b4015b07ab17 (diff)
children 70019965f888
files
diffstat 1 files changed, 66 insertions(+), 52 deletions(-) [+]
line wrap: on
line diff
--- a/linear_regression.py	Tue Jul 08 18:46:26 2008 -0400
+++ b/linear_regression.py	Tue Jul 08 18:46:37 2008 -0400
@@ -4,16 +4,35 @@
 the use of theano.
 """
 
-from pylearn import OfflineLearningAlgorithm
+from pylearn.learner import OfflineLearningAlgorithm
 from theano import tensor as T
+from theano.others_ops import prepend_1_to_each_row
 from theano.scalar import as_scalar
 from common.autoname import AutoName
+import theano
+import numpy
 
 class LinearRegression(OfflineLearningAlgorithm):
     """
     Implement linear regression, with or without L2 regularization
     (the former is called Ridge Regression and the latter Ordinary Least Squares).
 
+    Usage:
+
+       linear_regressor=LinearRegression(L2_regularizer=0.1)
+       linear_predictor=linear_regressor(training_set)
+       all_results_dataset=linear_predictor(test_set) # creates a dataset with "output" and "squared_error" field
+       outputs = linear_predictor.compute_outputs(inputs) # inputs and outputs are numpy arrays
+       outputs, errors = linear_predictor.compute_outputs_and_errors(inputs,targets)
+       errors = linear_predictor.compute_errors(inputs,targets)
+       mse = linear_predictor.compute_mse(inputs,targets)
+       
+       
+
+    The training_set must have fields "input" and "target".
+    The test_set must have field "input", and needs "target" if
+    we want to compute the squared errors.
+
     The predictor parameters are obtained analytically from the training set.
     Training can proceed sequentially (with multiple calls to update with
     different disjoint subsets of the training sets). After each call to
@@ -52,12 +71,24 @@
        - 'squared_error' (optionally produced by learned model if 'target' is provided)
           = example-wise squared error
     """
-    def __init__(self, L2_regularizer=0):
-        self.predictor = LinearPredictor(None,None
+    def __init__(self, L2_regularizer=0,minibatch_size=10000):
         self.L2_regularizer=L2_regularizer
-        self._XtX = T.matrix('XtX')
-        self._XtY = T.matrix('XtY')
-        self._extended_input = T.prepend_one_to_each_row(self._input)
+        self.equations = LinearRegressionEquations()
+        self.minibatch_size=1000
+
+    def __call__(self,trainset):
+        first_example = trainset[0]
+        n_inputs = first_example['input'].size
+        n_outputs = first_example['target'].size
+        XtX = numpy.zeros((n_inputs+1,n_inputs+1))
+        XtY = numpy.zeros((n_inputs+1,n_outputs))
+        for i in xrange(n_inputs):
+            XtX[i+1,i+1]=self.L2_regularizer
+        mbs=min(self.minibatch_size,len(trainset))
+        for inputs,targets in trainset.minibatches(["input","target"],minibatch_size=mbs):
+            XtX,XtY=self.equations.update(XtX,XtY,inputs,targets)
+        theta=numpy.linalg.solve(XtX,XtY)
+        return LinearPredictor(theta)
 
 class LinearPredictorEquations(AutoName):
     inputs = T.matrix() # minibatchsize x n_inputs
@@ -76,22 +107,37 @@
         def fn(input_vars,output_vars):
             return staticmethod(theano.function(input_vars,output_vars, linker=linker))
 
-        cls.compute_outputs = fn([inputs,theta],[outputs])
-        cls.compute_errors = fn([outputs,targets],[squared_errors])
+        cls.compute_outputs = fn([cls.inputs,cls.theta],[cls.outputs])
+        cls.compute_errors = fn([cls.outputs,cls.targets],[cls.squared_errors])
 
         cls.__compiled = True
 
-    def __init__(self)
+    def __init__(self):
         self.compile()
         
 class LinearRegressionEquations(LinearPredictorEquations):
     P = LinearPredictorEquations
     XtX = T.matrix() # (n_inputs+1) x (n_inputs+1)
     XtY = T.matrix() # (n_inputs+1) x n_outputs
-    extended_input = T.prepend_scalar_to_each_row(1.,P.inputs)
-    new_XtX = add_inplace(XtX,T.dot(extended_input.T,extended_input))
-    new_XtY = add_inplace(XtY,T.dot(extended_input.T,P.targets))
-    new_theta = T.Cholesky_solve_inplace(P.theta,XtX,XtY)  # solve linear system XtX theta = XtY 
+    extended_input = prepend_1_to_each_row(P.inputs)
+    new_XtX = T.add_inplace(XtX,T.dot(extended_input.T,extended_input))
+    new_XtY = T.add_inplace(XtY,T.dot(extended_input.T,P.targets))
+
+    __compiled = False
+    
+    @classmethod
+    def compile(cls,linker='c|py'):
+        if cls.__compiled:
+            return
+        def fn(input_vars,output_vars):
+            return staticmethod(theano.function(input_vars,output_vars, linker=linker))
+
+        cls.update = fn([cls.XtX,cls.XtY,cls.P.inputs,cls.P.targets],[cls.new_XtX,cls.new_XtY])
+
+        cls.__compiled = True
+
+    def __init__(self):
+        self.compile()
 
 class LinearPredictor(object):
     """
@@ -103,15 +149,18 @@
         self.theta=theta
         self.n_inputs=theta.shape[0]-1
         self.n_outputs=theta.shape[1]
-        self.predict_equations = LinearPredictorEquations()
+        self.equations = LinearPredictorEquations()
 
     def compute_outputs(self,inputs):
-        return self.predict_equations.compute_outputs(inputs,self.theta)
+        return self.equations.compute_outputs(inputs,self.theta)
     def compute_errors(self,inputs,targets):
-        return self.predict_equations.compute_errors(self.compute_outputs(inputs),targets)
+        return self.equations.compute_errors(self.compute_outputs(inputs),targets)
     def compute_outputs_and_errors(self,inputs,targets):
         outputs = self.compute_outputs(inputs)
-        return [outputs,self.predict_equations.compute_errors(outputs,targets)]
+        return [outputs,self.equations.compute_errors(outputs,targets)]
+    def compute_mse(self,inputs,targets):
+        errors = self.compute_errors(inputs,targets)
+        return numpy.sum(errors)/errors.size
     
     def __call__(self,dataset,output_fieldnames=None,cached_output_dataset=False):
         assert dataset.hasFields(["input"])
@@ -137,38 +186,3 @@
             return ds
         
 
-        self._XtX = T.matrix('XtX')
-        self._XtY = T.matrix('XtY')
-        self._extended_input = T.prepend_one_to_each_row(self._input)
-        self._output = T.dot(self._input,self._W.T) + self._b  # (n_examples , n_outputs) matrix
-        self._squared_error = T.sum_within_rows(T.sqr(self._output-self._target)) # (n_examples ) vector
-        self._regularizer = self._L2_regularizer * T.dot(self._W,self._W)
-        self._new_XtX = add_inplace(self._XtX,T.dot(self._extended_input.T,self._extended_input))
-        self._new_XtY = add_inplace(self._XtY,T.dot(self._extended_input.T,self._target))
-        self._new_theta = T.solve_inplace(self._theta,self._XtX,self._XtY)
-
-    def allocate(self,dataset):
-        dataset_n_inputs  = dataset["input"].shape[1]
-        dataset_n_outputs = dataset["target"].shape[1]
-        if not self._n_inputs:
-            self._n_inputs = dataset_n_inputs 
-            self._n_outputs = dataset_n_outputs
-            self.XtX = numpy.zeros((1+self._n_inputs,1+self._n_inputs))
-            self.XtY = numpy.zeros((1+self._n_inputs,self._n_outputs))
-            self.theta = numpy.zeros((self._n_outputs,1+self._n_inputs))
-            self.forget()
-        elif self._n_inputs!=dataset_n_inputs or self._n_outputs!=dataset_n_outputs:
-            # if the input or target changes dimension on the fly, we resize and forget everything
-            self.forget()
-            
-    def forget(self):
-        if self._n_inputs and self._n_outputs:
-            self.XtX.resize((1+self.n_inputs,1+self.n_inputs))
-            self.XtY.resize((1+self.n_inputs,self.n_outputs))
-            self.XtX.data[:,:]=0
-            self.XtY.data[:,:]=0
-            numpy.diag(self.XtX.data)[1:]=self.L2_regularizer
-
-    def __call__(self,dataset):
-
-