comparison kernel_regression.py @ 422:32c5f87bc54e
Added __len__ to HStackedDataSet and replaced default len() by sys.maxint instead of None
author    Yoshua Bengio <bengioy@iro.umontreal.ca>
date      Sat, 19 Jul 2008 14:12:41 -0400
parents   e01f17be270a
children  e2b46a8f2b7b
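
The commit message describes a change to the dataset module, which is not part of the file compared below. As a loose sketch only of what that change might look like (the DataSet base class shown here, the self.datasets attribute, and the choice of delegating to the first stacked dataset are assumptions, not the actual dataset.py diff):

    import sys

    class DataSet(object):
        # Hypothetical base class: per the commit message, the default len()
        # is now sys.maxint ("length unknown / unbounded") rather than None.
        def __len__(self):
            return sys.maxint  # Python 2 constant; sys.maxsize in Python 3

    class HStackedDataSet(DataSet):
        # Hypothetical internals: a horizontal stack holds one sub-dataset per
        # group of fields, all with the same number of examples.
        def __init__(self, datasets):
            self.datasets = datasets

        def __len__(self):
            # Every stacked dataset has the same length, so delegate to any one.
            return len(self.datasets[0])
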
comparing 421:e01f17be270a (left) with 422:32c5f87bc54e (right)
46 posterior for f (whose mean is computed here). | 46 posterior for f (whose mean is computed here). |
47 | 47 |
48 | 48 |
49 Usage: | 49 Usage: |
50 | 50 |
51 kernel_regressor=KernelRegression(L2_regularizer=0.1,kernel=GaussianKernel(gamma=0.5)) | 51 kernel_regressor=KernelRegression(L2_regularizer=0.1,gamma=0.5) (kernel=GaussianKernel(gamma=0.5)) |
52 kernel_predictor=kernel_regressor(training_set) | 52 kernel_predictor=kernel_regressor(training_set) |
53 all_results_dataset=kernel_predictor(test_set) # creates a dataset with "output" and "squared_error" field | 53 all_results_dataset=kernel_predictor(test_set) # creates a dataset with "output" and "squared_error" field |
54 outputs = kernel_predictor.compute_outputs(inputs) # inputs and outputs are numpy arrays | 54 outputs = kernel_predictor.compute_outputs(inputs) # inputs and outputs are numpy arrays |
55 outputs, errors = kernel_predictor.compute_outputs_and_errors(inputs,targets) | 55 outputs, errors = kernel_predictor.compute_outputs_and_errors(inputs,targets) |
56 errors = kernel_predictor.compute_errors(inputs,targets) | 56 errors = kernel_predictor.compute_errors(inputs,targets) |
94 M[i+1,i+1]=self.L2_regularizer | 94 M[i+1,i+1]=self.L2_regularizer |
95 data = trainset.fields() | 95 data = trainset.fields() |
96 train_inputs = numpy.array(data['input']) | 96 train_inputs = numpy.array(data['input']) |
97 Y[0]=1 | 97 Y[0]=1 |
98 Y[1:,:] = numpy.array(data['target']) | 98 Y[1:,:] = numpy.array(data['target']) |
99 M,train_inputs_square=self.equations.compute_system_matrix(train_inputs,M) | 99 train_inputs_square,sumG=self.equations.compute_system_matrix(train_inputs,M) |
| 100 M[0,1:] = sumG |
| 101 M[1:,0] = 1 |
| 102 M[0,0] = M.shape[0] |
| 103 print M |
100 theta=numpy.linalg.solve(M,Y) | 104 theta=numpy.linalg.solve(M,Y) |
101 return KernelPredictor(theta,self.gamma, train_inputs, train_inputs_square) | 105 return KernelPredictor(theta,self.gamma, train_inputs, train_inputs_square) |
102 | 106 |
103 class KernelPredictorEquations(AutoName): | 107 class KernelPredictorEquations(AutoName): |
104 train_inputs = T.matrix() # n_examples x n_inputs | 108 train_inputs = T.matrix() # n_examples x n_inputs |
109 gamma = T.scalar() | 113 gamma = T.scalar() |
110 inv_gamma2 = 1./(gamma*gamma) | 114 inv_gamma2 = 1./(gamma*gamma) |
111 b = theta[0] | 115 b = theta[0] |
112 alpha = theta[1:,:] | 116 alpha = theta[1:,:] |
113 inputs_square = T.sum(inputs*inputs,axis=1) | 117 inputs_square = T.sum(inputs*inputs,axis=1) |
114 Kx = exp(-(train_inputs_square-2*dot(inputs,train_inputs.T)+inputs_square)*inv_gamma2) | 118 Kx = T.exp(-(train_inputs_square-2*T.dot(inputs,train_inputs.T)+inputs_square)*inv_gamma2) |
115 outputs = T.dot(Kx,alpha) + b # minibatchsize x n_outputs | 119 outputs = T.dot(Kx,alpha) + b # minibatchsize x n_outputs |
116 squared_errors = T.sum(T.sqr(targets-outputs),axis=1) | 120 squared_errors = T.sum(T.sqr(targets-outputs),axis=1) |
117 | 121 |
118 __compiled = False | 122 __compiled = False |
119 @classmethod | 123 @classmethod |
130 | 134 |
131 def __init__(self): | 135 def __init__(self): |
132 self.compile() | 136 self.compile() |
133 | 137 |
134 class KernelRegressionEquations(KernelPredictorEquations): | 138 class KernelRegressionEquations(KernelPredictorEquations): |
135 # P = KernelPredictorEquations | |
136 M = T.matrix() # (n_examples+1) x (n_examples+1) | 139 M = T.matrix() # (n_examples+1) x (n_examples+1) |
137 inputs = T.matrix() # n_examples x n_inputs | 140 inputs = T.matrix() # n_examples x n_inputs |
138 G = M[1:,1:] | 141 G = M[1:,1:] |
139 new_G = gemm(G,1.,inputs,inputs.T,1.) | 142 new_G = T.gemm(G,1.,inputs,inputs.T,1.) |
140 M2 = T.add_inplace(M,new_G) | 143 sumG = T.sum(new_G,axis=0) |
141 M2[0,0] = M.shape[0] | |
142 M2[1:,0] = 1 | |
143 M2[0,1:] = T.sum(G,axis=0) | |
144 inputs_square = T.sum(inputs*inputs,axis=1) | 144 inputs_square = T.sum(inputs*inputs,axis=1) |
145 | 145 |
146 __compiled = False | 146 __compiled = False |
147 | 147 |
148 @classmethod | 148 @classmethod |
150 if cls.__compiled: | 150 if cls.__compiled: |
151 return | 151 return |
152 def fn(input_vars,output_vars): | 152 def fn(input_vars,output_vars): |
153 return staticmethod(theano.function(input_vars,output_vars, linker=linker)) | 153 return staticmethod(theano.function(input_vars,output_vars, linker=linker)) |
154 | 154 |
155 cls.compute_system_matrix = fn([cls.inputs,cls.M],[cls.M2,cls.inputs_square]) | 155 cls.compute_system_matrix = fn([cls.inputs,cls.M],[cls.inputs_square,cls.sumG]) |
156 | 156 |
157 cls.__compiled = True | 157 cls.__compiled = True |
158 | 158 |
159 def __init__(self): | 159 def __init__(self): |
160 self.compile() | 160 self.compile() |
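
Editorial note on the changed training code above: in the new revision compute_system_matrix returns the column sums of the Gram block (sumG) together with train_inputs_square, and the border of M is now filled in with numpy (M[0,1:] = sumG, M[1:,0] = 1, M[0,0] = M.shape[0]) rather than inside the Theano graph. Reading those assignments literally, and writing λ for L2_regularizer, G for the Gram matrix that gemm accumulates into the lower-right block (inputs·inputsᵀ in this revision, which the code appears to rely on being an in-place update of M), and T for the target matrix, the bordered system handed to numpy.linalg.solve is, as a sketch:

\[
\begin{bmatrix}
n+1 & \mathbf{1}^{\top}\,(G + \lambda I) \\
\mathbf{1} & G + \lambda I
\end{bmatrix}
\begin{bmatrix}
b \\
\alpha
\end{bmatrix}
=
\begin{bmatrix}
1 \\
T
\end{bmatrix},
\]

with theta = numpy.linalg.solve(M, Y) recovering (b, α); KernelPredictorEquations then unpacks b = theta[0] as the bias and alpha = theta[1:,:] as the per-example kernel weights.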
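
On the prediction side the diff only qualifies exp and dot with the T. prefix; the expression itself is unchanged. For reference, Kx evaluates the Gaussian kernel through the usual squared-distance expansion, which is why train_inputs_square and inputs_square are precomputed and inv_gamma2 = 1/γ²:

\[
K(x, x_i)
= \exp\!\left(-\frac{\lVert x - x_i \rVert^{2}}{\gamma^{2}}\right)
= \exp\!\left(-\frac{\lVert x_i \rVert^{2} - 2\,x^{\top} x_i + \lVert x \rVert^{2}}{\gamma^{2}}\right),
\qquad
\hat{f}(x) = b + \sum_{i=1}^{n} \alpha_i\, K(x, x_i),
\]

which is what outputs = T.dot(Kx, alpha) + b computes, one row per example in the minibatch.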