Mercurial > pylearn
diff kernel_regression.py @ 426:d7611a3811f2
Moved incomplete stuff to sandbox
author | Yoshua Bengio <bengioy@iro.umontreal.ca> |
---|---|
date | Tue, 22 Jul 2008 15:20:25 -0400 |
parents | e2b46a8f2b7b |
children | 0f8c81b0776d |
line wrap: on
line diff
--- a/kernel_regression.py	Sat Jul 19 17:57:46 2008 -0400
+++ b/kernel_regression.py	Tue Jul 22 15:20:25 2008 -0400
@@ -82,9 +82,11 @@
       - 'squared_error' (optionally produced by learned model if 'target' is provided) = example-wise squared error
     """
-    def __init__(self, kernel=None, L2_regularizer=0, gamma=1):
+    def __init__(self, kernel=None, L2_regularizer=0, gamma=1, use_bias=False):
+        # THE VERSION WITH BIAS DOES NOT SEEM RIGHT
         self.kernel = kernel
         self.L2_regularizer=L2_regularizer
+        self.use_bias=use_bias
         self.gamma = gamma
         # until we fix things, the kernel type is fixed, Gaussian
         self.equations = KernelRegressionEquations()
@@ -93,19 +95,22 @@
         first_example = trainset[0]
         n_inputs = first_example['input'].size
         n_outputs = first_example['target'].size
-        M = numpy.zeros((n_examples+1,n_examples+1))
-        Y = numpy.zeros((n_examples+1,n_outputs))
+        b1=1 if self.use_bias else 0
+        M = numpy.zeros((n_examples+b1,n_examples+b1))
+        Y = numpy.zeros((n_examples+b1,n_outputs))
         for i in xrange(n_examples):
-            M[i+1,i+1]=self.L2_regularizer
+            M[i+b1,i+b1]=self.L2_regularizer
         data = trainset.fields()
         train_inputs = numpy.array(data['input'])
-        Y[0]=1
-        Y[1:,:] = numpy.array(data['target'])
+        if self.use_bias:
+            Y[0]=1
+        Y[b1:,:] = numpy.array(data['target'])
         train_inputs_square,sumG,G=self.equations.compute_system_matrix(train_inputs,self.gamma)
-        M[1:,1:] += G
-        M[0,1:] = sumG
-        M[1:,0] = 1
-        M[0,0] = M.shape[0]
+        M[b1:,b1:] += G
+        if self.use_bias:
+            M[0,1:] = sumG
+            M[1:,0] = 1
+            M[0,0] = M.shape[0]
         self.M=M
         self.Y=Y
         theta=numpy.linalg.solve(M,Y)
@@ -117,10 +122,11 @@
         inputs = T.matrix() # minibatchsize x n_inputs
         targets = T.matrix() # minibatchsize x n_outputs
         theta = T.matrix() # (n_examples+1) x n_outputs
+        b1 = T.shape(train_inputs_square)[0]<T.shape(theta)[0]
         gamma = T.scalar()
         inv_gamma2 = 1./(gamma*gamma)
-        b = theta[0]
-        alpha = theta[1:,:]
+        b = b1*theta[0]
+        alpha = theta[b1:,:]
         inputs_square = T.sum(inputs*inputs,axis=1)
         Kx = T.exp(-(row_vector(train_inputs_square)-2*T.dot(inputs,train_inputs.T)+col_vector(inputs_square))*inv_gamma2)
         outputs = T.dot(Kx,alpha) + b # minibatchsize x n_outputs