pylearn: comparison of algorithms/logistic_regression.py @ 503:c7ce66b4e8f4

Extensions to algorithms, and some cleanup (by defining linear_output result).

author    Joseph Turian <turian@gmail.com>
date      Wed, 29 Oct 2008 03:29:18 -0400
parents   17945defd813
children  b267a8000f92
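For context, the changeset factors the pre-softmax activation out into a named linear_output result and switches from nnet.crossentropy_softmax_1hot to nnet.crossentropy_softmax_max_and_argmax_1hot, so the softmax, the predicted class (argmax) and its probability (max_pr) become available alongside the per-example cross-entropy. The NumPy sketch below (toy shapes and values invented here for illustration; this is not the Theano graph the module builds) shows what those quantities are:

    import numpy as np

    # Illustrative toy shapes; none of these values come from the changeset.
    n_in, n_out, batch = 5, 3, 4
    rng = np.random.RandomState(0)
    x = rng.randn(batch, n_in)
    w = rng.randn(n_in, n_out)
    b = np.zeros(n_out)
    targ = rng.randint(0, n_out, size=batch)   # integer class labels ("1hot" targets)

    # The quantity the changeset names explicitly.
    linear_output = x.dot(w) + b               # shape (batch, n_out)

    # Row-wise softmax of the linear output.
    e = np.exp(linear_output - linear_output.max(axis=1, keepdims=True))
    softmax = e / e.sum(axis=1, keepdims=True)

    # Per-example cross-entropy against the integer targets,
    # plus the most probable class and its probability.
    xent = -np.log(softmax[np.arange(batch), targ])
    argmax = softmax.argmax(axis=1)
    max_pr = softmax.max(axis=1)
    sum_xent = xent.sum()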
--- algorithms/logistic_regression.py @ 502:17945defd813
+++ algorithms/logistic_regression.py @ 503:c7ce66b4e8f4
@@ -16,10 +16,12 @@
 
         self.w = N.zeros((n_in, n_out))
         self.b = N.zeros(n_out)
         self.lr = 0.01
         self.__hide__ = ['params']
+        self.input_dimension = n_in
+        self.output_dimension = n_out
 
 class Module_Nclass(module.FancyModule):
     InstanceType = LogRegInstanceType
 
     def __init__(self, x=None, targ=None, w=None, b=None, lr=None, regularize=False):
@@ -32,25 +34,38 @@
         self.b = b if b is not None else module.Member(T.dvector())
         self.lr = lr if lr is not None else module.Member(T.dscalar())
 
         self.params = [p for p in [self.w, self.b] if p.owner is None]
 
-        xent, output = nnet.crossentropy_softmax_1hot(
-                T.dot(self.x, self.w) + self.b, self.targ)
+        linear_output = T.dot(self.x, self.w) + self.b
+
+        (xent, softmax, max_pr, argmax) = nnet.crossentropy_softmax_max_and_argmax_1hot(
+                linear_output, self.targ)
         sum_xent = T.sum(xent)
 
-        self.output = output
+        self.softmax = softmax
+        self.argmax = argmax
+        self.max_pr = max_pr
         self.sum_xent = sum_xent
+
+        # Softmax being computed directly.
+        softmax_unsupervised = nnet.softmax(linear_output)
+        self.softmax_unsupervised = softmax_unsupervised
 
         #compatibility with current implementation of stacker/daa or something
         #TODO: remove this, make a wrapper
-        self.cost = sum_xent
+        self.cost = self.sum_xent
         self.input = self.x
+        # TODO: I want to make output = linear_output.
+        self.output = self.softmax_unsupervised
 
         #define the apply method
-        self.pred = T.argmax(T.dot(self.input, self.w) + self.b, axis=1)
+        self.pred = T.argmax(linear_output, axis=1)
         self.apply = module.Method([self.input], self.pred)
+
+        self.validate = module.Method([self.input, self.targ], [self.cost, self.argmax, self.max_pr])
+        self.softmax_output = module.Method([self.input], self.softmax_unsupervised)
 
         if self.params:
             gparams = T.grad(sum_xent, self.params)
 
             self.update = module.Method([self.input, self.targ], sum_xent,
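The hgweb view truncates the diff in the middle of the update Method's definition, so the gradient-descent updates built from gparams are not shown above and are not reconstructed here. For a rough picture of what such an update does, here is a self-contained NumPy sketch using the standard closed-form gradient of summed softmax cross-entropy; the learning rate, shapes, and one-hot encoding are illustrative, not taken from the module:

    import numpy as np

    # Illustrative toy data; none of these values come from the changeset.
    rng = np.random.RandomState(0)
    n_in, n_out, batch, lr = 5, 3, 4, 0.01
    x = rng.randn(batch, n_in)
    w = np.zeros((n_in, n_out))
    b = np.zeros(n_out)
    targ = rng.randint(0, n_out, size=batch)

    # Forward pass: linear output and row-wise softmax.
    linear_output = x.dot(w) + b
    e = np.exp(linear_output - linear_output.max(axis=1, keepdims=True))
    softmax = e / e.sum(axis=1, keepdims=True)

    # Closed-form gradients of the summed cross-entropy w.r.t. w and b.
    onehot = np.zeros((batch, n_out))
    onehot[np.arange(batch), targ] = 1.0
    grad_w = x.T.dot(softmax - onehot)       # shape (n_in, n_out), matches w
    grad_b = (softmax - onehot).sum(axis=0)  # shape (n_out,), matches b

    # One plain gradient-descent step, in the spirit of the module's update Method.
    w -= lr * grad_w
    b -= lr * grad_b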