pylearn: comparison of algorithms/logistic_regression.py @ 497:a272f4cbf004

'x' => 'input'
'y' => 'output'
author   | Joseph Turian <turian@gmail.com>
date     | Tue, 28 Oct 2008 12:25:04 -0400
parents  | 7560817a07e8
children | a419edf4e06c
496:f13847478c6d (before) | 497:a272f4cbf004 (after)
---|---
16 self.w = N.zeros((n_in, n_out)) | 16 self.w = N.zeros((n_in, n_out)) |
17 self.b = N.zeros(n_out) | 17 self.b = N.zeros(n_out) |
18 self.lr = 0.01 | 18 self.lr = 0.01 |
19 self.__hide__ = ['params'] | 19 self.__hide__ = ['params'] |
20 | 20 |
21 def __init__(self, x=None, targ=None, w=None, b=None, lr=None, regularize=False): | 21 def __init__(self, input=None, targ=None, w=None, b=None, lr=None, regularize=False): |
22 super(Module_Nclass, self).__init__() #boilerplate | 22 super(Module_Nclass, self).__init__() #boilerplate |
23 | 23 |
24 self.x = x if x is not None else T.matrix() | 24 self.input = input if input is not None else T.matrix('input') |
25 self.targ = targ if targ is not None else T.lvector() | 25 self.targ = targ if targ is not None else T.lvector() |
26 | 26 |
27 self.w = w if w is not None else module.Member(T.dmatrix()) | 27 self.w = w if w is not None else module.Member(T.dmatrix()) |
28 self.b = b if b is not None else module.Member(T.dvector()) | 28 self.b = b if b is not None else module.Member(T.dvector()) |
29 self.lr = lr if lr is not None else module.Member(T.dscalar()) | 29 self.lr = lr if lr is not None else module.Member(T.dscalar()) |
30 | 30 |
31 self.params = [p for p in [self.w, self.b] if p.owner is None] | 31 self.params = [p for p in [self.w, self.b] if p.owner is None] |
32 | 32 |
33 xent, y = nnet.crossentropy_softmax_1hot( | 33 xent, output = nnet.crossentropy_softmax_1hot( |
34 T.dot(self.x, self.w) + self.b, self.targ) | 34 T.dot(self.input, self.w) + self.b, self.targ) |
35 sum_xent = T.sum(xent) | 35 sum_xent = T.sum(xent) |
36 | 36 |
37 self.y = y | 37 self.output = output |
38 self.sum_xent = sum_xent | 38 self.sum_xent = sum_xent |
39 self.cost = sum_xent | 39 self.cost = sum_xent |
40 | 40 |
41 #define the apply method | 41 #define the apply method |
42 self.pred = T.argmax(T.dot(self.x, self.w) + self.b, axis=1) | 42 self.pred = T.argmax(T.dot(self.input, self.w) + self.b, axis=1) |
43 self.apply = module.Method([self.x], self.pred) | 43 self.apply = module.Method([self.input], self.pred) |
44 | 44 |
45 if self.params: | 45 if self.params: |
46 gparams = T.grad(sum_xent, self.params) | 46 gparams = T.grad(sum_xent, self.params) |
47 | 47 |
48 self.update = module.Method([self.x, self.targ], sum_xent, | 48 self.update = module.Method([self.input, self.targ], sum_xent, |
49 updates = dict((p, p - self.lr * g) for p, g in zip(self.params, gparams))) | 49 updates = dict((p, p - self.lr * g) for p, g in zip(self.params, gparams))) |
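
For orientation, here is a minimal usage sketch of the renamed multiclass interface. It assumes Theano's old `module` API, in which `make()` compiles the `Method`s above into callables, and that `InstanceType.initialize` takes `(n_in, n_out)` as its visible body suggests; the shapes and data are made up for illustration.

```python
import numpy as N

m = Module_Nclass()
inst = m.make()                  # compile apply/update into callable functions
inst.initialize(784, 10)         # w = zeros((784, 10)), b = zeros(10), lr = 0.01

x = N.random.randn(100, 784)              # a batch of inputs
targ = N.random.randint(0, 10, size=100)  # integer class targets, as T.lvector()

for epoch in range(10):
    cost = inst.update(x, targ)  # one gradient step on w and b; returns sum_xent
preds = inst.apply(x)            # argmax class predictions, shape (100,)
```
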
50 | 50 |
51 class Module(module.FancyModule): | 51 class Module(module.FancyModule): |
52 class InstanceType(module.FancyModuleInstance): | 52 class InstanceType(module.FancyModuleInstance): |
53 def initialize(self, n_in): | 53 def initialize(self, n_in): |
56 self.w = N.random.randn(n_in,1) | 56 self.w = N.random.randn(n_in,1) |
57 self.b = N.random.randn(1) | 57 self.b = N.random.randn(1) |
58 self.lr = 0.01 | 58 self.lr = 0.01 |
59 self.__hide__ = ['params'] | 59 self.__hide__ = ['params'] |
60 | 60 |
61 def __init__(self, x=None, targ=None, w=None, b=None, lr=None, regularize=False): | 61 def __init__(self, input=None, targ=None, w=None, b=None, lr=None, regularize=False): |
62 super(Module, self).__init__() #boilerplate | 62 super(Module, self).__init__() #boilerplate |
63 | 63 |
64 self.x = x if x is not None else T.matrix() | 64 self.input = input if input is not None else T.matrix('input') |
65 self.targ = targ if targ is not None else T.lcol() | 65 self.targ = targ if targ is not None else T.lcol() |
66 | 66 |
67 self.w = w if w is not None else module.Member(T.dmatrix()) | 67 self.w = w if w is not None else module.Member(T.dmatrix()) |
68 self.b = b if b is not None else module.Member(T.dvector()) | 68 self.b = b if b is not None else module.Member(T.dvector()) |
69 self.lr = lr if lr is not None else module.Member(T.dscalar()) | 69 self.lr = lr if lr is not None else module.Member(T.dscalar()) |
70 | 70 |
71 self.params = [p for p in [self.w, self.b] if p.owner is None] | 71 self.params = [p for p in [self.w, self.b] if p.owner is None] |
72 | 72 |
73 y = nnet.sigmoid(T.dot(self.x, self.w)) | 73 output = nnet.sigmoid(T.dot(self.input, self.w)) |
74 xent = -self.targ * T.log(y) - (1.0 - self.targ) * T.log(1.0 - y) | 74 xent = -self.targ * T.log(output) - (1.0 - self.targ) * T.log(1.0 - output) |
75 sum_xent = T.sum(xent) | 75 sum_xent = T.sum(xent) |
76 | 76 |
77 self.y = y | 77 self.output = output |
78 self.xent = xent | 78 self.xent = xent |
79 self.sum_xent = sum_xent | 79 self.sum_xent = sum_xent |
80 self.cost = sum_xent | 80 self.cost = sum_xent |
81 | 81 |
82 #define the apply method | 82 #define the apply method |
83 self.pred = (T.dot(self.x, self.w) + self.b) > 0.0 | 83 self.pred = (T.dot(self.input, self.w) + self.b) > 0.0 |
84 self.apply = module.Method([self.x], self.pred) | 84 self.apply = module.Method([self.input], self.pred) |
85 | 85 |
86 #if this module has any internal parameters, define an update function for them | 86 #if this module has any internal parameters, define an update function for them |
87 if self.params: | 87 if self.params: |
88 gparams = T.grad(sum_xent, self.params) | 88 gparams = T.grad(sum_xent, self.params) |
89 self.update = module.Method([self.x, self.targ], sum_xent, | 89 self.update = module.Method([self.input, self.targ], sum_xent, |
90 updates = dict((p, p - self.lr * g) for p, g in zip(self.params, gparams))) | 90 updates = dict((p, p - self.lr * g) for p, g in zip(self.params, gparams))) |
91 | 91 |
92 | 92 |
93 | 93 |
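
As a cross-check of the math in the binary `Module`, here is a self-contained NumPy sketch of the same forward pass, summed cross-entropy, and gradient step. One editorial assumption: the sketch applies the bias in the activation, whereas the file's `output` omits `b` even though `pred` uses it.

```python
import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def update(x, targ, w, b, lr=0.01):
    """One gradient step on summed cross-entropy, mirroring Module.update."""
    p = sigmoid(x.dot(w) + b)           # predicted probabilities, shape (batch, 1)
    xent = -targ * np.log(p) - (1.0 - targ) * np.log(1.0 - p)
    grad = p - targ                     # d(sum_xent)/d(activation)
    w_new = w - lr * x.T.dot(grad)      # d(sum_xent)/dw = x^T (p - targ)
    b_new = b - lr * grad.sum(axis=0)   # d(sum_xent)/db = sum(p - targ)
    return xent.sum(), w_new, b_new

# illustrative shapes: targ is a column of 0/1 labels, as T.lcol() suggests
x = np.random.randn(100, 5)
w = np.random.randn(5, 1)
b = np.random.randn(1)
targ = (np.random.rand(100, 1) > 0.5).astype(float)

cost, w, b = update(x, targ, w, b)
pred = (x.dot(w) + b) > 0.0             # mirrors self.pred
```
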
94 class Learner(object): | 94 class Learner(object): |