Mercurial > pylearn
comparison nnet_ops.py @ 25:b63e8c0bf21b
added __init__.py, fixed crossentropy_softmax_1hot function name
author | bergstrj@iro.umontreal.ca |
---|---|
date | Thu, 10 Apr 2008 20:53:44 -0400 |
parents | 2e8be9f5412b |
children | bf0145fa73e8 |
comparison legend: equal | deleted | inserted | replaced
24:2e8be9f5412b | 25:b63e8c0bf21b |
---|---|
49 self.outputs[0].data = nll | 49 self.outputs[0].data = nll |
50 self.outputs[1].data = sm | 50 self.outputs[1].data = sm |
51 def grad(self, (x, y_idx), (g_nll, g_sm)): | 51 def grad(self, (x, y_idx), (g_nll, g_sm)): |
52 if g_sm is not None: | 52 if g_sm is not None: |
53 raise NotImplementedError() | 53 raise NotImplementedError() |
54 nll, sm = cross_entropy_softmax_1hot(x, y_idx) | 54 nll, sm = crossentropy_softmax_1hot(x, y_idx) |
55 dx = CrossentropySoftmax1Hot.Dx(g_nll, sm, y_idx).outputs[0] | 55 dx = CrossentropySoftmax1Hot.Dx(g_nll, sm, y_idx).outputs[0] |
56 return dx, None | 56 return dx, None |
57 | 57 |
58 class Dx (gof.op.Op): | 58 class Dx (gof.op.Op): |
59 nin=3 | 59 nin=3 |
72 dx[i] = dy[i] * sm[i] #vector scale | 72 dx[i] = dy[i] * sm[i] #vector scale |
73 dx[i, y_idx[i]] -= dy[i] #scalar decrement | 73 dx[i, y_idx[i]] -= dy[i] #scalar decrement |
74 self.outputs[0].data = dx | 74 self.outputs[0].data = dx |
75 def grad(self, *args): | 75 def grad(self, *args): |
76 raise NotImplementedError() | 76 raise NotImplementedError() |
77 cross_entropy_softmax_1hot = gof.op.constructor(CrossentropySoftmax1Hot) | 77 crossentropy_softmax_1hot = gof.op.constructor(CrossentropySoftmax1Hot) |
78 | 78 |
79 #TODO: write a version of CrossentropySoftmax1Hot that accepts a bias for x, if | 79 #TODO: write a version of CrossentropySoftmax1Hot that accepts a bias for x, if |
80 # this op needs to be faster. | 80 # this op needs to be faster. |
81 | 81 |