pylearn: comparison cost.py @ 454:6e7509acb1c0

Merged
| author | delallea@valhalla.apstat.com |
|---|---|
| date | Thu, 02 Oct 2008 13:41:43 -0400 |
| parents | d99fefbc9324 |
| children | 3daabc7f94ff |
```diff
--- cost.py	453:ce6b4fd3ab29
+++ cost.py	454:6e7509acb1c0
@@ -4,15 +4,25 @@
 @note: All of these functions return one cost per example. So it is your
 job to perform a tensor.sum over the individual example losses.
 """
 
 import theano.tensor as T
+from xlogx import xlogx
 
 def quadratic(target, output, axis=1):
     return T.mean(T.sqr(target - output), axis)
 
 def cross_entropy(target, output, axis=1):
     """
     @todo: This is essentially duplicated as nnet_ops.binary_crossentropy
     @warning: OUTPUT and TARGET are reversed in nnet_ops.binary_crossentropy
     """
     return -T.mean(target * T.log(output) + (1 - target) * T.log(1 - output), axis=axis)
+
+def KL_divergence(target, output):
+    """
+    @note: We do not compute the mean, because if target and output have
+    different shapes then the result will be garbled.
+    """
+    return -(target * T.log(output) + (1 - target) * T.log(1 - output)) \
+            + (xlogx(target) + xlogx(1 - target))
+#    return cross_entropy(target, output, axis) - cross_entropy(target, target, axis)
```
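For reference, the quantity returned by the new `KL_divergence` is, elementwise, the KL divergence between Bernoulli distributions with parameters $t$ (`target`) and $o$ (`output`); this reading assumes the `xlogx` op evaluates $x \log x$ (with the convention $0 \log 0 = 0$, so degenerate targets are handled):

$$
\mathrm{KL}(t \,\|\, o) = t \log\frac{t}{o} + (1-t)\log\frac{1-t}{1-o}
= -\bigl(t \log o + (1-t)\log(1-o)\bigr) + \bigl(t \log t + (1-t)\log(1-t)\bigr),
$$

i.e. cross-entropy minus the entropy of the target. This is also what the commented-out alternative `cross_entropy(target, output) - cross_entropy(target, target)` computes, except that `cross_entropy` takes a mean over `axis` while `KL_divergence` deliberately does not; note too that the comment still passes an `axis` argument that `KL_divergence` itself does not accept.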
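The module docstring makes the caller responsible for the `tensor.sum` over the individual example losses. A minimal sketch of that pattern, not part of the changeset: the `from cost import ...` path and all variable names here are assumptions for illustration.

```python
# Usage sketch only. Assumes cost.py is importable as `cost`;
# adjust the import to wherever the module actually lives.
import theano
import theano.tensor as T
from cost import cross_entropy

target = T.matrix('target')  # one row of targets per example
output = T.matrix('output')  # model outputs, assumed to lie in (0, 1)

# cross_entropy returns a vector with one cost per example ...
per_example = cross_entropy(target, output)
# ... so, as the module docstring instructs, the caller performs the sum.
total_cost = T.sum(per_example)

f = theano.function([target, output], total_cost)
```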