# pylearn/algorithms/cost.py
"""
Cost functions.

@note: All of these functions return one cost per example. So it is your
job to perform a tensor.sum over the individual example losses.

@todo: Make a Cost class, with a particular contract.

@todo: It would be nice to implement a hinge loss, with a particular margin.
"""

import theano.tensor as T
from theano.tensor.xlogx import xlogx

def quadratic(target, output, axis=1):
    """
    Squared error, averaged over `axis` (by default the feature dimension),
    yielding one cost per example.
    """
    return T.mean(T.sqr(target - output), axis=axis)
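
# A minimal usage sketch (an assumption, not part of the original module):
# reduce the per-example quadratic losses to the scalar training cost the
# module docstring asks for. The helper name and its inputs are hypothetical.
def _example_total_quadratic(target, output):
    """Hypothetical helper: sum per-example quadratic losses to a scalar."""
    return T.sum(quadratic(target, output))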

def cross_entropy(target, output, mean_axis=0, sum_axis=1):
    """
    Cross-entropy of a binary event, in which each dimension is an
    independent Bernoulli trial.
    @note: Unlike most costs in this module, this returns a single scalar:
    the per-example losses are summed over `sum_axis` and then averaged
    over `mean_axis`.
    @todo: This is essentially duplicated as nnet_ops.binary_crossentropy
    @warning: OUTPUT and TARGET are reversed in nnet_ops.binary_crossentropy
    """
    XE = target * T.log(output) + (1 - target) * T.log(1 - output)
    return -T.mean(T.sum(XE, axis=sum_axis), axis=mean_axis)
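
# A hedged usage sketch (not in the original file): `output` must lie in the
# open interval (0, 1), e.g. sigmoid activations. The helper and its
# arguments are hypothetical.
def _example_binary_xe_cost(target, pre_activation):
    """Hypothetical helper: mean binary cross-entropy of sigmoid outputs."""
    output = T.nnet.sigmoid(pre_activation)  # squash into (0, 1)
    return cross_entropy(target, output)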

def KL_divergence(target, output):
    """
    Elementwise KL divergence between Bernoulli distributions with
    parameters `target` and `output`.
    @note: We do not reduce to a mean here, because if target and output
    have different (broadcastable) shapes, the reduction would silently
    garble the result; reduce explicitly at the call site.
    """
    # Elementwise this is the cross-entropy of output under target, minus
    # the entropy of target: XE(target, output) - XE(target, target).
    return -(target * T.log(output) + (1 - target) * T.log(1 - output)) \
            + (xlogx(target) + xlogx(1 - target))
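
# A minimal sketch (an assumption, not part of the original module): reduce
# the elementwise KL terms to one cost per example and then to a scalar, for
# 2-d (example, dimension) inputs. The helper name is hypothetical.
def _example_total_kl(target, output):
    """Hypothetical helper: scalar KL cost from the elementwise divergences."""
    per_example = T.sum(KL_divergence(target, output), axis=1)
    return T.sum(per_example)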