changeset 725:98a99aafd14a

Merged
author Olivier Delalleau <delallea@iro>
date Wed, 27 May 2009 09:59:39 -0400
parents d42b4bcbb582 (current diff) 2881c67026c1 (diff)
children 2014db81b9bc
files
diffstat 2 files changed, 15 insertions(+), 6 deletions(-)
--- a/pylearn/algorithms/cost.py	Wed May 27 09:59:25 2009 -0400
+++ b/pylearn/algorithms/cost.py	Wed May 27 09:59:39 2009 -0400
@@ -17,6 +17,8 @@
 
 def cross_entropy(target, output, mean_axis=0, sum_axis=1):
     """
+    This is the cross-entropy over binomial events, in which each dimension
+    is an independent binomial trial.
     @todo: This is essentially duplicated as nnet_ops.binary_crossentropy
     @warning: OUTPUT and TARGET are reversed in nnet_ops.binary_crossentropy
     """
@@ -25,6 +27,8 @@
 
 def KL_divergence(target, output):
     """
+    This is the KL divergence over binomial events, in which each dimension
+    is an independent binomial trial.
     @note: We do not compute the mean, because if target and output have
     different shapes then the result will be garbled.
     """
--- a/pylearn/algorithms/sgd.py	Wed May 27 09:59:25 2009 -0400
+++ b/pylearn/algorithms/sgd.py	Wed May 27 09:59:39 2009 -0400
@@ -5,7 +5,9 @@
 
 class StochasticGradientDescent(theano.Module):
     """Fixed stepsize gradient descent"""
-    def __init__(self, args, cost, params, gradients=None, stepsize=None, updates=None):
+    def __init__(self, args, cost, params, 
+                 gradients=None, stepsize=None, 
+                 updates=None, auxout=None):
         """
         :param stepsize: the step to take in (negative) gradient direction
         :type stepsize: None, scalar value, or scalar TensorVariable
@@ -13,6 +15,8 @@
         :param updates: extra symbolic updates to make when evaluating either step or step_cost
         (these override the gradients if necessary)
         :type updates: dict Variable -> Variable
+        :param auxout: auxiliary outputs: a list of output symbols to
+                       compute at the same time as cost (for efficiency)
         """
         super(StochasticGradientDescent, self).__init__()
         self.stepsize_init = None
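
A hedged sketch of how the new auxout argument might be used when building the module; x, y, cost, params, and misclass are hypothetical Theano variables, and only the constructor signature above comes from this changeset:

    # Build the module with an extra symbolic output computed alongside cost.
    sgd = StochasticGradientDescent([x, y], cost, params,
                                    stepsize=0.01, auxout=[misclass])
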
@@ -34,12 +38,12 @@
         if updates is not None:
             self._updates.update(updates)
 
-
+        auxout = auxout if auxout else []
         self.step = theano.Method(
-                args, [],
+                args, auxout,
                 updates=self._updates)
         self.step_cost = theano.Method(
-                args, cost,
+                args, [cost]+auxout,
                 updates=self._updates)
 
     updates = property(lambda self: self._updates.copy())
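
With this change, step returns only the auxiliary outputs while step_cost returns the cost followed by them. A hedged usage sketch, assuming the old theano.Module compile-then-call workflow and the hypothetical variables from the sketch above:

    m = sgd.make()                          # compile (old theano.Module API)
    aux = m.step(x_batch, y_batch)          # auxiliary outputs only
    out = m.step_cost(x_batch, y_batch)     # cost first, then auxiliary outputs
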
@@ -52,6 +56,7 @@
     
     :returns: standard minimizer constructor f(args, cost, params, gradient=None, updates=None, auxout=None)
     """
-    def f(args, cost, params, gradient=None, updates=None):
-        return StochasticGradientDescent(args, cost, params, gradient, stepsize, updates=updates)
+    def f(args, cost, params, gradient=None, updates=None, auxout=None):
+        return StochasticGradientDescent(args, cost, params, gradient, stepsize,
+                updates=updates, auxout=auxout)
     return f
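
A hedged sketch of the updated factory, assuming sgd_minimizer takes the stepsize as its argument (its def lies outside this hunk); x, y, cost, params, and misclass are the hypothetical variables used above, and only f's signature comes from this changeset:

    make_sgd = sgd_minimizer(0.01)                            # closure over stepsize
    sgd = make_sgd([x, y], cost, params, auxout=[misclass])
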