pylearn: examples/theano_update.py @ 451:d99fefbc9324
Added a KL-divergence.
author:   Joseph Turian <turian@gmail.com>
date:     Thu, 04 Sep 2008 14:46:30 -0400
parents:  200a5b0e24ea
children:
import theano
from theano import tensor
import numpy

# Two scalar symbolic variables
a = tensor.scalar()
b = tensor.scalar()

# Definition of output symbolic variable
c = a * b

# Definition of the function computing it
fprop = theano.function([a, b], [c])

# Initialize numerical variables
a_val = numpy.array(12.)
b_val = numpy.array(2.)
print 'a_val =', a_val
print 'b_val =', b_val

# Numerical value of output is returned by the call to "fprop"
c_val = fprop(a_val, b_val)
print 'c_val =', c_val

# Definition of simple update (increment by one)
new_b = b + 1
update = theano.function([b], [new_b])

# New numerical value of b is returned by the call to "update"
b_val = update(b_val)
print 'new b_val =', b_val

# We can use the new value in "fprop"
c_val = fprop(a_val, b_val)
print 'c_val =', c_val

# Definition of in-place update (increment by one)
re_new_b = tensor.add_inplace(b, 1.)
re_update = theano.function([b], [re_new_b])

# "re_update" can be used the same way as "update"
b_val = re_update(b_val)
print 'new b_val =', b_val

# We can use the new value in "fprop"
c_val = fprop(a_val, b_val)
print 'c_val =', c_val

# It is not necessary to keep the return value when the update is done in place
re_update(b_val)
print 'new b_val =', b_val
c_val = fprop(a_val, b_val)
print 'c_val =', c_val
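
For comparison, the same increment-by-one pattern can be written with shared variables and the "updates" argument of theano.function. This is a minimal sketch, assuming a later Theano release than the one this example was written for (the shared-variable API postdates this file), and it is not part of the original example.

# Sketch (not in the original file): stateful update via theano.shared,
# assuming a Theano version that provides shared variables and the
# "updates" argument of theano.function.
import numpy
import theano
from theano import tensor

a = tensor.scalar()
b = theano.shared(numpy.array(2.))      # the state lives inside the shared variable

# The multiplication reads the current value of b implicitly
fprop = theano.function([a], a * b)

# Each call to "update" replaces the stored value of b with b + 1
update = theano.function([], updates=[(b, b + 1)])

print fprop(numpy.array(12.))           # uses b == 2.
update()
print fprop(numpy.array(12.))           # uses b == 3.

With this style the caller no longer has to pass b_val around or keep the return value of the update function; the bookkeeping that the in-place add_inplace version does by hand is handled by the shared variable itself.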