pylearn changeset 1451:8110ca3cec3f (merge)
| author | James Bergstra <bergstrj@iro.umontreal.ca> |
|---|---|
| date | Thu, 31 Mar 2011 18:29:11 -0400 |
| parents | c421bac46a97 (current diff), fbe470217937 (diff) |
| children | d862047c2fe7 |
| diffstat | 7 files changed, 32 insertions(+), 29 deletions(-) |
```diff
--- a/pylearn/misc/do_nightly_build	Thu Mar 31 18:29:02 2011 -0400
+++ b/pylearn/misc/do_nightly_build	Thu Mar 31 18:29:11 2011 -0400
@@ -2,22 +2,23 @@
 #we set the compiledir to the /Tmp dir to make the test faster by bypassing the nfs network.
 date
 ROOT_CWD=/Tmp/nightly_build
+COMPILEDIR=/Tmp/lisa_theano_compile_dir_pylearn
+NOSETESTS=/usr/bin/nosetests
-FLAGS=warn.argmax_pushdown_bug=False,warn.gpusum_01_011_0111_bug=False,warn.sum_sum_bug=False,warn.sum_div_dimshuffle_bug=False,compiledir=/Tmp/lisa_theano_compile_dir_pylearn,seed=0
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64/python2.5/config/
-export LIBRARY_PATH=$LIBRARY_PATH:/usr/lib64/python2.5/config/
-export PYTHONPATH=${ROOT_CWD}:$PYTHONPATH
+
+FLAGS=warn.argmax_pushdown_bug=False,warn.gpusum_01_011_0111_bug=False,warn.sum_sum_bug=False,warn.sum_div_dimshuffle_bug=False,compiledir=${COMPILEDIR}
+export PYTHONPATH=${ROOT_CWD}/Theano:${ROOT_CWD}/Pylearn:$PYTHONPATH
 
 cd ${ROOT_CWD}/
 echo "executing nosetests with mode=FAST_COMPILE"
-#THEANO_FLAGS=${FLAGS},mode=FAST_COMPILE /usr/bin/nosetests Pylearn
+#THEANO_FLAGS=${FLAGS},mode=FAST_COMPILE ${NOSETESTS} Pylearn
 echo "executing nosetests with mode=FAST_RUN"
-THEANO_FLAGS=${FLAGS},mode=FAST_RUN /usr/bin/nosetests --with-coverage --cover-package=theano --cover-package=pylearn Pylearn
+THEANO_FLAGS=${FLAGS},mode=FAST_RUN ${NOSETESTS} --with-coverage --cover-package=theano --cover-package=pylearn Pylearn
 echo "executing nosetests with mode=FAST_RUN,floatX=float32"
-THEANO_FLAGS=${FLAGS},mode=FAST_RUN,floatX=float32 /usr/bin/nosetests Pylearn
+THEANO_FLAGS=${FLAGS},mode=FAST_RUN,floatX=float32 ${NOSETESTS} Pylearn
 
 #we change the seed and record it everyday to test different combination. We record it to be able to reproduce bug caused by different seed. We don't want multiple test in DEBUG_MODE each day as this take too long.
 seed=$RANDOM
 echo "executing nosetests with mode=DEBUG_MODE with seed of the day $seed"
-THEANO_FLAGS=${FLAGS},unittests.rseed=$seed,mode=DEBUG_MODE,DebugMode.check_strides=0,DebugMode.patience=3 /usr/bin/nosetests Pylearn
+THEANO_FLAGS=${FLAGS},unittests.rseed=$seed,mode=DEBUG_MODE,DebugMode.check_strides=0,DebugMode.patience=3 ${NOSETESTS} Pylearn
```
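The script refactoring above hoists the compile directory and the nosetests binary into variables, drops the stale library paths and the fixed seed, and points PYTHONPATH at the Theano and Pylearn checkouts. For illustration only, the sketch below drives roughly the same test matrix from Python; the paths and THEANO_FLAGS strings are copied from the script, while `run_suite` and the use of `subprocess` are assumptions of this sketch, not part of the changeset.

```python
import os
import subprocess

ROOT_CWD = '/Tmp/nightly_build'
COMPILEDIR = '/Tmp/lisa_theano_compile_dir_pylearn'
NOSETESTS = '/usr/bin/nosetests'
FLAGS = ('warn.argmax_pushdown_bug=False,warn.gpusum_01_011_0111_bug=False,'
         'warn.sum_sum_bug=False,warn.sum_div_dimshuffle_bug=False,'
         'compiledir=' + COMPILEDIR)

def run_suite(extra_flags, *nose_args):
    # Run nosetests on the Pylearn checkout with THEANO_FLAGS extended by
    # extra_flags, mirroring one invocation line of the shell script above.
    env = dict(os.environ)
    env['THEANO_FLAGS'] = FLAGS + ',' + extra_flags
    env['PYTHONPATH'] = os.pathsep.join(
        [os.path.join(ROOT_CWD, 'Theano'),
         os.path.join(ROOT_CWD, 'Pylearn')]
        + ([env['PYTHONPATH']] if env.get('PYTHONPATH') else []))
    subprocess.check_call([NOSETESTS] + list(nose_args) + ['Pylearn'],
                          cwd=ROOT_CWD, env=env)

run_suite('mode=FAST_RUN', '--with-coverage',
          '--cover-package=theano', '--cover-package=pylearn')
run_suite('mode=FAST_RUN,floatX=float32')
```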
```diff
--- a/pylearn/sampling/hmc.py	Thu Mar 31 18:29:02 2011 -0400
+++ b/pylearn/sampling/hmc.py	Thu Mar 31 18:29:11 2011 -0400
@@ -237,7 +237,7 @@
 
         # allocate shared vars
         if shared_positions_shape==None:
-            shared_positions_shape = shared_positions.value.shape
+            shared_positions_shape = shared_positions.get_value(borrow=True).shape
         batchsize = shared_positions_shape[0]
 
         stepsize = shared(numpy.asarray(initial_stepsize).astype(theano.config.floatX), 'hmc_stepsize')
@@ -289,7 +289,7 @@
         `borrow=True`.
         """
         self.simulate()
-        return self.positions.value.copy()
+        return self.positions.get_value(borrow=False)
 
     def updates(self):
         """Returns the update expressions required to simulate the Markov Chain
```
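Every remaining file in this changeset makes the same API migration: the deprecated `.value` attribute of Theano shared variables is replaced by explicit `get_value()`/`set_value()` calls. A minimal sketch of the borrow semantics, assuming a standard Theano install (this is illustration, not code from the changeset): `get_value(borrow=True)` may alias the variable's internal buffer, so it is only suitable for read-only peeks such as `.shape`, while the default `borrow=False` returns an independent copy, which is why `self.positions.value.copy()` becomes `get_value(borrow=False)`.

```python
import numpy
import theano

pos = theano.shared(numpy.zeros((3, 2), dtype=theano.config.floatX))

# borrow=True may return the container's own ndarray: cheap, but only
# safe for read-only peeks such as taking .shape, as in hmc.py above.
shape = pos.get_value(borrow=True).shape

# borrow=False (the default) returns an independent copy, the modern
# equivalent of the old `pos.value.copy()`.
snapshot = pos.get_value(borrow=False)
pos.set_value(numpy.ones((3, 2), dtype=theano.config.floatX))
assert (snapshot == 0).all()  # the copy is unaffected by the update
```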
```diff
--- a/pylearn/sampling/mcmc.py	Thu Mar 31 18:29:02 2011 -0400
+++ b/pylearn/sampling/mcmc.py	Thu Mar 31 18:29:11 2011 -0400
@@ -55,7 +55,7 @@
         The len of this vector is the batchsize.
         """
 
-        batchsize = positions[0].value.shape[0]
+        batchsize = positions[0].get_value(borrow=True).shape[0]
         self.s_rng = TT.shared_randomstreams.RandomStreams(seed)
         self.positions = positions
         self.prev_energy = shared(np.zeros(batchsize) + float('inf'))
@@ -64,7 +64,7 @@
 
         s_stepsize = TT.scalar('stepsize')
 
-        new_positions = [p + s_stepsize * self.s_rng.normal(size=p.value.shape)
+        new_positions = [p + s_stepsize * self.s_rng.normal(size=p.get_value(borrow=True).shape)
                 for p in self.positions]
 
         # accept-reject according to Metropolis-Hastings
@@ -90,7 +90,7 @@
         self.stepsize = min(self.stepsize*self.stepsize_inc,self.stepsize_max)
 
     def get_position(self):
-        return [q.value for q in self.positions]
+        return [q.get_value(borrow=True) for q in self.positions]
 
     def draw(self, n_steps=None):
         """Return the current sample in the Markov chain as a list of numpy arrays
```
```diff
--- a/pylearn/sampling/tests/test_hmc.py	Thu Mar 31 18:29:02 2011 -0400
+++ b/pylearn/sampling/tests/test_hmc.py	Thu Mar 31 18:29:11 2011 -0400
@@ -22,16 +22,16 @@
 
     position = shared(rng.randn(batchsize, 2).astype(theano.config.floatX))
     sampler = sampler_cls(position, gaussian_energy)
 
-    print 'initial position', position.value
-    print 'initial stepsize', sampler.stepsize.value
+    print 'initial position', position.get_value(borrow=True)
+    print 'initial stepsize', sampler.stepsize.get_value(borrow=True)
 
     # DRAW SAMPLES
 
    samples = [sampler.draw() for r in xrange(burnin)] #burn-in
    samples = np.asarray([sampler.draw() for r in xrange(n_samples)])
 
-    assert sampler.avg_acceptance_rate.value > 0
-    assert sampler.avg_acceptance_rate.value < 1
+    assert sampler.avg_acceptance_rate.get_value() > 0
+    assert sampler.avg_acceptance_rate.get_value() < 1
 
     # TEST THAT THEY ARE FROM THE RIGHT DISTRIBUTION
 
@@ -42,8 +42,8 @@
 
     #assert np.all(abs(mu - samples.mean(axis=0)) < 1)
 
-    print 'final stepsize', sampler.stepsize.value
-    print 'final acceptance_rate', sampler.avg_acceptance_rate.value
+    print 'final stepsize', sampler.stepsize.get_value()
+    print 'final acceptance_rate', sampler.avg_acceptance_rate.get_value()
 
     print 'target cov', cov
     s = samples[:,0,:]
@@ -59,7 +59,7 @@
 def test_hmc():
     print ('HMC')
     sampler = _sampler_on_2d_gaussian(HMC_sampler.new_from_shared_positions, burnin=3000/20, n_samples=90000/20)
-    assert abs(sampler.avg_acceptance_rate.value - sampler.target_acceptance_rate) < .1
-    assert sampler.stepsize.value >= sampler.stepsize_min
-    assert sampler.stepsize.value <= sampler.stepsize_max
+    assert abs(sampler.avg_acceptance_rate.get_value() - sampler.target_acceptance_rate) < .1
+    assert sampler.stepsize.get_value() >= sampler.stepsize_min
+    assert sampler.stepsize.get_value() <= sampler.stepsize_max
```
```diff
--- a/pylearn/sampling/tests/test_mcmc.py	Thu Mar 31 18:29:02 2011 -0400
+++ b/pylearn/sampling/tests/test_mcmc.py	Thu Mar 31 18:29:11 2011 -0400
@@ -22,7 +22,7 @@
 
     position = shared(rng.randn(batchsize, 2).astype(theano.config.floatX))
     sampler = sampler_cls([position], gaussian_energy)
 
-    print 'initial position', position.value
+    print 'initial position', position.get_value(borrow=True)
     print 'initial stepsize', sampler.stepsize
 
     # DRAW SAMPLES
```
```diff
--- a/pylearn/shared/layers/tests/test_kouh2008.py	Thu Mar 31 18:29:02 2011 -0400
+++ b/pylearn/shared/layers/tests/test_kouh2008.py	Thu Mar 31 18:29:11 2011 -0400
@@ -60,7 +60,9 @@
     out = LogisticRegression.new(layer.output, n_out, 2)
     cost = out.nll(y).sum()
     #joint optimization except for one of the linear filters
-    out.w.value += 0.1 * rng.rand(*out.w.value.shape)
+    out.w.set_value((out.w.get_value(borrow=True) +
+        0.1 * rng.rand(*out.w.get_value(borrow=True).shape)),
+        borrow=True)
     params = layer.params[:-2]
     mode = None
     updates = [(p, p - numpy.asarray(0.001, dtype=dtype)*gp) for p,gp in zip(params, tensor.grad(cost, params)) ]
```
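The kouh2008 hunk shows the write side of the migration: in-place arithmetic on `.value` becomes an explicit read-modify-write through `get_value` and `set_value`. A hedged sketch of that pattern follows; `w` is an illustrative stand-in for `out.w`, and the `astype` call is an addition of this sketch (it keeps the dtype at floatX when the random perturbation upcasts to float64), not part of the changeset.

```python
import numpy
import theano

rng = numpy.random.RandomState(0)
w = theano.shared(numpy.zeros((4, 2), dtype=theano.config.floatX))

# Modern equivalent of the old `w.value += 0.1 * rng.rand(*w.value.shape)`.
# Reading with borrow=True avoids a copy; the addition allocates a fresh
# array anyway, so handing it back with borrow=True is safe as well.
w_val = w.get_value(borrow=True)
w.set_value((w_val + 0.1 * rng.rand(*w_val.shape)).astype(w_val.dtype),
            borrow=True)
```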
```diff
--- a/pylearn/shared/layers/tests/test_sigmoidal_layer.py	Thu Mar 31 18:29:02 2011 -0400
+++ b/pylearn/shared/layers/tests/test_sigmoidal_layer.py	Thu Mar 31 18:29:11 2011 -0400
@@ -24,8 +24,8 @@
 
     updates = [(p, p - numpy.asarray(0.01, dtype=dtype)*gp) for p,gp in zip(params, tensor.grad(cost, params)) ]
     f = pfunc([x, y], cost, updates=updates)
 
-    w0 = layer.w.value.copy()
-    b0 = layer.b.value.copy()
+    w0 = layer.w.get_value(borrow=False)
+    b0 = layer.b.get_value(borrow=False)
 
     xval = numpy.asarray(rng.rand(bsize, n_in), dtype=dtype)
     yval = numpy.asarray(rng.randint(0,2,bsize), dtype='int64')
@@ -35,7 +35,7 @@
         print i, 'rval', fN
 
     assert f0 > 6
-    assert fN < 2
+    assert fN < 2
 
-    assert numpy.all(w0 != layer.w.value)
-    assert numpy.all(b0 != layer.b.value)
+    assert numpy.all(w0 != layer.w.get_value(borrow=True))
+    assert numpy.all(b0 != layer.b.get_value(borrow=True))
```