changeset 1466:4d6d6d4eab9e

merge
author gdesjardins
date Wed, 20 Apr 2011 16:35:28 -0400
parents 490616262500 (current diff) 01063efe409f (diff)
children b24ed2aa077e
diffstat 18 files changed, 214 insertions(+), 52 deletions(-)
line diff
--- a/LICENSE	Wed Apr 20 16:30:48 2011 -0400
+++ b/LICENSE	Wed Apr 20 16:35:28 2011 -0400
@@ -1,4 +1,4 @@
-Copyright (c) 2008, Theano Development Team
+Copyright (c) 2008-2011, Pylearn Development Team
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -8,7 +8,7 @@
     * Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
-    * Neither the name of Theano nor the names of its contributors may be
+    * Neither the name of Pylearn nor the names of its contributors may be
       used to endorse or promote products derived from this software without
       specific prior written permission.
 
--- a/bin/pkldu.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/bin/pkldu.py	Wed Apr 20 16:35:28 2011 -0400
@@ -1,4 +1,4 @@
-#!/bin/env python
+#!/usr/bin/env python
 """
  Script to analyze disk usage of pickled files. See usage.
 """
--- a/pylearn/dataset_ops/cifar10.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/dataset_ops/cifar10.py	Wed Apr 20 16:35:28 2011 -0400
@@ -73,15 +73,21 @@
 def all_labels():
     return all_data_labels()[1]
 
+split_sizes = dict(
+        train=40000,
+        valid=10000,
+        test=10000,
+        all=60000)
 
 def cifar10(s_idx, split, dtype='float64', rasterized=False, color='grey',
         split_options = {'train':(train_data, train_labels),
                 'valid': (valid_data, valid_labels),
                 'test': (test_data, test_labels),
                 'all': (all_data, all_labels),
-                }
+                },
+        loop=False
             ):
-    """ 
+    """
     Returns a pair (img, label) of theano expressions for cifar-10 samples
 
     :param s_idx: the indexes
@@ -110,6 +116,9 @@
     x_op = TensorFnDataset(dtype, (False,), (x_fn, (dtype,)), (3072,))
     y_op = TensorFnDataset('int32', (), y_fn)
 
+    if loop:
+        s_idx = s_idx % split_sizes[split]
+
     x = x_op(s_idx)
     y = y_op(s_idx)
 
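A hedged usage sketch of the new `loop` flag (the Theano index variable and split choice below are illustrative, not from this changeset):

    import theano.tensor as tensor
    from pylearn.dataset_ops.cifar10 import cifar10

    s_idx = tensor.lscalar('s_idx')
    # with loop=True, an out-of-range index such as 40005 is wrapped to
    # 40005 % split_sizes['train'] == 5 before the dataset is indexed
    img, label = cifar10(s_idx, 'train', loop=True)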
--- a/pylearn/dataset_ops/tinyimages.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/dataset_ops/tinyimages.py	Wed Apr 20 16:35:28 2011 -0400
@@ -34,7 +34,7 @@
 # pre-processed data should be stored.  For now it is stored in the current working directory.
 #
 
-def tinyimages_op(s_idx):
+def tinyimages_op(s_idx, loop=False, shuffled=False):
     """Return symbolic tiny_images[s_idx]
 
     If s_idx is a scalar, the return value is a tensor3 of shape 32,32,3 and
@@ -42,9 +42,15 @@
     If s_idx is a vector of len N, the return value
     is a tensor4 of shape N,32,32,3 and dtype uint8.
     """
+    if loop:
+        s_idx = s_idx % tinyimages.n_images
+    if shuffled:
+        fn = tinyimages.get_shuffled_memmapped_file
+    else:
+        fn = tinyimages.get_memmapped_file
     op = TensorFnDataset('uint8',
             bcast=(False, False, False),
-            fn=tinyimages.get_memmapped_file,
+            fn=fn,
             single_shape=(32,32,3))
     return op(s_idx)
 
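A hedged usage sketch of the new `loop` and `shuffled` flags (the index variable is illustrative):

    import theano.tensor as tensor
    from pylearn.dataset_ops.tinyimages import tinyimages_op

    i = tensor.lscalar('i')
    # loop=True wraps i modulo tinyimages.n_images; shuffled=True reads
    # from the seed-12345 shuffled memmap instead of the original one
    x = tinyimages_op(i, loop=True, shuffled=True)  # uint8 tensor3, shape (32, 32, 3)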
--- a/pylearn/datasets/tinyimages.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/datasets/tinyimages.py	Wed Apr 20 16:35:28 2011 -0400
@@ -22,6 +22,8 @@
 _tinyimages_root='/data/lisa/data/tinyimages'
 _original=_tinyimages_root+'/tinyimages/original'
 _npy_file=_tinyimages_root+'/tinyimages.npy'
+_shuffled_npy_file=_tinyimages_root+'/tinyimages_shuffled.npy'
+_shuffled_npy_seed=12345
 _README_file=_tinyimages_root+'/README.txt'
 _README = """
 TinyImages is a dataset of 32x32 RGB images.
@@ -86,18 +88,18 @@
         i +=1
 
 
-def arrange_first_N_into_tiling(R,C, filename):
+def arrange_first_N_into_tiling(R,C, fileroot):
     R=int(R)
     C=int(C)
-    A = numpy.asarray([i.copy() for i,ii in zip(image_generator(), xrange(R*C))],
-            dtype='float32')
-    print A.shape
-    A.shape = (R*C, 32*32,3)
-    pylearn.io.image_tiling.save_tiled_raster_images(
-        pylearn.io.image_tiling.tile_raster_images(
-            (A[:,:,0], A[:,:,1], A[:,:,2], None),
-            (32,32)),
-        filename)
+    A = numpy.asarray([i.copy() for i,ii in zip(image_generator(), xrange(R*C))])
+    pylearn.io.image_tiling.tile_slices_to_image_uint8(A,
+            tile_shape=(R,C)).save(fileroot+'_from_imgs.png')
+    A = get_memmapped_file(R*C)
+    pylearn.io.image_tiling.tile_slices_to_image_uint8(A,
+            tile_shape=(R,C)).save(fileroot+'_memmapped.png')
+    A = get_shuffled_memmapped_file(R*C)
+    pylearn.io.image_tiling.tile_slices_to_image_uint8(A,
+            tile_shape=(R,C)).save(fileroot+'_shuffled.png')
 
 
 n_images = 1608356
@@ -107,22 +109,48 @@
             dtype='uint8',
             mode='r',
             shape=(N,32,32,3))
+def get_shuffled_memmapped_file(N=n_images, filename=_shuffled_npy_file):
+    return get_memmapped_file(N, filename)
 
-def rebuild_numpy_file(N=n_images, filename=_npy_file):
+def rebuild_memmapped_file(N=n_images, filename=_npy_file):
     shp = (N,32,32,3)
     print >> sys.stderr, "pylearn.datasets.tinyimages rebuilding", filename, shp, N*32*32*3 / float(1024**3), 'gigabytes'
     open(_README_file, 'w').write(_README)
     mmap = numpy.memmap(filename,
             dtype='uint8',
-            mode='w+',
+            mode='w+', # create or overwrite the file for read/write
             shape=shp)
     ig = image_generator()
     for ii in xrange(N):
         mmap[ii] = ig.next()
     mmap.flush()
 
+def rebuild_shuffled_memmapped_file(N=n_images, filename=_shuffled_npy_file,
+        seed=_shuffled_npy_seed,
+        orig_filename=_npy_file):
+    try:
+        orig = get_memmapped_file(N, orig_filename)
+    except IOError:
+        print >> sys.stderr, "pylearn.datasets.tinyimages: rebuild un-shuffled file first"
+        raise
+    shp = orig.shape
+    print >> sys.stderr, "pylearn.datasets.tinyimages rebuilding", filename, shp, N*32*32*3 / float(1024**3), 'gigabytes'
+    mmap = numpy.memmap(filename,
+            dtype='uint8',
+            mode='w+', # create or overwrite the file for read/write
+            shape=shp)
+    idxlist = numpy.arange(orig.shape[0])
+    numpy.random.RandomState(seed).shuffle(idxlist)
+    assert idxlist[0] != 0  # sanity check: the shuffle actually permuted element 0
+    for i0, i1 in enumerate(idxlist):
+        mmap[i0] = orig[i1]
+        if not i0 % 10000:
+            print >> sys.stderr, "%i/%i" % (i0, len(idxlist))
+    mmap.flush()
+
 def main(argv=[]):
     if argv:
+        print "Saving images to", argv[2]
         arrange_first_N_into_tiling( argv[0], argv[1], argv[2])
     else:
         def iter_len(x):
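The shuffled rebuild above amounts to permuting one memmap into another under a fixed seed; a self-contained toy sketch of the same idea (file names and sizes here are made up for illustration):

    import numpy

    src = numpy.memmap('toy.npy', dtype='uint8', mode='w+', shape=(8, 2))
    src[:] = numpy.arange(16, dtype='uint8').reshape(8, 2)
    idx = numpy.arange(src.shape[0])
    numpy.random.RandomState(12345).shuffle(idx)  # fixed seed => reproducible order
    dst = numpy.memmap('toy_shuffled.npy', dtype='uint8', mode='w+', shape=src.shape)
    for i0, i1 in enumerate(idx):
        dst[i0] = src[i1]
    dst.flush()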
--- a/pylearn/datasets/utlc.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/datasets/utlc.py	Wed Apr 20 16:35:28 2011 -0400
@@ -27,7 +27,7 @@
 
     :param normalize: If True, we normalize the train dataset
                       before returning it
-    :param transfer: If True also return the transfer label(currently only available for ule)
+    :param transfer: If True also return the transfer labels
     :param normalize_on_the_fly: If True, we return a Theano Variable that will give
                                  as output the normalized value. If the user only
                                  take a subtensor of that variable, Theano optimization
@@ -174,7 +174,7 @@
         else:
             raise Exception("This dataset doesn't have its normalization defined")
     if transfer:
-        transfer = load_sparse(os.path.join(config.data_root(),"UTLC","sparse",name+"_transfer.npy"))
+        transfer = load_filetensor(os.path.join(config.data_root(),"UTLC","filetensor",name+"_transfer.ft"))
         return train, valid, test, transfer
     else:
         return train, valid, test
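A hedged sketch of the transfer-label path changed above (the loader name load_ndarray_dataset and the dataset name 'ule' are assumptions based on this module's docstrings, not confirmed by this hunk):

    from pylearn.datasets import utlc

    # with transfer=True the loader now reads <name>_transfer.ft
    # via load_filetensor instead of a sparse .npy file
    train, valid, test, transfer = utlc.load_ndarray_dataset('ule', transfer=True)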
--- a/pylearn/formulas/costs.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/formulas/costs.py	Wed Apr 20 16:35:28 2011 -0400
@@ -168,7 +168,7 @@
 # in which file?
 
 from theano import gof
-from theano.tensor.tsor_apply import Apply
+from theano.tensor import Apply
 from theano import tensor
 import numpy as np
 
--- a/pylearn/gd/__init__.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/gd/__init__.py	Wed Apr 20 16:35:28 2011 -0400
@@ -9,3 +9,5 @@
  - Stopping criteria (incl. for use in theano functions)
 
 """
+
+from sgd import sgd_updates, sgd_momentum_updates
--- a/pylearn/gd/sgd.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/gd/sgd.py	Wed Apr 20 16:35:28 2011 -0400
@@ -16,11 +16,32 @@
     """
     try:
         iter(stepsizes)
-    except:
+    except Exception:
         stepsizes = [stepsizes for p in params]
+    if len(params) != len(grads):
+        raise ValueError('params and grads have different lengths')
     updates = [(p, p - step * gp) for (step, p, gp) in zip(stepsizes, params, grads)]
     return updates
 
+def sgd_momentum_updates(params, grads, stepsizes, momentum=0.9):
+    """Return a list of Theano update pairs implementing SGD with momentum.
+
+    `stepsizes` and `momentum` may each be a scalar (broadcast over all
+    params) or an iterable with one entry per parameter.
+    """
+    # if stepsizes is just a scalar, expand it to match params
+    try:
+        iter(stepsizes)
+    except Exception:
+        stepsizes = [stepsizes for p in params]
+    try:
+        iter(momentum)
+    except Exception:
+        momentum = [momentum for p in params]
+    if len(params) != len(grads):
+        raise ValueError('params and grads have different lengths')
+    headings = [theano.shared(p.get_value(borrow=False)*0) for p in params]
+    updates = []
+    for s, p, gp, m, h in zip(stepsizes, params, grads, momentum, headings):
+        updates.append((p, p + s * h))
+        updates.append((h, m*h - (1-m)*gp))
+    return updates
+
 
 class StochasticGradientDescent(theano.Module):
     """Fixed stepsize gradient descent
--- a/pylearn/io/image_tiling.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/io/image_tiling.py	Wed Apr 20 16:35:28 2011 -0400
@@ -11,9 +11,9 @@
     ndar *= 1.0 / max(ndar.max(),eps)
     return ndar
 
-def tile_raster_images(X, img_shape, 
+def tile_raster_images(X, img_shape,
         tile_shape=None, tile_spacing=(1,1),
-        scale_rows_to_unit_interval=True, 
+        scale_rows_to_unit_interval=True,
         output_pixel_vals=True,
         min_dynamic_range=1e-4,
         ):
@@ -40,7 +40,23 @@
     :rtype: a 2-d array with same dtype as X.
 
     """
-    if isinstance(X, tuple): 
+    # This warning stays disabled while tile_slices_to_image is still
+    # undocumented, but it is ultimately right:
+    #print >> sys.stderr, "WARN: tile_raster_images is deprecated, use tile_slices_to_image"
+    if len(img_shape)==3 and img_shape[2]==3:
+        # make this save an rgb image
+        if scale_rows_to_unit_interval:
+            print >> sys.stderr, "WARN: tile_raster_images' scaling routine messes up colour - try tile_slices_to_image"
+        return tile_raster_images(
+                (X[:,0::3], X[:,1::3], X[:,2::3], None),
+                img_shape=img_shape[:2],
+                tile_shape=tile_shape,
+                tile_spacing=tile_spacing,
+                scale_rows_to_unit_interval=scale_rows_to_unit_interval,
+                output_pixel_vals=output_pixel_vals,
+                min_dynamic_range=min_dynamic_range)
+
+    if isinstance(X, tuple):
         n_images_in_x = X[0].shape[0]
     else:
         n_images_in_x = X.shape[0]
@@ -57,6 +73,8 @@
         in zip(img_shape, tile_shape, tile_spacing)]
 
     if isinstance(X, tuple):
+        if scale_rows_to_unit_interval:
+            raise NotImplementedError()
         assert len(X) == 4
         if output_pixel_vals:
             out_array = numpy.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
@@ -129,3 +147,50 @@
     img.save(filename)
     return img
 
+def tile_slices_to_image_uint8(X, tile_shape=None):
+    """Tile the uint8 image slices of X into a single PIL RGB image."""
+    if str(X.dtype) != 'uint8':
+        raise TypeError(X)
+    if tile_shape is None:
+        # choose the most nearly square number of tile rows and cols
+        tile_shape = most_square_shape(X.shape[0])
+    trows, tcols = tile_shape
+    H, W = X.shape[1], X.shape[2]
+
+    Hs = H + 1  # spacing between tiles
+    Ws = W + 1  # spacing between tiles
+
+    outrows = trows * Hs - 1
+    outcols = tcols * Ws - 1
+    out = numpy.zeros((outrows, outcols, 3), dtype='uint8')
+    for tr in range(trows):
+        for tc in range(tcols):
+            Xrc = X[tr*tcols+tc]
+            if Xrc.ndim == 2:  # no color channel: broadcast across RGB
+                Xrc = Xrc[:, :, None]
+            out[tr*Hs:tr*Hs+H, tc*Ws:tc*Ws+W] = Xrc
+    img = Image.fromarray(out, 'RGB')
+    return img
+
+def tile_slices_to_image(X,
+        tile_shape=None,
+        scale_each=True,
+        min_dynamic_range=1e-4):
+    """Scale X to [0, 255] (per-slice when scale_each is True) and tile it;
+    always returns an RGB image."""
+    def scale_0_255(x):
+        xmin = x.min()
+        xmax = x.max()
+        return numpy.asarray(
+                255 * (x - xmin) / max(xmax - xmin, min_dynamic_range),
+                dtype='uint8')
+
+    if scale_each:
+        uintX = numpy.empty(X.shape, dtype='uint8')
+        for i, Xi in enumerate(X):
+            uintX[i] = scale_0_255(Xi)
+        X = uintX
+    else:
+        X = scale_0_255(X)
+    return tile_slices_to_image_uint8(X, tile_shape=tile_shape)
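A quick usage sketch for the new tiling helpers (array contents are illustrative):

    import numpy
    from pylearn.io.image_tiling import tile_slices_to_image_uint8

    rng = numpy.random.RandomState(0)
    X = rng.randint(0, 256, (12, 32, 32, 3)).astype('uint8')  # 12 RGB slices
    tile_slices_to_image_uint8(X, tile_shape=(3, 4)).save('tiles.png')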
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pylearn/misc/do_nightly_build	Wed Apr 20 16:35:28 2011 -0400
@@ -0,0 +1,24 @@
+#!/bin/bash
+# We set the compiledir to the local /Tmp dir to make the tests faster by bypassing the NFS network.
+date
+ROOT_CWD=/Tmp/nightly_build
+COMPILEDIR=/Tmp/lisa_theano_compile_dir_pylearn
+NOSETESTS=/usr/bin/nosetests
+
+
+FLAGS=warn.argmax_pushdown_bug=False,warn.gpusum_01_011_0111_bug=False,warn.sum_sum_bug=False,warn.sum_div_dimshuffle_bug=False,compiledir=${COMPILEDIR}
+export PYTHONPATH=${ROOT_CWD}/Theano:${ROOT_CWD}/Pylearn:$PYTHONPATH
+
+cd ${ROOT_CWD}/
+echo "executing nosetests with mode=FAST_COMPILE"
+#THEANO_FLAGS=${FLAGS},mode=FAST_COMPILE ${NOSETESTS} Pylearn
+echo "executing nosetests with mode=FAST_RUN"
+THEANO_FLAGS=${FLAGS},mode=FAST_RUN ${NOSETESTS} --with-coverage --cover-package=theano --cover-package=pylearn Pylearn
+echo "executing nosetests with mode=FAST_RUN,floatX=float32"
+THEANO_FLAGS=${FLAGS},mode=FAST_RUN,floatX=float32 ${NOSETESTS} Pylearn
+
+# We change the seed every day to test different combinations, and we record it so that bugs caused by a particular seed can be reproduced. We don't run multiple DEBUG_MODE test passes each day, as that would take too long.
+seed=$RANDOM
+echo "executing nosetests with mode=DEBUG_MODE with seed of the day $seed"
+THEANO_FLAGS=${FLAGS},unittests.rseed=$seed,mode=DEBUG_MODE,DebugMode.check_strides=0,DebugMode.patience=3 ${NOSETESTS} Pylearn
+
--- a/pylearn/preprocessing/pca.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/preprocessing/pca.py	Wed Apr 20 16:35:28 2011 -0400
@@ -72,7 +72,8 @@
     return w,v
 
 
-def pca_from_examples(X, max_components=None, max_energy_fraction=None, x_centered=False):
+def pca_from_examples(X, max_components=None, max_energy_fraction=None,
+        x_centered=False, inplace=False):
     """Return ((eigvals, eigvecs), centered_X) of observations `X` (1-per-row)
 
     This function exists to wrap several algorithms for getting the principal components.
@@ -87,13 +88,17 @@
     :param x_centered:
         True means to consider X as having mean 0 (even if it actually doesn't!)
 
+    :param inplace:
+        If False, we copy X before using it. Otherwise we modify it.
+
     :returns: ((eigvals, eigvecs), centered_X) of PCA decomposition
 
     """
-    if x_centered:
-        centered_X = X
-    else:
-        centered_X = X - numpy.mean(X, axis=0)
+    if not inplace:
+        X = X.copy()
+    centered_X = X
+    if not x_centered:
+        centered_X -= numpy.mean(centered_X, axis=0)
     cov_X = numpy.dot(centered_X.T, centered_X) / (len(X)- 1)
     evals, evecs = pca_from_cov(cov_X, max_components=max_components,
             max_energy_fraction=max_energy_fraction)
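A hedged sketch of the new `inplace` flag (toy data; the return structure is as documented above):

    import numpy
    from pylearn.preprocessing.pca import pca_from_examples

    X = numpy.random.RandomState(0).randn(100, 5)
    (evals, evecs), cX = pca_from_examples(X, max_components=2)  # X is copied first
    (evals, evecs), cX = pca_from_examples(X, max_components=2,
                                           inplace=True)         # X is centered in place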
--- a/pylearn/sampling/hmc.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/sampling/hmc.py	Wed Apr 20 16:35:28 2011 -0400
@@ -237,7 +237,7 @@
         # allocate shared vars
 
         if shared_positions_shape==None:
-            shared_positions_shape = shared_positions.value.shape
+            shared_positions_shape = shared_positions.get_value(borrow=True).shape
         batchsize = shared_positions_shape[0]
 
         stepsize = shared(numpy.asarray(initial_stepsize).astype(theano.config.floatX), 'hmc_stepsize')
@@ -289,7 +289,7 @@
         `borrow=True`.
         """
         self.simulate()
-        return self.positions.value.copy()
+        return self.positions.get_value(borrow=False)
 
     def updates(self):
         """Returns the update expressions required to simulate the Markov Chain
--- a/pylearn/sampling/mcmc.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/sampling/mcmc.py	Wed Apr 20 16:35:28 2011 -0400
@@ -55,7 +55,7 @@
             The len of this vector is the batchsize.
         """
 
-        batchsize = positions[0].value.shape[0]
+        batchsize = positions[0].get_value(borrow=True).shape[0]
         self.s_rng = TT.shared_randomstreams.RandomStreams(seed)
         self.positions = positions
         self.prev_energy = shared(np.zeros(batchsize) + float('inf'))
@@ -64,7 +64,7 @@
 
         s_stepsize = TT.scalar('stepsize')
 
-        new_positions = [p + s_stepsize * self.s_rng.normal(size=p.value.shape)
+        new_positions = [p + s_stepsize * self.s_rng.normal(size=p.get_value(borrow=True).shape)
                 for p in self.positions]
 
         # accept-reject according to Metropolis-Hastings 
@@ -90,7 +90,7 @@
                 self.stepsize = min(self.stepsize*self.stepsize_inc,self.stepsize_max)
 
     def get_position(self):
-        return [q.value for q in self.positions]
+        return [q.get_value(borrow=True) for q in self.positions]
 
     def draw(self, n_steps=None):
         """Return the current sample in the Markov chain as a list of numpy arrays
--- a/pylearn/sampling/tests/test_hmc.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/sampling/tests/test_hmc.py	Wed Apr 20 16:35:28 2011 -0400
@@ -22,16 +22,16 @@
     position = shared(rng.randn(batchsize, 2).astype(theano.config.floatX))
     sampler = sampler_cls(position, gaussian_energy)
 
-    print 'initial position', position.value
-    print 'initial stepsize', sampler.stepsize.value
+    print 'initial position', position.get_value(borrow=True)
+    print 'initial stepsize', sampler.stepsize.get_value(borrow=True)
 
     # DRAW SAMPLES
 
     samples = [sampler.draw() for r in xrange(burnin)] #burn-in
     samples = np.asarray([sampler.draw() for r in xrange(n_samples)])
 
-    assert sampler.avg_acceptance_rate.value > 0
-    assert sampler.avg_acceptance_rate.value < 1
+    assert sampler.avg_acceptance_rate.get_value() > 0
+    assert sampler.avg_acceptance_rate.get_value() < 1
 
     # TEST THAT THEY ARE FROM THE RIGHT DISTRIBUTION
 
@@ -42,8 +42,8 @@
     #assert np.all(abs(mu - samples.mean(axis=0)) < 1)
 
 
-    print 'final stepsize', sampler.stepsize.value
-    print 'final acceptance_rate', sampler.avg_acceptance_rate.value
+    print 'final stepsize', sampler.stepsize.get_value()
+    print 'final acceptance_rate', sampler.avg_acceptance_rate.get_value()
 
     print 'target cov', cov
     s = samples[:,0,:]
@@ -59,7 +59,7 @@
 def test_hmc():
     print ('HMC')
     sampler = _sampler_on_2d_gaussian(HMC_sampler.new_from_shared_positions, burnin=3000/20, n_samples=90000/20)
-    assert abs(sampler.avg_acceptance_rate.value - sampler.target_acceptance_rate) < .1
-    assert sampler.stepsize.value >= sampler.stepsize_min
-    assert sampler.stepsize.value <= sampler.stepsize_max
+    assert abs(sampler.avg_acceptance_rate.get_value() - sampler.target_acceptance_rate) < .1
+    assert sampler.stepsize.get_value() >= sampler.stepsize_min
+    assert sampler.stepsize.get_value() <= sampler.stepsize_max
 
--- a/pylearn/sampling/tests/test_mcmc.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/sampling/tests/test_mcmc.py	Wed Apr 20 16:35:28 2011 -0400
@@ -22,7 +22,7 @@
     position = shared(rng.randn(batchsize, 2).astype(theano.config.floatX))
     sampler = sampler_cls([position], gaussian_energy)
 
-    print 'initial position', position.value
+    print 'initial position', position.get_value(borrow=True)
     print 'initial stepsize', sampler.stepsize
 
     # DRAW SAMPLES
--- a/pylearn/shared/layers/tests/test_kouh2008.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/shared/layers/tests/test_kouh2008.py	Wed Apr 20 16:35:28 2011 -0400
@@ -60,7 +60,9 @@
     out = LogisticRegression.new(layer.output, n_out, 2)
     cost = out.nll(y).sum()
     #joint optimization except for one of the linear filters
-    out.w.value += 0.1 * rng.rand(*out.w.value.shape)
+    out.w.set_value((out.w.get_value(borrow=True) +
+                     0.1 * rng.rand(*out.w.get_value(borrow=True).shape)),
+            borrow=True)
     params = layer.params[:-2]
     mode = None
     updates = [(p, p - numpy.asarray(0.001, dtype=dtype)*gp) for p,gp in zip(params, tensor.grad(cost, params)) ]
--- a/pylearn/shared/layers/tests/test_sigmoidal_layer.py	Wed Apr 20 16:30:48 2011 -0400
+++ b/pylearn/shared/layers/tests/test_sigmoidal_layer.py	Wed Apr 20 16:35:28 2011 -0400
@@ -24,8 +24,8 @@
     updates = [(p, p - numpy.asarray(0.01, dtype=dtype)*gp) for p,gp in zip(params, tensor.grad(cost, params)) ]
     f = pfunc([x, y], cost, updates=updates)
 
-    w0 = layer.w.value.copy()
-    b0 = layer.b.value.copy()
+    w0 = layer.w.get_value(borrow=False)
+    b0 = layer.b.get_value(borrow=False)
 
     xval = numpy.asarray(rng.rand(bsize, n_in), dtype=dtype)
     yval = numpy.asarray(rng.randint(0,2,bsize), dtype='int64')
@@ -35,7 +35,7 @@
         print i, 'rval', fN
 
     assert f0 > 6
-    assert fN < 2 
+    assert fN < 2
 
-    assert numpy.all(w0 != layer.w.value)
-    assert numpy.all(b0 != layer.b.value)
+    assert numpy.all(w0 != layer.w.get_value(borrow=True))
+    assert numpy.all(b0 != layer.b.get_value(borrow=True))