# HG changeset patch
# User Olivier Breuleux
# Date 1217881770 14400
# Node ID eac0a7d44ff075bc31ed483eaf4681512ee37963
# Parent  0f366ecb11ee6f862c89dab2d3f9d2ca3b4f99fd
# Parent  200a5b0e24ea93527b433e65c6fdf719487c66b9
merge

diff -r 0f366ecb11ee -r eac0a7d44ff0 _test_filetensor.py
--- a/_test_filetensor.py	Mon Aug 04 16:21:59 2008 -0400
+++ b/_test_filetensor.py	Mon Aug 04 16:29:30 2008 -0400
@@ -30,8 +30,12 @@
 
     def test_filename(self):
         gen = numpy.random.rand(1)
-        write(self.fname, gen)
-        mat = read(self.fname, None, debug=False) #load from filename
+        f = file(self.fname, 'w')
+        write(f, gen)
+        f.close()
+        f = file(self.fname, 'r')
+        mat = read(f, None, debug=False) #load from filename
+        f.close()
         self.failUnless(gen.shape == mat.shape)
         self.failUnless(numpy.all(gen == mat))
 
diff -r 0f366ecb11ee -r eac0a7d44ff0 _test_linear_regression.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_test_linear_regression.py	Mon Aug 04 16:29:30 2008 -0400
@@ -0,0 +1,25 @@
+
+import unittest
+from linear_regression import *
+from make_test_datasets import *
+import numpy
+
+class test_linear_regression(unittest.TestCase):
+
+    def test1(self):
+        trainset,testset,theta=make_artificial_datasets_from_function(n_inputs=3,
+                                                                      n_targets=2,
+                                                                      n_examples=100,
+                                                                      f=linear_predictor)
+
+        assert trainset.fields()['input'].shape==(50,3)
+        assert testset.fields()['target'].shape==(50,2)
+        regressor = LinearRegression(L2_regularizer=0.1)
+        predictor = regressor(trainset)
+        test_data = testset.fields()
+        mse = predictor.compute_mse(test_data['input'],test_data['target'])
+        print 'mse = ',mse
+
+if __name__ == '__main__':
+    unittest.main()
+
diff -r 0f366ecb11ee -r eac0a7d44ff0 _test_nnet_ops.py
--- a/_test_nnet_ops.py	Mon Aug 04 16:21:59 2008 -0400
+++ b/_test_nnet_ops.py	Mon Aug 04 16:29:30 2008 -0400
@@ -1,5 +1,6 @@
 
 import unittest
+import theano
 import theano._test_tensor as TT
 import numpy
 
@@ -35,6 +36,43 @@
             return crossentropy_softmax_1hot(a, y_idx)[0:1]
         TT.verify_grad(self, Dummy(), [numpy.random.rand(3,4)])
 
+class T_prepend(unittest.TestCase):
+    def test0(self):
+        """basic functionality"""
+        x=tensor.matrix('x')
+        y=Prepend_scalar_constant_to_each_row(4.)(x)
+        f=theano.function([x],[y])
+        m=numpy.random.rand(3,5)
+        my = f(m)
+        self.failUnless(my.shape == (3, 6), my.shape)
+        self.failUnless(numpy.all( my[:,0] == 4.0))
+
+
+class T_prepend(unittest.TestCase):
+    def test0(self):
+        """basic functionality"""
+        x=tensor.matrix('x')
+        y=Prepend_scalar_to_each_row()(5.,x)
+        f=theano.function([x],[y])
+        m=numpy.ones((3,5),dtype="float32")
+        my = f(m)
+        self.failUnless(str(my.dtype) == 'float64')
+        self.failUnless(my.shape == (3, 6))
+        self.failUnless(numpy.all(my[:,0] == 5.0))
+
+class T_solve(unittest.TestCase):
+    def setUp(self):
+        self.rng = numpy.random.RandomState(666)
+
+    def test0(self):
+        A=self.rng.randn(5,5)
+        b=numpy.array(range(5),dtype=float)
+        x=numpy.linalg.solve(A,b)
+        Ax = numpy.dot(A,x)
+        are = theano.gradient.numeric_grad.abs_rel_err(Ax, b)
+        self.failUnless(numpy.all(are < 1.0e-5), (are, Ax, b))
+        #print A,b
+        #print numpy.dot(A,x)
 
 
 if __name__ == '__main__':
diff -r 0f366ecb11ee -r eac0a7d44ff0 dataset.py
--- a/dataset.py	Mon Aug 04 16:21:59 2008 -0400
+++ b/dataset.py	Mon Aug 04 16:29:30 2008 -0400
@@ -220,7 +220,8 @@
 
         Sub-classes which implement finite-length datasets should redefine this method.
         Some methods only make sense for finite-length datasets.
""" - return None + from sys import maxint + return maxint class MinibatchToSingleExampleIterator(object): @@ -943,6 +944,9 @@ del self.fieldname2dataset[fieldname] self.fieldname2dataset[rename_field(fieldname,self.datasets[i],i)]=i + def __len__(self): + return len(self.datasets[0]) + def hasFields(self,*fieldnames): for fieldname in fieldnames: if not fieldname in self.fieldname2dataset: @@ -1223,13 +1227,12 @@ else: self.fields_columns[fieldname]=fieldcolumns elif type(fieldcolumns) is slice: - start,step=None,None - if not fieldcolumns.start: + start,step=fieldcolumns.start,fieldcolumns.step + if not start: start=0 - if not fieldcolumns.step: + if not step: step=1 - if start or step: - self.fields_columns[fieldname]=slice(start,fieldcolumns.stop,step) + self.fields_columns[fieldname]=slice(start,fieldcolumns.stop,step) elif hasattr(fieldcolumns,"__iter__"): # something like a list for i in fieldcolumns: assert i>=0 and i