pylearn: diff dataset.py @ 135:0d8e721cc63c
Fixed bugs in dataset to make test_mlp.py work
author    Yoshua Bengio <bengioy@iro.umontreal.ca>
date      Mon, 12 May 2008 14:30:21 -0400
parents   3f4e5c9bdc5e
children  ceae4de18981
--- a/dataset.py	Fri May 09 17:38:57 2008 -0400
+++ b/dataset.py	Mon May 12 14:30:21 2008 -0400
@@ -429,8 +429,8 @@
         rows=None
         # or a slice
         if type(i) is slice:
-            if not i.start: i.start=0
-            if not i.step: i.step=1
+            if not i.start: i=slice(0,i.stop,i.step)
+            if not i.step: i=slice(i.start,i.stop,1)
             if i.step is 1:
                 return self.minibatches(minibatch_size=i.stop-i.start,n_batches=1,offset=i.start).next().examples()
             rows = range(i.start,i.stop,i.step)
@@ -497,7 +497,7 @@
         dataset1 | dataset2 returns a dataset whose list of fields is the
         concatenation of the list of fields of the argument datasets. This only
         works if they all have the same length.
         """
-        return HStackedDataSet(self,other)
+        return HStackedDataSet([self,other])
 
     def __and__(self,other):
         """
@@ -505,7 +505,7 @@
         (and whose length is the sum of the length of the argument datasets).
         This only works if they all have the same fields.
         """
-        return VStackedDataSet(self,other)
+        return VStackedDataSet([self,other])
 
 def hstack(datasets):
     """
@@ -1068,7 +1068,7 @@
     def next(self):
         upper = self.current+minibatch_size
         cache_len = len(self.dataset.cached_examples)
-        if upper>=cache_len: # whole minibatch is not already in cache
+        if upper>cache_len: # whole minibatch is not already in cache
            # cache everything from current length to upper
            for example in self.dataset.source_dataset[cache_len:upper]:
                self.dataset.cached_examples.append(example)
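
The first hunk replaces in-place assignment to the slice with construction of a new slice object. A minimal standalone sketch of the underlying Python behaviour (not part of dataset.py; the variable name s is illustrative): slice attributes are read-only, so the old code raised an AttributeError whenever start or step was missing.

    # Standalone sketch: slice objects are immutable, so the old
    # "i.start=0" / "i.step=1" assignments could never succeed.
    s = slice(None, 10, None)
    try:
        s.start = 0            # what the old code attempted
    except AttributeError:
        print("cannot assign to slice.start")

    # The committed fix fills in the defaults by building fresh slices.
    if not s.start:
        s = slice(0, s.stop, s.step)
    if not s.step:
        s = slice(s.start, s.stop, 1)
    print(s)                   # slice(0, 10, 1)

The remaining hunks pass both operands of | and & to HStackedDataSet and VStackedDataSet as a single list argument, and tighten the cache test to upper > cache_len so the cache is only extended when the requested minibatch actually runs past the examples already cached.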