Mercurial > pylearn
changeset 253:394e07e2b0fd
code clean up
author | Frederic Bastien <bastienf@iro.umontreal.ca> |
---|---|
date | Tue, 03 Jun 2008 13:23:28 -0400 |
parents | 856d14dc4468 |
children | 8ec867d12428 |
files | dataset.py |
diffstat | 1 files changed, 0 insertions(+), 28 deletions(-) [+] |
line wrap: on
line diff
--- a/dataset.py	Tue Jun 03 13:22:45 2008 -0400
+++ b/dataset.py	Tue Jun 03 13:23:28 2008 -0400
@@ -1186,34 +1186,6 @@
         return CacheIteratorIter(self)
 
-#     class CachedDataSetIterator(object):
-#         def __init__(self,dataset,fieldnames):#,minibatch_size,n_batches,offset):
-# #            if fieldnames is None: fieldnames = dataset.fieldNames()
-#             # store the resulting minibatch in a lookup-list of values
-#             self.minibatch = LookupList(fieldnames,[0]*len(fieldnames))
-#             self.dataset=dataset
-# #            self.minibatch_size=minibatch_size
-# #            assert offset>=0 and offset<len(dataset.data)
-# #            assert offset+minibatch_size<=len(dataset.data)
-#             self.current=0
-#             self.columns = [self.dataset.fields_columns[f]
-#                             for f in self.minibatch._names]
-#             self.l = len(self.dataset)
-#         def __iter__(self):
-#             return self
-#         def next(self):
-#             #@todo: we suppose that we need to stop only when minibatch_size == 1.
-#             # Otherwise, MinibatchWrapAroundIterator do it.
-#             if self.current>=self.l:
-#                 raise StopIteration
-#             sub_data = self.dataset.data[self.current]
-#             self.minibatch._values = [sub_data[c] for c in self.columns]
-
-#             self.current+=self.minibatch_size
-#             return self.minibatch
-
-#     return CachedDataSetIterator(self,self.fieldNames())#,1,0,0)
-
 class ApplyFunctionDataSet(DataSet):
     """
     A L{DataSet} that contains as fields the results of applying a