view dataset.py @ 20:266c68cb6136

Minor editions, plus adding untested ApplyFunctionDataset for GradientLearner in the works.
author bengioy@bengiomac.local
date Mon, 07 Apr 2008 09:48:39 -0400


import copy
from lookup_list import LookupList
Example = LookupList

class AbstractFunction (Exception):
    """Derived class must override this function"""
        
class DataSet(object):
    """A virtual base class for datasets.

    A DataSet is a generator of iterators; these iterators can run through the
    examples in a variety of ways.  A DataSet need not necessarily have a finite
    or known length, so this class can be used to interface to a 'stream' which
    feeds on-line learning.

    To iterate over examples, there are several possibilities:
    - for example in dataset.zip(field1, field2, field3, ...)
    - for val1,val2,val3 in dataset.zip(field1, field2, field3)
    - for minibatch in dataset.minibatches([field1, field2, ...], minibatch_size=N)
    - for example in dataset
    Each of these is documented below.

    Note: For a dataset of fixed and known length, which can implement item
    random-access efficiently (e.g. indexing and slicing), and which can profit
    from the FiniteDataSetIterator, consider using base class FiniteDataSet.

    Note: Fields are not mutually exclusive, i.e. two fields can overlap in their actual content.

    Note: The content of a field can be of any type.
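
    For example (a sketch, assuming a hypothetical dataset whose fieldNames()
    returns ['x', 'y']):

        for x, y in dataset.zip('x', 'y'):
            print x, y

        for batch in dataset.minibatches(['x', 'y'], minibatch_size=10):
            xs, ys = batch['x'], batch['y']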

    """

    def __init__(self):
        pass
    
    def __iter__(self):
        """Supports the syntax "for i in dataset: ..."

        Using this syntax, "i" will be an Example instance (or equivalent) with
        all the fields of DataSet self.  Every field of "i" will give access to
        a field of a single example.  Fields should be accessible via
        i["fieldname"] or i[3] (in the fieldNames() order), but the derived class is free
        to accept any type of identifier, and add extra functionality to the iterator.
        """
        return self.zip(*self.fieldNames())

    def zip(self, *fieldnames):
        """
        Supports two forms of syntax:

            for i in dataset.zip(f1, f2, f3): ...

            for i1, i2, i3 in dataset.zip(f1, f2, f3): ...

        Using the first syntax, "i" will be an indexable object, such as a list,
        tuple, or Example instance, such that on every iteration, i[0] is the f1
        field of the current example, i[1] is the f2 field, and so on.

        Using the second syntax, i1, i2, i3 will contain the contents of the
        f1, f2, and f3 fields of a single example on each loop iteration.

        The derived class may accept fieldname arguments of any type.

        """
        class Iter(LookupList):
            def __init__(self, ll):
                LookupList.__init__(self, ll.keys(), ll.values())
                self.ll = ll
            def __iter__(self): #makes for loop work
                return self
            def next(self):
                self.ll.next()
                self._values = [v[0] for v in self.ll._values]
                return self
        return Iter(self.minibatches(fieldnames, minibatch_size = 1))

    minibatches_fieldnames = None
    minibatches_minibatch_size = 1
    minibatches_n_batches = None
    def minibatches(self,
            fieldnames = minibatches_fieldnames,
            minibatch_size = minibatches_minibatch_size,
            n_batches = minibatches_n_batches):
        """
        Supports two forms of syntax:

            for i in dataset.minibatches([f1, f2, f3],**kwargs): ...

            for i1, i2, i3 in dataset.minibatches([f1, f2, f3],**kwargs): ...

        Using the first syntax, "i" will be an indexable object, such as a list,
        tuple, or Example instance, such that on every iteration, i[0] is a
        list-like container of the f1 field of the current batch of examples, i[1] is
        a list-like container of the f2 field, etc.

        Using the second syntax, i1, i2, i3 will be list-like containers of the
        f1, f2, and f3 fields of a batch of examples on each loop iteration.

        PARAMETERS
        - fieldnames (list of any type, default None):
        The loop variables i1, i2, i3 (in the example above) should contain the
        f1, f2, and f3 fields of the current batch of examples.  If None, the
        derived class can choose a default, e.g. all fields.

        - minibatch_size (integer, default 1)
        On every iteration, the variables i1, i2, i3 will have
        exactly minibatch_size elements. e.g. len(i1) == minibatch_size

        - n_batches (integer, default None)
        The iterator will loop exactly this many times, and then stop.  If None,
        the derived class can choose a default.  If (-1), then the returned
        iterator should support looping indefinitely.

        Note: A list-like container is something like a tuple, list, numpy.ndarray or
        any other object that supports integer indexing and slicing.
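
        For example (a sketch, with hypothetical fields 'x' and 'y'):

            for xs, ys in dataset.minibatches(['x', 'y'], minibatch_size=10):
                # xs and ys each hold the corresponding field values of 10 examples
                ...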

        """
        raise AbstractFunction()
    
    def fieldNames(self):
        #Yoshua- 
        # This list may not be finite; what would make sense in the use you have
        # in mind?
        # -JB
        #James-
        # You are right. I had put this to be able to iterate over the fields
        # but maybe an iterator mechanism (over fields rather than examples)
        # would be more appropriate. Fieldnames are needed in general
        # by the iterators over examples or minibatches, to construct
        # examples or minibatches with the corresponding names as attributes.
        # -YB
        """
        Return an iterator (an object with an __iter__ method) that
        iterates over the names of the fields. As a special case,
        a list or a tuple of field names can be returned.
        """
        # Note that some datasets
        # may have virtual fields and support a virtually infinite number
        # of possible field names. In that case, fieldNames() should
        # either raise an error or iterate over a particular set of
        # names as appropriate. Another option would be to iterate
        # over the sub-datasets comprising a single field at a time.
        # I am not sure yet what is most appropriate.
        #  -YB
        raise AbstractFunction()

    def rename(self, *new_field_specifications):
        #Yoshua- 
        # Do you mean for this to be a virtual method?
        # Wouldn't this functionality be easier to provide via a
        # RenamingDataSet, such as the one I've written below?
        # -JB
        # You are right. Whichever implementation, however, we need a generic way to
        # 'concatenate' fields, to handle the ([old_field1, old_field2, ...], new_field) semantics.
        # -YB
        """
        Return a new dataset that maps old fields (of self) to new fields (of the returned 
        dataset). The minimal syntax that should be supported is the following:
           new_field_specifications = [new_field_spec1, new_field_spec2, ...]
           new_field_spec = ([old_field1, old_field2, ...], new_field)
        In general both old_field and new_field should be strings, but some datasets may also
        support additional indexing schemes within each field (e.g. column slice
        of a matrix-like field).
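
        For example (with hypothetical field names),
           dataset.rename((['x0', 'x1'], 'input'), (['y'], 'target'))
        would return a dataset whose 'input' field concatenates the old 'x0' and 'x1'
        fields and whose 'target' field corresponds to the old 'y' field.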
        """
        raise AbstractFunction()


    def apply_function(self, function, input_fields, output_fields, copy_inputs=True, accept_minibatches=True, cache=True):
        """
        Return a dataset that contains as fields the results of applying
        the given function (example-wise) to the specified input_fields. The
        function should return a sequence whose elements will be stored in
        fields whose names are given in the output_fields list. If copy_inputs
        is True then the resulting dataset will also contain the fields of self.
        If accept_minibatches, then the function may be called
        with minibatches as arguments (what is returned by the minibatches
        iterator). In any case, the computations may be delayed until the examples
        of the resulting dataset are requested. If cache is True, then
        once the output fields for some examples have been computed, they
        are cached (to avoid recomputation if the same examples are
        requested again).
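
        For example (a sketch; 'x' is a hypothetical input field of self):
           squared = dataset.apply_function(lambda x: (x*x,), ['x'], ['x2'])
        would return a dataset with an 'x2' field holding the squared 'x' values
        (plus the fields of self, since copy_inputs defaults to True).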
        """
        return ApplyFunctionDataSet(self, function, input_fields, output_fields, copy_inputs, accept_minibatches, cache)

class RenamingDataSet(DataSet):
    """A DataSet that wraps another one, and makes it look like the field names
    are different

    Renaming is done by a dictionary that maps new names to the old ones used in
    self.src.
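
    For example (hypothetical names), RenamingDataSet(src, {'input': 'x'}) exposes
    the source's 'x' field under the new name 'input'.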
    """
    def __init__(self, src, rename_dct):
        DataSet.__init__(self)
        self.src = src
        self.rename_dct = copy.copy(rename_dct)

    def minibatches(self,
            fieldnames = DataSet.minibatches_fieldnames,
            minibatch_size = DataSet.minibatches_minibatch_size,
            n_batches = DataSet.minibatches_n_batches):
        dct = self.rename_dct
        new_fieldnames = [dct.get(f, f) for f in fieldnames]
        return self.src.minibatches(new_fieldnames, minibatch_size, n_batches)

    def fieldNames(self):
        # map the source's old field names to the new names they are exposed under
        inverse_dct = dict((old, new) for (new, old) in self.rename_dct.items())
        return [inverse_dct.get(f, f) for f in self.src.fieldNames()]


class FiniteDataSet(DataSet):
    """
    Virtual interface, a subclass of DataSet for datasets which have a finite, known length.
    Examples are indexed by an integer between 0 and self.length()-1,
    and a subdataset can be obtained by slicing. This may not be appropriate in general
    but only for datasets which can be thought of like ones that access rows AND fields
    in an efficient random access way. Users are encouraged to expect only the generic dataset
    interface in general. A FiniteDataSet is mainly useful when one has to obtain
    a subset of examples (e.g. for splitting a dataset into training and test sets).
    """

    class FiniteDataSetIterator(object):
        """
        If the fieldnames list is empty, it means that we want to see ALL the fields.
        """
        def __init__(self,dataset,minibatch_size=1,fieldnames=[]):
            self.dataset=dataset
            self.minibatch_size=minibatch_size
            assert minibatch_size>=1 and minibatch_size<=len(dataset)
            self.current = -self.minibatch_size
            self.fieldnames = fieldnames
            if len(dataset) % minibatch_size:
                raise NotImplementedError()

        def __iter__(self):
            return self
        
        def next(self):
            self.current+=self.minibatch_size
            if self.current>=len(self.dataset):
                self.current=-self.minibatch_size
                raise StopIteration
            if self.minibatch_size==1:
                complete_example=self.dataset[self.current]
            else:
                complete_example=self.dataset[self.current:self.current+self.minibatch_size]
            if self.fieldnames:
                return Example(self.fieldnames,list(complete_example))
            else:
                return complete_example

    def __init__(self):
        pass

    def minibatches(self,
            fieldnames = DataSet.minibatches_fieldnames,
            minibatch_size = DataSet.minibatches_minibatch_size,
            n_batches = DataSet.minibatches_n_batches):
        """
        If the fieldnames list is None, it means that we want to see ALL the fields.

        If n_batches is None, we want to see all the examples possible
        for the given minibatch_size.
        """
        # substitute the defaults:
        if fieldnames is None: fieldnames = self.fieldNames()
        if n_batches is None: n_batches = len(self) / minibatch_size
        # note: n_batches is not yet taken into account by FiniteDataSetIterator
        return FiniteDataSet.FiniteDataSetIterator(self, minibatch_size, fieldnames)

    def __getattr__(self,fieldname):
        """Return an that can iterate over the values of the field in this dataset."""
        return self(fieldname)

    def __call__(self,*fieldnames):
        """Return a sub-dataset containing only the given fieldnames as fields.
        
        The return value's default iterator will iterate only over the given
        fields.
        """
        raise AbstractFunction()

    def __len__(self):
        """len(dataset) returns the number of examples in the dataset."""
        raise AbstractFunction()
    
    def __getitem__(self,i):
        """dataset[i] returns the (i+1)-th example of the dataset."""
        raise AbstractFunction()

    def __getslice__(self,*slice_args):
        """dataset[i:j] returns the subdataset with examples i,i+1,...,j-1."""
        raise AbstractFunction()

# we may want ArrayDataSet defined in another python file

import numpy

def as_array_dataset(dataset):
    # Generally datasets can be efficient by making data fields overlap, but
    # this function doesn't know which fields overlap.  So, it should check if
    # dataset supports an as_array_dataset member function, and return that if
    # possible.
    if hasattr(dataset, 'as_array_dataset'):
        return dataset.as_array_dataset()

    raise NotImplementedError()

    # Make ONE big minibatch with all the examples, to separate the fields.
    n_examples = len(dataset)
    batch = dataset.minibatches( minibatch_size = len(dataset)).next()

    # Each field of the underlying dataset must be convertible to a numpy array of the same type
    # currently just double, but should use the smallest compatible dtype
    n_fields = len(batch)
    fieldnames = batch.fields.keys()
    total_width = 0
    type = None
    fields = LookupList()
    for i in xrange(n_fields):
        field = numpy.array(batch[i])
        assert field.shape[0]==n_examples
        width = field.shape[1]
        start=total_width
        total_width += width
        fields[fieldnames[i]]=slice(start,total_width,1)
    # many complicated things remain to be done:
    #  - find common dtype
    #  - decide what to do with extra dimensions if not the same in all fields
    #  - try to see if we can avoid the copy?

class ArrayDataSet(FiniteDataSet):
    """
    An ArrayDataSet behaves like a numpy array but adds the notion of named fields
    from DataSet (and the ability to view the values of multiple fields as an 'Example').
    It is a fixed-length and fixed-width dataset
    in which each element is a fixed-dimension numpy array or a number, hence the whole
    dataset corresponds to a numpy array. Fields
    must correspond to a slice of array columns. If the dataset has fields,
    each 'example' is just a one-row ArrayDataSet, otherwise it is a numpy array.
    Any dataset can also be converted to a numpy array (losing the notion of fields)
    by the numpy.array(dataset) call.
    """

    class Iterator(LookupList):
        """An iterator over a finite dataset that implements wrap-around"""
        def __init__(self, dataset, fieldnames, minibatch_size, next_max):
            LookupList.__init__(self, fieldnames, [0] * len(fieldnames))
            self.dataset=dataset
            self.minibatch_size=minibatch_size
            self.next_count = 0
            self.next_max = next_max
            self.current = -self.minibatch_size
            assert minibatch_size > 0
            if minibatch_size >= len(dataset):
                raise NotImplementedError()

        def __iter__(self): #makes for loop work
            return self

        @staticmethod
        def matcat(a, b):
            a0, a1 = a.shape
            b0, b1 = b.shape
            assert a1 == b1
            assert a.dtype is b.dtype
            rval = numpy.empty( (a0 + b0, a1), dtype=a.dtype)
            rval[:a0,:] = a
            rval[a0:,:] = b
            return rval
        
        def next(self):

            #check for end-of-loop
            self.next_count += 1
            if self.next_count > self.next_max:
                raise StopIteration

            #determine the first and last elements of the slice we'll return
            rows = self.dataset.data.shape[0]
            self.current += self.minibatch_size
            if self.current >= rows:
                self.current -= rows
            upper = self.current + self.minibatch_size

            data = self.dataset.data

            if upper <= rows:
                #this is the easy case, we only need once slice
                dataview = data[self.current:upper]
            else:
                # the minibatch wraps around the end of the dataset
                dataview = data[self.current:]
                upper -= rows
                assert upper > 0
                dataview = self.matcat(dataview, data[:upper])


            self._values = [dataview[:, self.dataset.fields[f]]\
                    for f in self._names]

            return self


    def __init__(self, data, fields=None):
        """
        There are two ways to construct an ArrayDataSet: (1) from an
        existing dataset (which may result in a copy of the data in a numpy array),
        or (2) from a numpy.array (the data argument), along with an optional description
        of the fields (a LookupList of column slices indexed by field names).
        """
        self.data=data
        self.fields=fields
        rows, cols = data.shape

        if fields:
            for fieldname,fieldslice in fields.items():
                # make sure fieldslice.start and fieldslice.step are defined
                start=fieldslice.start
                step=fieldslice.step
                if not start:
                    start=0
                if not step:
                    step=1
                if not fieldslice.start or not fieldslice.step:
                    fields[fieldname] = fieldslice = slice(start,fieldslice.stop,step)
                # and coherent with the data array
                assert fieldslice.start >= 0 and fieldslice.stop <= cols

    def minibatches(self,
            fieldnames = DataSet.minibatches_fieldnames,
            minibatch_size = DataSet.minibatches_minibatch_size,
            n_batches = DataSet.minibatches_n_batches):
        """
        If the fieldnames list is None, it means that we want to see ALL the fields.

        If n_batches is None, we want to see all the examples possible
        for the given minibatch_size.
        """
        # substitute the defaults:
        if fieldnames is None: fieldnames = self.fieldNames()
        if n_batches is None: n_batches = len(self) / minibatch_size
        return ArrayDataSet.Iterator(self, fieldnames, minibatch_size, n_batches)

    def __getattr__(self,fieldname):
        """
        Return a numpy array with the content associated with the given field name.
        If this is a one-example dataset, then a row, i.e., numpy array (of one less dimension
        than the dataset itself) is returned.
        """
        if len(self.data)==1:
            return self.data[0,self.fields[fieldname]]
        return self.data[:,self.fields[fieldname]]

    def __call__(self,*fieldnames):
        """Return a sub-dataset containing only the given fieldnames as fields."""
        min_col=self.data.shape[1]
        max_col=0
        for fieldname in fieldnames:
            field_slice=self.fields[fieldname]
            min_col=min(min_col,field_slice.start)
            max_col=max(max_col,field_slice.stop)
        new_fields=LookupList()
        for fieldname in fieldnames:
            fieldslice=self.fields[fieldname]
            new_fields[fieldname]=slice(fieldslice.start-min_col,fieldslice.stop-min_col,fieldslice.step)
        return ArrayDataSet(self.data[:,min_col:max_col],fields=new_fields)

    def fieldNames(self):
        """Return the list of field names that are supported by getattr and getFields."""
        return self.fields.keys()

    def __len__(self):
        """len(dataset) returns the number of examples in the dataset."""
        return len(self.data)
    
    def __getitem__(self,i):
        """
        dataset[i] returns the (i+1)-th Example of the dataset. If there are no fields
        the result is just a numpy array (for the i-th row of the dataset data matrix).
        """
        if self.fields:
            return Example(self.fields.keys(),
                           [self.data[i,fieldslice] for fieldslice in self.fields.values()])
        else:
            return self.data[i]

    def __getslice__(self,*args):
        """dataset[i:j] returns the subdataset with examples i,i+1,...,j-1."""
        return ArrayDataSet(self.data.__getslice__(*args), fields=self.fields)

    def __array__(self):
        """Return a view of this dataset which is an numpy.ndarray (i.e. losing
        the identity and name of fields within the dataset).

        Numpy uses this special function name to retrieve an ndarray view for
        function such as numpy.sum, numpy.dot, numpy.asarray, etc.

        If this dataset has no fields, then we simply return self.data,
        otherwise things are complicated. 
        - why do we want this behaviour when there are fields? (JB)
        - for convenience and completeness (but maybe it would make
          more sense to implement this through a 'field-merging'
          dataset). (YB)
        """
        if not self.fields:
            return self.data
        # else, select subsets of columns mapped by the fields
        columns_used = numpy.zeros((self.data.shape[1]),dtype=bool)
        for field_slice in self.fields.values():
            for c in xrange(field_slice.start,field_slice.stop,field_slice.step):
                columns_used[c]=True
        # try to figure out if we can map all the slices into one slice:
        mappable_to_one_slice = True
        start=0
        while start<len(columns_used) and not columns_used[start]:
            start+=1
        stop=len(columns_used)
        while stop>0 and not columns_used[stop-1]:
            stop-=1
        step=0
        i=start
        while i<stop:
            j=i+1
            while j<stop and not columns_used[j]:
                j+=1
            if step:
                if step!=j-i:
                    mappable_to_one_slice = False
                    break
            else:
                step = j-i
            i=j
        if mappable_to_one_slice:
            return self.data[:,slice(start,stop,step)]
        # else make contiguous copy
        n_columns = sum(columns_used)
        result = numpy.zeros((len(self.data),n_columns)+self.data.shape[2:],self.data.dtype)
        c=0
        for field_slice in self.fields.values():
            slice_width=(field_slice.stop-field_slice.start)/field_slice.step
            # copy the field here
            result[:,slice(c,c+slice_width)]=self.data[:,field_slice]
            c+=slice_width
        return result

class ApplyFunctionDataSet(DataSet):
    """
    A dataset that contains as fields the results of applying
    a given function (example-wise) to specified input_fields of a source
    dataset. The function should return a sequence whose elements will be stored in
    fields whose names are given in the output_fields list. If copy_inputs
    is True then the resulting dataset will also contain the fields of the source
    dataset. If accept_minibatches, then the function expects
    minibatches as arguments (what is returned by the minibatches
    iterator). In any case, the computations may be delayed until the examples
    of self are requested. If cache is True, then
    once the output fields for some examples have been computed, they
    are cached (to avoid recomputation if the same examples are requested again).
    """
    def __init__(self, src, function, input_fields, output_fields, copy_inputs=True, accept_minibatches=True, cache=True):
        DataSet.__init__(self)
        self.src=src
        self.function=function
        self.input_fields=input_fields
        self.output_fields=output_fields
        self.copy_inputs=copy_inputs
        self.accept_minibatches=accept_minibatches
        src_fieldnames = src.fieldNames()
        if copy_inputs:
            for src_field in src_fieldnames:
                assert src_field not in output_fields
            self.fieldnames=src_fieldnames+output_fields
        else:
            self.fieldnames=output_fields
        for input_field in input_fields:
            assert input_field in src_fieldnames
        self.cache=cache
        if cache:
            # maybe a fixed-size array kind of structure would be more efficient than a list
            # in the case where src is FiniteDataSet. -YB
            self.cached_examples = [] 

    def fieldNames(self): return self.fieldnames
    
    def minibatches(self,
                    fieldnames = DataSet.minibatches_fieldnames,
                    minibatch_size = DataSet.minibatches_minibatch_size,
                    n_batches = DataSet.minibatches_n_batches):

        class Iterator(LookupList):

            def __init__(self,dataset):
                LookupList.__init__(self, fieldnames, [0]*len(fieldnames))
                self.dataset=dataset
                if dataset.copy_inputs:
                    src_fields=dataset.src.fieldNames()
                else:
                    src_fields=dataset.input_fields
                self.src_iterator=dataset.src.minibatches(src_fields,minibatch_size,n_batches)
                                                       
            def __iter__(self):
                return self

            def next(self):
                src_examples = self.src_iterator.next()
                if self.dataset.copy_inputs:
                    function_inputs = src_examples
                else:
                    function_inputs = [src_examples[field_name]
                                       for field_name in self.dataset.input_fields]
                return self.dataset.function(*function_inputs)

        # substitute the default and check that the requested fields exist
        if fieldnames is None: fieldnames = self.fieldNames()
        for fieldname in fieldnames:
            assert fieldname in self.fieldnames
        return Iterator(self)