view dataset.py @ 32:039c0f249859

added C impl for softmax dx
author bergstrj@iro.umontreal.ca
date Sun, 13 Apr 2008 00:21:11 -0400
parents 46c5c90019c2
children 438440ba0627


from lookup_list import LookupList
Example = LookupList
import copy

class AbstractFunction(Exception):
    """Derived class must override this function"""
        
class DataSet(object):
    """A virtual base class for datasets.

    A DataSet is a generator of iterators; these iterators can run through the
    examples in a variety of ways.  A DataSet need not necessarily have a finite
    or known length, so this class can be used to interface to a 'stream' which
    feeds on-line learning. 

    To iterate over examples, there are several possibilities
    (a usage sketch follows this class definition):
    - for example in dataset.zip([field1, field2, field3, ...])
    - for val1,val2,val3 in dataset.zip([field1, field2, field3])
    - for minibatch in dataset.minibatches([field1, field2, ...],minibatch_size=N)
    - for example in dataset
    Each of these is documented below. All of these iterators are expected
    to provide, in addition to the usual 'next()' method, a 'next_index()' method
    which returns a non-negative integer pointing to the position of the next
    example that will be returned by 'next()' (or of the first example in the
    next minibatch returned). This is important because these iterators
    can wrap around the dataset in order to do multiple passes through it,
    in possibly irregular ways if the minibatch size is not a divisor of the
    dataset length.

    Note: Fields are not mutually exclusive, i.e. two fields can overlap in their actual content.

    Note: The content of a field can be of any type.

    Note: A dataset can recognize a potentially infinite number of field names (i.e. the field
    values can be computed on-demand, when particular field names are used in one of the
    iterators).

    Datasets of finite length should be sub-classes of FiniteLengthDataSet.

    Datasets whose elements can be indexed and whose sub-datasets (with a subset
    of examples) can be extracted should be sub-classes of
    SliceableDataSet.

    Datasets with a finite number of fields should be sub-classes of
    FiniteWidthDataSet.
    """

    def __init__(self):
        pass
    
    class Iterator(LookupList):
        def __init__(self, ll):
            LookupList.__init__(self, ll.keys(), ll.values())
            self.ll = ll
        def __iter__(self): #makes for loop work
            return self
        def next(self):
            self.ll.next()
            self._values = [v[0] for v in self.ll._values]
            return self
        def next_index(self):
            return self.ll.next_index()

    def __iter__(self):
        """Supports the syntax "for i in dataset: ..."

        Using this syntax, "i" will be an Example instance (or equivalent) with
        all the fields of DataSet self.  Every field of "i" will give access to
        a field of a single example.  Fields should be accessible via
        i["fielname"] or i[3] (in the order defined by the elements of the
        Example returned by this iterator), but the derived class is free
        to accept any type of identifier, and add extra functionality to the iterator.
        """
        return DataSet.Iterator(self.minibatches(None, minibatch_size = 1))

    def zip(self, *fieldnames):
        """
        Supports two forms of syntax:

            for i in dataset.zip([f1, f2, f3]): ...

            for i1, i2, i3 in dataset.zip([f1, f2, f3]): ...

        Using the first syntax, "i" will be an indexable object, such as a list,
        tuple, or Example instance, such that on every iteration, i[0] is the f1
        field of the current example, i[1] is the f2 field, and so on.

        Using the second syntax, i1, i2, i3 will contain the contents of the
        f1, f2, and f3 fields of a single example on each loop iteration.

        The derived class may accept fieldname arguments of any type.

        """
        return DataSet.Iterator(self.minibatches(fieldnames, minibatch_size = 1))

    minibatches_fieldnames = None
    minibatches_minibatch_size = 1
    minibatches_n_batches = None
    def minibatches(self,
            fieldnames = minibatches_fieldnames,
            minibatch_size = minibatches_minibatch_size,
            n_batches = minibatches_n_batches):
        """
        Supports three forms of syntax:

            for i in dataset.minibatches(None,**kwargs): ...

            for i in dataset.minibatches([f1, f2, f3],**kwargs): ...

            for i1, i2, i3 in dataset.minibatches([f1, f2, f3],**kwargs): ...

        Using the first two syntaxes, "i" will be an indexable object, such as a list,
        tuple, or Example instance. In both cases, i[k] is a list-like container
        holding a batch of values for the k-th requested field. In the second case,
        i[0] is a list-like container of the f1 field of a batch of current examples,
        i[1] is a list-like container of the f2 field, etc.

        Using the first syntax, all the fields will be returned in "i".
        Beware that some datasets may not support this syntax, if the number
        of fields is infinite (i.e. field values may be computed "on demand").

        Using the third syntax, i1, i2, i3 will be list-like containers of the
        f1, f2, and f3 fields of a batch of examples on each loop iteration.

        PARAMETERS
        - fieldnames (list of any type, default None):
        The loop variables i1, i2, i3 (in the example above) should contain the
        f1, f2, and f3 fields of the current batch of examples.  If None, the
        derived class can choose a default, e.g. all fields.

        - minibatch_size (integer, default 1)
        On every iteration, the variables i1, i2, i3 will have
        exactly minibatch_size elements. e.g. len(i1) == minibatch_size

        - n_batches (integer, default None)
        The iterator will loop exactly this many times, and then stop.  If None,
        the derived class can choose a default.  If (-1), then the returned
        iterator should support looping indefinitely.

        Note: A list-like container is something like a tuple, list, numpy.ndarray or
        any other object that supports integer indexing and slicing.

        """
        raise AbstractFunction()

    def hasFields(self,*fieldnames):
        """
        Return true if the given field name (or field names, if multiple arguments are
        given) is recognized by the DataSet (i.e. can be used as a field name in one
        of the iterators).
        """
        raise AbstractFunction()

        
    def merge_fields(self,*specifications):
        """
        Return a new dataset that maps old fields (of self) to new fields (of the returned 
        dataset). The minimal syntax that should be supported is the following:
           new_field_specifications = [new_field_spec1, new_field_spec2, ...]
           new_field_spec = ([old_field1, old_field2, ...], new_field)
        In general both old_field and new_field should be strings, but some datasets may also
        support additional indexing schemes within each field (e.g. column slice
        of a matrix-like field).
        """
        raise AbstractFunction()

    def merge_field_values(self,*field_value_pairs):
        """
        Return the value that corresponds to merging the values of several fields,
        given as arguments (field_name, field_value) pairs with self.hasField(field_name).
        This may be used by implementations of merge_fields.
        Raise a ValueError if the operation is not possible.
        """
        fieldnames,fieldvalues = zip(*field_value_pairs)
        raise ValueError("Unable to merge values of these fields:"+repr(fieldnames))

    def examples2minibatch(self,examples):
        """
        Combine a list of Examples into a minibatch. A minibatch is an Example whose fields
        are iterable over the examples of the minibatch.
        """
        raise AbstractFunction()
    
    def rename(self,rename_dict):
        """
        Changes a dataset into one that renames fields, using a dictionary that maps old field
        names to new field names. The only fields visible by the returned dataset are those
        whose names are keys of the rename_dict.
        """
        self_class = self.__class__
        class SelfRenamingDataSet(RenamingDataSet,self_class):
            pass
        self.__class__ = SelfRenamingDataSet
        # set the rename_dict and src fields
        SelfRenamingDataSet.__init__(self,self,rename_dict)
        return self
        
    def apply_function(self,function, input_fields, output_fields, copy_inputs=True, accept_minibatches=True, cache=True):
        """
        Changes a dataset into one that contains as fields the results of applying
        the given function (example-wise) to the specified input_fields. The
        function should return a sequence whose elements will be stored in
        fields whose names are given in the output_fields list. If copy_inputs
        is True then the resulting dataset will also contain the fields of self.
        If accept_minibatches, then the function may be called
        with minibatches as arguments (what is returned by the minibatches
        iterator). In any case, the computations may be delayed until the examples
        of the resulting dataset are requested. If cache is True, then
        once the output fields for some examples have been computed, they
        are cached (to avoid recomputation if the same examples are
        requested again).
        """
        self_class = self.__class__
        class SelfApplyFunctionDataSet(ApplyFunctionDataSet,self_class):
            pass
        self.__class__ = SelfApplyFunctionDataSet
        # set the required additional fields
        ApplyFunctionDataSet.__init__(self,self,function, input_fields, output_fields, copy_inputs, accept_minibatches, cache)
        return self
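
# The following is a small usage sketch (hypothetical, never called here) that
# illustrates the iteration protocols documented in the DataSet docstring.  It
# assumes 'dataset' is a concrete DataSet whose examples expose 'x' and 'y'
# fields; those field names are only placeholders.
def _dataset_iteration_sketch(dataset):
    # one example at a time; each example behaves like an Example/LookupList
    for example in dataset:
        x, y = example['x'], example['y']
    # selected fields only, unpacked directly in the for statement
    for x, y in dataset.zip(['x', 'y']):
        pass
    # minibatches of 10 examples; minibatch['x'] is a list-like container
    # holding the 'x' values of the whole minibatch
    for minibatch in dataset.minibatches(['x', 'y'], minibatch_size=10):
        xs, ys = minibatch['x'], minibatch['y']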


class FiniteLengthDataSet(DataSet):
    """
    Virtual interface for datasets that have a finite length (number of examples),
    and thus recognize a len(dataset) call.
    """
    def __init__(self):
        DataSet.__init__(self)

    def __len__(self):
        """len(dataset) returns the number of examples in the dataset."""
        raise AbstractFunction()

    def __call__(self,fieldname_or_fieldnames):
        """
        Extract one or more fields. This may be an expensive operation when the
        dataset is large. It is not the recommended way to access individual values
        (use the iterators instead). If the argument is a string fieldname, then the result
        is a sequence (iterable object) of values for that field, for the whole dataset. If the
        argument is a list of field names, then the result is a 'batch', i.e., an Example with keys
        corresponding to the given field names and values being iterable objects over the
        individual example values.
        """
        if isinstance(fieldname_or_fieldnames, str):
            minibatch = self.minibatches([fieldname_or_fieldnames],len(self)).next()
            return minibatch[fieldname_or_fieldnames]
        return self.minibatches(fieldname_or_fieldnames,len(self)).next()
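
# Hypothetical sketch (not called anywhere) of the __call__ protocol above:
# extracting whole columns from a finite-length dataset.  The field names
# 'x' and 'y' are placeholders.
def _field_extraction_sketch(dataset):
    xs = dataset('x')            # iterable of the 'x' value of every example
    batch = dataset(['x', 'y'])  # an Example whose values iterate over all examples
    return xs, batch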
                 
class SliceableDataSet(DataSet):
    """
    Virtual interface, a subclass of DataSet for datasets which are sliceable
    and whose individual elements can be accessed, generally respecting the
    python semantics for [spec], where spec is either a non-negative integer
    (for selecting one example), a python slice(start,stop,step) for selecting a regular
    sub-dataset comprising examples start,start+step,start+2*step,...,n (with n<stop), or a
    sequence (e.g. a list) of integers [i1,i2,...,in] for selecting
    an arbitrary subset of examples. This is useful for obtaining
    sub-datasets, e.g. for splitting a dataset into training and test sets.
    """
    def __init__(self):
        DataSet.__init__(self)
        
    def minibatches(self,
            fieldnames = DataSet.minibatches_fieldnames,
            minibatch_size = DataSet.minibatches_minibatch_size,
            n_batches = DataSet.minibatches_n_batches):
        """
        If n_batches is None, we want to see all the examples possible
        for the given minibatch_size (possibly missing a few at the end of the dataset).
        """
        # substitute the defaults:
        if n_batches is None: n_batches = len(self) / minibatch_size
        # note: this relies on the iterator accepting (dataset, fieldnames,
        # minibatch_size, n_batches) arguments, as ArrayDataSet.Iterator does
        return DataSet.Iterator(self, fieldnames, minibatch_size, n_batches)

    def __getitem__(self,i):
        """
        dataset[i] returns the (i+1)-th example of the dataset.
        dataset[i:j] returns the subdataset with examples i,i+1,...,j-1.
        dataset[i:j:s] returns the subdataset with examples i,i+2,i+4...,j-2.
        dataset[[i1,i2,..,in]] returns the subdataset with examples i1,i2,...,in.
        """
        raise AbstractFunction()

    def __getslice__(self,*slice_args):
        """
        dataset[i:j] returns the subdataset with examples i,i+1,...,j-1.
        dataset[i:j:s] returns the subdataset with examples i,i+2,i+4...,j-2.
        """
        raise AbstractFunction()
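
# Hypothetical sketch (not called anywhere) of the indexing protocol above,
# e.g. for splitting a sliceable dataset into training and test subsets.
def _slicing_sketch(dataset, n_train):
    first_example = dataset[0]
    train_set = dataset[0:n_train]         # examples 0 .. n_train-1
    test_set = dataset[n_train:len(dataset)]
    every_other = dataset[0:len(dataset):2]
    a_few = dataset[[1, 3, 5]]             # arbitrary subset of examples
    return train_set, test_set, every_other, a_few, first_example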


class FiniteWidthDataSet(DataSet):
    """
    Virtual interface for datasets that have a finite width (number of fields),
    and thus return a list of fieldNames. 
    """
    def __init__(self):
        DataSet.__init__(self)

    def hasFields(self,*fields):
        has_fields=True
        fieldnames = self.fieldNames()
        for name in fields:
            if name not in fieldnames:
                has_fields=False
        return has_fields
                
    def fieldNames(self):
        """Return the list of field names that are supported by the iterators,
        and for which hasFields(fieldname) would return True."""
        raise AbstractFunction()


class RenamingDataSet(FiniteWidthDataSet):
    """A DataSet that wraps another one, and makes it look like the field names
    are different

    Renaming is done by a dictionary that maps new names to the old ones used in
    self.src.
    """
    def __init__(self, src, rename_dct):
        DataSet.__init__(self)
        self.src = src
        self.rename_dct = copy.copy(rename_dct)

    def fieldNames(self):
        return self.rename_dct.keys()
    
    def minibatches(self,
            fieldnames = DataSet.minibatches_fieldnames,
            minibatch_size = DataSet.minibatches_minibatch_size,
            n_batches = DataSet.minibatches_n_batches):
        dct = self.rename_dct
        new_fieldnames = [dct.get(f, f) for f in fieldnames]
        return self.src.minibatches(new_fieldnames, minibatch_size, n_batches)
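
# Hypothetical sketch (not called anywhere): exposing the fields of a source
# dataset under new names.  The rename_dct maps new names to the old ones, as
# described in the class docstring; 'input'/'x' and 'target'/'y' are placeholders.
def _renaming_sketch(src_dataset):
    renamed = RenamingDataSet(src_dataset, {'input': 'x', 'target': 'y'})
    for minibatch in renamed.minibatches(['input', 'target'], minibatch_size=10):
        pass
    return renamed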


# we may want ArrayDataSet defined in another python file

import numpy

def as_array_dataset(dataset):
    # Generally datasets can be efficient by making data fields overlap, but
    # this function doesn't know which fields overlap.  So, it should check if
    # dataset supports an as_array_dataset member function, and return that if
    # possible.
    if hasattr(dataset, 'as_array_dataset'):
        return dataset.as_array_dataset()

    raise NotImplementedError

    # The code below is an unfinished draft (currently unreachable) of the
    # general conversion: make ONE big minibatch with all the examples, to
    # separate the fields.
    n_examples = len(dataset)
    batch = dataset.minibatches(minibatch_size=len(dataset)).next()

    # Each field of the underlying dataset must be convertible to a numpy array of the same type
    # currently just double, but should use the smallest compatible dtype
    n_fields = len(batch)
    fieldnames = batch.fields.keys()
    total_width = 0
    type = None
    fields = LookupList()
    for i in xrange(n_fields):
        field = numpy.array(batch[i])
        assert field.shape[0]==n_examples
        width = field.shape[1]
        start=total_width
        total_width += width
        fields[fieldnames[i]]=slice(start,total_width,1)
    # many complicated things remain to be done:
    #  - find common dtype
    #  - decide what to do with extra dimensions if not the same in all fields
    #  - try to see if we can avoid the copy?

class ArrayDataSet(FiniteLengthDataSet,FiniteWidthDataSet,SliceableDataSet):
    """
    An ArrayDataSet behaves like a numpy array but adds the notion of named fields
    from DataSet (and the ability to view the values of multiple fields as an 'Example').
    It is a fixed-length and fixed-width dataset
    in which each element is a fixed-dimension numpy array or a number, hence the whole
    dataset corresponds to a numpy array. Fields
    must correspond to a slice of array columns or to a list of column numbers.
    If the dataset has fields,
    each 'example' is just a one-row ArrayDataSet, otherwise it is a numpy array.
    Any dataset can also be converted to a numpy array (losing the notion of fields)
    by the numpy.array(dataset) call.
    """

    class Iterator(LookupList):
        """An iterator over a finite dataset that implements wrap-around"""
        def __init__(self, dataset, fieldnames, minibatch_size, next_max):
            if fieldnames is None: fieldnames = dataset.fieldNames()
            LookupList.__init__(self, fieldnames, [0]*len(fieldnames))
            self.dataset=dataset
            self.minibatch_size=minibatch_size
            self.next_count = 0
            self.next_max = next_max
            self.current = -self.minibatch_size
            assert minibatch_size > 0
            if minibatch_size >= len(dataset):
                raise NotImplementedError()

        def __iter__(self): #makes for loop work
            return self

        @staticmethod
        def matcat(a, b):
            a0, a1 = a.shape
            b0, b1 = b.shape
            assert a1 == b1
            assert a.dtype is b.dtype
            rval = numpy.empty( (a0 + b0, a1), dtype=a.dtype)
            rval[:a0,:] = a
            rval[a0:,:] = b
            return rval

        def next_index(self):
            n_rows = self.dataset.data.shape[0]
            next_i = self.current+self.minibatch_size
            if next_i >= n_rows:
                next_i -= n_rows
            return next_i
        
        def next(self):

            #check for end-of-loop
            self.next_count += 1
            if self.next_count > self.next_max:
                raise StopIteration

            #determine the first and last elements of the minibatch slice we'll return
            n_rows = self.dataset.data.shape[0]
            self.current = self.next_index()
            upper = self.current + self.minibatch_size

            data = self.dataset.data

            if upper <= n_rows:
                #this is the easy case, we only need one slice
                dataview = data[self.current:upper]
            else:
                # the minibatch wraps around the end of the dataset
                dataview = data[self.current:]
                upper -= n_rows
                assert upper > 0
                dataview = self.matcat(dataview, data[:upper])

            self._values = [dataview[:, self.dataset.fields[f]]\
                    for f in self._names]
            return self


    def __init__(self, data, fields=None):
        """
        There are two ways to construct an ArrayDataSet: (1) from an
        existing dataset (which may result in a copy of the data in a numpy array),
        or (2) from a numpy.array (the data argument), along with an optional description
        of the fields (a LookupList of column slices (or column lists) indexed by field names).
        """
        self.data=data
        self.fields=fields
        rows, cols = data.shape

        if fields:
            for fieldname,fieldslice in fields.items():
                assert type(fieldslice) is int or isinstance(fieldslice,slice) or hasattr(fieldslice,"__iter__")
                if hasattr(fieldslice,"__iter__"): # is a sequence
                    for i in fieldslice:
                        assert type(i) is int
                elif isinstance(fieldslice,slice):
                    # make sure fieldslice.start and fieldslice.step are defined
                    start=fieldslice.start
                    step=fieldslice.step
                    if not start:
                        start=0
                    if not step:
                        step=1
                    if not fieldslice.start or not fieldslice.step:
                        fields[fieldname] = fieldslice = slice(start,fieldslice.stop,step)
                    # and coherent with the data array
                    assert fieldslice.start >= 0 and fieldslice.stop <= cols

    def minibatches(self,
            fieldnames = DataSet.minibatches_fieldnames,
            minibatch_size = DataSet.minibatches_minibatch_size,
            n_batches = DataSet.minibatches_n_batches):
        """
        If the fieldnames list is None, it means that we want to see ALL the fields.

        If the n_batches is None, we want to see all the examples possible
        for the given minibatch_size (possibly missing some near the end).
        """
        # substitute the defaults:
        if n_batches is None: n_batches = len(self) / minibatch_size
        return ArrayDataSet.Iterator(self, fieldnames, minibatch_size, n_batches)

    def fieldNames(self):
        """Return the list of field names that are supported by getattr and hasField."""
        return self.fields.keys()

    def __len__(self):
        """len(dataset) returns the number of examples in the dataset."""
        return len(self.data)
    
    def __getitem__(self,i):
        """
        dataset[i] returns the (i+1)-th Example of the dataset.
        If there are no fields the result is just a numpy array (for the i-th row of the dataset data matrix).
        dataset[i:j] returns the subdataset with examples i,i+1,...,j-1.
        dataset[i:j:s] returns the subdataset with examples i,i+2,i+4...,j-2.
        dataset[[i1,i2,..,in]] returns the subdataset with examples i1,i2,...,in.
        """
        if self.fields:
            fieldnames,fieldslices=zip(*self.fields.items())
            return Example(fieldnames,[self.data[i,fieldslice] for fieldslice in fieldslices])
        else:
            return self.data[i]

    def __getslice__(self,*args):
        """
        dataset[i:j] returns the subdataset with examples i,i+1,...,j-1.
        dataset[i:j:s] returns the subdataset with examples i,i+2,i+4...,j-2.
        """
        return ArrayDataSet(self.data.__getslice__(*args), fields=self.fields)

    def indices_of_unique_columns_used(self):
        """
        Return the unique indices of the columns actually used by the fields, and a boolean
        that signals (if True) that used columns overlap. If they do then the
        indices are not repeated in the result.
        """
        columns_used = numpy.zeros((self.data.shape[1]),dtype=bool)
        overlapping_columns = False
        for field_slice in self.fields.values():
            if sum(columns_used[field_slice])>0: overlapping_columns=True
            columns_used[field_slice]=True
        return [i for i,used in enumerate(columns_used) if used],overlapping_columns
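
    # For illustration (hypothetical layout): with fields 'x' -> slice(0,3) and
    # 'y' -> slice(2,4), columns 0,1,2 are used by 'x' and columns 2,3 by 'y',
    # so the method above would return ([0,1,2,3], True): column 2 is shared,
    # hence overlapping_columns is True.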

    def slice_of_unique_columns_used(self):
        """
        Return None if the indices_of_unique_columns_used do not form a slice. If they do,
        return that slice. It means that the columns used can be extracted
        from the data array without making a copy. If the fields overlap
        but their unique columns used form a slice, still return that slice.
        """
        columns_used,overlapping_columns = self.indices_of_unique_columns_used()
        if not columns_used:
            return None
        start = columns_used[0]
        stop = columns_used[-1]+1
        # the used columns form a slice only if they are evenly spaced
        if len(columns_used) > 1:
            step = columns_used[1]-columns_used[0]
        else:
            step = 1
        for previous,current in zip(columns_used[:-1],columns_used[1:]):
            if current-previous != step:
                return None
        return slice(start,stop,step)
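
# Hypothetical construction sketch (not called anywhere): wrapping a numpy
# array as an ArrayDataSet with two fields given as column slices, then
# iterating over it in minibatches.  The array shape and field layout are
# only an example.
def _array_dataset_sketch():
    data = numpy.zeros((100, 5))
    fields = LookupList(['input', 'target'], [slice(0, 4), slice(4, 5)])
    dataset = ArrayDataSet(data, fields)
    for minibatch in dataset.minibatches(['input', 'target'], minibatch_size=10):
        inputs, targets = minibatch['input'], minibatch['target']
    train_set, test_set = dataset[0:80], dataset[80:100]
    return train_set, test_set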
    
class ApplyFunctionDataSet(FiniteWidthDataSet):
    """
    A dataset that contains as fields the results of applying
    a given function (example-wise) to specified input_fields of a source
    dataset. The function should return a sequence whose elements will be stored in
    fields whose names are given in the output_fields list. If copy_inputs
    is True then the resulting dataset will also contain the fields of the source
    dataset. If accept_minibatches, then the function expects
    minibatches as arguments (what is returned by the minibatches
    iterator). In any case, the computations may be delayed until the examples
    of self are requested. If cache is True, then
    once the output fields for some examples have been computed, they
    are cached (to avoid recomputation if the same examples are again requested).
    """
    def __init__(self,src,function, input_fields, output_fields, copy_inputs=True, accept_minibatches=True, cache=True, compute_now=False):
        DataSet.__init__(self)
        self.src=src
        self.function=function
        assert src.hasFields(*input_fields)
        self.input_fields=input_fields
        self.output_fields=output_fields
        assert not (copy_inputs and compute_now and not hasattr(src,'fieldNames'))
        self.copy_inputs=copy_inputs
        self.accept_minibatches=accept_minibatches
        self.cache=cache
        self.compute_now=compute_now
        if compute_now:
            assert hasattr(src,'__len__') and len(src)>=0
            fieldnames = output_fields
            if copy_inputs: fieldnames = src.fieldNames() + output_fields
            if accept_minibatches:
                # make a single minibatch with all the inputs
                inputs = src.minibatches(input_fields,len(src)).next()
                # and apply the function to it, and transpose into a list of examples (field values, actually)
                self.cached_examples = zip(*Example(output_fields,function(*inputs)))
            else:
                # compute a list with one tuple per example, with the function outputs
                self.cached_examples = [ function(input) for input in src.zip(input_fields) ]
        elif cache:
            # maybe a fixed-size array kind of structure would be more efficient than a list
            # in the case where src is FiniteDataSet. -YB
            self.cached_examples = []

    def fieldNames(self):
        if self.copy_inputs:
            return self.output_fields + self.src.fieldNames()
        return self.output_fields
    
    def minibatches(self,
                    fieldnames = DataSet.minibatches_fieldnames,
                    minibatch_size = DataSet.minibatches_minibatch_size,
                    n_batches = DataSet.minibatches_n_batches):
        
        class Iterator(LookupList):

            def __init__(self,dataset,fieldnames=fieldnames):
                # fieldnames is bound as a default argument so that the
                # assignment below does not shadow the enclosing method's variable
                if fieldnames is None:
                    assert hasattr(dataset,"fieldNames")
                    fieldnames = dataset.fieldNames()
                self.example_index=0
                LookupList.__init__(self, fieldnames, [0]*len(fieldnames))
                self.dataset=dataset
                self.src_iterator=dataset.src.minibatches(list(set.union(set(fieldnames),set(dataset.input_fields))),
                                                          minibatch_size,n_batches)
                self.fieldnames_not_in_input = []
                if dataset.copy_inputs:
                    self.fieldnames_not_in_input = filter(lambda x: not x in dataset.input_fields, fieldnames)
                                                       
            def __iter__(self):
                return self

            def next_index(self):
                return self.src_iterator.next_index()
            
            def next(self):
                example_index = self.src_iterator.next_index()
                src_examples = self.src_iterator.next()
                if self.dataset.copy_inputs:
                    function_inputs = [src_examples[field_name] for field_name in self.dataset.input_fields]
                else:
                    function_inputs = src_examples
                if self.dataset.cached_examples:
                    cache_len=len(self.dataset.cached_examples)
                    if example_index<cache_len+minibatch_size:
                        outputs_list = self.dataset.cached_examples[example_index:example_index+minibatch_size]
                        # convert the minibatch list of examples
                        # into a list of fields each of which iterate over the minibatch
                        outputs = zip(*outputs_list)
                    else:
                        outputs = self.dataset.function(*function_inputs)
                        if self.dataset.cache:
                            # convert the list of fields, each of which can iterate over the minibatch
                            # into a list of examples in the minibatch (each of which is a list of field values)
                            outputs_list = zip(*outputs)
                            # copy the outputs_list into the cache
                            for i in xrange(cache_len,example_index):
                                self.dataset.cached_examples.append(None)
                            self.dataset.cached_examples += outputs_list
                else:
                    outputs = self.dataset.function(*function_inputs)
                
                return Example(self.fieldnames_not_in_input+self.dataset.output_fields,
                               [src_examples[field_name] for field_name in self.fieldnames_not_in_input]+outputs)
                               

        if fieldnames is not None:
            for fieldname in fieldnames:
                assert fieldname in self.output_fields or self.src.hasFields(fieldname)
        return Iterator(self)
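
# Hypothetical sketch (not called anywhere) of deriving a dataset whose output
# fields are computed on demand from an existing field, using the apply_function
# mechanism documented in DataSet.apply_function.  The function, field names and
# scaling factor are placeholders.
def _apply_function_sketch(dataset):
    derived = dataset.apply_function(lambda x: (2 * x,),
                                     input_fields=['x'],
                                     output_fields=['scaled_x'])
    for example in derived:
        pass
    return derived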

    
def supervised_learning_dataset(src_dataset,input_fields,target_fields,weight_field=None):
    """
    Wraps an arbitrary DataSet into one for supervised learning tasks by forcing the
    user to define a set of fields as the 'input' field and a set of fields
    as the 'target' field. Optionally, a single weight_field can also be defined.
    """
    args = ((input_fields,'input'),(target_fields,'target'))
    if weight_field: args+=(([weight_field],'weight'),)
    return src_dataset.rename(*args)