changeset 1392:2d3cbbb36178

merge
author gdesjardins
date Mon, 20 Dec 2010 18:09:11 -0500
parents 124b939d997f (current diff) 0ff6c613cdf0 (diff)
children 8ecc6da87350
diffstat 11 files changed, 532 insertions(+), 214 deletions(-)
--- a/doc/conf.py	Mon Dec 20 18:08:48 2010 -0500
+++ b/doc/conf.py	Mon Dec 20 18:09:11 2010 -0500
@@ -33,6 +33,11 @@
 except ImportError:
     pass
 
+try:
+    import numpydoc
+    extensions.append('numpydoc')
+except ImportError:
+    pass
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['.templates']
--- a/doc/formulas.txt	Mon Dec 20 18:08:48 2010 -0500
+++ b/doc/formulas.txt	Mon Dec 20 18:09:11 2010 -0500
@@ -20,4 +20,8 @@
 .. automodule:: pylearn.formulas.noise
     :members:
  
+pylearn.formulas.regularization
+-------------------------------
+.. automodule:: pylearn.formulas.regularization
+    :members:
 
--- a/doc/v2_planning/code_review.txt	Mon Dec 20 18:08:48 2010 -0500
+++ b/doc/v2_planning/code_review.txt	Mon Dec 20 18:09:11 2010 -0500
@@ -10,13 +10,91 @@
 TODO
 ----
 
-- Install Review Board and try it
-    - test review of merge
-    - how work with branch
-- Write our proposed politic
+- Test the 2 proposed workflows (Review Board and GitHub)
+
+Reasons for code review
+-----------------------
+
+- We want at least 2 people to read all code. That means we need a reviewer.
+- This helps to find better solutions to problems.
+- This helps to train people on our tools and framework.
+- This gives better code and fewer bugs in the end (everybody makes mistakes).
+
+Proposed Policy
+---------------
+
+There are 2 proposals we want to test:
+
+- GitHub fork/merge/pull request for new Pylearn
+    - When someone opens a pull request, someone else does the review.
+    - Everyone should work within their own Pylearn fork, and submit pull
+      requests to merge their code into the master Pylearn repository.
+- Review Board post-commit for Theano
+    - If the author does not want a commit to be reviewed, they should say so in the commit message.
+        - Useful for the official repo when we commit something that is disabled by default and we want to split it into many commits, or start using it even if it is not fully tested.
+        - The reviewer should still check that it is not enabled by default and that, when enabled, it prints a warning (once per job execution).
+    - We check all commits to Theano and Jobman (official tools).
+    - If the official reviewer is not the right person for the task (GPU code, ...),
+      he has the responsibility to find someone else (ask people in the lab, the mailing list, ...).
+    - The official reviewer should:
+         - Review all code (see the checklist) and ask an expert when needed.
+         - Commit easy fixes and ask the original committer to review them
+           (non-trivial fixes are the responsibility of the original committer).
+    - The official reviewer is chosen among the Theano users in the lab with commit rights.
+    - In this test, we ask the official reviewer to review all commits from one day.
+    - We will set up a list of experts by domain (GPU, optimization, algorithms, ...).
+    - After this test, we may want to make it longer (1 week?)
+         - If somebody breaks the build bot, he becomes the reviewer for the next week/days.
+         - Maximum of one week per month.
+         - If there is a busy week, or a rush that involves everybody in the lab, we can change reviewers more frequently.
+         - If a commit has problems, the original reviewer should follow up on it.
 
-Some system that we should check:
----------------------------------
+Other general comments:
+
+- We should never be the official reviewer of our own code. When this happens,
+  ask the reviewer for the next day to take care of it.
+- Experimental code (not in Theano) may be tagged as not being reviewed (in
+  the commit message).
+
+Checklist for review
+--------------------
+
+- Are there tests and do they cover all cases?
+- Is there documentation in the code file?
+    - Should public (HTML) documentation be updated as well?
+- Are additions well integrated into our framework?
+- Is the code placed in the right files, and the right place in those files?
+- Try not to duplicate code.
+- Is the code clear and easy to understand?
+- Are there comments describing what is being done?
+- Answer potential questions from the committer (in the code); this can also help to train people.
+- Check for typos.
+- No debug code (print, breakpoints, ...).
+- If the commit message says not to review, check that the code is disabled by default and that it prints a warning when enabled.
+- Check for conformity to our coding guidelines.
+
+Some systems that we checked
+----------------------------
+
+- `Review Board <http://www.reviewboard.org>`_
+    - Interesting, but some questions remain (how well it integrates with hg,
+      notably)
+    - Some advantages over Google Code (comments on multi-line chunks, list of
+      unreviewed commits, nicer appearance, handles many repositories and keeps
+      them grouped together easily)
+    - Fred will install it so we can test it more thoroughly
+- `GitHub pull request <https://github.com/blog/712-pull-requests-2-0>`_
+    - pre-commit review
+
+- `Google Code <http://code.google.com/p/support/wiki/CodeReviews>`_
+    - Test bench with a clone of Theano at
+      http://code.google.com/p/theanoclone/
+    - post-commit
+    - no list of unreviewed commits
+    - no Python syntax highlighting
+    - awkward per-line commenting
+    - diffs of merges seem buggy
+    - Maybe
+
+- `Kiln <http://fogcreek.com/Kiln/LearnMore.html?section=StartReviewsEffortlessly>`_
 
 - `rietveld <http://code.google.com/p/rietveld/>`_
     - Made by Guido van Rossum, seam basic and svn only
@@ -26,13 +104,6 @@
     - git only
     - No
 
-- `Review Board <http://www.reviewboard.org>`_
-    - Interesting, but some questions remain (how well it integrates with hg,
-      notably)
-    - Some advantages over Google code (comment on multi-line chunks, list of
-      unreviewed commits, more esthetics, handle many repo, keep assemble easily)
-    - Fred will install it so we can test it more thoroughly
-
 - `Code Striker <http://codestriker.sourceforge.net/>`_
     - hg added? David told in May 2009 it can do it easily.
     - Seems less interesting than Review Board
@@ -45,21 +116,16 @@
     - Could be integrated with the current ticket system?, not maintained, review code in general, not commit.
     - No
 
+- `Trac CodeReviewPlugin <http://trac-hacks.org/wiki/CodeReviewPlugin/Concepts>`_
+
+- `Trac ExoWebCodeReviewPlugin <http://trac-hacks.org/wiki/ExoWebCodeReviewPlugin>`_
+
 - `feature request at assembla <http://feedback.assembla.com/forums/5433-feature-requests/suggestions/253297-add-a-code-review-tool-e-g-reviewboard->`_
     - No (we will not wait until the feature is added...)
 
 - `JCR <http://jcodereview.sourceforge.net/>`_
     - No
 
-- `Google Code <http://code.google.com/>`_
-    - Test bench with a clone of Theano at
-      http://code.google.com/p/theanoclone/
-    - post-commit
-    - no list of not reviewed commit
-    - no python syntax highlight
-    - weird comment by line
-    - diff of merge seam bugged
-    - Maybe
 
 What we could want from our code review
 ---------------------------------------
@@ -96,44 +162,3 @@
 
 We seam to do Over-the-shoulder, email and variant of pair programming from time to time. Some people read rapidly the commit of Theano and Pylearn.
 
-Reason for the code review
---------------------------
-
-- We want at least 2 people to read all code. That mean we need a reviewer
-- This help to find better solution to problem
-- This help to train people on our tools and framework.
-
-Check list for review
----------------------
-
-- Is their tests and do they test all case?
-- Is their documentation in the file?
-    - Do this need doc in the html doc?
-- Is the addition well integrated into our framework
-- Is the code well placed in the right files and right place in them?
-- Try to don't duplicate code
-- Is the code clear/comprehensible
-- Are the comment describing what is being done?
-- Answer question by de commiter, this can also serve to train people
-- Check for typo
-- No debug code(print, breakpoint,...)
-- If commit message tell to don't review, check that the code is disabled by default and that when enabled print a warning.
-
-Proposed Politic
-----------------
-
-- For each commit message, if the author don't want this commit to be reviewed, tell it in the message
-   - Usefull for experimental repository, not Theano
-   - Usefull for Official repo when we commit something that is disabled by default and we want to split in many commits or start using event if not fully tested.
-   - Reviewer should check that the check is not enabled by default and when enabled should print a warning.
-- We check all commit to Theano, Pylearn and Jobman.(Official tools)
-- We check experimental repos when asked.
-- One official reviewer per week.
-    - He review all code and ask expert when needed.
-    - Should check the check list again all review.
-    - We choose the reviewer in the theano user of the lab with commit right.
-    - On fait une list d'expert par demain de problem(gpu, optimization, algo,...)
-    - If some body break the build bot, it is him the reviewer for the next week
-    - Maximum of one week by mount.
-    - If their is big week or during rush that include every body of the lab, we can change more frequently.
-    - If a commit have problem, it is the original reviewer that should make the follow up.
--- a/doc/v2_planning/datalearn.txt	Mon Dec 20 18:08:48 2010 -0500
+++ b/doc/v2_planning/datalearn.txt	Mon Dec 20 18:09:11 2010 -0500
@@ -1,11 +1,14 @@
 DataLearn: How to plug Datasets & Learner together?
 ===================================================
 
+
 Participants
 ------------
 - Yoshua
 - Razvan
 - Olivier D [leader]
+- James
+
 
 High-Level Objectives
 ---------------------
@@ -18,6 +21,7 @@
      framework
    * It should be possible to replace [parts of] this framework with C++ code
 
+
 Theano-Like Data Flow
 ---------------------
 
@@ -37,6 +41,235 @@
 individual features. How to handle field names, non-tensor-like data, etc. is
 a very important topic that is not yet discussed in this file.
 
+The main idea in this proposal is to consider some Data object as a Theano
+Variable (we call 'data' an object that is either a sample, or a collection of
+samples, i.e. a dataset). Because the Data API (for the Machine Learning user)
+may conflict with the Variable API, in the following we take the approach that
+a data object contains a Theano variable accessible through data.variable
+(instead of Data being a subclass of Variable). For instance, a basic way of
+printing the content of a dataset could be:
+
+    .. code-block:: python
+
+        dataset = NumpyDataset(some_numpy_array)  # View array as dataset.
+        index = theano.tensor.lscalar()
+        get_sample_value = theano.function([index], dataset[index].variable)
+        for i in xrange(len(dataset)):
+            print get_sample_value(i)
+
+There may also exist some helper function for the common task of iterating
+over the numeric values found in a dataset, which would allow one to simply
+write:
+
+    .. code-block:: python
+
+        for sample_value in theano_iterate(dataset):
+            print sample_value
+
+where the theano_iterate function would take care of the extra work:
+
+    .. code-block:: python
+
+        def theano_iterate(dataset, index=None, condition=None,
+                           stop_exceptions=(IndexError, )):
+            if index is None:
+                index = theano.tensor.lscalar()
+            if condition is None:
+                condition = index < len(dataset)
+            get_value = theano.function([index],
+                                        [dataset[index].variable, condition])
+            i = 0
+            while True:
+                try:
+                    output, cond = get_value(i)
+                except stop_exceptions:
+                    break
+                i += 1
+                if cond:
+                    yield output
+                else:
+                    break
+
+Now imagine a similar situation (wanting to iterate over a dataset) where the
+dataset is the result of some transformation parameterized by another
+Variable. For instance, let's say there exists a GetColumnDataset class such
+that GetColumnDataset(dataset, index_variable) is a dataset whose associated
+variable is dataset.variable[:, index_variable] (assuming here that
+dataset.variable is a matrix variable). One would like to write:
+
+    .. code-block:: python
+
+        for j in xrange(dataset.n_columns()):
+            print 'Printing column %s' % j
+            for sample_value in theano_iterate(GetColumnDataset(dataset, j)):
+                print sample_value
+
+Although this would work, note that it would compile a new Theano function
+each time theano_iterate is called (one for each value of j), which may be a
+performance bottleneck. One way to avoid this is to just ignore the helper
+function and manually compile a function that also takes the column index as
+an input parameter:
+
+    .. code-block:: python
+
+        sample_idx = theano.tensor.lscalar()
+        column_idx = theano.tensor.lscalar()
+        get_value = theano.function(
+            [sample_idx, column_idx],
+            GetColumnDataset(dataset, column_idx)[sample_idx].variable)
+        for j in xrange(dataset.n_columns()):
+            print 'Printing column %s' % j
+            for i in xrange(len(dataset)):
+                print get_value(i, j)
+
+It is, however, possible to use the helper function if it can accept an extra
+argument ('givens') to be provided to the Theano compilation step:
+
+    .. code-block:: python
+
+        def theano_iterate(dataset, index=None, condition=None,
+                           stop_exceptions=(IndexError, ),
+                           givens={}):
+            (...)
+            get_value = theano.function([index],
+                                        [dataset[index].variable, condition],
+                                        givens=givens)
+            (...)
+        
+        column_idx = theano.tensor.lscalar()
+        shared_column_idx = theano.shared(0)
+        iterate = theano_iterate(GetColumnDataset(dataset, column_idx),
+                                 givens={column_idx: shared_column_idx})
+        for j in xrange(dataset.n_columns()):
+            print 'Printing column %s' % j
+            shared_column_idx.value = j
+            for sample_value in iterate:
+                print sample_value
+
+Note there are a couple of oddities in the example above:
+   1. The way theano_iterate was written, it is not possible to iterate over it
+      more than once. This is easily fixed by making it an iterable object (a
+      sketch follows this list).
+   2. It would make more sense here to remove 'column_idx' and directly use
+      GetColumnDataset(dataset, shared_column_idx), in which case there is no
+      need to use the 'givens' keyword. But the goal here is to illustrate a
+      situation where one is given a dataset defined from a symbolic variable,
+      and we want to compute it for different numeric values of this variable.
+      This dataset may have been provided by code the user has no control
+      over, hence the need for 'givens' to replace the variable with a shared one
+      whose value can be updated between successive calls to the same
+      function.
+
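+A minimal sketch of such an iterable wrapper (hypothetical, not part of any
+existing API; it simply reuses the compiled Theano function across
+iterations) could look like:
+
+    .. code-block:: python
+
+        class TheanoIterate(object):
+            # Iterable variant of theano_iterate: the Theano function is
+            # compiled once in the constructor, and iteration can be
+            # restarted as many times as needed.
+            def __init__(self, dataset, index=None, condition=None,
+                         stop_exceptions=(IndexError, ), givens={}):
+                if index is None:
+                    index = theano.tensor.lscalar()
+                if condition is None:
+                    condition = index < len(dataset)
+                self.stop_exceptions = stop_exceptions
+                self.get_value = theano.function(
+                    [index],
+                    [dataset[index].variable, condition],
+                    givens=givens)
+
+            def __iter__(self):
+                i = 0
+                while True:
+                    try:
+                        output, cond = self.get_value(i)
+                    except self.stop_exceptions:
+                        break
+                    i += 1
+                    if cond:
+                        yield output
+                    else:
+                        break
+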
+In summary:
+    - Data (samples and datasets) are basically Theano Variables, and a data
+      transformation is an Op.
+    - When writing code that requires some data numeric value, one has to compile
+      a Theano function to obtain it. This is done either manually or through some
+      helper Pylearn functions for common tasks. In both cases, the user should
+      have enough control to be able to obtain an efficient implementation.
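+
+For concreteness, a hypothetical GetColumnDataset (as used in the examples
+above, and not an existing class) might be little more than a thin wrapper
+that builds a new symbolic variable from that of its input dataset:
+
+    .. code-block:: python
+
+        class Sample(object):
+            # Minimal sample wrapper exposing only the .variable attribute.
+            def __init__(self, variable):
+                self.variable = variable
+
+        class GetColumnDataset(object):
+            # Hypothetical dataset transformation: view one column of a
+            # dataset whose variable is a matrix. Only the pieces used in
+            # the examples above are sketched.
+            def __init__(self, dataset, column_index):
+                self.dataset = dataset
+                self.variable = dataset.variable[:, column_index]
+
+            def __len__(self):
+                return len(self.dataset)
+
+            def __getitem__(self, index):
+                return Sample(self.variable[index])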
+
+
+What About Learners?
+--------------------
+
+The discussion above only mentioned datasets, but not learners. The learning
+part of a learner is not a main concern (currently). What matters most w.r.t.
+what was discussed above is how a learner takes as input a dataset and outputs
+another dataset that can be used with the dataset API.
+
+A Learner may be able to compute various things. For instance, a Neural
+Network may output a ``prediction`` vector (whose elements correspond to
+estimated probabilities of each class in a classification task), as well as a
+``cost`` vector (whose elements correspond to the penalized NLL, the NLL alone
+and the classification error). We would want to be able to build a dataset
+that contains some of these quantities computed on each sample in the input
+dataset.
+
+The Neural Network code would then look something like this:
+
+    .. code-block:: python
+
+        class NeuralNetwork(Learner):
+
+            # The decorator below is responsible for turning a function that
+            # takes a symbolic sample as input, and outputs a Theano variable,
+            # into a function that can also be applied on numeric sample data,
+            # or symbolic datasets.
+            # Other approaches than a decorator are possible (e.g. using
+            # different function names).
+            @datalearn
+            def compute_prediction(self, sample):
+                return softmax(theano.tensor.dot(self.weights, sample.input))
+
+            @datalearn
+            def compute_nll(self, sample):
+                return - log(self.compute_prediction(sample)[sample.target])
+
+            @datalearn
+            def compute_penalized_nll(self, sample):
+                return (self.compute_nll(sample) +
+                        theano.tensor.sum(self.weights**2))
+
+            @datalearn
+            def compute_class_error(self, sample):
+                probabilities = self.compute_prediction(sample)
+                predicted_class = theano.tensor.argmax(probabilities)
+                return predicted_class != sample.target
+
+            @datalearn
+            def compute_cost(self, sample):
+                return theano.tensor.concatenate([
+                        self.compute_penalized_nll(sample),
+                        self.compute_nll(sample),
+                        self.compute_class_error(sample),
+                        ])
+            
+The ``@datalearn`` decorator would allow such a Learner to be used e.g. like
+this:
+
+    .. code-block:: python
+
+        nnet = NeuralNetwork()
+        # Symbolic dataset that represents the output on symbolic input data.
+        predict_dataset = nnet.compute_prediction(dataset)
+        for sample in dataset:
+            # Symbolic sample that represents the output on a single symbolic
+            # input sample.
+            predict_sample = nnet.compute_prediction(sample)
+        # Numeric prediction.
+        predict_numeric = nnet.compute_prediction({'input': numpy.zeros(10)})
+        # Combining multiple symbolic outputs.
+        multiple_fields_dataset = ConcatDataSet([
+                nnet.compute_prediction(dataset),
+                nnet.compute_cost(dataset),
+                ])
+        
+In the code above, if one wants to obtain the numeric value of an element of
+``multiple_fields_dataset``, the Theano function being compiled should be able
+to optimize computations so that the simultaneous computation of
+``prediction`` and ``cost`` is done efficiently.
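+
+As an illustration of why this matters, here is a small sketch in plain
+Theano (independent of the hypothetical API above): when several outputs are
+compiled into a single function, Theano's graph optimizations compute the
+shared sub-expressions only once.
+
+    .. code-block:: python
+
+        import numpy
+        import theano
+        import theano.tensor as tensor
+
+        x = tensor.vector('x')
+        w = theano.shared(numpy.ones(10), name='w')
+        prediction = tensor.nnet.sigmoid(tensor.dot(w, x))
+        nll = -tensor.log(prediction)
+        penalized_nll = nll + tensor.sum(w ** 2)
+        # One compiled function with three outputs: the dot product and the
+        # sigmoid are computed once and reused for all outputs.
+        f = theano.function([x], [prediction, nll, penalized_nll])
+        prediction_value, nll_value, penalized_nll_value = f(numpy.ones(10))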
+
+
+Open Problems
+-------------
+
+The above is not yet a practical proposal. Investigation of the following
+topics is still missing:
+
+    - Datasets whose variables are not matrices (e.g. large datasets that do not
+      fit in memory, samples that are not fixed-length vectors, ...)
+    - Field names.
+    - Typical input / target / weight split.
+    - Learners whose output on a dataset cannot be obtained by computing outputs
+      on individual samples (e.g. a Learner that ranks samples based on pair-wise
+      comparisons).
+    - Code parallelization, stop & restart.
+    - Modular C++ implementation without Theano.
+    - How do we take care of model learning within such a Theano graph?
+    - ...
+
+
+Previous Introduction (deprecated)
+----------------------------------
+
 A question we did not discuss much is to which extent the architecture could
 be "theanified", i.e. whether a whole experiment could be defined as a Theano
 graph on which high level optimizations could be made possible, while also
@@ -122,85 +355,6 @@
 sensible behavior for those who do not want to worry about it. Whether this is
 possible / desirable is still to-be-determined.
 
-What About Learners?
---------------------
-
-The discussion above only mentioned datasets, but not learners. The learning
-part of a learner is not a main concern (currently). What matters most w.r.t.
-what was discussed above is how a learner takes as input a dataset and outputs
-another dataset that can be used with the dataset API.
-
-A Learner may be able to compute various things. For instance, a Neural
-Network may output a ``prediction`` vector (whose elements correspond to
-estimated probabilities of each class in a classification task), as well as a
-``cost`` vector (whose elements correspond to the penalized NLL, the NLL alone
-and the classification error). We would want to be able to build a dataset
-that contains some of these quantities computed on each sample in the input
-dataset.
-
-The Neural Network code would then look something like this:
-
-    .. code-block:: python
-
-        class NeuralNetwork(Learner):
-
-            # The decorator below is reponsible for turning a function that
-            # takes a symbolic sample as input, and outputs a Theano variable,
-            # into a function that can also be applied on numeric sample data,
-            # or symbolic datasets.
-            # Other approaches than a decorator are possible (e.g. using
-            # different function names).
-            @datalearn(..)
-            def compute_prediction(self, sample):
-                return softmax(theano.tensor.dot(self.weights, sample.input))
-
-            @datalearn(..)
-            def compute_nll(self, sample):
-                return - log(self.compute_prediction(sample)[sample.target])
-
-            @datalearn(..)
-            def compute_penalized_nll(self, sample):
-                return (self.compute_nll(self, sample) +
-                        theano.tensor.sum(self.weights**2))
-
-            @datalearn(..)
-            def compute_class_error(self, sample):
-                probabilities = self.compute_prediction(sample)
-                predicted_class = theano.tensor.argmax(probabilities)
-                return predicted_class != sample.target
-
-            @datalearn(..)
-            def compute_cost(self, sample):
-                return theano.tensor.concatenate([
-                        self.compute_penalized_nll(sample),
-                        self.compute_nll(sample),
-                        self.compute_class_error(sample),
-                        ])
-            
-The ``@datalearn`` decorator would allow such a Learner to be used e.g. like
-this:
-
-    .. code-block:: python
-
-        nnet = NeuralNetwork()
-        # Symbolic dataset that represents the output on symbolic input data.
-        predict_dataset = nnet.compute_prediction(dataset)
-        for sample in dataset:
-            # Symbolic sample that represents the output on a single symbolic
-            # input sample.
-            predict_sample = nnet.compute_prediction(sample)
-        # Numeric prediction.
-        predict_numeric = nnet.compute_prediction({'input': numpy.zeros(10)})
-        # Combining multiple symbolic outputs.
-        multiple_fields_dataset = ConcatDataSet([
-                nnet.compute_prediction(dataset),
-                nnet.compute_cost(dataset),
-                ])
-        
-In the code above, if one wants to obtain the numeric value of an element of
-``multiple_fields_dataset``, the Theano function being compiled should be able
-to optimize computations so that the simultaneous computation of
-``prediction`` and ``cost`` is done efficiently.
 
 Discussion: Are Datasets Variables / Ops?
 -----------------------------------------
@@ -264,6 +418,7 @@
 numeric function, and dataset in this case is the result of some
 computations on a initial dataset.
 I would differentiate the two approaches (1) and (2) as :
+
  - first of all whatever you can do with (1) you can do with (2)
  - approach (1) hides the fact that you are working with symbolic graphs.
    You apply functions to datasets, and when you want to see values a
@@ -390,6 +545,7 @@
 and valid options.
 </Razvan comments>
 
+
 Discussion: Fixed Parameters vs. Function Arguments
 ---------------------------------------------------
 
@@ -534,6 +690,7 @@
   once. Maybe this can be solved at the Theano level with an efficient
   function cache?
 
+
 Discussion: Dataset as Learner Ouptut
 -------------------------------------
 
--- a/pylearn/datasets/embeddings/parameters.py	Mon Dec 20 18:08:48 2010 -0500
+++ b/pylearn/datasets/embeddings/parameters.py	Mon Dec 20 18:09:11 2010 -0500
@@ -1,8 +1,8 @@
 """
 Locations of the embedding data files.
 """
-WEIGHTSFILE     = "/home/fringant2/lisa/data/word_embeddings.collobert-and-weston/lm-weights.txt"
-VOCABFILE       = "/home/fringant2/lisa/data/word_embeddings.collobert-and-weston/words.asc"
+WEIGHTSFILE     = "/data/lisa/data/word_embeddings.collobert-and-weston/lm-weights.txt"
+VOCABFILE       = "/data/lisa/data/word_embeddings.collobert-and-weston/words.asc"
 #WEIGHTSFILE     = "/home/joseph/data/word_embeddings.collobert-and-weston/lm-weights.txt"
 #VOCABFILE       = "/home/joseph/data/word_embeddings.collobert-and-weston/words.asc"
 NUMBER_OF_WORDS = 30000
--- a/pylearn/datasets/miniblocks.py	Mon Dec 20 18:08:48 2010 -0500
+++ b/pylearn/datasets/miniblocks.py	Mon Dec 20 18:09:11 2010 -0500
@@ -17,7 +17,7 @@
 
     #from plearn.pyext import pl
     #data = pl.AutoVMatrix(filename='/u/delallea/LisaPLearn/UserExp/delallea/perso/gen_compare/1DBall_12.amat').getMat()
-    #data = pl.AutoVMatrix(filename='/home/fringant2/lisa/delallea/python_modules/LeDeepNet/mnist_binarized.pmat').getMat()
+    #data = pl.AutoVMatrix(filename='/data/lisa/exp/delallea/python_modules/LeDeepNet/mnist_binarized.pmat').getMat()
     #input = data
 
     # Note that the target being returned seems to be a dummy target. So
--- a/pylearn/datasets/smallNorb.py	Mon Dec 20 18:08:48 2010 -0500
+++ b/pylearn/datasets/smallNorb.py	Mon Dec 20 18:09:11 2010 -0500
@@ -4,7 +4,7 @@
 from pylearn.datasets.config import data_root
 
 #Path = '/u/bergstrj/pub/data/smallnorb'
-#Path = '/home/fringant2/lisa/louradoj/data/smallnorb'
+#Path = '/data/lisa/datasmallnorb'
 #Path = '/home/louradou/data/norb'
 
 class Paths(object):
--- a/pylearn/datasets/tinyimages.py	Mon Dec 20 18:08:48 2010 -0500
+++ b/pylearn/datasets/tinyimages.py	Mon Dec 20 18:09:11 2010 -0500
@@ -9,6 +9,8 @@
 import PIL.Image
 import numpy
 
+import pylearn.io.image_tiling
+
 logger = logging.getLogger('pylearn.datasets.tinyimages')
 
 def sorted_listdir(*path):
@@ -61,20 +63,39 @@
         yield it.next()
         i +=1
 
+
+def arrange_first_N_into_tiling(R,C, filename):
+    R=int(R)
+    C=int(C)
+    A = numpy.asarray([i.copy() for i,ii in zip(image_generator(), xrange(R*C))],
+            dtype='float32')
+    print A.shape
+    A.shape = (R*C, 32*32,3)
+    pylearn.io.image_tiling.save_tiled_raster_images(
+        pylearn.io.image_tiling.tile_raster_images(
+            (A[:,:,0], A[:,:,1], A[:,:,2], None),
+            (32,32)),
+        filename)
+
+
 n_images = 1608356 
 
-def main():
-    def iter_len(x):
-        i = 0
-        for xx in x:
-            i += 1
-        return i
-    n_files = iter_len(iterate_over_filenames())
-    print 'got %i files' % n_files
-    assert n_images == n_files
+def main(argv=[]):
+    if argv:
+        arrange_first_N_into_tiling( argv[0], argv[1], argv[2])
+    else:
+        def iter_len(x):
+            i = 0
+            for xx in x:
+                i += 1
+            return i
+        n_files = iter_len(iterate_over_filenames())
+        print 'got %i files' % n_files
+        assert n_images == n_files
 
-    for p in load_first_N(10):
-        load_image(os.path.join(*p))
+        for p in load_first_N(10):
+            load_image(os.path.join(*p))
+
 
 if __name__ == '__main__':
-    sys.exit(main())
+    sys.exit(main(sys.argv[1:]))
--- a/pylearn/formulas/activations.py	Mon Dec 20 18:08:48 2010 -0500
+++ b/pylearn/formulas/activations.py	Mon Dec 20 18:09:11 2010 -0500
@@ -24,6 +24,7 @@
     function of the input x.
 
     .. math::
+
         \\textrm{sigmoid}(x) = \\frac{1}{1 + e^x}
 
     The image of :math:`\\textrm{sigmoid}(x)` is the open interval (0,
@@ -31,13 +32,18 @@
     point representations, :math:`\\textrm{sigmoid}(x)` will lie in the
     closed range [0, 1].
 
-    :param x: tensor-like (a Theano variable with type theano.Tensor,
-              or a value that can be converted to one) :math:`\in
-              \mathbb{R}^n`
+    Parameters
+    ----------
+    x : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
-    :return: a Theano variable with the same shape as the input, where
-             the sigmoid function is mapped to each element of the
-             input x.
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the sigmoid function is mapped to each element of the 
+        input `x`.
+
     """
     return theano.tensor.nnet.sigmoid(x)
 
@@ -52,6 +58,7 @@
     tangent) of the input x.
 
     .. math::
+
         \\textrm{tanh}(x) = \\frac{e^{2x} - 1}{e^{2x} + 1}
 
     The image of :math:`\\textrm{tanh}(x)` is the open interval (-1,
@@ -59,13 +66,16 @@
     point representations, :math:`\\textrm{tanh}(x)` will lie in the
     closed range [-1, 1].
 
-    :param x: tensor-like (a Theano variable with type theano.Tensor,
-              or a value that can be converted to one) :math:`\in
-              \mathbb{R}^n`
+    Parameters
+    ----------
+    x : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
-    :return: a Theano variable with the same shape as the input, where
-             the tanh function is mapped to each element of the input
-             x.
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the tanh function is mapped to each element of the input `x`.
     """
     return theano.tensor.tanh(x)
 
@@ -81,6 +91,7 @@
     TODO: where does 1.759 come from? why is it normalized like that?
 
     .. math::
+
         \\textrm{tanh\_normalized}(x) = 1.759\\textrm{ tanh}\left(\\frac{2x}{3}\\right)
 
     The image of :math:`\\textrm{tanh\_normalized}(x)` is the open
@@ -90,13 +101,17 @@
     closed range [-1.759, 1.759]. The exact bound depends on the
     precision of the floating point representation.
 
-    :param x: tensor-like (a Theano variable with type theano.Tensor,
-              or a value that can be converted to one) :math:`\in
-              \mathbb{R}^n`
+    Parameters
+    ----------
+    x : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
-    :return: a Theano variable with the same shape as the input, where
-             the tanh\_normalized function is mapped to each element of
-             the input x.
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the tanh_normalized function is mapped to each element of 
+        the input `x`.
     """
     return 1.759*theano.tensor.tanh(0.6666*x)
 
@@ -111,6 +126,7 @@
     hyperbolic tangent of x.
 
     .. math::
+
         \\textrm{abs\_tanh}(x) = |\\textrm{tanh}(x)|
 
     The image of :math:`\\textrm{abs\_tanh}(x)` is the interval [0, 1),
@@ -118,13 +134,17 @@
     point representations, :math:`\\textrm{abs\_tanh}(x)` will lie in
     the range [0, 1].
 
-    :param x: tensor-like (a Theano variable with type theano.Tensor,
-              or a value that can be converted to one) :math:`\in
-              \mathbb{R}^n`
+    Parameters
+    ----------
+    x : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
-    :return: a Theano variable with the same shape as the input, where
-             the abs_tanh function is mapped to each element of the
-             input x.
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the abs_tanh function is mapped to each element of 
+        the input `x`.
     """
     return theano.tensor.abs_(theano.tensor.tanh(x))
 
@@ -140,6 +160,7 @@
     TODO: where does 1.759 come from? why is it normalized like that?
 
     .. math::
+
         \\textrm{abs\_tanh\_normalized}(x) = \left|1.759\\textrm{ tanh}\left(\\frac{2x}{3}\\right)\\right|
 
     The image of :math:`\\textrm{abs\_tanh\_normalized}(x)` is the range
@@ -149,13 +170,17 @@
     approximative closed range [0, 1.759]. The exact upper bound
     depends on the precision of the floating point representation.
 
-    :param x: tensor-like (a Theano variable with type theano.Tensor,
-              or a value that can be converted to one) :math:`\in
-              \mathbb{R}^n`
+    Parameters
+    ----------
+    x : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
-    :return: a Theano variable with the same shape as the input, where
-             the abs_tanh_normalized function is mapped to each
-             element of the input x.
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the abs_tanh_normalized function is mapped to each
+        element of the input `x`.
     """
     return theano.tensor.abs_(1.759*theano.tensor.tanh(0.6666*x))
 
@@ -167,13 +192,20 @@
     Returns a symbolic variable that computes the softsign of ``input``.
     
     .. math::
+
                 f(input) = \\frac{input}{1.0 + |input|}
 
-    :type input:  tensor-like
-    :param input: input tensor to which softsign should be applied
-    :rtype:       Theano variable
-    :return:      tensor obtained after applying the softsign function
+    Parameters
+    ----------
+    input : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the softsign function is mapped to each
+        element of `input`.
     """
     return input/(1.0 + tensor.abs_(input))
 
@@ -186,11 +218,17 @@
     .. math::
                 f(input) = \left| \\frac{input}{1.0 +|input|} \\right|
 
-    :type input:  tensor-like
-    :param input: input tensor to which softsign should be applied
-    :rtype:       Tensor variable
-    :return:      tensor obtained by taking the absolute value of softsign 
-                  of the input
+    Parameters
+    ----------
+    input : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
+
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the absolute value of the softsign function is mapped to each
+        element of `input`.
     """
     return tensor.abs_(input)/(1.0 + tensor.abs_(input))
 
@@ -202,19 +240,24 @@
     and only if it is positive, 0 otherwise.
 
     .. math::
+
                 f(input) = \left \lbrace \\begin{array}{l}
                             input \quad \\text{ if } input > 0 \\
                             0     \quad \\text{ else }
                          \end{array}
                          \\right \}
 
-    :type input:  tensor-like
-    :param input: input tensor to which the rectifier activation function 
-                  will be applied
-    :rtype:       Tensor variable
-    :return:      always positive tensor which equals with the input if it is also 
-                  positive or to 0 otherwise
+    Parameters
+    ----------
+    input : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        An always-positive tensor whose elements equal the input where the
+        input is positive, and 0 otherwise.
     """
     return input*(input>=0)
 
@@ -226,12 +269,20 @@
            at initialization.
 
     .. math::
+
                 f(input) = ln \left( 1 + e^{input} \\right)
 
-    :type input:  tensor-like
-    :param input: input tensor to which the softplus should be applied
-    :rtype:       Theano variable
-    :return:      tensor obtained by applying softsign on the input
+    Parameters
+    ----------
+    input : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
+
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the softplus function is mapped to each
+        element of `input`.
     """
     return tensor.nnet.softplus(input)
 
@@ -242,15 +293,21 @@
     ``input``.
 
     .. math::
+
                 f(input) = |input|
 
-    :type input:  tensor-like
-    :param input: input tensor
-    :rtype:       Theano variable
-    :return:      tensor that represents the absolute value of the input
+    Parameters
+    ----------
+    input : tensor-like
+        A Theano variable with type theano.Tensor, or a value that can be 
+        converted to one :math:`\in \mathbb{R}^n`
 
-
-    """
+    Returns
+    -------
+    ret : a Theano variable with the same shape as the input
+        where the absolute value function is mapped to each
+        element of `input`.
+    """
     return theano.tensor.abs_(input)
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pylearn/formulas/regularization.py	Mon Dec 20 18:09:11 2010 -0500
@@ -0,0 +1,49 @@
+
+"""
+Different symbolic regularization and sparsity functions.
+"""
+
+import theano
+import theano.tensor as T
+
+from tags import tags
+
+__authors__   = "Frederic Bastien, Nicolas Boulanger-Lewandowski, .."
+__copyright__ = "(c) 2010, Universite de Montreal"
+__license__   = "3-clause BSD License"
+__contact__   = "theano-user <theano-users@googlegroups.com>"
+
+@tags('regularization', 'L1')
+def l1(x, target = 0, axis_sum = -1, axis_mean = 0):
+    """ Construct the L1 regularization penalty :math:`\sum|x-target|`
+
+    :type x: Theano variable
+    :param x: Weights or other variable to regularize
+    :type target: Theano variable
+    :param target: Target of x
+    :type axis_sum: Scalar
+    :param axis_sum: Axis along which the penalty terms will be summed (e.g. output units)
+    :type axis_mean: Scalar
+    :param axis_mean: Axis along which the penalty terms will be averaged (e.g. minibatches)    
+
+    :note: no stabilization required
+    """
+    return T.mean(T.sum(T.abs_(x - target), axis_sum), axis_mean)
+
+@tags('regularization', 'L2')
+def l2(x, target = 0, axis_sum = -1, axis_mean = 0):
+    """ Construct the L2 regularization penalty :math:`\sum(x-target)^2`
+
+    :type x: Theano variable
+    :param x: Weights or other variable to regularize
+    :type target: Theano variable
+    :param target: Target of x
+    :type axis_sum: Scalar
+    :param axis_sum: Axis along which the penalty terms will be summed (e.g. output units)
+    :type axis_mean: Scalar
+    :param axis_mean: Axis along which the penalty terms will be averaged (e.g. minibatches)    
+
+    :note: no stabilization required
+    """
+    return T.mean(T.sum((x - target)**2, axis_sum), axis_mean)
+
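+# Usage sketch (illustration only, not part of this module): combining these
+# penalties with some scalar cost expression `base_cost`, assuming `W` is a
+# shared weight matrix:
+#
+#     import numpy
+#     W = theano.shared(numpy.zeros((100, 10)), name='W')
+#     base_cost = T.scalar('base_cost')
+#     cost = base_cost + 0.01 * l1(W) + 0.0001 * l2(W)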
--- a/pylearn/shared/layers/tests/test_kouh2008.py	Mon Dec 20 18:08:48 2010 -0500
+++ b/pylearn/shared/layers/tests/test_kouh2008.py	Mon Dec 20 18:09:11 2010 -0500
@@ -46,7 +46,7 @@
             fN = f(xval, yval)
             assert fN  < f0
             f0 = fN
-            if 0 ==  i % 5: print i, 'rval', fN
+            #if 0 ==  i % 5: print i, 'rval', fN
 
     return fN