# HG changeset patch
# User Yoshua Bengio
# Date 1275358544 14400
# Node ID 19eab4daf212e2e0d394696d90a698b495c9f414
# Parent d6cf4912abb0b837de7e2292720c68f4b611f707
# Parent ee9836baade3f2862be60ff77b9ad31e6a7b12fd
merge

diff -r ee9836baade3 -r 19eab4daf212 writeup/nips2010_submission.tex
--- a/writeup/nips2010_submission.tex	Mon May 31 19:07:59 2010 -0700
+++ b/writeup/nips2010_submission.tex	Mon May 31 22:15:44 2010 -0400
@@ -516,7 +516,8 @@
 
 \begin{figure}[h]
 \resizebox{.99\textwidth}{!}{\includegraphics{images/error_rates_charts.pdf}}\\
-\caption{Charts corresponding to table \ref{tab:sda-vs-mlp-vs-humans}. Left: overall results; error bars indicate a 95\% confidence interval. Right: error rates on NIST test digits only, with results from litterature. }
+\caption{Left: overall results; error bars indicate a 95\% confidence interval.
+Right: error rates on NIST test digits only, with results from literature. }
 \label{fig:error-rates-charts}
 \end{figure}
 
@@ -556,7 +557,13 @@
 
 \begin{figure}[h]
 \resizebox{.99\textwidth}{!}{\includegraphics{images/improvements_charts.pdf}}\\
-\caption{Charts corresponding to tables \ref{tab:perturbation-effect} (left) and \ref{tab:multi-task} (right).}
+\caption{Relative improvement in error rate due to self-taught learning.
+Left: Improvement (or loss, when negative)
+induced by out-of-distribution examples (perturbed data).
+Right: Improvement (or loss, when negative) induced by multi-task
+learning (training on all classes and testing only on either digits,
+upper case, or lower-case). The deep learner (SDA) benefits more from
+both self-taught learning scenarios, compared to the shallow MLP.}
 \label{fig:improvements-charts}
 \end{figure}
 