# HG changeset patch
# User Yoshua Bengio
# Date 1275358472 14400
# Node ID d6cf4912abb0b837de7e2292720c68f4b611f707
# Parent 21787ac4e5a06a3e24ba8d5addd9926201c89129
caption + consequent

diff -r 21787ac4e5a0 -r d6cf4912abb0 writeup/nips2010_submission.tex
--- a/writeup/nips2010_submission.tex	Mon May 31 22:04:44 2010 -0400
+++ b/writeup/nips2010_submission.tex	Mon May 31 22:14:32 2010 -0400
@@ -516,7 +516,8 @@
 \begin{figure}[h]
 \resizebox{.99\textwidth}{!}{\includegraphics{images/error_rates_charts.pdf}}\\
-\caption{Charts corresponding to table \ref{tab:sda-vs-mlp-vs-humans}. Left: overall results; error bars indicate a 95\% confidence interval. Right: error rates on NIST test digits only, with results from litterature. }
+\caption{Left: overall results; error bars indicate a 95\% confidence interval.
+Right: error rates on NIST test digits only, with results from literature. }
 \label{fig:error-rates-charts}
 \end{figure}
@@ -556,7 +557,13 @@
 \begin{figure}[h]
 \resizebox{.99\textwidth}{!}{\includegraphics{images/improvements_charts.pdf}}\\
-\caption{Charts corresponding to tables \ref{tab:perturbation-effect} (left) and \ref{tab:multi-task} (right).}
+\caption{Relative improvement in error rate due to self-taught learning.
+Left: Improvement (or loss, when negative)
+induced by out-of-distribution examples (perturbed data).
+Right: Improvement (or loss, when negative) induced by multi-task
+learning (training on all classes and testing only on either digits,
+upper case, or lower-case). The deep learner (SDA) benefits more from
+both self-taught learning scenarios, compared to the shallow MLP.}
 \label{fig:improvements-charts}
 \end{figure}