diff writeup/nips2010_submission.tex @ 490:d6cf4912abb0

caption + consequent
author Yoshua Bengio <bengioy@iro.umontreal.ca>
date Mon, 31 May 2010 22:14:32 -0400
parents 21787ac4e5a0
children 19eab4daf212
line wrap: on
line diff
--- a/writeup/nips2010_submission.tex	Mon May 31 22:04:44 2010 -0400
+++ b/writeup/nips2010_submission.tex	Mon May 31 22:14:32 2010 -0400
@@ -516,7 +516,8 @@
 
 \begin{figure}[h]
 \resizebox{.99\textwidth}{!}{\includegraphics{images/error_rates_charts.pdf}}\\
-\caption{Charts corresponding to table \ref{tab:sda-vs-mlp-vs-humans}. Left: overall results; error bars indicate a 95\% confidence interval. Right: error rates on NIST test digits only, with results from litterature. }
+\caption{Left: overall results; error bars indicate a 95\% confidence interval. 
+Right: error rates on NIST test digits only, with results from the literature. }
 \label{fig:error-rates-charts}
 \end{figure}
 
@@ -556,7 +557,13 @@
 
 \begin{figure}[h]
 \resizebox{.99\textwidth}{!}{\includegraphics{images/improvements_charts.pdf}}\\
-\caption{Charts corresponding to tables \ref{tab:perturbation-effect} (left) and \ref{tab:multi-task} (right).}
+\caption{Relative improvement in error rate due to self-taught learning. 
+Left: Improvement (or loss, when negative)
+induced by out-of-distribution examples (perturbed data). 
+Right: Improvement (or loss, when negative) induced by multi-task 
+learning (training on all classes and testing only on either digits,
upper case, or lower case). The deep learner (SDA) benefits more from
+both self-taught learning scenarios, compared to the shallow MLP.}
 \label{fig:improvements-charts}
 \end{figure}