ift6266: changeset 590:4672fb6b4385
Changed mlj_submission.tex for merge
| author | fsavard |
| --- | --- |
| date | Thu, 30 Sep 2010 17:54:56 -0400 |
| parents | 9a6abcf143e8 |
| children | 8bd4ff0c5c05 |
| files | writeup/mlj_submission.tex |
| diffstat | 1 files changed, 19 insertions(+), 19 deletions(-) |
line diff
--- a/writeup/mlj_submission.tex Thu Sep 30 17:51:46 2010 -0400
+++ b/writeup/mlj_submission.tex Thu Sep 30 17:54:56 2010 -0400
@@ -129,7 +129,7 @@
 learning, often in an greedy layer-wise ``unsupervised pre-training''
 stage~\citep{Bengio-2009}. One of these layer initialization techniques,
 applied here, is the Denoising
-Auto-encoder~(DA)~\citep{VincentPLarochelleH2008} (see Figure~\ref{fig:da}),
+Auto-encoder~(DA)~\citep{VincentPLarochelleH2008-very-small} (see Figure~\ref{fig:da}),
 which
 performed similarly or better than previously proposed Restricted Boltzmann
 Machines in terms of unsupervised extraction of a hierarchy of features
@@ -203,7 +203,7 @@
 %\begin{minipage}[b]{0.14\linewidth}
 %\vspace*{-5mm}
 \begin{center}
-\includegraphics[scale=.4]{images/Original.png}\\
+\includegraphics[scale=.4]{Original.png}\\
 {\bf Original}
 \end{center}
 \end{wrapfigure}
@@ -240,7 +240,7 @@
 %\centering
 \begin{center}
 \vspace*{-5mm}
-\includegraphics[scale=.4]{images/Thick_only.png}\\
+\includegraphics[scale=.4]{Thick_only.png}\\
 %{\bf Thickness}
 \end{center}
 \vspace{.6cm}
@@ -268,7 +268,7 @@
 \begin{minipage}[b]{0.14\linewidth}
 \centering
-\includegraphics[scale=.4]{images/Slant_only.png}\\
+\includegraphics[scale=.4]{Slant_only.png}\\
 %{\bf Slant}
 \end{minipage}%
 \hspace{0.3cm}
@@ -290,7 +290,7 @@
 %\centering
 %\begin{wrapfigure}[8]{l}{0.15\textwidth}
 \begin{center}
-\includegraphics[scale=.4]{images/Affine_only.png}
+\includegraphics[scale=.4]{Affine_only.png}
 \vspace*{6mm}
 %{\small {\bf Affine \mbox{Transformation}}}
 \end{center}
@@ -320,7 +320,7 @@
 %\centering
 \begin{center}
 \vspace*{5mm}
-\includegraphics[scale=.4]{images/Localelasticdistorsions_only.png}
+\includegraphics[scale=.4]{Localelasticdistorsions_only.png}
 %{\bf Local Elastic Deformation}
 \end{center}
 %\end{wrapfigure}
@@ -347,7 +347,7 @@
 %\begin{wrapfigure}[7]{l}{0.15\textwidth}
 %\vspace*{-5mm}
 \begin{center}
-\includegraphics[scale=.4]{images/Pinch_only.png}\\
+\includegraphics[scale=.4]{Pinch_only.png}\\
 \vspace*{15mm}
 %{\bf Pinch}
 \end{center}
@@ -384,7 +384,7 @@
 \begin{minipage}[t]{0.14\linewidth}
 \centering
 \vspace*{0mm}
-\includegraphics[scale=.4]{images/Motionblur_only.png}
+\includegraphics[scale=.4]{Motionblur_only.png}
 %{\bf Motion Blur}
 \end{minipage}%
 \hspace{0.3cm}\begin{minipage}[t]{0.83\linewidth}
@@ -405,7 +405,7 @@
 \begin{minipage}[t]{0.14\linewidth}
 \centering
 \vspace*{3mm}
-\includegraphics[scale=.4]{images/occlusion_only.png}\\
+\includegraphics[scale=.4]{occlusion_only.png}\\
 %{\bf Occlusion}
 %%\vspace{.5cm}
 \end{minipage}%
@@ -432,7 +432,7 @@
 \begin{center}
 %\centering
 \vspace*{6mm}
-\includegraphics[scale=.4]{images/Bruitgauss_only.png}
+\includegraphics[scale=.4]{Bruitgauss_only.png}
 %{\bf Gaussian Smoothing}
 \end{center}
 %\end{wrapfigure}
@@ -468,7 +468,7 @@
 %\vspace*{-5mm}
 \begin{center}
 \vspace*{1mm}
-\includegraphics[scale=.4]{images/Permutpixel_only.png}
+\includegraphics[scale=.4]{Permutpixel_only.png}
 %{\small\bf Permute Pixels}
 \end{center}
 %\end{wrapfigure}
@@ -495,7 +495,7 @@
 %\hspace*{-3mm}\begin{minipage}[t]{0.18\linewidth}
 %\centering
 \vspace*{0mm}
-\includegraphics[scale=.4]{images/Distorsiongauss_only.png}
+\includegraphics[scale=.4]{Distorsiongauss_only.png}
 %{\small \bf Gauss.
 Noise}
 \end{center}
 %\end{wrapfigure}
@@ -517,7 +517,7 @@
 \begin{minipage}[t]{0.14\linewidth}
 \centering
 \vspace*{0mm}
-\includegraphics[scale=.4]{images/background_other_only.png}
+\includegraphics[scale=.4]{background_other_only.png}
 %{\small \bf Bg Image}
 \end{minipage}%
 \hspace{0.3cm}\begin{minipage}[t]{0.83\linewidth}
@@ -536,7 +536,7 @@
 \begin{minipage}[t]{0.14\linewidth}
 \centering
 \vspace*{0mm}
-\includegraphics[scale=.4]{images/Poivresel_only.png}
+\includegraphics[scale=.4]{Poivresel_only.png}
 %{\small \bf Salt \& Pepper}
 \end{minipage}%
 \hspace{0.3cm}\begin{minipage}[t]{0.83\linewidth}
@@ -558,7 +558,7 @@
 \begin{center}
 \vspace*{4mm}
 %\hspace*{-1mm}
-\includegraphics[scale=.4]{images/Rature_only.png}\\
+\includegraphics[scale=.4]{Rature_only.png}\\
 %{\bf Scratches}
 \end{center}
 \end{minipage}%
@@ -584,7 +584,7 @@
 \begin{minipage}[t]{0.15\linewidth}
 \centering
 \vspace*{0mm}
-\includegraphics[scale=.4]{images/Contrast_only.png}
+\includegraphics[scale=.4]{Contrast_only.png}
 %{\bf Grey Level \& Contrast}
 \end{minipage}%
 \hspace{3mm}\begin{minipage}[t]{0.85\linewidth}
@@ -791,7 +791,7 @@
 \begin{figure}[ht]
 %\vspace*{-2mm}
-\centerline{\resizebox{0.8\textwidth}{!}{\includegraphics{images/denoising_autoencoder_small.pdf}}}
+\centerline{\resizebox{0.8\textwidth}{!}{\includegraphics{denoising_autoencoder_small.pdf}}}
 %\vspace*{-2mm}
 \caption{Illustration of the computations and training criterion for the denoising
 auto-encoder used to pre-train each layer of the deep architecture. Input $x$ of
@@ -840,7 +840,7 @@
 \begin{figure}[ht]
 %\vspace*{-2mm}
-\centerline{\resizebox{.99\textwidth}{!}{\includegraphics{images/error_rates_charts.pdf}}}
+\centerline{\resizebox{.99\textwidth}{!}{\includegraphics{error_rates_charts.pdf}}}
 %\vspace*{-3mm}
 \caption{SDAx are the {\bf deep} models. Error bars indicate a 95\% confidence interval.
 0 indicates that the model was trained on NIST, 1 on NISTP, and 2 on P07. Left: overall results
@@ -855,7 +855,7 @@
 \begin{figure}[ht]
 %\vspace*{-3mm}
-\centerline{\resizebox{.99\textwidth}{!}{\includegraphics{images/improvements_charts.pdf}}}
+\centerline{\resizebox{.99\textwidth}{!}{\includegraphics{improvements_charts.pdf}}}
 %\vspace*{-3mm}
 \caption{Relative improvement in error rate due to self-taught learning.
 Left: Improvement (or loss, when negative)
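Aside from the one citation-key update in the first hunk (VincentPLarochelleH2008 to VincentPLarochelleH2008-very-small), every change above drops the images/ prefix from an \includegraphics path. The sketch below is not part of the changeset; it only illustrates, under the assumption that the figure files remain in an images/ subdirectory rather than being copied next to mlj_submission.tex, how graphicx's \graphicspath would let such shortened paths keep resolving.

```latex
% Hypothetical preamble sketch, not taken from mlj_submission.tex: it assumes
% the figures still live in images/ and shows how the shortened paths in the
% diff above would continue to compile.
\documentclass{article}
\usepackage{graphicx}
% Add images/ to the directories graphicx searches; the current directory is
% still searched as usual, so figures copied next to the .tex also work.
\graphicspath{{images/}}

\begin{document}
% Same form as the "+" lines in the diff: no images/ prefix needed.
\includegraphics[scale=.4]{Original.png}
\end{document}
```

If the submission package instead flattens everything into one directory (a common requirement for journal upload systems), no \graphicspath is needed and the shortened paths in the diff work as-is.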