# HG changeset patch
# User Joseph Turian
# Date 1224111595 14400
# Node ID 23221eefb70eb499cd8b2d78698bf82cd19b73cd
# Parent  8cde974b648666c01b399c04dc02c7537abbd931
Added pylearn.sandbox.weights.random_weights

diff -r 8cde974b6486 -r 23221eefb70e sandbox/weights.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/sandbox/weights.py	Wed Oct 15 18:59:55 2008 -0400
@@ -0,0 +1,30 @@
+"""
+Routine to initialize weights.
+
+@note: We assume that numpy.random.seed() has already been called.
+"""
+
+from math import sqrt
+import numpy.random
+
+
+def random_weights(nin, nout, scale_by=sqrt(3)):
+    """
+    Generate an initial weight matrix with nin inputs (rows) and nout
+    outputs (columns).
+
+    Each weight is drawn uniformly at random from the range
+    [-scale_by/sqrt(nin), +scale_by/sqrt(nin)].
+
+    @note: Play with scale_by! Ronan derives scale_by=sqrt(3) as
+    follows: a uniform distribution on [-a, +a] has variance a**2/3,
+    so with a = sqrt(3)/sqrt(nin) each weight has variance 1/nin, and
+    a dot product of nin independent, unit-variance inputs then has
+    unit variance. However, Ronan got better results by accidentally
+    using scale_by=1. Yoshua hypothesizes this is because with
+    scale_by < sqrt(3) the activation variance shrinks geometrically
+    as we go up the layers.
+    @note: Things may get even trickier if the same weights are being
+    shared in multiple places.
+    """
+    return (numpy.random.rand(nin, nout) * 2.0 - 1) * scale_by / sqrt(nin)
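
A quick sanity check of the docstring's variance argument may help. This is a minimal sketch, not part of the changeset; the layer sizes nin=400, nout=50 and the sample count are arbitrary choices for illustration.

    import numpy
    import numpy.random
    from math import sqrt

    def random_weights(nin, nout, scale_by=sqrt(3)):
        # Same formula as the patched sandbox/weights.py: each entry is
        # uniform on [-scale_by/sqrt(nin), +scale_by/sqrt(nin)].
        return (numpy.random.rand(nin, nout) * 2.0 - 1) * scale_by / sqrt(nin)

    numpy.random.seed(0)           # the module assumes this was already called
    nin, nout = 400, 50            # hypothetical layer sizes
    W = random_weights(nin, nout)

    # Uniform on [-a, +a] has variance a**2/3; with a = sqrt(3)/sqrt(nin)
    # each weight should have variance 1/nin.
    print(W.var(), 1.0 / nin)      # both approximately 0.0025

    # A dot product of nin independent, unit-variance inputs then has
    # variance nin * (1/nin) * 1 = 1.
    x = numpy.random.randn(10000, nin)
    print(x.dot(W).var())          # approximately 1.0

Running the same check with scale_by=1 gives pre-activation variance of about 1/3, which is the per-layer shrink factor behind Yoshua's telescoping hypothesis.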