# HG changeset patch
# User Joseph Turian
# Date 1215553305 14400
# Node ID ec8aadb6694dfbb0c8a8f32e5c110ecba3f1c722
# Parent 98ca97cc9910eaf60704da48f1e14e4c46258fa1
Renamed simple AA directory

diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator.py/README.txt
--- a/simple_autoassociator.py/README.txt Tue Jul 08 17:41:26 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-This may be buggy. -jpt
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator.py/__init__.py
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator.py/globals.py
--- a/simple_autoassociator.py/globals.py Tue Jul 08 17:41:26 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-"""
-Global variables.
-"""
-
-#INPUT_DIMENSION = 1000
-#INPUT_DIMENSION = 100
-INPUT_DIMENSION = 10
-#HIDDEN_DIMENSION = 20
-HIDDEN_DIMENSION = 4
-LEARNING_RATE = 0.01
-LR = LEARNING_RATE
-SEED = 666
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator.py/graph.py
--- a/simple_autoassociator.py/graph.py Tue Jul 08 17:41:26 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-"""
-Theano graph for a simple autoassociator.
-@todo: Make nearly everything private.
-"""
-
-from pylearn.nnet_ops import sigmoid, binary_crossentropy
-from theano import tensor as t
-from theano.tensor import dot
-x = t.dvector()
-w1 = t.dmatrix()
-b1 = t.dvector()
-w2 = t.dmatrix()
-b2 = t.dvector()
-h = sigmoid(dot(x, w1) + b1)
-y = sigmoid(dot(h, w2) + b2)
-
-loss_unsummed = binary_crossentropy(y, x)
-loss = t.sum(loss_unsummed)
-
-(gw1, gb1, gw2, gb2) = t.grad(loss, [w1, b1, w2, b2])
-
-import theano.compile
-
-inputs = [x, w1, b1, w2, b2]
-outputs = [y, h, loss, loss_unsummed, gw1, gb1, gw2, gb2]
-trainfn = theano.compile.function(inputs, outputs)
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator.py/main.py
--- a/simple_autoassociator.py/main.py Tue Jul 08 17:41:26 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-#!/usr/bin/python
-"""
-    A simple autoassociator.
-
-    The learned model is::
-        h = sigmoid(dot(x, w1) + b1)
-        y = sigmoid(dot(h, w2) + b2)
-
-    Binary xent loss.
-
-    LIMITATIONS:
-    - Only does pure stochastic gradient (batchsize = 1).
-"""
-
-
-import numpy
-
-nonzero_instances = []
-nonzero_instances.append({1: 0.1, 5: 0.5, 9: 1})
-nonzero_instances.append({2: 0.3, 5: 0.5, 8: 0.8})
-#nonzero_instances.append({1: 0.2, 2: 0.3, 5: 0.5})
-
-import model
-model = model.Model()
-
-for i in xrange(100000):
-    # Select an instance
-    instance = nonzero_instances[i % len(nonzero_instances)]
-
-    # SGD update over instance
-    model.update(instance)
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator.py/model.py
--- a/simple_autoassociator.py/model.py Tue Jul 08 17:41:26 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-"""
-The model for an autoassociator for sparse inputs, using Ronan Collobert + Jason
-Weston's sampling trick (2008).
-"""
-
-from graph import trainfn
-import parameters
-
-import globals
-from globals import LR
-
-import numpy
-import random
-random.seed(globals.SEED)
-
-class Model:
-    def __init__(self):
-        self.parameters = parameters.Parameters(randomly_initialize=True)
-
-    def update(self, instance):
-        """
-        Update the L{Model} using one training instance.
-        @param instance: A dict from feature index to (non-zero) value.
-        @todo: Should assert that nonzero_indices and zero_indices
-        are correct (i.e. are truly nonzero/zero).
-        """
-        x = numpy.zeros(globals.INPUT_DIMENSION)
-        for idx in instance.keys():
-            x[idx] = instance[idx]
-
-        (y, h, loss, loss_unsummed, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
-        print
-        print "instance:", instance
-        print "x:", x
-        print "OLD y:", y
-        print "NEW loss (unsummed):", loss_unsummed
-        print "OLD total loss:", loss
-        print "gw1:", gw1
-        print "gb1:", gb1
-        print "gw2:", gw2
-        print "gb2:", gb2
-
-        # SGD update
-        self.parameters.w1 -= LR * gw1
-        self.parameters.b1 -= LR * gb1
-        self.parameters.w2 -= LR * gw2
-        self.parameters.b2 -= LR * gb2
-
-        # Recompute the loss, to make sure it's descreasing
-        (y, h, loss, loss_unsummed, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
-        print "NEW y:", y
-        print "NEW loss (unsummed):", loss_unsummed
-        print "NEW total loss:", loss
-        print h
-        print self.parameters
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator.py/parameters.py
--- a/simple_autoassociator.py/parameters.py Tue Jul 08 17:41:26 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-"""
-Parameters (weights) used by the L{Model}.
-"""
-
-import numpy
-import globals
-
-class Parameters:
-    """
-    Parameters used by the L{Model}.
-    """
-    def __init__(self, input_dimension=globals.INPUT_DIMENSION, hidden_dimension=globals.HIDDEN_DIMENSION, randomly_initialize=False, seed=globals.SEED):
-        """
-        Initialize L{Model} parameters.
-        @param randomly_initialize: If True, then randomly initialize
-        according to the given seed. If False, then just use zeroes.
-        """
-        if randomly_initialize:
-            numpy.random.seed(seed)
-            self.w1 = (numpy.random.rand(input_dimension, hidden_dimension)-0.5)/input_dimension
-            self.w2 = (numpy.random.rand(hidden_dimension, input_dimension)-0.5)/hidden_dimension
-            self.b1 = numpy.zeros(hidden_dimension)
-            self.b2 = numpy.zeros(input_dimension)
-        else:
-            self.w1 = numpy.zeros((input_dimension, hidden_dimension))
-            self.w2 = numpy.zeros((hidden_dimension, input_dimension))
-            self.b1 = numpy.zeros(hidden_dimension)
-            self.b2 = numpy.zeros(input_dimension)
-
-    def __str__(self):
-        s = ""
-        s += "w1: %s\n" % self.w1
-        s += "b1: %s\n" % self.b1
-        s += "w2: %s\n" % self.w2
-        s += "b2: %s\n" % self.b2
-        return s
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator/README.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/simple_autoassociator/README.txt Tue Jul 08 17:41:45 2008 -0400
@@ -0,0 +1,1 @@
+This may be buggy. -jpt
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator/__init__.py
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator/globals.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/simple_autoassociator/globals.py Tue Jul 08 17:41:45 2008 -0400
@@ -0,0 +1,12 @@
+"""
+Global variables.
+"""
+
+#INPUT_DIMENSION = 1000
+#INPUT_DIMENSION = 100
+INPUT_DIMENSION = 10
+#HIDDEN_DIMENSION = 20
+HIDDEN_DIMENSION = 4
+LEARNING_RATE = 0.01
+LR = LEARNING_RATE
+SEED = 666
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator/graph.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/simple_autoassociator/graph.py Tue Jul 08 17:41:45 2008 -0400
@@ -0,0 +1,26 @@
+"""
+Theano graph for a simple autoassociator.
+@todo: Make nearly everything private.
+"""
+
+from pylearn.nnet_ops import sigmoid, binary_crossentropy
+from theano import tensor as t
+from theano.tensor import dot
+x = t.dvector()
+w1 = t.dmatrix()
+b1 = t.dvector()
+w2 = t.dmatrix()
+b2 = t.dvector()
+h = sigmoid(dot(x, w1) + b1)
+y = sigmoid(dot(h, w2) + b2)
+
+loss_unsummed = binary_crossentropy(y, x)
+loss = t.sum(loss_unsummed)
+
+(gw1, gb1, gw2, gb2) = t.grad(loss, [w1, b1, w2, b2])
+
+import theano.compile
+
+inputs = [x, w1, b1, w2, b2]
+outputs = [y, h, loss, loss_unsummed, gw1, gb1, gw2, gb2]
+trainfn = theano.compile.function(inputs, outputs)
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator/main.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/simple_autoassociator/main.py Tue Jul 08 17:41:45 2008 -0400
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+"""
+    A simple autoassociator.
+
+    The learned model is::
+        h = sigmoid(dot(x, w1) + b1)
+        y = sigmoid(dot(h, w2) + b2)
+
+    Binary xent loss.
+
+    LIMITATIONS:
+    - Only does pure stochastic gradient (batchsize = 1).
+"""
+
+
+import numpy
+
+nonzero_instances = []
+nonzero_instances.append({1: 0.1, 5: 0.5, 9: 1})
+nonzero_instances.append({2: 0.3, 5: 0.5, 8: 0.8})
+#nonzero_instances.append({1: 0.2, 2: 0.3, 5: 0.5})
+
+import model
+model = model.Model()
+
+for i in xrange(100000):
+    # Select an instance
+    instance = nonzero_instances[i % len(nonzero_instances)]
+
+    # SGD update over instance
+    model.update(instance)
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator/model.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/simple_autoassociator/model.py Tue Jul 08 17:41:45 2008 -0400
@@ -0,0 +1,55 @@
+"""
+The model for an autoassociator for sparse inputs, using Ronan Collobert + Jason
+Weston's sampling trick (2008).
+"""
+
+from graph import trainfn
+import parameters
+
+import globals
+from globals import LR
+
+import numpy
+import random
+random.seed(globals.SEED)
+
+class Model:
+    def __init__(self):
+        self.parameters = parameters.Parameters(randomly_initialize=True)
+
+    def update(self, instance):
+        """
+        Update the L{Model} using one training instance.
+        @param instance: A dict from feature index to (non-zero) value.
+        @todo: Should assert that nonzero_indices and zero_indices
+        are correct (i.e. are truly nonzero/zero).
+        """
+        x = numpy.zeros(globals.INPUT_DIMENSION)
+        for idx in instance.keys():
+            x[idx] = instance[idx]
+
+        (y, h, loss, loss_unsummed, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
+        print
+        print "instance:", instance
+        print "x:", x
+        print "OLD y:", y
+        print "NEW loss (unsummed):", loss_unsummed
+        print "OLD total loss:", loss
+        print "gw1:", gw1
+        print "gb1:", gb1
+        print "gw2:", gw2
+        print "gb2:", gb2
+
+        # SGD update
+        self.parameters.w1 -= LR * gw1
+        self.parameters.b1 -= LR * gb1
+        self.parameters.w2 -= LR * gw2
+        self.parameters.b2 -= LR * gb2
+
+        # Recompute the loss, to make sure it's descreasing
+        (y, h, loss, loss_unsummed, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
+        print "NEW y:", y
+        print "NEW loss (unsummed):", loss_unsummed
+        print "NEW total loss:", loss
+        print h
+        print self.parameters
diff -r 98ca97cc9910 -r ec8aadb6694d simple_autoassociator/parameters.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/simple_autoassociator/parameters.py Tue Jul 08 17:41:45 2008 -0400
@@ -0,0 +1,36 @@
+"""
+Parameters (weights) used by the L{Model}.
+"""
+
+import numpy
+import globals
+
+class Parameters:
+    """
+    Parameters used by the L{Model}.
+    """
+    def __init__(self, input_dimension=globals.INPUT_DIMENSION, hidden_dimension=globals.HIDDEN_DIMENSION, randomly_initialize=False, seed=globals.SEED):
+        """
+        Initialize L{Model} parameters.
+        @param randomly_initialize: If True, then randomly initialize
+        according to the given seed. If False, then just use zeroes.
+        """
+        if randomly_initialize:
+            numpy.random.seed(seed)
+            self.w1 = (numpy.random.rand(input_dimension, hidden_dimension)-0.5)/input_dimension
+            self.w2 = (numpy.random.rand(hidden_dimension, input_dimension)-0.5)/hidden_dimension
+            self.b1 = numpy.zeros(hidden_dimension)
+            self.b2 = numpy.zeros(input_dimension)
+        else:
+            self.w1 = numpy.zeros((input_dimension, hidden_dimension))
+            self.w2 = numpy.zeros((hidden_dimension, input_dimension))
+            self.b1 = numpy.zeros(hidden_dimension)
+            self.b2 = numpy.zeros(input_dimension)
+
+    def __str__(self):
+        s = ""
+        s += "w1: %s\n" % self.w1
+        s += "b1: %s\n" % self.b1
+        s += "w2: %s\n" % self.w2
+        s += "b2: %s\n" % self.b2
+        return s