view sandbox/simple_autoassociator/model.py @ 437:2d8490d76b3e

added two methods to make_test_datasets
author Olivier Breuleux <breuleuo@iro.umontreal.ca>
date Wed, 06 Aug 2008 19:39:36 -0400
parents 4f61201fa9a9

"""
The model for an autoassociator for sparse inputs, using Ronan Collobert + Jason
Weston's sampling trick (2008).
"""

from graph import trainfn
import parameters

import numpy
import random

import pylearn.sparse_instance

class Model:
    """
    @todo: Add momentum.
    @todo: Add learning rate decay schedule.
    """
    def __init__(self, input_dimension, hidden_dimension, learning_rate = 0.1, weight_decay = 0.0002, random_seed = 666):
        self.input_dimension    = input_dimension
        self.hidden_dimension   = hidden_dimension
        self.learning_rate      = learning_rate
        self.weight_decay       = weight_decay
        self.random_seed        = random_seed

        random.seed(random_seed)

        self.parameters = parameters.Parameters(input_dimension=self.input_dimension, hidden_dimension=self.hidden_dimension, randomly_initialize=True, random_seed=self.random_seed)
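        # parameters.Parameters presumably holds the weight matrices and bias
        # vectors (w1, b1, w2, b2); they are read and updated as attributes in
        # the methods below.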

    def deterministic_reconstruction(self, x):
        (y, h, loss, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
        return y

    def update(self, instances):
        """
        Update the L{Model} using a minibatch of training instances.
        @param instances: A list of dicts mapping feature index to (non-zero) value.
        @todo: Should assert that nonzero_indices and zero_indices
        are correct (i.e. are truly nonzero/zero).
        @todo: Multiply L{self.weight_decay} by L{self.learning_rate}, as done in Semantic Hashing?
        @todo: Decay the biases too?
        """
        minibatch = len(instances)
        # to_vector presumably expands the sparse index->value dicts into a
        # dense (minibatch x input_dimension) array that trainfn can consume.
        x = pylearn.sparse_instance.to_vector(instances, self.input_dimension)

        (y, h, loss, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
#        print
#        print "instance:", instance
#        print "x:", x
#        print "OLD y:", y
        print "OLD total loss:", loss
#        print "gw1:", gw1
#        print "gb1:", gb1
#        print "gw2:", gw2
#        print "gb2:", gb2

        # Multiplicative weight decay on the weight matrices only (the biases
        # are left undecayed; see the @todo above).
        self.parameters.w1 *= (1 - self.weight_decay)
        self.parameters.w2 *= (1 - self.weight_decay)

        # SGD update: step against the gradients, averaged over the minibatch.
        self.parameters.w1  -= self.learning_rate * gw1 / minibatch
        self.parameters.b1  -= self.learning_rate * gb1 / minibatch
        self.parameters.w2  -= self.learning_rate * gw2 / minibatch
        self.parameters.b2  -= self.learning_rate * gb2 / minibatch
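        # Net effect for each weight matrix:
        #   W <- (1 - weight_decay) * W - learning_rate * gW / minibatch
        # (gW evaluated at the pre-decay W); the biases take the same step
        # without the decay factor.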

#        # Recompute the loss, to make sure it's decreasing
#        (y, h, loss, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
##        print "NEW y:", y
#        print "NEW total loss:", loss
##        print "h:", h
##        print self.parameters
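
if __name__ == "__main__":
    # Minimal usage sketch: the dimensions and instances below are made up for
    # illustration, and it assumes graph.trainfn, parameters.Parameters and
    # pylearn.sparse_instance behave as they are used above.
    model = Model(input_dimension=1000, hidden_dimension=20)
    # Each instance is a dict mapping feature index to its (non-zero) value.
    instances = [{3: 1.0, 17: 0.5}, {42: 2.0}]
    for i in range(10):
        model.update(instances)     # one SGD step on this minibatch
    x = pylearn.sparse_instance.to_vector(instances, model.input_dimension)
    print "reconstruction:", model.deterministic_reconstruction(x)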