pylearn: view sparse_random_autoassociator/model.py @ 371:22463a194c90

Update doc

author    Joseph Turian <turian@gmail.com>
date      Mon, 07 Jul 2008 01:57:49 -0400
parents   a1bbcde6b456
children  75bab24bb2d8
""" The model for an autoassociator for sparse inputs, using Ronan Collobert + Jason Weston's sampling trick (2008). """ from graph import trainfn import parameters import numpy from globals import LR class Model: def __init__(self): self.parameters = parameters.Parameters(randomly_initialize=True) def update(self, instance, nonzero_indexes, zero_indexes): xnonzero = numpy.asarray([instance[idx] for idx in nonzero_indexes]) print print "xnonzero:", xnonzero (ynonzero, yzero, loss, gw1nonzero, gb1, gw2nonzero, gw2zero, gb2nonzero, gb2zero) = trainfn(xnonzero, self.parameters.w1[nonzero_indexes, :], self.parameters.b1, self.parameters.w2[:, nonzero_indexes], self.parameters.w2[:, zero_indexes], self.parameters.b2[nonzero_indexes], self.parameters.b2[zero_indexes]) print "OLD ynonzero:", ynonzero print "OLD yzero:", yzero print "OLD total loss:", loss # SGD update self.parameters.w1[nonzero_indexes, :] -= LR * gw1nonzero self.parameters.b1 -= LR * gb1 self.parameters.w2[:, nonzero_indexes] -= LR * gw2nonzero self.parameters.w2[:, zero_indexes] -= LR * gw2zero self.parameters.b2[nonzero_indexes] -= LR * gb2nonzero self.parameters.b2[zero_indexes] -= LR * gb2zero # Recompute the loss, to make sure it's descreasing (ynonzero, yzero, loss, gw1nonzero, gb1, gw2nonzero, gw2zero, gb2nonzero, gb2zero) = trainfn(xnonzero, self.parameters.w1[nonzero_indexes, :], self.parameters.b1, self.parameters.w2[:, nonzero_indexes], self.parameters.w2[:, zero_indexes], self.parameters.b2[nonzero_indexes], self.parameters.b2[zero_indexes]) print "NEW ynonzero:", ynonzero print "NEW yzero:", yzero print "NEW total loss:", loss