comparison simple_autoassociator/model.py @ 386:a474341861fa

Added a simple AA
author Joseph Turian <turian@gmail.com>
date Tue, 08 Jul 2008 02:27:00 -0400
parents sparse_random_autoassociator/model.py@42cc94cf6c12
children 98ca97cc9910
comparison
equal deleted inserted replaced
385:db28ff3fb887 386:a474341861fa
"""
The model for a simple autoassociator (dense reconstruction of the
whole input vector).

NOTE(review): this module was copied from
sparse_random_autoassociator/model.py, and the original docstring still
described that model ("sparse inputs, using Ronan Collobert + Jason
Weston's sampling trick (2008)"). No sampling trick appears in this
file -- confirm the intended description.
"""

from graph import trainfn
import parameters

import globals
from globals import LR

import numpy
import random
# Seed Python's RNG from the project-wide seed so runs are reproducible.
random.seed(globals.SEED)
16 class Model:
17 def __init__(self):
18 self.parameters = parameters.Parameters(randomly_initialize=True)
19
20 def update(self, instance):
21 """
22 Update the L{Model} using one training instance.
23 @param instance: A dict from feature index to (non-zero) value.
24 @todo: Should assert that nonzero_indices and zero_indices
25 are correct (i.e. are truly nonzero/zero).
26 """
27 x = numpy.zeros(globals.INPUT_DIMENSION)
28 for idx in instance.keys():
29 x[idx] = instance[idx]
30
31 (y, loss, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
32 print
33 print "instance:", instance
34 print "OLD y:", y
35 print "OLD total loss:", loss
36
37 # SGD update
38 self.parameters.w1 -= LR * gw1
39 self.parameters.b1 -= LR * gb1
40 self.parameters.w2 -= LR * gw2
41 self.parameters.b2 -= LR * gb2
42
43 # Recompute the loss, to make sure it's descreasing
44 (y, loss, gw1, gb1, gw2, gb2) = trainfn(x, self.parameters.w1, self.parameters.b1, self.parameters.w2, self.parameters.b2)
45 print "NEW y:", y
46 print "NEW total loss:", loss