Mercurial > pylearn
comparison sparse_random_autoassociator/model.py @ 372:75bab24bb2d8
Moved more logic into model.py
author | Joseph Turian <turian@gmail.com> |
---|---|
date | Mon, 07 Jul 2008 02:06:15 -0400 |
parents | a1bbcde6b456 |
children | 42cc94cf6c12 |
comparison
equal
deleted
inserted
replaced
371:22463a194c90 | 372:75bab24bb2d8 |
---|---|
3 Weston's sampling trick (2008). | 3 Weston's sampling trick (2008). |
4 """ | 4 """ |
5 | 5 |
6 from graph import trainfn | 6 from graph import trainfn |
7 import parameters | 7 import parameters |
8 | |
9 import globals | |
10 from globals import LR | |
11 | |
8 import numpy | 12 import numpy |
9 from globals import LR | 13 import random |
14 random.seed(globals.SEED) | |
15 | |
16 def _select_indices(instance): | |
17 """ | |
18 Choose nonzero and zero indices (feature columns) of the instance. | |
19 We select B{all} nonzero indices. | |
20 We select L{globals.ZERO_SAMPLE_SIZE} zero indices randomly, | |
21 without replacement. | |
22 @bug: If there are not ZERO_SAMPLE_SIZE zeroes, we will enter | |
23 an endless loop. | |
24 @return: (nonzero_indices, zero_indices) | |
25 """ | |
26 # Get the nonzero indices | |
27 nonzero_indices = instance.keys() | |
28 nonzero_indices.sort() | |
29 | |
30 # Get the zero indices | |
31 # @bug: If there are not ZERO_SAMPLE_SIZE zeroes, we will enter an endless loop. | |
32 zero_indices = [] | |
33 while len(zero_indices) < globals.ZERO_SAMPLE_SIZE: | |
34 idx = random.randint(0, globals.INPUT_DIMENSION - 1) | |
35 if idx in nonzero_indices or idx in zero_indices: continue | |
36 zero_indices.append(idx) | |
37 zero_indices.sort() | |
38 | |
39 return (nonzero_indices, zero_indices) | |
10 | 40 |
11 class Model: | 41 class Model: |
12 def __init__(self): | 42 def __init__(self): |
13 self.parameters = parameters.Parameters(randomly_initialize=True) | 43 self.parameters = parameters.Parameters(randomly_initialize=True) |
14 | 44 |
15 def update(self, instance, nonzero_indexes, zero_indexes): | 45 def update(self, instance): |
16 xnonzero = numpy.asarray([instance[idx] for idx in nonzero_indexes]) | 46 """ |
47 Update the L{Model} using one training instance. | |
48 @param instance: A dict from feature index to (non-zero) value. | |
49 @todo: Should assert that nonzero_indices and zero_indices | |
50 are correct (i.e. are truly nonzero/zero). | |
51 """ | |
52 (nonzero_indices, zero_indices) = _select_indices(instance) | |
53 xnonzero = numpy.asarray([instance[idx] for idx in nonzero_indices]) | |
17 print | 54 print |
18 print "xnonzero:", xnonzero | 55 print "xnonzero:", xnonzero |
19 | 56 |
20 (ynonzero, yzero, loss, gw1nonzero, gb1, gw2nonzero, gw2zero, gb2nonzero, gb2zero) = trainfn(xnonzero, self.parameters.w1[nonzero_indexes, :], self.parameters.b1, self.parameters.w2[:, nonzero_indexes], self.parameters.w2[:, zero_indexes], self.parameters.b2[nonzero_indexes], self.parameters.b2[zero_indexes]) | 57 (ynonzero, yzero, loss, gw1nonzero, gb1, gw2nonzero, gw2zero, gb2nonzero, gb2zero) = trainfn(xnonzero, self.parameters.w1[nonzero_indices, :], self.parameters.b1, self.parameters.w2[:, nonzero_indices], self.parameters.w2[:, zero_indices], self.parameters.b2[nonzero_indices], self.parameters.b2[zero_indices]) |
21 print "OLD ynonzero:", ynonzero | 58 print "OLD ynonzero:", ynonzero |
22 print "OLD yzero:", yzero | 59 print "OLD yzero:", yzero |
23 print "OLD total loss:", loss | 60 print "OLD total loss:", loss |
24 | 61 |
25 # SGD update | 62 # SGD update |
26 self.parameters.w1[nonzero_indexes, :] -= LR * gw1nonzero | 63 self.parameters.w1[nonzero_indices, :] -= LR * gw1nonzero |
27 self.parameters.b1 -= LR * gb1 | 64 self.parameters.b1 -= LR * gb1 |
28 self.parameters.w2[:, nonzero_indexes] -= LR * gw2nonzero | 65 self.parameters.w2[:, nonzero_indices] -= LR * gw2nonzero |
29 self.parameters.w2[:, zero_indexes] -= LR * gw2zero | 66 self.parameters.w2[:, zero_indices] -= LR * gw2zero |
30 self.parameters.b2[nonzero_indexes] -= LR * gb2nonzero | 67 self.parameters.b2[nonzero_indices] -= LR * gb2nonzero |
31 self.parameters.b2[zero_indexes] -= LR * gb2zero | 68 self.parameters.b2[zero_indices] -= LR * gb2zero |
32 | 69 |
33 # Recompute the loss, to make sure it's decreasing | 70 # Recompute the loss, to make sure it's decreasing |
34 (ynonzero, yzero, loss, gw1nonzero, gb1, gw2nonzero, gw2zero, gb2nonzero, gb2zero) = trainfn(xnonzero, self.parameters.w1[nonzero_indexes, :], self.parameters.b1, self.parameters.w2[:, nonzero_indexes], self.parameters.w2[:, zero_indexes], self.parameters.b2[nonzero_indexes], self.parameters.b2[zero_indexes]) | 71 (ynonzero, yzero, loss, gw1nonzero, gb1, gw2nonzero, gw2zero, gb2nonzero, gb2zero) = trainfn(xnonzero, self.parameters.w1[nonzero_indices, :], self.parameters.b1, self.parameters.w2[:, nonzero_indices], self.parameters.w2[:, zero_indices], self.parameters.b2[nonzero_indices], self.parameters.b2[zero_indices]) |
35 print "NEW ynonzero:", ynonzero | 72 print "NEW ynonzero:", ynonzero |
36 print "NEW yzero:", yzero | 73 print "NEW yzero:", yzero |
37 print "NEW total loss:", loss | 74 print "NEW total loss:", loss |