pylearn: algorithms/stacker.py @ 476:8fcd0f3d9a17

added a few algorithms

author | Olivier Breuleux <breuleuo@iro.umontreal.ca>
---|---
date | Mon, 27 Oct 2008 17:26:00 -0400
parents |
children | bb6bdd3b7ff3
import theano
from theano import tensor as T
import sys
import numpy as N

class Stacker(T.RModule):
    """Stack several pylearn submodules into one deep model: each layer
    is built on top of the previous layer's output."""

    def __init__(self, submodules, input = None, regularize = False):
        super(Stacker, self).__init__()

        # `submodules` is a list of (submodule, outname) pairs; each layer's
        # `outname` field becomes the next layer's input.
        current = input
        layers = []
        for i, (submodule, outname) in enumerate(submodules):
            layer = submodule(current, regularize = regularize)
            layers.append(layer)
            current = layer[outname]
        self.layers = layers

        self.input = self.layers[0].input
        self.output = current

        # For each layer, build two training Methods: a local one that applies
        # only the layer's own updates, and a global one that also updates the
        # parameters of all the layers below it wrt this layer's cost.
        local_update = []
        global_update = []
        to_update = []
        all_kits = []
        for layer in layers:
            u = layer.update
            u.resolve_all()
            to_update += u.updates.keys()
            all_kits += u.kits
            # the input is the whole deep model's input instead of the layer's
            # own input (which is previous_layer[outname])
            inputs = [self.input] + u.inputs[1:]
            method = theano.Method(inputs, u.outputs, u.updates, u.kits)
            local_update.append(method)
            global_update.append(
                theano.Method(inputs,
                              u.outputs,
                              # we update the params of the previous layers too,
                              # but wrt this layer's cost
                              dict((param, param - layer.lr * T.grad(layer.cost, param))
                                   for param in to_update),
                              list(all_kits)))

        self.local_update = local_update
        self.global_update = global_update
        # By default, an update trains the whole stack wrt the last layer's cost.
        self.update = self.global_update[-1]
        self.compute = theano.Method(self.input, self.output)
        # Re-expose the last layer's Methods on the Stacker itself, rewired so
        # that they take the whole model's input.
        ll = self.layers[-1]
        for name, method in ll.components_map():
            if isinstance(method, theano.Method) and not hasattr(self, name):
                m = method.dup()
                m.resolve_all()
                m.inputs = [self.input if x is ll.input else x for x in m.inputs]
                setattr(self, name, m)

    def _instance_initialize(self, obj, nunits = None, lr = 0.01, seed = None, **kwargs):
        super(Stacker, self)._instance_initialize(obj, **kwargs)
        if seed is not None:
            R = N.random.RandomState(seed)
        else:
            R = N.random
        for layer in obj.layers:
            if layer.lr is None:
                layer.lr = lr
        if nunits:
            if len(nunits) != len(obj.layers) + 1:
                raise ValueError('You should give exactly one more unit count than there are layers.')
            # nunits[i] and nunits[i + 1] are layer i's input and output sizes.
            for ni, no, layer in zip(nunits[:-1], nunits[1:], obj.layers):
                if seed is not None:
                    layer.initialize(ni, no, seed = R.random_integers(sys.maxint - 1))
                else:
                    layer.initialize(ni, no)
        if seed is not None:
            obj.seed(seed)

    def _instance_flops_approx(self, obj):
        rval = 0
        for layer in obj.layers:
            rval += layer.flops_approx()
        return rval
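
For context, a minimal sketch of how a stack like this was meant to be driven. It is not part of this file: `Autoencoder` stands in for any pylearn submodule with the `(input, regularize)` constructor and an `'output'` field that Stacker expects, and `make()` is the old Theano Module call that compiled a module and forwarded its keyword arguments to `_instance_initialize`.

```python
# Hypothetical usage sketch; `Autoencoder` is assumed, not defined here.
import numpy

x_batch = numpy.random.rand(20, 784)

# Two layers; each tuple names which field of the submodule feeds the next layer.
stacker = Stacker([(Autoencoder, 'output'), (Autoencoder, 'output')])
model = stacker.make(nunits = [784, 500, 250],  # one more unit count than layers
                     lr = 0.01,
                     seed = 42)

# Greedy layer-wise pretraining: train each layer on its own cost first...
for train_layer in model.local_update:
    train_layer(x_batch)
# ...then fine-tune every parameter wrt the topmost layer's cost.
model.update(x_batch)

hidden = model.compute(x_batch)
```

The split between `local_update` and `global_update` is the point of the class: pretraining touches one layer at a time, while `self.update` (the last global Method) propagates the top layer's cost through every parameter below it.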