pylearn: comparison of algorithms/tests/test_daa.py @ 501:4fb6f7320518
N-class logistic regression top-layer works
author:   Joseph Turian <turian@gmail.com>
date:     Tue, 28 Oct 2008 13:54:01 -0400
parents:  3c60c2db0319
children: de974b4fc4ea
Changeset 500:3c60c2db0319 (parent) compared with 501:4fb6f7320518 (this changeset):

--- algorithms/tests/test_daa.py  (500:3c60c2db0319)
+++ algorithms/tests/test_daa.py  (501:4fb6f7320518)
@@ -21,41 +21,47 @@
     model.layers[0].noise_level = 0.3
     model.layers[1].noise_level = 0.3
     model.layers[2].noise_level = 0.3
 
     # Update the first hidden layer
-    model.local_update[0]([[0, 1, 0, 1]])
-    model.local_update[1]([[0, 1, 0, 1]])
-    model.local_update[2]([[0, 1, 0, 1]])
+    for l in range(3):
+        for i in range(10):
+            model.local_update[l]([[0, 1, 0, 1]])
+            model.local_update[l]([[1, 0, 1, 0]])
 
-    model.update([[0, 1, 0, 1]], [[0]])
+    for i in range(1):
+        model.update([[0, 1, 0, 1]], [[1]])
+        model.update([[1, 0, 1, 0]], [[0]])
     print model.classify([[0, 1, 0, 1]])
+    print model.classify([[1, 0, 1, 0]])
 
 
 def test_train_daa2(mode = theano.Mode('c|py', 'fast_run')):
 
     ndaa = 3
-    daa = models.Stacker([(models.SigmoidXEDenoisingAA, 'hidden')] * ndaa + [(pylearn.algorithms.logistic_regression.Module_Nclass, 'output')],
+    daa = models.Stacker([(models.SigmoidXEDenoisingAA, 'hidden')] * ndaa + [(pylearn.algorithms.logistic_regression.Module_Nclass, 'pred')],
                          regularize = False)
 
-    model = daa.make([4, 20, 20, 20, 1],
+    model = daa.make([4, 20, 20, 20, 10],
                      lr = 0.01,
                      mode = mode,
                      seed = 10)
 
     model.layers[0].noise_level = 0.3
     model.layers[1].noise_level = 0.3
     model.layers[2].noise_level = 0.3
 
-    # Update the first hidden layer
-    model.local_update[0]([[0, 1, 0, 1]])
-    model.local_update[1]([[0, 1, 0, 1]])
-    model.local_update[2]([[0, 1, 0, 1]])
+    for l in range(3):
+        for i in range(10):
+            model.local_update[l]([[0, 1, 0, 1]])
+            model.local_update[l]([[1, 0, 1, 0]])
 
-    model.update([[0, 1, 0, 1]], [0])
-    print model.classify([[0, 1, 0, 1]])
-
+    for i in range(1):
+        model.update([[0, 1, 0, 1]], [1])
+        model.update([[1, 0, 1, 0]], [0])
+    print model.apply([[0, 1, 0, 1]])
+    print model.apply([[1, 0, 1, 0]])
 
 
 
 
 if __name__ == '__main__':
@@ -68,7 +74,9 @@
 #    print 'not optimized:'
 #    t2 = test_train_daa(theano.Mode('c|py', 'fast_compile'))
 ##    print 'time:',t2
 
 #    test_train_daa(theano.compile.Mode('c&py', 'merge'))
-    test_train_daa(theano.compile.Mode('c|py', 'merge'))
+#    test_train_daa(theano.compile.Mode('c|py', 'merge'))
+    test_train_daa(theano.compile.Mode('py', 'merge'))
+
     test_train_daa2(theano.compile.Mode('c|py', 'merge'))
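For readers who only want the shape of the change: test_train_daa2 now stacks three denoising autoencoder layers with an N-class logistic regression module ('pred') on top, pretrains each layer with local_update, fine-tunes with integer class targets via update, and prints predictions with apply. The sketch below restates that flow in one place, assembled only from the calls visible in the hunks above; the test module's own imports (in particular whatever it binds to the 'models' alias) are outside these hunks, so they appear here only as a comment and the sketch is not runnable on its own.

    import theano
    import pylearn.algorithms.logistic_regression
    # 'models' is the test file's existing alias for the module providing
    # Stacker and SigmoidXEDenoisingAA; its import is not part of this diff.

    ndaa = 3
    # Three denoising autoencoder layers, N-class logistic regression ('pred') on top.
    daa = models.Stacker([(models.SigmoidXEDenoisingAA, 'hidden')] * ndaa
                         + [(pylearn.algorithms.logistic_regression.Module_Nclass, 'pred')],
                         regularize = False)

    # 4 inputs, three 20-unit hidden layers, 10 output classes.
    model = daa.make([4, 20, 20, 20, 10], lr = 0.01,
                     mode = theano.Mode('c|py', 'fast_run'), seed = 10)

    for l in range(3):
        model.layers[l].noise_level = 0.3

    # Layer-wise pretraining: repeatedly apply each layer's local update to the toy inputs.
    for l in range(3):
        for i in range(10):
            model.local_update[l]([[0, 1, 0, 1]])
            model.local_update[l]([[1, 0, 1, 0]])

    # Supervised fine-tuning: integer class targets for the N-class top layer.
    model.update([[0, 1, 0, 1]], [1])
    model.update([[1, 0, 1, 0]], [0])

    # Predictions from the stacked model.
    print model.apply([[0, 1, 0, 1]])
    print model.apply([[1, 0, 1, 0]])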