annotate nnet_ops.py @ 449:2bb67e978c28

updated doc
author Joseph Turian <turian@gmail.com>
date Wed, 03 Sep 2008 17:14:49 -0400
parents 0961d4b56ec5
children 34acf8db186d
## This file contains ops that are not currently integrated in the core of theano.
## Not all of these ops have been thoroughly tested.

import theano
from theano import tensor, scalar
import numpy

############
#
# SCALAR OPS
#

class ScalarSigmoid(scalar.UnaryScalarOp):
    @staticmethod
    def st_impl(x):
        if x < -30.0:
            return 0.0
        if x > 30.0:
            return 1.0
        return 1.0 / (1.0 + numpy.exp(-x))
    def impl(self, x):
        return ScalarSigmoid.st_impl(x)
    def grad(self, (x,), (gz,)):
        y = scalar_sigmoid(x)
        return [gz * y * (1.0 - y)]
    def c_code(self, node, name, (x,), (z,), sub):
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
                %(x)s < -30.0
                ? 0.0
                : %(x)s > 30.0
                ? 1.0
                : 1.0 / (1.0 + exp(-%(x)s));""" % locals()
        raise NotImplementedError('only floating point is implemented')
scalar_sigmoid = ScalarSigmoid(scalar.upgrade_to_float, name='scalar_sigmoid')
sigmoid = tensor.Elemwise(scalar_sigmoid, name='sigmoid')

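# A minimal usage sketch (not part of the original module): it exercises the
# pure-Python path ScalarSigmoid.st_impl above and checks, numerically, the
# identity used by grad(): d sigmoid(x)/dx == sigmoid(x) * (1 - sigmoid(x)).
# The helper name _check_scalar_sigmoid is hypothetical.
def _check_scalar_sigmoid(x=0.5, eps=1e-6):
    y = ScalarSigmoid.st_impl(x)
    # finite-difference estimate of the derivative
    dy_numeric = (ScalarSigmoid.st_impl(x + eps)
                  - ScalarSigmoid.st_impl(x - eps)) / (2 * eps)
    dy_formula = y * (1.0 - y)
    assert abs(dy_numeric - dy_formula) < 1e-6
    # inputs beyond +/-30 are clamped to the asymptotes, as in st_impl
    assert ScalarSigmoid.st_impl(-31.0) == 0.0
    assert ScalarSigmoid.st_impl(31.0) == 1.0
    return y
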
class ScalarSoftplus(scalar.UnaryScalarOp):
    @staticmethod
    def static_impl(x):
        if x < -30.0:
            return 0.0
        if x > 30.0:
            return x
        return numpy.log1p(numpy.exp(x))
    def impl(self, x):
        return ScalarSoftplus.static_impl(x)
    def grad(self, (x,), (gz,)):
        return [gz * scalar_sigmoid(x)]
    def c_code(self, node, name, (x,), (z,), sub):
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
                %(x)s < -30.0
                ? 0.0
                : %(x)s > 30.0
                ? %(x)s
                : log1p(exp(%(x)s));""" % locals()
        raise NotImplementedError('only floating point x is implemented')
scalar_softplus = ScalarSoftplus(scalar.upgrade_to_float, name='scalar_softplus')
softplus = tensor.Elemwise(scalar_softplus, name='softplus')
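
# A minimal sketch (not part of the original module) illustrating the softplus
# defined above: softplus(x) = log(1 + exp(x)), clamped to 0 below -30 and to x
# above 30, and its derivative is the logistic sigmoid (which is what grad()
# returns). The helper name _check_scalar_softplus is hypothetical.
def _check_scalar_softplus(x=1.25, eps=1e-6):
    y = ScalarSoftplus.static_impl(x)
    assert abs(y - numpy.log1p(numpy.exp(x))) < 1e-12
    # d softplus(x)/dx == sigmoid(x)
    dy_numeric = (ScalarSoftplus.static_impl(x + eps)
                  - ScalarSoftplus.static_impl(x - eps)) / (2 * eps)
    assert abs(dy_numeric - ScalarSigmoid.st_impl(x)) < 1e-6
    return y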


############
#
# TENSOR OPS
#


class SoftmaxWithBias(theano.Op):
    """
    An L{Op} for the output of neural-net multiclass classifiers.

    @type x: is a matrix of floats (32 or 64)
    @type b: is a [row] vector of floats (32 or 64), length is number of cols in x

    This L{Op}'s output is softmax(x+b).
    softmax(x[i]) is the i'th distribution over len(x[i]) options.
    """

    nin = 2
    nout = 1
    def __init__(self, **kwargs):
        theano.Op.__init__(self, **kwargs)

    def make_node(self, x, b):
        x = tensor.as_tensor(x)
        b = tensor.as_tensor(b)
        if x.type.ndim != 2 \
                or x.type.dtype not in ['float32', 'float64']:
            raise ValueError('x must be 2-d tensor of floats')
        if b.type.ndim != 1 \
                or b.type.dtype not in ['float32', 'float64']:
            raise ValueError('b must be 1-d tensor of floats')

        sm = x.type.make_result()
        return theano.Apply(self, [x, b], [sm])

    def perform(self, node, input_storage, output_storage):
        x, b = input_storage
        if b.shape[0] != x.shape[1]:
            raise ValueError('b must have same number of columns as x')

        sm = numpy.zeros_like(x)
        for i in xrange(sm.shape[0]):
            row = x[i] + b
            sm[i] = numpy.exp(row - numpy.max(row))
            sm[i] *= 1.0 / numpy.sum(sm[i])
        output_storage[0][0] = sm

    def grad(self, (x, b), (g_sm,)):
        sm = softmax_with_bias(x, b)
        dx = SoftmaxWithBiasDx()(g_sm, sm)
        db = tensor.sum(dx, axis = 0)
        return dx, db

    def c_headers(self):
        return ['<iostream>']

    @staticmethod
    def c_code_template():
        # this implementation was lifted from
        # /u/bergstrj/cvs/bergstrj/src/feb07/nn.cxx

        #TODO: put this into a templated function, in the support code
        #TODO: declare the max of each row as an Op output

        #TODO: set error messages for failures in this code

        #TODO: use this to accept float32 and int32: node.inputs[0].type.dtype_specs()[1]
        init_decl = """
        npy_intp* Nx = %(x)s->dimensions;

        if (%(x)s->nd != 2)
        {
            PyErr_SetString(PyExc_ValueError, "a not 2d tensor");
            %(fail)s;
        }
        if (%(b)s->nd != 1)
        {
            PyErr_SetString(PyExc_ValueError, "b not 1d tensor");
            %(fail)s;
        }
        if (%(x)s->descr->type_num != PyArray_DOUBLE)
        {
            PyErr_SetString(PyExc_TypeError, "a not float64");
            %(fail)s;
        }
        if (%(b)s->descr->type_num != PyArray_DOUBLE)
        {
            PyErr_SetString(PyExc_TypeError, "b not float64");
            %(fail)s;
        }
        if ((%(x)s->dimensions[1] != %(b)s->dimensions[0]))
        {
            PyErr_SetString(PyExc_ValueError, "dimension mismatch in arguments");
            %(fail)s;
        }

        if ((NULL == %(sm)s)
            || (%(sm)s->dimensions[0] != %(x)s->dimensions[0])
            || (%(sm)s->dimensions[1] != %(x)s->dimensions[1]))
        {
            if (NULL != %(sm)s) Py_XDECREF(%(sm)s);
            %(sm)s = (PyArrayObject*)PyArray_SimpleNew(2, PyArray_DIMS(%(x)s), type_num_%(x)s);
            if(!%(sm)s) {
                PyErr_SetString(PyExc_MemoryError, "failed to alloc sm output");
                %(fail)s
            }
        }
        """

        begin_row_loop = """
        for (size_t i = 0; i < Nx[0]; ++i)
        {
            size_t j;
            double sum = 0.0;
            bool discount_max = false;

            const double* __restrict__ x_i = (double*)(%(x)s->data + %(x)s->strides[0] * i);
            const double* __restrict__ b_i = (double*)(%(b)s->data);
            double* __restrict__ sm_i = (double*)(%(sm)s->data + %(sm)s->strides[0] * i);
        """

        inside_row_loop = """
            npy_intp Sx = %(x)s->strides[1]/sizeof(double);
            npy_intp Sb = %(b)s->strides[0]/sizeof(double);
            npy_intp Ssm = %(sm)s->strides[1]/sizeof(double);

            size_t row_max_j=0;
            double row_max = x_i[0] + b_i[0];
            // Get the maximum value of the row
            for (j = 0; j < Nx[1]; ++j)
            {
                double row_ij = x_i[j * Sx] + b_i[j * Sb];
                row_max_j = (row_ij > row_max) ? j : row_max_j;
                row_max = (row_ij > row_max) ? row_ij : row_max;
            }

            for (j = 0; j < Nx[1]; ++j)
            {
                double row_ij = x_i[j * Sx] + b_i[j * Sb];
                double sm_ij = exp(row_ij - row_max);
                sum += sm_ij;
                sm_i[j * Ssm] = sm_ij;
            }
            if ( (0.0 == sum) || (isinf(sum)))
            {
                //that was our best...
                %(fail)s;
            }

            //cblas_dscal(x.N, 1.0 / sum, &mat_at(s,i,0), s.n);
            double sum_inv = 1.0 / sum;
            for (j = 0; j < Nx[1]; ++j)
            {
                sm_i[j * Ssm] *= sum_inv;
            }

        """

        end_row_loop = """
        }
        """

        return (init_decl, begin_row_loop, inside_row_loop, end_row_loop)


    def c_code(self, node, name, (x, b), (sm,), sub):
        code_template = ''.join(self.c_code_template())
        return code_template % dict(locals(), **sub)

softmax_with_bias = SoftmaxWithBias()
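
# A minimal pure-numpy sketch (not part of the original module) that mirrors
# what SoftmaxWithBias.perform() and its C template compute: for each row,
# subtract the maximum of x[i] + b before exponentiating, then normalize so
# the row sums to 1. The helper name _numpy_softmax_with_bias is hypothetical.
def _numpy_softmax_with_bias(x, b):
    x = numpy.asarray(x, dtype='float64')
    b = numpy.asarray(b, dtype='float64')
    out = numpy.zeros_like(x)
    for i in xrange(x.shape[0]):
        row = x[i] + b
        out[i] = numpy.exp(row - numpy.max(row))   # max-shift for stability
        out[i] /= numpy.sum(out[i])                # each row sums to 1
    return out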


class SoftmaxWithBiasDx(theano.Op):
    """Gradient wrt x of the SoftmaxWithBias Op"""
    nin = 2
    nout = 1

    def __init__(self, **kwargs):
        theano.Op.__init__(self, **kwargs)

    def make_node(self, dy, sm, **kwargs):
        dy = tensor.as_tensor(dy)
        sm = tensor.as_tensor(sm)
        return theano.Apply(self, [dy, sm], [sm.type.make_result()])

    def perform(self, node, input_storage, output_storage):
        dy, sm = input_storage
        dx = numpy.zeros_like(sm)
        #dx[i,j] = - (\sum_k dy[i,k] sm[i,k]) sm[i,j] + dy[i,j] sm[i,j]
        for i in xrange(sm.shape[0]):
            dy_times_sm_i = dy[i] * sm[i]
            dx[i] = dy_times_sm_i - sum(dy_times_sm_i) * sm[i]
        output_storage[0][0] = dx

    def grad(self, *args):
        raise NotImplementedError()

    def c_code(self, node, name, (dy, sm), (dx,), sub):
        return '''
        if ((%(dy)s->descr->type_num != PyArray_DOUBLE)
            || (%(sm)s->descr->type_num != PyArray_DOUBLE))
        {
            PyErr_SetString(PyExc_TypeError, "types should be float64, float64");
            %(fail)s;
        }
        if ((%(dy)s->nd != 2)
            || (%(sm)s->nd != 2))
        {
            PyErr_SetString(PyExc_ValueError, "rank error");
            %(fail)s;
        }
        if (%(dy)s->dimensions[0] != %(sm)s->dimensions[0])
        {
            PyErr_SetString(PyExc_ValueError, "dimension mismatch");
            %(fail)s;
        }
        if ((NULL == %(dx)s)
            || (%(dx)s->dimensions[0] != %(sm)s->dimensions[0])
            || (%(dx)s->dimensions[1] != %(sm)s->dimensions[1]))
        {
            Py_XDECREF(%(dx)s);
            %(dx)s = (PyArrayObject*) PyArray_SimpleNew(2, PyArray_DIMS(%(sm)s),
                                                        type_num_%(sm)s);
            if (!%(dx)s)
            {
                PyErr_SetString(PyExc_MemoryError, "failed to alloc dx output");
                %(fail)s;
            }
        }

        for (size_t i = 0; i < %(dx)s->dimensions[0]; ++i)
        {
            const double* __restrict__ dy_i = (double*) (%(dy)s->data + %(dy)s->strides[0] * i);
            npy_intp Sdy = %(dy)s->strides[1]/sizeof(double);
            const double* __restrict__ sm_i = (double*) (%(sm)s->data + %(sm)s->strides[0] * i);
            npy_intp Ssm = %(sm)s->strides[1]/sizeof(double);
            double* __restrict__ dx_i = (double*) (%(dx)s->data + %(dx)s->strides[0] * i);
            npy_intp Sdx = %(dx)s->strides[1]/sizeof(double);

            double sum_dy_times_sm = 0.;
            for (size_t j = 0; j < %(dx)s->dimensions[1]; ++j)
            {
                dx_i[j * Sdx] = dy_i[j * Sdy] * sm_i[j * Ssm];
                sum_dy_times_sm += dx_i[j * Sdx];
            }
            for (size_t j = 0; j < %(dx)s->dimensions[1]; ++j)
            {
                dx_i[j * Sdx] -= sum_dy_times_sm * sm_i[j * Ssm];
            }
        }
        ''' % dict(locals(), **sub)
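
# A minimal pure-numpy sketch (not part of the original module) of the formula
# implemented by SoftmaxWithBiasDx.perform() and its C code above:
#   dx[i, j] = dy[i, j] * sm[i, j] - (sum_k dy[i, k] * sm[i, k]) * sm[i, j]
# The helper name _numpy_softmax_dx is hypothetical.
def _numpy_softmax_dx(dy, sm):
    dy = numpy.asarray(dy, dtype='float64')
    sm = numpy.asarray(sm, dtype='float64')
    dx = numpy.zeros_like(sm)
    for i in xrange(sm.shape[0]):
        dy_sm = dy[i] * sm[i]
        dx[i] = dy_sm - numpy.sum(dy_sm) * sm[i]
    return dx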

def softmax(x, **kwargs):
    b = tensor.zeros_like(x[0,:])
    return softmax_with_bias(x, b, **kwargs)

class CrossentropySoftmaxArgmax1HotWithBias(theano.Op):
    """A special compound L{Op} for the output of neural-net classifiers.

    @type x: is a matrix of floats (32 or 64)
    @type b: is a [row] vector of floats (32 or 64), length is number of cols in x
    @type y_idx: a [column] vector of int (32 or 64), length is number of rows in x

    @precondition: every entry in y_idx is a valid (non-negative) column index into x

    This L{Op} has three outputs:
     - KL(softmax(x+b), y)
     - softmax(x+b)
     - argmax(x+b)

    softmax(x[i]) is the i'th distribution over len(x[i]) options
    argmax(x) is the index of x's greatest element
    y_idx[i] is an integer index, encoding a 1-hot distribution.

    In practice, when we're trying to do classification, we have one row in x
    and y_idx per example, and y_idx[i] is the index of the (correct) class of
    the i'th example.
    """
    nin = 3
    nout = 3
    def __init__(self, **kwargs):
        theano.Op.__init__(self, **kwargs)

    def make_node(self, x, b, y_idx):
        x = tensor.as_tensor(x)
        b = tensor.as_tensor(b)
        y_idx = tensor.as_tensor(y_idx)
        if x.type.ndim != 2 \
                or x.type.dtype not in ['float32', 'float64']:
            raise ValueError('x must be 2-d tensor of floats')
        if b.type.ndim != 1 \
                or b.type.dtype not in ['float32', 'float64']:
            raise ValueError('b must be 1-d tensor of floats')
        if y_idx.type.ndim != 1 \
                or y_idx.type.dtype not in ['int8', 'int16', 'int32', 'int64']:
            raise ValueError('y_idx must be 1-d tensor of ints')

        # TODO: Is this correct? It used to be y, not y_idx
        nll = tensor.Tensor(x.type.dtype,
                y_idx.type.broadcastable).make_result()
        # nll = Tensor(x.dtype, y.broadcastable)
        sm = x.type.make_result()
        am = y_idx.type.make_result()
        return theano.Apply(self, [x, b, y_idx], [nll, sm, am])
    def perform(self, node, input_storage, output_storage):
        """
        The math, where x is an input vector, and t is a target index:

            softmax(x)[i] = exp(x[i]) / sum_j(exp(x[j]))
            nll(x,t) = -log(softmax(x)[t])

        We compute this by subtracting off the max of x. This avoids numerical instability.

            m = max_j x[j]
            softmax(x)[i] = exp(x[i] - m) / sum_j(exp(x[j] - m))

            nll = -log(exp(x[t] - m) / sum_j(exp(x[j] - m)))
                = -x[t] + m + log( sum_j(exp(x[j] - m)))
        """
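        # Worked example (added commentary, not in the original source):
        # with x = [1., 2., 3.] and target t = 2, we get m = 3, so
        #   exp(x - m)    = [0.1353, 0.3679, 1.0],  sum_j = 1.5032
        #   softmax(x)[t] = 1.0 / 1.5032            = 0.6652
        #   nll           = -x[t] + m + log(sum_j)  = log(1.5032) = 0.4076
        # which matches -log(softmax(x)[t]); subtracting m changes nothing
        # mathematically, it only keeps exp() in a safe range.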
117
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
385 x, b, y_idx = input_storage
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
386 if b.shape[0] != x.shape[1]:
70
76e5c0f37165 better docs & precondition testing for cross_entropy_softmax_1hot & friends
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 69
diff changeset
387 raise ValueError('b must have same number of columns as x')
76e5c0f37165 better docs & precondition testing for cross_entropy_softmax_1hot & friends
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 69
diff changeset
388 if y_idx.shape[0] != x.shape[0]:
76e5c0f37165 better docs & precondition testing for cross_entropy_softmax_1hot & friends
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 69
diff changeset
389 raise ValueError('y_idx must have same number of rows as x')
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
390
24
2e8be9f5412b added nnet_ops
bergstrj@iro.umontreal.ca
parents:
diff changeset
391 sm = numpy.zeros_like(x) # softmax
2e8be9f5412b added nnet_ops
bergstrj@iro.umontreal.ca
parents:
diff changeset
392 nll = numpy.zeros(x.shape[0]) #nll(y | softmax(x))
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
393 am = numpy.zeros_like(y_idx)
24
2e8be9f5412b added nnet_ops
bergstrj@iro.umontreal.ca
parents:
diff changeset
394 for i in xrange(sm.shape[0]):
447
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
395 #add the bias vector to the i'th row of x
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
396 row = x[i] + b
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
397
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
398 #get the maximum value of the i'th row for a numerically safe softmax / nll
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
399 am[i] = numpy.argmax(row)
447
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
400 m = row[am[i]]
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
401
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
402 #compute the unnormalized softmax, and normalization constant
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
403 sm[i] = numpy.exp(row - m)
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
404 sum_j = numpy.sum(sm[i]) # sum_j(exp(x[j] - m))
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
405
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
406 #normalize our softmax
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
407 sm[i] *= 1.0 / sum_j
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
408
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
409 # store the nll
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
410 nll[i] = -row[y_idx[i]] + m + numpy.log(sum_j)
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
411
117
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
412 output_storage[0][0] = nll
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
413 output_storage[1][0] = sm
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
414 output_storage[2][0] = am
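
(Editor's aside; not part of the original file.) The docstring above claims that subtracting the row maximum leaves the nll unchanged while keeping the computation finite. A minimal numpy sketch of that claim, assuming nothing beyond numpy itself:

import numpy

x = numpy.array([2.0, -1.0, 0.5])
t = 0
m = x.max()

naive  = -numpy.log(numpy.exp(x[t]) / numpy.exp(x).sum())
stable = -x[t] + m + numpy.log(numpy.exp(x - m).sum())
assert numpy.allclose(naive, stable)          # same value for moderate inputs

# for large inputs exp() overflows, so the naive form degenerates to nan/inf
# (numpy emits overflow warnings), while the max-shifted form stays finite
big = numpy.array([1000.0, 999.0, 998.0])
naive_big  = -numpy.log(numpy.exp(big[0]) / numpy.exp(big).sum())
stable_big = -big[0] + big.max() + numpy.log(numpy.exp(big - big.max()).sum())
assert not numpy.isfinite(naive_big)
assert numpy.isfinite(stable_big)
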
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
415 def grad(self, (x, b, y_idx), (g_nll, g_sm, g_am)):
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
416 if g_sm is not None or g_am is not None:
24
2e8be9f5412b added nnet_ops
bergstrj@iro.umontreal.ca
parents:
diff changeset
417 raise NotImplementedError()
70
76e5c0f37165 better docs & precondition testing for cross_entropy_softmax_1hot & friends
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 69
diff changeset
418 nll, sm = crossentropy_softmax_1hot_with_bias(x, b, y_idx)
117
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
419 dx = CrossentropySoftmax1HotWithBiasDx()(g_nll, sm, y_idx)
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
420 db = tensor.sum(dx, axis = [0])
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
421 return dx, db, None
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
422
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
423 def c_headers(self): return ['<iostream>']
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
424
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
425 @staticmethod
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
426 def c_code_template():
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
427 # this implementation was lifted from
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
428 # /u/bergstrj/cvs/bergstrj/src/feb07/nn.cxx
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
429
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
430 #TODO: put this into a templated function, in the support code
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
431 #TODO: declare the max of each row as an Op output
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
432
32
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
433 #TODO: set error messages for failures in this code
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
434
184
9a2aecc57a79 added TODO to nnet_ops
Olivier Breuleux <breuleuo@iro.umontreal.ca>
parents: 181
diff changeset
435 #TODO: use this to accept float32 and int32: node.inputs[0].type.dtype_specs()[1]
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
436 (init_decl, begin_row_loop, inside_row_loop, end_row_loop) = \
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
437 SoftmaxWithBias.c_code_template()
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
438 return (init_decl,
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
439 """
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
440 if (%(y_idx)s->nd != 1)
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
441 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
442 PyErr_SetString(PyExc_ValueError, "y_idx not 1d tensor");
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
443 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
444 }
185
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
445 if ((%(y_idx)s->descr->type_num != PyArray_INT64)
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
446 && (%(y_idx)s->descr->type_num != PyArray_INT32)
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
447 && (%(y_idx)s->descr->type_num != PyArray_INT16)
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
448 && (%(y_idx)s->descr->type_num != PyArray_INT8))
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
449 {
185
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
450 PyErr_SetString(PyExc_TypeError, "y_idx not int8, int16, int32, or int64");
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
451 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
452 }
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
453 if (%(x)s->dimensions[0] != %(y_idx)s->dimensions[0])
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
454 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
455 PyErr_SetString(PyExc_ValueError, "dimension mismatch in arguments");
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
456 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
457 }
34
1b152f46ad0c consolidated code
bergstrj@iro.umontreal.ca
parents: 32
diff changeset
458
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
459 if ((NULL == %(nll)s) //initial condition
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
460 || (%(nll)s->dimensions[0] != %(y_idx)s->dimensions[0]))
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
461 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
462 if (NULL != %(nll)s) Py_XDECREF(%(nll)s);
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
463 %(nll)s = (PyArrayObject*)PyArray_SimpleNew(1, PyArray_DIMS(%(y_idx)s), type_num_%(x)s);
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
464 if(!%(nll)s)
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
465 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
466 PyErr_SetString(PyExc_MemoryError, "failed to alloc nll output");
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
467 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
468 }
34
1b152f46ad0c consolidated code
bergstrj@iro.umontreal.ca
parents: 32
diff changeset
469 }
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
470 if ((NULL == %(am)s)
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
471 || (%(am)s->dimensions[0] != %(y_idx)s->dimensions[0]))
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
472 {
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
473 Py_XDECREF(%(am)s);
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
474 %(am)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(y_idx)s), type_num_%(y_idx)s);
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
475 if(!%(am)s)
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
476 {
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
477 PyErr_SetString(PyExc_MemoryError, "failed to alloc am output");
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
478 %(fail)s;
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
479 }
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
480 }
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
481 """,
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
482 begin_row_loop,
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
483 """
185
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
484 const %(y_idx_type)s y_i = ((%(y_idx_type)s*)(%(y_idx)s->data + %(y_idx)s->strides[0] * i))[0];
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
485 double* __restrict__ nll_i = (double*)(%(nll)s->data + %(nll)s->strides[0] * i);
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
486 %(am_type)s* __restrict__ am_i = (%(am_type)s*) (%(am)s->data + %(am)s->strides[0] * i);
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
487 """,
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
488 inside_row_loop,
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
489 """
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
490 nll_i[0] = - x_i[y_i*Sx]
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
491 - b_i[y_i*Sb]
447
0392b666320a fixed c typos, math error in nnet_ops.py
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 446
diff changeset
492 + row_max
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
493 + log(sum);
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
494 am_i[0] = row_max_j;
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
495 """,
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
496 end_row_loop)
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
497
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
498
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
499 def c_code(self, node, name, (x, b, y_idx), (nll, sm, am), sub):
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
500 y_idx_type = node.inputs[2].type.dtype_specs()[1]
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
501 am_type = y_idx_type
440
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
502 code_template = ''.join(self.c_code_template())
18dbc1c11647 Work on softmax operators
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 419
diff changeset
503 return code_template % dict(locals(), **sub)
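
(Editor's aside; not part of the original file.) c_code fills the C template with plain Python %-formatting: dict(locals(), **sub) merges the local names (x, b, y_idx, nll, sm, am, y_idx_type, am_type) with the compiler-supplied substitutions such as fail, so every %(name)s placeholder gets a value. A tiny illustration with hypothetical placeholder values, just to show the mechanism:

template = "if (%(y_idx)s->nd != 1) { %(fail)s; }"
sub = {'fail': 'goto fail_label'}
filled = template % dict({'y_idx': 'py_y_idx'}, **sub)
assert filled == "if (py_y_idx->nd != 1) { goto fail_label; }"
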
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
504
117
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
505 class CrossentropySoftmax1HotWithBiasDx (theano.Op):
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
506 nin=3
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
507 nout=1
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
508 """Gradient wrt x of the CrossentropySoftmax1Hot Op"""
117
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
509 def __init__(self, **kwargs):
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
510 theano.Op.__init__(self,**kwargs)
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
511 def make_node(self, dy, sm, y_idx,**kwargs):
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
512 dy = tensor.as_tensor(dy)
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
513 sm = tensor.as_tensor(sm)
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
514 y_idx = tensor.as_tensor(y_idx)
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
515 return theano.Apply(self, [dy, sm, y_idx],[sm.type.make_result()])
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
516 def perform(self, node, input_storage, output_storage):
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
517 dy,sm,y_idx = input_storage
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
518 dx = numpy.zeros_like(sm)
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
519 for i in xrange(sm.shape[0]):
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
520 dx[i] = dy[i] * sm[i] #vector scale
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
521 dx[i, y_idx[i]] -= dy[i] #scalar decrement
117
3ef569b92fba ported nnet_ops to new theano
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 70
diff changeset
522 output_storage[0][0] = dx
30
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
523 def grad(self, *args):
bf0145fa73e8 added c implementation for CrossentropySoftmax1Hot
bergstrj@iro.umontreal.ca
parents: 25
diff changeset
524 raise NotImplementedError()
181
1b06bc2c3ca9 fixed c_code for the ops in nnet_ops.py
Olivier Breuleux <breuleuo@iro.umontreal.ca>
parents: 121
diff changeset
525 def c_code(self, node, name, (dnll, sm, y_idx), (dx,), sub):
185
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
526 y_idx_type = node.inputs[2].type.dtype_specs()[1]
32
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
527 return """
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
528
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
529 if ((%(dnll)s->descr->type_num != PyArray_DOUBLE)
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
530 || (%(sm)s->descr->type_num != PyArray_DOUBLE)
185
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
531 )
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
532 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
533 PyErr_SetString(PyExc_TypeError, "dnll and sm should be float64");
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
534 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
535 }
185
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
536 if ((%(y_idx)s->descr->type_num != PyArray_INT64)
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
537 && (%(y_idx)s->descr->type_num != PyArray_INT32)
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
538 && (%(y_idx)s->descr->type_num != PyArray_INT16)
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
539 && (%(y_idx)s->descr->type_num != PyArray_INT8))
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
540 {
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
541 PyErr_SetString(PyExc_TypeError, "y_idx not int8, int16, int32, or int64");
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
542 %(fail)s;
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
543 }
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
544 if ((%(dnll)s->nd != 1)
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
545 || (%(sm)s->nd != 2)
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
546 || (%(y_idx)s->nd != 1))
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
547 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
548 PyErr_SetString(PyExc_ValueError, "rank error");
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
549 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
550 }
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
551 if ((%(dnll)s->dimensions[0] != %(sm)s->dimensions[0])
68
315eb36ff954 fixed typo in crossentropy_dx.c_code
bergstra@is23.m
parents: 67
diff changeset
552 || (%(dnll)s->dimensions[0] != %(y_idx)s->dimensions[0]))
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
553 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
554 PyErr_SetString(PyExc_ValueError, "dimension mismatch");
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
555 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
556 }
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
557 if ((NULL == %(dx)s)
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
558 || (%(dx)s->dimensions[0] != %(sm)s->dimensions[0])
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
559 || (%(dx)s->dimensions[1] != %(sm)s->dimensions[1]))
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
560 {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
561 if (NULL != %(dx)s) Py_XDECREF(%(dx)s);
68
315eb36ff954 fixed typo in crossentropy_dx.c_code
bergstra@is23.m
parents: 67
diff changeset
562 %(dx)s = (PyArrayObject*)PyArray_SimpleNew(2, PyArray_DIMS(%(sm)s), type_num_%(sm)s);
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
563 if(!%(dx)s) {
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
564 PyErr_SetString(PyExc_MemoryError, "failed to alloc dx output");
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
565 %(fail)s;
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
566 }
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
567 }
24
2e8be9f5412b added nnet_ops
bergstrj@iro.umontreal.ca
parents:
diff changeset
568
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
569 for (size_t i = 0; i < %(dx)s->dimensions[0]; ++i)
32
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
570 {
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
571 const double dnll_i = ((double*)(%(dnll)s->data + %(dnll)s->strides[0] * i))[0];
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
572
185
3d953844abd3 support for more int types in crossentropysoftmax1hot
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 184
diff changeset
573 const %(y_idx_type)s y_i = ((%(y_idx_type)s*)(%(y_idx)s->data + %(y_idx)s->strides[0] * i))[0];
32
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
574
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
575 const double* __restrict__ sm_i = (double*)(%(sm)s->data + %(sm)s->strides[0] * i);
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
576 npy_intp Ssm = %(sm)s->strides[1]/sizeof(double);
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
577
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
578 double* __restrict__ dx_i = (double*)(%(dx)s->data + %(dx)s->strides[0] * i);
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
579 npy_intp Sdx = %(dx)s->strides[1]/sizeof(double);
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
580
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
581 for (size_t j = 0; j < %(dx)s->dimensions[1]; ++j)
32
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
582 {
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
583 dx_i[j * Sdx] = dnll_i * sm_i[j * Ssm];
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
584 }
67
810a8e3c85e1 fixed horrible memory leak from crossentropy...
bergstra@is23.m
parents: 34
diff changeset
585 if (y_i >= %(dx)s->dimensions[1])
32
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
586 {
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
587 %(fail)s;
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
588 }
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
589 dx_i[y_i * Sdx] -= dnll_i;
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
590 }
039c0f249859 added C impl for softmax dx
bergstrj@iro.umontreal.ca
parents: 30
diff changeset
591 """ % dict(locals(), **sub)
69
8c2607f387e6 added softplus, elaborated sigmoid
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 68
diff changeset
592
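
(Editor's aside; not part of the original file.) The gradient implemented by CrossentropySoftmax1HotWithBiasDx, both in the Python perform above and in the C loop, is dx[i,j] = dnll[i]*sm[i,j] with dnll[i] subtracted at the target column. A small numpy sketch checking that formula against centered finite differences of the stable nll:

import numpy

def _nll_rows(x, y_idx):
    # numerically stable -log softmax(x)[i, y_idx[i]], one value per row
    m = x.max(axis=1)
    lse = m + numpy.log(numpy.exp(x - m[:, None]).sum(axis=1))
    return -x[numpy.arange(x.shape[0]), y_idx] + lse

rng = numpy.random.RandomState(0)
x = rng.randn(4, 5)
y_idx = rng.randint(0, 5, size=4)
dnll = rng.randn(4)                        # upstream gradient on the nll vector

sm = numpy.exp(x - x.max(axis=1)[:, None])
sm /= sm.sum(axis=1)[:, None]

dx = dnll[:, None] * sm                    # vector scale
dx[numpy.arange(4), y_idx] -= dnll         # scalar decrement at the target

eps = 1e-6
num = numpy.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        xp = x.copy(); xp[i, j] += eps
        xm = x.copy(); xm[i, j] -= eps
        num[i, j] = numpy.dot(dnll, _nll_rows(xp, y_idx) - _nll_rows(xm, y_idx)) / (2 * eps)

assert numpy.allclose(dx, num, atol=1e-5)
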
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
593 crossentropy_softmax_argmax_1hot_with_bias = \
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
594 CrossentropySoftmaxArgmax1HotWithBias()
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
595
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
596 def crossentropy_softmax_1hot_with_bias(x, b, y_idx, **kwargs):
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
597 return crossentropy_softmax_argmax_1hot_with_bias(x, b, y_idx, **kwargs)[0:2]
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
598
70
76e5c0f37165 better docs & precondition testing for cross_entropy_softmax_1hot & friends
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 69
diff changeset
599 def crossentropy_softmax_1hot(x, y_idx, **kwargs):
76e5c0f37165 better docs & precondition testing for cross_entropy_softmax_1hot & friends
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 69
diff changeset
600 b = tensor.zeros_like(x[0,:])
76e5c0f37165 better docs & precondition testing for cross_entropy_softmax_1hot & friends
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 69
diff changeset
601 return crossentropy_softmax_1hot_with_bias(x, b, y_idx, **kwargs)
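
(Editor's aside; not part of the original file.) The "1hot" in these names indicates that the target is passed as an integer index y_idx rather than as an explicit one-hot vector; the two formulations of the crossentropy agree, as this small numpy check illustrates:

import numpy

sm_row = numpy.array([0.7, 0.2, 0.1])     # one row of a softmax output
t = 1

onehot = numpy.zeros(3)
onehot[t] = 1.0

nll_from_index  = -numpy.log(sm_row[t])
nll_from_onehot = -numpy.dot(onehot, numpy.log(sm_row))
assert numpy.allclose(nll_from_index, nll_from_onehot)
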
382
b4efd192d880 Moved xent loss to nnet_ups
Joseph Turian <turian@gmail.com>
parents: 381
diff changeset
602
446
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
603
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
604 class MultinomialCrossentropy1Hot(theano.Op):
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
605 pass
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
606
23960ee12b52 Add argmax as output of the big softmax-NLL thingy.
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 443
diff changeset
607
382
b4efd192d880 Moved xent loss to nnet_ups
Joseph Turian <turian@gmail.com>
parents: 381
diff changeset
608 def binary_crossentropy(output, target):
b4efd192d880 Moved xent loss to nnet_ups
Joseph Turian <turian@gmail.com>
parents: 381
diff changeset
609 """
b4efd192d880 Moved xent loss to nnet_ups
Joseph Turian <turian@gmail.com>
parents: 381
diff changeset
610 Compute the crossentropy of binary output wrt binary target.
b4efd192d880 Moved xent loss to nnet_ups
Joseph Turian <turian@gmail.com>
parents: 381
diff changeset
611 @note: We do not sum; crossentropy is computed componentwise.
b4efd192d880 Moved xent loss to nnet_ups
Joseph Turian <turian@gmail.com>
parents: 381
diff changeset
612 @todo: Rewrite as a scalar, and then broadcast to tensor.
448
0961d4b56ec5 Added some documentation
Joseph Turian <turian@gmail.com>
parents: 447
diff changeset
613 @todo: This is essentially duplicated as cost.cross_entropy
449
2bb67e978c28 updated doc
Joseph Turian <turian@gmail.com>
parents: 448
diff changeset
614 @warning: OUTPUT and TARGET are reversed in cost.cross_entropy
382
b4efd192d880 Moved xent loss to nnet_ups
Joseph Turian <turian@gmail.com>
parents: 381
diff changeset
615 """
383
344d1f874af7 Small fix
Joseph Turian <turian@gmail.com>
parents: 382
diff changeset
616 return -(target * tensor.log(output) + (1 - target) * tensor.log(1 - output))
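
(Editor's aside; not part of the original file.) A minimal numpy rendering of the expression above, showing that the result keeps the shape of output and target, one crossentropy value per component:

import numpy

output = numpy.array([[0.9, 0.2], [0.6, 0.4]])   # predicted probabilities
target = numpy.array([[1.0, 0.0], [1.0, 1.0]])   # binary targets

xent = -(target * numpy.log(output) + (1 - target) * numpy.log(1 - output))
assert xent.shape == output.shape                # no summing is done
assert numpy.allclose(xent[0, 0], -numpy.log(0.9))
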
419
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
617
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
618
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
619
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
620 class Prepend_scalar_constant_to_each_row(theano.Op):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
621 def __init__(self, val = 0):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
622 if isinstance(val, float):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
623 val = scalar.constant(val)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
624 self.val = val
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
625
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
626 def make_node(self, mat):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
627 #check type of input
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
628 if not isinstance(mat,theano.Result) or not mat.type==tensor.matrix().type:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
629 raise TypeError("Expected a matrix as input")
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
630 x = tensor.as_tensor(mat)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
631 y = tensor.as_tensor(self.val)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
632 if x.type.dtype != y.type.dtype:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
633 raise TypeError("the value to prepend doesn't have the same dtype as the matrix")
443
060c12314734 Hopefully last bugfix in Softmax
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 442
diff changeset
634
419
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
635 node = theano.Apply(op=self, inputs=[mat], outputs=[tensor.matrix()])
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
636 return node
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
637
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
638 def perform(self, node, (mat, ), (output, )):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
639 new_shape=(mat.shape[0],mat.shape[1]+1)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
640 if output[0] is None:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
641 output[0]=numpy.empty(new_shape,dtype=mat.dtype)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
642 out=output[0]
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
643 else:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
644 if output[0].shape!=new_shape:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
645 try:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
646 output[0].resize(new_shape)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
647 except:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
648 output[0]=numpy.empty(new_shape, dtype=mat.dtype)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
649 out=output[0]
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
650
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
651 out[:,0].fill(self.val.data)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
652 out[:,1:]=mat
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
653
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
654 def grad(self, (mat,), (goutput,)):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
655 return goutput[:,1:]
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
656
443
060c12314734 Hopefully last bugfix in Softmax
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 442
diff changeset
657 class Prepend_scalar_to_each_row(theano.Op):
419
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
658 def make_node(self, val, mat):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
659 #check type of input
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
660 if isinstance(val, float):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
661 val = scalar.constant(val)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
662 if not isinstance(mat,theano.Result) or not mat.type==tensor.matrix().type:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
663 raise TypeError("Expected a matrix as input")
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
664 x = tensor.as_tensor(mat)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
665 y = tensor.as_tensor(val)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
666 if x.type.dtype != y.type.dtype:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
667 raise TypeError("the value to prepend doesn't have the same dtype as the matrix")
443
060c12314734 Hopefully last bugfix in Softmax
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 442
diff changeset
668
419
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
669 node = theano.Apply(op=self, inputs=[val,mat], outputs=[tensor.matrix()])
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
670 return node
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
671
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
672 def perform(self, node, (val,mat), (output, )):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
673 new_shape=(mat.shape[0],mat.shape[1]+1)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
674 if output[0] is None:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
675 output[0]=numpy.empty(new_shape,dtype=mat.dtype)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
676 out=output[0]
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
677 else:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
678 if output[0].shape!=new_shape:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
679 try:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
680 output[0].resize(new_shape)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
681 except:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
682 output[0]=numpy.empty(new_shape, dtype=mat.dtype)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
683 out=output[0]
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
684 out[:,0].fill(val)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
685 out[:,1:]=mat
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
686
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
687 def grad(self, (val, mat), (goutput,)):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
688 return tensor.sum(goutput[:,0]), goutput[:,1:] # val is a scalar broadcast to every row, so sum its gradient
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
689
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
690 prepend_scalar_to_each_row = Prepend_scalar_to_each_row()
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
691 prepend_0_to_each_row = Prepend_scalar_constant_to_each_row(0.)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
692 prepend_1_to_each_row = Prepend_scalar_constant_to_each_row(1.)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
693
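
(Editor's aside; not part of the original file.) What the two prepend ops compute, written directly in numpy: a column holding the scalar goes in front of the matrix, so the gradient wrt the matrix is the remaining columns of the output gradient and, for the non-constant variant, the gradient wrt the scalar is the sum of the first column:

import numpy

mat = numpy.arange(6.0).reshape(2, 3)
val = 1.0

out = numpy.empty((mat.shape[0], mat.shape[1] + 1), dtype=mat.dtype)
out[:, 0].fill(val)
out[:, 1:] = mat

goutput = numpy.ones_like(out)          # some upstream gradient
gmat = goutput[:, 1:]                   # gradient wrt mat
gval = goutput[:, 0].sum()              # gradient wrt the prepended scalar
assert gmat.shape == mat.shape
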
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
694 class solve(theano.Op):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
695 """
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
696 Find the solution to the linear equation Ax=b,
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
697 where A is a 2d matrix and b is a 1d or 2d array.
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
698 It uses numpy.linalg.solve to find the solution.
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
699 """
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
700
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
701 def make_node(self, A, b):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
702 if not isinstance(A, theano.Result) or not A.type==tensor.matrix().type:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
703 raise TypeError("We expected that A had a matrix type")
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
704 if not isinstance(b, theano.Result) or not b.type==tensor.matrix().type:
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
705 raise TypeError("We expected that b had a matrix type")
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
706
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
707 node = theano.Apply(op=self, inputs=[A, b], outputs=[tensor.matrix()])
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
708 return node
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
709
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
710 def perform(self, node, (A, b), (output, )):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
711 ret = numpy.linalg.solve(A, b)
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
712 output[0]=ret
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
713
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
714 def grad(self, (A, b), (goutput,)):
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
715 raise NotImplementedError()
43d9aa93934e added other_ops.py to nnet_ops; added basic tests, no docs.
James Bergstra <bergstrj@iro.umontreal.ca>
parents: 383
diff changeset
716
443
060c12314734 Hopefully last bugfix in Softmax
Pascal Lamblin <lamblinp@iro.umontreal.ca>
parents: 442
diff changeset
717
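
(Editor's aside; not part of the original file.) What solve.perform computes, checked directly against numpy.linalg.solve:

import numpy

A = numpy.array([[3.0, 1.0], [1.0, 2.0]])
b = numpy.array([[9.0], [8.0]])

x = numpy.linalg.solve(A, b)
assert numpy.allclose(numpy.dot(A, x), b)
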