pylearn: comparison of onehotop.py.scalar between 355:430c9e92cd23 and 356:18702ceb2096
Added more functions
author: Joseph Turian <turian@iro.umontreal.ca>
date:   Thu, 19 Jun 2008 16:18:37 -0400
1 """ | |
2 One hot Op | |
3 """ | |
4 | |
5 #from theano import tensor | |
6 from theano.tensor import as_tensor, Tensor | |
7 #from theano import scalar | |
8 from theano.scalar import as_scalar | |
9 from theano.gof import op | |
10 from theano.gof.graph import Apply | |
11 | |
12 import numpy | |
13 | |
class OneHot(op.Op):
    """
    Construct a matrix of one-hot row vectors out of x and y.

    @todo: Document inputs and outputs
    @todo: Use 'bool' as output dtype? Or, at least 'int64'? Not float64!
    @todo: Use 'bool' as output dtype, not 'int64'?
    @todo: Allow this to operate on column vectors (Tensor)
    @todo: Describe better.
    @todo: What type is y?
    @todo: What about operating on L{Scalar}s?
    """

    def make_node(self, x, y):
        """
        @type x: Vector L{Tensor} of integers
        @param x: For each row of the output, the index of the entry that is one.
        @type y: Integer L{Scalar}
        @param y: The length (#columns) of the one-hot vectors.
        @return: A L{Tensor} of one-hot vectors

        @precondition: x < y for all entries of x
        @todo: Check that x and y are int types
        """
        #x = tensor.as_tensor(x)
        #y = scalar.as_scalar(y)
        x = as_tensor(x)
        y = as_scalar(y)
        #assert x.dtype[0:3] == "int"
        #assert y.dtype[0:3] == "int"
        inputs = [x, y]
        ##outputs = [tensor.Tensor("int64", broadcastable=[False, False])]
        #outputs = [tensor.Tensor("float64", broadcastable=[False, False])]
        #outputs = [Tensor("int64", broadcastable=[False, False])]
        outputs = [Tensor("float64", broadcastable=[False, False]).make_result()]
        node = Apply(op = self, inputs = inputs, outputs = outputs)
        return node

    def perform(self, node, (x, y), (out, )):
        assert x.dtype == "int64"
        assert type(y) == numpy.int64
        assert x.ndim == 1
        #out = numpy.zeros((x.shape[0], y), dtype="int64")
        # Allocate an all-zero matrix with one row per entry of x, then set
        # column x[c] of row c to one.
        out[0] = numpy.zeros((x.shape[0], y), dtype="float64")
        for c in range(x.shape[0]):
            assert x[c] < y
            out[0][c, x[c]] = 1

    def grad(self, (x, y), (out_gradient, )):
        # The output is not differentiable with respect to either input.
        return None, None
one_hot = OneHot()
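
For reference, a minimal plain-NumPy sketch of the computation that OneHot.perform carries out; it is not part of the changeset, and the sample values for x and y are assumptions chosen purely for illustration.

# Illustrative only: a plain-NumPy restatement of the loop in OneHot.perform.
# The sample values of x and y below are assumptions made for this sketch.
import numpy

x = numpy.asarray([0, 2, 1], dtype="int64")   # per-row index of the hot entry
y = numpy.int64(4)                            # length (#columns) of each one-hot row

out = numpy.zeros((x.shape[0], y), dtype="float64")
for c in range(x.shape[0]):
    assert x[c] < y            # precondition stated in make_node's docstring
    out[c, x[c]] = 1

# out is now:
# [[ 1.  0.  0.  0.]
#  [ 0.  0.  1.  0.]
#  [ 0.  1.  0.  0.]]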