Mercurial > pylearn
comparison onehotop.py @ 356:18702ceb2096
Added more functions
author    Joseph Turian <turian@iro.umontreal.ca>
date      Thu, 19 Jun 2008 16:18:37 -0400
parents
children
comparing 355:430c9e92cd23 with 356:18702ceb2096

1 """ | |
2 One hot Op | |
3 """ | |
4 | |
5 #from theano import tensor | |
6 from theano.tensor import as_tensor, Tensor | |
7 from theano.gof import op | |
8 from theano.gof.graph import Apply | |
9 | |
10 import numpy | |
11 | |
class OneHot(op.Op):
    """
    Construct a matrix of one-hot row vectors: row c is all zeros except
    for a one in column x[c], with y columns in total.

    @todo: Document inputs and outputs
    @todo: Use 'bool' as output dtype? Or, at least 'int64'? Not float64!
    @todo: Use 'bool' as output dtype, not 'int64'?
    @todo: Allow this to operate on column vectors (Tensor)
    @todo: Describe better.
    """

    def make_node(self, x, y):
        """
        @type x: Vector L{Tensor} of integers
        @param x: The entries of the one-hot vector to be one.
        @type y: Integer scalar L{Tensor}
        @param y: The length (#columns) of the one-hot vectors.
        @return: A L{Tensor} of one-hot vectors

        @precondition: x < y for all entries of x
        @todo: Check that x and y are int types
        """
        x = as_tensor(x)
        y = as_tensor(y)
        #assert x.dtype[0:3] == "int"
        #assert y.dtype[0:3] == "int"
        inputs = [x, y]
        ##outputs = [tensor.Tensor("int64", broadcastable=[False, False])]
        #outputs = [tensor.Tensor("float64", broadcastable=[False, False])]
        #outputs = [Tensor("int64", broadcastable=[False, False])]
        outputs = [Tensor("float64", broadcastable=[False, False]).make_result()]
        node = Apply(op = self, inputs = inputs, outputs = outputs)
        return node

    def perform(self, node, (x, y), (out, )):
        assert x.dtype == "int64" or x.dtype == "int32"
        assert x.ndim == 1
        assert y.dtype == "int64" or y.dtype == "int32"
        assert y.ndim == 0
        out[0] = numpy.zeros((x.shape[0], y), dtype="float64")
        for c in range(x.shape[0]):
            assert x[c] < y
            out[0][c, x[c]] = 1

    def grad(self, (x, y), (out_gradient, )):
        return None, None
one_hot = OneHot()
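
For reference, the dense matrix that perform builds (and that grad declines to differentiate, returning None for both integer inputs) can be reproduced with plain NumPy, outside the Theano graph machinery. The sketch below is illustrative only; the helper name one_hot_numpy is not part of pylearn or Theano.

    import numpy

    def one_hot_numpy(x, y):
        # Dense equivalent of OneHot.perform: row c is all zeros except
        # for a 1.0 in column x[c]; y is the number of columns.
        x = numpy.asarray(x)
        assert x.ndim == 1 and (x < y).all()
        out = numpy.zeros((x.shape[0], y), dtype="float64")
        out[numpy.arange(x.shape[0]), x] = 1
        return out

    # one_hot_numpy([0, 2, 1], 4) ->
    # [[ 1.  0.  0.  0.]
    #  [ 0.  0.  1.  0.]
    #  [ 0.  1.  0.  0.]]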