Mercurial > pylearn
diff mlp_factory_approach.py @ 299:eded3cb54930
small bug fixed
author:   Thierry Bertin-Mahieux <bertinmt@iro.umontreal.ca>
date:     Fri, 06 Jun 2008 17:58:45 -0400
parents:  ae0a8345869b
children: 6ead65d30f1e
line wrap: on
line diff
--- a/mlp_factory_approach.py	Fri Jun 06 17:55:14 2008 -0400
+++ b/mlp_factory_approach.py	Fri Jun 06 17:58:45 2008 -0400
@@ -4,7 +4,8 @@
 import theano
 from theano import tensor as T
-from pylearn import dataset, nnet_ops, stopper, LookupList, filetensor
+import dataset, nnet_ops, stopper, filetensor
+from lookup_list import LookupList
 
 class AbstractFunction (Exception): pass
@@ -54,7 +55,9 @@
         return d[key]
 
     def update_minibatch(self, minibatch):
-        #assert isinstance(minibatch, LookupList) # why false???
+        if not isinstance(minibatch, LookupList):
+            print type(minibatch)
+        assert isinstance(minibatch, LookupList)
         self.update_fn(minibatch['input'], minibatch['target'], *self.params)
 
     def update(self, dataset,
@@ -216,6 +219,7 @@
     l2coef = T.constant(l2coef_val)
     input = T.matrix() # n_examples x n_inputs
     target = T.ivector() # len: n_examples
+    #target = T.matrix()
     W2, b2 = T.matrix(), T.vector()
     W1, b1 = T.matrix(), T.vector()
@@ -224,7 +228,7 @@
     params = [W1, b1, W2, b2]
     activations = b2 + T.dot(hid, W2)
-    nll, predictions = nnet_ops.crossentropy_softmax_1hot(activations, target)
+    nll, predictions = nnet_ops.crossentropy_softmax_1hot(activations, target )
     regularization = l2coef * T.sum(W2*W2) + hid_regularization
     output_class = T.argmax(activations,1)
     loss_01 = T.neq(output_class, target)