diff mlp.py @ 178:4090779e39a9

merged
author James Bergstra <bergstrj@iro.umontreal.ca>
date Tue, 13 May 2008 15:12:20 -0400
parents ae5651a3696b
children 9911d2cc3c01
--- a/mlp.py	Tue May 13 15:11:47 2008 -0400
+++ b/mlp.py	Tue May 13 15:12:20 2008 -0400
@@ -10,6 +10,21 @@
 from nnet_ops import *
 import math
 
+def sum_l2_cost(*params):
+    """Return the sum of squared elements (an L2 penalty) over all params."""
+    rval = t.sum(params[0] * params[0])
+    for p in params[1:]:
+        rval = rval + t.sum(p * p)
+    return rval
+
+def prediction(w, b, v, c, x):
+    return t.dot(t.tanh(t.dot(x, w) + b), v) + c
+def nll(w, b, v, c, x, y):
+    return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[0]
+def output(w, b, v, c, x, y):
+    return crossentropy_softmax_1hot(prediction(w, b, v, c, x), y)[1]
+
+
 
 class OneHiddenLayerNNetClassifier(OnlineGradientTLearner):
     """
@@ -67,7 +82,6 @@
        - 'regularization_term'
 
     """
-
     def __init__(self,n_hidden,n_classes,learning_rate,max_n_epochs,L2_regularizer=0,init_range=1.,n_inputs=None,minibatch_size=None):
         self._n_inputs = n_inputs
         self._n_outputs = n_classes
@@ -142,6 +156,19 @@
         self._n_epochs +=1
         return self._n_epochs>=self._max_n_epochs
 
+    def updateMinibatch(self,minibatch):
+        # make sure all required fields are allocated and initialized
+        self.allocate(minibatch)
+        # collect the attribute values and minibatch fields that the
+        # compiled update function takes as inputs
+        input_attributes = self.names2attributes(self.updateMinibatchInputAttributes())
+        input_fields = minibatch(*self.updateMinibatchInputFields())
+        # run one update step and write the results back into the
+        # corresponding attributes
+        results = self.update_minibatch_function(*(input_attributes+input_fields))
+        self.setAttributes(self.updateMinibatchOutputAttributes(),
+                           results)
+
 class MLP(MinibatchUpdatesTLearner):
     """
     Implement a feedforward multi-layer perceptron, with or without L1 and/or L2 regularization.
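
The new `updateMinibatch` method above is the per-minibatch step of the
learner: it allocates storage on first use, gathers the current attribute
values and the requested minibatch fields, pushes them through the compiled
`update_minibatch_function`, and writes the results back as attributes. A
rough sketch of a driver loop that would exercise it, assuming a
`training_minibatches` iterable whose items support the
`minibatch(*fieldnames)` call seen above (everything in the loop besides the
constructor arguments is illustrative, not part of this changeset):

    # hypothetical driver; the real loop lives in the TLearner base classes
    learner = OneHiddenLayerNNetClassifier(n_hidden=10, n_classes=3,
                                           learning_rate=0.01,
                                           max_n_epochs=100)
    for epoch in xrange(100):
        for minibatch in training_minibatches:
            learner.updateMinibatch(minibatch)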