diff deep/convolutional_dae/stacked_convolutional_dae.py @ 248:7e6fecabb656

Optimized the ConvOp calls by specifying additional parameters (unroll_kern and unroll_batch). Specified the image shape of the da_conv layer.
author humel
date Tue, 16 Mar 2010 14:46:25 -0400
parents 4d109b648c31
children 1bf046c0c84a 3919c71e3091
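
The change passes unroll_kern=4 and unroll_batch=4 to Theano's ConvOp and supplies static image shapes, which lets the op generate specialized, loop-unrolled C code instead of taking the generic convolution path. Consistent with this, the default kernel counts below go from 2 to 4, presumably so that unroll_kern=4 evenly divides the number of kernels (batch_size=100 is likewise divisible by unroll_batch=4). A minimal standalone sketch of such a call, using the first layer's shapes; the variable names here are illustrative, not from the patch:

import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv

batch_size = 100
image_shape  = (batch_size, 1, 32, 32)   # (batch, input maps, rows, cols)
filter_shape = (4, 1, 5, 5)              # (kernels, input maps, rows, cols)

x = T.tensor4('x')
W = theano.shared(numpy.zeros(filter_shape, dtype=theano.config.floatX))

# With both shapes known at graph-construction time, ConvOp can unroll its
# inner loops; the unroll factors are assumed to divide the kernel count
# and the batch size respectively.
out = conv.conv2d(x, W, filter_shape=filter_shape, image_shape=image_shape,
                  unroll_kern=4, unroll_batch=4, border_mode='valid')
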
line diff
--- a/deep/convolutional_dae/stacked_convolutional_dae.py	Tue Mar 16 13:16:28 2010 -0400
+++ b/deep/convolutional_dae/stacked_convolutional_dae.py	Tue Mar 16 14:46:25 2010 -0400
@@ -14,6 +14,9 @@
 from ift6266 import datasets
 from ift6266.baseline.log_reg.log_reg import LogisticRegression
 
+batch_size = 100
+
+
 class SigmoidalLayer(object):
     def __init__(self, rng, input, n_in, n_out):
 
@@ -67,7 +70,7 @@
     self.tilde_x = theano_rng.binomial( self.x.shape, 1, 1 - corruption_level,dtype=theano.config.floatX) * self.x
 
     conv1_out = conv.conv2d(self.tilde_x, self.W, filter_shape=filter_shape,
-                            image_shape=image_shape, border_mode='valid')
+                            image_shape=image_shape, unroll_kern=4, unroll_batch=4, border_mode='valid')
 
     
     self.y = T.tanh(conv1_out + self.b.dimshuffle('x', 0, 'x', 'x'))
@@ -75,6 +78,7 @@
     
     da_filter_shape = [ filter_shape[1], filter_shape[0], filter_shape[2],\
                        filter_shape[3] ]
+    da_image_shape = [ image_shape[0], filter_shape[0], image_shape[2]-filter_shape[2]+1, image_shape[3]-filter_shape[3]+1 ]
     initial_W_prime =  numpy.asarray( numpy.random.uniform( \
               low = -numpy.sqrt(6./(fan_in+fan_out)), \
               high = numpy.sqrt(6./(fan_in+fan_out)), \
@@ -82,7 +86,9 @@
     self.W_prime = theano.shared(value = initial_W_prime, name = "W_prime")
 
     conv2_out = conv.conv2d(self.y, self.W_prime,
-                            filter_shape = da_filter_shape,
+                            filter_shape = da_filter_shape,
+                            image_shape = da_image_shape,
+                            unroll_kern=4, unroll_batch=4,
                             border_mode='full')
 
     self.z =  (T.tanh(conv2_out + self.b_prime.dimshuffle('x', 0, 'x', 'x'))+center) / scale
@@ -107,7 +113,7 @@
         self.b = theano.shared(value=b_values)
  
         conv_out = conv.conv2d(input, self.W,
-                filter_shape=filter_shape, image_shape=image_shape)
+                filter_shape=filter_shape, image_shape=image_shape, unroll_kern=4, unroll_batch=4)
  
 
         fan_in = numpy.prod(filter_shape[1:])
@@ -214,12 +220,11 @@
 
 def sgd_optimization_mnist( learning_rate=0.1, pretraining_epochs = 1, \
                             pretrain_lr = 0.1, training_epochs = 1000, \
-                            kernels = [ [2,5,5] , [2,3,3] ], mlp_layers=[500], \
-                            corruption_levels = [ 0.2, 0.2, 0.2], \
+                            kernels = [ [4,5,5] , [4,3,3] ], mlp_layers=[500], \
+                            corruption_levels = [ 0.2, 0.2, 0.2], batch_size = batch_size, \
                             max_pool_layers = [ [2,2] , [2,2] ], \
                             dataset=datasets.nist_digits):
     
-    batch_size = 100 # size of the minibatch
  
     # allocate symbolic variables for the data
     index = T.lscalar() # index to a [mini]batch
@@ -231,15 +236,20 @@
     
     rng = numpy.random.RandomState(1234)
     conv_layers=[]
-    init_layer = [ [ kernels[0][0],1,kernels[0][1],kernels[0][2] ], None, max_pool_layers[0] ]
+    init_layer = [ [ kernels[0][0],1,kernels[0][1],kernels[0][2] ],\
+                   [ batch_size, 1, 32, 32 ],
+                    max_pool_layers[0] ]
     conv_layers.append(init_layer)
+
     conv_n_out = (32-kernels[0][2]+1)/max_pool_layers[0][0]
 
     for i in range(1,len(kernels)):    
-        layer = [ [ kernels[i][0],kernels[i-1][0],kernels[i][1],kernels[i][2] ], None, max_pool_layers[i] ]
+        layer = [ [ kernels[i][0],kernels[i-1][0],kernels[i][1],kernels[i][2] ],\
+                  [ batch_size, kernels[i-1][0], conv_n_out, conv_n_out ],
+                   max_pool_layers[i] ]
         conv_layers.append(layer)
         conv_n_out =  (conv_n_out - kernels[i][2]+1)/max_pool_layers[i][0]
-
+        print layer[1] # debug: image_shape fed to this conv layer
     network = SdA(input = layer0_input, n_ins_mlp = kernels[-1][0]*conv_n_out**2,
                   conv_hidden_layers_sizes = conv_layers,
                   mlp_hidden_layers_sizes = mlp_layers,
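
For reference, the image_shape and da_image_shape values introduced above follow the usual shape arithmetic: a 'valid' convolution shrinks each spatial dimension by filter_size - 1, and each max-pooling divides it by the pool size. A small sketch of that bookkeeping with the new defaults (32x32 NIST inputs, 5x5 then 3x3 kernels, 2x2 pooling); the helper function is made up for illustration:

batch_size = 100
kernels = [[4, 5, 5], [4, 3, 3]]
max_pool_layers = [[2, 2], [2, 2]]

def valid_out(size, k):
    # output size of a 'valid' convolution along one dimension
    return size - k + 1

size = 32
for i in range(len(kernels)):
    stack = 1 if i == 0 else kernels[i - 1][0]
    image_shape = [batch_size, stack, size, size]
    # inside the dA, the decoder's da_image_shape is the encoder's output
    da_image_shape = [batch_size, kernels[i][0],
                      valid_out(size, kernels[i][1]),
                      valid_out(size, kernels[i][2])]
    print i, image_shape, da_image_shape
    size = valid_out(size, kernels[i][1]) / max_pool_layers[i][0]

print 'n_ins_mlp =', kernels[-1][0] * size ** 2

This prints [100, 1, 32, 32] and [100, 4, 14, 14] for the two image shapes, matching what the loop in the patch constructs, and ends with n_ins_mlp = 144.
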