changeset 783:f84e6f301a18

Merged
author Olivier Delalleau <delallea@iro>
date Fri, 26 Jun 2009 16:44:55 -0400
parents ba068f7d4d3e (current diff) b6670cb57101 (diff)
children ba65e95d1221
files
diffstat 1 files changed, 189 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- a/pylearn/sandbox/scan_inputs_groups.py	Fri Jun 26 09:55:32 2009 -0400
+++ b/pylearn/sandbox/scan_inputs_groups.py	Fri Jun 26 16:44:55 2009 -0400
@@ -657,6 +657,195 @@
     def grad(self, inputs, (out_grad, mask_grad, )):
         return [out_grad]
 
+    def c_no_compile_args(self):
+        # -ffast-math and -ffinite-math-only SHOULD NOT BE ACTIVATED, as they break isnan().
+        return ["-ffast-math", "-ffinite-math-only"]
+
+    def c_headers(self):
+        return ['"Python.h"', '"numpy/noprefix.h"', '<math.h>']
+
+    def c_support_code(self):
+        return """                      
+using namespace std;
+"""
+
+    def c_code(self, node, name, (input,), (value, mask), sub):
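+        # Emit C code that copies `input` into `value`, replacing NaN entries
+        # with `fill_with` and setting the corresponding `mask` entries to 0
+        # (1 elsewhere); unsupported cases revert to the Python version.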
+        if self.fill_with is None:
+            print "OPTIMISATION WARNING: FillMissing does not implement the fill_with=None case in C. Reverting to the Python version.", self.fill_with_is_array, node.inputs[0].ndim
+            return super(FillMissing, self).c_code(node, name, (input,), (value, mask), sub)
+        if (self.fill_with_is_array and node.inputs[0].ndim not in [1, 2]) or node.inputs[0].ndim not in [1, 2, 3]:
+            print "OPTIMISATION WARNING: FillMissing does not implement this case in C. Reverting to the Python version.", self.fill_with_is_array, node.inputs[0].ndim
+            return super(FillMissing, self).c_code(node, name, (input,), (value, mask), sub)
+
+        d=locals()
+        d.update(sub)
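+        # d holds the values substituted into the C template below through its
+        # %(...)s placeholders (sub supplies Theano's error-handling snippet, e.g. fail).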
+        d["self.fill_with_is_array"] = 1 if self.fill_with_is_array else 0
+        d["self.fill_with"] = self.fill_with
+        if self.fill_with_is_array:
+            d["self.fill_with_length"]=str(self.fill_with.size)
+            s=""
+            for i in self.fill_with.flatten():
+                s+=","+str(i)
+            d["self.fill_with_data"]=s[1:]
+            d["self.fill_with.ndim"]=str(self.fill_with.ndim)
+        else:
+            d["self.fill_with_length"]=str(1)
+            d["self.fill_with_data"]=str(self.fill_with)
+            d["self.fill_with.ndim"]=0
+        if node.inputs[0].type.dtype=="float32": d["type"]="float"
+        elif node.inputs[0].type.dtype=="float64": d["type"]="double"
+        else: raise Exception("Type %s not implemented "%node.inputs[0].type.dtype)
+                              
+        return """
+int typenum;
+PyArrayObject* input = %(input)s, *value = %(value)s, *mask = %(mask)s;
+%(type)s fill_with[%(self.fill_with_length)s] = {%(self.fill_with_data)s};
+
+if(!PyArray_Check(input)){
+  PyErr_SetString(PyExc_ValueError, "input must be an ndarray");
+  %(fail)s;
+}
+
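+//(Re)allocate the value and mask outputs if needed, with the same shape as the input.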
+typenum = PyArray_ObjectType((PyObject*)input, 0);
+if(!value || !PyArray_SAMESHAPE(value,input)){
+  Py_XDECREF(value);
+  value = (PyArrayObject*) PyArray_ZEROS(input->nd, input->dimensions, typenum,0);
+  %(value)s = value;
+}
+
+if (!mask || !PyArray_SAMESHAPE(mask,input)){
+  Py_XDECREF(mask); 
+  mask = (PyArrayObject*) PyArray_ZEROS(input->nd, input->dimensions, typenum,0);
+  %(mask)s = mask;
+}
+
+if(!PyArray_ISCONTIGUOUS(input)){
+  cout<<"OPTIMISATION WARNING: in FillMissing, the input is not contiguous in memory, so we create a contiguous copy. This could be optimized by using the data directly."<<endl;
+  input = PyArray_GETCONTIGUOUS((PyArrayObject*)input);
+  if(!PyArray_ISCONTIGUOUS(input)){
+    PyErr_SetString(PyExc_ValueError, "input is not contiguous in memory");
+    %(fail)s;
+  }
+}
+if(!PyArray_ISCONTIGUOUS(value)){
+  cout<<"OPTIMISATION WARNING: in FillMissing, the value is not contiguous in memory, so we create a contiguous copy. This could be optimized by using the data directly."<<endl;
+  value = PyArray_GETCONTIGUOUS((PyArrayObject*)value);
+  if(!PyArray_ISCONTIGUOUS(value)){
+    PyErr_SetString(PyExc_ValueError, "value is not contiguous in memory");
+    %(fail)s;
+  }
+}
+if(!PyArray_ISCONTIGUOUS(mask)){
+  cout<<"OPTIMISATION WARNING: in FillMissing, the mask is not contiguous in memory, so we create a contiguous copy. This could be optimized by using the data directly."<<endl;
+  mask = PyArray_GETCONTIGUOUS((PyArrayObject*)mask);
+  if(!PyArray_ISCONTIGUOUS(mask)){
+    PyErr_SetString(PyExc_ValueError, "mask is not contiguous in memory");
+    %(fail)s;
+  }
+}
+
+assert(input->nd==value->nd && value->nd==mask->nd);
+#if %(self.fill_with_is_array)s
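+//fill_with is an array: each NaN is replaced by the corresponding fill_with entry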
+  if(input->nd==1){
+    %(type)s* value_  = (%(type)s*)(value->data);
+    %(type)s* mask_   = (%(type)s*)(mask->data);
+    %(type)s* input_ = (%(type)s*)(input->data);
+    for(int i=0;i<input->dimensions[0];i++){
+      if(isnan(input_[i])){
+        value_[i]=fill_with[i];
+        mask_[i]=0;
+      }else{
+        value_[i]=input_[i];
+        mask_[i]=1;
+      }
+    }
+  }else if(input->nd==2 && %(self.fill_with.ndim)s==1){
+    for(int i=0; i<input->dimensions[0];i++){
+      %(type)s* value_  = (%(type)s*) PyArray_GETPTR2(value,i,0);
+      %(type)s* mask_   = (%(type)s*) PyArray_GETPTR2(mask,i,0);
+      %(type)s* input_ = (%(type)s*) PyArray_GETPTR2(input,i,0);
+      for(int j=0; j<input->dimensions[1];j++){
+        if(isnan(input_[j])){
+          value_[j]=fill_with[j];
+          mask_[j]=0;
+        }else{
+          value_[j]=input_[j];
+          mask_[j]=1;
+        }
+      }
+    }
+  }else{//not implemented!
+//SHOULD not happen, as c_code reverts to the Python version in that case
+    std::stringstream temp;
+    temp << "In FillMissing, filling with an array is implemented in C only for inputs of ndim 1 or 2.";
+    temp << " ndim="<<input->nd<<endl;
+    std::string param = temp.str();
+    PyErr_SetString(PyExc_ValueError, param.c_str());
+    %(fail)s
+  }
+#else
+//we fill with a scalar
+  if(input->nd==1){
+    %(type)s* value_  = (%(type)s*)(value->data);
+    %(type)s* mask_   = (%(type)s*)(mask->data);
+    %(type)s* input_ = (%(type)s*)(input->data);
+    for(int i=0;i<input->dimensions[0];i++){
+      if(isnan(input_[i])){
+        value_[i]=%(self.fill_with)s;
+        mask_[i]=0;
+      }else{
+        value_[i]=input_[i];
+        mask_[i]=1;
+      }
+    }
+  }else if(input->nd==2){
+    for(int i=0;i<input->dimensions[0];i++){
+      %(type)s* value_  = (%(type)s*) PyArray_GETPTR2(value,i,0);
+      %(type)s* mask_   = (%(type)s*) PyArray_GETPTR2(mask,i,0);
+      %(type)s* input_ = (%(type)s*) PyArray_GETPTR2(input,i,0);
+      for(int j=0;j<input->dimensions[1];j++){
+        if(isnan(input_[j])){
+          value_[j]=%(self.fill_with)s;
+          mask_[j]=0;
+        }else{
+          value_[j]=input_[j];
+          mask_[j]=1;
+        }
+      }
+    }
+  }else if(input->nd==3){
+    for(int i=0;i<input->dimensions[0];i++){
+      for(int j=0;j<input->dimensions[1];j++){
+        %(type)s* value_  = (%(type)s*) PyArray_GETPTR3(value,i,j,0);
+        %(type)s* mask_   = (%(type)s*) PyArray_GETPTR3(mask,i,j,0);
+        %(type)s* input_ = (%(type)s*) PyArray_GETPTR3(input,i,j,0);
+        for(int k=0;k<input->dimensions[2];k++){
+          if(isnan(input_[k])){
+            value_[k]=%(self.fill_with)s;
+            mask_[k]=0;
+          }else{
+            value_[k]=input_[k];
+            mask_[k]=1;
+          }
+        }
+      }
+    }
+  }else{//not implemented!
+//SHOULD not happen, as c_code reverts to the Python version in that case
+    std::stringstream temp;
+    temp << "In FillMissing, filling with a constant is implemented in C only for inputs of ndim 1, 2 or 3.";
+    temp << " ndim="<<input->nd<<endl;
+    std::string param = temp.str();
+    PyErr_SetString(PyExc_ValueError, param.c_str());
+    %(fail)s
+  }
+#endif
+
+"""%d
 fill_missing_with_zeros = FillMissing(0)
 
 class MaskGradient(Op):