SALOME platform Git repositories - tools/medcoupling.git/commitdiff
[DEC] Enhance ctors on Python side to accept mpi4py communicators
author    abn <adrien.bruneton@cea.fr>  Fri, 12 May 2023 13:10:52 +0000 (15:10 +0200)
committer abn <adrien.bruneton@cea.fr>  Mon, 15 May 2023 14:30:38 +0000 (16:30 +0200)
+ factory pattern with 'New' static method
+ can use 'MPI._addressof(<my_communicator>)' in direct constructors
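In practice, the new entry points give the following Python call styles (a sketch only: the rank lists are illustrative and the module name is assumed from src/ParaMEDMEM_Swig):

    from mpi4py import MPI
    from ParaMEDMEM import InterpKernelDEC   # module name assumed (built by src/ParaMEDMEM_Swig)

    src, tgt = [0, 1], [2, 3]                             # illustrative rank lists
    d1 = InterpKernelDEC.New(src, tgt)                    # factory, defaults to MPI_COMM_WORLD
    d2 = InterpKernelDEC.New(src, tgt, MPI.COMM_WORLD)    # factory with an mpi4py communicator
    d3 = InterpKernelDEC(src, tgt, MPI._addressof(MPI.COMM_WORLD))  # direct ctor: raw address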

CMakeLists.txt
doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox
resources/dev/mc_suppr_valgrind
src/MEDCoupling_Swig/CMakeLists.txt
src/ParaMEDMEM/InterpKernelDEC.hxx
src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
src/ParaMEDMEM_Swig/test_OverlapDEC.py

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f130ad150c9a29b58633af258113af565d75a12f..952609497182e0fc73b61d86e526ea2a8e38e860 100644
@@ -231,6 +231,7 @@ IF(MEDCOUPLING_BUILD_DOC)
   SALOME_LOG_OPTIONAL_PACKAGE(Sphinx MEDCOUPLING_BUILD_DOC)
 ENDIF(MEDCOUPLING_BUILD_DOC)
 
+
 # Detection report
 SALOME_PACKAGE_REPORT_AND_CHECK()
 
diff --git a/doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox b/doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox
index 84004333ea28ac4a00498abaaa968fc69f449755..a11cc962b0bdddc23ab1cdd2d8bd628d5fd636ec 100644
@@ -21,4 +21,11 @@ void method_taking_a_MPI_Comm(MPI_Comm mpicomm);
 
 where the path to mpi4py.i needs to be adapted to your configuration.
 
+In the MEDCoupling DEC API (InterpKernelDEC and OverlapDEC constructors), the Python interface deals with this by using the following trick to obtain the address of the underlying MPI communicator:
+
+\code{.py}
+from mpi4py import MPI
+MPI._addressof(mpicomm) # returns the address of the underlying MPI_Comm, as a Python int
+\endcode
+
 */
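A minimal sketch of how that address then feeds the extended constructors added below in ParaMEDMEMCommon.i (rank lists illustrative, module name assumed):

    from mpi4py import MPI
    from ParaMEDMEM import InterpKernelDEC   # module name assumed

    comm_addr = MPI._addressof(MPI.COMM_WORLD)        # a plain Python int: the address of the MPI_Comm
    dec = InterpKernelDEC([0, 1], [2, 3], comm_addr)  # the C++ side casts back: *((MPI_Comm*)comm_addr)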
diff --git a/resources/dev/mc_suppr_valgrind b/resources/dev/mc_suppr_valgrind
index 7f536fdc9740b57075f08d1020230b938ddd009b..9609c3887585008d015777a4d016914c2f4b5a51 100644
    fun:*PyInit*
 }
 
-
+{
+   <raise_excep>
+   Memcheck:Leak
+   fun:*alloc
+   ...
+   fun:_dl_catch_exception
+}
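For reference, such a file is handed to Valgrind via its --suppressions option, e.g. 'valgrind --suppressions=resources/dev/mc_suppr_valgrind python3 my_test.py' (my_test.py being illustrative); the new entry silences leak reports whose allocation stacks go through _dl_catch_exception during dynamic loading.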
 
diff --git a/src/MEDCoupling_Swig/CMakeLists.txt b/src/MEDCoupling_Swig/CMakeLists.txt
index 125dda076ed3ee61f812049677ad1805cad49821..387a34a08bf834794e7f46271cf691eb0c3d9637 100644
@@ -37,7 +37,7 @@ ENDIF()
 SET(SWIG_MODULE_MEDCoupling_EXTRA_FLAGS "${NUMPY_DEFINITIONS};${SCIPY_DEFINITIONS}")
 IF(MEDCOUPLING_USE_64BIT_IDS)
   STRING(APPEND SWIG_MODULE_MEDCoupling_EXTRA_FLAGS ";-DMEDCOUPLING_USE_64BIT_IDS")
-ENDIF(MEDCOUPLING_USE_64BIT_IDS)
+ENDIF()
 
 SET (MEDCoupling_SWIG_DPYS_FILES
     MEDCouplingCommon.i
diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx
index 95c40f705ed23592f0a678e0d1209e9e66c61aea..f4b156329c65bd88e52e5232fa57591d62deab31 100644
@@ -130,8 +130,7 @@ namespace MEDCoupling
   public:  
     InterpKernelDEC();
     InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
-    InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
-                    const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
     virtual ~InterpKernelDEC();
     void release();
 
diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
index 290da2a8a008e39bbe4787b4bec22efad6ced024..d47b8bf4806da191a999fe116b24c79fa9d93958 100644
@@ -54,9 +54,7 @@ using namespace ICoCo;
 %include "ComponentTopology.hxx"
 %include "DEC.hxx"
 %include "DisjointDEC.hxx"
-%include "InterpKernelDEC.hxx"
 %include "StructuredCoincidentDEC.hxx"
-%include "OverlapDEC.hxx"
 
 %newobject MEDCoupling::ParaUMesh::New;
 %newobject MEDCoupling::ParaUMesh::getMesh;
@@ -75,6 +73,10 @@ using namespace ICoCo;
 %newobject MEDCoupling::ParaSkyLineArray::getSkyLineArray;
 %newobject MEDCoupling::ParaSkyLineArray::getGlobalIdsArray;
 
+%newobject MEDCoupling::InterpKernelDEC::_NewWithPG_internal;
+%newobject MEDCoupling::InterpKernelDEC::_NewWithComm_internal;
+%newobject MEDCoupling::OverlapDEC::_NewWithComm_internal;
+
 %feature("unref") ParaSkyLineArray "$this->decrRef();"
 %feature("unref") ParaUMesh "$this->decrRef();"
 %feature("unref") ParaDataArrayInt32 "$this->decrRef();"
@@ -274,16 +276,97 @@ namespace MEDCoupling
       }
     }
   };
-}
 
-/* This object can be used only if MED_ENABLE_FVM is defined*/
-#ifdef MED_ENABLE_FVM
-class NonCoincidentDEC : public DEC
-{
-public:
-  NonCoincidentDEC(ProcessorGroup& source, ProcessorGroup& target);
-};
-#endif
+  /* This object can be used only if MED_ENABLE_FVM is defined */
+  #ifdef MED_ENABLE_FVM
+  class NonCoincidentDEC : public DEC
+  {
+  public:
+    NonCoincidentDEC(ProcessorGroup& source, ProcessorGroup& target);
+  };
+  #endif
+
+  class InterpKernelDEC : public DisjointDEC, public INTERP_KERNEL::InterpolationOptions
+  {
+    public:
+      InterpKernelDEC();
+      InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
+      InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids); // hide last optional parameter!
+      virtual ~InterpKernelDEC();
+      void release();
+
+      void synchronize();
+      void recvData();
+      void recvData(double time);
+      void sendData();
+      void sendData(double time , double deltatime);
+      void prepareSourceDE();
+      void prepareTargetDE();
+
+      %extend {
+        // Provides a direct ctor for which the communicator can be passed with "MPI._addressof(the_com)":
+        InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long comm_ptr)
+        {
+            return new InterpKernelDEC(src_ids, trg_ids, *((MPI_Comm*)comm_ptr));
+        }
+
+        // This should not be called directly by the user, since it still takes a pointer to an MPI_Comm,
+        // which SWIG does not handle nicely.
+        // It is only here to provide a constructor taking a **pointer** to a comm - see the Python code below.
+        static InterpKernelDEC* _NewWithPG_internal(ProcessorGroup& source_group, ProcessorGroup& target_group)
+        {
+          return new InterpKernelDEC(source_group,target_group);
+        }
+
+        static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long another_comm)
+        {
+          return new InterpKernelDEC(src_ids,trg_ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
+        }
+      }
+  };
+
+  class OverlapDEC : public DEC, public INTERP_KERNEL::InterpolationOptions
+  {
+      public:
+        OverlapDEC(const std::set<int>& procIds);  // hide optional param comm
+        virtual ~OverlapDEC();
+        void release();
+
+        void sendRecvData(bool way=true);
+        void sendData();
+        void recvData();
+        void synchronize();
+        void attachSourceLocalField(ParaFIELD *field, bool ownPt=false);
+        void attachTargetLocalField(ParaFIELD *field, bool ownPt=false);
+        void attachSourceLocalField(MEDCouplingFieldDouble *field);
+        void attachTargetLocalField(MEDCouplingFieldDouble *field);
+        void attachSourceLocalField(ICoCo::MEDDoubleField *field);
+        void attachTargetLocalField(ICoCo::MEDDoubleField *field);
+        ProcessorGroup *getGroup();
+        bool isInGroup() const;
+
+        void setDefaultValue(double val);
+        void setWorkSharingAlgo(int method);
+
+        void debugPrintWorkSharing(std::ostream & ostr) const;
+
+        %extend {
+          OverlapDEC(const std::set<int>& ids, long long comm_ptr)
+          {
+             return new OverlapDEC(ids, *((MPI_Comm*)comm_ptr));
+          }
+
+          // This should not be called directly by the user, since it still takes a pointer to an MPI_Comm,
+          // which SWIG does not handle nicely.
+          // It is only here to provide a constructor taking a **pointer** to a comm - see the Python code below.
+          static OverlapDEC* _NewWithComm_internal(const std::set<int>& ids, long long another_comm)
+          {
+            return new OverlapDEC(ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
+          }
+        }
+   };
+
+} // end namespace MEDCoupling
 
 %extend MEDCoupling::ParaMESH
 {
@@ -324,3 +407,54 @@ if MEDCouplingUse64BitIDs():
 else:
   ParaDataArrayInt = ParaDataArrayInt32
 %}
+
+%pythoncode %{
+
+# And here we use mpi4py's ability to provide the internal (C) pointer to the communicator:
+# NB: a proper typemap for MPI_Comm from Python to C++ would require including the mpi4py headers and .i file ... an extra dependency ...
+def _IKDEC_WithComm_internal(src_procs, tgt_procs, mpicomm=None):
+    from mpi4py import MPI
+    # Check iterable:
+    try:
+        s, t = [el for el in src_procs], [el for el in tgt_procs]
+    except TypeError:  # not iterable
+        s, t = None, None
+    msg =  "InterpKernelDEC: invalid type in ctor arguments! Possible signatures are:\n"
+    msg += "   - InterpKernelDEC(ProcessorGroup, ProcessorGroup)\n"
+    msg += "   - InterpKernelDEC(<iterable>, <iterable>)\n"
+    msg += "   - InterpKernelDEC(<iterable>, <iterable>, MPI_Comm*) : WARNING here the address of the communicator should be passed with MPI._addressof(the_com)\n"
+    msg += "   - InterpKernelDEC.New(ProcessorGroup, ProcessorGroup)\n"
+    msg += "   - InterpKernelDEC.New(<iterable>, <iterable>)\n"
+    msg += "   - InterpKernelDEC.New(<iterable>, <iterable>, MPI_Comm)\n"
+    if mpicomm is None:
+        if isinstance(src_procs, ProcessorGroup) and isinstance(tgt_procs, ProcessorGroup):
+            return InterpKernelDEC._NewWithPG_internal(src_procs, tgt_procs)
+        elif s is not None:  # iterable
+            return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(MPI.COMM_WORLD))
+        else:
+            raise InterpKernelException(msg)
+    else:
+        if s is None: raise InterpKernelException(msg)  # must be iterable
+        return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(mpicomm))
+
+def _ODEC_WithComm_internal(procs, mpicomm=None):
+    from mpi4py import MPI
+    # Check iterable:
+    try:
+        g = [el for el in procs]
+    except TypeError:  # not iterable
+        msg =  "OverlapDEC: invalid type in ctor arguments! Possible signatures are:\n"
+        msg += "   - OverlapDEC.New(<iterable>)\n"
+        msg += "   - OverlapDEC.New(<iterable>, MPI_Comm)\n"
+        msg += "   - OverlapDEC(<iterable>)\n"
+        msg += "   - OverlapDEC(<iterable>, MPI_Comm*) : WARNING here the address of the communicator should be passed with MPI._addressof(the_com)\n"
+        raise InterpKernelException(msg)
+    if mpicomm is None:
+        return OverlapDEC(g)
+    else:
+        return OverlapDEC._NewWithComm_internal(g, MPI._addressof(mpicomm))
+
+InterpKernelDEC.New = _IKDEC_WithComm_internal
+OverlapDEC.New = _ODEC_WithComm_internal
+
+%}
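The main payoff of accepting mpi4py communicators is running a DEC on a sub-communicator. A hypothetical sketch, assuming the ranks in the id sets are relative to the communicator passed in (module name assumed, 4-rank job):

    from mpi4py import MPI
    from ParaMEDMEM import InterpKernelDEC   # module name assumed

    # Split COMM_WORLD into two halves and build a DEC on the first one (illustrative):
    color = 0 if MPI.COMM_WORLD.rank < 2 else 1
    sub_comm = MPI.COMM_WORLD.Split(color=color, key=MPI.COMM_WORLD.rank)
    if color == 0:
        dec = InterpKernelDEC.New([0], [1], sub_comm)  # ids assumed relative to sub_comm
        # ... attach fields, synchronize, send/recv ...
        dec.release()
    sub_comm.Free()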
diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
index ef68d76a063972e6f1559b830f0ad008128ed6da..fe30d14cbfb10331e35935a0fcefb3c1f74fa189 100755
@@ -82,6 +82,37 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         fld.setMesh(sub_m)
         return sub_m, fld
 
+    def testInterpKernelDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define two processor groups
+        nproc_source = 2
+        l1, l2 = range(nproc_source), range(size - nproc_source, size)
+        # With 2 iterables:
+        i1 = InterpKernelDEC.New(l1, l2)
+        # Should also work directly:
+        i2 = InterpKernelDEC(l1, l2)
+        # With 2 proc groups:
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, list(l1))
+        target_group = MPIProcessorGroup(interface, list(l2))
+        i3 = InterpKernelDEC.New(source_group, target_group)
+        # Should also work directly:
+        i4 = InterpKernelDEC(source_group, target_group)
+        # With 2 iterables and a custom comm:
+        i5 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+        # Works directly with the **hack** on the comm:
+        i6 = InterpKernelDEC(l1, l2, MPI._addressof(MPI.COMM_WORLD))
+        # Should fail with 2 proc groups **and** a communicator
+        self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
+        self.assertRaises(NotImplementedError, InterpKernelDEC, source_group, target_group, MPI.COMM_WORLD)
+        i6.release(); i5.release(); i4.release(); i3.release(); i2.release(); i1.release()
+        source_group.release()
+        target_group.release()
+
     @WriteInTmpDir
     def testInterpKernelDEC_2D_py_1(self):
         """ This test illustrates a basic use of the InterpKernelDEC.
diff --git a/src/ParaMEDMEM_Swig/test_OverlapDEC.py b/src/ParaMEDMEM_Swig/test_OverlapDEC.py
index 48df8f4dc17051c1b63cdb467b2786ce774abdaf..778f40b03f3d71e63b1369c95f94ded6b71362a8 100755
@@ -88,6 +88,25 @@ class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
         fld.setMesh(sub_m)
         return sub_m, fld
 
+    def testOverlapDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define processor group
+        proc_group = list(range(size))
+        # With an iterable:
+        o1 = OverlapDEC.New(proc_group)
+        # Should also work directly:
+        o2 = OverlapDEC(proc_group)
+        # With an iterable and a custom comm:
+        o3 = OverlapDEC.New(proc_group, MPI.COMM_WORLD)
+        # Also works directly with the **hack** on the comm:
+        o4 = OverlapDEC(proc_group, MPI._addressof(MPI.COMM_WORLD))
+        self.assertRaises(NotImplementedError, OverlapDEC, proc_group, MPI.COMM_WORLD)
+        o4.release(); o3.release(); o2.release(); o1.release()
+
     @WriteInTmpDir
     def testOverlapDEC_2D_py_1(self):
         """ The main method of the test """