SALOME platform Git repositories - tools/medcoupling.git/commitdiff
should be the right one!
author    abn <adrien.bruneton@cea.fr>
Mon, 15 May 2023 08:24:51 +0000 (10:24 +0200)
committer abn <adrien.bruneton@cea.fr>
Mon, 15 May 2023 08:24:51 +0000 (10:24 +0200)
doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox
resources/dev/mc_suppr_valgrind
src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
src/ParaMEDMEM_Swig/test_OverlapDEC.py

index dc8898b455d9a1094eb0678bd5f97b35c53cee44..a11cc962b0bdddc23ab1cdd2d8bd628d5fd636ec 100644 (file)
@@ -21,11 +21,11 @@ void method_taking_a_MPI_Comm(MPI_Comm mpicomm);
 
 where the path to mpi4py.i needs to be adapted to your configuration.
 
-In MEDCoupling DEC APIs (InterpKernelDEC and OverlapDEC constructors), the Python interface deals with that using the following trick to get the C++ adress of the MPI communicator:
+In MEDCoupling DEC APIs (InterpKernelDEC and OverlapDEC constructors), the Python interface deals with that using the following trick to get the C++ address of the MPI communicator:
 
 \code{.py}
 from mpi4py import MPI
-MPI._addressof(mpicomm) # returns the C++ adress of the MPI communicator
+MPI._addressof(mpicomm) # returns the C++ address of the MPI communicator
 \endcode
 
 */
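As a minimal, illustrative sketch of the caller's side of this trick (mpi4py is assumed to be available; the sub-communicator split below is purely illustrative and not part of this patch):

\code{.py}
from mpi4py import MPI

# Illustrative sub-communicator: first two ranks vs. the rest of COMM_WORLD.
world = MPI.COMM_WORLD
sub_comm = world.Split(color=0 if world.rank < 2 else 1, key=world.rank)

# MPI._addressof() returns the address of the underlying MPI_Comm object;
# the SWIG layer casts it back to an MPI_Comm* on the C++ side.
comm_address = MPI._addressof(sub_comm)
\endcode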
index 7f536fdc9740b57075f08d1020230b938ddd009b..9609c3887585008d015777a4d016914c2f4b5a51 100644 (file)
    fun:*PyInit*
 }
 
-
+{
+   <raise_excep>
+   Memcheck:Leak
+   fun:*alloc
+   ...
+   fun:_dl_catch_exception
+}
 
index 50c42b136e0d43901a3deeb99f5f100622f2eda0..87fe9941e5e39baab23f3338cbbd1ea10b69eac5 100644 (file)
@@ -79,6 +79,7 @@ using namespace ICoCo;
 %newobject MEDCoupling::ParaSkyLineArray::getSkyLineArray;
 %newobject MEDCoupling::ParaSkyLineArray::getGlobalIdsArray;
 
+%newobject MEDCoupling::InterpKernelDEC::_NewWithPG_internal;
 %newobject MEDCoupling::InterpKernelDEC::_NewWithComm_internal;
 %newobject MEDCoupling::OverlapDEC::_NewWithComm_internal;
 
@@ -311,6 +312,11 @@ namespace MEDCoupling
         // This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm 
         // which Swig doesn't handle nicely.
         // It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
+        static InterpKernelDEC* _NewWithPG_internal(ProcessorGroup& source_group, ProcessorGroup& target_group)
+        {
+          return new InterpKernelDEC(source_group,target_group);
+        }
+        
         static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long another_comm)
         {
           return new InterpKernelDEC(src_ids,trg_ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
@@ -399,15 +405,44 @@ else:
 %pythoncode %{
 
 # And here we use mpi4py ability to provide its internal (C++) pointer to the communicator:
-def _InterpKernelDEC_WithComm_internal(src_procs, tgt_procs, mpicomm):
-  from mpi4py import MPI
-  return InterpKernelDEC._NewWithComm_internal(src_procs, tgt_procs, MPI._addressof(mpicomm))
-
-def _OverlapDEC_WithComm_internal(procs, mpicomm):
-  from mpi4py import MPI
-  return OverlapDEC._NewWithComm_internal(procs, MPI._addressof(mpicomm))
-
-InterpKernelDEC.NewWithCustomComm = _InterpKernelDEC_WithComm_internal
-OverlapDEC.NewWithCustomComm = _OverlapDEC_WithComm_internal
+def _IKDEC_WithComm_internal(src_procs, tgt_procs, mpicomm=None):
+    from mpi4py import MPI
+    # Check iterable:
+    try:
+        s, t = [el for el in src_procs], [el for el in tgt_procs]
+    except:
+        s, t = None, None
+    msg =  "InterpKernelDEC: invalid type in ctor arguments! Possible signatures are:\n"
+    msg += "   - InterpKernelDEC.New(ProcessorGroup, ProcessorGroup)\n"
+    msg += "   - InterpKernelDEC.New(<iterable>, <iterable>)\n"
+    msg += "   - InterpKernelDEC.New(<iterable>, <iterable>, MPI_Comm)\n"
+    if mpicomm is None:
+        if isinstance(src_procs, ProcessorGroup) and isinstance(tgt_procs, ProcessorGroup):
+            return InterpKernelDEC._NewWithPG_internal(src_procs, tgt_procs)
+        elif not s is None:  # iterable
+            return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(MPI.COMM_WORLD))
+        else:
+            raise InterpKernelException(msg)
+    else:
+        if s is None: raise InterpKernelException(msg)  # must be iterable
+        return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(mpicomm))
+
+def _ODEC_WithComm_internal(procs, mpicomm=None):
+    from mpi4py import MPI
+    # Check iterable:
+    try:
+        g = [el for el in procs]
+    except:
+        msg =  "OverlapDEC: invalid type in ctor arguments! Possible signatures are:\n"
+        msg += "   - OverlapDEC.New(<iterable>)\n"
+        msg += "   - OverlapDEC.New(<iterable>, MPI_Comm)\n"
+        raise InterpKernelException(msg)
+    if mpicomm is None:
+        return OverlapDEC(g)
+    else:
+        return OverlapDEC._NewWithComm_internal(g, MPI._addressof(mpicomm))
+
+InterpKernelDEC.New = _IKDEC_WithComm_internal
+OverlapDEC.New = _ODEC_WithComm_internal
 
 %}
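With _IKDEC_WithComm_internal and _ODEC_WithComm_internal installed as InterpKernelDEC.New and OverlapDEC.New above, the new constructor dispatch can be exercised as in the following sketch (the medcoupling import path and the 4-rank layout are assumptions here; the tests further below exercise the same signatures):

\code{.py}
from mpi4py import MPI
from medcoupling import InterpKernelDEC, OverlapDEC  # import path assumed

# Two iterables of ranks: MPI.COMM_WORLD is used internally.
dec1 = InterpKernelDEC.New([0, 1], [2, 3])

# Two iterables plus an explicit mpi4py communicator.
dec2 = InterpKernelDEC.New([0, 1], [2, 3], MPI.COMM_WORLD)

# A single group of ranks for OverlapDEC, here with a custom communicator.
odec = OverlapDEC.New([0, 1, 2, 3], MPI.COMM_WORLD)

odec.release(); dec2.release(); dec1.release()
\endcode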
index 79f8686b4698b6a022d9fcdca164ad52eb92f001..e9c5a0d212dd43a4221d1238497dee67e27ccda6 100755 (executable)
@@ -82,6 +82,32 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         fld.setMesh(sub_m)
         return sub_m, fld
 
+    def testInterpKernelDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define two processor groups
+        nproc_source = 2
+        l1, l2 = range(nproc_source), range(size - nproc_source, size)
+        # With 2 iterables:
+        i1 = InterpKernelDEC.New(l1, l2)
+        # With 2 proc groups:
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, list(l1))
+        target_group = MPIProcessorGroup(interface, list(l2))
+        i2 = InterpKernelDEC.New(source_group, target_group)
+        # Should also work directly:
+        i3 = InterpKernelDEC(source_group, target_group)
+        # With 2 iterables and a custom comm:
+        i4 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+        # Should fail with 2 proc groups **and** a communicator
+        self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
+        i4.release(); i3.release(); i2.release(); i1.release()
+        source_group.release()
+        target_group.release()
+
     @WriteInTmpDir
     def testInterpKernelDEC_2D_py_1(self):
         """ This test illustrates a basic use of the InterpKernelDEC.
@@ -101,8 +127,7 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         interface = CommInterface()
         source_group = MPIProcessorGroup(interface, procs_source)
         target_group = MPIProcessorGroup(interface, procs_target)
-        #idec = InterpKernelDEC(source_group, target_group)
-        idec = InterpKernelDEC.NewWithCustomComm(procs_source, procs_target, MPI.COMM_WORLD)
+        idec = InterpKernelDEC(source_group, target_group)
 
         # Write out full size meshes/fields for inspection
         if rank == 0:
index 3cbc2ffc374802bde4327454d9e813bf30407d4f..80f075686f760879ef355775b96e8cf42cd3472f 100755 (executable)
@@ -88,6 +88,22 @@ class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
         fld.setMesh(sub_m)
         return sub_m, fld
 
+    def testOverlapDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define processor group
+        proc_group = list(range(size))
+    # With an iterable:
+        o1 = OverlapDEC.New(proc_group)
+        # Should also work directly:
+        o2 = OverlapDEC(proc_group)
+        # With an iterable and a custom comm:
+        o3 = OverlapDEC.New(proc_group, MPI.COMM_WORLD)
+        o3.release(); o2.release(); o1.release()
+
     @WriteInTmpDir
     def testOverlapDEC_2D_py_1(self):
         """ The main method of the test """
@@ -98,8 +114,7 @@ class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
 
         # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
         proc_group = list(range(size))   # No need for ProcessorGroup object here.
-        #odec = OverlapDEC(proc_group)
-        odec = OverlapDEC.NewWithCustomComm(proc_group, MPI.COMM_WORLD)
+        odec = OverlapDEC(proc_group)
 
         # Write out full size meshes/fields for inspection
         if rank == 0: