From: abn
Date: Mon, 15 May 2023 08:24:51 +0000 (+0200)
Subject: should be the good one!
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=4ea8b7573b0d1c49dcf37d40150109cce4a4ba76;p=tools%2Fmedcoupling.git

should be the good one!
---

diff --git a/doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox b/doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox
index dc8898b45..a11cc962b 100644
--- a/doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox
+++ b/doc/developer/doxygen/doxfiles/reference/misc/swig_mpi.dox
@@ -21,11 +21,11 @@ void method_taking_a_MPI_Comm(MPI_Comm mpicomm);
 
 where the path to mpi4py.i needs to be adapted to your configuration.
 
-In MEDCoupling DEC APIs (InterpKernelDEC and OverlapDEC constructors), the Python interface deals with that using the following trick to get the C++ adress of the MPI communicator:
+In MEDCoupling DEC APIs (InterpKernelDEC and OverlapDEC constructors), the Python interface deals with that using the following trick to get the C++ address of the MPI communicator:
 
 \code{.py}
 from mpi4py import MPI
-MPI._addressof(mpicomm) # returns the C++ adress of the MPI communicator
+MPI._addressof(mpicomm) # returns the C++ address of the MPI communicator
 \endcode
 
 */
diff --git a/resources/dev/mc_suppr_valgrind b/resources/dev/mc_suppr_valgrind
index 7f536fdc9..9609c3887 100644
--- a/resources/dev/mc_suppr_valgrind
+++ b/resources/dev/mc_suppr_valgrind
@@ -62,5 +62,11 @@
    fun:*PyInit*
 }
 
-
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Leak
+   fun:*alloc
+   ...
+   fun:_dl_catch_exception
+}
 
diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
index 50c42b136..87fe9941e 100644
--- a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
+++ b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
@@ -79,6 +79,7 @@ using namespace ICoCo;
 %newobject MEDCoupling::ParaSkyLineArray::getSkyLineArray;
 %newobject MEDCoupling::ParaSkyLineArray::getGlobalIdsArray;
 
+%newobject MEDCoupling::InterpKernelDEC::_NewWithPG_internal;
 %newobject MEDCoupling::InterpKernelDEC::_NewWithComm_internal;
 %newobject MEDCoupling::OverlapDEC::_NewWithComm_internal;
 
@@ -311,6 +312,11 @@ namespace MEDCoupling
     // This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
     // which Swig doesn't handle nicely.
     // It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
+    static InterpKernelDEC* _NewWithPG_internal(ProcessorGroup& source_group, ProcessorGroup& target_group)
+    {
+      return new InterpKernelDEC(source_group,target_group);
+    }
+
     static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long another_comm)
     {
       return new InterpKernelDEC(src_ids,trg_ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
@@ -399,15 +405,44 @@ else:
 %pythoncode %{
 # And here we use mpi4py ability to provide its internal (C++) pointer to the communicator:
-def _InterpKernelDEC_WithComm_internal(src_procs, tgt_procs, mpicomm):
-    from mpi4py import MPI
-    return InterpKernelDEC._NewWithComm_internal(src_procs, tgt_procs, MPI._addressof(mpicomm))
-
-def _OverlapDEC_WithComm_internal(procs, mpicomm):
-    from mpi4py import MPI
-    return OverlapDEC._NewWithComm_internal(procs, MPI._addressof(mpicomm))
-
-InterpKernelDEC.NewWithCustomComm = _InterpKernelDEC_WithComm_internal
-OverlapDEC.NewWithCustomComm = _OverlapDEC_WithComm_internal
+def _IKDEC_WithComm_internal(src_procs, tgt_procs, mpicomm=None):
+    from mpi4py import MPI
+    # Check iterable:
+    try:
+        s, t = [el for el in src_procs], [el for el in tgt_procs]
+    except:
+        s, t = None, None
+    msg =  "InterpKernelDEC: invalid type in ctor arguments! Possible signatures are:\n"
+    msg += "   - InterpKernelDEC.New(ProcessorGroup, ProcessorGroup)\n"
+    msg += "   - InterpKernelDEC.New(<iterable>, <iterable>)\n"
+    msg += "   - InterpKernelDEC.New(<iterable>, <iterable>, MPI_Comm)\n"
+    if mpicomm is None:
+        if isinstance(src_procs, ProcessorGroup) and isinstance(tgt_procs, ProcessorGroup):
+            return InterpKernelDEC._NewWithPG_internal(src_procs, tgt_procs)
+        elif not s is None: # iterable
+            return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(MPI.COMM_WORLD))
+        else:
+            raise InterpKernelException(msg)
+    else:
+        if s is None: raise InterpKernelException(msg) # must be iterable
+        return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(mpicomm))
+
+def _ODEC_WithComm_internal(procs, mpicomm=None):
+    from mpi4py import MPI
+    # Check iterable:
+    try:
+        g = [el for el in procs]
+    except:
+        msg =  "OverlapDEC: invalid type in ctor arguments! Possible signatures are:\n"
+        msg += "   - OverlapDEC.New(<iterable>)\n"
+        msg += "   - OverlapDEC.New(<iterable>, MPI_Comm)\n"
+        raise InterpKernelException(msg)
+    if mpicomm is None:
+        return OverlapDEC(g)
+    else:
+        return OverlapDEC._NewWithComm_internal(g, MPI._addressof(mpicomm))
+
+InterpKernelDEC.New = _IKDEC_WithComm_internal
+OverlapDEC.New = _ODEC_WithComm_internal
 %}
diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
index 79f8686b4..e9c5a0d21 100755
--- a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
+++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
@@ -82,6 +82,32 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         fld.setMesh(sub_m)
         return sub_m, fld
 
+    def testInterpKernelDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define two processor groups
+        nproc_source = 2
+        l1, l2 = range(nproc_source), range(size - nproc_source, size)
+        # With 2 iterables:
+        i1 = InterpKernelDEC.New(l1, l2)
+        # With 2 proc groups:
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, list(l1))
+        target_group = MPIProcessorGroup(interface, list(l2))
+        i2 = InterpKernelDEC.New(source_group, target_group)
+        # Should also work directly:
+        i3 = InterpKernelDEC(source_group, target_group)
+        # With 2 iterables and a custom comm:
+        i4 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+        # Should fail with 2 proc groups **and** a communicator
+        self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
+        i4.release(); i3.release(); i2.release(); i1.release()
+        source_group.release()
+        target_group.release()
+
     @WriteInTmpDir
     def testInterpKernelDEC_2D_py_1(self):
         """ This test illustrates a basic use of the InterpKernelDEC.
@@ -101,8 +127,7 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         interface = CommInterface()
         source_group = MPIProcessorGroup(interface, procs_source)
         target_group = MPIProcessorGroup(interface, procs_target)
-        #idec = InterpKernelDEC(source_group, target_group)
-        idec = InterpKernelDEC.NewWithCustomComm(procs_source, procs_target, MPI.COMM_WORLD)
+        idec = InterpKernelDEC(source_group, target_group)
 
         # Write out full size meshes/fields for inspection
         if rank == 0:
diff --git a/src/ParaMEDMEM_Swig/test_OverlapDEC.py b/src/ParaMEDMEM_Swig/test_OverlapDEC.py
index 3cbc2ffc3..80f075686 100755
--- a/src/ParaMEDMEM_Swig/test_OverlapDEC.py
+++ b/src/ParaMEDMEM_Swig/test_OverlapDEC.py
@@ -88,6 +88,22 @@ class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
         fld.setMesh(sub_m)
         return sub_m, fld
 
+    def testOverlapDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define processor group
+        proc_group = list(range(size))
+        # With an iterable:
+        o1 = OverlapDEC.New(proc_group)
+        # Should also work directly:
+        o2 = OverlapDEC(proc_group)
+        # With an iterable and a custom comm:
+        o3 = OverlapDEC.New(proc_group, MPI.COMM_WORLD)
+        o3.release(); o2.release(); o1.release()
+
     @WriteInTmpDir
     def testOverlapDEC_2D_py_1(self):
         """ The main method of the test """
@@ -98,8 +114,7 @@ class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
 
         # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
         proc_group = list(range(size)) # No need for ProcessorGroup object here.
-        #odec = OverlapDEC(proc_group)
-        odec = OverlapDEC.NewWithCustomComm(proc_group, MPI.COMM_WORLD)
+        odec = OverlapDEC(proc_group)
 
         # Write out full size meshes/fields for inspection
         if rank == 0:
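
Usage note: with this commit, InterpKernelDEC.New and OverlapDEC.New become the single Python entry points and dispatch on their argument types - ProcessorGroup objects, plain iterables of ranks, or iterables plus an mpi4py communicator (MPI_COMM_WORLD being the default). The driver below is a minimal sketch mirroring the tests above, not part of the commit; the script name, the "from ParaMEDMEM import ..." form and the half/half rank split are illustrative assumptions.

# demo_dec_ctors.py (hypothetical name) - run with e.g.: mpirun -np 4 python demo_dec_ctors.py
from mpi4py import MPI
from ParaMEDMEM import InterpKernelDEC, OverlapDEC

size = MPI.COMM_WORLD.size
src_ranks = range(size // 2)          # first half of the ranks: source side
tgt_ranks = range(size // 2, size)    # second half: target side

# Plain iterables of ranks; the communicator defaults to MPI_COMM_WORLD:
ik1 = InterpKernelDEC.New(src_ranks, tgt_ranks)

# Same, with an explicit communicator: its C address is obtained with
# MPI._addressof() and cast back to an MPI_Comm* on the C++ side:
ik2 = InterpKernelDEC.New(src_ranks, tgt_ranks, MPI.COMM_WORLD)

# OverlapDEC takes a single group of ranks, plus the optional communicator:
od = OverlapDEC.New(range(size), MPI.COMM_WORLD)

for dec in (od, ik2, ik1):
    dec.release()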