where the path to mpi4py.i needs to be adapted to your configuration.
-In MEDCoupling DEC APIs (InterpKernelDEC and OverlapDEC constructors), the Python interface deals with that using the following trick to get the C++ adress of the MPI communicator:
+In MEDCoupling DEC APIs (InterpKernelDEC and OverlapDEC constructors), the Python interface deals with that using the following trick to get the C++ address of the MPI communicator:
\code{.py}
from mpi4py import MPI
-MPI._addressof(mpicomm) # returns the C++ adress of the MPI communicator
+MPI._addressof(mpicomm) # returns the C++ address of the MPI communicator
\endcode
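For example, the InterpKernelDEC.New() Python wrapper accepts an mpi4py communicator directly. Below is a minimal sketch assuming a 4-process run; the import path and the rank lists are illustrative and should be adapted to your installation:
\code{.py}
from mpi4py import MPI
from medcoupling import InterpKernelDEC  # adapt the import to your build

# Ranks 0 and 1 hold the source field, ranks 2 and 3 the target field:
src_procs, tgt_procs = [0, 1], [2, 3]
# The wrapper calls MPI._addressof() on the communicator before forwarding it to C++:
dec = InterpKernelDEC.New(src_procs, tgt_procs, MPI.COMM_WORLD)
# ... attach fields, synchronize and exchange data ...
dec.release()
\endcode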
*/
fun:*PyInit*
}
-
+{
+ <raise_excep>
+ Memcheck:Leak
+ fun:*alloc
+ ...
+ fun:_dl_catch_exception
+}
%newobject MEDCoupling::ParaSkyLineArray::getSkyLineArray;
%newobject MEDCoupling::ParaSkyLineArray::getGlobalIdsArray;
+%newobject MEDCoupling::InterpKernelDEC::_NewWithPG_internal;
%newobject MEDCoupling::InterpKernelDEC::_NewWithComm_internal;
%newobject MEDCoupling::OverlapDEC::_NewWithComm_internal;
// This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
// which Swig doesn't handle nicely.
// It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
+ static InterpKernelDEC* _NewWithPG_internal(ProcessorGroup& source_group, ProcessorGroup& target_group)
+ {
+ return new InterpKernelDEC(source_group,target_group);
+ }
+
static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long another_comm)
{
return new InterpKernelDEC(src_ids,trg_ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
%pythoncode %{
# And here we use mpi4py's ability to provide its internal (C++) pointer to the communicator:
-def _InterpKernelDEC_WithComm_internal(src_procs, tgt_procs, mpicomm):
- from mpi4py import MPI
- return InterpKernelDEC._NewWithComm_internal(src_procs, tgt_procs, MPI._addressof(mpicomm))
-
-def _OverlapDEC_WithComm_internal(procs, mpicomm):
- from mpi4py import MPI
- return OverlapDEC._NewWithComm_internal(procs, MPI._addressof(mpicomm))
-
-InterpKernelDEC.NewWithCustomComm = _InterpKernelDEC_WithComm_internal
-OverlapDEC.NewWithCustomComm = _OverlapDEC_WithComm_internal
+def _IKDEC_WithComm_internal(src_procs, tgt_procs, mpicomm=None):
+ from mpi4py import MPI
+ # Check iterable:
+ try:
+ s, t = [el for el in src_procs], [el for el in tgt_procs]
+    except TypeError:  # not iterable
+ s, t = None, None
+ msg = "InterpKernelDEC: invalid type in ctor arguments! Possible signatures are:\n"
+ msg += " - InterpKernelDEC.New(ProcessorGroup, ProcessorGroup)\n"
+ msg += " - InterpKernelDEC.New(<iterable>, <iterable>)\n"
+ msg += " - InterpKernelDEC.New(<iterable>, <iterable>, MPI_Comm)\n"
+ if mpicomm is None:
+ if isinstance(src_procs, ProcessorGroup) and isinstance(tgt_procs, ProcessorGroup):
+ return InterpKernelDEC._NewWithPG_internal(src_procs, tgt_procs)
+        elif s is not None: # iterable
+ return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(MPI.COMM_WORLD))
+ else:
+ raise InterpKernelException(msg)
+ else:
+ if s is None: raise InterpKernelException(msg) # must be iterable
+ return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(mpicomm))
+
+def _ODEC_WithComm_internal(procs, mpicomm=None):
+ from mpi4py import MPI
+ # Check iterable:
+ try:
+ g = [el for el in procs]
+    except TypeError:  # not iterable
+ msg = "OverlapDEC: invalid type in ctor arguments! Possible signatures are:\n"
+ msg += " - OverlapDEC.New(<iterable>)\n"
+ msg += " - OverlapDEC.New(<iterable>, MPI_Comm)\n"
+ raise InterpKernelException(msg)
+ if mpicomm is None:
+ return OverlapDEC(g)
+ else:
+ return OverlapDEC._NewWithComm_internal(g, MPI._addressof(mpicomm))
+
+InterpKernelDEC.New = _IKDEC_WithComm_internal
+OverlapDEC.New = _ODEC_WithComm_internal
%}
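The OverlapDEC.New() wrapper above follows the same dispatching pattern with a single process list; a minimal usage sketch (the import path is an assumption to adapt to your installation):
\code{.py}
from mpi4py import MPI
from medcoupling import OverlapDEC  # adapt the import to your build

procs = list(range(MPI.COMM_WORLD.size))
o1 = OverlapDEC.New(procs)                   # dispatches to the plain OverlapDEC(procs) constructor
o2 = OverlapDEC.New(procs, MPI.COMM_WORLD)   # passes the mpi4py communicator explicitly
# ... attach fields, synchronize and exchange data ...
o2.release(); o1.release()
\endcode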
fld.setMesh(sub_m)
return sub_m, fld
+ def testInterpKernelDEC_ctor(self):
+ """ Test the various Python ctors """
+ size = MPI.COMM_WORLD.size
+ if size != 4:
+ print("Should be run on 4 procs!")
+ return
+ # Define two processor groups
+ nproc_source = 2
+ l1, l2 = range(nproc_source), range(size - nproc_source, size)
+ # With 2 iterables:
+ i1 = InterpKernelDEC.New(l1, l2)
+ # With 2 proc groups:
+ interface = CommInterface()
+ source_group = MPIProcessorGroup(interface, list(l1))
+ target_group = MPIProcessorGroup(interface, list(l2))
+ i2 = InterpKernelDEC.New(source_group, target_group)
+ # Should also work directly:
+ i3 = InterpKernelDEC(source_group, target_group)
+ # With 2 iterables and a custom comm:
+ i4 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+ # Should fail with 2 proc groups **and** a communicator
+ self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
+ i4.release(); i3.release(); i2.release(); i1.release()
+ source_group.release()
+ target_group.release()
+
@WriteInTmpDir
def testInterpKernelDEC_2D_py_1(self):
""" This test illustrates a basic use of the InterpKernelDEC.
interface = CommInterface()
source_group = MPIProcessorGroup(interface, procs_source)
target_group = MPIProcessorGroup(interface, procs_target)
- #idec = InterpKernelDEC(source_group, target_group)
- idec = InterpKernelDEC.NewWithCustomComm(procs_source, procs_target, MPI.COMM_WORLD)
+ idec = InterpKernelDEC(source_group, target_group)
# Write out full size meshes/fields for inspection
if rank == 0:
fld.setMesh(sub_m)
return sub_m, fld
+ def testOverlapDEC_ctor(self):
+ """ Test the various Python ctors """
+ size = MPI.COMM_WORLD.size
+ if size != 4:
+ print("Should be run on 4 procs!")
+ return
+ # Define processor group
+ proc_group = list(range(size))
+        # With an iterable:
+ o1 = OverlapDEC.New(proc_group)
+ # Should also work directly:
+ o2 = OverlapDEC(proc_group)
+ # With an iterable and a custom comm:
+ o3 = OverlapDEC.New(proc_group, MPI.COMM_WORLD)
+ o3.release(); o2.release(); o1.release()
+
@WriteInTmpDir
def testOverlapDEC_2D_py_1(self):
""" The main method of the test """
# Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
proc_group = list(range(size)) # No need for ProcessorGroup object here.
- #odec = OverlapDEC(proc_group)
- odec = OverlapDEC.NewWithCustomComm(proc_group, MPI.COMM_WORLD)
+ odec = OverlapDEC(proc_group)
# Write out full size meshes/fields for inspection
if rank == 0: