public:
InterpKernelDEC();
InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
+ InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids); // hides the last optional MPI_Comm parameter from SWIG!
virtual ~InterpKernelDEC();
void release();
void prepareTargetDE();
%extend {
+ // Direct ctor to which the communicator address can be passed from Python with "MPI._addressof(the_com)":
+ InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long comm_ptr)
+ {
+ return new InterpKernelDEC(src_ids, trg_ids, *((MPI_Comm*)comm_ptr));
+ }
+
// This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
// which Swig doesn't handle nicely.
// It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
{
return new InterpKernelDEC(source_group,target_group);
}
-
- static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long another_comm)
+
+ static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long another_comm)
{
return new InterpKernelDEC(src_ids,trg_ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
}
class OverlapDEC : public DEC, public INTERP_KERNEL::InterpolationOptions
{
public:
- OverlapDEC(const std::set<int>& procIds);
+ OverlapDEC(const std::set<int>& procIds); // hides the optional MPI_Comm parameter from SWIG
virtual ~OverlapDEC();
void release();
void debugPrintWorkSharing(std::ostream & ostr) const;
%extend {
+ OverlapDEC(const std::set<int>& ids, long long comm_ptr)
+ {
+ return new OverlapDEC(ids, *((MPI_Comm*)comm_ptr));
+ }
+
// This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
// which Swig doesn't handle nicely.
// It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
- static OverlapDEC* _NewWithComm_internal(const std::set<int>& ids, long another_comm)
+ static OverlapDEC* _NewWithComm_internal(const std::set<int>& ids, long long another_comm)
{
return new OverlapDEC(ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
}
%pythoncode %{
# And here we use mpi4py ability to provide its internal (C++) pointer to the communicator:
+# NB: writing a proper typemap for MPI_Comm from Python to C++ requires the inclusion of mpi4py headers and .i file ... an extra dependency ...
def _IKDEC_WithComm_internal(src_procs, tgt_procs, mpicomm=None):
from mpi4py import MPI
# Check iterable:
except:
s, t = None, None
msg = "InterpKernelDEC: invalid type in ctor arguments! Possible signatures are:\n"
+ msg += " - InterpKernelDEC(ProcessorGroup, ProcessorGroup)\n"
+ msg += " - InterpKernelDEC(<iterable>, <iterable>)\n"
+ msg += " - InterpKernelDEC(<iterable>, <iterable>, MPI_Comm*) : WARNING here the address of the communicator should be passed with MPI._addressof(the_com)\n"
msg += " - InterpKernelDEC.New(ProcessorGroup, ProcessorGroup)\n"
msg += " - InterpKernelDEC.New(<iterable>, <iterable>)\n"
msg += " - InterpKernelDEC.New(<iterable>, <iterable>, MPI_Comm)\n"
msg = "OverlapDEC: invalid type in ctor arguments! Possible signatures are:\n"
msg += " - OverlapDEC.New(<iterable>)\n"
msg += " - OverlapDEC.New(<iterable>, MPI_Comm)\n"
+ msg += " - OverlapDEC(<iterable>)\n"
+ msg += " - OverlapDEC(<iterable>, MPI_Comm*) : WARNING here the address of the communicator should be passed with MPI._addressof(the_com)\n"
raise InterpKernelException(msg)
if mpicomm is None:
return OverlapDEC(g)
l1, l2 = range(nproc_source), range(size - nproc_source, size)
# With 2 iterables:
i1 = InterpKernelDEC.New(l1, l2)
+ # Should also work directly:
+ i2 = InterpKernelDEC(l1, l2)
# With 2 proc groups:
interface = CommInterface()
source_group = MPIProcessorGroup(interface, list(l1))
target_group = MPIProcessorGroup(interface, list(l2))
- i2 = InterpKernelDEC.New(source_group, target_group)
+ i3 = InterpKernelDEC.New(source_group, target_group)
# Should also work directly:
- i3 = InterpKernelDEC(source_group, target_group)
+ i4 = InterpKernelDEC(source_group, target_group)
# With 2 iterables and a custom comm:
- i4 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+ i5 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+ # Should also work directly via the address-passing **hack**:
+ i6 = InterpKernelDEC(l1, l2, MPI._addressof(MPI.COMM_WORLD))
# Should fail with 2 proc groups **and** a communicator
self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
- i4.release(); i3.release(); i2.release(); i1.release()
+ self.assertRaises(NotImplementedError, InterpKernelDEC, source_group, target_group, MPI.COMM_WORLD)
+ i6.release(); i5.release(); i4.release(); i3.release(); i2.release(); i1.release()
source_group.release()
target_group.release()