1 // Copyright (C) 2017-2023 CEA, EDF
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
22 %template() std::set<int>;
25 #include "CommInterface.hxx"
26 #include "ProcessorGroup.hxx"
27 #include "Topology.hxx"
28 #include "MPIProcessorGroup.hxx"
30 #include "InterpKernelDEC.hxx"
31 #include "NonCoincidentDEC.hxx"
32 #include "StructuredCoincidentDEC.hxx"
33 #include "OverlapDEC.hxx"
34 #include "ParaMESH.hxx"
35 #include "ParaFIELD.hxx"
36 #include "ICoCoMEDDoubleField.hxx"
37 #include "ICoCoMEDIntField.hxx"
38 #include "ComponentTopology.hxx"
39 #include "ParaUMesh.hxx"
40 #include "ParaSkyLineArray.hxx"
41 #include "ParaDataArray.hxx"
43 using namespace INTERP_KERNEL;
44 using namespace MEDCoupling;
45 using namespace ICoCo;
48 %include "InterpolationOptions.hxx"
49 %include "ProcessorGroup.hxx"
50 %include "DECOptions.hxx"
51 %include "ParaMESH.hxx"
52 %include "ParaFIELD.hxx"
53 %include "MPIProcessorGroup.hxx"
54 %include "ComponentTopology.hxx"
56 %include "DisjointDEC.hxx"
57 %include "StructuredCoincidentDEC.hxx"
// SWIG ownership directives: each factory/getter listed below returns a newly
// referenced object, so the generated Python proxy takes ownership of the
// returned pointer (Python side will release it when garbage-collected).
59 %newobject MEDCoupling::ParaUMesh::New;
60 %newobject MEDCoupling::ParaUMesh::getMesh;
61 %newobject MEDCoupling::ParaUMesh::getGlobalCellIds;
62 %newobject MEDCoupling::ParaUMesh::getGlobalNodeIds;
63 %newobject MEDCoupling::ParaUMesh::getCellIdsLyingOnNodes;
64 %newobject MEDCoupling::ParaUMesh::redistributeCells;
65 %newobject MEDCoupling::ParaUMesh::redistributeCellField;
66 %newobject MEDCoupling::ParaUMesh::redistributeNodeField;
67 %newobject MEDCoupling::ParaDataArrayInt32::New;
68 %newobject MEDCoupling::ParaDataArrayInt32::buildComplement;
69 %newobject MEDCoupling::ParaDataArrayInt64::New;
70 %newobject MEDCoupling::ParaDataArrayInt64::buildComplement;
71 %newobject MEDCoupling::ParaSkyLineArray::New;
72 %newobject MEDCoupling::ParaSkyLineArray::equiRedistribute;
73 %newobject MEDCoupling::ParaSkyLineArray::getSkyLineArray;
74 %newobject MEDCoupling::ParaSkyLineArray::getGlobalIdsArray;
// The _New*_internal helpers below are wrapped too: they are the hidden C++
// entry points behind the user-facing InterpKernelDEC.New / OverlapDEC.New
// Python factories defined in the %pythoncode section at the end of this file.
76 %newobject MEDCoupling::InterpKernelDEC::_NewWithPG_internal;
77 %newobject MEDCoupling::InterpKernelDEC::_NewWithComm_internal;
78 %newobject MEDCoupling::InterpKernelDEC::retrieveNonFetchedIds;
79 %newobject MEDCoupling::OverlapDEC::_NewWithComm_internal;
// These classes are intrusively reference-counted (they derive from
// RefCountObject): when the Python proxy is destroyed, call decrRef()
// instead of operator delete so the C++ refcount is honoured.
81 %feature("unref") ParaSkyLineArray "$this->decrRef();"
82 %feature("unref") ParaUMesh "$this->decrRef();"
83 %feature("unref") ParaDataArrayInt32 "$this->decrRef();"
84 %feature("unref") ParaDataArrayInt64 "$this->decrRef();"
// --- CommInterface: thin const wrappers over the raw MPI C API -------------
// (the enclosing class declaration starts before this view). Each method
// mirrors the corresponding MPI_* call 1:1 — same argument order, and the
// MPI error code is returned as int.
//
// Communicator / group management:
94 virtual ~CommInterface();
95 int worldSize() const;
96 int commSize(MPI_Comm comm, int* size) const;
97 int commRank(MPI_Comm comm, int* rank) const;
98 int commGroup(MPI_Comm comm, MPI_Group* group) const;
99 int groupIncl(MPI_Group group, int size, int* ranks, MPI_Group* group_output) const;
100 int commCreate(MPI_Comm comm, MPI_Group group, MPI_Comm* comm_output) const;
101 int groupFree(MPI_Group* group) const;
102 int commFree(MPI_Comm* comm) const;
// Blocking point-to-point communication:
104 int send(void* buffer, int count, MPI_Datatype datatype, int target, int tag, MPI_Comm comm) const;
105 int recv(void* buffer, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status* status) const;
106 int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
107 int dest, int sendtag, void* recvbuf, int recvcount,
108 MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm,
// Non-blocking point-to-point communication and request completion:
111 int Isend(void* buffer, int count, MPI_Datatype datatype, int target,
112 int tag, MPI_Comm comm, MPI_Request *request) const;
113 int Irecv(void* buffer, int count, MPI_Datatype datatype, int source,
114 int tag, MPI_Comm comm, MPI_Request* request) const;
116 int wait(MPI_Request *request, MPI_Status *status) const;
117 int test(MPI_Request *request, int *flag, MPI_Status *status) const;
118 int requestFree(MPI_Request *request) const;
119 int waitany(int count, MPI_Request *array_of_requests, int *index, MPI_Status *status) const;
120 int testany(int count, MPI_Request *array_of_requests, int *index, int *flag, MPI_Status *status) const;
// NOTE(review): waitall alone is defined inline (direct MPI_Waitall call)
// while its siblings are declarations only — presumably implemented in
// CommInterface.hxx/.cxx; confirm this asymmetry is intentional.
121 int waitall(int count, MPI_Request *array_of_requests, MPI_Status *array_of_status) const { return MPI_Waitall(count, array_of_requests, array_of_status); }
122 int testall(int count, MPI_Request *array_of_requests, int *flag, MPI_Status *array_of_status) const;
123 int waitsome(int incount, MPI_Request *array_of_requests,int *outcount, int *array_of_indices, MPI_Status *array_of_status) const;
124 int testsome(int incount, MPI_Request *array_of_requests, int *outcount,
125 int *array_of_indices, MPI_Status *array_of_status) const;
126 int probe(int source, int tag, MPI_Comm comm, MPI_Status *status) const;
127 int Iprobe(int source, int tag, MPI_Comm comm, int *flag, MPI_Status *status) const;
128 int cancel(MPI_Request *request) const;
129 int testCancelled(MPI_Status *status, int *flag) const;
130 int barrier(MPI_Comm comm) const;
131 int errorString(int errorcode, char *string, int *resultlen) const;
132 int getCount(MPI_Status *status, MPI_Datatype datatype, int *count) const;
// Collective operations:
134 int broadcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm) const;
135 int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
136 void* recvbuf, int recvcount, MPI_Datatype recvtype,
137 MPI_Comm comm) const;
138 int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype,
139 void* recvbuf, int recvcount, MPI_Datatype recvtype,
140 MPI_Comm comm) const;
141 int allToAllV(void* sendbuf, int* sendcounts, int* senddispls,
142 MPI_Datatype sendtype, void* recvbuf, int* recvcounts,
143 int* recvdispls, MPI_Datatype recvtype,
144 MPI_Comm comm) const;
146 int reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) const;
147 int allReduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) const;
// Python-friendly collective gather: every rank contributes `array` and
// receives the arrays of all ranks as a Python list of DataArrayIdType.
// NOTE(review): the communicator is hard-coded to MPI_COMM_WORLD here,
// unlike the generic wrappers above which take an MPI_Comm argument.
150 PyObject *allGatherArrays(const DataArrayIdType *array) const
152 std::vector< MCAuto<DataArrayIdType> > ret;
153 self->allGatherArrays(MPI_COMM_WORLD,array,ret);
154 return convertFromVectorAutoObjToPyObj<DataArrayIdType>(ret,SWIGTITraits<mcIdType>::TI);
// Python-friendly all-to-all: `arrays` is a Python sequence with one
// DataArrayIdType per destination rank; the result is a Python list with one
// DataArrayIdType received from each rank. Communicator is hard-coded to
// MPI_COMM_WORLD, as in allGatherArrays above.
157 PyObject *allToAllArrays(PyObject *arrays) const
159 std::vector< DataArrayIdType * > arraysIn;
160 std::vector< MCAuto<DataArrayIdType> > arrayOut;
// Convert the incoming Python sequence to raw pointers, then wrap them in
// MCAuto handles so the C++ API can take shared ownership during the call.
161 convertFromPyObjVectorOfObj<MEDCoupling::DataArrayIdType*>(arrays,SWIGTITraits<mcIdType>::TI,"DataArrayIdType",arraysIn);
162 std::vector< MCAuto<DataArrayIdType> > arraysIn2(FromVecToVecAuto<DataArrayIdType>(arraysIn));
163 self->allToAllArrays(MPI_COMM_WORLD,arraysIn2,arrayOut);
164 return convertFromVectorAutoObjToPyObj<DataArrayIdType>(arrayOut,SWIGTITraits<mcIdType>::TI);
// ParaUMesh: an unstructured mesh distributed over MPI ranks. Each rank holds
// a local MEDCouplingUMesh plus the global ids of its cells and nodes, which
// the redistribute* methods use to re-balance mesh and field data.
169 class ParaUMesh : public RefCountObject
172 static ParaUMesh *New(MEDCouplingUMesh *mesh, DataArrayIdType *globalCellIds, DataArrayIdType *globalNodeIds);
173 ParaUMesh *redistributeCells(const DataArrayIdType *globalCellIds) const;
// redistributeCellField / redistributeNodeField are overloaded on the field
// value type (id-typed or double-typed arrays).
174 DataArrayIdType *redistributeCellField(const DataArrayIdType *globalCellIds, const DataArrayIdType *fieldValueToRed) const;
175 DataArrayDouble *redistributeCellField(const DataArrayIdType *globalCellIds, const DataArrayDouble *fieldValueToRed) const;
176 DataArrayIdType *redistributeNodeField(const DataArrayIdType *globalCellIds, const DataArrayIdType *fieldValueToRed) const;
177 DataArrayDouble *redistributeNodeField(const DataArrayIdType *globalCellIds, const DataArrayDouble *fieldValueToRed) const;
// %extend: expose a Python constructor that simply forwards to New().
180 ParaUMesh(MEDCouplingUMesh *mesh, DataArrayIdType *globalCellIds, DataArrayIdType *globalNodeIds)
182 return ParaUMesh::New(mesh,globalCellIds,globalNodeIds);
// The getters below bump the refcount before returning, because %newobject
// (declared earlier) makes Python take ownership of the returned pointer.
185 MEDCouplingUMesh *getMesh()
187 MEDCouplingUMesh *ret(self->getMesh());
188 if(ret) ret->incrRef();
192 DataArrayIdType *getGlobalCellIds()
194 DataArrayIdType *ret(self->getGlobalCellIds());
195 if(ret) ret->incrRef();
199 DataArrayIdType *getGlobalNodeIds()
201 DataArrayIdType *ret(self->getGlobalNodeIds());
202 if(ret) ret->incrRef();
// Returns the local cell ids whose nodes are (fullyIn=true: all, false: any)
// among the given global node ids; MCAuto releases ownership to Python.
206 DataArrayIdType *getCellIdsLyingOnNodes(const DataArrayIdType *globalNodeIds, bool fullyIn) const
208 MCAuto<DataArrayIdType> ret(self->getCellIdsLyingOnNodes(globalNodeIds,fullyIn));
// ParaDataArray: distributed data-array hierarchy. The two concrete
// specializations (Int32/Int64) wrap a per-rank sequential DataArray;
// buildComplement() computes, collectively, the ids in [0, nbOfElems)
// that are present on no rank.
214 class ParaDataArray : public RefCountObject
218 class ParaDataArrayInt32 : public ParaDataArray
221 static ParaDataArrayInt32 *New(DataArrayInt32 *seqDa);
222 DataArrayIdType *buildComplement(int nbOfElems) const;
// %extend: Python constructor forwarding to New().
225 ParaDataArrayInt32(DataArrayInt32 *seqDa)
227 return ParaDataArrayInt32::New(seqDa);
232 class ParaDataArrayInt64 : public ParaDataArray
235 static ParaDataArrayInt64 *New(DataArrayInt64 *seqDa);
236 DataArrayIdType *buildComplement(long nbOfElems) const;
// %extend: Python constructor forwarding to New().
239 ParaDataArrayInt64(DataArrayInt64 *seqDa)
241 return ParaDataArrayInt64::New(seqDa);
// ParaSkyLineArray: a distributed MEDCouplingSkyLineArray (ragged array)
// paired with the global ids of its entries on this rank.
246 class ParaSkyLineArray : public RefCountObject
249 static ParaSkyLineArray *New(MEDCouplingSkyLineArray *ska, DataArrayIdType *globalIds);
// %extend: Python constructor forwarding to New().
252 ParaSkyLineArray(MEDCouplingSkyLineArray *ska, DataArrayIdType *globalIds)
254 return ParaSkyLineArray::New(ska,globalIds);
// Collectively redistributes the entries so that each rank holds an equal
// share of the nbOfEntities global entities; ownership goes to Python.
257 ParaSkyLineArray *equiRedistribute(mcIdType nbOfEntities) const
259 MCAuto<ParaSkyLineArray> ret(self->equiRedistribute(nbOfEntities));
// Getters return with ownership transferred to Python (see %newobject above).
263 MEDCouplingSkyLineArray *getSkyLineArray() const
265 MEDCouplingSkyLineArray *ret(self->getSkyLineArray());
271 DataArrayIdType *getGlobalIdsArray() const
273 DataArrayIdType *ret(self->getGlobalIdsArray());
281 /* This object can be used only if MED_ENABLE_FVM is defined*/
282 #ifdef MED_ENABLE_FVM
// NonCoincidentDEC: data-exchange channel between two non-matching meshes;
// only wrapped when the optional FVM support is compiled in.
283 class NonCoincidentDEC : public DEC
286 NonCoincidentDEC(ProcessorGroup& source, ProcessorGroup& target);
// InterpKernelDEC: the main interpolation-based data-exchange channel between
// a source and a target processor group. Inherits the interpolation tuning
// knobs from INTERP_KERNEL::InterpolationOptions.
290 class InterpKernelDEC : public DisjointDEC, public INTERP_KERNEL::InterpolationOptions
294 InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
295 InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids); // hide last optional parameter!
296 virtual ~InterpKernelDEC();
300 void synchronizeWithDefaultValue(double val);
302 void recvData(double time);
304 void sendData(double time , double deltatime);
305 void prepareSourceDE();
306 void prepareTargetDE();
// %extend section: extra constructors for use from Python.
309 // Provides a direct ctor for which the communicator can be passed with "MPI._addressof(the_com)":
310 InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long comm_ptr)
// comm_ptr is the integer address of an MPI_Comm (as given by mpi4py);
// reinterpreted here — the caller must keep the communicator alive.
312 return new InterpKernelDEC(src_ids, trg_ids, *((MPI_Comm*)comm_ptr));
315 // This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
316 // which Swig doesn't handle nicely.
317 // It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
318 static InterpKernelDEC* _NewWithPG_internal(ProcessorGroup& source_group, ProcessorGroup& target_group)
320 return new InterpKernelDEC(source_group,target_group);
323 static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long another_comm)
325 return new InterpKernelDEC(src_ids,trg_ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
// Ids on the source side that were never fetched by any target rank during
// the exchange; MCAuto hands ownership to Python (%newobject above).
328 DataArrayIdType *retrieveNonFetchedIds() const
330 MCAuto<DataArrayIdType> ret = self->retrieveNonFetchedIds();
// OverlapDEC: data-exchange channel where the source and target fields live
// on the SAME processor group (overlapping decomposition), unlike the
// disjoint-group InterpKernelDEC above.
336 class OverlapDEC : public DEC, public INTERP_KERNEL::InterpolationOptions
339 OverlapDEC(const std::set<int>& procIds); // hide optional param comm
340 virtual ~OverlapDEC();
343 void sendRecvData(bool way=true);
// attach*LocalField is overloaded for ParaFIELD (optionally taking ownership),
// raw MEDCouplingFieldDouble, and ICoCo field wrappers.
347 void attachSourceLocalField(ParaFIELD *field, bool ownPt=false);
348 void attachTargetLocalField(ParaFIELD *field, bool ownPt=false);
349 void attachSourceLocalField(MEDCouplingFieldDouble *field);
350 void attachTargetLocalField(MEDCouplingFieldDouble *field);
351 void attachSourceLocalField(ICoCo::MEDDoubleField *field);
352 void attachTargetLocalField(ICoCo::MEDDoubleField *field);
353 ProcessorGroup *getGroup();
354 bool isInGroup() const;
356 void setDefaultValue(double val);
// Selects the algorithm used to split the interpolation work among ranks.
357 void setWorkSharingAlgo(int method);
359 void debugPrintWorkSharing(std::ostream & ostr) const;
// %extend section: extra constructors for use from Python (mirrors the
// InterpKernelDEC pattern above).
362 OverlapDEC(const std::set<int>& ids, long long comm_ptr)
// comm_ptr is the integer address of an MPI_Comm (from MPI._addressof).
364 return new OverlapDEC(ids, *((MPI_Comm*)comm_ptr));
367 // This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
368 // which Swig doesn't handle nicely.
369 // It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
370 static OverlapDEC* _NewWithComm_internal(const std::set<int>& ids, long long another_comm)
372 return new OverlapDEC(ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
377 } // end namespace MEDCoupling
// %extend ParaMESH: convenience getters returning the global numbering
// arrays as plain Python lists (instead of raw mcIdType pointers).
379 %extend MEDCoupling::ParaMESH
// NOTE(review): PyInt_FromLong is the Python 2 C API; under Python 3 this
// presumably relies on a compatibility #define provided by SWIG/MEDCoupling
// headers — confirm, otherwise PyLong_FromLong is the Python 3 spelling.
381 PyObject *getGlobalNumberingCell2() const
383 const mcIdType *tmp=self->getGlobalNumberingCell();
384 mcIdType size=self->getCellMesh()->getNumberOfCells();
385 PyObject *ret=PyList_New(size);
386 for(mcIdType i=0;i<size;i++)
387 PyList_SetItem(ret,i,PyInt_FromLong(tmp[i]));
391 PyObject *getGlobalNumberingFace2() const
393 const mcIdType *tmp=self->getGlobalNumberingFace();
394 mcIdType size=self->getFaceMesh()->getNumberOfCells();
395 PyObject *ret=PyList_New(size);
396 for(mcIdType i=0;i<size;i++)
397 PyList_SetItem(ret,i,PyInt_FromLong(tmp[i]));
401 PyObject *getGlobalNumberingNode2() const
// Node count is taken from the cell mesh (nodes belong to the cell mesh).
403 const mcIdType *tmp=self->getGlobalNumberingNode();
404 mcIdType size=self->getCellMesh()->getNumberOfNodes();
405 PyObject *ret=PyList_New(size);
406 for(mcIdType i=0;i<size;i++)
407 PyList_SetItem(ret,i,PyInt_FromLong(tmp[i]));
# Alias ParaDataArrayInt to the concrete width matching the library's
# mcIdType build configuration (64- or 32-bit ids).
413 if MEDCouplingUse64BitIDs():
414 ParaDataArrayInt = ParaDataArrayInt64
416 ParaDataArrayInt = ParaDataArrayInt32
421 # And here we use mpi4py ability to provide its internal (C++) pointer to the communicator:
422 # NB: doing a proper typemap from MPI_Comm from Python to C++ requires the inclusion of mpi4py headers and .i file ... an extra dependency ...
423 def _IKDEC_WithComm_internal(src_procs, tgt_procs, mpicomm=None):
424 from mpi4py import MPI
427 s, t = [el for el in src_procs], [el for el in tgt_procs]
430 msg = "InterpKernelDEC: invalid type in ctor arguments! Possible signatures are:\n"
431 msg += " - InterpKernelDEC(ProcessorGroup, ProcessorGroup)\n"
432 msg += " - InterpKernelDEC(<iterable>, <iterable>)\n"
433 msg += " - InterpKernelDEC(<iterable>, <iterable>, MPI_Comm*) : WARNING here the address of the communicator should be passed with MPI._addressof(the_com)\n"
434 msg += " - InterpKernelDEC.New(ProcessorGroup, ProcessorGroup)\n"
435 msg += " - InterpKernelDEC.New(<iterable>, <iterable>)\n"
436 msg += " - InterpKernelDEC.New(<iterable>, <iterable>, MPI_Comm)\n"
438 if isinstance(src_procs, ProcessorGroup) and isinstance(tgt_procs, ProcessorGroup):
439 return InterpKernelDEC._NewWithPG_internal(src_procs, tgt_procs)
440 elif not s is None: # iterable
441 return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(MPI.COMM_WORLD))
443 raise InterpKernelException(msg)
445 if s is None: raise InterpKernelException(msg) # must be iterable
446 return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(mpicomm))
# Python-level factory behind OverlapDEC.New: same address-smuggling trick as
# _IKDEC_WithComm_internal, but with a single processor-id iterable.
448 def _ODEC_WithComm_internal(procs, mpicomm=None):
449 from mpi4py import MPI
# Materialize the iterable; presumably wrapped in a try/except whose handler
# raises the message below on non-iterable input (lines elided here).
452 g = [el for el in procs]
454 msg = "OverlapDEC: invalid type in ctor arguments! Possible signatures are:\n"
455 msg += "   - OverlapDEC.New(<iterable>)\n"
456 msg += "   - OverlapDEC.New(<iterable>, MPI_Comm)\n"
457 msg += "   - OverlapDEC(<iterable>)\n"
458 msg += "   - OverlapDEC(<iterable>, MPI_Comm*) : WARNING here the address of the communicator should be passed with MPI._addressof(the_com)\n"
459 raise InterpKernelException(msg)
# Pass the raw address of the mpi4py communicator down to C++.
463 return OverlapDEC._NewWithComm_internal(g, MPI._addressof(mpicomm))
# Install the dispatching factories as user-facing .New() class methods.
465 InterpKernelDEC.New = _IKDEC_WithComm_internal
466 OverlapDEC.New = _ODEC_WithComm_internal