// Copyright (C) 2017-2023 CEA, EDF
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
%template() std::set<int>;

#include "CommInterface.hxx"
#include "ProcessorGroup.hxx"
#include "Topology.hxx"
#include "MPIProcessorGroup.hxx"

#include "InterpKernelDEC.hxx"
#include "NonCoincidentDEC.hxx"
#include "StructuredCoincidentDEC.hxx"
#include "OverlapDEC.hxx"
#include "ParaMESH.hxx"
#include "ParaFIELD.hxx"
#include "ICoCoMEDDoubleField.hxx"
#include "ICoCoMEDIntField.hxx"
#include "ComponentTopology.hxx"
#include "ParaUMesh.hxx"
#include "ParaSkyLineArray.hxx"
#include "ParaDataArray.hxx"

using namespace INTERP_KERNEL;
using namespace MEDCoupling;
using namespace ICoCo;
%include "InterpolationOptions.hxx"
%include "ProcessorGroup.hxx"
%include "DECOptions.hxx"
%include "ParaMESH.hxx"
%include "ParaFIELD.hxx"
%include "MPIProcessorGroup.hxx"
%include "ComponentTopology.hxx"

%include "DisjointDEC.hxx"
%include "StructuredCoincidentDEC.hxx"
%newobject MEDCoupling::ParaUMesh::New;
%newobject MEDCoupling::ParaUMesh::getMesh;
%newobject MEDCoupling::ParaUMesh::getGlobalCellIds;
%newobject MEDCoupling::ParaUMesh::getGlobalNodeIds;
%newobject MEDCoupling::ParaUMesh::getCellIdsLyingOnNodes;
%newobject MEDCoupling::ParaUMesh::redistributeCells;
%newobject MEDCoupling::ParaUMesh::redistributeCellField;
%newobject MEDCoupling::ParaUMesh::redistributeNodeField;
%newobject MEDCoupling::ParaDataArrayInt32::New;
%newobject MEDCoupling::ParaDataArrayInt32::buildComplement;
%newobject MEDCoupling::ParaDataArrayInt64::New;
%newobject MEDCoupling::ParaDataArrayInt64::buildComplement;
%newobject MEDCoupling::ParaSkyLineArray::New;
%newobject MEDCoupling::ParaSkyLineArray::equiRedistribute;
%newobject MEDCoupling::ParaSkyLineArray::getSkyLineArray;
%newobject MEDCoupling::ParaSkyLineArray::getGlobalIdsArray;

%newobject MEDCoupling::InterpKernelDEC::_NewWithPG_internal;
%newobject MEDCoupling::InterpKernelDEC::_NewWithComm_internal;
%newobject MEDCoupling::OverlapDEC::_NewWithComm_internal;

%feature("unref") ParaSkyLineArray "$this->decrRef();"
%feature("unref") ParaUMesh "$this->decrRef();"
%feature("unref") ParaDataArrayInt32 "$this->decrRef();"
%feature("unref") ParaDataArrayInt64 "$this->decrRef();"
virtual ~CommInterface();
int worldSize() const;
int commSize(MPI_Comm comm, int* size) const;
int commRank(MPI_Comm comm, int* rank) const;
int commGroup(MPI_Comm comm, MPI_Group* group) const;
int groupIncl(MPI_Group group, int size, int* ranks, MPI_Group* group_output) const;
int commCreate(MPI_Comm comm, MPI_Group group, MPI_Comm* comm_output) const;
int groupFree(MPI_Group* group) const;
int commFree(MPI_Comm* comm) const;

int send(void* buffer, int count, MPI_Datatype datatype, int target, int tag, MPI_Comm comm) const;
int recv(void* buffer, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status* status) const;
int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
             int dest, int sendtag, void* recvbuf, int recvcount,
             MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm,
             MPI_Status* status) const;
int Isend(void* buffer, int count, MPI_Datatype datatype, int target,
          int tag, MPI_Comm comm, MPI_Request* request) const;
int Irecv(void* buffer, int count, MPI_Datatype datatype, int source,
          int tag, MPI_Comm comm, MPI_Request* request) const;

int wait(MPI_Request *request, MPI_Status *status) const;
int test(MPI_Request *request, int *flag, MPI_Status *status) const;
int requestFree(MPI_Request *request) const;
int waitany(int count, MPI_Request *array_of_requests, int *index, MPI_Status *status) const;
int testany(int count, MPI_Request *array_of_requests, int *index, int *flag, MPI_Status *status) const;
int waitall(int count, MPI_Request *array_of_requests, MPI_Status *array_of_status) const { return MPI_Waitall(count, array_of_requests, array_of_status); }
int testall(int count, MPI_Request *array_of_requests, int *flag, MPI_Status *array_of_status) const;
int waitsome(int incount, MPI_Request *array_of_requests, int *outcount, int *array_of_indices, MPI_Status *array_of_status) const;
int testsome(int incount, MPI_Request *array_of_requests, int *outcount,
             int *array_of_indices, MPI_Status *array_of_status) const;
int probe(int source, int tag, MPI_Comm comm, MPI_Status *status) const;
int Iprobe(int source, int tag, MPI_Comm comm, int *flag, MPI_Status *status) const;
int cancel(MPI_Request *request) const;
int testCancelled(MPI_Status *status, int *flag) const;
int barrier(MPI_Comm comm) const;
int errorString(int errorcode, char *string, int *resultlen) const;
int getCount(MPI_Status *status, MPI_Datatype datatype, int *count) const;

int broadcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm) const;
int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
              void* recvbuf, int recvcount, MPI_Datatype recvtype,
              MPI_Comm comm) const;
int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype,
             void* recvbuf, int recvcount, MPI_Datatype recvtype,
             MPI_Comm comm) const;
int allToAllV(void* sendbuf, int* sendcounts, int* senddispls,
              MPI_Datatype sendtype, void* recvbuf, int* recvcounts,
              int* recvdispls, MPI_Datatype recvtype,
              MPI_Comm comm) const;

int reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) const;
int allReduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) const;
PyObject *allGatherArrays(const DataArrayIdType *array) const
  std::vector< MCAuto<DataArrayIdType> > ret;
  self->allGatherArrays(MPI_COMM_WORLD,array,ret);
  return convertFromVectorAutoObjToPyObj<DataArrayIdType>(ret,SWIGTITraits<mcIdType>::TI);

PyObject *allToAllArrays(PyObject *arrays) const
  std::vector< DataArrayIdType * > arraysIn;
  std::vector< MCAuto<DataArrayIdType> > arrayOut;
  convertFromPyObjVectorOfObj<MEDCoupling::DataArrayIdType*>(arrays,SWIGTITraits<mcIdType>::TI,"DataArrayIdType",arraysIn);
  std::vector< MCAuto<DataArrayIdType> > arraysIn2(FromVecToVecAuto<DataArrayIdType>(arraysIn));
  self->allToAllArrays(MPI_COMM_WORLD,arraysIn2,arrayOut);
  return convertFromVectorAutoObjToPyObj<DataArrayIdType>(arrayOut,SWIGTITraits<mcIdType>::TI);
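// A hedged usage sketch, kept as a comment: from Python, the two helpers above
// exchange DataArrayInt objects over MPI_COMM_WORLD in an mpi4py-launched job.
// The module name "medcoupling" and the default CommInterface() ctor are
// assumptions of this sketch; adapt them to your build.
//
//   from mpi4py import MPI                 # initializes MPI
//   import medcoupling as mc
//   ci = mc.CommInterface()
//   rk, sz = MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size
//   gathered = ci.allGatherArrays(mc.DataArrayInt([rk]))                    # one array per rank
//   received = ci.allToAllArrays([mc.DataArrayInt([rk]) for _ in range(sz)])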
class ParaUMesh : public RefCountObject
  static ParaUMesh *New(MEDCouplingUMesh *mesh, DataArrayIdType *globalCellIds, DataArrayIdType *globalNodeIds);
  ParaUMesh *redistributeCells(const DataArrayIdType *globalCellIds) const;
  DataArrayIdType *redistributeCellField(const DataArrayIdType *globalCellIds, const DataArrayIdType *fieldValueToRed) const;
  DataArrayDouble *redistributeCellField(const DataArrayIdType *globalCellIds, const DataArrayDouble *fieldValueToRed) const;
  DataArrayIdType *redistributeNodeField(const DataArrayIdType *globalCellIds, const DataArrayIdType *fieldValueToRed) const;
  DataArrayDouble *redistributeNodeField(const DataArrayIdType *globalCellIds, const DataArrayDouble *fieldValueToRed) const;

  ParaUMesh(MEDCouplingUMesh *mesh, DataArrayIdType *globalCellIds, DataArrayIdType *globalNodeIds)
    return ParaUMesh::New(mesh,globalCellIds,globalNodeIds);

  MEDCouplingUMesh *getMesh()
    MEDCouplingUMesh *ret(self->getMesh());
    if(ret) ret->incrRef();

  DataArrayIdType *getGlobalCellIds()
    DataArrayIdType *ret(self->getGlobalCellIds());
    if(ret) ret->incrRef();

  DataArrayIdType *getGlobalNodeIds()
    DataArrayIdType *ret(self->getGlobalNodeIds());
    if(ret) ret->incrRef();

  DataArrayIdType *getCellIdsLyingOnNodes(const DataArrayIdType *globalNodeIds, bool fullyIn) const
    MCAuto<DataArrayIdType> ret(self->getCellIdsLyingOnNodes(globalNodeIds,fullyIn));
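// Illustrative sketch (comment only, hypothetical data): a ParaUMesh wraps a local
// mesh piece together with the global ids of its cells and nodes, and can be
// redistributed according to a new global-cell-id layout. "local_mesh",
// "cell_gids", "node_gids", "target_cell_gids" and "some_node_gids" are placeholders.
//
//   pum  = mc.ParaUMesh(local_mesh, cell_gids, node_gids)
//   pum2 = pum.redistributeCells(target_cell_gids)               # new ParaUMesh
//   lying = pum.getCellIdsLyingOnNodes(some_node_gids, False)    # local cell ids touching those nodes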
class ParaDataArray : public RefCountObject

class ParaDataArrayInt32 : public ParaDataArray
  static ParaDataArrayInt32 *New(DataArrayInt32 *seqDa);
  DataArrayIdType *buildComplement(int nbOfElems) const;

  ParaDataArrayInt32(DataArrayInt32 *seqDa)
    return ParaDataArrayInt32::New(seqDa);

class ParaDataArrayInt64 : public ParaDataArray
  static ParaDataArrayInt64 *New(DataArrayInt64 *seqDa);
  DataArrayIdType *buildComplement(long nbOfElems) const;

  ParaDataArrayInt64(DataArrayInt64 *seqDa)
    return ParaDataArrayInt64::New(seqDa);

class ParaSkyLineArray : public RefCountObject
  static ParaSkyLineArray *New(MEDCouplingSkyLineArray *ska, DataArrayIdType *globalIds);

  ParaSkyLineArray(MEDCouplingSkyLineArray *ska, DataArrayIdType *globalIds)
    return ParaSkyLineArray::New(ska,globalIds);

  ParaSkyLineArray *equiRedistribute(mcIdType nbOfEntities) const
    MCAuto<ParaSkyLineArray> ret(self->equiRedistribute(nbOfEntities));

  MEDCouplingSkyLineArray *getSkyLineArray() const
    MEDCouplingSkyLineArray *ret(self->getSkyLineArray());

  DataArrayIdType *getGlobalIdsArray() const
    DataArrayIdType *ret(self->getGlobalIdsArray());
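// Hedged sketch (comment only): a ParaSkyLineArray pairs a MEDCouplingSkyLineArray
// with the global ids of its entries; equiRedistribute() is expected, as its name
// suggests, to rebalance the entries evenly across ranks. "skyline", "global_ids"
// and "total_nb_entities" are placeholders.
//
//   pska = mc.ParaSkyLineArray(skyline, global_ids)
//   balanced = pska.equiRedistribute(total_nb_entities)
//   ska_loc, gids_loc = balanced.getSkyLineArray(), balanced.getGlobalIdsArray()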
/* This object can be used only if MED_ENABLE_FVM is defined */
#ifdef MED_ENABLE_FVM
class NonCoincidentDEC : public DEC
  NonCoincidentDEC(ProcessorGroup& source, ProcessorGroup& target);
#endif
class InterpKernelDEC : public DisjointDEC, public INTERP_KERNEL::InterpolationOptions
  InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
  InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids); // hide last optional parameter!
  virtual ~InterpKernelDEC();

  void recvData(double time);
  void sendData(double time, double deltatime);
  void prepareSourceDE();
  void prepareTargetDE();

  // Provides a direct ctor for which the communicator can be passed with "MPI._addressof(the_com)":
  InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long comm_ptr)
    return new InterpKernelDEC(src_ids, trg_ids, *((MPI_Comm*)comm_ptr));

  // This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
  // which Swig doesn't handle nicely.
  // It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
  static InterpKernelDEC* _NewWithPG_internal(ProcessorGroup& source_group, ProcessorGroup& target_group)
    return new InterpKernelDEC(source_group,target_group);

  static InterpKernelDEC* _NewWithComm_internal(const std::set<int>& src_ids, const std::set<int>& trg_ids, long long another_comm)
    return new InterpKernelDEC(src_ids,trg_ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
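// Hedged illustration (comment only) of the direct ctor above: from Python, the raw
// communicator address is obtained with mpi4py. The rank sets and communicator are
// placeholders; the field attachment and synchronization calls are only a reminder
// of the usual DisjointDEC workflow, not shown in this file.
//
//   from mpi4py import MPI
//   dec = InterpKernelDEC([0], [1, 2], MPI._addressof(MPI.COMM_WORLD))
//   # ... attach the local field, synchronize, then source ranks send
//   # (dec.sendData()) while target ranks receive (dec.recvData()).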
class OverlapDEC : public DEC, public INTERP_KERNEL::InterpolationOptions
  OverlapDEC(const std::set<int>& procIds); // hide optional param comm
  virtual ~OverlapDEC();

  void sendRecvData(bool way=true);

  void attachSourceLocalField(ParaFIELD *field, bool ownPt=false);
  void attachTargetLocalField(ParaFIELD *field, bool ownPt=false);
  void attachSourceLocalField(MEDCouplingFieldDouble *field);
  void attachTargetLocalField(MEDCouplingFieldDouble *field);
  void attachSourceLocalField(ICoCo::MEDDoubleField *field);
  void attachTargetLocalField(ICoCo::MEDDoubleField *field);
  ProcessorGroup *getGroup();
  bool isInGroup() const;

  void setDefaultValue(double val);
  void setWorkSharingAlgo(int method);

  void debugPrintWorkSharing(std::ostream & ostr) const;

  OverlapDEC(const std::set<int>& ids, long long comm_ptr)
    return new OverlapDEC(ids, *((MPI_Comm*)comm_ptr));

  // This one should really not be called directly by the user since it still has an interface with a pointer to MPI_Comm
  // which Swig doesn't handle nicely.
  // It is just here to provide a constructor taking a **pointer** to a comm - See pythoncode below.
  static OverlapDEC* _NewWithComm_internal(const std::set<int>& ids, long long another_comm)
    return new OverlapDEC(ids, *(MPI_Comm*)another_comm); // I know, ugly cast ...
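// Hedged illustration (comment only): OverlapDEC works inside a single group of
// ranks that both send and receive. The communicator, rank list and fields below
// are placeholders; synchronize() is assumed from the usual DEC workflow and is
// not part of the declarations shown above.
//
//   odec = OverlapDEC([0, 1, 2], MPI._addressof(MPI.COMM_WORLD))
//   odec.attachSourceLocalField(src_field)     # MEDCouplingFieldDouble, placeholder
//   odec.attachTargetLocalField(tgt_field)
//   odec.synchronize()
//   odec.sendRecvData()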
} // end namespace MEDCoupling

%extend MEDCoupling::ParaMESH
  PyObject *getGlobalNumberingCell2() const
    const mcIdType *tmp=self->getGlobalNumberingCell();
    mcIdType size=self->getCellMesh()->getNumberOfCells();
    PyObject *ret=PyList_New(size);
    for(mcIdType i=0;i<size;i++)
      PyList_SetItem(ret,i,PyInt_FromLong(tmp[i]));

  PyObject *getGlobalNumberingFace2() const
    const mcIdType *tmp=self->getGlobalNumberingFace();
    mcIdType size=self->getFaceMesh()->getNumberOfCells();
    PyObject *ret=PyList_New(size);
    for(mcIdType i=0;i<size;i++)
      PyList_SetItem(ret,i,PyInt_FromLong(tmp[i]));

  PyObject *getGlobalNumberingNode2() const
    const mcIdType *tmp=self->getGlobalNumberingNode();
    mcIdType size=self->getCellMesh()->getNumberOfNodes();
    PyObject *ret=PyList_New(size);
    for(mcIdType i=0;i<size;i++)
      PyList_SetItem(ret,i,PyInt_FromLong(tmp[i]));
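// Hedged note (comment only): the three helpers above return plain Python lists of
// global ids, e.g. for a ParaMESH instance "pm" (placeholder name):
//
//   cell_gids = pm.getGlobalNumberingCell2()   # list of ints, one per local cell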
if MEDCouplingUse64BitIDs():
    ParaDataArrayInt = ParaDataArrayInt64
else:
    ParaDataArrayInt = ParaDataArrayInt32
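# Hedged sketch: ParaDataArrayInt follows the id width of the build. Given the local
# chunk of a distributed set of global ids, buildComplement(n) is expected to return
# the ids of [0, n) held by no rank; the array values below are placeholders:
#
#   pda = ParaDataArrayInt(DataArrayInt([0, 3, 5]))   # global ids held by this rank
#   missing = pda.buildComplement(10)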
# And here we use mpi4py's ability to provide its internal (C++) pointer to the communicator:
# NB: doing a proper typemap for MPI_Comm from Python to C++ would require the inclusion of the mpi4py headers and .i file ... an extra dependency ...

def _IKDEC_WithComm_internal(src_procs, tgt_procs, mpicomm=None):
    from mpi4py import MPI
    # Check that the arguments are iterable:
    try:
        s, t = [el for el in src_procs], [el for el in tgt_procs]
    except TypeError:
        s, t = None, None
    msg =  "InterpKernelDEC: invalid type in ctor arguments! Possible signatures are:\n"
    msg += "  - InterpKernelDEC(ProcessorGroup, ProcessorGroup)\n"
    msg += "  - InterpKernelDEC(<iterable>, <iterable>)\n"
    msg += "  - InterpKernelDEC(<iterable>, <iterable>, MPI_Comm*) : WARNING: here the address of the communicator should be passed with MPI._addressof(the_com)\n"
    msg += "  - InterpKernelDEC.New(ProcessorGroup, ProcessorGroup)\n"
    msg += "  - InterpKernelDEC.New(<iterable>, <iterable>)\n"
    msg += "  - InterpKernelDEC.New(<iterable>, <iterable>, MPI_Comm)\n"
    if mpicomm is None:
        if isinstance(src_procs, ProcessorGroup) and isinstance(tgt_procs, ProcessorGroup):
            return InterpKernelDEC._NewWithPG_internal(src_procs, tgt_procs)
        elif s is not None:  # iterable
            return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(MPI.COMM_WORLD))
        else:
            raise InterpKernelException(msg)
    else:
        if s is None: raise InterpKernelException(msg)  # must be iterable
        return InterpKernelDEC._NewWithComm_internal(s, t, MPI._addressof(mpicomm))
def _ODEC_WithComm_internal(procs, mpicomm=None):
    from mpi4py import MPI
    # Check that the argument is iterable:
    try:
        g = [el for el in procs]
    except TypeError:
        msg =  "OverlapDEC: invalid type in ctor arguments! Possible signatures are:\n"
        msg += "  - OverlapDEC.New(<iterable>)\n"
        msg += "  - OverlapDEC.New(<iterable>, MPI_Comm)\n"
        msg += "  - OverlapDEC(<iterable>)\n"
        msg += "  - OverlapDEC(<iterable>, MPI_Comm*) : WARNING: here the address of the communicator should be passed with MPI._addressof(the_com)\n"
        raise InterpKernelException(msg)
    if mpicomm is None:
        return OverlapDEC(g)
    else:
        return OverlapDEC._NewWithComm_internal(g, MPI._addressof(mpicomm))

InterpKernelDEC.New = _IKDEC_WithComm_internal
OverlapDEC.New = _ODEC_WithComm_internal
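# Hedged examples of the factory signatures listed in the error messages above;
# the rank lists and "sub_comm" (an mpi4py communicator) are placeholders:
#
#   dec  = InterpKernelDEC.New([0], [1, 2])             # implicit MPI_COMM_WORLD
#   dec2 = InterpKernelDEC.New([0], [1, 2], sub_comm)   # explicit mpi4py communicator
#   odec = OverlapDEC.New([0, 1, 2], sub_comm)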