X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FParaMEDMEM%2FCommInterface.hxx;h=9999b4f65327bbf5b99e89df4a25b2ebe44d7088;hb=34b392fa962cf123d25a685aa983d79ede02f3cd;hp=e969f93940947d35b5faffcee367bb671a2ba13d;hpb=b607ffc713080a567fb90595118069ac18181e99;p=tools%2Fmedcoupling.git

diff --git a/src/ParaMEDMEM/CommInterface.hxx b/src/ParaMEDMEM/CommInterface.hxx
index e969f9394..9999b4f65 100644
--- a/src/ParaMEDMEM/CommInterface.hxx
+++ b/src/ParaMEDMEM/CommInterface.hxx
@@ -1,4 +1,4 @@
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2021 CEA/DEN, EDF R&D
 //
 // This library is free software; you can redistribute it and/or
 // modify it under the terms of the GNU Lesser General Public
@@ -53,6 +53,40 @@ namespace MEDCoupling
     static MPI_Datatype MPIDataType;
   };
 
+  /*! \anchor CommInterface-det
+     \class CommInterface
+
+    The class \a CommInterface is the gateway to the MPI library.
+    It wraps all MPI calls, thus trying to shield the rest of the code from direct use of the MPI API
+    (although this is not strictly enforced everywhere in practice ...). It is used in all
+    the \ref parallel "DEC related classes".
+
+    It is typically instantiated right after the MPI_Init() call in a program and is afterwards passed as a
+    parameter to the constructors of the various \ref parallel "parallel objects" so that they access the
+    MPI library via this common interface.
+
+    As an example, the following code excerpt initializes a processor group containing only processor 0.
+
+    \verbatim
+    #include "CommInterface.hxx"
+    #include "ProcessorGroup.hxx"
+
+    int main(int argc, char** argv)
+    {
+      //initialization
+      MPI_Init(&argc, &argv);
+      MEDCoupling::CommInterface comm_interface;
+
+      //setting up a processor group with proc 0
+      std::set<int> procs;
+      procs.insert(0);
+      MEDCoupling::ProcessorGroup group(procs, comm_interface);
+
+      //cleanup
+      MPI_Finalize();
+    }
+    \endverbatim
+   */
   class CommInterface
   {
   public:
@@ -101,28 +135,115 @@ namespace MEDCoupling
     int getCount(MPI_Status *status, MPI_Datatype datatype, int *count) const { return MPI_Get_count(status, datatype, count); }
 
     int broadcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm) const { return MPI_Bcast(buffer, count, datatype, root, comm); }
+    int gather(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) const { return MPI_Gather(const_cast<void*>(sendbuf),sendcount,sendtype,recvbuf,recvcount,recvtype,root,comm); }
+    int gatherV(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, int root, MPI_Comm comm) const { return MPI_Gatherv(const_cast<void*>(sendbuf),sendcount,sendtype,recvbuf,const_cast<int*>(recvcounts),const_cast<int*>(displs),recvtype,root,comm); }
     int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) const { return MPI_Allgather(sendbuf,sendcount, sendtype, recvbuf, recvcount, recvtype, comm); }
     int allGatherV(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[],
-                   const int displs[], MPI_Datatype recvtype, MPI_Comm comm) const { return MPI_Allgatherv(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs,recvtype,comm); }
+                   const int displs[], MPI_Datatype recvtype, MPI_Comm comm) const { return MPI_Allgatherv(const_cast<void*>(sendbuf),sendcount,sendtype,recvbuf,const_cast<int*>(recvcounts),const_cast<int*>(displs),recvtype,comm); }
     int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) const { return MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); }
     int allToAllV(const void* sendbuf, int* sendcounts, int* senddispls, MPI_Datatype sendtype, void* recvbuf, int* recvcounts, int* recvdispls, MPI_Datatype recvtype,
-                  MPI_Comm comm) const { return MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype, recvbuf, recvcounts, recvdispls, recvtype, comm); }
+                  MPI_Comm comm) const { return MPI_Alltoallv(const_cast<void*>(sendbuf), sendcounts, senddispls, sendtype, recvbuf, recvcounts, recvdispls, recvtype, comm); }
 
     int reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) const { return MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm); }
     int allReduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) const { return MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm); }
   public:
+    void gatherArrays(MPI_Comm comm, int root, const DataArrayIdType *array, std::vector< MCAuto<DataArrayIdType> >& arraysOut) const;
     void allGatherArrays(MPI_Comm comm, const DataArrayIdType *array, std::vector< MCAuto<DataArrayIdType> >& arraysOut) const;
     int allGatherArrays(MPI_Comm comm, const DataArrayIdType *array, std::unique_ptr<mcIdType[]>& result, std::unique_ptr<mcIdType[]>& resultIndex) const;
     void allToAllArrays(MPI_Comm comm, const std::vector< MCAuto<DataArrayIdType> >& arrays, std::vector< MCAuto<DataArrayIdType> >& arraysOut) const;
     void allToAllArrays(MPI_Comm comm, const std::vector< MCAuto<DataArrayDouble> >& arrays, std::vector< MCAuto<DataArrayDouble> >& arraysOut) const;
     void allToAllArrays(MPI_Comm comm, const std::vector< MCAuto<DataArrayDouble> >& arrays, MCAuto<DataArrayDouble>& arraysOut) const;
+
+    template<class T>
+    int gatherArraysT(MPI_Comm comm, int root, const typename Traits<T>::ArrayType *array, std::unique_ptr<T[]>& result, std::unique_ptr<mcIdType[]>& resultIndex, int& rank) const
+    {
+      int size;
+      this->commSize(comm,&size);
+      rank = -1;
+      this->commRank(comm,&rank);
+      std::unique_ptr<mcIdType[]> nbOfElems;
+      if(rank==root)
+        nbOfElems.reset(new mcIdType[size]);
+      mcIdType nbOfCellsRequested(array->getNumberOfTuples());
+      this->gather(&nbOfCellsRequested,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,root,comm);
+      std::unique_ptr<int[]> nbOfElemsInt,offsetsIn;
+      if(rank==root)
+        {
+          mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems.get(),nbOfElems.get()+size,0));
+          result.reset(new T[nbOfCellIdsSum]);
+          nbOfElemsInt = CommInterface::ToIntArray(nbOfElems,size);
+          offsetsIn = CommInterface::ComputeOffset(nbOfElemsInt,size);
+        }
+      this->gatherV(array->begin(),nbOfCellsRequested,ParaTraits<T>::MPIDataType,result.get(),nbOfElemsInt.get(),offsetsIn.get(),ParaTraits<T>::MPIDataType,root,comm);
+      if(rank==root)
+        {
+          resultIndex = ComputeOffsetFull(nbOfElems,size);
+        }
+      return size;
+    }
+
+    template<class T>
+    void gatherArraysT2(MPI_Comm comm, int root, const typename Traits<T>::ArrayType *array, std::vector< MCAuto<typename Traits<T>::ArrayType> >& arraysOut) const
+    {
+      using DataArrayT = typename Traits<T>::ArrayType;
+      std::unique_ptr<T[]> result;
+      std::unique_ptr<mcIdType[]> resultIndex;
+      int rank(-1);
+      int size(this->gatherArraysT<T>(comm,root,array,result,resultIndex,rank));
+      arraysOut.resize(size);
+      for(int i = 0 ; i < size ; ++i)
+        {
+          arraysOut[i] = DataArrayT::New();
+          if(rank == root)
+            {
+              mcIdType nbOfEltPack(resultIndex[i+1]-resultIndex[i]);
+              arraysOut[i]->alloc(nbOfEltPack,1);
+              std::copy(result.get()+resultIndex[i],result.get()+resultIndex[i+1],arraysOut[i]->getPointer());
+            }
+        }
+    }
+
+    template<class T>
+    int allGatherArraysT(MPI_Comm comm, const typename Traits<T>::ArrayType *array, std::unique_ptr<T[]>& result, std::unique_ptr<mcIdType[]>& resultIndex) const
+    {
+      int size;
+      this->commSize(comm,&size);
+      std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]);
+      mcIdType nbOfCellsRequested(array->getNumberOfTuples());
+      this->allGather(&nbOfCellsRequested,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
+      mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems.get(),nbOfElems.get()+size,0));
+      result.reset(new T[nbOfCellIdsSum]);
+      std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray(nbOfElems,size) );
+      std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) );
+      this->allGatherV(array->begin(),nbOfCellsRequested,ParaTraits<T>::MPIDataType,result.get(),nbOfElemsInt.get(),offsetsIn.get(),ParaTraits<T>::MPIDataType,comm);
+      resultIndex = ComputeOffsetFull(nbOfElems,size);
+      return size;
+    }
+
+    template<class T>
+    void allGatherArraysT2(MPI_Comm comm, const typename Traits<T>::ArrayType *array, std::vector< MCAuto<typename Traits<T>::ArrayType> >& arraysOut) const
+    {
+      using DataArrayT = typename Traits<T>::ArrayType;
+      std::unique_ptr<T[]> result;
+      std::unique_ptr<mcIdType[]> resultIndex;
+      int size(this->allGatherArraysT<T>(comm,array,result,resultIndex));
+      arraysOut.resize(size);
+      for(int i = 0 ; i < size ; ++i)
+        {
+          arraysOut[i] = DataArrayT::New();
+          mcIdType nbOfEltPack(resultIndex[i+1]-resultIndex[i]);
+          arraysOut[i]->alloc(nbOfEltPack,1);
+          std::copy(result.get()+resultIndex[i],result.get()+resultIndex[i+1],arraysOut[i]->getPointer());
+        }
+    }
+
+    template<class T>
+    int allToAllArraysT2(MPI_Comm comm, const std::vector< MCAuto<typename Traits<T>::ArrayType> >& arrays, MCAuto<typename Traits<T>::ArrayType>& arrayOut, std::unique_ptr<mcIdType[]>& nbOfElems2, mcIdType& nbOfComponents) const
+    {
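
Editor's note: below is a minimal usage sketch, not part of the patch above, showing how the allGatherArrays entry point introduced by this change might be called. It assumes a standard MEDCoupling build; the MEDCouplingMemArray.hxx include, the fillWithValue call, and the implicit MCAuto-to-pointer conversion are assumptions about the library API rather than anything defined in this diff.

// Hedged usage sketch (assumed API, see note above): every rank contributes a small
// DataArrayIdType and retrieves the per-rank arrays via CommInterface::allGatherArrays.
#include "CommInterface.hxx"
#include "MEDCouplingMemArray.hxx"   // assumed header providing DataArrayIdType / MCAuto

#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  {
    MEDCoupling::CommInterface ci;
    int rank(0);
    ci.commRank(MPI_COMM_WORLD, &rank);

    // Each rank contributes (rank + 1) identifiers, all equal to its rank.
    MEDCoupling::MCAuto<MEDCoupling::DataArrayIdType> local(MEDCoupling::DataArrayIdType::New());
    local->alloc(rank + 1, 1);
    local->fillWithValue(rank);   // assumed DataArray API

    // On return, arraysOut[i] holds (on every rank) the array contributed by rank i.
    std::vector< MEDCoupling::MCAuto<MEDCoupling::DataArrayIdType> > arraysOut;
    ci.allGatherArrays(MPI_COMM_WORLD, local, arraysOut);

    for(std::size_t i = 0; i < arraysOut.size(); ++i)
      std::cout << "rank " << rank << ": got " << arraysOut[i]->getNumberOfTuples()
                << " ids from rank " << i << std::endl;
  }   // arrays released before MPI_Finalize()
  MPI_Finalize();
  return 0;
}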