From 54b955607de6f7044a882903e331fd5df0927f7d Mon Sep 17 00:00:00 2001
From: Anthony Geay
Date: Tue, 14 Apr 2020 23:08:27 +0200
Subject: [PATCH] WIP

---
 src/ParaMEDMEM/CommInterface.hxx |  2 +-
 src/ParaMEDMEM/ParaUMesh.cxx     | 37 ++++++++++++++++++++++++++------
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/src/ParaMEDMEM/CommInterface.hxx b/src/ParaMEDMEM/CommInterface.hxx
index 9d96cde93..967501cdb 100644
--- a/src/ParaMEDMEM/CommInterface.hxx
+++ b/src/ParaMEDMEM/CommInterface.hxx
@@ -81,7 +81,7 @@ namespace MEDCoupling
                   void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) const { return MPI_Allgather(sendbuf,sendcount, sendtype, recvbuf, recvcount, recvtype, comm); }
     int allGatherV(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[],
-                   const int displs[], MPI_Datatype recvtype, MPI_Comm comm) { return MPI_Allgatherv(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs,recvtype,comm); }
+                   const int displs[], MPI_Datatype recvtype, MPI_Comm comm) const { return MPI_Allgatherv(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs,recvtype,comm); }
     int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
                  MPI_Datatype recvtype, MPI_Comm comm) const { return MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); }
diff --git a/src/ParaMEDMEM/ParaUMesh.cxx b/src/ParaMEDMEM/ParaUMesh.cxx
index 0b939fd17..080a9ab4a 100644
--- a/src/ParaMEDMEM/ParaUMesh.cxx
+++ b/src/ParaMEDMEM/ParaUMesh.cxx
@@ -117,6 +117,25 @@ MCAuto<DataArrayIdType> ParaUMesh::getCellIdsLyingOnNodes(const DataArrayIdType
   return cellIdsFromProcs;
 }
 
+/*!
+ * Generalized AllGather collective communication.
+ * This method sends the input \a array to all procs.
+ */
+void AllGatherArrays(const CommInterface& ci, MPI_Comm comm, const DataArrayIdType *array, std::unique_ptr<mcIdType[]>& result, std::unique_ptr<mcIdType[]>& resultIndex)
+{
+  int size;
+  ci.commSize(comm,&size);
+  std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]);
+  mcIdType nbOfCellsRequested(array->getNumberOfTuples());
+  ci.allGather(&nbOfCellsRequested,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
+  mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems.get(),nbOfElems.get()+size,0));
+  result.reset(new mcIdType[nbOfCellIdsSum]);
+  std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray(nbOfElems,size) );
+  std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) );
+  ci.allGatherV(array->begin(),nbOfCellsRequested,MPI_ID_TYPE,result.get(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,comm);
+  resultIndex = std::move(nbOfElems);
+}
+
 /*!
 */
 MCAuto<ParaUMesh> ParaUMesh::redistributeCells(const DataArrayIdType *globalCellIds) const
@@ -125,27 +144,33 @@ MCAuto<ParaUMesh> ParaUMesh::redistributeCells(const DataArrayIdType *globalCell
   CommInterface ci;
   int size;
   ci.commSize(comm,&size);
-  std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]),nbOfElems2(new mcIdType[size]);
-  mcIdType nbOfNodeIdsLoc(globalCellIds->getNumberOfTuples());
-  ci.allGather(&nbOfNodeIdsLoc,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
+  std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]);
+  mcIdType nbOfCellsRequested(globalCellIds->getNumberOfTuples());
+  ci.allGather(&nbOfCellsRequested,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
   mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems.get(),nbOfElems.get()+size,0));
   std::unique_ptr<mcIdType[]> allGlobalCellIds(new mcIdType[nbOfCellIdsSum]);
   std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray(nbOfElems,size) );
   std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) );
-  ci.allGatherV(globalCellIds->begin(),nbOfNodeIdsLoc,MPI_ID_TYPE,allGlobalCellIds.get(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,comm);
+  ci.allGatherV(globalCellIds->begin(),nbOfCellsRequested,MPI_ID_TYPE,allGlobalCellIds.get(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,comm);
   mcIdType offset(0);
+  // Prepare ParaUMesh parts to be sent : compute for each proc the contribution of current rank.
+  std::vector< MCAuto<DataArrayIdType> > globalCellIdsToBeSent(size),globalNodeIdsToBeSent(size);
+  std::vector< MCAuto<MEDCouplingUMesh> > meshPartsToBeSent(size);
   for(int curRk = 0 ; curRk < size ; ++curRk)
     {
      MCAuto<DataArrayIdType> globalCellIdsOfCurProc(DataArrayIdType::New());
      globalCellIdsOfCurProc->useArray(allGlobalCellIds.get()+offset,false,DeallocType::CPP_DEALLOC,nbOfElems[curRk],1);
      offset += nbOfElems[curRk];
-      // prepare all2all session
+      // the key call is here : compute for rank curRk the cells to be sent
      MCAuto<DataArrayIdType> globalCellIdsCaptured(_cell_global->buildIntersection(globalCellIdsOfCurProc));// OK for the global cellIds
      MCAuto<DataArrayIdType> localCellIdsCaptured(_node_global->findIdForEach(globalCellIdsCaptured->begin(),globalCellIdsCaptured->end()));
      MCAuto<MEDCouplingUMesh> meshPart(_mesh->buildPartOfMySelf(localCellIdsCaptured->begin(),localCellIdsCaptured->end(),true));
      MCAuto<DataArrayIdType> o2n(meshPart->zipCoordsTraducer());// OK for the mesh
      MCAuto<DataArrayIdType> n2o(o2n->invertArrayO2N2N2O(meshPart->getNumberOfNodes()));
      MCAuto<DataArrayIdType> globalNodeIdsPart(_node_global->selectByTupleIdSafe(n2o->begin(),n2o->end())); // OK for the global nodeIds
+      meshPartsToBeSent[curRk] = meshPart;
+      globalCellIdsToBeSent[curRk] = globalCellIdsCaptured;
+      globalNodeIdsToBeSent[curRk] = globalNodeIdsPart;
     }
-  
+  // Receive
 }
-- 
2.39.2
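
For readers unfamiliar with the variable-length gather pattern that the new AllGatherArrays helper encapsulates, the standalone sketch below reproduces it with plain MPI: gather each rank's element count, derive int counts and displacements (the role played by CommInterface::ToIntArray and CommInterface::ComputeOffset in the patch), then perform the MPI_Allgatherv of the payload. This is only an illustrative sketch; the function name allGatherLongs and the long long element type are stand-ins for the MEDCoupling types and are not part of this patch or of the MEDCoupling API.

// Minimal sketch of the counts/displacements pattern wrapped by AllGatherArrays.
// Plain MPI only; allGatherLongs and long long are illustrative stand-ins.
#include <mpi.h>
#include <numeric>
#include <vector>
#include <cstdio>

// Gather every rank's local array into one array replicated on all ranks.
// resultIndex[r] holds the number of elements contributed by rank r.
void allGatherLongs(MPI_Comm comm, const std::vector<long long>& local,
                    std::vector<long long>& result, std::vector<long long>& resultIndex)
{
  int size = 0;
  MPI_Comm_size(comm, &size);
  // 1. Exchange the per-rank element counts (the allGather step of the patch).
  long long nbLocal = static_cast<long long>(local.size());
  resultIndex.assign(size, 0);
  MPI_Allgather(&nbLocal, 1, MPI_LONG_LONG, resultIndex.data(), 1, MPI_LONG_LONG, comm);
  // 2. Build int counts and offsets, as ToIntArray / ComputeOffset do in the patch.
  std::vector<int> counts(size), displs(size, 0);
  for(int r = 0; r < size; ++r)
    counts[r] = static_cast<int>(resultIndex[r]);
  std::partial_sum(counts.begin(), counts.end() - 1, displs.begin() + 1);
  // 3. Variable-length gather of the payload itself (the allGatherV step).
  long long total = std::accumulate(resultIndex.begin(), resultIndex.end(), 0LL);
  result.assign(total, 0);
  MPI_Allgatherv(local.data(), static_cast<int>(nbLocal), MPI_LONG_LONG,
                 result.data(), counts.data(), displs.data(), MPI_LONG_LONG, comm);
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  // Each rank contributes rank+1 values so the per-rank lengths differ.
  std::vector<long long> local(rank + 1, rank);
  std::vector<long long> all, index;
  allGatherLongs(MPI_COMM_WORLD, local, all, index);
  if(rank == 0)
    std::printf("gathered %zu ids from %zu ranks\n", all.size(), index.size());
  MPI_Finalize();
  return 0;
}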
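
The loop added to redistributeCells prepares the send side of the exchange: for each destination rank it determines which locally owned cells that rank requested, by intersecting the gathered global cell ids with the local-to-global cell map and translating the matches back to local ids before extracting the mesh part. The following is a hypothetical sketch of that selection step only, with std::vector and std::unordered_set standing in for DataArrayIdType::buildIntersection and findIdForEach, and assuming the local-to-global cell map is indexed by local cell id; it is not the ParaUMesh implementation.

// Hypothetical sketch of the per-rank cell selection performed in the loop.
#include <cstddef>
#include <cstdint>
#include <unordered_set>
#include <vector>

using IdType = std::int64_t; // stand-in for mcIdType

// Given the global ids of locally owned cells (index == local cell id) and the
// global ids requested by one remote rank, return the local cell ids to send.
std::vector<IdType> localCellsToSend(const std::vector<IdType>& cellGlobal,
                                     const std::vector<IdType>& requestedGlobal)
{
  std::unordered_set<IdType> requested(requestedGlobal.begin(), requestedGlobal.end());
  std::vector<IdType> localIds;
  for(std::size_t lid = 0; lid < cellGlobal.size(); ++lid)
    if(requested.count(cellGlobal[lid]))
      localIds.push_back(static_cast<IdType>(lid));
  return localIds;
}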