X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FMEDPartitioner%2FMEDPARTITIONER_UtilsPara.cxx;h=851d3e6ab65a35d6286eb19dd9399e8592c88e13;hb=ffb8188e28b2b60ee207a8644286821bc4e8fcdc;hp=65629addd8d4e410934764c3bbc741bf02060df4;hpb=1123dccd6613b2e8abba35182759d5c4a11ecc8d;p=tools%2Fmedcoupling.git

diff --git a/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx b/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx
index 65629addd..851d3e6ab 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx
@@ -1,4 +1,4 @@
-// Copyright (C) 2007-2014  CEA/DEN, EDF R&D
+// Copyright (C) 2007-2020  CEA/DEN, EDF R&D
 //
 // This library is free software; you can redistribute it and/or
 // modify it under the terms of the GNU Lesser General Public
@@ -26,7 +26,8 @@
 #include "MEDCouplingUMesh.hxx"
 #include "MEDCouplingFieldDouble.hxx"
 #include "InterpKernelException.hxx"
-#include "MEDCouplingAutoRefCountObjectPtr.hxx"
+#include "MCAuto.hxx"
+#include "MEDCouplingMemArray.txx"
 #include "InterpKernelAutoPtr.hxx"
 
 #include <fstream>
@@ -35,8 +36,16 @@
 #include <sstream>
 #include <string>
 
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
+
 #include <mpi.h>
+
+#ifndef MEDCOUPLING_USE_64BIT_IDS
+#define MPI_ID_TYPE MPI_INT
+#else
+#define MPI_ID_TYPE MPI_LONG
+#endif
+
 #endif
 
 using namespace MEDPARTITIONER;
@@ -54,9 +63,9 @@ std::vector<std::string> MEDPARTITIONER::SendAndReceiveVectorOfString(const std:
   if (rank == source)
     {
       std::string str=SerializeFromVectorOfString(vec);
-      int size=str.length();
+      int size=(int)str.length();
       MPI_Send( &size, 1, MPI_INT, target, tag, MPI_COMM_WORLD );
-      MPI_Send( (void*)str.data(), str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD );
+      MPI_Send( (void*)str.data(), (int)str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD );
     }
 
   int recSize=0;
@@ -83,7 +92,7 @@ std::vector<std::string> MEDPARTITIONER::AllgathervVectorOfString(const std::vec
   std::string str=SerializeFromVectorOfString(vec);
 
   std::vector<int> indexes(world_size);
-  int size=str.length();
+  int size=(int)str.length();
   MPI_Allgather(&size, 1, MPI_INT,
                 &indexes[0], 1, MPI_INT, MPI_COMM_WORLD);
@@ -92,7 +101,7 @@ std::vector<std::string> MEDPARTITIONER::AllgathervVectorOfString(const std::vec
   for (int i=0; i<world_size; i++) ...
@@ ... @@ std::vector<std::string> MEDPARTITIONER::AllgathervVectorOfString(const std::vec
 void MEDPARTITIONER::SendDoubleVec(const std::vector<double>& vec, const int target)
 {
   int tag = 111002;
-  int size=vec.size();
+  int size=(int)vec.size();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
   MPI_Send(const_cast<double*>(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
 #endif
@@ -134,7 +143,7 @@ std::vector<double>* MEDPARTITIONER::RecvDoubleVec(const int source)
 {
   int tag = 111002;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -150,7 +159,7 @@ void MEDPARTITIONER::RecvDoubleVec(std::vector<double>& vec, const int source)
 {
   int tag = 111002;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -164,15 +173,15 @@ void MEDPARTITIONER::RecvDoubleVec(std::vector<double>& vec, const int source)
   \param vec vector to be sent
   \param target processor id of the target
 */
-void MEDPARTITIONER::SendIntVec(const std::vector<int>& vec, const int target)
+void MEDPARTITIONER::SendIntVec(const std::vector<mcIdType>& vec, const int target)
 {
   int tag = 111003;
-  int size=vec.size();
+  int size=(int)vec.size();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl;
-#ifdef HAVE_MPI2
-  MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
-  MPI_Send(const_cast<int*>(&vec[0]), size,MPI_INT, target, tag+100, MPI_COMM_WORLD);
+#ifdef HAVE_MPI
+  MPI_Send(&size, 1, MPI_ID_TYPE, target, tag, MPI_COMM_WORLD);
+  MPI_Send(const_cast<mcIdType*>(&vec[0]), size,MPI_ID_TYPE, target, tag+100, MPI_COMM_WORLD);
 #endif
 }
@@ -185,7 +194,7 @@ std::vector<int> *MEDPARTITIONER::RecvIntVec(const int source)
 {
   int tag = 111003;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -197,17 +206,17 @@ std::vector<int> *MEDPARTITIONER::RecvIntVec(const int source)
   return vec;
 }
 
-void MEDPARTITIONER::RecvIntVec(std::vector<int>& vec, const int source)
+void MEDPARTITIONER::RecvIntVec(std::vector<mcIdType>& vec, const int source)
 {
   int tag = 111003;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
   vec.resize(size);
-  MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD,&status);
+  MPI_Recv(&vec[0], size, MPI_ID_TYPE, source, tag+100, MPI_COMM_WORLD,&status);
 #endif
 }
@@ -217,18 +226,18 @@ void MEDPARTITIONER::RecvIntVec(std::vector<int>& vec, const int source)
   \param da dataArray to be sent
   \param target processor id of the target
 */
-void MEDPARTITIONER::SendDataArrayInt(const ParaMEDMEM::DataArrayInt *da, const int target)
+void MEDPARTITIONER::SendDataArrayInt(const MEDCoupling::DataArrayInt *da, const int target)
 {
   if (da==0)
     throw INTERP_KERNEL::Exception("Problem send DataArrayInt* NULL");
   int tag = 111004;
   int size[3];
-  size[0]=da->getNbOfElems();
-  size[1]=da->getNumberOfTuples();
-  size[2]=da->getNumberOfComponents();
+  size[0]=(int)da->getNbOfElems();
+  size[1]=(int)da->getNumberOfTuples();
+  size[2]=(int)da->getNumberOfComponents();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
   const int *p=da->getConstPointer();
   MPI_Send(const_cast<int*>(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD);
@@ -240,18 +249,18 @@ void MEDPARTITIONER::SendDataArrayInt(const ParaMEDMEM::DataArrayInt *da, const
   \param da dataArrayInt that is filled
   \param source processor id of the incoming messages
 */
-ParaMEDMEM::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
+MEDCoupling::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
 {
   int tag = 111004;
   int size[3];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayInt " << size[0] << std::endl;
   if (size[0]!=(size[1]*size[2]))
     throw INTERP_KERNEL::Exception("Problem in RecvDataArrayInt incoherent sizes");
-  ParaMEDMEM::DataArrayInt* da=ParaMEDMEM::DataArrayInt::New();
+  MEDCoupling::DataArrayInt* da=MEDCoupling::DataArrayInt::New();
   da->alloc(size[1],size[2]);
   int *p=da->getPointer();
   MPI_Recv(const_cast<int*>(&p[0]), size[0], MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
@@ -265,18 +274,18 @@ ParaMEDMEM::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
   \param da dataArray to be sent
   \param target processor id of the target
 */
-void MEDPARTITIONER::SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble *da, const int target)
+void MEDPARTITIONER::SendDataArrayDouble(const MEDCoupling::DataArrayDouble *da, const int target)
 {
   if (da==0)
     throw INTERP_KERNEL::Exception("Problem send DataArrayDouble* NULL");
   int tag = 111005;
   int size[3];
-  size[0]=da->getNbOfElems();
-  size[1]=da->getNumberOfTuples();
-  size[2]=da->getNumberOfComponents();
+  size[0]=(int)da->getNbOfElems();
+  size[1]=(int)da->getNumberOfTuples();
+  size[2]=(int)da->getNumberOfComponents();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
   const double *p=da->getConstPointer();
   MPI_Send(const_cast<double*>(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
@@ -288,18 +297,18 @@ void MEDPARTITIONER::SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble *da,
   \param da dataArrayDouble that is filled
   \param source processor id of the incoming messages
 */
-ParaMEDMEM::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source)
+MEDCoupling::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source)
 {
   int tag = 111005;
   int size[3];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayDouble " << size[0] << std::endl;
   if (size[0]!=(size[1]*size[2]))
     throw INTERP_KERNEL::Exception("Problem in RecvDataArrayDouble incoherent sizes");
-  ParaMEDMEM::DataArrayDouble* da=ParaMEDMEM::DataArrayDouble::New();
+  MEDCoupling::DataArrayDouble* da=MEDCoupling::DataArrayDouble::New();
   da->alloc(size[1],size[2]);
   double *p=da->getPointer();
   MPI_Recv(const_cast<double*>(&p[0]), size[0], MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
@@ -380,7 +389,7 @@ void MEDPARTITIONER::TestVectorOfStringMpi()
 void MEDPARTITIONER::TestMapOfStringIntMpi()
 {
   int rank=MyGlobals::_Rank;
-  std::map<std::string,int> myMap;
+  std::map<std::string,mcIdType> myMap;
   myMap["one"]=1;
   myMap["two"]=22;  //a bug
   myMap["three"]=3;
@@ -389,7 +398,7 @@ void MEDPARTITIONER::TestMapOfStringIntMpi()
   if (rank==0)
     {
       std::vector<std::string> v2=VectorizeFromMapOfStringInt(myMap);
-      std::map<std::string,int> m3=DevectorizeToMapOfStringInt(v2);
+      std::map<std::string,mcIdType> m3=DevectorizeToMapOfStringInt(v2);
       if (ReprMapOfStringInt(m3)!=ReprMapOfStringInt(myMap))
         throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringInt");
     }
@@ -399,7 +408,7 @@ void MEDPARTITIONER::TestMapOfStringIntMpi()
     {
       std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
       std::cout << ReprVectorOfString(v2) << std::endl;
-      std::map<std::string,int> m2=DevectorizeToMapOfStringInt(v2);
+      std::map<std::string,mcIdType> m2=DevectorizeToMapOfStringInt(v2);
       std::cout << "m2 is : a map of size " << m2.size() << std::endl;
       std::cout << ReprMapOfStringInt(m2) << std::endl;
     }
@@ -458,8 +467,8 @@ void MEDPARTITIONER::TestDataArrayMpi()
   int rank=MyGlobals::_Rank;
   //int
   {
-    ParaMEDMEM::DataArrayInt* send=ParaMEDMEM::DataArrayInt::New();
-    ParaMEDMEM::DataArrayInt* recv=0;
+    MEDCoupling::DataArrayInt* send=MEDCoupling::DataArrayInt::New();
+    MEDCoupling::DataArrayInt* recv=0;
     int nbOfTuples=5;
     int numberOfComponents=3;
     send->alloc(nbOfTuples,numberOfComponents);
@@ -487,8 +496,8 @@ void MEDPARTITIONER::TestDataArrayMpi()
   }
   //double
   {
-    ParaMEDMEM::DataArrayDouble* send=ParaMEDMEM::DataArrayDouble::New();
-    ParaMEDMEM::DataArrayDouble* recv=0;
+    MEDCoupling::DataArrayDouble* send=MEDCoupling::DataArrayDouble::New();
+    MEDCoupling::DataArrayDouble* recv=0;
     int nbOfTuples=5;
     int numberOfComponents=3;
     send->alloc(nbOfTuples,numberOfComponents);
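
Note on the id plumbing introduced above: `mcIdType` is MEDCoupling's mesh-id integer, and `MPI_ID_TYPE` is the matching MPI datatype, `MPI_INT` when `MEDCOUPLING_USE_64BIT_IDS` is unset and `MPI_LONG` when it is set, so sender and receiver agree on the width of id payloads. The following standalone sketch illustrates the pattern outside the library; the typedef is a hypothetical stand-in for the one MEDCoupling's headers provide, and the tag values merely echo the SendIntVec/RecvIntVec convention from the patch.

// mpi_id_type_sketch.cxx -- standalone illustration; the typedef below is
// an assumed stand-in, NOT MEDCoupling's actual definition. Only <mpi.h>
// is a real dependency here.
#include <mpi.h>
#include <vector>
#include <iostream>

// Mirror of the patch's width switch: the id typedef and the MPI datatype
// must come from the same macro, or sender and receiver disagree on width.
#ifndef MEDCOUPLING_USE_64BIT_IDS
typedef int  mcIdType;          // 32-bit ids, matches MPI_INT
#define MPI_ID_TYPE MPI_INT
#else
typedef long mcIdType;          // 64-bit ids, matches MPI_LONG (as in the patch)
#define MPI_ID_TYPE MPI_LONG
#endif

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int rank=0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int tag=111003;         // same tag convention as SendIntVec/RecvIntVec
  if (rank==0)
    {
      std::vector<mcIdType> ids;
      ids.push_back(10); ids.push_back(20); ids.push_back(30);
      int size=(int)ids.size();
      // length prefix as a plain int, payload with the width-tracking datatype
      MPI_Send(&size, 1, MPI_INT, 1, tag, MPI_COMM_WORLD);
      MPI_Send(&ids[0], size, MPI_ID_TYPE, 1, tag+100, MPI_COMM_WORLD);
    }
  else if (rank==1)
    {
      MPI_Status status;
      int size=0;
      MPI_Recv(&size, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
      std::vector<mcIdType> ids(size);
      MPI_Recv(&ids[0], size, MPI_ID_TYPE, 0, tag+100, MPI_COMM_WORLD, &status);
      std::cout << "proc 1 : received " << size << " ids" << std::endl;
    }
  MPI_Finalize();
  return 0;
}

Built with mpicxx and run on two processes, the same call sites handle both id widths; recompiling with -DMEDCOUPLING_USE_64BIT_IDS switches the payload to 64-bit ids without touching them, which is the point of funnelling the datatype choice through a single macro.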