From: Bernard Sécher
Date: Mon, 9 Feb 2015 09:50:28 +0000 (+0100)
Subject: changes for mpi compilation
X-Git-Tag: V7_6_0a1~14^2~1
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=65b0f5283d8f00d48a9733b25967ac6de7c854f7;p=modules%2Fmed.git

changes for mpi compilation
---

diff --git a/CMakeLists.txt b/CMakeLists.txt
index c9c6c58a9..b2087b90c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -83,7 +83,7 @@ CMAKE_DEPENDENT_OPTION(SALOME_BUILD_GUI "Build GUI of MED." ON
                        "NOT SALOME_MED_STANDALONE" OFF)
 OPTION(SALOME_BUILD_TESTS "Build MED tests." ON)
 OPTION(SALOME_BUILD_DOC "Build MED doc." ON)
-CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_METIS "Enable metis graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER;NOT SALOME_USE_MPI" OFF)
+CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_METIS "Enable metis graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER" OFF)
 CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_SCOTCH "Enable scotch graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER;NOT SALOME_USE_MPI" OFF)
 CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_PARMETIS "Enable parmetis graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER;SALOME_USE_MPI" OFF)
 IF(WIN32)
@@ -168,7 +168,7 @@ ENDIF(SALOME_BUILD_TESTS)
 IF(SALOME_USE_MPI)
   FIND_PACKAGE(SalomeMPI REQUIRED)
-  ADD_DEFINITIONS("-DHAVE_MPI2")
+  ADD_DEFINITIONS("-DHAVE_MPI")
   IF(SALOME_MED_PARTITIONER_PARMETIS)
     FIND_PACKAGE(SalomeParMetis)
     SALOME_LOG_OPTIONAL_PACKAGE(ParMetis SALOME_MED_PARTITIONER_PARMETIS)
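
Note: the hunk above renames the compile definition from HAVE_MPI2 to HAVE_MPI, and every source guard in the rest of the patch follows that rename. The standalone sketch below (not part of the patch; file and message text are made up for illustration) shows how a translation unit typically reacts to the definition that ADD_DEFINITIONS("-DHAVE_MPI") injects:

    // demo.cxx -- build with "mpicxx -DHAVE_MPI demo.cxx" for the MPI variant,
    // or plain "g++ demo.cxx" for the serial variant.
    #include <iostream>
    #ifdef HAVE_MPI
    #include <mpi.h>
    #endif

    int main(int argc, char* argv[])
    {
    #ifdef HAVE_MPI
      MPI_Init(&argc, &argv);                 // parallel build: bring MPI up
      int rank = 0, size = 1;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      std::cout << "proc " << rank << "/" << size << std::endl;
      MPI_Finalize();
    #else
      (void)argc; (void)argv;
      std::cout << "serial build: HAVE_MPI not defined" << std::endl;
    #endif
      return 0;
    }
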
diff --git a/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx b/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx
index 6307ebd43..c298d68f2 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx
@@ -25,7 +25,7 @@
 #include "MEDPARTITIONER_Topology.hxx"
 #include "MEDPARTITIONER_ParallelTopology.hxx"
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include "MEDPARTITIONER_JointFinder.hxx"
 #endif
@@ -43,7 +43,7 @@
 #include "MEDCouplingAutoRefCountObjectPtr.hxx"
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include
 #endif
@@ -115,7 +115,7 @@ MEDPARTITIONER::MeshCollection::MeshCollection(MeshCollection& initialCollection
   //treating faces
   /////////////////
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
     MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
 #endif
@@ -131,7 +131,7 @@ MEDPARTITIONER::MeshCollection::MeshCollection(MeshCollection& initialCollection
   ////////////////////
   //treating families
   ////////////////////
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
     MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
 #endif
@@ -155,7 +155,7 @@ MEDPARTITIONER::MeshCollection::MeshCollection(MeshCollection& initialCollection
                 "faceFamily");
   //treating groups
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
     MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
 #endif
@@ -164,7 +164,7 @@ MEDPARTITIONER::MeshCollection::MeshCollection(MeshCollection& initialCollection
   _family_info=initialCollection.getFamilyInfo();
   _group_info=initialCollection.getGroupInfo();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
     MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
 #endif
@@ -240,7 +240,7 @@ void MEDPARTITIONER::MeshCollection::castCellMeshes(MeshCollection& initialColle
       }
     }
   }
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (isParallelMode())
     {
      //if (MyGlobals::_Verbose>300) std::cout<<"proc "<nbDomain(); inew++) {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
       //sending meshes for parallel computation
       if (isParallelMode() && _domain_selector->isMyDomain(inew) && !_domain_selector->isMyDomain(iold))
         _domain_selector->sendMesh(*(getMesh(inew)), _domain_selector->getProcessorID(iold));
@@ -538,7 +538,7 @@ void MEDPARTITIONER::MeshCollection::castFaceMeshes(MeshCollection& initialColle
     }
   }
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   //send/receive stuff
   if (isParallelMode())
     {
@@ -643,7 +643,7 @@ void MEDPARTITIONER::MeshCollection::castIntField(std::vectordecrRef();
   }
   // send-recv operations
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   for (int inew=0; inewisMyDomain(inew1);
   mesh2Here = _domain_selector->isMyDomain(inew2);
   if ( !mesh1Here && mesh2Here )
@@ -1036,7 +1036,7 @@ void MEDPARTITIONER::MeshCollection::buildConnectZones()
   if ( isParallelMode())
     {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
       if ( mesh1Here && !mesh2Here )
         {
           //send faces2 to domain of recvMesh
@@ -1286,7 +1286,7 @@ MEDPARTITIONER::MeshCollection::MeshCollection(const std::string& filename, Para
   f<1) MPI_Barrier(MPI_COMM_WORLD); //wait for creation of nameFileXml
 #endif
@@ -1326,7 +1326,7 @@ MEDPARTITIONER::MeshCollection::MeshCollection(const std::string& filename, Para
   try
     {
       //check for all proc/file compatibility of _field_descriptions
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
       _field_descriptions=AllgathervVectorOfString(MyGlobals::_Field_Descriptions);
 #else
       _field_descriptions=MyGlobals::_Field_Descriptions;
@@ -1337,7 +1337,7 @@ MEDPARTITIONER::MeshCollection::MeshCollection(const std::string& filename, Para
       std::cerr << "proc " << MyGlobals::_Rank << " : INTERP_KERNEL_Exception : " << e.what() << std::endl;
       throw INTERP_KERNEL::Exception("Something wrong verifying coherency of files med ands fields");
     }
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   try
     {
       //check for all proc/file compatibility of _family_info
@@ -1424,7 +1424,7 @@ MEDPARTITIONER::MeshCollection::~MeshCollection()
     delete _driver;
   if (_topology!=0 && _owns_topology)
     delete _topology;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   delete _joint_finder;
 #endif
 }
@@ -1703,7 +1703,7 @@ void MEDPARTITIONER::MeshCollection::buildParallelCellGraph(MEDPARTITIONER::SkyL
   std::vector > > commonDistantNodes;
   int nbdomain=_topology->nbDomain();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (isParallelMode())
     {
       _joint_finder=new JointFinder(*this);
@@ -1745,7 +1745,7 @@ void MEDPARTITIONER::MeshCollection::buildParallelCellGraph(MEDPARTITIONER::SkyL
     }
   revConn->decrRef();
   index->decrRef();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   for (int iother=0; iother::iterator it;
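
Note: most of the MeshCollection hunks above guard the same idiom: an optional MPI_Barrier, compiled only under HAVE_MPI, used to keep per-rank verbose traces grouped by processing step. A minimal standalone sketch of that idiom follows (illustration only; verbose, rank and world_size are placeholder parameters, not the MyGlobals members used in MEDPartitioner):

    #include <iostream>
    #ifdef HAVE_MPI
    #include <mpi.h>
    #endif

    void trace_step(const char* step, int verbose, int rank, int world_size)
    {
    #ifdef HAVE_MPI
      if (verbose > 0 && world_size > 1)
        MPI_Barrier(MPI_COMM_WORLD);   // loosely synchronize ranks so traces group by step
    #else
      (void)world_size;
    #endif
      if (verbose > 0)
        std::cout << "proc " << rank << " : " << step << std::endl;
    }

    int main()
    {
      int rank = 0, world_size = 1;
    #ifdef HAVE_MPI
      MPI_Init(0, 0);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    #endif
      trace_step("treating faces", 1, rank, world_size);
      trace_step("treating families", 1, rank, world_size);
    #ifdef HAVE_MPI
      MPI_Finalize();
    #endif
      return 0;
    }
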
diff --git a/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx b/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
index e6beacf19..0076dbaaf 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
@@ -26,7 +26,7 @@
 #include
 #include
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include
 #endif
@@ -37,7 +37,7 @@ MEDPARTITIONER::ParaDomainSelector::ParaDomainSelector(bool mesure_memory)
   :_rank(0),_world_size(1), _nb_result_domains(-1), _init_time(0.0),
    _mesure_memory(mesure_memory), _init_memory(0), _max_memory(0)
 {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Rank==-1)
     {
       MPI_Init(0,0); //do once only
@@ -76,7 +76,7 @@ bool MEDPARTITIONER::ParaDomainSelector::isOnDifferentHosts() const
   evaluateMemory();
   if ( _world_size < 2 ) return false;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
   int size;
   MPI_Get_processor_name( name_here, &size);
@@ -152,11 +152,11 @@ void MEDPARTITIONER::ParaDomainSelector::gatherNbOf(const std::vector1");
+    throw INTERP_KERNEL::Exception("not(HAVE_MPI) incompatible with MPI_World_Size>1");
 #endif
   }
   int total_nb_cells=0, total_nb_nodes=0;
@@ -289,7 +289,7 @@ std::auto_ptr MEDPARTITIONER::ParaDomainSelector::gatherG
   Graph* glob_graph = 0;
   evaluateMemory();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   // ---------------
   // Gather indices
@@ -379,7 +379,7 @@ std::auto_ptr MEDPARTITIONER::ParaDomainSelector::gatherG
   delete [] partition;
-#endif // HAVE_MPI2
+#endif // HAVE_MPI
   return std::auto_ptr( glob_graph );
 }
@@ -430,7 +430,7 @@ void MEDPARTITIONER::ParaDomainSelector::gatherNbCellPairs()
   evaluateMemory();
   std::vector< int > send_buf = _nb_cell_pairs_by_joint;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Allreduce((void*)&send_buf[0],
                 (void*)&_nb_cell_pairs_by_joint[0],
                 _nb_cell_pairs_by_joint.size(),
@@ -477,7 +477,7 @@ int *MEDPARTITIONER::ParaDomainSelector::exchangeSubentityIds( int loc_domain, i
   const std::vector& loc_ids_here ) const
 {
   int* loc_ids_dist = new int[ loc_ids_here.size()];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   int dest = getProcessorID( dist_domain );
   int tag = 2002 + jointId( loc_domain, dist_domain );
   MPI_Status status;
@@ -516,7 +516,7 @@ int MEDPARTITIONER::ParaDomainSelector::jointId( int local_domain, int distant_d
 double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
 {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   return MPI_Wtime() - _init_time;
 #else
   return 0.0;
@@ -531,7 +531,7 @@ double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
 void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingUMesh& mesh, int target) const
 {
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
   throw INTERP_KERNEL::Exception("ParaDomainSelector::sendMesh : incoherent call in non_MPI mode");
 #else
   if (MyGlobals::_Verbose>600)
@@ -584,7 +584,7 @@ void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingU
  */
 void MEDPARTITIONER::ParaDomainSelector::recvMesh(ParaMEDMEM::MEDCouplingUMesh*& mesh, int source)const
 {
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
   throw INTERP_KERNEL::Exception("ParaDomainSelector::recvMesh : incoherent call in non_MPI mode");
 #else
   // First stage : exchanging sizes
diff --git a/src/MEDPartitioner/MEDPARTITIONER_ParallelTopology.cxx b/src/MEDPartitioner/MEDPARTITIONER_ParallelTopology.cxx
index 15e07a8b9..e163962e0 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_ParallelTopology.cxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_ParallelTopology.cxx
@@ -32,7 +32,7 @@
 #include
 #include
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include
 #endif
@@ -106,7 +106,7 @@ void ParallelTopology::setGlobalNumerotationDefault(ParaDomainSelector* domainSe
           std::cout << "c" << idomain << "|" << i << "|" << global << " ";
       }
   }
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Verbose>500 && MyGlobals::_World_Size>1) MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose trace
 #endif
   if (MyGlobals::_Is0verbose>500) std::cout << std::endl;
@@ -125,7 +125,7 @@ void ParallelTopology::setGlobalNumerotationDefault(ParaDomainSelector* domainSe
           std::cout << "n" << idomain << "|" << i << "|" << global << " ";
       }
   }
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Verbose>500 && MyGlobals::_World_Size>1) MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose trace
 #endif
   if (MyGlobals::_Is0verbose>500) std::cout << std::endl;
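
Note: ParaDomainSelector::sendMesh and recvMesh above reject calls outright when the library is built without MPI (#ifndef HAVE_MPI leads to a throw) instead of silently doing nothing. A reduced sketch of that guard style is shown below; std::runtime_error stands in for INTERP_KERNEL::Exception, and send_buffer is a hypothetical helper, not the MEDPARTITIONER API:

    #include <stdexcept>
    #ifdef HAVE_MPI
    #include <mpi.h>
    #endif

    void send_buffer(const int* data, int count, int target)
    {
    #ifndef HAVE_MPI
      (void)data; (void)count; (void)target;
      throw std::runtime_error("send_buffer: incoherent call in non-MPI mode");
    #else
      // blocking standard send; the target rank is expected to post a matching MPI_Recv
      MPI_Send(const_cast<int*>(data), count, MPI_INT, target, /*tag=*/0, MPI_COMM_WORLD);
    #endif
    }
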
diff --git a/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx b/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx
index 75a7f0b27..4f996803f 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx
@@ -88,7 +88,7 @@ namespace MEDPARTITIONER
   std::vector BrowseAllFieldsOnMesh(const std::string& myfile, const std::string& mymesh, const int idomain);
   std::vector GetInfosOfField(const char *fileName, const char *meshName, const int idomain );
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   //not adviced, interblocking, use sendAndReceive
   //void SendVectorOfString(const std::vector& vec, const int target);
   //std::vector RecvVectorOfString(const int source);
diff --git a/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx b/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx
index 65629addd..7511263d3 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx
@@ -35,7 +35,7 @@
 #include
 #include
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include
 #endif
@@ -118,7 +118,7 @@ void MEDPARTITIONER::SendDoubleVec(const std::vector& vec, const int tar
   int size=vec.size();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
   MPI_Send(const_cast(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
 #endif
@@ -134,7 +134,7 @@ std::vector* MEDPARTITIONER::RecvDoubleVec(const int source)
 {
   int tag = 111002;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -150,7 +150,7 @@ void MEDPARTITIONER::RecvDoubleVec(std::vector& vec, const int source)
 {
   int tag = 111002;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -170,7 +170,7 @@ void MEDPARTITIONER::SendIntVec(const std::vector& vec, const int target)
   int size=vec.size();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
   MPI_Send(const_cast(&vec[0]), size,MPI_INT, target, tag+100, MPI_COMM_WORLD);
 #endif
@@ -185,7 +185,7 @@ std::vector *MEDPARTITIONER::RecvIntVec(const int source)
 {
   int tag = 111003;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -201,7 +201,7 @@ void MEDPARTITIONER::RecvIntVec(std::vector& vec, const int source)
 {
   int tag = 111003;
   int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -228,7 +228,7 @@ void MEDPARTITIONER::SendDataArrayInt(const ParaMEDMEM::DataArrayInt *da, const
   size[2]=da->getNumberOfComponents();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
   const int *p=da->getConstPointer();
   MPI_Send(const_cast(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD);
@@ -244,7 +244,7 @@ ParaMEDMEM::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
 {
   int tag = 111004;
   int size[3];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
@@ -276,7 +276,7 @@ void MEDPARTITIONER::SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble *da,
   size[2]=da->getNumberOfComponents();
   if (MyGlobals::_Verbose>1000)
     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
   const double *p=da->getConstPointer();
   MPI_Send(const_cast(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
@@ -292,7 +292,7 @@ ParaMEDMEM::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int sourc
 {
   int tag = 111005;
   int size[3];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Status status;
   MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   if (MyGlobals::_Verbose>1000)
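
Note: the UtilsPara helpers above all follow the same two-message protocol: the length travels on `tag`, the payload on `tag+100`, and the whole body is compiled out when HAVE_MPI is not defined. A self-contained sketch of that protocol with a plain std::vector<int> follows (hypothetical helper names, not the MEDPARTITIONER functions):

    #include <vector>
    #ifdef HAVE_MPI
    #include <mpi.h>
    #endif

    void send_int_vec(const std::vector<int>& vec, int target, int tag)
    {
    #ifdef HAVE_MPI
      int size = static_cast<int>(vec.size());
      MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);      // 1) length on tag
      if (size > 0)                                                  // 2) payload on tag+100
        MPI_Send(const_cast<int*>(&vec[0]), size, MPI_INT, target, tag + 100, MPI_COMM_WORLD);
    #else
      (void)vec; (void)target; (void)tag;                            // serial build: nothing to do
    #endif
    }

    std::vector<int> recv_int_vec(int source, int tag)
    {
      std::vector<int> vec;
    #ifdef HAVE_MPI
      int size = 0;
      MPI_Status status;
      MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);   // 1) length
      vec.resize(size);
      if (size > 0)                                                        // 2) payload
        MPI_Recv(&vec[0], size, MPI_INT, source, tag + 100, MPI_COMM_WORLD, &status);
    #else
      (void)source; (void)tag;
    #endif
      return vec;
    }
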
diff --git a/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx b/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx
index c49dcc6fd..c7686401a 100644
--- a/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx
+++ b/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx
@@ -43,7 +43,7 @@
 #include
 #include
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include
 #endif
@@ -108,7 +108,7 @@ std::string MEDPARTITIONERTest::getPartitionerExe() const
 void MEDPARTITIONERTest::setUp()
 {
   this->_verbose=0;
-#if defined(HAVE_MPI2)
+#if defined(HAVE_MPI)
   if (MyGlobals::_Rank==-1) //do once only
     {
       MPI_Init(0,0);
@@ -123,10 +123,10 @@ void MEDPARTITIONERTest::setUp()
   if (_verbose>10)
     {
-#if defined(HAVE_MPI2)
-      cout<<"\ndefined(HAVE_MPI2)"<
 #include
 #include
 #include
+#include
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include
 #endif
diff --git a/src/RENUMBER/CMakeLists.txt b/src/RENUMBER/CMakeLists.txt
index 9990f5090..3bc84a408 100644
--- a/src/RENUMBER/CMakeLists.txt
+++ b/src/RENUMBER/CMakeLists.txt
@@ -53,6 +53,10 @@ SET(renumber_SOURCES
 SET(renumbercpp_LDFLAGS medloader)
+IF(PARMETIS_FOUND)
+  INCLUDE_DIRECTORIES(${PARMETIS_INCLUDE_DIRS})
+ENDIF(PARMETIS_FOUND)
+
 IF(METIS_FOUND)
   INCLUDE_DIRECTORIES(${METIS_INCLUDE_DIRS})
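
Note: both ParaDomainSelector's constructor and MEDPARTITIONERTest::setUp above call MPI_Init(0,0) behind a "do once only" test on MyGlobals::_Rank. As a closing illustration (an alternative idiom, not what the patch does), MPI's own MPI_Initialized query can serve the same purpose:

    #ifdef HAVE_MPI
    #include <mpi.h>

    void init_mpi_once()
    {
      int already = 0;
      MPI_Initialized(&already);   // ask MPI whether MPI_Init has already been called
      if (!already)
        MPI_Init(0, 0);            // safe: runs at most once per process
    }
    #endif
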