"NOT SALOME_MED_STANDALONE" OFF)
OPTION(SALOME_BUILD_TESTS "Build MED tests." ON)
OPTION(SALOME_BUILD_DOC "Build MED doc." ON)
-CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_METIS "Enable metis graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER;NOT SALOME_USE_MPI" OFF)
+CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_METIS "Enable metis graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER" OFF)
CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_SCOTCH "Enable scotch graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER;NOT SALOME_USE_MPI" OFF)
CMAKE_DEPENDENT_OPTION(SALOME_MED_PARTITIONER_PARMETIS "Enable parmetis graph library in MEDPartitioner." ON "SALOME_MED_ENABLE_PARTITIONER;SALOME_USE_MPI" OFF)
IF(WIN32)
IF(SALOME_USE_MPI)
FIND_PACKAGE(SalomeMPI REQUIRED)
- ADD_DEFINITIONS("-DHAVE_MPI2")
+ ADD_DEFINITIONS("-DHAVE_MPI")
IF(SALOME_MED_PARTITIONER_PARMETIS)
FIND_PACKAGE(SalomeParMetis)
SALOME_LOG_OPTIONAL_PACKAGE(ParMetis SALOME_MED_PARTITIONER_PARMETIS)
#include "MEDPARTITIONER_Topology.hxx"
#include "MEDPARTITIONER_ParallelTopology.hxx"
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
#include "MEDPARTITIONER_JointFinder.hxx"
#endif
#include "MEDCouplingAutoRefCountObjectPtr.hxx"
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
#include <mpi.h>
#endif
//treating faces
/////////////////
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
#endif
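The guarded barrier above is the recurring trace-synchronization idiom of this patch: when several ranks print progress, a barrier between phases keeps their output from interleaving, and the whole call compiles away in a non-MPI build. A minimal self-contained sketch of the idiom, assuming HAVE_MPI is set by the build system and MPI_Init has already run (verboseLevel and worldSize are hypothetical stand-ins for MyGlobals::_Verbose and MyGlobals::_World_Size):

#include <iostream>
#ifdef HAVE_MPI
#include <mpi.h>
#endif

// Hypothetical globals mirroring MyGlobals::_Verbose and MyGlobals::_World_Size.
static int verboseLevel = 1;
static int worldSize = 2;

void endOfPhaseTrace(const char* phase)
{
  if (verboseLevel > 0)
    std::cout << "done: " << phase << std::endl;
#ifdef HAVE_MPI
  if (verboseLevel > 0 && worldSize > 1)
    MPI_Barrier(MPI_COMM_WORLD); // every rank finishes this phase before the next one prints
#endif
}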
////////////////////
//treating families
////////////////////
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
#endif
"faceFamily");
//treating groups
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
#endif
_family_info=initialCollection.getFamilyInfo();
_group_info=initialCollection.getGroupInfo();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_Verbose>0 && MyGlobals::_World_Size>1)
MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose messages
#endif
}
}
}
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (isParallelMode())
{
//if (MyGlobals::_Verbose>300) std::cout<<"proc "<<rank<<" : castCellMeshes send/receive"<<std::endl;
for (int inew=0; inew<_topology->nbDomain(); inew++)
{
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
//sending meshes for parallel computation
if (isParallelMode() && _domain_selector->isMyDomain(inew) && !_domain_selector->isMyDomain(iold))
_domain_selector->sendMesh(*(getMesh(inew)), _domain_selector->getProcessorID(iold));
}
}
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
//send/receive stuff
if (isParallelMode())
{
sourceCoords->decrRef();
}
// send-recv operations
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
for (int inew=0; inew<inewMax; inew++)
{
for (int iold=0; iold<ioldMax; iold++)
std::string descriptionField=initialCollection.getFieldDescriptions()[ifield];
if (descriptionField.find(nameTo)==std::string::npos)
continue; //only nameTo accepted in Fields name description
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
for (int inew=0; inew<inewMax; inew++)
{
for (int iold=0; iold<ioldMax; iold++)
bool mesh1Here = true, mesh2Here = true;
if (isParallelMode())
{
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
mesh1Here = _domain_selector->isMyDomain(inew1);
mesh2Here = _domain_selector->isMyDomain(inew2);
if ( !mesh1Here && mesh2Here )
if ( isParallelMode())
{
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if ( mesh1Here && !mesh2Here )
{
//send faces2 to domain of recvMesh
f<<xml;
f.close();
}
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_World_Size>1)
MPI_Barrier(MPI_COMM_WORLD); //wait for creation of nameFileXml
#endif
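The barrier just above coordinates shared file I/O: one rank writes the master file nameFileXml, and every rank waits at the barrier before opening it, so no rank reads a file that does not exist yet. A minimal sketch of that write-then-barrier pattern, assuming an initialized MPI environment (writeMasterThenShare and the file content are illustrative, not the library's API):

#include <fstream>
#include <string>
#ifdef HAVE_MPI
#include <mpi.h>
#endif

void writeMasterThenShare(int rank, const std::string& nameFileXml)
{
  if (rank == 0) // only one rank creates the shared file
  {
    std::ofstream f(nameFileXml.c_str());
    f << "<master/>" << std::endl;
    f.close();
  }
#ifdef HAVE_MPI
  MPI_Barrier(MPI_COMM_WORLD); // no rank proceeds until the file exists
#endif
  std::ifstream in(nameFileXml.c_str()); // now safe to open on every rank
}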
try
{
//check for all proc/file compatibility of _field_descriptions
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
_field_descriptions=AllgathervVectorOfString(MyGlobals::_Field_Descriptions);
#else
_field_descriptions=MyGlobals::_Field_Descriptions;
std::cerr << "proc " << MyGlobals::_Rank << " : INTERP_KERNEL_Exception : " << e.what() << std::endl;
throw INTERP_KERNEL::Exception("Something wrong verifying coherency of files med ands fields");
}
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
try
{
//check for all proc/file compatibility of _family_info
delete _driver;
if (_topology!=0 && _owns_topology)
delete _topology;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
delete _joint_finder;
#endif
}
std::vector<std::vector<std::multimap<int,int> > > commonDistantNodes;
int nbdomain=_topology->nbDomain();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (isParallelMode())
{
_joint_finder=new JointFinder(*this);
}
revConn->decrRef();
index->decrRef();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
for (int iother=0; iother<nbdomain; iother++)
{
std::multimap<int,int>::iterator it;
#include <iostream>
#include <numeric>
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
#include <mpi.h>
#endif
:_rank(0),_world_size(1), _nb_result_domains(-1), _init_time(0.0),
_mesure_memory(mesure_memory), _init_memory(0), _max_memory(0)
{
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_Rank==-1)
{
MPI_Init(0,0); //do once only
evaluateMemory();
if ( _world_size < 2 ) return false;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
int size;
MPI_Get_processor_name( name_here, &size);
}
else
{
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
all_nb_elems.resize( nb_domains*2 );
MPI_Allreduce((void*)&nb_elems[0], (void*)&all_nb_elems[0], nb_domains*2, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#else
- throw INTERP_KERNEL::Exception("not(HAVE_MPI2) incompatible with MPI_World_Size>1");
+ throw INTERP_KERNEL::Exception("not(HAVE_MPI) incompatible with MPI_World_Size>1");
#endif
}
int total_nb_cells=0, total_nb_nodes=0;
Graph* glob_graph = 0;
evaluateMemory();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
// ---------------
// Gather indices
delete [] partition;
-#endif // HAVE_MPI2
+#endif // HAVE_MPI
return std::auto_ptr<Graph>( glob_graph );
}
evaluateMemory();
std::vector< int > send_buf = _nb_cell_pairs_by_joint;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Allreduce((void*)&send_buf[0],
(void*)&_nb_cell_pairs_by_joint[0],
_nb_cell_pairs_by_joint.size(),
const std::vector<int>& loc_ids_here ) const
{
int* loc_ids_dist = new int[ loc_ids_here.size()];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
int dest = getProcessorID( dist_domain );
int tag = 2002 + jointId( loc_domain, dist_domain );
MPI_Status status;
double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
{
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
return MPI_Wtime() - _init_time;
#else
return 0.0;
void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingUMesh& mesh, int target) const
{
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
throw INTERP_KERNEL::Exception("ParaDomainSelector::sendMesh : incoherent call in non_MPI mode");
#else
if (MyGlobals::_Verbose>600)
*/
void MEDPARTITIONER::ParaDomainSelector::recvMesh(ParaMEDMEM::MEDCouplingUMesh*& mesh, int source)const
{
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
throw INTERP_KERNEL::Exception("ParaDomainSelector::recvMesh : incoherent call in non_MPI mode");
#else
// First stage : exchanging sizes
#include <vector>
#include <iostream>
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
#include <mpi.h>
#endif
std::cout << "c" << idomain << "|" << i << "|" << global << " ";
}
}
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_Verbose>500 && MyGlobals::_World_Size>1) MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose trace
#endif
if (MyGlobals::_Is0verbose>500) std::cout << std::endl;
std::cout << "n" << idomain << "|" << i << "|" << global << " ";
}
}
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
if (MyGlobals::_Verbose>500 && MyGlobals::_World_Size>1) MPI_Barrier(MPI_COMM_WORLD); //synchronize verbose trace
#endif
if (MyGlobals::_Is0verbose>500) std::cout << std::endl;
std::vector<std::string> BrowseAllFieldsOnMesh(const std::string& myfile, const std::string& mymesh, const int idomain);
std::vector<std::string> GetInfosOfField(const char *fileName, const char *meshName, const int idomain );
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
//not advised, blocking calls may deadlock; use sendAndReceive
//void SendVectorOfString(const std::vector<std::string>& vec, const int target);
//std::vector<std::string> RecvVectorOfString(const int source);
#include <sstream>
#include <string>
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
#include <mpi.h>
#endif
int size=vec.size();
if (MyGlobals::_Verbose>1000)
std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
MPI_Send(const_cast<double*>(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
#endif
{
int tag = 111002;
int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Status status;
MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
if (MyGlobals::_Verbose>1000)
{
int tag = 111002;
int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Status status;
MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
if (MyGlobals::_Verbose>1000)
int size=vec.size();
if (MyGlobals::_Verbose>1000)
std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
MPI_Send(const_cast<int*>(&vec[0]), size,MPI_INT, target, tag+100, MPI_COMM_WORLD);
#endif
{
int tag = 111003;
int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Status status;
MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
if (MyGlobals::_Verbose>1000)
{
int tag = 111003;
int size;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Status status;
MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
if (MyGlobals::_Verbose>1000)
size[2]=da->getNumberOfComponents();
if (MyGlobals::_Verbose>1000)
std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
const int *p=da->getConstPointer();
MPI_Send(const_cast<int*>(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD);
{
int tag = 111004;
int size[3];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Status status;
MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
if (MyGlobals::_Verbose>1000)
size[2]=da->getNumberOfComponents();
if (MyGlobals::_Verbose>1000)
std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
const double *p=da->getConstPointer();
MPI_Send(const_cast<double*>(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
{
int tag = 111005;
int size[3];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
MPI_Status status;
MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
if (MyGlobals::_Verbose>1000)
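The send/receive helpers in this file all share one wire protocol: a small size header travels first on a base tag, and the payload follows on tag+100, so the receiver can allocate its buffer before posting the second receive. A minimal sketch of that pairing under the same HAVE_MPI guard (sendIntBuffer/recvIntBuffer are hypothetical names, not the library's API):

#include <vector>
#ifdef HAVE_MPI
#include <mpi.h>
#endif

// Hypothetical pair mirroring the size-then-payload protocol used above.
void sendIntBuffer(const std::vector<int>& vec, int target, int tag)
{
#ifdef HAVE_MPI
  int size = (int)vec.size();
  MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
  if (size > 0)
    MPI_Send(const_cast<int*>(&vec[0]), size, MPI_INT, target, tag + 100, MPI_COMM_WORLD);
#endif
}

std::vector<int> recvIntBuffer(int source, int tag)
{
  std::vector<int> vec;
#ifdef HAVE_MPI
  MPI_Status status;
  int size = 0;
  MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); // size first...
  vec.resize(size);
  if (size > 0)
    MPI_Recv(&vec[0], size, MPI_INT, source, tag + 100, MPI_COMM_WORLD, &status); // ...then payload
#endif
  return vec;
}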
#include <cstdlib>
#include <vector>
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
#include <mpi.h>
#endif
void MEDPARTITIONERTest::setUp()
{
this->_verbose=0;
-#if defined(HAVE_MPI2)
+#if defined(HAVE_MPI)
if (MyGlobals::_Rank==-1) //do once only
{
MPI_Init(0,0);
if (_verbose>10)
{
-#if defined(HAVE_MPI2)
- cout<<"\ndefined(HAVE_MPI2)"<<endl;
+#if defined(HAVE_MPI)
+ cout<<"\ndefined(HAVE_MPI)"<<endl;
#else
- cout<<"\nNOT defined(HAVE_MPI2)"<<endl;
+ cout<<"\nNOT defined(HAVE_MPI)"<<endl;
#endif
#if defined(MED_ENABLE_PARMETIS)
cout<<"defined(MED_ENABLE_PARMETIS)"<<endl;
void MEDPARTITIONERTest::testMetisSmallSize()
{
- //#if !defined(HAVE_MPI2)
+ //#if !defined(HAVE_MPI)
setSmallSize();
createTestMeshes();
std::string MetisOrScotch("metis");
void MEDPARTITIONERTest::testScotchSmallSize()
{
- //#if !defined(HAVE_MPI2)
+ //#if !defined(HAVE_MPI)
setSmallSize();
createTestMeshes();
std::string MetisOrScotch("scotch");
CPPUNIT_TEST( testScotchSmallSize );
#endif
-#if defined(HAVE_MPI2)
+#if defined(HAVE_MPI)
#if defined(MED_ENABLE_PARMETIS)
//test with mpi on system
CPPUNIT_TEST( testMpirunSmallSize );
void testScotchSmallSize();
#endif
-#if defined(HAVE_MPI2)
+#if defined(HAVE_MPI)
void testMpirunSmallSize();
void testMpirunMedianSize();
void testMpirunHugeSize();
using namespace ParaMEDMEM;
using namespace MEDPARTITIONER;
-#if defined(HAVE_MPI2)
+#if defined(HAVE_MPI)
void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForMesh()
{
int res;
#include <iomanip>
#include <sstream>
#include <string>
+#include <cstring>
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
#include <mpi.h>
#endif
SET(renumbercpp_LDFLAGS medloader)
+IF(PARMETIS_FOUND)
+ INCLUDE_DIRECTORIES(${PARMETIS_INCLUDE_DIRS})
+ENDIF(PARMETIS_FOUND)
+
IF(METIS_FOUND)
INCLUDE_DIRECTORIES(${METIS_INCLUDE_DIRS})