#include "MEDPARTITIONER_MeshCollectionDriver.hxx"
#include "MEDPARTITIONER_MeshCollectionMedXmlDriver.hxx"
#include "MEDPARTITIONER_MeshCollectionMedAsciiDriver.hxx"
-#include "MEDPARTITIONER_ParallelTopology.hxx"
#include "MEDPARTITIONER_ParaDomainSelector.hxx"
+#include "MEDPARTITIONER_Topology.hxx"
+#ifdef HAVE_MPI2
#include "MEDPARTITIONER_JointFinder.hxx"
+#include "MEDPARTITIONER_ParallelTopology.hxx"
+#endif
#include "MEDPARTITIONER_Graph.hxx"
#include "MEDPARTITIONER_UserGraph.hxx"
#include "MEDPARTITIONER_Utils.hxx"
}
}
}
-
+#ifdef HAVE_MPI2
if (isParallelMode())
{
//if (MyGlobals::_Verbose>300) std::cout<<"proc "<<rank<<" : castCellMeshes send/receive"<<std::endl;
}
}
-
+#endif
//fusing the split meshes
if (MyGlobals::_Verbose>200)
std::cout << "proc " << rank << " : castCellMeshes fusing" << std::endl;
for (int inew=0; inew<_topology->nbDomain(); inew++) //cvwat12
{
+#ifdef HAVE_MPI2
//sending meshes for parallel computation
if (isParallelMode() && _domain_selector->isMyDomain(inew) && !_domain_selector->isMyDomain(iold))
_domain_selector->sendMesh(*(getMesh(inew)), _domain_selector->getProcessorID(iold));
mesh->decrRef();
}
else if (!isParallelMode() || (_domain_selector->isMyDomain(inew) && _domain_selector->isMyDomain(iold)))
+#else
+ if (!isParallelMode() || (_domain_selector->isMyDomain(inew) && _domain_selector->isMyDomain(iold)))
+#endif
{
ParaMEDMEM::DataArrayDouble* coords = getMesh(inew)->getCoords();
for (int inode=0; inode<_mesh[inew]->getNumberOfNodes();inode++)
}
// send/receive stuff
+#ifdef HAVE_MPI2
if (isParallelMode())
{
ParaMEDMEM::MEDCouplingUMesh *empty=CreateEmptyMEDCouplingUMesh();
}
empty->decrRef();
}
-
+#endif
//recollecting the bits of splitMeshes to fuse them into one
if (MyGlobals::_Verbose>300) std::cout<<"proc "<<MyGlobals::_Rank<<" : fuse splitMeshes"<<std::endl;
meshesCastTo.resize(newSize);
int ioldMax=meshesCastFrom.size();
int inewMax=meshesCastTo.size();
// send-recv operations
+#ifdef HAVE_MPI2
for (int inew=0; inew<inewMax; inew++)
{
for (int iold=0; iold<ioldMax; iold++)
size=0;
sendIds.resize(size);
}
- //std::cout<<"proc "<<_domain_selector->rank()<<" : castIntField SendIntVec "<<size<<std::endl;
SendIntVec(sendIds, _domain_selector->getProcessorID(inew));
}
//receiving arrays from distant domains
}
}
}
+#endif
//local contributions and aggregation
for (int inew=0; inew<inewMax; inew++)
std::string descriptionField=initialCollection.getFieldDescriptions()[ifield];
if (descriptionField.find(nameTo)==std::string::npos)
continue; //only nameTo accepted in Fields name description
+#ifdef HAVE_MPI2
for (int inew=0; inew<inewMax; inew++)
{
for (int iold=0; iold<ioldMax; iold++)
}
}
}
-
+#endif
//local contributions and aggregation
for (int inew=0; inew<inewMax; inew++)
{
try
{
//check for all proc/file compatibility of _field_descriptions
- //*MyGlobals::_File_Names=AllgathervVectorOfString(*MyGlobals::_File_Names);
+#ifdef HAVE_MPI2
_field_descriptions=AllgathervVectorOfString(MyGlobals::_Field_Descriptions);
+#else
+ _field_descriptions=MyGlobals::_Field_Descriptions;
+#endif
}
catch(INTERP_KERNEL::Exception& e)
{
std::cerr << "proc " << MyGlobals::_Rank << " : INTERP_KERNEL_Exception : " << e.what() << std::endl;
throw INTERP_KERNEL::Exception("Something wrong verifying coherency of files med ands fields");
}
-
+#ifdef HAVE_MPI2
try
{
//check for all proc/file compatibility of _family_info
std::cerr << "proc " << MyGlobals::_Rank << " : INTERP_KERNEL_Exception : " << e.what() << std::endl;
throw INTERP_KERNEL::Exception("Something wrong merging all groupInfo");
}
+#endif
}
/*! constructing the MESH collection from a sequential MED-file
delete _driver;
if (_topology!=0 && _owns_topology)
delete _topology;
-
+#ifdef HAVE_MPI2
delete _joint_finder;
+#endif
}
/*! constructing the MESH collection from a file
std::vector<std::vector<std::multimap<int,int> > > commonDistantNodes;
int nbdomain=_topology->nbDomain();
+#ifdef HAVE_MPI2
if (isParallelMode())
{
_joint_finder=new JointFinder(*this);
commonDistantNodes=_joint_finder->getDistantNodeCell();
}
- if (MyGlobals::_Verbose>500) _joint_finder->print();
-
+ if (MyGlobals::_Verbose>500)
+ _joint_finder->print();
+#endif
//looking for reverse nodal connectivity i global numbering
for (int idomain=0; idomain<nbdomain; idomain++)
{
}
revConn->decrRef();
index->decrRef();
+#ifdef HAVE_MPI2
for (int iother=0; iother<nbdomain; iother++)
{
std::multimap<int,int>::iterator it;
cell2node.insert(make_pair(globalCell, globalNode));
}
}
+#endif
} //endfor idomain
//creating graph arcs (cell to cell relations)
if (MyGlobals::_Is0verbose>10)
std::cout << "building new topology" << std::endl;
//cellGraph is a shared pointer
- Topology* topology=new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension());
+ Topology *topology=0;
+#ifdef HAVE_MPI2
+ topology=new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension());
+#endif
//cleaning
delete [] edgeweights;
delete cellGraph;
cellGraph=new UserGraph(array, partition, _topology->nbCells());
//cellGraph is a shared pointer
- Topology* topology = new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension());
-
+ Topology *topology=0;
+#ifdef HAVE_MPI2
+ topology=new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension());
+#endif
// if (array!=0) delete array;
delete cellGraph;
return topology;
#include <iomanip>
#include <sstream>
#include <string>
-
-#ifdef HAVE_MPI2
-#include <mpi.h>
-#endif
+#include <cstring>
using namespace MEDPARTITIONER;
return res;
}
-/*!
- * not optimized but suffisant
- * return empty vector if i am not target
- */
-std::vector<std::string> MEDPARTITIONER::SendAndReceiveVectorOfString(const std::vector<std::string>& vec, const int source, const int target)
-{
- int rank=MyGlobals::_Rank;
-
- MPI_Status status;
- int tag = 111001;
- if (rank == source)
- {
- std::string str=SerializeFromVectorOfString(vec);
- int size=str.length();
- MPI_Send( &size, 1, MPI_INT, target, tag, MPI_COMM_WORLD );
- MPI_Send( (void*)str.data(), str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD );
- }
-
- int recSize=0;
- if (rank == target)
- {
- MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
- std::string recData(recSize,'x');
- MPI_Recv((void*)recData.data(), recSize, MPI_CHAR, source, tag+100, MPI_COMM_WORLD, &status);
- return DeserializeToVectorOfString(recData); //not empty one for target proc
- }
- std::vector<std::string> res;
- return res; //empty one for other proc
-}
-
-/*!
- * strings NO need all same size!!!!
- */
-std::vector<std::string> MEDPARTITIONER::AllgathervVectorOfString(const std::vector<std::string>& vec)
-{
- int world_size=MyGlobals::_World_Size;
- std::string str=SerializeFromVectorOfString(vec);
-
- std::vector<int> indexes(world_size);
- int size=str.length();
- MPI_Allgather(&size, 1, MPI_INT,
- &indexes[0], 1, MPI_INT, MPI_COMM_WORLD);
-
- //calcul of displacement
- std::vector< int > disp(1,0);
- for (int i=0; i<world_size; i++) disp.push_back( disp.back() + indexes[i] );
-
- std::string recData(disp.back(),'x');
- MPI_Allgatherv((void*)str.data(), str.length(), MPI_CHAR,
- (void*)recData.data(), &indexes[0], &disp[0], MPI_CHAR,
- MPI_COMM_WORLD);
-
- //really extraordinary verbose for debug
- std::vector<std::string> deserial=DeserializeToVectorOfString(recData);
- if (MyGlobals::_Verbose>1000)
- {
- std::cout << "proc "<<MyGlobals::_Rank<<" : receive '" << recData << "'" << std::endl;
- std::cout << "deserialize is : a vector of size " << deserial.size() << std::endl;
- std::cout << ReprVectorOfString(deserial) << std::endl;
- }
- return deserial;
-}
-
//void MEDPARTITIONER::sendRecvVectorOfString(const std::vector<string>& vec, const int source, const int target)
//TODO
std::string MEDPARTITIONER::Cle1ToStr(const std::string& s, const int inew)
return res;
}
-/*!
- Sends content of \a vec to processor \a target. To be used with \a RecvDoubleVec method.
- \param vec vector to be sent
- \param target processor id of the target
-*/
-void MEDPARTITIONER::SendDoubleVec(const std::vector<double>& vec, const int target)
-{
- int tag = 111002;
- int size=vec.size();
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl;
-#ifdef HAVE_MPI2
- MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
- MPI_Send(const_cast<double*>(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
-#endif
-}
-
-/*! Receives messages from proc \a source to fill vector<int> vec.
- To be used with \a SendDoubleVec method.
-
- \param vec vector that is filled
- \param source processor id of the incoming messages
-*/
-std::vector<double>* MEDPARTITIONER::RecvDoubleVec(const int source)
-{
- int tag = 111002;
- int size;
-#ifdef HAVE_MPI2
- MPI_Status status;
- MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;
- std::vector<double>* vec=new std::vector<double>;
- vec->resize(size);
- MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
-#endif
- return vec;
-}
-
-void MEDPARTITIONER::RecvDoubleVec(std::vector<double>& vec, const int source)
-{
- int tag = 111002;
- int size;
-#ifdef HAVE_MPI2
- MPI_Status status;
- MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
- if (MyGlobals::_Verbose>1000)
- std::cout<< "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;;
- vec.resize(size);
- MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
-#endif
-}
-/*!
- Sends content of \a vec to processor \a target. To be used with \a RecvIntVec method.
- \param vec vector to be sent
- \param target processor id of the target
-*/
-void MEDPARTITIONER::SendIntVec(const std::vector<int>& vec, const int target)
-{
- int tag = 111003;
- int size=vec.size();
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl;
-#ifdef HAVE_MPI2
- MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
- MPI_Send(const_cast<int*>(&vec[0]), size,MPI_INT, target, tag+100, MPI_COMM_WORLD);
-#endif
-}
-
-/*! Receives messages from proc \a source to fill vector<int> vec.
- To be used with \a SendIntVec method.
- \param vec vector that is filled
- \param source processor id of the incoming messages
-*/
-std::vector<int> *MEDPARTITIONER::RecvIntVec(const int source)
-{
- int tag = 111003;
- int size;
-#ifdef HAVE_MPI2
- MPI_Status status;
- MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
- std::vector<int> *vec=new std::vector<int>;
- vec->resize(size);
- MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
-#endif
- return vec;
-}
-
-void MEDPARTITIONER::RecvIntVec(std::vector<int>& vec, const int source)
-{
- int tag = 111003;
- int size;
-#ifdef HAVE_MPI2
- MPI_Status status;
- MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
- vec.resize(size);
- MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD,&status);
-#endif
-}
-
-/*!
- Sends content of \a dataArrayInt to processor \a target.
- To be used with \a RecvDataArrayInt method.
- \param da dataArray to be sent
- \param target processor id of the target
-*/
-void MEDPARTITIONER::SendDataArrayInt(const ParaMEDMEM::DataArrayInt *da, const int target)
-{
- if (da==0)
- throw INTERP_KERNEL::Exception("Problem send DataArrayInt* NULL");
- int tag = 111004;
- int size[3];
- size[0]=da->getNbOfElems();
- size[1]=da->getNumberOfTuples();
- size[2]=da->getNumberOfComponents();
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl;
-#ifdef HAVE_MPI2
- MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
- const int *p=da->getConstPointer();
- MPI_Send(const_cast<int*>(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD);
-#endif
-}
-
-/*! Receives messages from proc \a source to fill dataArrayInt da.
- To be used with \a SendIntVec method.
- \param da dataArrayInt that is filled
- \param source processor id of the incoming messages
-*/
-ParaMEDMEM::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
-{
- int tag = 111004;
- int size[3];
-#ifdef HAVE_MPI2
- MPI_Status status;
- MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayInt " << size[0] << std::endl;
- if (size[0]!=(size[1]*size[2]))
- throw INTERP_KERNEL::Exception("Problem in RecvDataArrayInt incoherent sizes");
- ParaMEDMEM::DataArrayInt* da=ParaMEDMEM::DataArrayInt::New();
- da->alloc(size[1],size[2]);
- int *p=da->getPointer();
- MPI_Recv(const_cast<int*>(&p[0]), size[0], MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
-#endif
- return da;
-}
-
-/*!
- Sends content of \a dataArrayInt to processor \a target.
- To be used with \a RecvDataArrayDouble method.
- \param da dataArray to be sent
- \param target processor id of the target
-*/
-void MEDPARTITIONER::SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble *da, const int target)
-{
- if (da==0)
- throw INTERP_KERNEL::Exception("Problem send DataArrayDouble* NULL");
- int tag = 111005;
- int size[3];
- size[0]=da->getNbOfElems();
- size[1]=da->getNumberOfTuples();
- size[2]=da->getNumberOfComponents();
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl;
-#ifdef HAVE_MPI2
- MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
- const double *p=da->getConstPointer();
- MPI_Send(const_cast<double*>(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
-#endif
-}
-
-/*! Receives messages from proc \a source to fill dataArrayDouble da.
- To be used with \a SendDoubleVec method.
- \param da dataArrayDouble that is filled
- \param source processor id of the incoming messages
-*/
-ParaMEDMEM::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source)
-{
- int tag = 111005;
- int size[3];
-#ifdef HAVE_MPI2
- MPI_Status status;
- MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
- if (MyGlobals::_Verbose>1000)
- std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayDouble " << size[0] << std::endl;
- if (size[0]!=(size[1]*size[2]))
- throw INTERP_KERNEL::Exception("Problem in RecvDataArrayDouble incoherent sizes");
- ParaMEDMEM::DataArrayDouble* da=ParaMEDMEM::DataArrayDouble::New();
- da->alloc(size[1],size[2]);
- double *p=da->getPointer();
- MPI_Recv(const_cast<double*>(&p[0]), size[0], MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
-#endif
- return da;
-}
-
/*!
* create empty MEDCouplingUMesh* dim 3
*/
umesh->checkCoherency();
return umesh;
}
-
-void MEDPARTITIONER::TestVectorOfStringMpi()
-{
- int rank=MyGlobals::_Rank;
- int world_size=MyGlobals::_World_Size;
- std::vector<std::string> myVector;
- std::ostringstream oss;
- oss << "hello from " << std::setw(5) << rank << " " << std::string(rank+1,'n') <<
- " next is an empty one";
- myVector.push_back(oss.str());
- myVector.push_back("");
- myVector.push_back("next is an singleton");
- myVector.push_back("1");
-
- if (rank==0)
- {
- std::string s0=SerializeFromVectorOfString(myVector);
- std::vector<std::string> res=DeserializeToVectorOfString(s0);
- if (res.size()!=myVector.size())
- throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent sizes");
- for (std::size_t i=0; i<myVector.size(); i++)
- if (res[i]!=myVector[i])
- throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent elements");
- }
-
- for (int i=0; i<world_size; i++)
- {
- for (int j=0; j<world_size; j++)
- {
- std::vector<std::string> res=SendAndReceiveVectorOfString(myVector, i, j);
- if ((rank==j) && MyGlobals::_Verbose>20)
- std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
- if (rank==j)
- {
- if (res.size()!=myVector.size())
- throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent sizes");
- for (std::size_t ii=1; ii<myVector.size(); ii++) //first is different
- if (res[i]!=myVector[ii])
- throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent elements");
- }
- else
- {
- if (res.size()!=0)
- throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString size have to be 0");
- }
- }
- }
- std::vector<std::string> res=AllgathervVectorOfString(myVector);
- //sometimes for test
- res=AllgathervVectorOfString(myVector);
- res=AllgathervVectorOfString(myVector);
- if (rank==0 && MyGlobals::_Verbose>20)
- std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
- if (res.size()!=myVector.size()*world_size)
- throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent sizes");
- int jj=-1;
- for (int j=0; j<world_size; j++)
- {
- for (int i=0; i<(int)myVector.size(); i++)
- {
- jj=jj+1;
- if (i==0)
- continue; //first is different
- if (res[jj]!=myVector[i])
- throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent elements");
- }
- }
- if (MyGlobals::_Verbose)
- std::cout << "proc " << rank << " : OK TestVectorOfStringMpi END" << std::endl;
-}
-
-void MEDPARTITIONER::TestMapOfStringIntMpi()
-{
- int rank=MyGlobals::_Rank;
- std::map<std::string,int> myMap;
- myMap["one"]=1;
- myMap["two"]=22; //a bug
- myMap["three"]=3;
- myMap["two"]=2; //last speaking override
-
- if (rank==0)
- {
- std::vector<std::string> v2=VectorizeFromMapOfStringInt(myMap);
- std::map<std::string,int> m3=DevectorizeToMapOfStringInt(v2);
- if (ReprMapOfStringInt(m3)!=ReprMapOfStringInt(myMap))
- throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringInt");
- }
-
- std::vector<std::string> v2=AllgathervVectorOfString(VectorizeFromMapOfStringInt(myMap));
- if (rank==0 && MyGlobals::_Verbose>20)
- {
- std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
- std::cout << ReprVectorOfString(v2) << std::endl;
- std::map<std::string,int> m2=DevectorizeToMapOfStringInt(v2);
- std::cout << "m2 is : a map of size " << m2.size() << std::endl;
- std::cout << ReprMapOfStringInt(m2) << std::endl;
- }
- if (MyGlobals::_Verbose)
- std::cout << "proc " << rank << " : OK TestMapOfStringIntMpi END" << std::endl;
-}
-
-void MEDPARTITIONER::TestMapOfStringVectorOfStringMpi()
-{
- int rank=MyGlobals::_Rank;
- std::vector<std::string> myVector;
- std::ostringstream oss;
- oss << "hello from " << std::setw(5) << MyGlobals::_Rank << " " << std::string(rank+1,'n') << " next is an empty one";
- myVector.push_back(oss.str());
- myVector.push_back("");
- myVector.push_back("next is an singleton");
- myVector.push_back("1");
-
- if (rank==0)
- {
- std::map< std::string,std::vector<std::string> > m2;
- m2["first key"]=myVector;
- m2["second key"]=myVector;
- std::vector<std::string> v2=VectorizeFromMapOfStringVectorOfString(m2);
- std::map< std::string,std::vector<std::string> > m3=DevectorizeToMapOfStringVectorOfString(v2);
- if (rank==0 && MyGlobals::_Verbose>20)
- std::cout << "m2 is : a MapOfStringVectorOfString of size " << m2.size() << std::endl;
- std::cout << ReprMapOfStringVectorOfString(m2) << std::endl;
- std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
- std::cout << ReprVectorOfString(v2) << std::endl;
- std::cout << "m3 is : a map of size "<<m3.size() << std::endl;
- std::cout << ReprMapOfStringVectorOfString(m3) << std::endl;
- if (ReprMapOfStringVectorOfString(m3)!=ReprMapOfStringVectorOfString(m2))
- throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringVectorOfString");
- }
-
- std::map< std::string,std::vector<std::string> > m4;
- m4["1rst key"]=myVector;
- m4["2snd key"]=myVector;
- std::vector<std::string> v4=AllgathervVectorOfString(VectorizeFromMapOfStringVectorOfString(m4));
- if (rank==0 && MyGlobals::_Verbose>20)
- {
- std::map< std::string,std::vector<std::string> > m5=DevectorizeToMapOfStringVectorOfString(v4);
- std::map< std::string,std::vector<std::string> > m6=DeleteDuplicatesInMapOfStringVectorOfString(m5);
- std::cout<< "m5 is : a map of size "<<m5.size() << std::endl;
- std::cout<< ReprMapOfStringVectorOfString(m5) << std::endl;
- std::cout<< "m6 is : a map from m5 with deleteDuplicates of size " << m6.size() << std::endl;
- std::cout<< ReprMapOfStringVectorOfString(m6) << std::endl;
- }
- if (MyGlobals::_Verbose)
- std::cout<<"proc " << rank << " : OK TestMapOfStringVectorOfStringMpi END" << std::endl;
-}
-
-void MEDPARTITIONER::TestDataArrayMpi()
-{
- int rank=MyGlobals::_Rank;
- //int
- {
- ParaMEDMEM::DataArrayInt* send=ParaMEDMEM::DataArrayInt::New();
- ParaMEDMEM::DataArrayInt* recv=0;
- int nbOfTuples=5;
- int numberOfComponents=3;
- send->alloc(nbOfTuples,numberOfComponents);
- std::vector<int> vals;
- for (int j=0; j<nbOfTuples; j++)
- for (int i=0; i<numberOfComponents; i++) vals.push_back((j+1)*10+i+1);
- std::copy(vals.begin(),vals.end(),send->getPointer());
- if (rank==0)
- SendDataArrayInt(send, 1);
- if (rank==1)
- recv=RecvDataArrayInt(0);
- if (rank==1 && MyGlobals::_Verbose>20)
- {
- std::cout << send->repr() << std::endl;
- std::cout << recv->repr() << std::endl;
- }
- if (rank==1)
- {
- if (send->repr()!=recv->repr())
- throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayInt");
- }
- send->decrRef();
- if (rank==1)
- recv->decrRef();
- }
- //double
- {
- ParaMEDMEM::DataArrayDouble* send=ParaMEDMEM::DataArrayDouble::New();
- ParaMEDMEM::DataArrayDouble* recv=0;
- int nbOfTuples=5;
- int numberOfComponents=3;
- send->alloc(nbOfTuples,numberOfComponents);
- std::vector<double> vals;
- for (int j=0; j<nbOfTuples; j++)
- for (int i=0; i<numberOfComponents; i++) vals.push_back(double(j+1)+double(i+1)/10);
- std::copy(vals.begin(),vals.end(),send->getPointer());
- if (rank==0) SendDataArrayDouble(send, 1);
- if (rank==1) recv=RecvDataArrayDouble(0);
- if (rank==1 && MyGlobals::_Verbose>20)
- {
- std::cout << send->repr() << std::endl;
- std::cout << recv->repr() << std::endl;
- }
- if (rank==1)
- {
- if (send->repr()!=recv->repr())
- throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayDouble");
- }
- send->decrRef();
- if (rank==1) recv->decrRef();
- }
-
- if (MyGlobals::_Verbose)
- std::cout << "proc " << rank << " : OK TestDataArrayMpi END" << std::endl;
-}
-
-void MEDPARTITIONER::TestPersistantMpi0To1(int taille, int nb)
-{
- double temps_debut=MPI_Wtime();
- int rank=MyGlobals::_Rank;
- std::vector<int> x, y;
- int tag=111111;
- MPI_Request requete0, requete1;
- MPI_Status statut;
- int ok=0;
- std::string res;
- if (rank==0)
- {
- x.resize(taille);
- MPI_Ssend_init(&x[0], taille, MPI_INT, 1, tag, MPI_COMM_WORLD , &requete0);
- for(int k=0; k<nb; k++)
- {
- for (int i=0; i<taille; ++i) x[i]=k;
- //Envoi d’un gros message --> cela peut prendre du temps
- MPI_Start(&requete0);
- //Traitement sequentiel independant de "x"
- //...
- MPI_Wait(&requete0, &statut);
- //Traitement sequentiel impliquant une modification de "x" en memoire
- //x=...
- }
- MPI_Request_free(&requete0);
- }
- else if (rank == 1)
- {
- y.resize(taille);
- MPI_Recv_init(&y[0], taille, MPI_INT, 0, tag, MPI_COMM_WORLD , &requete1);
- for(int k=0; k<nb; k++)
- {
- //Pre-traitement sequentiel
- //...
- for (int i=0; i<taille; ++i) y[i]=(-1);
- //Reception du gros message --> cela peut prendre du temps
- MPI_Start(&requete1);
- //Traitement sequentiel independant de "y"
- //...
- MPI_Wait(&requete1, &statut);
- //Traitement sequentiel dependant de "y"
- //...=f(y)
- int nbb=0;
- for (int i=0; i<taille; ++i)
- if (y[i]==k)
- nbb++;
- if (nbb==taille)
- ok++;
- if (MyGlobals::_Verbose>9)
- {
- res="0K";
- if (nbb!=taille)
- res="KO";
- std::cout << res << k << " ";
- }
- }
- res="0K";
- if (ok!=nb)
- res="BAD";
- if (MyGlobals::_Verbose>1)
- std::cout << "result " << res << " time(sec) " << MPI_Wtime()-temps_debut << std::endl;
- MPI_Request_free(&requete1);
- }
- //end_time=(MPI_WTIME()-start_time);
-}
-
-void MEDPARTITIONER::TestPersistantMpiRing(int taille, int nb)
-{
- double temps_debut=MPI_Wtime();
- int befo, next, rank, wsize, tagbefo, tagnext;
- rank=MyGlobals::_Rank;
- wsize=MyGlobals::_World_Size;
- befo=rank-1; if (befo<0) befo=wsize-1;
- next=rank+1; if (next>=wsize) next=0;
- std::vector<int> x, y;
- tagbefo=111111+befo;
- tagnext=111111+rank;
- MPI_Request requete0, requete1;
- MPI_Status statut1, statut2;
- int ok=0;
- std::string res;
- //cout<<"ini|"<<rank<<'|'<<befo<<'|'<<next<<' ';
- {
- x.resize(taille);
- y.resize(taille);
- MPI_Ssend_init(&x[0], taille, MPI_INT, next, tagnext, MPI_COMM_WORLD , &requete0);
- MPI_Recv_init(&y[0], taille, MPI_INT, befo, tagbefo, MPI_COMM_WORLD , &requete1);
- for(int k=0; k<nb; k++)
- {
- for (int i=0; i<taille; ++i) x[i]=k+rank;
- //Envoi d’un gros message --> cela peut prendre du temps
- MPI_Start(&requete0);
- //Reception du gros message --> cela peut prendre du temps
- for (int i=0; i<taille; ++i) y[i]=(-1);
- MPI_Start(&requete1);
- //Traitement sequentiel independant de "x"
- //...
- //Traitement sequentiel independant de "y"
- //...
- MPI_Wait(&requete1, &statut1);
- //Traitement sequentiel dependant de "y"
- //...=f(y)
- int nbb=0;
- for (int i=0; i<taille; ++i)
- if (y[i]==k+befo)
- nbb++;
- if (nbb==taille)
- ok++;
- if (MyGlobals::_Verbose>9)
- {
- res="0K"+IntToStr(rank);
- if (nbb!=taille)
- res="KO"+IntToStr(rank);
- std::cout << res << k << " ";
- }
- MPI_Wait(&requete0, &statut2);
- //Traitement sequentiel impliquant une modification de "x" en memoire
- //x=...
- }
- res="0K"; if (ok!=nb) res="MAUVAIS";
- temps_debut=MPI_Wtime()-temps_debut;
- MPI_Request_free(&requete1);
- MPI_Request_free(&requete0);
- }
- //end_time=(MPI_WTIME()-start_time);
- if (MyGlobals::_Verbose>1)
- std::cout << "result on proc " << rank << " " << res << " time(sec) " << temps_debut << std::endl;
-}
-
-void MEDPARTITIONER::TestPersistantMpiRingOnCommSplit(int size, int nb)
-{
- double temps_debut=MPI_Wtime();
- int rank=MyGlobals::_Rank;
- MPI_Comm newcomm;
- int color=1;
- int rankMax=4;
- if (rank>=rankMax)
- color=MPI_UNDEFINED;
- //MPI_Comm_dup (MPI_COMM_WORLD, &newcomm) ;
- MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm);
-
- int befo, next, wsize, tagbefo, tagnext;
- wsize=rankMax;
- if (wsize>MyGlobals::_World_Size)
- wsize=MyGlobals::_World_Size;
- befo=rank-1;
- if (befo<0)
- befo=wsize-1;
- next=rank+1;
- if (next>=wsize)
- next=0;
- std::vector<int> x, y;
- tagbefo=111111+befo;
- tagnext=111111+rank;
- MPI_Request requete0, requete1;
- MPI_Status statut1, statut2;
- int ok=0;
- std::string res;
-
- if (color==1)
- {
- x.resize(size);
- y.resize(size);
- MPI_Ssend_init(&x[0], size, MPI_INT, next, tagnext, newcomm , &requete0);
- MPI_Recv_init(&y[0], size, MPI_INT, befo, tagbefo, newcomm , &requete1);
- for(int k=0; k<nb; k++)
- {
- for (int i=0; i<size; ++i)
- x[i]=k+rank;
- //Send of big message --> time consuming
- MPI_Start(&requete0);
- //Reception of big message --> time consuming
- for (int i=0; i<size; ++i)
- y[i]=-1;
- MPI_Start(&requete1);
- //Traitement sequentiel independant de "x"
- //...
- //Traitement sequentiel independant de "y"
- //...
- //cout<<"dsr|"<<rank<<' ';
- MPI_Wait(&requete1, &statut1);
- //Traitement sequentiel dependant de "y"
- //...=f(y)
- int nbb=0;
- for (int i=0; i<size; ++i)
- if (y[i]==k+befo)
- nbb++;
- if (nbb==size)
- ok++;
- if (MyGlobals::_Verbose>9)
- {
- res="0K"+IntToStr(rank);
- if (nbb!=size)
- res="KO"+IntToStr(rank);
- std::cout << res << k << " ";
- }
- MPI_Wait(&requete0, &statut2);
- //Traitement sequentiel impliquant une modification de "x" en memoire
- //x=...
- }
- res="0K";
- if (ok!=nb)
- res="MAUVAIS";
- temps_debut=MPI_Wtime()-temps_debut;
- MPI_Request_free(&requete1);
- MPI_Request_free(&requete0);
- }
- //MPI_Barrier(MPI_COMM_WORLD);
- if (color==1)
- MPI_Comm_free(&newcomm);
- if (MyGlobals::_Verbose>1)
- std::cout << "resultat proc " << rank <<" " << res << " time(sec) " << temps_debut << std::endl;
-}
-
-
ParaMEDMEM::DataArrayInt *CreateDataArrayIntFromVector(const std::vector<int>& v);
ParaMEDMEM::DataArrayInt *CreateDataArrayIntFromVector(const std::vector<int>& v, const int nbComponents);
ParaMEDMEM::DataArrayDouble *CreateDataArrayDoubleFromVector(const std::vector<double>& v);
+
+ ParaMEDMEM::MEDCouplingUMesh *CreateEmptyMEDCouplingUMesh();
+ std::vector<std::string> BrowseFieldDouble(const ParaMEDMEM::MEDCouplingFieldDouble* fd);
+ std::vector<std::string> BrowseAllFields(const std::string& myfile);
+ std::vector<std::string> BrowseAllFieldsOnMesh(const std::string& myfile, const std::string& mymesh, const int idomain);
+ std::vector<std::string> GetInfosOfField(const char *fileName, const char *meshName, const int idomain );
+
+#ifdef HAVE_MPI2
//not adviced, interblocking, use sendAndReceive
//void SendVectorOfString(const std::vector<std::string>& vec, const int target);
//std::vector<std::string> RecvVectorOfString(const int source);
std::vector<std::string> SendAndReceiveVectorOfString(const std::vector<std::string>& vec, const int source, const int target);
std::vector<std::string> AllgathervVectorOfString(const std::vector<std::string>& vec);
- std::vector<std::string> BrowseFieldDouble(const ParaMEDMEM::MEDCouplingFieldDouble* fd);
- std::vector<std::string> BrowseAllFields(const std::string& myfile);
- std::vector<std::string> BrowseAllFieldsOnMesh(const std::string& myfile, const std::string& mymesh, const int idomain);
- std::vector<std::string> GetInfosOfField(const char *fileName, const char *meshName, const int idomain );
-
void SendDoubleVec(const std::vector<double>& vec, const int target);
std::vector<double> *RecvDoubleVec(const int source);
void RecvDoubleVec(std::vector<double>& vec, const int source);
ParaMEDMEM::DataArrayInt *RecvDataArrayInt(const int source);
void SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble* da, const int target);
ParaMEDMEM::DataArrayDouble *RecvDataArrayDouble(const int source);
-
- ParaMEDMEM::MEDCouplingUMesh *CreateEmptyMEDCouplingUMesh();
-
+
void TestVectorOfStringMpi();
void TestMapOfStringIntMpi();
void TestMapOfStringVectorOfStringMpi();
void TestPersistantMpi0To1(int taille, int nb);
void TestPersistantMpiRing(int taille, int nb);
void TestPersistantMpiRingOnCommSplit(int taille, int nb);
+#endif
class MyGlobals
{
--- /dev/null
+// Copyright (C) 2007-2011 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "MEDPARTITIONER_Utils.hxx"
+
+#include "MEDLoader.hxx"
+#include "MEDLoaderBase.hxx"
+#include "MEDFileUtilities.hxx"
+#include "CellModel.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "InterpKernelException.hxx"
+#include "MEDCouplingAutoRefCountObjectPtr.hxx"
+#include "InterpKernelAutoPtr.hxx"
+
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <sstream>
+#include <string>
+
+#ifdef HAVE_MPI2
+#include <mpi.h>
+#endif
+
+using namespace MEDPARTITIONER;
+
+/*!
+ * not optimized but sufficient
+ * returns an empty vector if this proc is not the target
+ */
+std::vector<std::string> MEDPARTITIONER::SendAndReceiveVectorOfString(const std::vector<std::string>& vec, const int source, const int target)
+{
+ int rank=MyGlobals::_Rank;
+
+ MPI_Status status;
+ int tag = 111001;
+ if (rank == source)
+ {
+ std::string str=SerializeFromVectorOfString(vec);
+ int size=str.length();
+ MPI_Send( &size, 1, MPI_INT, target, tag, MPI_COMM_WORLD );
+ MPI_Send( (void*)str.data(), str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD );
+ }
+
+ int recSize=0;
+ if (rank == target)
+ {
+ MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
+ std::string recData(recSize,'x');
+ MPI_Recv((void*)recData.data(), recSize, MPI_CHAR, source, tag+100, MPI_COMM_WORLD, &status);
+ return DeserializeToVectorOfString(recData); //not empty one for target proc
+ }
+ std::vector<std::string> res;
+ return res; //empty one for other proc
+}
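+
+//Usage sketch (a hedged example, not part of the build): proc \a source and
+//proc \a target must both call the function; only \a target gets a non-empty
+//result. Assumes MPI is initialized and MyGlobals::_Rank is set.
+//  std::vector<std::string> v;
+//  if (MyGlobals::_Rank==0)
+//    v.push_back("hello");
+//  std::vector<std::string> got=SendAndReceiveVectorOfString(v, 0, 1);
+//  //proc 1 : got holds the strings ; other procs : got is empty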
+
+/*!
+ * the strings do not need to be all the same size
+ */
+std::vector<std::string> MEDPARTITIONER::AllgathervVectorOfString(const std::vector<std::string>& vec)
+{
+ int world_size=MyGlobals::_World_Size;
+ std::string str=SerializeFromVectorOfString(vec);
+
+ std::vector<int> indexes(world_size);
+ int size=str.length();
+ MPI_Allgather(&size, 1, MPI_INT,
+ &indexes[0], 1, MPI_INT, MPI_COMM_WORLD);
+
+  //computation of displacements
+ std::vector< int > disp(1,0);
+ for (int i=0; i<world_size; i++) disp.push_back( disp.back() + indexes[i] );
+
+ std::string recData(disp.back(),'x');
+ MPI_Allgatherv((void*)str.data(), str.length(), MPI_CHAR,
+ (void*)recData.data(), &indexes[0], &disp[0], MPI_CHAR,
+ MPI_COMM_WORLD);
+
+  //extremely verbose output, for debugging only
+ std::vector<std::string> deserial=DeserializeToVectorOfString(recData);
+ if (MyGlobals::_Verbose>1000)
+ {
+ std::cout << "proc "<<MyGlobals::_Rank<<" : receive '" << recData << "'" << std::endl;
+ std::cout << "deserialize is : a vector of size " << deserial.size() << std::endl;
+ std::cout << ReprVectorOfString(deserial) << std::endl;
+ }
+ return deserial;
+}
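+
+//Usage sketch (hedged example): this is a collective call, every proc of
+//MPI_COMM_WORLD must reach it; afterwards each proc holds the concatenation
+//of all procs' vectors, in rank order.
+//  std::vector<std::string> mine(1,"entry of this proc"); //hypothetical content
+//  std::vector<std::string> all=AllgathervVectorOfString(mine);
+//  //all.size()==sum of mine.size() over all procs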
+
+/*!
+ Sends content of \a vec to processor \a target. To be used with \a RecvDoubleVec method.
+ \param vec vector to be sent
+ \param target processor id of the target
+*/
+void MEDPARTITIONER::SendDoubleVec(const std::vector<double>& vec, const int target)
+{
+ int tag = 111002;
+ int size=vec.size();
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl;
+#ifdef HAVE_MPI2
+ MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
+ MPI_Send(const_cast<double*>(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
+#endif
+}
+
+/*! Receives messages from proc \a source to fill vector<double> vec.
+ To be used with \a SendDoubleVec method.
+
+ \param vec vector that is filled
+ \param source processor id of the incoming messages
+*/
+std::vector<double>* MEDPARTITIONER::RecvDoubleVec(const int source)
+{
+ int tag = 111002;
+ int size;
+#ifdef HAVE_MPI2
+ MPI_Status status;
+ MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;
+ std::vector<double>* vec=new std::vector<double>;
+ vec->resize(size);
+  MPI_Recv(&(*vec)[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status); //&(*vec)[0] : vec is a pointer, &vec[0] would point at the vector object itself
+#endif
+ return vec;
+}
+
+void MEDPARTITIONER::RecvDoubleVec(std::vector<double>& vec, const int source)
+{
+ int tag = 111002;
+ int size;
+#ifdef HAVE_MPI2
+ MPI_Status status;
+ MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
+ if (MyGlobals::_Verbose>1000)
+ std::cout<< "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;;
+ vec.resize(size);
+ MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
+#endif
+}
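+
+//Usage sketch for the paired helpers above (hedged example, hypothetical
+//ranks): each SendDoubleVec on one proc must be matched by a RecvDoubleVec
+//on the peer proc, otherwise both sides block.
+//  std::vector<double> v(3,1.5);
+//  if (MyGlobals::_Rank==0) SendDoubleVec(v,1);
+//  std::vector<double> w;
+//  if (MyGlobals::_Rank==1) RecvDoubleVec(w,0);
+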
+/*!
+ Sends content of \a vec to processor \a target. To be used with \a RecvIntVec method.
+ \param vec vector to be sent
+ \param target processor id of the target
+*/
+void MEDPARTITIONER::SendIntVec(const std::vector<int>& vec, const int target)
+{
+ int tag = 111003;
+ int size=vec.size();
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl;
+#ifdef HAVE_MPI2
+ MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
+  MPI_Send(const_cast<int*>(&vec[0]), size, MPI_INT, target, tag+100, MPI_COMM_WORLD);
+#endif
+}
+
+/*! Receives messages from proc \a source to fill vector<int> vec.
+ To be used with \a SendIntVec method.
+ \param vec vector that is filled
+ \param source processor id of the incoming messages
+*/
+std::vector<int> *MEDPARTITIONER::RecvIntVec(const int source)
+{
+ int tag = 111003;
+ int size;
+#ifdef HAVE_MPI2
+ MPI_Status status;
+ MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
+ std::vector<int> *vec=new std::vector<int>;
+ vec->resize(size);
+  MPI_Recv(&(*vec)[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD, &status); //&(*vec)[0] : vec is a pointer, &vec[0] would point at the vector object itself
+#endif
+ return vec;
+}
+
+void MEDPARTITIONER::RecvIntVec(std::vector<int>& vec, const int source)
+{
+ int tag = 111003;
+ int size;
+#ifdef HAVE_MPI2
+ MPI_Status status;
+ MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
+ vec.resize(size);
+ MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD,&status);
+#endif
+}
+
+/*!
+ Sends content of \a dataArrayInt to processor \a target.
+ To be used with \a RecvDataArrayInt method.
+ \param da dataArray to be sent
+ \param target processor id of the target
+*/
+void MEDPARTITIONER::SendDataArrayInt(const ParaMEDMEM::DataArrayInt *da, const int target)
+{
+ if (da==0)
+ throw INTERP_KERNEL::Exception("Problem send DataArrayInt* NULL");
+ int tag = 111004;
+ int size[3];
+ size[0]=da->getNbOfElems();
+ size[1]=da->getNumberOfTuples();
+ size[2]=da->getNumberOfComponents();
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl;
+#ifdef HAVE_MPI2
+ MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
+ const int *p=da->getConstPointer();
+  MPI_Send(const_cast<int*>(&p[0]), size[0], MPI_INT, target, tag+100, MPI_COMM_WORLD);
+#endif
+}
+
+/*! Receives messages from proc \a source to fill dataArrayInt da.
+   To be used with \a SendDataArrayInt method.
+ \param da dataArrayInt that is filled
+ \param source processor id of the incoming messages
+*/
+ParaMEDMEM::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
+{
+ int tag = 111004;
+ int size[3];
+#ifdef HAVE_MPI2
+ MPI_Status status;
+ MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayInt " << size[0] << std::endl;
+ if (size[0]!=(size[1]*size[2]))
+ throw INTERP_KERNEL::Exception("Problem in RecvDataArrayInt incoherent sizes");
+ ParaMEDMEM::DataArrayInt* da=ParaMEDMEM::DataArrayInt::New();
+ da->alloc(size[1],size[2]);
+ int *p=da->getPointer();
+ MPI_Recv(const_cast<int*>(&p[0]), size[0], MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
+#endif
+ return da;
+}
+
+/*!
+  Sends content of \a dataArrayDouble to processor \a target.
+ To be used with \a RecvDataArrayDouble method.
+ \param da dataArray to be sent
+ \param target processor id of the target
+*/
+void MEDPARTITIONER::SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble *da, const int target)
+{
+ if (da==0)
+ throw INTERP_KERNEL::Exception("Problem send DataArrayDouble* NULL");
+ int tag = 111005;
+ int size[3];
+ size[0]=da->getNbOfElems();
+ size[1]=da->getNumberOfTuples();
+ size[2]=da->getNumberOfComponents();
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl;
+#ifdef HAVE_MPI2
+ MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
+ const double *p=da->getConstPointer();
+  MPI_Send(const_cast<double*>(&p[0]), size[0], MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
+#endif
+}
+
+/*! Receives messages from proc \a source to fill dataArrayDouble da.
+   To be used with \a SendDataArrayDouble method.
+ \param da dataArrayDouble that is filled
+ \param source processor id of the incoming messages
+*/
+ParaMEDMEM::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source)
+{
+ int tag = 111005;
+ int size[3];
+#ifdef HAVE_MPI2
+ MPI_Status status;
+ MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
+ if (MyGlobals::_Verbose>1000)
+ std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayDouble " << size[0] << std::endl;
+ if (size[0]!=(size[1]*size[2]))
+ throw INTERP_KERNEL::Exception("Problem in RecvDataArrayDouble incoherent sizes");
+ ParaMEDMEM::DataArrayDouble* da=ParaMEDMEM::DataArrayDouble::New();
+ da->alloc(size[1],size[2]);
+ double *p=da->getPointer();
+ MPI_Recv(const_cast<double*>(&p[0]), size[0], MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
+#endif
+ return da;
+}
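+
+//Usage sketch (hedged example; see also TestDataArrayMpi below): the
+//tuple/component layout travels in the 3-int size header, so the receiver
+//can allocate an identically shaped array before receiving the raw values.
+//  ParaMEDMEM::DataArrayDouble *da=ParaMEDMEM::DataArrayDouble::New();
+//  da->alloc(5,3); //5 tuples x 3 components, hypothetical shape
+//  if (MyGlobals::_Rank==0) SendDataArrayDouble(da,1);
+//  if (MyGlobals::_Rank==1) { ParaMEDMEM::DataArrayDouble *r=RecvDataArrayDouble(0); r->decrRef(); }
+//  da->decrRef();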
+
+void MEDPARTITIONER::TestVectorOfStringMpi()
+{
+ int rank=MyGlobals::_Rank;
+ int world_size=MyGlobals::_World_Size;
+ std::vector<std::string> myVector;
+ std::ostringstream oss;
+ oss << "hello from " << std::setw(5) << rank << " " << std::string(rank+1,'n') <<
+ " next is an empty one";
+ myVector.push_back(oss.str());
+ myVector.push_back("");
+ myVector.push_back("next is an singleton");
+ myVector.push_back("1");
+
+ if (rank==0)
+ {
+ std::string s0=SerializeFromVectorOfString(myVector);
+ std::vector<std::string> res=DeserializeToVectorOfString(s0);
+ if (res.size()!=myVector.size())
+ throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent sizes");
+ for (std::size_t i=0; i<myVector.size(); i++)
+ if (res[i]!=myVector[i])
+ throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent elements");
+ }
+
+ for (int i=0; i<world_size; i++)
+ {
+ for (int j=0; j<world_size; j++)
+ {
+ std::vector<std::string> res=SendAndReceiveVectorOfString(myVector, i, j);
+ if ((rank==j) && MyGlobals::_Verbose>20)
+ std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
+ if (rank==j)
+ {
+ if (res.size()!=myVector.size())
+ throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent sizes");
+ for (std::size_t ii=1; ii<myVector.size(); ii++) //first is different
+              if (res[ii]!=myVector[ii])
+ throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent elements");
+ }
+ else
+ {
+ if (res.size()!=0)
+ throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString size have to be 0");
+ }
+ }
+ }
+ std::vector<std::string> res=AllgathervVectorOfString(myVector);
+ //sometimes for test
+ res=AllgathervVectorOfString(myVector);
+ res=AllgathervVectorOfString(myVector);
+ if (rank==0 && MyGlobals::_Verbose>20)
+ std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
+ if (res.size()!=myVector.size()*world_size)
+ throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent sizes");
+ int jj=-1;
+ for (int j=0; j<world_size; j++)
+ {
+ for (int i=0; i<(int)myVector.size(); i++)
+ {
+ jj=jj+1;
+ if (i==0)
+ continue; //first is different
+ if (res[jj]!=myVector[i])
+ throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent elements");
+ }
+ }
+ if (MyGlobals::_Verbose)
+ std::cout << "proc " << rank << " : OK TestVectorOfStringMpi END" << std::endl;
+}
+
+void MEDPARTITIONER::TestMapOfStringIntMpi()
+{
+ int rank=MyGlobals::_Rank;
+ std::map<std::string,int> myMap;
+ myMap["one"]=1;
+ myMap["two"]=22; //a bug
+ myMap["three"]=3;
+ myMap["two"]=2; //last speaking override
+
+ if (rank==0)
+ {
+ std::vector<std::string> v2=VectorizeFromMapOfStringInt(myMap);
+ std::map<std::string,int> m3=DevectorizeToMapOfStringInt(v2);
+ if (ReprMapOfStringInt(m3)!=ReprMapOfStringInt(myMap))
+ throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringInt");
+ }
+
+ std::vector<std::string> v2=AllgathervVectorOfString(VectorizeFromMapOfStringInt(myMap));
+ if (rank==0 && MyGlobals::_Verbose>20)
+ {
+ std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
+ std::cout << ReprVectorOfString(v2) << std::endl;
+ std::map<std::string,int> m2=DevectorizeToMapOfStringInt(v2);
+ std::cout << "m2 is : a map of size " << m2.size() << std::endl;
+ std::cout << ReprMapOfStringInt(m2) << std::endl;
+ }
+ if (MyGlobals::_Verbose)
+ std::cout << "proc " << rank << " : OK TestMapOfStringIntMpi END" << std::endl;
+}
+
+void MEDPARTITIONER::TestMapOfStringVectorOfStringMpi()
+{
+ int rank=MyGlobals::_Rank;
+ std::vector<std::string> myVector;
+ std::ostringstream oss;
+ oss << "hello from " << std::setw(5) << MyGlobals::_Rank << " " << std::string(rank+1,'n') << " next is an empty one";
+ myVector.push_back(oss.str());
+ myVector.push_back("");
+ myVector.push_back("next is an singleton");
+ myVector.push_back("1");
+
+ if (rank==0)
+ {
+ std::map< std::string,std::vector<std::string> > m2;
+ m2["first key"]=myVector;
+ m2["second key"]=myVector;
+ std::vector<std::string> v2=VectorizeFromMapOfStringVectorOfString(m2);
+ std::map< std::string,std::vector<std::string> > m3=DevectorizeToMapOfStringVectorOfString(v2);
+      if (MyGlobals::_Verbose>20) //rank==0 already holds here
+        {
+          std::cout << "m2 is : a MapOfStringVectorOfString of size " << m2.size() << std::endl;
+          std::cout << ReprMapOfStringVectorOfString(m2) << std::endl;
+          std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
+          std::cout << ReprVectorOfString(v2) << std::endl;
+          std::cout << "m3 is : a map of size " << m3.size() << std::endl;
+          std::cout << ReprMapOfStringVectorOfString(m3) << std::endl;
+        }
+ if (ReprMapOfStringVectorOfString(m3)!=ReprMapOfStringVectorOfString(m2))
+ throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringVectorOfString");
+ }
+
+ std::map< std::string,std::vector<std::string> > m4;
+ m4["1rst key"]=myVector;
+ m4["2snd key"]=myVector;
+ std::vector<std::string> v4=AllgathervVectorOfString(VectorizeFromMapOfStringVectorOfString(m4));
+ if (rank==0 && MyGlobals::_Verbose>20)
+ {
+ std::map< std::string,std::vector<std::string> > m5=DevectorizeToMapOfStringVectorOfString(v4);
+ std::map< std::string,std::vector<std::string> > m6=DeleteDuplicatesInMapOfStringVectorOfString(m5);
+ std::cout<< "m5 is : a map of size "<<m5.size() << std::endl;
+ std::cout<< ReprMapOfStringVectorOfString(m5) << std::endl;
+ std::cout<< "m6 is : a map from m5 with deleteDuplicates of size " << m6.size() << std::endl;
+ std::cout<< ReprMapOfStringVectorOfString(m6) << std::endl;
+ }
+ if (MyGlobals::_Verbose)
+ std::cout<<"proc " << rank << " : OK TestMapOfStringVectorOfStringMpi END" << std::endl;
+}
+
+void MEDPARTITIONER::TestDataArrayMpi()
+{
+ int rank=MyGlobals::_Rank;
+ //int
+ {
+ ParaMEDMEM::DataArrayInt* send=ParaMEDMEM::DataArrayInt::New();
+ ParaMEDMEM::DataArrayInt* recv=0;
+ int nbOfTuples=5;
+ int numberOfComponents=3;
+ send->alloc(nbOfTuples,numberOfComponents);
+ std::vector<int> vals;
+ for (int j=0; j<nbOfTuples; j++)
+ for (int i=0; i<numberOfComponents; i++) vals.push_back((j+1)*10+i+1);
+ std::copy(vals.begin(),vals.end(),send->getPointer());
+ if (rank==0)
+ SendDataArrayInt(send, 1);
+ if (rank==1)
+ recv=RecvDataArrayInt(0);
+ if (rank==1 && MyGlobals::_Verbose>20)
+ {
+ std::cout << send->repr() << std::endl;
+ std::cout << recv->repr() << std::endl;
+ }
+ if (rank==1)
+ {
+ if (send->repr()!=recv->repr())
+ throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayInt");
+ }
+ send->decrRef();
+ if (rank==1)
+ recv->decrRef();
+ }
+ //double
+ {
+ ParaMEDMEM::DataArrayDouble* send=ParaMEDMEM::DataArrayDouble::New();
+ ParaMEDMEM::DataArrayDouble* recv=0;
+ int nbOfTuples=5;
+ int numberOfComponents=3;
+ send->alloc(nbOfTuples,numberOfComponents);
+ std::vector<double> vals;
+ for (int j=0; j<nbOfTuples; j++)
+ for (int i=0; i<numberOfComponents; i++) vals.push_back(double(j+1)+double(i+1)/10);
+ std::copy(vals.begin(),vals.end(),send->getPointer());
+ if (rank==0) SendDataArrayDouble(send, 1);
+ if (rank==1) recv=RecvDataArrayDouble(0);
+ if (rank==1 && MyGlobals::_Verbose>20)
+ {
+ std::cout << send->repr() << std::endl;
+ std::cout << recv->repr() << std::endl;
+ }
+ if (rank==1)
+ {
+ if (send->repr()!=recv->repr())
+ throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayDouble");
+ }
+ send->decrRef();
+ if (rank==1) recv->decrRef();
+ }
+
+ if (MyGlobals::_Verbose)
+ std::cout << "proc " << rank << " : OK TestDataArrayMpi END" << std::endl;
+}
+
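+//The TestPersistantMpi* functions below exercise MPI persistent requests:
+//the communication is set up once (MPI_Ssend_init/MPI_Recv_init), restarted
+//on each iteration with MPI_Start, completed with MPI_Wait, and released at
+//the end with MPI_Request_free.
+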
+void MEDPARTITIONER::TestPersistantMpi0To1(int taille, int nb)
+{
+ double temps_debut=MPI_Wtime();
+ int rank=MyGlobals::_Rank;
+ std::vector<int> x, y;
+ int tag=111111;
+ MPI_Request requete0, requete1;
+ MPI_Status statut;
+ int ok=0;
+ std::string res;
+ if (rank==0)
+ {
+ x.resize(taille);
+ MPI_Ssend_init(&x[0], taille, MPI_INT, 1, tag, MPI_COMM_WORLD , &requete0);
+ for(int k=0; k<nb; k++)
+ {
+ for (int i=0; i<taille; ++i) x[i]=k;
+          //Sending a big message --> this may take time
+ MPI_Start(&requete0);
+ //Traitement sequentiel independant de "x"
+ //...
+ MPI_Wait(&requete0, &statut);
+          //Sequential processing that modifies "x" in memory
+ //x=...
+ }
+ MPI_Request_free(&requete0);
+ }
+ else if (rank == 1)
+ {
+ y.resize(taille);
+ MPI_Recv_init(&y[0], taille, MPI_INT, 0, tag, MPI_COMM_WORLD , &requete1);
+ for(int k=0; k<nb; k++)
+ {
+          //Sequential pre-processing
+ //...
+ for (int i=0; i<taille; ++i) y[i]=(-1);
+          //Receiving the big message --> this may take time
+ MPI_Start(&requete1);
+ //Traitement sequentiel independant de "y"
+ //...
+ MPI_Wait(&requete1, &statut);
+ //Traitement sequentiel dependant de "y"
+ //...=f(y)
+ int nbb=0;
+ for (int i=0; i<taille; ++i)
+ if (y[i]==k)
+ nbb++;
+ if (nbb==taille)
+ ok++;
+ if (MyGlobals::_Verbose>9)
+ {
+ res="0K";
+ if (nbb!=taille)
+ res="KO";
+ std::cout << res << k << " ";
+ }
+ }
+ res="0K";
+ if (ok!=nb)
+ res="BAD";
+ if (MyGlobals::_Verbose>1)
+ std::cout << "result " << res << " time(sec) " << MPI_Wtime()-temps_debut << std::endl;
+ MPI_Request_free(&requete1);
+ }
+ //end_time=(MPI_WTIME()-start_time);
+}
+
+void MEDPARTITIONER::TestPersistantMpiRing(int taille, int nb)
+{
+ double temps_debut=MPI_Wtime();
+ int befo, next, rank, wsize, tagbefo, tagnext;
+ rank=MyGlobals::_Rank;
+ wsize=MyGlobals::_World_Size;
+ befo=rank-1; if (befo<0) befo=wsize-1;
+ next=rank+1; if (next>=wsize) next=0;
+ std::vector<int> x, y;
+ tagbefo=111111+befo;
+ tagnext=111111+rank;
+ MPI_Request requete0, requete1;
+ MPI_Status statut1, statut2;
+ int ok=0;
+ std::string res;
+ //cout<<"ini|"<<rank<<'|'<<befo<<'|'<<next<<' ';
+ {
+ x.resize(taille);
+ y.resize(taille);
+ MPI_Ssend_init(&x[0], taille, MPI_INT, next, tagnext, MPI_COMM_WORLD , &requete0);
+ MPI_Recv_init(&y[0], taille, MPI_INT, befo, tagbefo, MPI_COMM_WORLD , &requete1);
+ for(int k=0; k<nb; k++)
+ {
+ for (int i=0; i<taille; ++i) x[i]=k+rank;
+          //Sending a big message --> this may take time
+ MPI_Start(&requete0);
+          //Receiving the big message --> this may take time
+ for (int i=0; i<taille; ++i) y[i]=(-1);
+ MPI_Start(&requete1);
+ //Traitement sequentiel independant de "x"
+ //...
+ //Traitement sequentiel independant de "y"
+ //...
+ MPI_Wait(&requete1, &statut1);
+ //Traitement sequentiel dependant de "y"
+ //...=f(y)
+ int nbb=0;
+ for (int i=0; i<taille; ++i)
+ if (y[i]==k+befo)
+ nbb++;
+ if (nbb==taille)
+ ok++;
+ if (MyGlobals::_Verbose>9)
+ {
+ res="0K"+IntToStr(rank);
+ if (nbb!=taille)
+ res="KO"+IntToStr(rank);
+ std::cout << res << k << " ";
+ }
+ MPI_Wait(&requete0, &statut2);
+          //Sequential processing that modifies "x" in memory
+ //x=...
+ }
+ res="0K"; if (ok!=nb) res="MAUVAIS";
+ temps_debut=MPI_Wtime()-temps_debut;
+ MPI_Request_free(&requete1);
+ MPI_Request_free(&requete0);
+ }
+ //end_time=(MPI_WTIME()-start_time);
+ if (MyGlobals::_Verbose>1)
+ std::cout << "result on proc " << rank << " " << res << " time(sec) " << temps_debut << std::endl;
+}
+
+void MEDPARTITIONER::TestPersistantMpiRingOnCommSplit(int size, int nb)
+{
+ double temps_debut=MPI_Wtime();
+ int rank=MyGlobals::_Rank;
+ MPI_Comm newcomm;
+ int color=1;
+ int rankMax=4;
+ if (rank>=rankMax)
+ color=MPI_UNDEFINED;
+ //MPI_Comm_dup (MPI_COMM_WORLD, &newcomm) ;
+ MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm);
+
+ int befo, next, wsize, tagbefo, tagnext;
+ wsize=rankMax;
+ if (wsize>MyGlobals::_World_Size)
+ wsize=MyGlobals::_World_Size;
+ befo=rank-1;
+ if (befo<0)
+ befo=wsize-1;
+ next=rank+1;
+ if (next>=wsize)
+ next=0;
+ std::vector<int> x, y;
+ tagbefo=111111+befo;
+ tagnext=111111+rank;
+ MPI_Request requete0, requete1;
+ MPI_Status statut1, statut2;
+ int ok=0;
+ std::string res;
+
+ if (color==1)
+ {
+ x.resize(size);
+ y.resize(size);
+ MPI_Ssend_init(&x[0], size, MPI_INT, next, tagnext, newcomm , &requete0);
+ MPI_Recv_init(&y[0], size, MPI_INT, befo, tagbefo, newcomm , &requete1);
+ for(int k=0; k<nb; k++)
+ {
+ for (int i=0; i<size; ++i)
+ x[i]=k+rank;
+          //Sending the big message --> time consuming
+ MPI_Start(&requete0);
+          //Receiving the big message --> time consuming
+ for (int i=0; i<size; ++i)
+ y[i]=-1;
+ MPI_Start(&requete1);
+ //Traitement sequentiel independant de "x"
+ //...
+ //Traitement sequentiel independant de "y"
+ //...
+ //cout<<"dsr|"<<rank<<' ';
+ MPI_Wait(&requete1, &statut1);
+ //Traitement sequentiel dependant de "y"
+ //...=f(y)
+ int nbb=0;
+ for (int i=0; i<size; ++i)
+ if (y[i]==k+befo)
+ nbb++;
+ if (nbb==size)
+ ok++;
+ if (MyGlobals::_Verbose>9)
+ {
+ res="0K"+IntToStr(rank);
+ if (nbb!=size)
+ res="KO"+IntToStr(rank);
+ std::cout << res << k << " ";
+ }
+ MPI_Wait(&requete0, &statut2);
+          //Sequential processing that modifies "x" in memory
+ //x=...
+ }
+ res="0K";
+ if (ok!=nb)
+ res="MAUVAIS";
+ temps_debut=MPI_Wtime()-temps_debut;
+ MPI_Request_free(&requete1);
+ MPI_Request_free(&requete0);
+ }
+ //MPI_Barrier(MPI_COMM_WORLD);
+ if (color==1)
+ MPI_Comm_free(&newcomm);
+ if (MyGlobals::_Verbose>1)
+ std::cout << "resultat proc " << rank <<" " << res << " time(sec) " << temps_debut << std::endl;
+}
MEDPARTITIONER_MeshCollectionMedAsciiDriver.cxx \
MEDPARTITIONER_Graph.cxx\
MEDPARTITIONER_UserGraph.cxx\
-MEDPARTITIONER_JointFinder.cxx \
MEDPARTITIONER_SkyLineArray.cxx \
-MEDPARTITIONER_ConnectZone.cxx
+MEDPARTITIONER_ConnectZone.cxx \
+MEDPARTITIONER_Utils.cxx
libmedpartitioner_la_CPPFLAGS = $(MPI_INCLUDES) $(MED3_INCLUDES) $(HDF5_INCLUDES) \
$(LIBXML_INCLUDES) -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../MEDCoupling \
if MPI_IS_OK
dist_libmedpartitioner_la_SOURCES += MEDPARTITIONER_ParaDomainSelector.cxx \
- MEDPARTITIONER_Utils.cxx \
- MEDPARTITIONER_ParallelTopology.cxx
+ MEDPARTITIONER_UtilsPara.cxx \
+ MEDPARTITIONER_ParallelTopology.cxx \
+ MEDPARTITIONER_JointFinder.cxx
if MED_ENABLE_PARMETIS
dist_libmedpartitioner_la_SOURCES += MEDPARTITIONER_MetisGraph.cxx
medpartitioner_para_CPPFLAGS = -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../MEDCoupling \
-I$(srcdir)/../MEDLoader -I$(srcdir)/../INTERP_KERNEL $(MPI_INCLUDES) $(PARMETIS_CPPFLAGS)
medpartitioner_para_LDADD = libmedpartitioner.la
+else !MPI_IS_OK
+ bin_PROGRAMS = medpartitioner
+ dist_medpartitioner_SOURCES = medpartitioner.cxx
+ medpartitioner_CPPFLAGS = -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../MEDCoupling \
+ -I$(srcdir)/../MEDLoader -I$(srcdir)/../INTERP_KERNEL $(METIS_CPPFLAGS) $(SCOTCH_CPPFLAGS)
+ medpartitioner_LDADD = libmedpartitioner.la
endif
OBSOLETE_FILES =
--- /dev/null
+// Copyright (C) 2007-2011 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+// MED medpartitioner : tool to split n MED files into p separate
+// MED files with a partitioning specified
+// by an external tool
+// File : medpartitioner.cxx
+// Author : Vincent BERGEAUD (CEA-DEN/DANS/DM2S/SFME/LGLS)
+// Module : MED
+//
+
+#include "MEDPARTITIONER_Graph.hxx"
+#include "MEDPARTITIONER_Topology.hxx"
+#include "MEDPARTITIONER_MeshCollection.hxx"
+
+#ifdef BOOST_PROGRAM_OPTIONS_LIB
+#include <boost/program_options.hpp>
+#endif
+
+#include <string>
+#include <fstream>
+#include <cstring>
+#include <cstdlib>
+#include <iostream>
+
+#ifdef BOOST_PROGRAM_OPTIONS_LIB
+namespace po=boost::program_options;
+#endif
+
+int main(int argc, char** argv)
+{
+#ifndef ENABLE_METIS
+#ifndef ENABLE_SCOTCH
+ std::cout << "Sorry, no one split method is available. Please, compile with METIS or SCOTCH." << std::endl;
+ return 1;
+#endif
+#endif
+
+ // Defining options
+ // by parsing the command line
+ bool mesh_only = false;
+ bool is_sequential = true;
+ bool xml_output_master=true;
+ bool creates_boundary_faces=false;
+ bool split_families=false;
+ bool empty_groups=false;
+
+ std::string input;
+ std::string output;
+ std::string meshname;
+ std::string library;
+ int ndomains;
+
+#ifdef BOOST_PROGRAM_OPTIONS_LIB
+ // Use boost::program_options for command-line options parsing
+ po::options_description desc("Available options of medpartitioner V1.0");
+ desc.add_options()
+ ("help","produces this help message")
+ ("mesh-only","prevents the splitter from creating the fields contained in the original file(s)")
+ ("distributed","specifies that the input file is distributed")
+ ("input-file",po::value<std::string>(),"name of the input MED file")
+ ("output-file",po::value<std::string>(),"name of the resulting file")
+ ("meshname",po::value<std::string>(),"name of the input mesh")
+#ifdef ENABLE_METIS
+#ifdef ENABLE_SCOTCH
+ ("split-method",po::value<std::string>(&library)->default_value("metis"),"name of the splitting library (metis,scotch)")
+#endif
+#endif
+ ("ndomains",po::value<int>(&ndomains)->default_value(1),"number of subdomains in the output file")
+ ("plain-master","creates a plain masterfile instead of an XML file")
+ ("creates-boundary-faces","creates the necessary faces so that faces joints are created in the output files")
+ ("family-splitting","preserves the family names instead of focusing on the groups")
+ ("empty-groups","creates empty groups in zones that do not contain a group from the original domain");
+
+ po::variables_map vm;
+ po::store(po::parse_command_line(argc,argv,desc),vm);
+ po::notify(vm);
+
+ if (vm.count("help"))
+ {
+ std::cout << desc << "\n";
+ return 1;
+ }
+
+ if (!vm.count("ndomains"))
+ {
+ std::cout << "ndomains must be specified !"<< std::endl;
+ return 1;
+ }
+
+ ndomains = vm["ndomains"].as<int>();
+ if (!vm.count("input-file") || !vm.count("output-file"))
+ {
+ std::cout << "input-file and output-file names must be specified" << std::endl;
+ return 1;
+ }
+
+ if (!vm.count("distributed") && !vm.count("meshname") )
+ {
+ std::cout << "for a serial MED file, mesh name must be selected with --meshname=..." << std::endl;
+ return 1;
+ }
+
+ input = vm["input-file"].as<std::string>();
+ output = vm["output-file"].as<std::string>();
+
+ if (vm.count("mesh-only"))
+ mesh_only=true;
+
+ if (vm.count("distributed"))
+ is_sequential=false;
+
+ if (is_sequential)
+ meshname = vm["meshname"].as<std::string>();
+
+ if (vm.count("plain-master"))
+ xml_output_master=false;
+
+ if (vm.count("creates-boundary-faces"))
+ creates_boundary_faces=true;
+
+ if (vm.count("split-families"))
+ split_families=true;
+
+ if (vm.count("empty-groups"))
+ empty_groups=true;
+
+#else // BOOST_PROGRAM_OPTIONS_LIB
+
+ // Primitive parsing of command-line options
+
+ std::string desc ("Available options of medpartitioner V1.0:\n"
+ "\t--help : produces this help message\n"
+ "\t--mesh-only : do not create the fields contained in the original file(s)\n"
+ "\t--distributed : specifies that the input file is distributed\n"
+ "\t--input-file=<string> : name of the input MED file\n"
+ "\t--output-file=<string> : name of the resulting file\n"
+ "\t--meshname=<string> : name of the input mesh (not used with --distributed option)\n"
+ "\t--ndomains=<number> : number of subdomains in the output file, default is 1\n"
+#ifdef ENABLE_METIS
+#ifdef ENABLE_SCOTCH
+ "\t--split-method=<string>: name of the splitting library (metis/scotch), default is metis\n"
+#endif
+#endif
+ "\t--plain-master : creates a plain masterfile instead of an XML file\n"
+ "\t--creates-boundary-faces: creates the necessary faces so that faces joints are created in the output files\n"
+ "\t--family-splitting : preserves the family names instead of focusing on the groups\n"
+ "\t--empty-groups : creates empty groups in zones that do not contain a group from the original domain"
+ );
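+
+  // Example invocations (file names here are hypothetical):
+  //   medpartitioner --input-file=mesh.med --output-file=part/mesh --meshname=Mesh_1 --ndomains=4
+  //   medpartitioner --input-file=master.xml --output-file=part/mesh --distributed --ndomains=8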
+
+ if (argc < 4)
+ {
+ std::cout << desc.c_str() << std::endl;
+ return 1;
+ }
+
+ for (int i = 1; i < argc; i++)
+ {
+ if (strlen(argv[i]) < 3)
+ {
+ std::cout << desc.c_str() << std::endl;
+ return 1;
+ }
+
+ if (strncmp(argv[i],"--m",3) == 0)
+ {
+ if (strcmp(argv[i],"--mesh-only") == 0)
+ {
+ mesh_only = true;
+ std::cout << "\tmesh-only = " << mesh_only << std::endl; // tmp
+ }
+ else if (strlen(argv[i]) > 11)
+ { // "--meshname="
+ meshname = (argv[i] + 11);
+ std::cout << "\tmeshname = " << meshname << std::endl; // tmp
+ }
+ }
+ else if (strncmp(argv[i],"--d",3) == 0)
+ {
+ is_sequential = false;
+ std::cout << "\tis_sequential = " << is_sequential << std::endl; // tmp
+ }
+ else if (strncmp(argv[i],"--i",3) == 0)
+ {
+ if (strlen(argv[i]) > 13)
+ { // "--input-file="
+ input = (argv[i] + 13);
+ std::cout << "\tinput-file = " << input << std::endl; // tmp
+ }
+ }
+ else if (strncmp(argv[i],"--o",3) == 0)
+ {
+ if (strlen(argv[i]) > 14)
+ { // "--output-file="
+ output = (argv[i] + 14);
+ std::cout << "\toutput-file = " << output << std::endl; // tmp
+ }
+ }
+ else if (strncmp(argv[i],"--s",3) == 0)
+ {
+ if (strlen(argv[i]) > 15)
+ { // "--split-method="
+ library = (argv[i] + 15);
+ std::cout << "\tsplit-method = " << library << std::endl; // tmp
+ }
+ }
+ else if (strncmp(argv[i],"--f",3) == 0)
+ { //"--family-splitting"
+ split_families=true;
+ std::cout << "\tfamily-splitting true" << std::endl; // tmp
+ }
+ else if (strncmp(argv[i],"--n",3) == 0)
+ {
+ if (strlen(argv[i]) > 11)
+ { // "--ndomains="
+ ndomains = atoi(argv[i] + 11);
+ std::cout << "\tndomains = " << ndomains << std::endl; // tmp
+ }
+ }
+ else if (strncmp(argv[i],"--p",3) == 0)
+ { // "--plain-master"
+ xml_output_master = false;
+ std::cout << "\txml_output_master = " << xml_output_master << std::endl; // tmp
+ }
+ else if (strncmp(argv[i],"--c",3) == 0)
+ { // "--creates-boundary-faces"
+ creates_boundary_faces = true;
+ std::cout << "\tcreates_boundary_faces = " << creates_boundary_faces << std::endl; // tmp
+ }
+ else if (strncmp(argv[i],"--e",3) == 0)
+ { // "--empty-groups"
+ empty_groups = true;
+ std::cout << "\tempty_groups = true" << std::endl; // tmp
+ }
+ else
+ {
+ std::cout << desc.c_str() << std::endl;
+ return 1;
+ }
+ }
+
+ if (is_sequential && meshname.empty())
+ {
+ std::cout << "Mesh name must be given for sequential(not distributed) input file." << std::endl;
+ std::cout << desc << std::endl;
+ return 1;
+ }
+
+#endif // BOOST_PROGRAM_OPTIONS_LIB
+
+
+ //testing whether it is possible to write a file at the specified location
+ std::string outputtest = output + ".testioms.";
+ std::ofstream testfile (outputtest.c_str());
+ if (testfile.fail())
+ {
+ std::cout << "MEDPARTITIONER : output-file directory does not exist or is in read-only access" << std::endl;
+ return 1;
+ };
+ //deletes test file
+ remove(outputtest.c_str());
+
+ // Beginning of the computation
+
+ // Loading the mesh collection
+ MEDPARTITIONER::MeshCollection* collection;
+ std::cout << "MEDPARTITIONER : reading input files "<< std::endl;
+ if (is_sequential)
+ collection = new MEDPARTITIONER::MeshCollection(input,meshname);
+ else
+ collection = new MEDPARTITIONER::MeshCollection(input);
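+  // The one-argument constructor reads a distributed master file that
+  // references the per-domain MED files; the two-argument form reads a
+  // single (serial) MED file and extracts the named mesh.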
+
+ std::cout << "MEDPARTITIONER : computing partition "<< std::endl;
+
+ // Creating the graph and partitioning it
+#ifdef ENABLE_METIS
+#ifndef ENABLE_SCOTCH
+ library = "metis";
+#endif
+#else
+ library = "scotch";
+#endif
+ std::cout << "\tsplit-method = " << library << std::endl; // tmp
+
+ MEDPARTITIONER::Topology* new_topo;
+ if (library == "metis")
+ new_topo = collection->createPartition(ndomains,MEDPARTITIONER::Graph::METIS);
+ else
+ new_topo = collection->createPartition(ndomains,MEDPARTITIONER::Graph::SCOTCH);
+
+ std::cout << "MEDPARTITIONER : creating new meshes"<< std::endl;
+
+ // Creating a new mesh collection from the partitioning
+ MEDPARTITIONER::MeshCollection new_collection(*collection, new_topo, split_families, empty_groups);
+ if (mesh_only)
+ {
+ delete collection;
+ collection=0;
+ }
+
+ if (!xml_output_master)
+ new_collection.setDriverType(MEDPARTITIONER::MedAscii);
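+  // The MedAscii driver produces a plain-text master file instead of the
+  // default XML master file (see the --plain-master option above).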
+
+ // new_collection.setSubdomainBoundaryCreates(creates_boundary_faces);
+
+ std::cout << "MEDPARTITIONER : writing output files "<< std::endl;
+ new_collection.write(output);
+
+ // Casting the fields on the new collection
+ // if (!mesh_only)
+ // new_collection.castAllFields(*collection);
+
+
+ // Cleaning memory
+ delete collection;
+ delete new_topo;
+
+ return 0;
+}