From: ageay Date: Thu, 15 Mar 2012 14:55:27 +0000 (+0000) Subject: Some improvements: make MEDPartitioner buildable without MPI by guarding MPI-dependent code with HAVE_MPI2 X-Git-Tag: V6_main_FINAL~787 X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=b230c6c64856613b825b68ec9257691ee8de6cb3;p=tools%2Fmedcoupling.git Some improvements: make MEDPartitioner buildable without MPI by guarding MPI-dependent code with HAVE_MPI2. --- diff --git a/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx b/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx index 7fe5a4f73..b3560e5b9 100644 --- a/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx +++ b/src/MEDPartitioner/MEDPARTITIONER_MeshCollection.cxx @@ -21,9 +21,12 @@ #include "MEDPARTITIONER_MeshCollectionDriver.hxx" #include "MEDPARTITIONER_MeshCollectionMedXmlDriver.hxx" #include "MEDPARTITIONER_MeshCollectionMedAsciiDriver.hxx" -#include "MEDPARTITIONER_ParallelTopology.hxx" #include "MEDPARTITIONER_ParaDomainSelector.hxx" +#include "MEDPARTITIONER_Topology.hxx" +#ifdef HAVE_MPI2 #include "MEDPARTITIONER_JointFinder.hxx" +#include "MEDPARTITIONER_ParallelTopology.hxx" +#endif #include "MEDPARTITIONER_Graph.hxx" #include "MEDPARTITIONER_UserGraph.hxx" #include "MEDPARTITIONER_Utils.hxx" @@ -204,7 +207,7 @@ void MEDPARTITIONER::MeshCollection::castCellMeshes(MeshCollection& initialColle } } } - +#ifdef HAVE_MPI2 if (isParallelMode()) { //if (MyGlobals::_Verbose>300) std::cout<<"proc "<200) std::cout << "proc " << rank << " : castCellMeshes fusing" << std::endl; @@ -292,6 +295,7 @@ void MEDPARTITIONER::MeshCollection::createNodeMapping( MeshCollection& initialC for (int inew=0; inew<_topology->nbDomain(); inew++) //cvwat12 { +#ifdef HAVE_MPI2 //sending meshes for parallel computation if (isParallelMode() && _domain_selector->isMyDomain(inew) && !_domain_selector->isMyDomain(iold)) _domain_selector->sendMesh(*(getMesh(inew)), _domain_selector->getProcessorID(iold)); @@ -311,6 +315,9 @@ void MEDPARTITIONER::MeshCollection::createNodeMapping( MeshCollection& initialC mesh->decrRef(); } else if (!isParallelMode() || (_domain_selector->isMyDomain(inew) && _domain_selector->isMyDomain(iold))) +#else + if (!isParallelMode() || (_domain_selector->isMyDomain(inew) && _domain_selector->isMyDomain(iold))) +#endif { ParaMEDMEM::DataArrayDouble* coords = getMesh(inew)->getCoords(); for (int inode=0; inode<_mesh[inew]->getNumberOfNodes();inode++) @@ -479,6 +486,7 @@ void MEDPARTITIONER::MeshCollection::castFaceMeshes(MeshCollection& initialColle } // send/receive stuff +#ifdef HAVE_MPI2 if (isParallelMode()) { ParaMEDMEM::MEDCouplingUMesh *empty=CreateEmptyMEDCouplingUMesh(); @@ -501,7 +509,7 @@ void MEDPARTITIONER::MeshCollection::castFaceMeshes(MeshCollection& initialColle } empty->decrRef(); } - +#endif //recollecting the bits of splitMeshes to fuse them into one if (MyGlobals::_Verbose>300) std::cout<<"proc "<rank()<<" : castIntField SendIntVec "<getProcessorID(inew)); } //receiving arrays from distant domains @@ -614,6 +622,7 @@ void MEDPARTITIONER::MeshCollection::castIntField2(std::vector > > commonDistantNodes; int nbdomain=_topology->nbDomain(); +#ifdef HAVE_MPI2 if (isParallelMode()) { _joint_finder=new JointFinder(*this); @@ -1218,8 +1234,9 @@ void MEDPARTITIONER::MeshCollection::buildCellGraph(MEDPARTITIONER::SkyLineArray commonDistantNodes=_joint_finder->getDistantNodeCell(); } - if (MyGlobals::_Verbose>500) _joint_finder->print(); - + if (MyGlobals::_Verbose>500) + _joint_finder->print(); +#endif //looking for reverse nodal connectivity in global numbering for (int idomain=0; idomaindecrRef(); index->decrRef(); +#ifdef HAVE_MPI2 for (int iother=0; iother::iterator it;
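
The recurring change in the hunks above (and throughout this commit) is a compile-time switch: every MPI-dependent path is fenced with #ifdef HAVE_MPI2 so that the same sources build in both the serial and the parallel configuration (HAVE_MPI2 being set by the build system, as with the MPI_IS_OK conditional in Makefile.am below). A minimal, self-contained sketch of the idiom; computeLocally() is a hypothetical stand-in for the serial code path:

#include <iostream>
#ifdef HAVE_MPI2
#include <mpi.h>
#endif

// Hypothetical serial kernel, shared by both configurations.
static double computeLocally() { return 42.; }

int main(int argc, char *argv[])
{
  double result = computeLocally();
#ifdef HAVE_MPI2
  // Parallel build: combine the per-rank results.
  MPI_Init(&argc, &argv);
  double global = 0.;
  MPI_Allreduce(&result, &global, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  result = global;
  MPI_Finalize();
#endif
  // Serial build: the local value is already the answer.
  std::cout << result << std::endl;
  return 0;
}

@@ -1261,6 +1279,7 @@ void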
MEDPARTITIONER::MeshCollection::buildCellGraph(MEDPARTITIONER::SkyLineArray cell2node.insert(make_pair(globalCell, globalNode)); } } +#endif } //endfor idomain //creating graph arcs (cell to cell relations) @@ -1423,7 +1442,10 @@ MEDPARTITIONER::Topology* MEDPARTITIONER::MeshCollection::createPartition(int nb if (MyGlobals::_Is0verbose>10) std::cout << "building new topology" << std::endl; //cellGraph is a shared pointer - Topology* topology=new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension()); + Topology *topology=0; +#ifdef HAVE_MPI2 + topology=new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension()); +#endif //cleaning delete [] edgeweights; delete cellGraph; @@ -1455,8 +1477,10 @@ MEDPARTITIONER::Topology* MEDPARTITIONER::MeshCollection::createPartition(const cellGraph=new UserGraph(array, partition, _topology->nbCells()); //cellGraph is a shared pointer - Topology* topology = new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension()); - + Topology *topology=0; +#ifdef HAVE_MPI2 + topology=new ParallelTopology (cellGraph, getTopology(), nbdomain, getMeshDimension()); +#endif // if (array!=0) delete array; delete cellGraph; return topology; diff --git a/src/MEDPartitioner/MEDPARTITIONER_Utils.cxx b/src/MEDPartitioner/MEDPARTITIONER_Utils.cxx index 46b5f14ff..cc5c87482 100644 --- a/src/MEDPartitioner/MEDPARTITIONER_Utils.cxx +++ b/src/MEDPartitioner/MEDPARTITIONER_Utils.cxx @@ -34,10 +34,7 @@ #include #include #include - -#ifdef HAVE_MPI2 -#include -#endif +#include using namespace MEDPARTITIONER; @@ -420,69 +417,6 @@ std::map< std::string,std::vector > MEDPARTITIONER::DeleteDuplicate return res; } -/*! - * not optimized but suffisant - * return empty vector if i am not target - */ -std::vector MEDPARTITIONER::SendAndReceiveVectorOfString(const std::vector& vec, const int source, const int target) -{ - int rank=MyGlobals::_Rank; - - MPI_Status status; - int tag = 111001; - if (rank == source) - { - std::string str=SerializeFromVectorOfString(vec); - int size=str.length(); - MPI_Send( &size, 1, MPI_INT, target, tag, MPI_COMM_WORLD ); - MPI_Send( (void*)str.data(), str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD ); - } - - int recSize=0; - if (rank == target) - { - MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); - std::string recData(recSize,'x'); - MPI_Recv((void*)recData.data(), recSize, MPI_CHAR, source, tag+100, MPI_COMM_WORLD, &status); - return DeserializeToVectorOfString(recData); //not empty one for target proc - } - std::vector res; - return res; //empty one for other proc -} - -/*! - * strings NO need all same size!!!! - */ -std::vector MEDPARTITIONER::AllgathervVectorOfString(const std::vector& vec) -{ - int world_size=MyGlobals::_World_Size; - std::string str=SerializeFromVectorOfString(vec); - - std::vector indexes(world_size); - int size=str.length(); - MPI_Allgather(&size, 1, MPI_INT, - &indexes[0], 1, MPI_INT, MPI_COMM_WORLD); - - //calcul of displacement - std::vector< int > disp(1,0); - for (int i=0; i deserial=DeserializeToVectorOfString(recData); - if (MyGlobals::_Verbose>1000) - { - std::cout << "proc "<& vec, const int source, const int target) //TODO std::string MEDPARTITIONER::Cle1ToStr(const std::string& s, const int inew) @@ -875,206 +809,6 @@ std::vector MEDPARTITIONER::BrowseAllFieldsOnMesh(const std::string return res; } -/*! - Sends content of \a vec to processor \a target. To be used with \a RecvDoubleVec method. 
- \param vec vector to be sent - \param target processor id of the target -*/ -void MEDPARTITIONER::SendDoubleVec(const std::vector& vec, const int target) -{ - int tag = 111002; - int size=vec.size(); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl; -#ifdef HAVE_MPI2 - MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD); - MPI_Send(const_cast(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD); -#endif -} - -/*! Receives messages from proc \a source to fill vector vec. - To be used with \a SendDoubleVec method. - - \param vec vector that is filled - \param source processor id of the incoming messages -*/ -std::vector* MEDPARTITIONER::RecvDoubleVec(const int source) -{ - int tag = 111002; - int size; -#ifdef HAVE_MPI2 - MPI_Status status; - MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl; - std::vector* vec=new std::vector; - vec->resize(size); - MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status); -#endif - return vec; -} - -void MEDPARTITIONER::RecvDoubleVec(std::vector& vec, const int source) -{ - int tag = 111002; - int size; -#ifdef HAVE_MPI2 - MPI_Status status; - MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); - if (MyGlobals::_Verbose>1000) - std::cout<< "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;; - vec.resize(size); - MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status); -#endif -} -/*! - Sends content of \a vec to processor \a target. To be used with \a RecvIntVec method. - \param vec vector to be sent - \param target processor id of the target -*/ -void MEDPARTITIONER::SendIntVec(const std::vector& vec, const int target) -{ - int tag = 111003; - int size=vec.size(); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl; -#ifdef HAVE_MPI2 - MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD); - MPI_Send(const_cast(&vec[0]), size,MPI_INT, target, tag+100, MPI_COMM_WORLD); -#endif -} - -/*! Receives messages from proc \a source to fill vector vec. - To be used with \a SendIntVec method. - \param vec vector that is filled - \param source processor id of the incoming messages -*/ -std::vector *MEDPARTITIONER::RecvIntVec(const int source) -{ - int tag = 111003; - int size; -#ifdef HAVE_MPI2 - MPI_Status status; - MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl; - std::vector *vec=new std::vector; - vec->resize(size); - MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD, &status); -#endif - return vec; -} - -void MEDPARTITIONER::RecvIntVec(std::vector& vec, const int source) -{ - int tag = 111003; - int size; -#ifdef HAVE_MPI2 - MPI_Status status; - MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl; - vec.resize(size); - MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD,&status); -#endif -} - -/*! - Sends content of \a dataArrayInt to processor \a target. - To be used with \a RecvDataArrayInt method. 
- \param da dataArray to be sent - \param target processor id of the target -*/ -void MEDPARTITIONER::SendDataArrayInt(const ParaMEDMEM::DataArrayInt *da, const int target) -{ - if (da==0) - throw INTERP_KERNEL::Exception("Problem send DataArrayInt* NULL"); - int tag = 111004; - int size[3]; - size[0]=da->getNbOfElems(); - size[1]=da->getNumberOfTuples(); - size[2]=da->getNumberOfComponents(); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl; -#ifdef HAVE_MPI2 - MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD); - const int *p=da->getConstPointer(); - MPI_Send(const_cast(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD); -#endif -} - -/*! Receives messages from proc \a source to fill dataArrayInt da. - To be used with \a SendIntVec method. - \param da dataArrayInt that is filled - \param source processor id of the incoming messages -*/ -ParaMEDMEM::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source) -{ - int tag = 111004; - int size[3]; -#ifdef HAVE_MPI2 - MPI_Status status; - MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayInt " << size[0] << std::endl; - if (size[0]!=(size[1]*size[2])) - throw INTERP_KERNEL::Exception("Problem in RecvDataArrayInt incoherent sizes"); - ParaMEDMEM::DataArrayInt* da=ParaMEDMEM::DataArrayInt::New(); - da->alloc(size[1],size[2]); - int *p=da->getPointer(); - MPI_Recv(const_cast(&p[0]), size[0], MPI_INT, source, tag+100, MPI_COMM_WORLD, &status); -#endif - return da; -} - -/*! - Sends content of \a dataArrayInt to processor \a target. - To be used with \a RecvDataArrayDouble method. - \param da dataArray to be sent - \param target processor id of the target -*/ -void MEDPARTITIONER::SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble *da, const int target) -{ - if (da==0) - throw INTERP_KERNEL::Exception("Problem send DataArrayDouble* NULL"); - int tag = 111005; - int size[3]; - size[0]=da->getNbOfElems(); - size[1]=da->getNumberOfTuples(); - size[2]=da->getNumberOfComponents(); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl; -#ifdef HAVE_MPI2 - MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD); - const double *p=da->getConstPointer(); - MPI_Send(const_cast(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD); -#endif -} - -/*! Receives messages from proc \a source to fill dataArrayDouble da. - To be used with \a SendDoubleVec method. - \param da dataArrayDouble that is filled - \param source processor id of the incoming messages -*/ -ParaMEDMEM::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source) -{ - int tag = 111005; - int size[3]; -#ifdef HAVE_MPI2 - MPI_Status status; - MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status); - if (MyGlobals::_Verbose>1000) - std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayDouble " << size[0] << std::endl; - if (size[0]!=(size[1]*size[2])) - throw INTERP_KERNEL::Exception("Problem in RecvDataArrayDouble incoherent sizes"); - ParaMEDMEM::DataArrayDouble* da=ParaMEDMEM::DataArrayDouble::New(); - da->alloc(size[1],size[2]); - double *p=da->getPointer(); - MPI_Recv(const_cast(&p[0]), size[0], MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status); -#endif - return da; -} - /*! 
* create empty MEDCouplingUMesh* dim 3 */ @@ -1092,429 +826,3 @@ ParaMEDMEM::MEDCouplingUMesh* MEDPARTITIONER::CreateEmptyMEDCouplingUMesh() umesh->checkCoherency(); return umesh; } - -void MEDPARTITIONER::TestVectorOfStringMpi() -{ - int rank=MyGlobals::_Rank; - int world_size=MyGlobals::_World_Size; - std::vector myVector; - std::ostringstream oss; - oss << "hello from " << std::setw(5) << rank << " " << std::string(rank+1,'n') << - " next is an empty one"; - myVector.push_back(oss.str()); - myVector.push_back(""); - myVector.push_back("next is an singleton"); - myVector.push_back("1"); - - if (rank==0) - { - std::string s0=SerializeFromVectorOfString(myVector); - std::vector res=DeserializeToVectorOfString(s0); - if (res.size()!=myVector.size()) - throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent sizes"); - for (std::size_t i=0; i res=SendAndReceiveVectorOfString(myVector, i, j); - if ((rank==j) && MyGlobals::_Verbose>20) - std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl; - if (rank==j) - { - if (res.size()!=myVector.size()) - throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent sizes"); - for (std::size_t ii=1; ii res=AllgathervVectorOfString(myVector); - //sometimes for test - res=AllgathervVectorOfString(myVector); - res=AllgathervVectorOfString(myVector); - if (rank==0 && MyGlobals::_Verbose>20) - std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl; - if (res.size()!=myVector.size()*world_size) - throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent sizes"); - int jj=-1; - for (int j=0; j myMap; - myMap["one"]=1; - myMap["two"]=22; //a bug - myMap["three"]=3; - myMap["two"]=2; //last speaking override - - if (rank==0) - { - std::vector v2=VectorizeFromMapOfStringInt(myMap); - std::map m3=DevectorizeToMapOfStringInt(v2); - if (ReprMapOfStringInt(m3)!=ReprMapOfStringInt(myMap)) - throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringInt"); - } - - std::vector v2=AllgathervVectorOfString(VectorizeFromMapOfStringInt(myMap)); - if (rank==0 && MyGlobals::_Verbose>20) - { - std::cout << "v2 is : a vector of size " << v2.size() << std::endl; - std::cout << ReprVectorOfString(v2) << std::endl; - std::map m2=DevectorizeToMapOfStringInt(v2); - std::cout << "m2 is : a map of size " << m2.size() << std::endl; - std::cout << ReprMapOfStringInt(m2) << std::endl; - } - if (MyGlobals::_Verbose) - std::cout << "proc " << rank << " : OK TestMapOfStringIntMpi END" << std::endl; -} - -void MEDPARTITIONER::TestMapOfStringVectorOfStringMpi() -{ - int rank=MyGlobals::_Rank; - std::vector myVector; - std::ostringstream oss; - oss << "hello from " << std::setw(5) << MyGlobals::_Rank << " " << std::string(rank+1,'n') << " next is an empty one"; - myVector.push_back(oss.str()); - myVector.push_back(""); - myVector.push_back("next is an singleton"); - myVector.push_back("1"); - - if (rank==0) - { - std::map< std::string,std::vector > m2; - m2["first key"]=myVector; - m2["second key"]=myVector; - std::vector v2=VectorizeFromMapOfStringVectorOfString(m2); - std::map< std::string,std::vector > m3=DevectorizeToMapOfStringVectorOfString(v2); - if (rank==0 && MyGlobals::_Verbose>20) - std::cout << "m2 is : a MapOfStringVectorOfString of size " << m2.size() << std::endl; - std::cout << ReprMapOfStringVectorOfString(m2) << std::endl; - std::cout << "v2 is : a vector of size " << v2.size() << std::endl; - std::cout << ReprVectorOfString(v2) << 
std::endl; - std::cout << "m3 is : a map of size "< > m4; - m4["1rst key"]=myVector; - m4["2snd key"]=myVector; - std::vector v4=AllgathervVectorOfString(VectorizeFromMapOfStringVectorOfString(m4)); - if (rank==0 && MyGlobals::_Verbose>20) - { - std::map< std::string,std::vector > m5=DevectorizeToMapOfStringVectorOfString(v4); - std::map< std::string,std::vector > m6=DeleteDuplicatesInMapOfStringVectorOfString(m5); - std::cout<< "m5 is : a map of size "<alloc(nbOfTuples,numberOfComponents); - std::vector vals; - for (int j=0; jgetPointer()); - if (rank==0) - SendDataArrayInt(send, 1); - if (rank==1) - recv=RecvDataArrayInt(0); - if (rank==1 && MyGlobals::_Verbose>20) - { - std::cout << send->repr() << std::endl; - std::cout << recv->repr() << std::endl; - } - if (rank==1) - { - if (send->repr()!=recv->repr()) - throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayInt"); - } - send->decrRef(); - if (rank==1) - recv->decrRef(); - } - //double - { - ParaMEDMEM::DataArrayDouble* send=ParaMEDMEM::DataArrayDouble::New(); - ParaMEDMEM::DataArrayDouble* recv=0; - int nbOfTuples=5; - int numberOfComponents=3; - send->alloc(nbOfTuples,numberOfComponents); - std::vector vals; - for (int j=0; jgetPointer()); - if (rank==0) SendDataArrayDouble(send, 1); - if (rank==1) recv=RecvDataArrayDouble(0); - if (rank==1 && MyGlobals::_Verbose>20) - { - std::cout << send->repr() << std::endl; - std::cout << recv->repr() << std::endl; - } - if (rank==1) - { - if (send->repr()!=recv->repr()) - throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayDouble"); - } - send->decrRef(); - if (rank==1) recv->decrRef(); - } - - if (MyGlobals::_Verbose) - std::cout << "proc " << rank << " : OK TestDataArrayMpi END" << std::endl; -} - -void MEDPARTITIONER::TestPersistantMpi0To1(int taille, int nb) -{ - double temps_debut=MPI_Wtime(); - int rank=MyGlobals::_Rank; - std::vector x, y; - int tag=111111; - MPI_Request requete0, requete1; - MPI_Status statut; - int ok=0; - std::string res; - if (rank==0) - { - x.resize(taille); - MPI_Ssend_init(&x[0], taille, MPI_INT, 1, tag, MPI_COMM_WORLD , &requete0); - for(int k=0; k cela peut prendre du temps - MPI_Start(&requete0); - //Traitement sequentiel independant de "x" - //... - MPI_Wait(&requete0, &statut); - //Traitement sequentiel impliquant une modification de "x" en memoire - //x=... - } - MPI_Request_free(&requete0); - } - else if (rank == 1) - { - y.resize(taille); - MPI_Recv_init(&y[0], taille, MPI_INT, 0, tag, MPI_COMM_WORLD , &requete1); - for(int k=0; k cela peut prendre du temps - MPI_Start(&requete1); - //Traitement sequentiel independant de "y" - //... 
- MPI_Wait(&requete1, &statut); - //Traitement sequentiel dependant de "y" - //...=f(y) - int nbb=0; - for (int i=0; i9) - { - res="0K"; - if (nbb!=taille) - res="KO"; - std::cout << res << k << " "; - } - } - res="0K"; - if (ok!=nb) - res="BAD"; - if (MyGlobals::_Verbose>1) - std::cout << "result " << res << " time(sec) " << MPI_Wtime()-temps_debut << std::endl; - MPI_Request_free(&requete1); - } - //end_time=(MPI_WTIME()-start_time); -} - -void MEDPARTITIONER::TestPersistantMpiRing(int taille, int nb) -{ - double temps_debut=MPI_Wtime(); - int befo, next, rank, wsize, tagbefo, tagnext; - rank=MyGlobals::_Rank; - wsize=MyGlobals::_World_Size; - befo=rank-1; if (befo<0) befo=wsize-1; - next=rank+1; if (next>=wsize) next=0; - std::vector x, y; - tagbefo=111111+befo; - tagnext=111111+rank; - MPI_Request requete0, requete1; - MPI_Status statut1, statut2; - int ok=0; - std::string res; - //cout<<"ini|"< cela peut prendre du temps - MPI_Start(&requete0); - //Reception du gros message --> cela peut prendre du temps - for (int i=0; i9) - { - res="0K"+IntToStr(rank); - if (nbb!=taille) - res="KO"+IntToStr(rank); - std::cout << res << k << " "; - } - MPI_Wait(&requete0, &statut2); - //Traitement sequentiel impliquant une modification de "x" en memoire - //x=... - } - res="0K"; if (ok!=nb) res="MAUVAIS"; - temps_debut=MPI_Wtime()-temps_debut; - MPI_Request_free(&requete1); - MPI_Request_free(&requete0); - } - //end_time=(MPI_WTIME()-start_time); - if (MyGlobals::_Verbose>1) - std::cout << "result on proc " << rank << " " << res << " time(sec) " << temps_debut << std::endl; -} - -void MEDPARTITIONER::TestPersistantMpiRingOnCommSplit(int size, int nb) -{ - double temps_debut=MPI_Wtime(); - int rank=MyGlobals::_Rank; - MPI_Comm newcomm; - int color=1; - int rankMax=4; - if (rank>=rankMax) - color=MPI_UNDEFINED; - //MPI_Comm_dup (MPI_COMM_WORLD, &newcomm) ; - MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm); - - int befo, next, wsize, tagbefo, tagnext; - wsize=rankMax; - if (wsize>MyGlobals::_World_Size) - wsize=MyGlobals::_World_Size; - befo=rank-1; - if (befo<0) - befo=wsize-1; - next=rank+1; - if (next>=wsize) - next=0; - std::vector x, y; - tagbefo=111111+befo; - tagnext=111111+rank; - MPI_Request requete0, requete1; - MPI_Status statut1, statut2; - int ok=0; - std::string res; - - if (color==1) - { - x.resize(size); - y.resize(size); - MPI_Ssend_init(&x[0], size, MPI_INT, next, tagnext, newcomm , &requete0); - MPI_Recv_init(&y[0], size, MPI_INT, befo, tagbefo, newcomm , &requete1); - for(int k=0; k time consuming - MPI_Start(&requete0); - //Reception of big message --> time consuming - for (int i=0; i9) - { - res="0K"+IntToStr(rank); - if (nbb!=size) - res="KO"+IntToStr(rank); - std::cout << res << k << " "; - } - MPI_Wait(&requete0, &statut2); - //Traitement sequentiel impliquant une modification de "x" en memoire - //x=... 
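
The TestPersistant* routines being deleted here (and re-added unchanged in the new file MEDPARTITIONER_UtilsPara.cxx below) exercise MPI persistent requests: the transfer is described once with MPI_Ssend_init/MPI_Recv_init, then re-armed on every iteration with MPI_Start and completed with MPI_Wait, avoiding per-iteration envelope setup. A self-contained distillation of the pattern follows; it is a sketch that assumes at least two ranks, and the buffer refill stands in for the "traitement sequentiel" (sequential processing) steps sketched in the comments above.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  const int n = 1000, iters = 10, tag = 111111;
  std::vector<int> buf(n);
  if (rank < 2)                      // only the first two ranks take part
    {
      MPI_Request req;
      if (rank == 0)                 // describe the transfer once...
        MPI_Ssend_init(&buf[0], n, MPI_INT, 1, tag, MPI_COMM_WORLD, &req);
      else
        MPI_Recv_init(&buf[0], n, MPI_INT, 0, tag, MPI_COMM_WORLD, &req);
      for (int k = 0; k < iters; k++)
        {
          if (rank == 0)
            for (int i = 0; i < n; i++)
              buf[i] = i + k;        // refill the buffer before each restart
          MPI_Start(&req);           // ...then re-arm it each iteration
          MPI_Wait(&req, MPI_STATUS_IGNORE);  // complete before touching buf again
        }
      MPI_Request_free(&req);        // release the persistent request
      if (rank == 1)
        std::printf("last received value: %d\n", buf[n - 1]);
    }
  MPI_Finalize();
  return 0;
}
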
- } - res="0K"; - if (ok!=nb) - res="MAUVAIS"; - temps_debut=MPI_Wtime()-temps_debut; - MPI_Request_free(&requete1); - MPI_Request_free(&requete0); - } - //MPI_Barrier(MPI_COMM_WORLD); - if (color==1) - MPI_Comm_free(&newcomm); - if (MyGlobals::_Verbose>1) - std::cout << "resultat proc " << rank <<" " << res << " time(sec) " << temps_debut << std::endl; -} - - diff --git a/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx b/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx index e6a617088..8b548df3e 100644 --- a/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx +++ b/src/MEDPartitioner/MEDPARTITIONER_Utils.hxx @@ -77,7 +77,15 @@ namespace MEDPARTITIONER ParaMEDMEM::DataArrayInt *CreateDataArrayIntFromVector(const std::vector& v); ParaMEDMEM::DataArrayInt *CreateDataArrayIntFromVector(const std::vector& v, const int nbComponents); ParaMEDMEM::DataArrayDouble *CreateDataArrayDoubleFromVector(const std::vector& v); + + ParaMEDMEM::MEDCouplingUMesh *CreateEmptyMEDCouplingUMesh(); + std::vector BrowseFieldDouble(const ParaMEDMEM::MEDCouplingFieldDouble* fd); + std::vector BrowseAllFields(const std::string& myfile); + std::vector BrowseAllFieldsOnMesh(const std::string& myfile, const std::string& mymesh, const int idomain); + std::vector GetInfosOfField(const char *fileName, const char *meshName, const int idomain ); + +#ifdef HAVE_MPI2 //not adviced, interblocking, use sendAndReceive //void SendVectorOfString(const std::vector& vec, const int target); //std::vector RecvVectorOfString(const int source); @@ -85,11 +93,6 @@ namespace MEDPARTITIONER std::vector SendAndReceiveVectorOfString(const std::vector& vec, const int source, const int target); std::vector AllgathervVectorOfString(const std::vector& vec); - std::vector BrowseFieldDouble(const ParaMEDMEM::MEDCouplingFieldDouble* fd); - std::vector BrowseAllFields(const std::string& myfile); - std::vector BrowseAllFieldsOnMesh(const std::string& myfile, const std::string& mymesh, const int idomain); - std::vector GetInfosOfField(const char *fileName, const char *meshName, const int idomain ); - void SendDoubleVec(const std::vector& vec, const int target); std::vector *RecvDoubleVec(const int source); void RecvDoubleVec(std::vector& vec, const int source); @@ -102,9 +105,7 @@ namespace MEDPARTITIONER ParaMEDMEM::DataArrayInt *RecvDataArrayInt(const int source); void SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble* da, const int target); ParaMEDMEM::DataArrayDouble *RecvDataArrayDouble(const int source); - - ParaMEDMEM::MEDCouplingUMesh *CreateEmptyMEDCouplingUMesh(); - + void TestVectorOfStringMpi(); void TestMapOfStringIntMpi(); void TestMapOfStringVectorOfStringMpi(); @@ -112,6 +113,7 @@ namespace MEDPARTITIONER void TestPersistantMpi0To1(int taille, int nb); void TestPersistantMpiRing(int taille, int nb); void TestPersistantMpiRingOnCommSplit(int taille, int nb); +#endif class MyGlobals { diff --git a/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx b/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx new file mode 100644 index 000000000..e52f30260 --- /dev/null +++ b/src/MEDPartitioner/MEDPARTITIONER_UtilsPara.cxx @@ -0,0 +1,729 @@ +// Copyright (C) 2007-2011 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "MEDPARTITIONER_Utils.hxx" + +#include "MEDLoader.hxx" +#include "MEDLoaderBase.hxx" +#include "MEDFileUtilities.hxx" +#include "CellModel.hxx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "InterpKernelException.hxx" +#include "MEDCouplingAutoRefCountObjectPtr.hxx" +#include "InterpKernelAutoPtr.hxx" + +#include +#include +#include +#include +#include + +#ifdef HAVE_MPI2 +#include +#endif + +using namespace MEDPARTITIONER; + +/*! + * not optimized but suffisant + * return empty vector if i am not target + */ +std::vector MEDPARTITIONER::SendAndReceiveVectorOfString(const std::vector& vec, const int source, const int target) +{ + int rank=MyGlobals::_Rank; + + MPI_Status status; + int tag = 111001; + if (rank == source) + { + std::string str=SerializeFromVectorOfString(vec); + int size=str.length(); + MPI_Send( &size, 1, MPI_INT, target, tag, MPI_COMM_WORLD ); + MPI_Send( (void*)str.data(), str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD ); + } + + int recSize=0; + if (rank == target) + { + MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); + std::string recData(recSize,'x'); + MPI_Recv((void*)recData.data(), recSize, MPI_CHAR, source, tag+100, MPI_COMM_WORLD, &status); + return DeserializeToVectorOfString(recData); //not empty one for target proc + } + std::vector res; + return res; //empty one for other proc +} + +/*! + * strings NO need all same size!!!! + */ +std::vector MEDPARTITIONER::AllgathervVectorOfString(const std::vector& vec) +{ + int world_size=MyGlobals::_World_Size; + std::string str=SerializeFromVectorOfString(vec); + + std::vector indexes(world_size); + int size=str.length(); + MPI_Allgather(&size, 1, MPI_INT, + &indexes[0], 1, MPI_INT, MPI_COMM_WORLD); + + //calcul of displacement + std::vector< int > disp(1,0); + for (int i=0; i deserial=DeserializeToVectorOfString(recData); + if (MyGlobals::_Verbose>1000) + { + std::cout << "proc "<& vec, const int target) +{ + int tag = 111002; + int size=vec.size(); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl; +#ifdef HAVE_MPI2 + MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD); + MPI_Send(const_cast(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD); +#endif +} + +/*! Receives messages from proc \a source to fill vector vec. + To be used with \a SendDoubleVec method. 
+ + \param vec vector that is filled + \param source processor id of the incoming messages +*/ +std::vector* MEDPARTITIONER::RecvDoubleVec(const int source) +{ + int tag = 111002; + int size; +#ifdef HAVE_MPI2 + MPI_Status status; + MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl; + std::vector* vec=new std::vector; + vec->resize(size); + MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status); +#endif + return vec; +} + +void MEDPARTITIONER::RecvDoubleVec(std::vector& vec, const int source) +{ + int tag = 111002; + int size; +#ifdef HAVE_MPI2 + MPI_Status status; + MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); + if (MyGlobals::_Verbose>1000) + std::cout<< "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;; + vec.resize(size); + MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status); +#endif +} +/*! + Sends content of \a vec to processor \a target. To be used with \a RecvIntVec method. + \param vec vector to be sent + \param target processor id of the target +*/ +void MEDPARTITIONER::SendIntVec(const std::vector& vec, const int target) +{ + int tag = 111003; + int size=vec.size(); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl; +#ifdef HAVE_MPI2 + MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD); + MPI_Send(const_cast(&vec[0]), size,MPI_INT, target, tag+100, MPI_COMM_WORLD); +#endif +} + +/*! Receives messages from proc \a source to fill vector vec. + To be used with \a SendIntVec method. + \param vec vector that is filled + \param source processor id of the incoming messages +*/ +std::vector *MEDPARTITIONER::RecvIntVec(const int source) +{ + int tag = 111003; + int size; +#ifdef HAVE_MPI2 + MPI_Status status; + MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl; + std::vector *vec=new std::vector; + vec->resize(size); + MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD, &status); +#endif + return vec; +} + +void MEDPARTITIONER::RecvIntVec(std::vector& vec, const int source) +{ + int tag = 111003; + int size; +#ifdef HAVE_MPI2 + MPI_Status status; + MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl; + vec.resize(size); + MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD,&status); +#endif +} + +/*! + Sends content of \a dataArrayInt to processor \a target. + To be used with \a RecvDataArrayInt method. 
+ \param da dataArray to be sent + \param target processor id of the target +*/ +void MEDPARTITIONER::SendDataArrayInt(const ParaMEDMEM::DataArrayInt *da, const int target) +{ + if (da==0) + throw INTERP_KERNEL::Exception("Problem send DataArrayInt* NULL"); + int tag = 111004; + int size[3]; + size[0]=da->getNbOfElems(); + size[1]=da->getNumberOfTuples(); + size[2]=da->getNumberOfComponents(); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl; +#ifdef HAVE_MPI2 + MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD); + const int *p=da->getConstPointer(); + MPI_Send(const_cast(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD); +#endif +} + +/*! Receives messages from proc \a source to fill dataArrayInt da. + To be used with \a SendIntVec method. + \param da dataArrayInt that is filled + \param source processor id of the incoming messages +*/ +ParaMEDMEM::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source) +{ + int tag = 111004; + int size[3]; +#ifdef HAVE_MPI2 + MPI_Status status; + MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayInt " << size[0] << std::endl; + if (size[0]!=(size[1]*size[2])) + throw INTERP_KERNEL::Exception("Problem in RecvDataArrayInt incoherent sizes"); + ParaMEDMEM::DataArrayInt* da=ParaMEDMEM::DataArrayInt::New(); + da->alloc(size[1],size[2]); + int *p=da->getPointer(); + MPI_Recv(const_cast(&p[0]), size[0], MPI_INT, source, tag+100, MPI_COMM_WORLD, &status); +#endif + return da; +} + +/*! + Sends content of \a dataArrayInt to processor \a target. + To be used with \a RecvDataArrayDouble method. + \param da dataArray to be sent + \param target processor id of the target +*/ +void MEDPARTITIONER::SendDataArrayDouble(const ParaMEDMEM::DataArrayDouble *da, const int target) +{ + if (da==0) + throw INTERP_KERNEL::Exception("Problem send DataArrayDouble* NULL"); + int tag = 111005; + int size[3]; + size[0]=da->getNbOfElems(); + size[1]=da->getNumberOfTuples(); + size[2]=da->getNumberOfComponents(); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl; +#ifdef HAVE_MPI2 + MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD); + const double *p=da->getConstPointer(); + MPI_Send(const_cast(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD); +#endif +} + +/*! Receives messages from proc \a source to fill dataArrayDouble da. + To be used with \a SendDoubleVec method. 
+ \param da dataArrayDouble that is filled + \param source processor id of the incoming messages +*/ +ParaMEDMEM::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source) +{ + int tag = 111005; + int size[3]; +#ifdef HAVE_MPI2 + MPI_Status status; + MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status); + if (MyGlobals::_Verbose>1000) + std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayDouble " << size[0] << std::endl; + if (size[0]!=(size[1]*size[2])) + throw INTERP_KERNEL::Exception("Problem in RecvDataArrayDouble incoherent sizes"); + ParaMEDMEM::DataArrayDouble* da=ParaMEDMEM::DataArrayDouble::New(); + da->alloc(size[1],size[2]); + double *p=da->getPointer(); + MPI_Recv(const_cast(&p[0]), size[0], MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status); +#endif + return da; +} + +void MEDPARTITIONER::TestVectorOfStringMpi() +{ + int rank=MyGlobals::_Rank; + int world_size=MyGlobals::_World_Size; + std::vector myVector; + std::ostringstream oss; + oss << "hello from " << std::setw(5) << rank << " " << std::string(rank+1,'n') << + " next is an empty one"; + myVector.push_back(oss.str()); + myVector.push_back(""); + myVector.push_back("next is an singleton"); + myVector.push_back("1"); + + if (rank==0) + { + std::string s0=SerializeFromVectorOfString(myVector); + std::vector res=DeserializeToVectorOfString(s0); + if (res.size()!=myVector.size()) + throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent sizes"); + for (std::size_t i=0; i res=SendAndReceiveVectorOfString(myVector, i, j); + if ((rank==j) && MyGlobals::_Verbose>20) + std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl; + if (rank==j) + { + if (res.size()!=myVector.size()) + throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent sizes"); + for (std::size_t ii=1; ii res=AllgathervVectorOfString(myVector); + //sometimes for test + res=AllgathervVectorOfString(myVector); + res=AllgathervVectorOfString(myVector); + if (rank==0 && MyGlobals::_Verbose>20) + std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl; + if (res.size()!=myVector.size()*world_size) + throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent sizes"); + int jj=-1; + for (int j=0; j myMap; + myMap["one"]=1; + myMap["two"]=22; //a bug + myMap["three"]=3; + myMap["two"]=2; //last speaking override + + if (rank==0) + { + std::vector v2=VectorizeFromMapOfStringInt(myMap); + std::map m3=DevectorizeToMapOfStringInt(v2); + if (ReprMapOfStringInt(m3)!=ReprMapOfStringInt(myMap)) + throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringInt"); + } + + std::vector v2=AllgathervVectorOfString(VectorizeFromMapOfStringInt(myMap)); + if (rank==0 && MyGlobals::_Verbose>20) + { + std::cout << "v2 is : a vector of size " << v2.size() << std::endl; + std::cout << ReprVectorOfString(v2) << std::endl; + std::map m2=DevectorizeToMapOfStringInt(v2); + std::cout << "m2 is : a map of size " << m2.size() << std::endl; + std::cout << ReprMapOfStringInt(m2) << std::endl; + } + if (MyGlobals::_Verbose) + std::cout << "proc " << rank << " : OK TestMapOfStringIntMpi END" << std::endl; +} + +void MEDPARTITIONER::TestMapOfStringVectorOfStringMpi() +{ + int rank=MyGlobals::_Rank; + std::vector myVector; + std::ostringstream oss; + oss << "hello from " << std::setw(5) << MyGlobals::_Rank << " " << std::string(rank+1,'n') << " next is an empty one"; + myVector.push_back(oss.str()); + 
myVector.push_back(""); + myVector.push_back("next is an singleton"); + myVector.push_back("1"); + + if (rank==0) + { + std::map< std::string,std::vector > m2; + m2["first key"]=myVector; + m2["second key"]=myVector; + std::vector v2=VectorizeFromMapOfStringVectorOfString(m2); + std::map< std::string,std::vector > m3=DevectorizeToMapOfStringVectorOfString(v2); + if (rank==0 && MyGlobals::_Verbose>20) + std::cout << "m2 is : a MapOfStringVectorOfString of size " << m2.size() << std::endl; + std::cout << ReprMapOfStringVectorOfString(m2) << std::endl; + std::cout << "v2 is : a vector of size " << v2.size() << std::endl; + std::cout << ReprVectorOfString(v2) << std::endl; + std::cout << "m3 is : a map of size "< > m4; + m4["1rst key"]=myVector; + m4["2snd key"]=myVector; + std::vector v4=AllgathervVectorOfString(VectorizeFromMapOfStringVectorOfString(m4)); + if (rank==0 && MyGlobals::_Verbose>20) + { + std::map< std::string,std::vector > m5=DevectorizeToMapOfStringVectorOfString(v4); + std::map< std::string,std::vector > m6=DeleteDuplicatesInMapOfStringVectorOfString(m5); + std::cout<< "m5 is : a map of size "<alloc(nbOfTuples,numberOfComponents); + std::vector vals; + for (int j=0; jgetPointer()); + if (rank==0) + SendDataArrayInt(send, 1); + if (rank==1) + recv=RecvDataArrayInt(0); + if (rank==1 && MyGlobals::_Verbose>20) + { + std::cout << send->repr() << std::endl; + std::cout << recv->repr() << std::endl; + } + if (rank==1) + { + if (send->repr()!=recv->repr()) + throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayInt"); + } + send->decrRef(); + if (rank==1) + recv->decrRef(); + } + //double + { + ParaMEDMEM::DataArrayDouble* send=ParaMEDMEM::DataArrayDouble::New(); + ParaMEDMEM::DataArrayDouble* recv=0; + int nbOfTuples=5; + int numberOfComponents=3; + send->alloc(nbOfTuples,numberOfComponents); + std::vector vals; + for (int j=0; jgetPointer()); + if (rank==0) SendDataArrayDouble(send, 1); + if (rank==1) recv=RecvDataArrayDouble(0); + if (rank==1 && MyGlobals::_Verbose>20) + { + std::cout << send->repr() << std::endl; + std::cout << recv->repr() << std::endl; + } + if (rank==1) + { + if (send->repr()!=recv->repr()) + throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayDouble"); + } + send->decrRef(); + if (rank==1) recv->decrRef(); + } + + if (MyGlobals::_Verbose) + std::cout << "proc " << rank << " : OK TestDataArrayMpi END" << std::endl; +} + +void MEDPARTITIONER::TestPersistantMpi0To1(int taille, int nb) +{ + double temps_debut=MPI_Wtime(); + int rank=MyGlobals::_Rank; + std::vector x, y; + int tag=111111; + MPI_Request requete0, requete1; + MPI_Status statut; + int ok=0; + std::string res; + if (rank==0) + { + x.resize(taille); + MPI_Ssend_init(&x[0], taille, MPI_INT, 1, tag, MPI_COMM_WORLD , &requete0); + for(int k=0; k cela peut prendre du temps + MPI_Start(&requete0); + //Traitement sequentiel independant de "x" + //... + MPI_Wait(&requete0, &statut); + //Traitement sequentiel impliquant une modification de "x" en memoire + //x=... + } + MPI_Request_free(&requete0); + } + else if (rank == 1) + { + y.resize(taille); + MPI_Recv_init(&y[0], taille, MPI_INT, 0, tag, MPI_COMM_WORLD , &requete1); + for(int k=0; k cela peut prendre du temps + MPI_Start(&requete1); + //Traitement sequentiel independant de "y" + //... 
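
A remark on the Send*/Recv* helpers re-added above: they all follow the same two-message convention, sending the element count on a base tag and the payload on tag+100. One caveat worth flagging: in the pointer-returning variants RecvDoubleVec(const int) and RecvIntVec(const int), vec is a std::vector*, so the expression &vec[0] handed to MPI_Recv is the address of the vector object itself, not of its elements; &(*vec)[0] is presumably what was intended. A minimal sketch of the convention, with hypothetical lower-case helper names, assuming MPI is initialized and the vector is non-empty:

#include <mpi.h>
#include <vector>

// Length travels on `tag`, payload on `tag + 100`, as in SendIntVec/RecvIntVec.
void sendIntVec(const std::vector<int>& v, int target, int tag)
{
  int size = static_cast<int>(v.size());
  MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
  MPI_Send(const_cast<int*>(&v[0]), size, MPI_INT, target, tag + 100, MPI_COMM_WORLD);
}

std::vector<int> recvIntVec(int source, int tag)
{
  MPI_Status status;
  int size = 0;
  MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
  std::vector<int> v(size);
  // Here v is a vector (not a pointer), so &v[0] really is the data pointer.
  MPI_Recv(&v[0], size, MPI_INT, source, tag + 100, MPI_COMM_WORLD, &status);
  return v;
}
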
+ MPI_Wait(&requete1, &statut); + //Traitement sequentiel dependant de "y" + //...=f(y) + int nbb=0; + for (int i=0; i9) + { + res="0K"; + if (nbb!=taille) + res="KO"; + std::cout << res << k << " "; + } + } + res="0K"; + if (ok!=nb) + res="BAD"; + if (MyGlobals::_Verbose>1) + std::cout << "result " << res << " time(sec) " << MPI_Wtime()-temps_debut << std::endl; + MPI_Request_free(&requete1); + } + //end_time=(MPI_WTIME()-start_time); +} + +void MEDPARTITIONER::TestPersistantMpiRing(int taille, int nb) +{ + double temps_debut=MPI_Wtime(); + int befo, next, rank, wsize, tagbefo, tagnext; + rank=MyGlobals::_Rank; + wsize=MyGlobals::_World_Size; + befo=rank-1; if (befo<0) befo=wsize-1; + next=rank+1; if (next>=wsize) next=0; + std::vector x, y; + tagbefo=111111+befo; + tagnext=111111+rank; + MPI_Request requete0, requete1; + MPI_Status statut1, statut2; + int ok=0; + std::string res; + //cout<<"ini|"< cela peut prendre du temps + MPI_Start(&requete0); + //Reception du gros message --> cela peut prendre du temps + for (int i=0; i9) + { + res="0K"+IntToStr(rank); + if (nbb!=taille) + res="KO"+IntToStr(rank); + std::cout << res << k << " "; + } + MPI_Wait(&requete0, &statut2); + //Traitement sequentiel impliquant une modification de "x" en memoire + //x=... + } + res="0K"; if (ok!=nb) res="MAUVAIS"; + temps_debut=MPI_Wtime()-temps_debut; + MPI_Request_free(&requete1); + MPI_Request_free(&requete0); + } + //end_time=(MPI_WTIME()-start_time); + if (MyGlobals::_Verbose>1) + std::cout << "result on proc " << rank << " " << res << " time(sec) " << temps_debut << std::endl; +} + +void MEDPARTITIONER::TestPersistantMpiRingOnCommSplit(int size, int nb) +{ + double temps_debut=MPI_Wtime(); + int rank=MyGlobals::_Rank; + MPI_Comm newcomm; + int color=1; + int rankMax=4; + if (rank>=rankMax) + color=MPI_UNDEFINED; + //MPI_Comm_dup (MPI_COMM_WORLD, &newcomm) ; + MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm); + + int befo, next, wsize, tagbefo, tagnext; + wsize=rankMax; + if (wsize>MyGlobals::_World_Size) + wsize=MyGlobals::_World_Size; + befo=rank-1; + if (befo<0) + befo=wsize-1; + next=rank+1; + if (next>=wsize) + next=0; + std::vector x, y; + tagbefo=111111+befo; + tagnext=111111+rank; + MPI_Request requete0, requete1; + MPI_Status statut1, statut2; + int ok=0; + std::string res; + + if (color==1) + { + x.resize(size); + y.resize(size); + MPI_Ssend_init(&x[0], size, MPI_INT, next, tagnext, newcomm , &requete0); + MPI_Recv_init(&y[0], size, MPI_INT, befo, tagbefo, newcomm , &requete1); + for(int k=0; k time consuming + MPI_Start(&requete0); + //Reception of big message --> time consuming + for (int i=0; i9) + { + res="0K"+IntToStr(rank); + if (nbb!=size) + res="KO"+IntToStr(rank); + std::cout << res << k << " "; + } + MPI_Wait(&requete0, &statut2); + //Traitement sequentiel impliquant une modification de "x" en memoire + //x=... 
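
TestPersistantMpiRingOnCommSplit above restricts the ring exchange to the first rankMax processes by splitting MPI_COMM_WORLD: ranks that pass MPI_UNDEFINED as the color get MPI_COMM_NULL back and simply sit the test out. A self-contained sketch of that sub-communicator setup (rankMax mirrors the cutoff used in the test; the printf is illustrative):

#include <mpi.h>
#include <cstdio>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  const int rankMax = 4;                                   // same cutoff as in the test
  int color = (rank < rankMax) ? 1 : MPI_UNDEFINED;        // excluded ranks opt out
  MPI_Comm newcomm;
  MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm);   // MPI_COMM_NULL for excluded ranks
  if (newcomm != MPI_COMM_NULL)
    {
      int sub_rank, sub_size;
      MPI_Comm_rank(newcomm, &sub_rank);
      MPI_Comm_size(newcomm, &sub_size);
      std::printf("world rank %d is rank %d of %d in the sub-communicator\n",
                  rank, sub_rank, sub_size);
      MPI_Comm_free(&newcomm);                             // free only a valid communicator
    }
  MPI_Finalize();
  return 0;
}
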
+ } + res="0K"; + if (ok!=nb) + res="MAUVAIS"; + temps_debut=MPI_Wtime()-temps_debut; + MPI_Request_free(&requete1); + MPI_Request_free(&requete0); + } + //MPI_Barrier(MPI_COMM_WORLD); + if (color==1) + MPI_Comm_free(&newcomm); + if (MyGlobals::_Verbose>1) + std::cout << "resultat proc " << rank <<" " << res << " time(sec) " << temps_debut << std::endl; +} diff --git a/src/MEDPartitioner/Makefile.am b/src/MEDPartitioner/Makefile.am index 8b5fbf268..5a3168d91 100644 --- a/src/MEDPartitioner/Makefile.am +++ b/src/MEDPartitioner/Makefile.am @@ -59,9 +59,9 @@ MEDPARTITIONER_MeshCollectionMedXmlDriver.cxx \ MEDPARTITIONER_MeshCollectionMedAsciiDriver.cxx \ MEDPARTITIONER_Graph.cxx\ MEDPARTITIONER_UserGraph.cxx\ -MEDPARTITIONER_JointFinder.cxx \ MEDPARTITIONER_SkyLineArray.cxx \ -MEDPARTITIONER_ConnectZone.cxx +MEDPARTITIONER_ConnectZone.cxx \ +MEDPARTITIONER_Utils.cxx libmedpartitioner_la_CPPFLAGS = $(MPI_INCLUDES) $(MED3_INCLUDES) $(HDF5_INCLUDES) \ $(LIBXML_INCLUDES) -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../MEDCoupling \ @@ -71,8 +71,9 @@ libmedpartitioner_la_LDFLAGS = if MPI_IS_OK dist_libmedpartitioner_la_SOURCES += MEDPARTITIONER_ParaDomainSelector.cxx \ - MEDPARTITIONER_Utils.cxx \ - MEDPARTITIONER_ParallelTopology.cxx + MEDPARTITIONER_UtilsPara.cxx \ + MEDPARTITIONER_ParallelTopology.cxx \ + MEDPARTITIONER_JointFinder.cxx if MED_ENABLE_PARMETIS dist_libmedpartitioner_la_SOURCES += MEDPARTITIONER_MetisGraph.cxx @@ -106,6 +107,12 @@ if MPI_IS_OK medpartitioner_para_CPPFLAGS = -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../MEDCoupling \ -I$(srcdir)/../MEDLoader -I$(srcdir)/../INTERP_KERNEL $(MPI_INCLUDES) $(PARMETIS_CPPFLAGS) medpartitioner_para_LDADD = libmedpartitioner.la +else !MPI_IS_OK + bin_PROGRAMS = medpartitioner + dist_medpartitioner_SOURCES = medpartitioner.cxx + medpartitioner_CPPFLAGS = -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../MEDCoupling \ + -I$(srcdir)/../MEDLoader -I$(srcdir)/../INTERP_KERNEL $(METIS_CPPFLAGS) $(SCOTCH_CPPFLAGS) + medpartitioner_LDADD = libmedpartitioner.la endif OBSOLETE_FILES = diff --git a/src/MEDPartitioner/medpartitioner.cxx b/src/MEDPartitioner/medpartitioner.cxx new file mode 100644 index 000000000..f23552f3f --- /dev/null +++ b/src/MEDPartitioner/medpartitioner.cxx @@ -0,0 +1,334 @@ +// Copyright (C) 2007-2011 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// MED medpartitioner : tool to split n MED files into p separate +// MED files with a partitioning specified +// by an external tool +// File : medpartitioner.cxx +// Author : Vincent BERGEAUD (CEA-DEN/DANS/DM2S/SFME/LGLS) +// Module : MED +// + +#include "MEDPARTITIONER_Graph.hxx" +#include "MEDPARTITIONER_Topology.hxx" +#include "MEDPARTITIONER_MeshCollection.hxx" + +#ifdef BOOST_PROGRAM_OPTIONS_LIB +#include +#endif + +#include +#include +#include +#include +#include + +#ifdef BOOST_PROGRAM_OPTIONS_LIB +namespace po=boost::program_options; +#endif + +int main(int argc, char** argv) +{ +#ifndef ENABLE_METIS +#ifndef ENABLE_SCOTCH + std::cout << "Sorry, no splitting method is available. Please compile with METIS or SCOTCH." << std::endl; + return 1; +#endif +#endif + + // Defining options + // by parsing the command line + bool mesh_only = false; + bool is_sequential = true; + bool xml_output_master=true; + bool creates_boundary_faces=false; + bool split_families=false; + bool empty_groups=false; + + std::string input; + std::string output; + std::string meshname; + std::string library; + int ndomains; + +#ifdef BOOST_PROGRAM_OPTIONS_LIB + // Use boost::program_options for command-line options parsing + po::options_description desc("Available options of medpartitioner V1.0"); + desc.add_options() + ("help","produces this help message") + ("mesh-only","prevents the splitter from creating the fields contained in the original file(s)") + ("distributed","specifies that the input file is distributed") + ("input-file",po::value(),"name of the input MED file") + ("output-file",po::value(),"name of the resulting file") + ("meshname",po::value(),"name of the input mesh") +#ifdef ENABLE_METIS +#ifdef ENABLE_SCOTCH + ("split-method",po::value(&library)->default_value("metis"),"name of the splitting library (metis,scotch)") +#endif +#endif + ("ndomains",po::value(&ndomains)->default_value(1),"number of subdomains in the output file") + ("plain-master","creates a plain masterfile instead of an XML file") + ("creates-boundary-faces","creates the necessary faces so that face joints are created in the output files") + ("family-splitting","preserves the family names instead of focusing on the groups") + ("empty-groups","creates empty groups in zones that do not contain a group from the original domain"); + + po::variables_map vm; + po::store(po::parse_command_line(argc,argv,desc),vm); + po::notify(vm); + + if (vm.count("help")) + { + std::cout << desc << "\n"; + return 1; + } + + if (!vm.count("ndomains")) + { + std::cout << "ndomains must be specified!"<< std::endl; + return 1; + } + + ndomains = vm["ndomains"].as(); + if (!vm.count("input-file") || !vm.count("output-file")) + { + std::cout << "input-file and output-file names must be specified" << std::endl; + return 1; + } + + if (!vm.count("distributed") && !vm.count("meshname") ) + { + std::cout << "for a serial MED file, mesh name must be selected with --meshname=..." 
<< std::endl; + return 1; + } + + input = vm["input-file"].as(); + output = vm["output-file"].as(); + + if (vm.count("mesh-only")) + mesh_only=true; + + if (vm.count("distributed")) + is_sequential=false; + + if (is_sequential) + meshname = vm["meshname"].as(); + + if (vm.count("plain-master")) + xml_output_master=false; + + if (vm.count("creates-boundary-faces")) + creates_boundary_faces=true; + + if (vm.count("split-families")) + split_families=true; + + if (vm.count("empty-groups")) + empty_groups=true; + +#else // BOOST_PROGRAM_OPTIONS_LIB + + // Primitive parsing of command-line options + + std::string desc ("Available options of medpartitioner V1.0:\n" + "\t--help : produces this help message\n" + "\t--mesh-only : do not create the fields contained in the original file(s)\n" + "\t--distributed : specifies that the input file is distributed\n" + "\t--input-file= : name of the input MED file\n" + "\t--output-file= : name of the resulting file\n" + "\t--meshname= : name of the input mesh (not used with --distributed option)\n" + "\t--ndomains= : number of subdomains in the output file, default is 1\n" +#ifdef ENABLE_METIS +#ifdef ENABLE_SCOTCH + "\t--split-method=: name of the splitting library (metis/scotch), default is metis\n" +#endif +#endif + "\t--plain-master : creates a plain masterfile instead of an XML file\n" + "\t--creates-boundary-faces: creates the necessary faces so that faces joints are created in the output files\n" + "\t--family-splitting : preserves the family names instead of focusing on the groups\n" + "\t--empty-groups : creates empty groups in zones that do not contain a group from the original domain" + ); + + if (argc < 4) + { + std::cout << desc.c_str() << std::endl; + return 1; + } + + for (int i = 1; i < argc; i++) + { + if (strlen(argv[i]) < 3) + { + std::cout << desc.c_str() << std::endl; + return 1; + } + + if (strncmp(argv[i],"--m",3) == 0) + { + if (strcmp(argv[i],"--mesh-only") == 0) + { + mesh_only = true; + std::cout << "\tmesh-only = " << mesh_only << std::endl; // tmp + } + else if (strlen(argv[i]) > 11) + { // "--meshname=" + meshname = (argv[i] + 11); + std::cout << "\tmeshname = " << meshname << std::endl; // tmp + } + } + else if (strncmp(argv[i],"--d",3) == 0) + { + is_sequential = false; + std::cout << "\tis_sequential = " << is_sequential << std::endl; // tmp + } + else if (strncmp(argv[i],"--i",3) == 0) + { + if (strlen(argv[i]) > 13) + { // "--input-file=" + input = (argv[i] + 13); + std::cout << "\tinput-file = " << input << std::endl; // tmp + } + } + else if (strncmp(argv[i],"--o",3) == 0) + { + if (strlen(argv[i]) > 14) + { // "--output-file=" + output = (argv[i] + 14); + std::cout << "\toutput-file = " << output << std::endl; // tmp + } + } + else if (strncmp(argv[i],"--s",3) == 0) + { + if (strlen(argv[i]) > 15) + { // "--split-method=" + library = (argv[i] + 15); + std::cout << "\tsplit-method = " << library << std::endl; // tmp + } + } + else if (strncmp(argv[i],"--f",3) == 0) + { //"--family-splitting" + split_families=true; + std::cout << "\tfamily-splitting true" << std::endl; // tmp + } + else if (strncmp(argv[i],"--n",3) == 0) + { + if (strlen(argv[i]) > 11) + { // "--ndomains=" + ndomains = atoi(argv[i] + 11); + std::cout << "\tndomains = " << ndomains << std::endl; // tmp + } + } + else if (strncmp(argv[i],"--p",3) == 0) + { // "--plain-master" + xml_output_master = false; + std::cout << "\txml_output_master = " << xml_output_master << std::endl; // tmp + } + else if (strncmp(argv[i],"--c",3) == 0) + { // 
"--creates-boundary-faces" + creates_boundary_faces = true; + std::cout << "\tcreates_boundary_faces = " << creates_boundary_faces << std::endl; // tmp + } + else if (strncmp(argv[i],"--e",3) == 0) + { // "--empty-groups" + empty_groups = true; + std::cout << "\tempty_groups = true" << std::endl; // tmp + } + else + { + std::cout << desc.c_str() << std::endl; + return 1; + } + } + + if (is_sequential && meshname.empty()) + { + std::cout << "Mesh name must be given for sequential(not distributed) input file." << std::endl; + std::cout << desc << std::endl; + return 1; + } + +#endif // BOOST_PROGRAM_OPTIONS_LIB + + + //testing whether it is possible to write a file at the specified location + std::string outputtest = output + ".testioms."; + std::ofstream testfile (outputtest.c_str()); + if (testfile.fail()) + { + std::cout << "MEDPARTITIONER : output-file directory does not exist or is in read-only access" << std::endl; + return 1; + }; + //deletes test file + remove(outputtest.c_str()); + + // Beginning of the computation + + // Loading the mesh collection + MEDPARTITIONER::MeshCollection* collection; + std::cout << "MEDPARTITIONER : reading input files "<< std::endl; + if (is_sequential) + collection = new MEDPARTITIONER::MeshCollection(input,meshname); + else + collection = new MEDPARTITIONER::MeshCollection(input); + + std::cout << "MEDPARTITIONER : computing partition "<< std::endl; + + // Creating the graph and partitioning it +#ifdef ENABLE_METIS +#ifndef ENABLE_SCOTCH + library = "metis"; +#endif +#else + library = "scotch"; +#endif + std::cout << "\tsplit-method = " << library << std::endl; // tmp + + MEDPARTITIONER::Topology* new_topo; + if (library == "metis") + new_topo = collection->createPartition(ndomains,MEDPARTITIONER::Graph::METIS); + else + new_topo = collection->createPartition(ndomains,MEDPARTITIONER::Graph::SCOTCH); + + std::cout << "MEDPARTITIONER : creating new meshes"<< std::endl; + + // Creating a new mesh collection from the partitioning + MEDPARTITIONER::MeshCollection new_collection(*collection, new_topo, split_families, empty_groups); + if (mesh_only) + { + delete collection; + collection=0; + } + + if (!xml_output_master) + new_collection.setDriverType(MEDPARTITIONER::MedAscii); + + // new_collection.setSubdomainBoundaryCreates(creates_boundary_faces); + + std::cout << "MEDPARTITIONER : writing output files "<< std::endl; + new_collection.write(output); + + // Casting the fields on the new collection + // if (!mesh_only) + // new_collection.castAllFields(*collection); + + + // Cleaning memory + delete collection; + delete new_topo; + + return 0; +}