1 // Copyright (C) 2007-2021 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "MEDPARTITIONER_Utils.hxx"
22 #include "MEDLoader.hxx"
23 #include "MEDLoaderBase.hxx"
24 #include "MEDFileUtilities.hxx"
25 #include "CellModel.hxx"
26 #include "MEDCouplingUMesh.hxx"
27 #include "MEDCouplingFieldDouble.hxx"
28 #include "InterpKernelException.hxx"
30 #include "MEDCouplingMemArray.txx"
31 #include "InterpKernelAutoPtr.hxx"
43 #ifndef MEDCOUPLING_USE_64BIT_IDS
44 #define MPI_ID_TYPE MPI_INT
46 #define MPI_ID_TYPE MPI_LONG
51 using namespace MEDPARTITIONER;
// Point-to-point exchange of a vector of strings over MPI_COMM_WORLD:
// the `source` rank serializes `vec` and sends it to `target` (length first,
// then the serialized payload on tag+100); the `target` rank deserializes
// and returns the received vector; every other rank returns an empty vector.
54  * not optimized but suffisant
55  * return empty vector if i am not target
57 std::vector<std::string> MEDPARTITIONER::SendAndReceiveVectorOfString(const std::vector<std::string>& vec, const int source, const int target)
59   int rank=MyGlobals::_Rank;
// sender side: transmit the serialized length first so the receiver can size its buffer
65       std::string str=SerializeFromVectorOfString(vec);
66       int size=(int)str.length();
67       MPI_Send( &size, 1, MPI_INT, target, tag, MPI_COMM_WORLD );
68       MPI_Send( (void*)str.data(), (int)str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD );
// receiver side: size message, then the serialized payload into a pre-sized string
74       MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
75       std::string recData(recSize,'x');
76       MPI_Recv((void*)recData.data(), recSize, MPI_CHAR, source, tag+100, MPI_COMM_WORLD, &status);
77       return DeserializeToVectorOfString(recData); //not empty one for target proc
79   std::vector<std::string> res;
80   return res; //empty one for other proc
// Gathers every rank's vector<string> onto all ranks: each rank serializes its
// vector, the per-rank lengths are exchanged with MPI_Allgather, displacements
// are computed as a prefix sum, and the concatenated payload is exchanged with
// MPI_Allgatherv, then deserialized into one flat vector of strings.
84  * strings NO need all same size!!!!
86 std::vector<std::string> MEDPARTITIONER::AllgathervVectorOfString(const std::vector<std::string>& vec)
88   if (MyGlobals::_World_Size==1) //nothing to do
91   int world_size=MyGlobals::_World_Size;
92   std::string str=SerializeFromVectorOfString(vec);
// collect each rank's serialized length
94   std::vector<int> indexes(world_size);
95   int size=(int)str.length();
96   MPI_Allgather(&size, 1, MPI_INT,
97                 &indexes[0], 1, MPI_INT, MPI_COMM_WORLD);
// computation of displacements (prefix sum of lengths; disp.back() == total size)
100   std::vector< int > disp(1,0);
101   for (int i=0; i<world_size; i++) disp.push_back( disp.back() + indexes[i] );
// receive buffer sized to the total, filled by the variable-length gather
103   std::string recData(disp.back(),'x');
104   MPI_Allgatherv((void*)str.data(), (int)str.length(), MPI_CHAR,
105                  (void*)recData.data(), &indexes[0], &disp[0], MPI_CHAR,
//really extraordinary verbose for debug
109   std::vector<std::string> deserial=DeserializeToVectorOfString(recData);
110   if (MyGlobals::_Verbose>1000)
112       std::cout << "proc "<<MyGlobals::_Rank<<" : receive '" << recData << "'" << std::endl;
113       std::cout << "deserialize is : a vector of size " << deserial.size() << std::endl;
114       std::cout << ReprVectorOfString(deserial) << std::endl;
// Two-message protocol: element count as MPI_INT on `tag`,
// then the raw doubles on `tag+100`.
120   Sends content of \a vec to processor \a target. To be used with \a RecvDoubleVec method.
121   \param vec vector to be sent
122   \param target processor id of the target
124 void MEDPARTITIONER::SendDoubleVec(const std::vector<double>& vec, const int target)
127   int size=(int)vec.size();
128   if (MyGlobals::_Verbose>1000)
129     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl;
131   MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
// NOTE(review): &vec[0] is undefined behaviour when vec is empty — vec.data()
// would be safe for size==0; confirm callers never pass an empty vector.
132   MPI_Send(const_cast<double*>(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
136 /*! Receives messages from proc \a source to fill vector<int> vec.
137 To be used with \a SendDoubleVec method.
139 \param vec vector that is filled
140 \param source processor id of the incoming messages
142 std::vector<double>* MEDPARTITIONER::RecvDoubleVec(const int source)
148 MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
149 if (MyGlobals::_Verbose>1000)
150 std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;
151 std::vector<double>* vec=new std::vector<double>;
153 MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
// In-place variant: fills the caller-supplied vector with doubles received
// from `source` (size message on `tag`, payload on `tag+100`).
// assumes `vec` is resized to `size` before the data receive — the resize is
// expected between the two receives; TODO confirm on the full source.
158 void MEDPARTITIONER::RecvDoubleVec(std::vector<double>& vec, const int source)
164   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
165   if (MyGlobals::_Verbose>1000)
166     std::cout<< "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;;
168   MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
172 Sends content of \a vec to processor \a target. To be used with \a RecvIntVec method.
173 \param vec vector to be sent
174 \param target processor id of the target
176 void MEDPARTITIONER::SendIntVec(const std::vector<mcIdType>& vec, const int target)
179 int size=(int)vec.size();
180 if (MyGlobals::_Verbose>1000)
181 std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl;
183 MPI_Send(&size, 1, MPI_ID_TYPE, target, tag, MPI_COMM_WORLD);
184 MPI_Send(const_cast<mcIdType*>(&vec[0]), size,MPI_ID_TYPE, target, tag+100, MPI_COMM_WORLD);
188 /*! Receives messages from proc \a source to fill vector<int> vec.
189 To be used with \a SendIntVec method.
190 \param vec vector that is filled
191 \param source processor id of the incoming messages
193 std::vector<int> *MEDPARTITIONER::RecvIntVec(const int source)
199 MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
200 if (MyGlobals::_Verbose>1000)
201 std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
202 std::vector<int> *vec=new std::vector<int>;
204 MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
// In-place variant: fills the caller-supplied vector with ids received from
// `source` (size message read as MPI_INT on `tag`, payload as MPI_ID_TYPE on
// `tag+100`). assumes `vec` is resized to `size` before the data receive —
// TODO confirm on the full source.
// NOTE(review): the matching sender must emit the size as a 4-byte MPI_INT;
// verify SendIntVec does not send it as MPI_ID_TYPE.
209 void MEDPARTITIONER::RecvIntVec(std::vector<mcIdType>& vec, const int source)
215   MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
216   if (MyGlobals::_Verbose>1000)
217     std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
219   MPI_Recv(&vec[0], size, MPI_ID_TYPE, source, tag+100, MPI_COMM_WORLD,&status);
// Protocol: a 3-int header {nbOfElems, nbOfTuples, nbOfComponents} on `tag`,
// then the flat data on `tag+100`. Throws if `da` is null.
224   Sends content of \a dataArrayInt to processor \a target.
225   To be used with \a RecvDataArrayInt method.
226   \param da dataArray to be sent
227   \param target processor id of the target
229 void MEDPARTITIONER::SendDataArrayInt(const MEDCoupling::DataArrayInt *da, const int target)
232     throw INTERP_KERNEL::Exception("Problem send DataArrayInt* NULL");
// header: total element count, tuple count, component count (receiver cross-checks them)
235   size[0]=(int)da->getNbOfElems();
236   size[1]=(int)da->getNumberOfTuples();
237   size[2]=(int)da->getNumberOfComponents();
238   if (MyGlobals::_Verbose>1000)
239     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl;
241   MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
// NOTE(review): `const int *p` only compiles while the array's element type is
// a 32-bit int — verify against the mcIdType configuration of DataArrayInt.
242   const int *p=da->getConstPointer();
243   MPI_Send(const_cast<int*>(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD);
// Receives the 3-int header, validates nbOfElems == tuples*components,
// allocates a fresh DataArrayInt (caller must decrRef it) and fills it.
247 /*! Receives messages from proc \a source to fill dataArrayInt da.
248   To be used with \a SendIntVec method.
249   \param da dataArrayInt that is filled
250   \param source processor id of the incoming messages
252 MEDCoupling::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
258   MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
259   if (MyGlobals::_Verbose>1000)
260     std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayInt " << size[0] << std::endl;
// sanity check: the header must be internally consistent
261   if (size[0]!=(size[1]*size[2]))
262     throw INTERP_KERNEL::Exception("Problem in RecvDataArrayInt incoherent sizes");
263   MEDCoupling::DataArrayInt* da=MEDCoupling::DataArrayInt::New();
264   da->alloc(size[1],size[2]);
// receive straight into the array's storage (the const_cast is redundant: p is non-const)
265   int *p=da->getPointer();
266   MPI_Recv(const_cast<int*>(&p[0]), size[0], MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
// Protocol: a 3-int header {nbOfElems, nbOfTuples, nbOfComponents} on `tag`,
// then the flat doubles on `tag+100`. Throws if `da` is null.
272   Sends content of \a dataArrayInt to processor \a target.
273   To be used with \a RecvDataArrayDouble method.
274   \param da dataArray to be sent
275   \param target processor id of the target
277 void MEDPARTITIONER::SendDataArrayDouble(const MEDCoupling::DataArrayDouble *da, const int target)
280     throw INTERP_KERNEL::Exception("Problem send DataArrayDouble* NULL");
// header: total element count, tuple count, component count (receiver cross-checks them)
283   size[0]=(int)da->getNbOfElems();
284   size[1]=(int)da->getNumberOfTuples();
285   size[2]=(int)da->getNumberOfComponents();
286   if (MyGlobals::_Verbose>1000)
287     std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl;
289   MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
290   const double *p=da->getConstPointer();
291   MPI_Send(const_cast<double*>(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
// Receives the 3-int header, validates nbOfElems == tuples*components,
// allocates a fresh DataArrayDouble (caller must decrRef it) and fills it.
295 /*! Receives messages from proc \a source to fill dataArrayDouble da.
296   To be used with \a SendDoubleVec method.
297   \param da dataArrayDouble that is filled
298   \param source processor id of the incoming messages
300 MEDCoupling::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source)
306   MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
307   if (MyGlobals::_Verbose>1000)
308     std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayDouble " << size[0] << std::endl;
// sanity check: the header must be internally consistent
309   if (size[0]!=(size[1]*size[2]))
310     throw INTERP_KERNEL::Exception("Problem in RecvDataArrayDouble incoherent sizes");
311   MEDCoupling::DataArrayDouble* da=MEDCoupling::DataArrayDouble::New();
312   da->alloc(size[1],size[2]);
// receive straight into the array's storage (the const_cast is redundant: p is non-const)
313   double *p=da->getPointer();
314   MPI_Recv(const_cast<double*>(&p[0]), size[0], MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
// Self-test of the vector<string> helpers: (1) local serialize/deserialize
// round-trip, (2) SendAndReceiveVectorOfString over every (source,target)
// pair, (3) AllgathervVectorOfString across all ranks. Throws on any mismatch.
319 void MEDPARTITIONER::TestVectorOfStringMpi()
321   int rank=MyGlobals::_Rank;
322   int world_size=MyGlobals::_World_Size;
// build a rank-specific test vector that exercises tricky cases:
// a rank-dependent string, an empty string, and a one-char string
323   std::vector<std::string> myVector;
324   std::ostringstream oss;
325   oss << "hello from " << std::setw(5) << rank << " " << std::string(rank+1,'n') <<
326     " next is an empty one";
327   myVector.push_back(oss.str());
328   myVector.push_back("");
329   myVector.push_back("next is an singleton");
330   myVector.push_back("1");
// (1) local round-trip: deserialize(serialize(v)) must reproduce v exactly
334     std::string s0=SerializeFromVectorOfString(myVector);
335     std::vector<std::string> res=DeserializeToVectorOfString(s0);
336     if (res.size()!=myVector.size())
337       throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent sizes");
338     for (std::size_t i=0; i<myVector.size(); i++)
339       if (res[i]!=myVector[i])
340         throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent elements");
// (2) exercise every ordered (source i, target j) pair
343   for (int i=0; i<world_size; i++)
345       for (int j=0; j<world_size; j++)
347           std::vector<std::string> res=SendAndReceiveVectorOfString(myVector, i, j);
348           if ((rank==j) && MyGlobals::_Verbose>20)
349             std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
352               if (res.size()!=myVector.size())
353                 throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent sizes");
354               for (std::size_t ii=1; ii<myVector.size(); ii++) //first is different
// NOTE(review): compares res[i] (the outer proc-loop index) with myVector[ii];
// this looks like it should be res[ii] — confirm against the test's intent.
355                 if (res[i]!=myVector[ii])
356                   throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent elements");
// non-target ranks must get an empty vector back
361               throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString size have to be 0");
// (3) allgatherv: repeated calls must be stable, and the result holds
// world_size copies of a myVector-sized block (first element differs per rank)
365   std::vector<std::string> res=AllgathervVectorOfString(myVector);
367   res=AllgathervVectorOfString(myVector);
368   res=AllgathervVectorOfString(myVector);
369   if (rank==0 && MyGlobals::_Verbose>20)
370     std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
371   if (res.size()!=myVector.size()*world_size)
372     throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent sizes");
374   for (int j=0; j<world_size; j++)
376       for (int i=0; i<(int)myVector.size(); i++)
380             continue; //first is different
381           if (res[jj]!=myVector[i])
382             throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent elements");
385   if (MyGlobals::_Verbose)
386     std::cout << "proc " << rank << " : OK TestVectorOfStringMpi END" << std::endl;
// Self-test of the map<string,mcIdType> helpers: local vectorize/devectorize
// round-trip (compared through their Repr form), then an allgatherv of the
// vectorized map across all ranks. Throws on mismatch.
389 void MEDPARTITIONER::TestMapOfStringIntMpi()
391   int rank=MyGlobals::_Rank;
392   std::map<std::string,mcIdType> myMap;
// duplicate key on purpose: the second assignment must win
394   myMap["two"]=22; //a bug
396   myMap["two"]=2; //last speaking override
// local round-trip check via the textual representation
400     std::vector<std::string> v2=VectorizeFromMapOfStringInt(myMap);
401     std::map<std::string,mcIdType> m3=DevectorizeToMapOfStringInt(v2);
402     if (ReprMapOfStringInt(m3)!=ReprMapOfStringInt(myMap))
403       throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringInt");
// gather every rank's vectorized map and (verbosely) rebuild the merged map
406   std::vector<std::string> v2=AllgathervVectorOfString(VectorizeFromMapOfStringInt(myMap));
407   if (rank==0 && MyGlobals::_Verbose>20)
409       std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
410       std::cout << ReprVectorOfString(v2) << std::endl;
411       std::map<std::string,mcIdType> m2=DevectorizeToMapOfStringInt(v2);
412       std::cout << "m2 is : a map of size " << m2.size() << std::endl;
413       std::cout << ReprMapOfStringInt(m2) << std::endl;
415   if (MyGlobals::_Verbose)
416     std::cout << "proc " << rank << " : OK TestMapOfStringIntMpi END" << std::endl;
// Self-test of the map<string,vector<string>> helpers: local
// vectorize/devectorize round-trip, then an allgatherv of a vectorized map
// followed by duplicate removal. Throws on round-trip mismatch.
419 void MEDPARTITIONER::TestMapOfStringVectorOfStringMpi()
421   int rank=MyGlobals::_Rank;
// rank-specific payload reused as the value for every key below
422   std::vector<std::string> myVector;
423   std::ostringstream oss;
424   oss << "hello from " << std::setw(5) << MyGlobals::_Rank << " " << std::string(rank+1,'n') << " next is an empty one";
425   myVector.push_back(oss.str());
426   myVector.push_back("");
427   myVector.push_back("next is an singleton");
428   myVector.push_back("1");
// local round-trip: m2 -> v2 -> m3, compared through their Repr form
432     std::map< std::string,std::vector<std::string> > m2;
433     m2["first key"]=myVector;
434     m2["second key"]=myVector;
435     std::vector<std::string> v2=VectorizeFromMapOfStringVectorOfString(m2);
436     std::map< std::string,std::vector<std::string> > m3=DevectorizeToMapOfStringVectorOfString(v2);
437     if (rank==0 && MyGlobals::_Verbose>20)
438         std::cout << "m2 is : a MapOfStringVectorOfString of size " << m2.size() << std::endl;
439         std::cout << ReprMapOfStringVectorOfString(m2) << std::endl;
440         std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
441         std::cout << ReprVectorOfString(v2) << std::endl;
442         std::cout << "m3 is : a map of size "<<m3.size() << std::endl;
443         std::cout << ReprMapOfStringVectorOfString(m3) << std::endl;
444     if (ReprMapOfStringVectorOfString(m3)!=ReprMapOfStringVectorOfString(m2))
445       throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringVectorOfString");
// gather every rank's vectorized map; rank 0 (verbosely) rebuilds the merged
// map and applies DeleteDuplicatesInMapOfStringVectorOfString
448   std::map< std::string,std::vector<std::string> > m4;
449   m4["1rst key"]=myVector;
450   m4["2snd key"]=myVector;
451   std::vector<std::string> v4=AllgathervVectorOfString(VectorizeFromMapOfStringVectorOfString(m4));
452   if (rank==0 && MyGlobals::_Verbose>20)
454       std::map< std::string,std::vector<std::string> > m5=DevectorizeToMapOfStringVectorOfString(v4);
455       std::map< std::string,std::vector<std::string> > m6=DeleteDuplicatesInMapOfStringVectorOfString(m5);
456       std::cout<< "m5 is : a map of size "<<m5.size() << std::endl;
457       std::cout<< ReprMapOfStringVectorOfString(m5) << std::endl;
458       std::cout<< "m6 is : a map from m5 with deleteDuplicates of size " << m6.size() << std::endl;
459       std::cout<< ReprMapOfStringVectorOfString(m6) << std::endl;
461   if (MyGlobals::_Verbose)
462     std::cout<<"proc " << rank << " : OK TestMapOfStringVectorOfStringMpi END" << std::endl;
// Self-test of the DataArray send/recv helpers between rank 0 (sender) and
// rank 1 (receiver): fills a small int array and a small double array with
// known patterns, ships them 0 -> 1, and compares the repr() of both sides.
465 void MEDPARTITIONER::TestDataArrayMpi()
467   int rank=MyGlobals::_Rank;
// --- integer array: values (j+1)*10 + i+1 for tuple j, component i ---
470     MEDCoupling::DataArrayInt* send=MEDCoupling::DataArrayInt::New();
471     MEDCoupling::DataArrayInt* recv=0;
473     int numberOfComponents=3;
474     send->alloc(nbOfTuples,numberOfComponents);
475     std::vector<int> vals;
476     for (int j=0; j<nbOfTuples; j++)
477       for (int i=0; i<numberOfComponents; i++) vals.push_back((j+1)*10+i+1);
478     std::copy(vals.begin(),vals.end(),send->getPointer());
480       SendDataArrayInt(send, 1);
482       recv=RecvDataArrayInt(0);
483     if (rank==1 && MyGlobals::_Verbose>20)
485         std::cout << send->repr() << std::endl;
486         std::cout << recv->repr() << std::endl;
// textual comparison: repr() encodes size, layout and every value
490         if (send->repr()!=recv->repr())
491           throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayInt");
// --- double array: values (j+1) + (i+1)/10 for tuple j, component i ---
499     MEDCoupling::DataArrayDouble* send=MEDCoupling::DataArrayDouble::New();
500     MEDCoupling::DataArrayDouble* recv=0;
502     int numberOfComponents=3;
503     send->alloc(nbOfTuples,numberOfComponents);
504     std::vector<double> vals;
505     for (int j=0; j<nbOfTuples; j++)
506       for (int i=0; i<numberOfComponents; i++) vals.push_back(double(j+1)+double(i+1)/10);
507     std::copy(vals.begin(),vals.end(),send->getPointer());
508     if (rank==0) SendDataArrayDouble(send, 1);
509     if (rank==1) recv=RecvDataArrayDouble(0);
510     if (rank==1 && MyGlobals::_Verbose>20)
512         std::cout << send->repr() << std::endl;
513         std::cout << recv->repr() << std::endl;
517         if (send->repr()!=recv->repr())
518           throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayDouble");
// only rank 1 owns a received array to release
521     if (rank==1) recv->decrRef();
524   if (MyGlobals::_Verbose)
525     std::cout << "proc " << rank << " : OK TestDataArrayMpi END" << std::endl;
// Benchmark of MPI persistent point-to-point communication between rank 0
// (persistent synchronous send, MPI_Ssend_init) and rank 1 (persistent
// receive, MPI_Recv_init): `taille` ints per message, `nb` iterations of
// MPI_Start/MPI_Wait on the same request; prints the elapsed time.
528 void MEDPARTITIONER::TestPersistantMpi0To1(int taille, int nb)
530   double temps_debut=MPI_Wtime();
531   int rank=MyGlobals::_Rank;
532   std::vector<int> x, y;
534   MPI_Request requete0, requete1;
// rank 0: create the persistent send request once, restart it each iteration
541       MPI_Ssend_init(&x[0], taille, MPI_INT, 1, tag, MPI_COMM_WORLD , &requete0);
542       for(int k=0; k<nb; k++)
544           for (int i=0; i<taille; ++i) x[i]=k;
545           //Sending a big message --> this can take some time
546           MPI_Start(&requete0);
547           //Sequential processing independent of "x"
549           MPI_Wait(&requete0, &statut);
550           //Sequential processing involving an in-memory modification of "x"
553       MPI_Request_free(&requete0);
// rank 1: create the persistent receive request once, restart it each iteration
558       MPI_Recv_init(&y[0], taille, MPI_INT, 0, tag, MPI_COMM_WORLD , &requete1);
559       for(int k=0; k<nb; k++)
561           //Sequential pre-processing
563           for (int i=0; i<taille; ++i) y[i]=(-1);
564           //Receiving the big message --> this can take some time
565           MPI_Start(&requete1);
566           //Sequential processing independent of "y"
568           MPI_Wait(&requete1, &statut);
569           //Sequential processing depending on "y"
// verify every received element equals the iteration counter k
572           for (int i=0; i<taille; ++i)
577           if (MyGlobals::_Verbose>9)
582               std::cout << res << k << " ";
588       if (MyGlobals::_Verbose>1)
589         std::cout << "result " << res << " time(sec) " << MPI_Wtime()-temps_debut << std::endl;
590       MPI_Request_free(&requete1);
592   //end_time=(MPI_WTIME()-start_time);
// Benchmark of persistent communication on a ring over MPI_COMM_WORLD: each
// rank sends `taille` ints to its successor and receives from its predecessor,
// `nb` iterations, with distinct tags per direction. Each received element is
// checked against k + predecessor's rank; prints "0K" or "MAUVAIS" plus timing.
595 void MEDPARTITIONER::TestPersistantMpiRing(int taille, int nb)
597   double temps_debut=MPI_Wtime();
598   int befo, next, rank, wsize, tagbefo, tagnext;
599   rank=MyGlobals::_Rank;
600   wsize=MyGlobals::_World_Size;
// ring neighbours with wrap-around
601   befo=rank-1; if (befo<0) befo=wsize-1;
602   next=rank+1; if (next>=wsize) next=0;
603   std::vector<int> x, y;
606     MPI_Request requete0, requete1;
607     MPI_Status statut1, statut2;
610     //cout<<"ini|"<<rank<<'|'<<befo<<'|'<<next<<' ';
// persistent requests created once: send to successor, receive from predecessor
614     MPI_Ssend_init(&x[0], taille, MPI_INT, next, tagnext, MPI_COMM_WORLD , &requete0);
615     MPI_Recv_init(&y[0], taille, MPI_INT, befo, tagbefo, MPI_COMM_WORLD , &requete1);
616     for(int k=0; k<nb; k++)
618         for (int i=0; i<taille; ++i) x[i]=k+rank;
619         //Sending a big message --> this can take some time
620         MPI_Start(&requete0);
621         //Receiving the big message --> this can take some time
622         for (int i=0; i<taille; ++i) y[i]=(-1);
623         MPI_Start(&requete1);
624         //Sequential processing independent of "x"
626         //Sequential processing independent of "y"
628         MPI_Wait(&requete1, &statut1);
629         //Sequential processing depending on "y"
// expected payload from the predecessor is k + befo
632         for (int i=0; i<taille; ++i)
637         if (MyGlobals::_Verbose>9)
639               res="0K"+IntToStr(rank);
641               res="KO"+IntToStr(rank);
642             std::cout << res << k << " ";
644         MPI_Wait(&requete0, &statut2);
645         //Sequential processing involving an in-memory modification of "x"
// aggregate verdict: all nb iterations must have validated
648     res="0K"; if (ok!=nb) res="MAUVAIS";
649     temps_debut=MPI_Wtime()-temps_debut;
650     MPI_Request_free(&requete1);
651     MPI_Request_free(&requete0);
653   //end_time=(MPI_WTIME()-start_time);
654   if (MyGlobals::_Verbose>1)
655     std::cout << "result on proc " << rank << " " << res << " time(sec) " << temps_debut << std::endl;
// Same ring benchmark as TestPersistantMpiRing, but run on a communicator
// obtained with MPI_Comm_split instead of MPI_COMM_WORLD: `size` ints per
// message, `nb` iterations; the split communicator is freed at the end.
658 void MEDPARTITIONER::TestPersistantMpiRingOnCommSplit(int size, int nb)
660   double temps_debut=MPI_Wtime();
661   int rank=MyGlobals::_Rank;
// build the sub-communicator (a dup of the world was the alternative, kept for reference)
667   //MPI_Comm_dup (MPI_COMM_WORLD, &newcomm) ;
668   MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm);
670   int befo, next, wsize, tagbefo, tagnext;
// clamp the ring size to the world size
672   if (wsize>MyGlobals::_World_Size)
673     wsize=MyGlobals::_World_Size;
680   std::vector<int> x, y;
683     MPI_Request requete0, requete1;
684     MPI_Status statut1, statut2;
// persistent requests on the split communicator: send to successor, receive from predecessor
692     MPI_Ssend_init(&x[0], size, MPI_INT, next, tagnext, newcomm , &requete0);
693     MPI_Recv_init(&y[0], size, MPI_INT, befo, tagbefo, newcomm , &requete1);
694     for(int k=0; k<nb; k++)
696         for (int i=0; i<size; ++i)
698         //Send of big message --> time consuming
699         MPI_Start(&requete0);
700         //Reception of big message --> time consuming
701         for (int i=0; i<size; ++i)
703         MPI_Start(&requete1);
704         //Sequential processing independent of "x"
706         //Sequential processing independent of "y"
708         //cout<<"dsr|"<<rank<<' ';
709         MPI_Wait(&requete1, &statut1);
710         //Sequential processing depending on "y"
713         for (int i=0; i<size; ++i)
718         if (MyGlobals::_Verbose>9)
720               res="0K"+IntToStr(rank);
722               res="KO"+IntToStr(rank);
723             std::cout << res << k << " ";
725         MPI_Wait(&requete0, &statut2);
726         //Sequential processing involving an in-memory modification of "x"
732     temps_debut=MPI_Wtime()-temps_debut;
733     MPI_Request_free(&requete1);
734     MPI_Request_free(&requete0);
736   //MPI_Barrier(MPI_COMM_WORLD);
// release the split communicator created above
738   MPI_Comm_free(&newcomm);
739   if (MyGlobals::_Verbose>1)
740     std::cout << "resultat proc " << rank <<" " << res << " time(sec) " << temps_debut << std::endl;