1 // Copyright (C) 2007-2015 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "MEDPARTITIONER_Utils.hxx"
22 #include "MEDLoader.hxx"
23 #include "MEDLoaderBase.hxx"
24 #include "MEDFileUtilities.hxx"
25 #include "CellModel.hxx"
26 #include "MEDCouplingUMesh.hxx"
27 #include "MEDCouplingFieldDouble.hxx"
28 #include "InterpKernelException.hxx"
29 #include "MEDCouplingAutoRefCountObjectPtr.hxx"
30 #include "InterpKernelAutoPtr.hxx"
42 using namespace MEDPARTITIONER;
45 * not optimized but sufficient
46 * return empty vector if i am not target
// Point-to-point exchange of a vector of strings between two ranks:
// rank `source` serializes `vec` and sends length + bytes to rank `target`;
// rank `target` receives and deserializes; every other rank returns empty.
// NOTE(review): several lines are elided in this view (the rank==source /
// rank==target branches and declarations of `tag`, `recSize`, `status`) --
// confirm the surrounding control flow against the full file.
48 std::vector<std::string> MEDPARTITIONER::SendAndReceiveVectorOfString(const std::vector<std::string>& vec, const int source, const int target)
50 int rank=MyGlobals::_Rank;
// Sender side: ship the serialized length first so the receiver can size its buffer.
56 std::string str=SerializeFromVectorOfString(vec);
57 int size=str.length();
58 MPI_Send( &size, 1, MPI_INT, target, tag, MPI_COMM_WORLD );
59 MPI_Send( (void*)str.data(), str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD );
// Receiver side: pre-size the string with placeholder 'x', then overwrite in place.
65 MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
66 std::string recData(recSize,'x');
67 MPI_Recv((void*)recData.data(), recSize, MPI_CHAR, source, tag+100, MPI_COMM_WORLD, &status);
68 return DeserializeToVectorOfString(recData); //not empty one for target proc
// Ranks that are neither source nor target fall through to here.
70 std::vector<std::string> res;
71 return res; //empty one for other proc
75 * the strings do NOT all need to be the same size
// Gathers every rank's vector of strings onto all ranks: each rank serializes
// its vector, lengths are exchanged with MPI_Allgather, then the serialized
// bytes are concatenated with MPI_Allgatherv and deserialized.
77 std::vector<std::string> MEDPARTITIONER::AllgathervVectorOfString(const std::vector<std::string>& vec)
79 if (MyGlobals::_World_Size==1) //nothing to do
82 int world_size=MyGlobals::_World_Size;
83 std::string str=SerializeFromVectorOfString(vec);
// First gather each rank's serialized byte length...
85 std::vector<int> indexes(world_size);
86 int size=str.length();
87 MPI_Allgather(&size, 1, MPI_INT,
88 &indexes[0], 1, MPI_INT, MPI_COMM_WORLD);
// ...then derive the receive displacements (exclusive prefix sum of lengths).
90 //computation of the displacements
91 std::vector< int > disp(1,0);
92 for (int i=0; i<world_size; i++) disp.push_back( disp.back() + indexes[i] );
// disp.back() is the total byte count across all ranks.
94 std::string recData(disp.back(),'x');
95 MPI_Allgatherv((void*)str.data(), str.length(), MPI_CHAR,
96 (void*)recData.data(), &indexes[0], &disp[0], MPI_CHAR,
// NOTE(review): the communicator argument of MPI_Allgatherv and the final
// return statement are not visible in this view -- confirm against full file.
99 //really extraordinary verbose for debug
100 std::vector<std::string> deserial=DeserializeToVectorOfString(recData);
101 if (MyGlobals::_Verbose>1000)
103 std::cout << "proc "<<MyGlobals::_Rank<<" : receive '" << recData << "'" << std::endl;
104 std::cout << "deserialize is : a vector of size " << deserial.size() << std::endl;
105 std::cout << ReprVectorOfString(deserial) << std::endl;
111 Sends content of \a vec to processor \a target. To be used with \a RecvDoubleVec method.
112 \param vec vector to be sent
113 \param target processor id of the target
// Sends a vector of doubles to `target`: length first (so the receiver can
// resize), then the payload on tag+100. Pairs with RecvDoubleVec.
// NOTE(review): declarations of `tag` and `size` are elided in this view.
// NOTE(review): `&vec[0]` is UB if `vec` is empty -- guard or use vec.data().
115 void MEDPARTITIONER::SendDoubleVec(const std::vector<double>& vec, const int target)
119 if (MyGlobals::_Verbose>1000)
120 std::cout << "proc " << MyGlobals::_Rank << " : --> SendDoubleVec " << size << std::endl;
122 MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
// const_cast needed because pre-MPI-3 MPI_Send takes a non-const void*.
123 MPI_Send(const_cast<double*>(&vec[0]), size, MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
127 /*! Receives messages from proc \a source to fill vector<double> vec.
128 To be used with \a SendDoubleVec method.
130 \param vec vector that is filled
131 \param source processor id of the incoming messages
133 std::vector<double>* MEDPARTITIONER::RecvDoubleVec(const int source)
139 MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
140 if (MyGlobals::_Verbose>1000)
141 std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;
142 std::vector<double>* vec=new std::vector<double>;
144 MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
// Overload filling a caller-provided vector: receives the length, then the
// payload into `vec`. Pairs with SendDoubleVec.
// NOTE(review): the vec.resize(size) call is elided in this view -- without
// it, &vec[0] would be invalid; confirm against the full file.
149 void MEDPARTITIONER::RecvDoubleVec(std::vector<double>& vec, const int source)
155 MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
156 if (MyGlobals::_Verbose>1000)
157 std::cout<< "proc " << MyGlobals::_Rank << " : <-- RecvDoubleVec " << size << std::endl;;
159 MPI_Recv(&vec[0], size, MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
163 Sends content of \a vec to processor \a target. To be used with \a RecvIntVec method.
164 \param vec vector to be sent
165 \param target processor id of the target
// Sends a vector of ints to `target`: length first, payload on tag+100.
// Pairs with RecvIntVec.
// NOTE(review): declarations of `tag` and `size` are elided in this view.
// NOTE(review): `&vec[0]` is UB if `vec` is empty -- guard or use vec.data().
167 void MEDPARTITIONER::SendIntVec(const std::vector<int>& vec, const int target)
171 if (MyGlobals::_Verbose>1000)
172 std::cout << "proc " << MyGlobals::_Rank << " : --> SendIntVec " << size << std::endl;
174 MPI_Send(&size, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
// const_cast needed because pre-MPI-3 MPI_Send takes a non-const void*.
175 MPI_Send(const_cast<int*>(&vec[0]), size,MPI_INT, target, tag+100, MPI_COMM_WORLD);
179 /*! Receives messages from proc \a source to fill vector<int> vec.
180 To be used with \a SendIntVec method.
181 \param vec vector that is filled
182 \param source processor id of the incoming messages
// Receives an int vector from `source` and returns it heap-allocated.
// Caller owns and must delete the result.
// NOTE(review): the resize of *vec and the return statement are elided here.
184 std::vector<int> *MEDPARTITIONER::RecvIntVec(const int source)
190 MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
191 if (MyGlobals::_Verbose>1000)
192 std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
193 std::vector<int> *vec=new std::vector<int>;
// BUG(review): `vec` is a POINTER, so &vec[0] addresses the vector object
// itself, not its elements -- should be &(*vec)[0] (or vec->data()).
195 MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
// Overload filling a caller-provided int vector: length first, then payload.
// NOTE(review): the vec.resize(size) call is elided in this view -- confirm
// against the full file before relying on &vec[0].
200 void MEDPARTITIONER::RecvIntVec(std::vector<int>& vec, const int source)
206 MPI_Recv(&size, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
207 if (MyGlobals::_Verbose>1000)
208 std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvIntVec " << size << std::endl;
210 MPI_Recv(&vec[0], size, MPI_INT, source, tag+100, MPI_COMM_WORLD,&status);
215 Sends content of \a dataArrayInt to processor \a target.
216 To be used with \a RecvDataArrayInt method.
217 \param da dataArray to be sent
218 \param target processor id of the target
// Sends a DataArrayInt to `target`: a 3-int header {nbOfElems, nbTuples,
// nbComponents} on `tag`, then the raw element buffer on tag+100.
// Throws if `da` is null. Pairs with RecvDataArrayInt.
// NOTE(review): the null check `if (da==0)` and the declaration of `size`
// (presumably int size[3]) and `tag` are elided in this view.
220 void MEDPARTITIONER::SendDataArrayInt(const MEDCoupling::DataArrayInt *da, const int target)
223 throw INTERP_KERNEL::Exception("Problem send DataArrayInt* NULL");
226 size[0]=da->getNbOfElems();
227 size[1]=da->getNumberOfTuples();
228 size[2]=da->getNumberOfComponents();
229 if (MyGlobals::_Verbose>1000)
230 std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayInt " << size[0] << std::endl;
// NOTE(review): `&size` (type int(*)[3]) works but plain `size` is the
// conventional spelling for passing the array's first element address.
232 MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
233 const int *p=da->getConstPointer();
234 MPI_Send(const_cast<int*>(&p[0]), size[0] ,MPI_INT, target, tag+100, MPI_COMM_WORLD);
238 /*! Receives messages from proc \a source to fill dataArrayInt da.
239 To be used with \a SendDataArrayInt method.
240 \param da dataArrayInt that is filled
241 \param source processor id of the incoming messages
// Receives a DataArrayInt from `source`: 3-int header first, consistency
// check (nbOfElems == nbTuples*nbComponents), then the element buffer into a
// freshly allocated array. Caller owns the returned array (decrRef it).
// NOTE(review): declarations of `size`, `tag`, `status` and the return
// statement are elided in this view.
243 MEDCoupling::DataArrayInt *MEDPARTITIONER::RecvDataArrayInt(const int source)
249 MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
250 if (MyGlobals::_Verbose>1000)
251 std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayInt " << size[0] << std::endl;
252 if (size[0]!=(size[1]*size[2]))
253 throw INTERP_KERNEL::Exception("Problem in RecvDataArrayInt incoherent sizes");
254 MEDCoupling::DataArrayInt* da=MEDCoupling::DataArrayInt::New();
255 da->alloc(size[1],size[2]);
256 int *p=da->getPointer();
// NOTE(review): const_cast is a no-op here -- `p` is already non-const.
257 MPI_Recv(const_cast<int*>(&p[0]), size[0], MPI_INT, source, tag+100, MPI_COMM_WORLD, &status);
263 Sends content of \a dataArrayDouble to processor \a target.
264 To be used with \a RecvDataArrayDouble method.
265 \param da dataArray to be sent
266 \param target processor id of the target
// Sends a DataArrayDouble to `target`: a 3-int header {nbOfElems, nbTuples,
// nbComponents} on `tag`, then the raw double buffer on tag+100.
// Throws if `da` is null. Pairs with RecvDataArrayDouble.
// NOTE(review): the null check and declarations of `size`/`tag` are elided
// in this view.
268 void MEDPARTITIONER::SendDataArrayDouble(const MEDCoupling::DataArrayDouble *da, const int target)
271 throw INTERP_KERNEL::Exception("Problem send DataArrayDouble* NULL");
274 size[0]=da->getNbOfElems();
275 size[1]=da->getNumberOfTuples();
276 size[2]=da->getNumberOfComponents();
277 if (MyGlobals::_Verbose>1000)
278 std::cout << "proc " << MyGlobals::_Rank << " : --> SendDataArrayDouble " << size[0] << std::endl;
// NOTE(review): `&size` (type int(*)[3]) works but plain `size` is clearer.
280 MPI_Send(&size, 3, MPI_INT, target, tag, MPI_COMM_WORLD);
281 const double *p=da->getConstPointer();
282 MPI_Send(const_cast<double*>(&p[0]), size[0] ,MPI_DOUBLE, target, tag+100, MPI_COMM_WORLD);
286 /*! Receives messages from proc \a source to fill dataArrayDouble da.
287 To be used with \a SendDataArrayDouble method.
288 \param da dataArrayDouble that is filled
289 \param source processor id of the incoming messages
// Receives a DataArrayDouble from `source`: 3-int header, consistency check,
// then the double buffer into a freshly allocated array. Caller owns the
// returned array (decrRef it).
// NOTE(review): declarations of `size`, `tag`, `status` and the return
// statement are elided in this view.
291 MEDCoupling::DataArrayDouble* MEDPARTITIONER::RecvDataArrayDouble(const int source)
297 MPI_Recv(size, 3, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
298 if (MyGlobals::_Verbose>1000)
299 std::cout << "proc " << MyGlobals::_Rank << " : <-- RecvDataArrayDouble " << size[0] << std::endl;
300 if (size[0]!=(size[1]*size[2]))
301 throw INTERP_KERNEL::Exception("Problem in RecvDataArrayDouble incoherent sizes");
302 MEDCoupling::DataArrayDouble* da=MEDCoupling::DataArrayDouble::New();
303 da->alloc(size[1],size[2]);
304 double *p=da->getPointer();
// NOTE(review): const_cast is a no-op here -- `p` is already non-const.
305 MPI_Recv(const_cast<double*>(&p[0]), size[0], MPI_DOUBLE, source, tag+100, MPI_COMM_WORLD, &status);
// Self-test of the string (de)serialization and the two MPI string-exchange
// helpers. Exercises: local round trip, all source/target pairs of
// SendAndReceiveVectorOfString, and AllgathervVectorOfString.
310 void MEDPARTITIONER::TestVectorOfStringMpi()
312 int rank=MyGlobals::_Rank;
313 int world_size=MyGlobals::_World_Size;
// Test payload: a rank-dependent string, an empty string, and singletons.
314 std::vector<std::string> myVector;
315 std::ostringstream oss;
316 oss << "hello from " << std::setw(5) << rank << " " << std::string(rank+1,'n') <<
317 " next is an empty one";
318 myVector.push_back(oss.str());
319 myVector.push_back("");
320 myVector.push_back("next is an singleton");
321 myVector.push_back("1");
// 1) local serialize/deserialize round trip must be the identity.
325 std::string s0=SerializeFromVectorOfString(myVector);
326 std::vector<std::string> res=DeserializeToVectorOfString(s0);
327 if (res.size()!=myVector.size())
328 throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent sizes");
329 for (std::size_t i=0; i<myVector.size(); i++)
330 if (res[i]!=myVector[i])
331 throw INTERP_KERNEL::Exception("Problem in (de)serialise VectorOfString incoherent elements");
// 2) every (source i, target j) pair of the point-to-point exchange.
334 for (int i=0; i<world_size; i++)
336 for (int j=0; j<world_size; j++)
338 std::vector<std::string> res=SendAndReceiveVectorOfString(myVector, i, j);
339 if ((rank==j) && MyGlobals::_Verbose>20)
340 std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
343 if (res.size()!=myVector.size())
344 throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent sizes");
345 for (std::size_t ii=1; ii<myVector.size(); ii++) //first is different
// BUG(review): index is `i` (outer loop var) but the loop variable is `ii`;
// this compares res[i] against myVector[ii] -- almost certainly meant res[ii].
346 if (res[i]!=myVector[ii])
347 throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString incoherent elements");
// Non-target ranks must get an empty vector back.
352 throw INTERP_KERNEL::Exception("Problem in SendAndReceiveVectorOfString size have to be 0");
// 3) allgatherv: repeat calls to check idempotence, then verify contents.
356 std::vector<std::string> res=AllgathervVectorOfString(myVector);
358 res=AllgathervVectorOfString(myVector);
359 res=AllgathervVectorOfString(myVector);
360 if (rank==0 && MyGlobals::_Verbose>20)
361 std::cout << "proc " << rank << " : receive \n" << ReprVectorOfString(res) << std::endl;
362 if (res.size()!=myVector.size()*world_size)
363 throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent sizes");
365 for (int j=0; j<world_size; j++)
367 for (int i=0; i<(int)myVector.size(); i++)
// NOTE(review): the computation of `jj` is elided in this view; presumably
// jj = j*myVector.size() + i -- confirm against the full file.
371 continue; //first is different
372 if (res[jj]!=myVector[i])
373 throw INTERP_KERNEL::Exception("Problem in AllgathervVectorOfString incoherent elements");
376 if (MyGlobals::_Verbose)
377 std::cout << "proc " << rank << " : OK TestVectorOfStringMpi END" << std::endl;
// Self-test of map<string,int> <-> vector<string> (de)vectorization, locally
// and through AllgathervVectorOfString.
380 void MEDPARTITIONER::TestMapOfStringIntMpi()
382 int rank=MyGlobals::_Rank;
383 std::map<std::string,int> myMap;
// Deliberate duplicate key: the value 22 is overwritten below, exercising
// "last write wins" semantics of the map.
385 myMap["two"]=22; //a bug
387 myMap["two"]=2; //the last assignment wins
// 1) local vectorize/devectorize round trip must preserve the map.
391 std::vector<std::string> v2=VectorizeFromMapOfStringInt(myMap);
392 std::map<std::string,int> m3=DevectorizeToMapOfStringInt(v2);
393 if (ReprMapOfStringInt(m3)!=ReprMapOfStringInt(myMap))
394 throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringInt");
// 2) gather the vectorized maps of all ranks; rank 0 prints for inspection.
397 std::vector<std::string> v2=AllgathervVectorOfString(VectorizeFromMapOfStringInt(myMap));
398 if (rank==0 && MyGlobals::_Verbose>20)
400 std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
401 std::cout << ReprVectorOfString(v2) << std::endl;
402 std::map<std::string,int> m2=DevectorizeToMapOfStringInt(v2);
403 std::cout << "m2 is : a map of size " << m2.size() << std::endl;
404 std::cout << ReprMapOfStringInt(m2) << std::endl;
406 if (MyGlobals::_Verbose)
407 std::cout << "proc " << rank << " : OK TestMapOfStringIntMpi END" << std::endl;
// Self-test of map<string,vector<string>> (de)vectorization, locally and
// through AllgathervVectorOfString + duplicate removal.
410 void MEDPARTITIONER::TestMapOfStringVectorOfStringMpi()
412 int rank=MyGlobals::_Rank;
// Test payload: rank-dependent string, empty string, and singletons.
413 std::vector<std::string> myVector;
414 std::ostringstream oss;
415 oss << "hello from " << std::setw(5) << MyGlobals::_Rank << " " << std::string(rank+1,'n') << " next is an empty one";
416 myVector.push_back(oss.str());
417 myVector.push_back("");
418 myVector.push_back("next is an singleton");
419 myVector.push_back("1");
// 1) local round trip: map -> vector -> map must reproduce the same repr.
423 std::map< std::string,std::vector<std::string> > m2;
424 m2["first key"]=myVector;
425 m2["second key"]=myVector;
426 std::vector<std::string> v2=VectorizeFromMapOfStringVectorOfString(m2);
427 std::map< std::string,std::vector<std::string> > m3=DevectorizeToMapOfStringVectorOfString(v2);
428 if (rank==0 && MyGlobals::_Verbose>20)
429 std::cout << "m2 is : a MapOfStringVectorOfString of size " << m2.size() << std::endl;
430 std::cout << ReprMapOfStringVectorOfString(m2) << std::endl;
431 std::cout << "v2 is : a vector of size " << v2.size() << std::endl;
432 std::cout << ReprVectorOfString(v2) << std::endl;
433 std::cout << "m3 is : a map of size "<<m3.size() << std::endl;
434 std::cout << ReprMapOfStringVectorOfString(m3) << std::endl;
435 if (ReprMapOfStringVectorOfString(m3)!=ReprMapOfStringVectorOfString(m2))
436 throw INTERP_KERNEL::Exception("Problem in (de)vectorize MapOfStringVectorOfString");
// 2) gather across ranks, then deduplicate; rank 0 prints for inspection.
439 std::map< std::string,std::vector<std::string> > m4;
440 m4["1rst key"]=myVector;
441 m4["2snd key"]=myVector;
442 std::vector<std::string> v4=AllgathervVectorOfString(VectorizeFromMapOfStringVectorOfString(m4));
443 if (rank==0 && MyGlobals::_Verbose>20)
445 std::map< std::string,std::vector<std::string> > m5=DevectorizeToMapOfStringVectorOfString(v4);
446 std::map< std::string,std::vector<std::string> > m6=DeleteDuplicatesInMapOfStringVectorOfString(m5);
447 std::cout<< "m5 is : a map of size "<<m5.size() << std::endl;
448 std::cout<< ReprMapOfStringVectorOfString(m5) << std::endl;
449 std::cout<< "m6 is : a map from m5 with deleteDuplicates of size " << m6.size() << std::endl;
450 std::cout<< ReprMapOfStringVectorOfString(m6) << std::endl;
452 if (MyGlobals::_Verbose)
453 std::cout<<"proc " << rank << " : OK TestMapOfStringVectorOfStringMpi END" << std::endl;
// Self-test of Send/RecvDataArrayInt and Send/RecvDataArrayDouble between
// rank 0 (sender) and rank 1 (receiver); compares repr() of sent vs received.
// NOTE(review): several guard lines (rank==0 / rank==1 conditions, decrRef
// calls, nbOfTuples declarations) are elided in this view.
456 void MEDPARTITIONER::TestDataArrayMpi()
458 int rank=MyGlobals::_Rank;
// --- int array: fill with (tuple+1)*10 + component+1, ship 0 -> 1.
461 MEDCoupling::DataArrayInt* send=MEDCoupling::DataArrayInt::New();
462 MEDCoupling::DataArrayInt* recv=0;
464 int numberOfComponents=3;
465 send->alloc(nbOfTuples,numberOfComponents);
466 std::vector<int> vals;
467 for (int j=0; j<nbOfTuples; j++)
468 for (int i=0; i<numberOfComponents; i++) vals.push_back((j+1)*10+i+1);
469 std::copy(vals.begin(),vals.end(),send->getPointer());
471 SendDataArrayInt(send, 1);
473 recv=RecvDataArrayInt(0);
474 if (rank==1 && MyGlobals::_Verbose>20)
476 std::cout << send->repr() << std::endl;
477 std::cout << recv->repr() << std::endl;
481 if (send->repr()!=recv->repr())
482 throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayInt");
// --- double array: same pattern with fractional values, ship 0 -> 1.
490 MEDCoupling::DataArrayDouble* send=MEDCoupling::DataArrayDouble::New();
491 MEDCoupling::DataArrayDouble* recv=0;
493 int numberOfComponents=3;
494 send->alloc(nbOfTuples,numberOfComponents);
495 std::vector<double> vals;
496 for (int j=0; j<nbOfTuples; j++)
497 for (int i=0; i<numberOfComponents; i++) vals.push_back(double(j+1)+double(i+1)/10);
498 std::copy(vals.begin(),vals.end(),send->getPointer());
499 if (rank==0) SendDataArrayDouble(send, 1);
500 if (rank==1) recv=RecvDataArrayDouble(0);
501 if (rank==1 && MyGlobals::_Verbose>20)
503 std::cout << send->repr() << std::endl;
504 std::cout << recv->repr() << std::endl;
508 if (send->repr()!=recv->repr())
509 throw INTERP_KERNEL::Exception("Problem in send&recv DataArrayDouble");
// Only rank 1 holds a received array to release.
512 if (rank==1) recv->decrRef();
515 if (MyGlobals::_Verbose)
516 std::cout << "proc " << rank << " : OK TestDataArrayMpi END" << std::endl;
// Benchmarks persistent MPI communication from rank 0 to rank 1:
// MPI_Ssend_init / MPI_Recv_init set the request up once, then `nb`
// iterations of MPI_Start + MPI_Wait move `taille` ints each time.
// NOTE(review): the rank==0 / rank==1 branches, the resize of x/y, and the
// declarations of `tag`, `statut`, `res` are elided in this view.
519 void MEDPARTITIONER::TestPersistantMpi0To1(int taille, int nb)
521 double temps_debut=MPI_Wtime();
522 int rank=MyGlobals::_Rank;
523 std::vector<int> x, y;
525 MPI_Request requete0, requete1;
// Sender (rank 0): persistent synchronous send, restarted each iteration.
532 MPI_Ssend_init(&x[0], taille, MPI_INT, 1, tag, MPI_COMM_WORLD , &requete0);
533 for(int k=0; k<nb; k++)
535 for (int i=0; i<taille; ++i) x[i]=k;
536 //Sending a big message --> this may take some time
537 MPI_Start(&requete0);
538 //Sequential processing independent of "x"
540 MPI_Wait(&requete0, &statut);
541 //Sequential processing involving a modification of "x" in memory
544 MPI_Request_free(&requete0);
// Receiver (rank 1): persistent receive, restarted each iteration.
549 MPI_Recv_init(&y[0], taille, MPI_INT, 0, tag, MPI_COMM_WORLD , &requete1);
550 for(int k=0; k<nb; k++)
552 //Sequential pre-processing
// Poison the buffer so a missed receive is detectable.
554 for (int i=0; i<taille; ++i) y[i]=(-1);
555 //Receiving the big message --> this may take some time
556 MPI_Start(&requete1);
557 //Sequential processing independent of "y"
559 MPI_Wait(&requete1, &statut);
560 //Sequential processing dependent on "y"
// Verify every element carries the iteration number k.
563 for (int i=0; i<taille; ++i)
568 if (MyGlobals::_Verbose>9)
573 std::cout << res << k << " ";
579 if (MyGlobals::_Verbose>1)
580 std::cout << "result " << res << " time(sec) " << MPI_Wtime()-temps_debut << std::endl;
581 MPI_Request_free(&requete1);
583 //end_time=(MPI_WTIME()-start_time);
// Benchmarks persistent MPI communication around a ring on MPI_COMM_WORLD:
// every rank sends `taille` ints to its successor and receives from its
// predecessor, `nb` times, via persistent requests (MPI_Ssend_init /
// MPI_Recv_init + MPI_Start).
// NOTE(review): the resize of x/y, tag initialization, and the declarations
// of `res`/`ok` are elided in this view.
586 void MEDPARTITIONER::TestPersistantMpiRing(int taille, int nb)
588 double temps_debut=MPI_Wtime();
589 int befo, next, rank, wsize, tagbefo, tagnext;
590 rank=MyGlobals::_Rank;
591 wsize=MyGlobals::_World_Size;
// Ring neighbours with wrap-around.
592 befo=rank-1; if (befo<0) befo=wsize-1;
593 next=rank+1; if (next>=wsize) next=0;
594 std::vector<int> x, y;
597 MPI_Request requete0, requete1;
598 MPI_Status statut1, statut2;
601 //cout<<"ini|"<<rank<<'|'<<befo<<'|'<<next<<' ';
// Persistent send to successor / receive from predecessor, set up once.
605 MPI_Ssend_init(&x[0], taille, MPI_INT, next, tagnext, MPI_COMM_WORLD , &requete0);
606 MPI_Recv_init(&y[0], taille, MPI_INT, befo, tagbefo, MPI_COMM_WORLD , &requete1);
607 for(int k=0; k<nb; k++)
609 for (int i=0; i<taille; ++i) x[i]=k+rank;
610 //Sending a big message --> this may take some time
611 MPI_Start(&requete0);
612 //Receiving the big message --> this may take some time
// Poison the receive buffer so a missed receive is detectable.
613 for (int i=0; i<taille; ++i) y[i]=(-1);
614 MPI_Start(&requete1);
615 //Sequential processing independent of "x"
617 //Sequential processing independent of "y"
619 MPI_Wait(&requete1, &statut1);
620 //Sequential processing dependent on "y"
// Expect y[i] == k + befo (value written by the predecessor rank).
623 for (int i=0; i<taille; ++i)
628 if (MyGlobals::_Verbose>9)
// NOTE(review): "0K" uses digit zero -- likely a typo for "OK", kept as-is
// because it is a runtime string.
630 res="0K"+IntToStr(rank);
632 res="KO"+IntToStr(rank);
633 std::cout << res << k << " ";
635 MPI_Wait(&requete0, &statut2);
636 //Sequential processing involving a modification of "x" in memory
639 res="0K"; if (ok!=nb) res="MAUVAIS";
640 temps_debut=MPI_Wtime()-temps_debut;
641 MPI_Request_free(&requete1);
642 MPI_Request_free(&requete0);
644 //end_time=(MPI_WTIME()-start_time);
645 if (MyGlobals::_Verbose>1)
646 std::cout << "result on proc " << rank << " " << res << " time(sec) " << temps_debut << std::endl;
// Same ring benchmark as TestPersistantMpiRing, but on a sub-communicator
// obtained via MPI_Comm_split, to exercise persistent requests on a
// communicator other than MPI_COMM_WORLD.
// NOTE(review): the `color` computation, `newcomm` declaration, neighbour
// (befo/next) computation, tag setup, buffer fills, and the result check
// (`ok`, `res`) are elided in this view -- confirm against the full file.
649 void MEDPARTITIONER::TestPersistantMpiRingOnCommSplit(int size, int nb)
651 double temps_debut=MPI_Wtime();
652 int rank=MyGlobals::_Rank;
658 //MPI_Comm_dup (MPI_COMM_WORLD, &newcomm) ;
659 MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm);
661 int befo, next, wsize, tagbefo, tagnext;
// Clamp the sub-communicator size to the world size.
663 if (wsize>MyGlobals::_World_Size)
664 wsize=MyGlobals::_World_Size;
671 std::vector<int> x, y;
674 MPI_Request requete0, requete1;
675 MPI_Status statut1, statut2;
// Persistent send/recv set up once on the split communicator.
683 MPI_Ssend_init(&x[0], size, MPI_INT, next, tagnext, newcomm , &requete0);
684 MPI_Recv_init(&y[0], size, MPI_INT, befo, tagbefo, newcomm , &requete1);
685 for(int k=0; k<nb; k++)
687 for (int i=0; i<size; ++i)
689 //Send of big message --> time consuming
690 MPI_Start(&requete0);
691 //Reception of big message --> time consuming
692 for (int i=0; i<size; ++i)
694 MPI_Start(&requete1);
695 //Sequential processing independent of "x"
697 //Sequential processing independent of "y"
699 //cout<<"dsr|"<<rank<<' ';
700 MPI_Wait(&requete1, &statut1);
701 //Sequential processing dependent on "y"
704 for (int i=0; i<size; ++i)
709 if (MyGlobals::_Verbose>9)
// NOTE(review): "0K" uses digit zero -- likely a typo for "OK", kept as-is
// because it is a runtime string.
711 res="0K"+IntToStr(rank);
713 res="KO"+IntToStr(rank);
714 std::cout << res << k << " ";
716 MPI_Wait(&requete0, &statut2);
717 //Sequential processing involving a modification of "x" in memory
723 temps_debut=MPI_Wtime()-temps_debut;
724 MPI_Request_free(&requete1);
725 MPI_Request_free(&requete0);
727 //MPI_Barrier(MPI_COMM_WORLD);
// Release the split communicator created above.
729 MPI_Comm_free(&newcomm);
730 if (MyGlobals::_Verbose>1)
731 std::cout << "resultat proc " << rank <<" " << res << " time(sec) " << temps_debut << std::endl;