// src/ParaMEDMEM/ElementLocator.cxx
// Copyright (C) 2007-2014  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//

#include <mpi.h>
#include "CommInterface.hxx"
#include "ElementLocator.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ParaFIELD.hxx"
#include "ParaMESH.hxx"
#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"
#include "MEDCouplingFieldDouble.hxx"
#include "MEDCouplingAutoRefCountObjectPtr.hxx"
#include "DirectedBoundingBox.hxx"

#include <map>
#include <set>
#include <limits>
#include <string>
#include <vector>
#include <iterator>
#include <algorithm>

using namespace std;

//#define USE_DIRECTED_BB
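// When USE_DIRECTED_BB is defined, domain overlap detection relies on
// INTERP_KERNEL::DirectedBoundingBox (oriented boxes) instead of plain
// axis-aligned bounding boxes.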

namespace ParaMEDMEM 
{
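  /*!
   * Builds a locator for \a sourceField : fuses the local and distant groups into
   * _union_group, computes the bounding boxes of every processor's cell mesh and
   * keeps the union communicator at hand for the exchanges below.
   */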
  ElementLocator::ElementLocator(const ParaFIELD& sourceField,
                                 const ProcessorGroup& distant_group,
                                 const ProcessorGroup& local_group)
    : _local_para_field(sourceField),
      _local_cell_mesh(sourceField.getSupport()->getCellMesh()),
      _local_face_mesh(sourceField.getSupport()->getFaceMesh()),
      _distant_group(distant_group),
      _local_group(local_group)
  { 
    _union_group = _local_group.fuse(distant_group);
    _computeBoundingBoxes();
    _comm=getCommunicator();
  }

  ElementLocator::~ElementLocator()
  {
    delete _union_group;
    delete [] _domain_bounding_boxes;
  }

  const MPI_Comm *ElementLocator::getCommunicator() const
  {
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    return group->getComm();
  }

  NatureOfField ElementLocator::getLocalNature() const
  {
    return _local_para_field.getField()->getNature();
  }

  // ==========================================================================
  // Procedure for exchanging a mesh between a distant and a local processor
  // param idistantrank   proc id in the distant group
  // param distant_mesh   on return, points to a local reconstruction of
  //                      the distant mesh
  // param distant_ids    on return, contains a vector defining a correspondence
  //                      between the distant ids and the ids of the local reconstruction
  // ==========================================================================
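  // A minimal usage sketch from the calling side (assuming a locator built as
  // above; ownership of both outputs goes to the caller):
  //   MEDCouplingPointSet *distant_mesh=0;
  //   int *distant_ids=0;
  //   locator.exchangeMesh(idistantrank,distant_mesh,distant_ids);
  //   if(distant_mesh)
  //     {
  //       // ... use the reconstructed distant mesh ...
  //       distant_mesh->decrRef();
  //       delete [] distant_ids;
  //     }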
  void ElementLocator::exchangeMesh(int idistantrank,
                                    MEDCouplingPointSet*& distant_mesh,
                                    int*& distant_ids)
  {
    int rank = _union_group->translateRank(&_distant_group,idistantrank);

    if (find(_distant_proc_ids.begin(), _distant_proc_ids.end(),rank)==_distant_proc_ids.end())
      return;

    MEDCouplingAutoRefCountObjectPtr<DataArrayInt> elems;
#ifdef USE_DIRECTED_BB
    INTERP_KERNEL::DirectedBoundingBox dbb;
    double* distant_bb = _domain_bounding_boxes+rank*dbb.dataSize(_local_cell_mesh_space_dim);
    dbb.setData(distant_bb);
    elems=_local_cell_mesh->getCellsInBoundingBox(dbb,getBoundingBoxAdjustment());
#else
    double* distant_bb = _domain_bounding_boxes+rank*2*_local_cell_mesh_space_dim;
    elems=_local_cell_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment());
#endif
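    // Restrict the local field support to the cells lying in the distant bounding
    // box, then swap this sub-mesh against the distant one together with the cell
    // id correspondence.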

    DataArrayInt *distant_ids_send;
    MEDCouplingPointSet *send_mesh = (MEDCouplingPointSet *)_local_para_field.getField()->buildSubMeshData(elems->begin(),elems->end(),distant_ids_send);
    _exchangeMesh(send_mesh, distant_mesh, idistantrank, distant_ids_send, distant_ids);
    distant_ids_send->decrRef();

    if(send_mesh)
      send_mesh->decrRef();
  }

  void ElementLocator::exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth)
  {
    CommInterface comm_interface=_union_group->getCommInterface();
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    const MPI_Comm* comm=(group->getComm());
    MPI_Status status;
    // the distant rank must be converted to union numbering before communication
    int idistRankInUnion = group->translateRank(&_distant_group,idistantrank);
    char *recv_buffer=new char[4];
    std::vector<char> send_buffer(4);
    std::copy(sourceMeth.begin(),sourceMeth.end(),send_buffer.begin());
    comm_interface.sendRecv(&send_buffer[0], 4, MPI_CHAR,idistRankInUnion, 1112,
                            recv_buffer, 4, MPI_CHAR,idistRankInUnion, 1112,
                            *comm, &status);
    // do not rely on a terminating '\0' being among the 4 exchanged characters
    targetMeth.assign(recv_buffer,std::find(recv_buffer,recv_buffer+4,'\0'));
    delete [] recv_buffer;
  }


  // ======================
  // Compute bounding boxes
  // ======================
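  // Each processor computes the bounding box of its own cell mesh (axis-aligned,
  // or a DirectedBoundingBox when USE_DIRECTED_BB is defined), the boxes are
  // allgathered over the union group, and the ranks whose box intersects the
  // local one are stored in _distant_proc_ids.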

  void ElementLocator::_computeBoundingBoxes()
  {
    CommInterface comm_interface =_union_group->getCommInterface();
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    const MPI_Comm* comm = group->getComm();
    _local_cell_mesh_space_dim = -1;
    if(_local_cell_mesh->getMeshDimension() != -1)
      _local_cell_mesh_space_dim=_local_cell_mesh->getSpaceDimension();
    int *spaceDimForAll=new int[_union_group->size()];
    comm_interface.allGather(&_local_cell_mesh_space_dim, 1, MPI_INT,
                             spaceDimForAll,1, MPI_INT, 
                             *comm);
    _local_cell_mesh_space_dim=*std::max_element(spaceDimForAll,spaceDimForAll+_union_group->size());
    _is_m1d_corr=((*std::min_element(spaceDimForAll,spaceDimForAll+_union_group->size()))==-1);
    for(int i=0;i<_union_group->size();i++)
      if(spaceDimForAll[i]!=_local_cell_mesh_space_dim && spaceDimForAll[i]!=-1)
        throw INTERP_KERNEL::Exception("Space dimensions do not match !");
    delete [] spaceDimForAll;
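    // The common space dimension is the maximum over all procs; a proc whose cell
    // mesh has mesh dimension -1 reports -1, which is tolerated and tracked
    // through _is_m1d_corr.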
#ifdef USE_DIRECTED_BB
    INTERP_KERNEL::DirectedBoundingBox dbb;
    int bbSize = dbb.dataSize(_local_cell_mesh_space_dim);
    _domain_bounding_boxes = new double[bbSize*_union_group->size()];
    if(_local_cell_mesh->getMeshDimension() != -1)
      dbb = INTERP_KERNEL::DirectedBoundingBox(_local_cell_mesh->getCoords()->getPointer(),
                                               _local_cell_mesh->getNumberOfNodes(),
                                               _local_cell_mesh_space_dim);
    std::vector<double> dbbData = dbb.getData();
    if ( dbbData.size() < bbSize ) dbbData.resize(bbSize,0);
    double * minmax= &dbbData[0];
#else
    int bbSize = 2*_local_cell_mesh_space_dim;
    _domain_bounding_boxes = new double[bbSize*_union_group->size()];
    double * minmax=new double [bbSize];
    if(_local_cell_mesh->getMeshDimension() != -1)
      _local_cell_mesh->getBoundingBox(minmax);
    else
      for(int i=0;i<_local_cell_mesh_space_dim;i++)
        {
          minmax[i*2]=-std::numeric_limits<double>::max();
          minmax[i*2+1]=std::numeric_limits<double>::max();
        }
#endif

    comm_interface.allGather(minmax, bbSize, MPI_DOUBLE,
                             _domain_bounding_boxes,bbSize, MPI_DOUBLE, 
                             *comm);

    for (int i=0; i< _distant_group.size(); i++)
      {
        int rank=_union_group->translateRank(&_distant_group,i);

        if (_intersectsBoundingBox(rank))
          {
            _distant_proc_ids.push_back(rank);
          }
      }
#ifndef USE_DIRECTED_BB
    delete [] minmax;
#endif
  }


  // =============================================
  // Intersect Bounding Box (with a given "irank")
  // =============================================
  bool ElementLocator::_intersectsBoundingBox(int irank)
  {
#ifdef USE_DIRECTED_BB
    INTERP_KERNEL::DirectedBoundingBox local_dbb, distant_dbb;
    local_dbb.setData( _domain_bounding_boxes+_union_group->myRank()*local_dbb.dataSize( _local_cell_mesh_space_dim ));
    distant_dbb.setData( _domain_bounding_boxes+irank*distant_dbb.dataSize( _local_cell_mesh_space_dim ));
    return !local_dbb.isDisjointWith( distant_dbb );
#else
    double*  local_bb = _domain_bounding_boxes+_union_group->myRank()*2*_local_cell_mesh_space_dim;
    double*  distant_bb =  _domain_bounding_boxes+irank*2*_local_cell_mesh_space_dim;

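    // Axis-aligned overlap test with a small tolerance: the boxes are declared
    // disjoint as soon as they are separated along one coordinate axis.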
    for (int idim=0; idim < _local_cell_mesh_space_dim; idim++)
      {
        const double eps = 1e-12;
        bool intersects = (distant_bb[idim*2]<local_bb[idim*2+1]+eps)
          && (local_bb[idim*2]<distant_bb[idim*2+1]+eps);
        if (!intersects) return false; 
      }
    return true;
#endif
  } 

  // ======================
  // Exchanging meshes data
  // ======================
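  // The exchange is symmetric (sendRecv) and proceeds in three stages:
  //  1. exchange of the "tiny" serialization info giving the sizes to allocate,
  //  2. exchange of the serialized int and double arrays, followed by the
  //     unserialization of the distant mesh,
  //  3. exchange of the cell id correspondence arrays.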
  void ElementLocator::_exchangeMesh( MEDCouplingPointSet* local_mesh,
                                      MEDCouplingPointSet*& distant_mesh,
                                      int iproc_distant,
                                      const DataArrayInt* distant_ids_send,
                                      int*& distant_ids_recv)
  {
    CommInterface comm_interface=_union_group->getCommInterface();

    // First stage : exchanging sizes
    // ------------------------------
    vector<double> tinyInfoLocalD,tinyInfoDistantD(1);//not used for the moment
    vector<int> tinyInfoLocal,tinyInfoDistant;
    vector<string> tinyInfoLocalS;
    //Getting the tiny info of the local mesh to allow the distant proc to
    //allocate and initialize the transmitted mesh.
    local_mesh->getTinySerializationInformation(tinyInfoLocalD,tinyInfoLocal,tinyInfoLocalS);
    tinyInfoLocal.push_back(distant_ids_send->getNumberOfTuples());
    tinyInfoDistant.resize(tinyInfoLocal.size());
    std::fill(tinyInfoDistant.begin(),tinyInfoDistant.end(),0);
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    const MPI_Comm* comm=group->getComm();
    MPI_Status status; 

    // iproc_distant is the rank of the proc in the distant group;
    // it must be converted to union numbering before communication
    int iprocdistant_in_union = group->translateRank(&_distant_group,
                                                     iproc_distant);

    comm_interface.sendRecv(&tinyInfoLocal[0], tinyInfoLocal.size(), MPI_INT, iprocdistant_in_union, 1112,
                            &tinyInfoDistant[0], tinyInfoDistant.size(), MPI_INT,iprocdistant_in_union,1112,
                            *comm, &status);
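    // Second stage: exchange the serialized int and double arrays and rebuild
    // the distant mesh from them.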
    DataArrayInt *v1Local=0;
    DataArrayDouble *v2Local=0;
    DataArrayInt *v1Distant=DataArrayInt::New();
    DataArrayDouble *v2Distant=DataArrayDouble::New();
    //serialization of local mesh to send data to distant proc.
    local_mesh->serialize(v1Local,v2Local);
    //Building the right instance for the copy of the distant mesh.
    MEDCouplingPointSet *distant_mesh_tmp=MEDCouplingPointSet::BuildInstanceFromMeshType((MEDCouplingMeshType)tinyInfoDistant[0]);
    std::vector<std::string> unusedTinyDistantSts;
    distant_mesh_tmp->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
    int nbLocalElems=0;
    int nbDistElem=0;
    int *ptLocal=0;
    int *ptDist=0;
    if(v1Local)
      {
        nbLocalElems=v1Local->getNbOfElems();
        ptLocal=v1Local->getPointer();
      }
    if(v1Distant)
      {
        nbDistElem=v1Distant->getNbOfElems();
        ptDist=v1Distant->getPointer();
      }
    comm_interface.sendRecv(ptLocal, nbLocalElems, MPI_INT,
                            iprocdistant_in_union, 1111,
                            ptDist, nbDistElem, MPI_INT,
                            iprocdistant_in_union,1111,
                            *comm, &status);
    nbLocalElems=0;
    double *ptLocal2=0;
    double *ptDist2=0;
    if(v2Local)
      {
        nbLocalElems=v2Local->getNbOfElems();
        ptLocal2=v2Local->getPointer();
      }
    nbDistElem=0;
    if(v2Distant)
      {
        nbDistElem=v2Distant->getNbOfElems();
        ptDist2=v2Distant->getPointer();
      }
    comm_interface.sendRecv(ptLocal2, nbLocalElems, MPI_DOUBLE,
                            iprocdistant_in_union, 1112,
                            ptDist2, nbDistElem, MPI_DOUBLE,
                            iprocdistant_in_union, 1112, 
                            *comm, &status);
    //
    distant_mesh=distant_mesh_tmp;
    //finish unserialization
    distant_mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
    //
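    // Third stage: exchange the cell id correspondence so that each received cell
    // can be mapped back to its id on the distant proc.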
    distant_ids_recv=new int[tinyInfoDistant.back()];
    comm_interface.sendRecv(const_cast<void *>(reinterpret_cast<const void *>(distant_ids_send->getConstPointer())),tinyInfoLocal.back(), MPI_INT,
                            iprocdistant_in_union, 1113,
                            distant_ids_recv,tinyInfoDistant.back(), MPI_INT,
                            iprocdistant_in_union,1113,
                            *comm, &status);
    if(v1Local)
      v1Local->decrRef();
    if(v2Local)
      v2Local->decrRef();
    if(v1Distant)
      v1Distant->decrRef();
    if(v2Distant)
      v2Distant->decrRef();
  }

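  // The methods below implement the point-to-point protocol between the "working"
  // side (methods suffixed W) and the "lazy" side (methods suffixed L). Each method
  // is paired with the one quoted in its comment on the other side, and matching
  // sends/receives use the same MPI tags.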
  /*!
   * connected with ElementLocator::sendPolicyToWorkingSideL
   */
  void ElementLocator::recvPolicyFromLazySideW(std::vector<int>& policy)
  {
    policy.resize(_distant_proc_ids.size());
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int toRecv;
        comm.recv((void *)&toRecv,1,MPI_INT,*iter,1120,*_comm,&status);
        policy[procId]=toRecv;
      }
  }

  /*!
   * connected with ElementLocator::recvFromWorkingSideL
   */
  void ElementLocator::sendSumToLazySideW(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds)
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& eltIds=distantLocEltIds[procId];
        const vector<double>& valued=partialSumRelToDistantIds[procId];
        int lgth=eltIds.size();
        comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&eltIds[0])),lgth,MPI_INT,*iter,1115,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&valued[0])),lgth,MPI_DOUBLE,*iter,1116,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::sendToWorkingSideL
   */
  void ElementLocator::recvSumFromLazySideW(std::vector< std::vector<double> >& globalSumRelToDistantIds)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        std::vector<double>& vec=globalSumRelToDistantIds[procId];
        comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvLocalIdsFromWorkingSideL
   */
  void ElementLocator::sendLocalIdsToLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds)
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& eltIds=distantLocEltIds[procId];
        int lgth=eltIds.size();
        comm.send(&lgth,1,MPI_INT,*iter,1121,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&eltIds[0])),lgth,MPI_INT,*iter,1122,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::sendGlobalIdsToWorkingSideL
   */
  void ElementLocator::recvGlobalIdsFromLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds, std::vector< std::vector<int> >& globalIds)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    globalIds.resize(_distant_proc_ids.size());
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const std::vector<int>& vec=distantLocEltIds[procId];
        std::vector<int>& global=globalIds[procId];
        global.resize(vec.size());
        comm.recv(&global[0],vec.size(),MPI_INT,*iter,1123,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::sendCandidatesGlobalIdsToWorkingSideL
   */
  void ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector<int> >& globalIds)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    globalIds.resize(_distant_proc_ids.size());
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        std::vector<int>& global=globalIds[procId];
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1132,*_comm,&status);
        global.resize(lgth);
        comm.recv(&global[0],lgth,MPI_INT,*iter,1133,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvSumFromWorkingSideL
   */
  void ElementLocator::sendPartialSumToLazyProcsW(const std::vector<int>& distantGlobIds, const std::vector<double>& sum)
  {
    int procId=0;
    CommInterface comm;
    int lgth=distantGlobIds.size();
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        comm.send(&lgth,1,MPI_INT,*iter,1124,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&distantGlobIds[0])),lgth,MPI_INT,*iter,1125,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&sum[0])),lgth,MPI_DOUBLE,*iter,1126,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::recvCandidatesForAddElementsL
   */
  void ElementLocator::sendCandidatesForAddElementsW(const std::vector<int>& distantGlobIds)
  {
    int procId=0;
    CommInterface comm;
    int lgth=distantGlobIds.size();
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&lgth)),1,MPI_INT,*iter,1128,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&distantGlobIds[0])),lgth,MPI_INT,*iter,1129,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::sendAddElementsToWorkingSideL
   */
  void ElementLocator::recvAddElementsFromLazyProcsW(std::vector<std::vector<int> >& elementsToAdd)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    int lgth=_distant_proc_ids.size();
    elementsToAdd.resize(lgth);
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int locLgth;
        std::vector<int>& eltToFeed=elementsToAdd[procId];
        comm.recv(&locLgth,1,MPI_INT,*iter,1130,*_comm,&status);
        eltToFeed.resize(locLgth);
        comm.recv(&eltToFeed[0],locLgth,MPI_INT,*iter,1131,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvPolicyFromLazySideW
   */
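  // CUMULATIVE_POLICY is announced when the local field owns a cumulative global
  // numbering, NO_POST_TREATMENT_POLICY otherwise; the same value is sent to
  // every distant proc in interaction.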
  int ElementLocator::sendPolicyToWorkingSideL()
  {
    CommInterface comm;
    int toSend;
    DataArrayInt *isCumulative=_local_para_field.returnCumulativeGlobalNumbering();
    if(isCumulative)
      {
        toSend=CUMULATIVE_POLICY;
        isCumulative->decrRef();
      }
    else
      toSend=NO_POST_TREATMENT_POLICY;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++)
      comm.send(&toSend,1,MPI_INT,*iter,1120,*_comm);
    return toSend;
  }

  /*!
   * connected with ElementLocator::sendSumToLazySideW
   */
  void ElementLocator::recvFromWorkingSideL()
  {
    _values_added.resize(_local_para_field.getField()->getNumberOfTuples());
    int procId=0;
    CommInterface comm;
    _ids_per_working_proc.resize(_distant_proc_ids.size());
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status);
        vector<int>& ids=_ids_per_working_proc[procId];
        ids.resize(lgth);
        vector<double> values(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status);
        comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status);
        for(int i=0;i<lgth;i++)
          _values_added[ids[i]]+=values[i];
      }
  }

  /*!
   * connected with ElementLocator::recvSumFromLazySideW
   */
  void ElementLocator::sendToWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        vector<int>& ids=_ids_per_working_proc[procId];
        vector<double> valsToSend(ids.size());
        vector<double>::iterator iter3=valsToSend.begin();
        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++)
          *iter3=_values_added[*iter2];
        comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm);
        //ids.clear();
      }
    //_ids_per_working_proc.clear();
  }

  /*!
   * connected with ElementLocator::sendLocalIdsToLazyProcsW
   */
  void ElementLocator::recvLocalIdsFromWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    _ids_per_working_proc.resize(_distant_proc_ids.size());
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int lgth;
        vector<int>& ids=_ids_per_working_proc[procId];
        comm.recv(&lgth,1,MPI_INT,*iter,1121,*_comm,&status);
        ids.resize(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1122,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvGlobalIdsFromLazyProcsW
   */
  void ElementLocator::sendGlobalIdsToWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
    const int *globalIdsC=globalIds->getConstPointer();
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& ids=_ids_per_working_proc[procId];
        vector<int> valsToSend(ids.size());
        vector<int>::iterator iter1=valsToSend.begin();
        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
          *iter1=globalIdsC[*iter2];
        comm.send(&valsToSend[0],ids.size(),MPI_INT,*iter,1123,*_comm);
      }
    if(globalIds)
      globalIds->decrRef();
  }

  /*!
   * connected with ElementLocator::sendPartialSumToLazyProcsW
   */
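  // Receives from each working proc a list of global ids together with its partial
  // sums, accumulates the contributions per global id, then writes the reduced
  // value back into _values_per_working_proc for every contributor.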
  void ElementLocator::recvSumFromWorkingSideL()
  {
    int procId=0;
    int wProcSize=_distant_proc_ids.size();
    CommInterface comm;
    _ids_per_working_proc.resize(wProcSize);
    _values_per_working_proc.resize(wProcSize);
    MPI_Status status;
    std::map<int,double> sums;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1124,*_comm,&status);
        vector<int>& ids=_ids_per_working_proc[procId];
        vector<double>& vals=_values_per_working_proc[procId];
        ids.resize(lgth);
        vals.resize(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1125,*_comm,&status);
        comm.recv(&vals[0],lgth,MPI_DOUBLE,*iter,1126,*_comm,&status);
        vector<int>::const_iterator iter1=ids.begin();
        vector<double>::const_iterator iter2=vals.begin();
        for(;iter1!=ids.end();iter1++,iter2++)
          sums[*iter1]+=*iter2;
      }
    //assign sum to prepare sending to working side
    for(procId=0;procId<wProcSize;procId++)
      {
        vector<int>& ids=_ids_per_working_proc[procId];
        vector<double>& vals=_values_per_working_proc[procId];
        vector<int>::const_iterator iter1=ids.begin();
        vector<double>::iterator iter2=vals.begin();
        for(;iter1!=ids.end();iter1++,iter2++)
          *iter2=sums[*iter1];
        ids.clear();
      }
  }

  /*!
   * For each working proc Wi, computes and pushes into _ids_per_working_proc3,
   * if any exist, the local ids of the nodes that are in interaction with another
   * lazy proc than this one and that exist on this proc \b but have no interaction
   * with this proc.
   * The computation is performed here; sendAddElementsToWorkingSideL is only in
   * charge of sending the precomputed _ids_per_working_proc3 attribute.
   * connected with ElementLocator::sendCandidatesForAddElementsW
   */
  void ElementLocator::recvCandidatesForAddElementsL()
  {
    int procId=0;
    int wProcSize=_distant_proc_ids.size();
    CommInterface comm;
    _ids_per_working_proc3.resize(wProcSize);
    MPI_Status status;
    std::map<int,double> sums;
    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
    const int *globalIdsC=globalIds->getConstPointer();
    int nbElts=globalIds->getNumberOfTuples();
    std::set<int> globalIdsS(globalIdsC,globalIdsC+nbElts);
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const std::vector<int>& ids0=_ids_per_working_proc[procId];
        int lgth0=ids0.size();
        std::set<int> elts0;
        for(int i=0;i<lgth0;i++)
          elts0.insert(globalIdsC[ids0[i]]);
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1128,*_comm,&status);
        vector<int> ids(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1129,*_comm,&status);
        set<int> ids1(ids.begin(),ids.end());
        ids.clear();
        set<int> tmp5,tmp6;
        set_intersection(globalIdsS.begin(),globalIdsS.end(),ids1.begin(),ids1.end(),inserter(tmp5,tmp5.begin()));
        set_difference(tmp5.begin(),tmp5.end(),elts0.begin(),elts0.end(),inserter(tmp6,tmp6.begin()));
        std::vector<int>& ids2=_ids_per_working_proc3[procId];
        ids2.resize(tmp6.size());
        std::copy(tmp6.begin(),tmp6.end(),ids2.begin());
        //global->local
        for(std::vector<int>::iterator iter2=ids2.begin();iter2!=ids2.end();iter2++)
          *iter2=std::find(globalIdsC,globalIdsC+nbElts,*iter2)-globalIdsC;
      }
    if(globalIds)
      globalIds->decrRef();
  }

  /*!
   * connected with ElementLocator::recvAddElementsFromLazyProcsW
   */
  void ElementLocator::sendAddElementsToWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const std::vector<int>& vals=_ids_per_working_proc3[procId];
        int size=vals.size();
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&size)),1,MPI_INT,*iter,1130,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&vals[0])),size,MPI_INT,*iter,1131,*_comm);
      }
  }

  /*!
   * To reduce the amount of data, this method sends to each working proc Wi only
   * the nodes that are in interaction with Wi \b and located on the boundary.
   * connected with ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW
   */
  void ElementLocator::sendCandidatesGlobalIdsToWorkingSideL()
  { 
    int procId=0;
    CommInterface comm;
    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
    const int *globalIdsC=globalIds->getConstPointer();
    MEDCouplingAutoRefCountObjectPtr<DataArrayInt> candidates=_local_para_field.getSupport()->getCellMesh()->findBoundaryNodes();
    for(int *iter1=candidates->getPointer();iter1!=candidates->getPointer()+candidates->getNumberOfTuples();iter1++)
      (*iter1)=globalIdsC[*iter1];
    std::set<int> candidatesS(candidates->begin(),candidates->end());
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& ids=_ids_per_working_proc[procId];
        vector<int> valsToSend(ids.size());
        vector<int>::iterator iter1=valsToSend.begin();
        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
          *iter1=globalIdsC[*iter2];
        std::set<int> tmp2(valsToSend.begin(),valsToSend.end());
        std::vector<int> tmp3;
        set_intersection(candidatesS.begin(),candidatesS.end(),tmp2.begin(),tmp2.end(),std::back_insert_iterator< std::vector<int> >(tmp3));
        int lgth=tmp3.size();
        comm.send(&lgth,1,MPI_INT,*iter,1132,*_comm);
        comm.send(&tmp3[0],lgth,MPI_INT,*iter,1133,*_comm);
      }
    if(globalIds)
      globalIds->decrRef();
  }
}