// Copyright (C) 2007-2015  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//

#include <mpi.h>
#include "CommInterface.hxx"
#include "ElementLocator.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ParaFIELD.hxx"
#include "ParaMESH.hxx"
#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"
#include "MEDCouplingFieldDouble.hxx"
#include "MCAuto.hxx"
#include "DirectedBoundingBox.hxx"

#include <map>
#include <set>
#include <limits>

using namespace std;

//#define USE_DIRECTED_BB

namespace MEDCoupling
{
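  /*!
   * Builds the locator for the given source field: fuses the local and distant process
   * groups into a union group, computes the bounding boxes of all procs and stores the
   * MPI communicator of the union group.
   */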
  ElementLocator::ElementLocator(const ParaFIELD& sourceField,
                                 const ProcessorGroup& distant_group,
                                 const ProcessorGroup& local_group)
    : _local_para_field(sourceField),
      _local_cell_mesh(sourceField.getSupport()->getCellMesh()),
      _local_face_mesh(sourceField.getSupport()->getFaceMesh()),
      _distant_group(distant_group),
      _local_group(local_group)
  {
    _union_group = _local_group.fuse(distant_group);
    _computeBoundingBoxes();
    _comm=getCommunicator();
  }

  ElementLocator::~ElementLocator()
  {
    delete _union_group;
    delete [] _domain_bounding_boxes;
  }

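  //! Returns the MPI communicator of the union of the local and distant groups.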
  const MPI_Comm *ElementLocator::getCommunicator() const
  {
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    return group->getComm();
  }

  NatureOfField ElementLocator::getLocalNature() const
  {
    return _local_para_field.getField()->getNature();
  }

  /*! Procedure for exchanging a mesh between a distant proc and the local processor
   \param idistantrank proc id in the distant group
   \param distant_mesh on return, points to a local reconstruction of
          the distant mesh
   \param distant_ids on return, contains an array defining the correspondence
          between the distant ids and the ids of the local reconstruction
  */
  void ElementLocator::exchangeMesh(int idistantrank,
                                    MEDCouplingPointSet*& distant_mesh,
                                    int*& distant_ids)
  {
    int rank = _union_group->translateRank(&_distant_group,idistantrank);

    if (find(_distant_proc_ids.begin(), _distant_proc_ids.end(),rank)==_distant_proc_ids.end())
      return;

    MCAuto<DataArrayInt> elems;
#ifdef USE_DIRECTED_BB
    INTERP_KERNEL::DirectedBoundingBox dbb;
    double* distant_bb = _domain_bounding_boxes+rank*dbb.dataSize(_local_cell_mesh_space_dim);
    dbb.setData(distant_bb);
    elems=_local_cell_mesh->getCellsInBoundingBox(dbb,getBoundingBoxAdjustment());
#else
    double* distant_bb = _domain_bounding_boxes+rank*2*_local_cell_mesh_space_dim;
    elems=_local_cell_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment());
#endif

    DataArrayInt *distant_ids_send;
    MEDCouplingPointSet *send_mesh = (MEDCouplingPointSet *)_local_para_field.getField()->buildSubMeshData(elems->begin(),elems->end(),distant_ids_send);
    _exchangeMesh(send_mesh, distant_mesh, idistantrank, distant_ids_send, distant_ids);
    distant_ids_send->decrRef();

    if(send_mesh)
      send_mesh->decrRef();
  }

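  /*!
   * Exchanges the interpolation method string (4 characters) with the distant proc
   * \a idistantrank; on return \a targetMeth contains the method used on the distant side.
   */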
  void ElementLocator::exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth)
  {
    CommInterface comm_interface=_union_group->getCommInterface();
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    const MPI_Comm* comm=(group->getComm());
    MPI_Status status;
    // the distant rank must be converted to union numbering before communication
    int idistRankInUnion = group->translateRank(&_distant_group,idistantrank);
    char *recv_buffer=new char[4];
    std::vector<char> send_buffer(4);
    std::copy(sourceMeth.begin(),sourceMeth.end(),send_buffer.begin());
    comm_interface.sendRecv(&send_buffer[0], 4, MPI_CHAR,idistRankInUnion, 1112,
                            recv_buffer, 4, MPI_CHAR,idistRankInUnion, 1112,
                            *comm, &status);
    targetMeth=recv_buffer;
    delete [] recv_buffer;
  }

  /*!
   * Computes the bounding boxes of all procs and fills _distant_proc_ids with the ranks
   * of the distant procs whose bounding box intersects the local one.
   */
  void ElementLocator::_computeBoundingBoxes()
  {
    CommInterface comm_interface =_union_group->getCommInterface();
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    const MPI_Comm* comm = group->getComm();
    _local_cell_mesh_space_dim = -1;
    if(_local_cell_mesh->getMeshDimension() != -1)
      _local_cell_mesh_space_dim=_local_cell_mesh->getSpaceDimension();
    int *spaceDimForAll=new int[_union_group->size()];
    comm_interface.allGather(&_local_cell_mesh_space_dim, 1, MPI_INT,
                             spaceDimForAll,1, MPI_INT,
                             *comm);
    _local_cell_mesh_space_dim=*std::max_element(spaceDimForAll,spaceDimForAll+_union_group->size());
    _is_m1d_corr=((*std::min_element(spaceDimForAll,spaceDimForAll+_union_group->size()))==-1);
    for(int i=0;i<_union_group->size();i++)
      if(spaceDimForAll[i]!=_local_cell_mesh_space_dim && spaceDimForAll[i]!=-1)
        throw INTERP_KERNEL::Exception("Space dimensions do not match!");
    delete [] spaceDimForAll;
#ifdef USE_DIRECTED_BB
    INTERP_KERNEL::DirectedBoundingBox dbb;
    int bbSize = dbb.dataSize(_local_cell_mesh_space_dim);
    _domain_bounding_boxes = new double[bbSize*_union_group->size()];
    if(_local_cell_mesh->getMeshDimension() != -1)
      dbb = INTERP_KERNEL::DirectedBoundingBox(_local_cell_mesh->getCoords()->getPointer(),
                                               _local_cell_mesh->getNumberOfNodes(),
                                               _local_cell_mesh_space_dim);
    std::vector<double> dbbData = dbb.getData();
    if ( dbbData.size() < bbSize ) dbbData.resize(bbSize,0);
    double * minmax= &dbbData[0];
#else
    int bbSize = 2*_local_cell_mesh_space_dim;
    _domain_bounding_boxes = new double[bbSize*_union_group->size()];
    double * minmax=new double [bbSize];
    if(_local_cell_mesh->getMeshDimension() != -1)
      _local_cell_mesh->getBoundingBox(minmax);
    else
      for(int i=0;i<_local_cell_mesh_space_dim;i++)
        {
          minmax[i*2]=-std::numeric_limits<double>::max();
          minmax[i*2+1]=std::numeric_limits<double>::max();
        }
#endif

    comm_interface.allGather(minmax, bbSize, MPI_DOUBLE,
                             _domain_bounding_boxes,bbSize, MPI_DOUBLE,
                             *comm);

    for (int i=0; i< _distant_group.size(); i++)
      {
        int rank=_union_group->translateRank(&_distant_group,i);

        if (_intersectsBoundingBox(rank))
          {
            _distant_proc_ids.push_back(rank);
          }
      }
#ifdef USE_DIRECTED_BB
#else
    delete [] minmax;
#endif
  }

  /*!
   * Intersects the local bounding box with the bounding box of the distant proc \a irank.
   */
  bool ElementLocator::_intersectsBoundingBox(int irank)
  {
#ifdef USE_DIRECTED_BB
    INTERP_KERNEL::DirectedBoundingBox local_dbb, distant_dbb;
    local_dbb.setData( _domain_bounding_boxes+_union_group->myRank()*local_dbb.dataSize( _local_cell_mesh_space_dim ));
    distant_dbb.setData( _domain_bounding_boxes+irank*distant_dbb.dataSize( _local_cell_mesh_space_dim ));
    return !local_dbb.isDisjointWith( distant_dbb );
#else
    double*  local_bb = _domain_bounding_boxes+_union_group->myRank()*2*_local_cell_mesh_space_dim;
    double*  distant_bb = _domain_bounding_boxes+irank*2*_local_cell_mesh_space_dim;

    const double eps = 1e-12;
    for (int idim=0; idim < _local_cell_mesh_space_dim; idim++)
      {
        bool intersects = (distant_bb[idim*2]<local_bb[idim*2+1]+eps)
          && (local_bb[idim*2]<distant_bb[idim*2+1]+eps);
        if (!intersects) return false;
      }
    return true;
#endif
  }

  /*!
   * Exchanges mesh data (serialized mesh and corresponding id array) with the distant proc \a iproc_distant.
   */
  void ElementLocator::_exchangeMesh( MEDCouplingPointSet* local_mesh,
                                      MEDCouplingPointSet*& distant_mesh,
                                      int iproc_distant,
                                      const DataArrayInt* distant_ids_send,
                                      int*& distant_ids_recv)
  {
    CommInterface comm_interface=_union_group->getCommInterface();

    // First stage : exchanging sizes
    // ------------------------------
    vector<double> tinyInfoLocalD,tinyInfoDistantD(1);//not used for the moment
    vector<int> tinyInfoLocal,tinyInfoDistant;
    vector<string> tinyInfoLocalS;
    //Getting tiny info of local mesh to allow the distant proc to initialize and allocate
    //the transmitted mesh.
    local_mesh->getTinySerializationInformation(tinyInfoLocalD,tinyInfoLocal,tinyInfoLocalS);
    tinyInfoLocal.push_back(distant_ids_send->getNumberOfTuples());
    tinyInfoDistant.resize(tinyInfoLocal.size());
    std::fill(tinyInfoDistant.begin(),tinyInfoDistant.end(),0);
    MPIProcessorGroup* group=static_cast<MPIProcessorGroup*> (_union_group);
    const MPI_Comm* comm=group->getComm();
    MPI_Status status;

    // iproc_distant is the rank of the proc in the distant group;
    // it must be converted to union numbering before communication
    int iprocdistant_in_union = group->translateRank(&_distant_group,
                                                     iproc_distant);

    comm_interface.sendRecv(&tinyInfoLocal[0], tinyInfoLocal.size(), MPI_INT, iprocdistant_in_union, 1112,
                            &tinyInfoDistant[0], tinyInfoDistant.size(), MPI_INT,iprocdistant_in_union,1112,
                            *comm, &status);
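
    // Second stage : exchanging the serialized mesh data (int and double arrays)
    // ---------------------------------------------------------------------------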
    DataArrayInt *v1Local=0;
    DataArrayDouble *v2Local=0;
    DataArrayInt *v1Distant=DataArrayInt::New();
    DataArrayDouble *v2Distant=DataArrayDouble::New();
    //serialization of local mesh to send data to distant proc.
    local_mesh->serialize(v1Local,v2Local);
    //Building the right instance for the copy of the distant mesh.
    MEDCouplingPointSet *distant_mesh_tmp=MEDCouplingPointSet::BuildInstanceFromMeshType((MEDCouplingMeshType)tinyInfoDistant[0]);
    std::vector<std::string> unusedTinyDistantSts;
    distant_mesh_tmp->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
    int nbLocalElems=0;
    int nbDistElem=0;
    int *ptLocal=0;
    int *ptDist=0;
    if(v1Local)
      {
        nbLocalElems=v1Local->getNbOfElems();
        ptLocal=v1Local->getPointer();
      }
    if(v1Distant)
      {
        nbDistElem=v1Distant->getNbOfElems();
        ptDist=v1Distant->getPointer();
      }
    comm_interface.sendRecv(ptLocal, nbLocalElems, MPI_INT,
                            iprocdistant_in_union, 1111,
                            ptDist, nbDistElem, MPI_INT,
                            iprocdistant_in_union,1111,
                            *comm, &status);
    nbLocalElems=0;
    double *ptLocal2=0;
    double *ptDist2=0;
    if(v2Local)
      {
        nbLocalElems=v2Local->getNbOfElems();
        ptLocal2=v2Local->getPointer();
      }
    nbDistElem=0;
    if(v2Distant)
      {
        nbDistElem=v2Distant->getNbOfElems();
        ptDist2=v2Distant->getPointer();
      }
    comm_interface.sendRecv(ptLocal2, nbLocalElems, MPI_DOUBLE,
                            iprocdistant_in_union, 1112,
                            ptDist2, nbDistElem, MPI_DOUBLE,
                            iprocdistant_in_union, 1112,
                            *comm, &status);
    //
    distant_mesh=distant_mesh_tmp;
    //finish unserialization
    distant_mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
    //
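    // Last stage : exchanging the arrays of corresponding ids (distant_ids_send / distant_ids_recv)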
    distant_ids_recv=new int[tinyInfoDistant.back()];
    comm_interface.sendRecv(const_cast<void *>(reinterpret_cast<const void *>(distant_ids_send->getConstPointer())),tinyInfoLocal.back(), MPI_INT,
                            iprocdistant_in_union, 1113,
                            distant_ids_recv,tinyInfoDistant.back(), MPI_INT,
                            iprocdistant_in_union,1113,
                            *comm, &status);
    if(v1Local)
      v1Local->decrRef();
    if(v2Local)
      v2Local->decrRef();
    if(v1Distant)
      v1Distant->decrRef();
    if(v2Distant)
      v2Distant->decrRef();
  }

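  // In the methods below, the W suffix denotes the working side and the L suffix the lazy
  // side; each method is paired with the one named in its "connected with" comment.
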
  /*!
   * connected with ElementLocator::sendPolicyToWorkingSideL
   */
  void ElementLocator::recvPolicyFromLazySideW(std::vector<int>& policy)
  {
    policy.resize(_distant_proc_ids.size());
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int toRecv;
        comm.recv((void *)&toRecv,1,MPI_INT,*iter,1120,*_comm,&status);
        policy[procId]=toRecv;
      }
  }

  /*!
   * connected with ElementLocator::recvFromWorkingSideL
   */
  void ElementLocator::sendSumToLazySideW(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds)
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& eltIds=distantLocEltIds[procId];
        const vector<double>& valued=partialSumRelToDistantIds[procId];
        int lgth=eltIds.size();
        comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&eltIds[0])),lgth,MPI_INT,*iter,1115,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&valued[0])),lgth,MPI_DOUBLE,*iter,1116,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::sendToWorkingSideL
   */
  void ElementLocator::recvSumFromLazySideW(std::vector< std::vector<double> >& globalSumRelToDistantIds)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        std::vector<double>& vec=globalSumRelToDistantIds[procId];
        comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvLocalIdsFromWorkingSideL
   */
  void ElementLocator::sendLocalIdsToLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds)
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& eltIds=distantLocEltIds[procId];
        int lgth=eltIds.size();
        comm.send(&lgth,1,MPI_INT,*iter,1121,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&eltIds[0])),lgth,MPI_INT,*iter,1122,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::sendGlobalIdsToWorkingSideL
   */
  void ElementLocator::recvGlobalIdsFromLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds, std::vector< std::vector<int> >& globalIds)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    globalIds.resize(_distant_proc_ids.size());
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const std::vector<int>& vec=distantLocEltIds[procId];
        std::vector<int>& global=globalIds[procId];
        global.resize(vec.size());
        comm.recv(&global[0],vec.size(),MPI_INT,*iter,1123,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::sendCandidatesGlobalIdsToWorkingSideL
   */
  void ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector<int> >& globalIds)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    globalIds.resize(_distant_proc_ids.size());
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        std::vector<int>& global=globalIds[procId];
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1132,*_comm,&status);
        global.resize(lgth);
        comm.recv(&global[0],lgth,MPI_INT,*iter,1133,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvSumFromWorkingSideL
   */
  void ElementLocator::sendPartialSumToLazyProcsW(const std::vector<int>& distantGlobIds, const std::vector<double>& sum)
  {
    int procId=0;
    CommInterface comm;
    int lgth=distantGlobIds.size();
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        comm.send(&lgth,1,MPI_INT,*iter,1124,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&distantGlobIds[0])),lgth,MPI_INT,*iter,1125,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&sum[0])),lgth,MPI_DOUBLE,*iter,1126,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::recvCandidatesForAddElementsL
   */
  void ElementLocator::sendCandidatesForAddElementsW(const std::vector<int>& distantGlobIds)
  {
    int procId=0;
    CommInterface comm;
    int lgth=distantGlobIds.size();
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&lgth)),1,MPI_INT,*iter,1128,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&distantGlobIds[0])),lgth,MPI_INT,*iter,1129,*_comm);
      }
  }

  /*!
   * connected with ElementLocator::sendAddElementsToWorkingSideL
   */
  void ElementLocator::recvAddElementsFromLazyProcsW(std::vector<std::vector<int> >& elementsToAdd)
  {
    int procId=0;
    CommInterface comm;
    MPI_Status status;
    int lgth=_distant_proc_ids.size();
    elementsToAdd.resize(lgth);
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int locLgth;
        std::vector<int>& eltToFeed=elementsToAdd[procId];
        comm.recv(&locLgth,1,MPI_INT,*iter,1130,*_comm,&status);
        eltToFeed.resize(locLgth);
        comm.recv(&eltToFeed[0],locLgth,MPI_INT,*iter,1131,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvPolicyFromLazySideW
   */
  int ElementLocator::sendPolicyToWorkingSideL()
  {
    CommInterface comm;
    int toSend;
    DataArrayInt *isCumulative=_local_para_field.returnCumulativeGlobalNumbering();
    if(isCumulative)
      {
        toSend=CUMULATIVE_POLICY;
        isCumulative->decrRef();
      }
    else
      toSend=NO_POST_TREATMENT_POLICY;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++)
      comm.send(&toSend,1,MPI_INT,*iter,1120,*_comm);
    return toSend;
  }

  /*!
   * connected with ElementLocator::sendSumToLazySideW
   */
  void ElementLocator::recvFromWorkingSideL()
  {
    _values_added.resize(_local_para_field.getField()->getNumberOfTuples());
    int procId=0;
    CommInterface comm;
    _ids_per_working_proc.resize(_distant_proc_ids.size());
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status);
        vector<int>& ids=_ids_per_working_proc[procId];
        ids.resize(lgth);
        vector<double> values(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status);
        comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status);
        for(int i=0;i<lgth;i++)
          _values_added[ids[i]]+=values[i];
      }
  }

  /*!
   * connected with ElementLocator::recvSumFromLazySideW
   */
  void ElementLocator::sendToWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        vector<int>& ids=_ids_per_working_proc[procId];
        vector<double> valsToSend(ids.size());
        vector<double>::iterator iter3=valsToSend.begin();
        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++)
          *iter3=_values_added[*iter2];
        comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm);
        //ids.clear();
      }
    //_ids_per_working_proc.clear();
  }

  /*!
   * connected with ElementLocator::sendLocalIdsToLazyProcsW
   */
  void ElementLocator::recvLocalIdsFromWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    _ids_per_working_proc.resize(_distant_proc_ids.size());
    MPI_Status status;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int lgth;
        vector<int>& ids=_ids_per_working_proc[procId];
        comm.recv(&lgth,1,MPI_INT,*iter,1121,*_comm,&status);
        ids.resize(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1122,*_comm,&status);
      }
  }

  /*!
   * connected with ElementLocator::recvGlobalIdsFromLazyProcsW
   */
  void ElementLocator::sendGlobalIdsToWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
    const int *globalIdsC=globalIds->getConstPointer();
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& ids=_ids_per_working_proc[procId];
        vector<int> valsToSend(ids.size());
        vector<int>::iterator iter1=valsToSend.begin();
        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
          *iter1=globalIdsC[*iter2];
        comm.send(&valsToSend[0],ids.size(),MPI_INT,*iter,1123,*_comm);
      }
    if(globalIds)
      globalIds->decrRef();
  }

  /*!
   * connected with ElementLocator::sendPartialSumToLazyProcsW
   */
  void ElementLocator::recvSumFromWorkingSideL()
  {
    int procId=0;
    int wProcSize=_distant_proc_ids.size();
    CommInterface comm;
    _ids_per_working_proc.resize(wProcSize);
    _values_per_working_proc.resize(wProcSize);
    MPI_Status status;
    std::map<int,double> sums;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1124,*_comm,&status);
        vector<int>& ids=_ids_per_working_proc[procId];
        vector<double>& vals=_values_per_working_proc[procId];
        ids.resize(lgth);
        vals.resize(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1125,*_comm,&status);
        comm.recv(&vals[0],lgth,MPI_DOUBLE,*iter,1126,*_comm,&status);
        vector<int>::const_iterator iter1=ids.begin();
        vector<double>::const_iterator iter2=vals.begin();
        for(;iter1!=ids.end();iter1++,iter2++)
          sums[*iter1]+=*iter2;
      }
    //assign sum to prepare sending to working side
    for(procId=0;procId<wProcSize;procId++)
      {
        vector<int>& ids=_ids_per_working_proc[procId];
        vector<double>& vals=_values_per_working_proc[procId];
        vector<int>::const_iterator iter1=ids.begin();
        vector<double>::iterator iter2=vals.begin();
        for(;iter1!=ids.end();iter1++,iter2++)
          *iter2=sums[*iter1];
        ids.clear();
      }
  }

  /*!
   * For each working proc Wi, computes and pushes into _ids_per_working_proc3, if they exist,
   * the local ids of the nodes that are in interaction with another lazy proc, that exist on
   * this proc, but that have no interaction with it.
   * The computation is performed here; sendAddElementsToWorkingSideL is only in charge of
   * sending the precomputed _ids_per_working_proc3 attribute.
   * connected with ElementLocator::sendCandidatesForAddElementsW
   */
  void ElementLocator::recvCandidatesForAddElementsL()
  {
    int procId=0;
    int wProcSize=_distant_proc_ids.size();
    CommInterface comm;
    _ids_per_working_proc3.resize(wProcSize);
    MPI_Status status;
    std::map<int,double> sums;
    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
    const int *globalIdsC=globalIds->getConstPointer();
    int nbElts=globalIds->getNumberOfTuples();
    std::set<int> globalIdsS(globalIdsC,globalIdsC+nbElts);
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const std::vector<int>& ids0=_ids_per_working_proc[procId];
        int lgth0=ids0.size();
        std::set<int> elts0;
        for(int i=0;i<lgth0;i++)
          elts0.insert(globalIdsC[ids0[i]]);
        int lgth;
        comm.recv(&lgth,1,MPI_INT,*iter,1128,*_comm,&status);
        vector<int> ids(lgth);
        comm.recv(&ids[0],lgth,MPI_INT,*iter,1129,*_comm,&status);
        set<int> ids1(ids.begin(),ids.end());
        ids.clear();
        set<int> tmp5,tmp6;
        set_intersection(globalIdsS.begin(),globalIdsS.end(),ids1.begin(),ids1.end(),inserter(tmp5,tmp5.begin()));
        set_difference(tmp5.begin(),tmp5.end(),elts0.begin(),elts0.end(),inserter(tmp6,tmp6.begin()));
        std::vector<int>& ids2=_ids_per_working_proc3[procId];
        ids2.resize(tmp6.size());
        std::copy(tmp6.begin(),tmp6.end(),ids2.begin());
        //global->local
        for(std::vector<int>::iterator iter2=ids2.begin();iter2!=ids2.end();iter2++)
          *iter2=std::find(globalIdsC,globalIdsC+nbElts,*iter2)-globalIdsC;
      }
    if(globalIds)
      globalIds->decrRef();
  }

  /*!
   * connected with ElementLocator::recvAddElementsFromLazyProcsW
   */
  void ElementLocator::sendAddElementsToWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const std::vector<int>& vals=_ids_per_working_proc3[procId];
        int size=vals.size();
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&size)),1,MPI_INT,*iter,1130,*_comm);
        comm.send(const_cast<void *>(reinterpret_cast<const void *>(&vals[0])),size,MPI_INT,*iter,1131,*_comm);
      }
  }

  /*!
   * This method sends to each working proc Wi only the nodes that are in interaction with Wi
   * \b and located on the boundary, in order to reduce the amount of data exchanged.
   * connected with ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW
   */
  void ElementLocator::sendCandidatesGlobalIdsToWorkingSideL()
  {
    int procId=0;
    CommInterface comm;
    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
    const int *globalIdsC=globalIds->getConstPointer();
    MCAuto<DataArrayInt> candidates=_local_para_field.getSupport()->getCellMesh()->findBoundaryNodes();
    for(int *iter1=candidates->getPointer();iter1!=candidates->getPointer()+candidates->getNumberOfTuples();iter1++)
      (*iter1)=globalIdsC[*iter1];
    std::set<int> candidatesS(candidates->begin(),candidates->end());
    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
      {
        const vector<int>& ids=_ids_per_working_proc[procId];
        vector<int> valsToSend(ids.size());
        vector<int>::iterator iter1=valsToSend.begin();
        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
          *iter1=globalIdsC[*iter2];
        std::set<int> tmp2(valsToSend.begin(),valsToSend.end());
        std::vector<int> tmp3;
        set_intersection(candidatesS.begin(),candidatesS.end(),tmp2.begin(),tmp2.end(),std::back_insert_iterator< std::vector<int> >(tmp3));
        int lgth=tmp3.size();
        comm.send(&lgth,1,MPI_INT,*iter,1132,*_comm);
        comm.send(&tmp3[0],lgth,MPI_INT,*iter,1133,*_comm);
      }
    if(globalIds)
      globalIds->decrRef();
  }
}