// Copyright (C) 2007-2024 CEA, EDF
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "ExplicitCoincidentDEC.hxx"
#include "ExplicitMapping.hxx"
#include "InterpKernelUtilities.hxx"

using namespace std;
\anchor ExplicitCoincidentDEC-det
\class ExplicitCoincidentDEC

This class aims at \ref interpolation "remapping fields" that have identical
supports (=the same underlying mesh) but different parallel topologies
(=different sub-domains in the mesh). It can be used to couple
together multi-physics codes that operate on the same domain
with different partitioning.

It is very similar to what the \ref StructuredCoincidentDEC-det "StructuredCoincidentDEC"
does, except that it works with an arbitrary user-defined topology.
The remapping between the two supports is based on the identity of global
ids, instead of geometrical considerations (as is the case for
\ref InterpKernelDEC-det "InterpKernelDEC").
Therefore, beware that this \ref para-dec "DEC" cannot be used
for coincident meshes if they do *not* have the exact same numbering.
With this \ref para-dec "DEC" no projection and no interpolation of the field data is done, contrary
to what happens in \ref InterpKernelDEC-det "InterpKernelDEC". It is just
a matter of assigning the values from one side to the other, using the cell
identifiers directly.
As for all the other DECs, its usage requires two phases:
- a setup phase during which the topologies are exchanged, so that
the target side knows from which processors it should expect
the data;
- a send/recv phase during which the field data is actually transferred.
This example illustrates the sending of a field with
the \c ExplicitCoincidentDEC :
\code
ExplicitCoincidentDEC dec(groupA, groupB);
dec.attachLocalField(field);
dec.synchronize();
if (groupA.containsMyRank())
  dec.recvData();
else if (groupB.containsMyRank())
  dec.sendData();
\endcode

Creating a ParaFIELD to be attached to the %DEC is done in exactly the same way as for
the other DECs, if only the partitioning of the support mesh differs.
If the fields also have different *component* topologies, creating the ParaFIELD
requires some more effort. See the \ref para-over "parallelism" section for more details.
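
As an illustration only, a minimal sketch of such a ParaFIELD creation (assuming a
\c ParaMESH* named \c paramesh already built on the local processor group, and a
three-component cell field) could look like:
\code
ComponentTopology comptopo(3);                              // 3 components, no component splitting
ParaFIELD parafield(ON_CELLS, NO_TIME, paramesh, comptopo); // field supported by the local sub-mesh
dec.attachLocalField(&parafield);
\endcode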
ExplicitCoincidentDEC::ExplicitCoincidentDEC():
  _toposource(0), _topotarget(0),
  _targetgroup(0), _sourcegroup(0),
  _sendcounts(0), _recvcounts(0),
  _senddispls(0), _recvdispls(0),
  _recvbuffer(0), _sendbuffer(0),
  _distant_elems(), _explicit_mapping()

ExplicitCoincidentDEC::~ExplicitCoincidentDEC()
/*! Synchronization process for exchanging topologies */
void ExplicitCoincidentDEC::synchronize()
  if (_source_group->containsMyRank())
      _toposource = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
      _sourcegroup = _toposource->getProcGroup()->createProcGroup();
      _targetgroup = _toposource->getProcGroup()->createComplementProcGroup();
  if (_target_group->containsMyRank())
      _topotarget = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
      _sourcegroup = _topotarget->getProcGroup()->createComplementProcGroup();
      _targetgroup = _topotarget->getProcGroup()->createProcGroup();

  // Transmitting source topology to target code
  broadcastTopology(_toposource, _topotarget, 1000);
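  // The mapping built on the target side during broadcastTopology() is now sent back,
  // so that the source side knows which of its local elements go to which target process.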
  transferMappingToSource();
/*! Creates the arrays necessary for the data transfer
 * and fills the send array with the values of the source field
 */
void ExplicitCoincidentDEC::prepareSourceDE()
  ////////////////////////////////////
  // Step 1 : buffer array creation

  if (!_toposource->getProcGroup()->containsMyRank())
    return;
  MPIProcessorGroup* group = new MPIProcessorGroup(_sourcegroup->getCommInterface());

  // Warning : the size of the target side is implicitly deduced
  // from the size of MPI_COMM_WORLD
  int target_size = _toposource->getProcGroup()->getCommInterface().worldSize() - _toposource->getProcGroup()->size();

  vector<int>* target_arrays = new vector<int>[target_size];

  mcIdType nb_local = _toposource->getNbLocalElements();

  std::size_t union_size = group->size();

  _sendcounts = new int[union_size];
  _senddispls = new int[union_size];
  _recvcounts = new int[union_size];
  _recvdispls = new int[union_size];

  for (std::size_t i=0; i<union_size; i++)

  int* counts = _explicit_mapping.getCounts();
  for (int i=0; i<group->size(); i++)
    _sendcounts[i] = counts[i];

  for (int iproc=1; iproc<group->size(); iproc++)
    _senddispls[iproc] = _senddispls[iproc-1] + _sendcounts[iproc-1];

  _sendbuffer = new double[nb_local * _toposource->getNbComponents()];
  /////////////////////////////////////////////////////////////
  // Step 2 : filling the buffers with the source field values

  int* counter = new int[target_size];
  for (int i=1; i<target_size; i++)
    counter[i] = counter[i-1] + (int)target_arrays[i-1].size();

  const double* value = _local_field->getField()->getArray()->getPointer();

  int* bufferindex = _explicit_mapping.getBufferIndex();
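  // The send buffer is filled in the order imposed by the explicit mapping:
  // slot ielem of the buffer receives all the components of local element bufferindex[ielem].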
  for (int ielem=0; ielem<nb_local; ielem++)
      int ncomp = _toposource->getNbComponents();
      for (int icomp=0; icomp<ncomp; icomp++)
          _sendbuffer[ielem*ncomp+icomp] = value[bufferindex[ielem]*ncomp+icomp];

  delete[] target_arrays;
/*! Creates the buffers for receiving the fields on the target side */
void ExplicitCoincidentDEC::prepareTargetDE()
  if (!_topotarget->getProcGroup()->containsMyRank())
    return;

  MPIProcessorGroup* group = new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface());

  vector< vector<mcIdType> > source_arrays(_sourcegroup->size());
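  // source_arrays[iproc] collects, for each source processor, the local ids (on that
  // processor) of the elements whose values will be received here.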
  mcIdType nb_local = _topotarget->getNbLocalElements();
  for (mcIdType ielem=0; ielem<nb_local; ielem++)
      //pair<int,mcIdType> source_local = _distant_elems[ielem];
      pair<int,mcIdType> source_local = _explicit_mapping.getDistantNumbering(ielem);
      source_arrays[source_local.first].push_back(source_local.second);

  std::size_t union_size = group->size();
  _recvcounts = new int[union_size];
  _recvdispls = new int[union_size];
  _sendcounts = new int[union_size];
  _senddispls = new int[union_size];

  for (std::size_t i=0; i<union_size; i++)

  for (int iproc=0; iproc<_sourcegroup->size(); iproc++)
      // converts the rank in the source group to the rank in the union communicator
      int unionrank = group->translateRank(_sourcegroup, iproc);
      _recvcounts[unionrank] = (int)(source_arrays[iproc].size()*_topotarget->getNbComponents());

  for (std::size_t i=1; i<union_size; i++)
    _recvdispls[i] = _recvdispls[i-1] + _recvcounts[i-1];

  _recvbuffer = new double[nb_local*_topotarget->getNbComponents()];
/*!
 * Synchronizing a topology so that all the groups get it.
 *
 * \param toposend Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
 * \param toporecv Topology which is received.
 * \param tag Communication tag associated with this operation.
 */
void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag)
  mcIdType* serializer = 0;

  MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);

  // The send processors serialize the send topology
  // and send the buffers to the recv procs
  if (toposend != 0 && toposend->getProcGroup()->containsMyRank())
      toposend->serialize(serializer, size);
      for (int iproc=0; iproc<group->size(); iproc++)
          if (!toposend->getProcGroup()->contains(itarget))
              _comm_interface->send(&size, 1, MPI_ID_TYPE, itarget, tag+itarget, *(group->getComm()));
              _comm_interface->send(serializer, (int)size, MPI_ID_TYPE, itarget, tag+itarget, *(group->getComm()));
      vector<int> size2(group->size());
      int myworldrank = group->myRank();
      for (int iproc=0; iproc<group->size(); iproc++)
          if (!toporecv->getProcGroup()->contains(isource))
              _comm_interface->recv(&nbelem, 1, MPI_ID_TYPE, isource, tag+myworldrank, *(group->getComm()), &status);
              mcIdType* buffer = new mcIdType[nbelem];
              _comm_interface->recv(buffer, (int)nbelem, MPI_ID_TYPE, isource, tag+myworldrank, *(group->getComm()), &status);

              ExplicitTopology* topotemp = new ExplicitTopology();
              topotemp->unserialize(buffer, *_comm_interface);
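              // For every local element, look up its global id and the corresponding local id
              // on the sending process; this (proc, local id) pair feeds the explicit mapping.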
              for (mcIdType ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
                  mcIdType global = toporecv->localToGlobal(ielem);
                  mcIdType sendlocal = topotemp->globalToLocal(global);
                  _explicit_mapping.pushBackElem(make_pair(iproc, sendlocal));

  MESSAGE(" rank " << group->myRank() << " broadcastTopology is over");
void ExplicitCoincidentDEC::transferMappingToSource()
  MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);

  // Sending the source->target mapping, which is stored by the target side
  // in _distant_elems, from the target processes to the source processes
  if (_topotarget != 0 && _topotarget->getProcGroup()->containsMyRank())
      int world_size = _topotarget->getProcGroup()->getCommInterface().worldSize();
      int* nb_transfer_union = new int[world_size];
      int* dummy_recv = new int[world_size];
      for (int i=0; i<world_size; i++)
        nb_transfer_union[i] = 0;

      // converts the rank in the source group to the rank in the union communicator
      for (int i=0; i<_explicit_mapping.nbDistantDomains(); i++)
          int unionrank = group->translateRank(_sourcegroup, _explicit_mapping.getDistantDomain(i));
          nb_transfer_union[unionrank] = _explicit_mapping.getNbDistantElems(i);

      _comm_interface->allToAll(nb_transfer_union, 1, MPI_INT, dummy_recv, 1, MPI_INT, MPI_COMM_WORLD);
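      // Every rank announces how many mapping entries it will send to every other rank;
      // the target side only sends here, so what it receives (dummy_recv) is not used.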
      mcIdType* sendbuffer = _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());

      int* sendcounts = new int[world_size];
      int* senddispls = new int[world_size];
      for (int i=0; i<world_size; i++)
          sendcounts[i] = 2*nb_transfer_union[i];
          if (i==0)
            senddispls[i] = 0;
          else
            senddispls[i] = senddispls[i-1] + sendcounts[i-1];

      int* recvcounts = new int[world_size];
      int* recvdispls = new int[world_size];

      for (int i=0; i<world_size; i++)
      _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_ID_TYPE, dummyrecv, recvcounts, recvdispls, MPI_ID_TYPE, MPI_COMM_WORLD);
  // receiving in the source subdomains the mapping sent by targets
  if (_toposource != 0 && _toposource->getProcGroup()->containsMyRank())
      int world_size = _toposource->getProcGroup()->getCommInterface().worldSize();
      int* nb_transfer_union = new int[world_size];
      int* dummy_send = new int[world_size];
      for (int i=0; i<world_size; i++)
        dummy_send[i] = 0;

      _comm_interface->allToAll(dummy_send, 1, MPI_INT, nb_transfer_union, 1, MPI_INT, MPI_COMM_WORLD);
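      // After this exchange each source rank knows how many mapping entries
      // (2 values per entry) every target rank is about to send to it.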
      mcIdType total_size = 0;
      for (int i=0; i<world_size; i++)
        total_size += nb_transfer_union[i];

      int nbtarget = _targetgroup->size();
      int* targetranks = new int[nbtarget];
      for (int i=0; i<nbtarget; i++)
        targetranks[i] = group->translateRank(_targetgroup, i);

      mcIdType* mappingbuffer = new mcIdType[total_size*2];
      int* sendcounts = new int[world_size];
      int* senddispls = new int[world_size];
      int* recvcounts = new int[world_size];
      int* recvdispls = new int[world_size];
      for (int i=0; i<world_size; i++)
          recvcounts[i] = 2*nb_transfer_union[i];
          if (i==0)
            recvdispls[i] = 0;
          else
            recvdispls[i] = recvdispls[i-1] + recvcounts[i-1];

      for (int i=0; i<world_size; i++)

      _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_ID_TYPE, mappingbuffer, recvcounts, recvdispls, MPI_ID_TYPE, MPI_COMM_WORLD);
      _explicit_mapping.unserialize(world_size, nb_transfer_union, nbtarget, targetranks, mappingbuffer);
void ExplicitCoincidentDEC::recvData()
  //MPI_COMM_WORLD is used instead of group because there is no
  //mechanism for creating the union group yet
  cout << "start AllToAll" << endl;
  _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                             _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);
  cout << "end AllToAll" << endl;

  mcIdType nb_local = _topotarget->getNbLocalElements();
  double* value = new double[nb_local*_topotarget->getNbComponents()];

  vector<int> counters(_sourcegroup->size());
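  // counters[i] accumulates the receive counts of the source processors that precede
  // processor i, i.e. the start of processor i's block in the receive buffer.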
  for (int i=0; i<_sourcegroup->size()-1; i++)
      MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);
      int worldrank = group->translateRank(_sourcegroup, i);
      counters[i+1] = counters[i] + _recvcounts[worldrank];

  for (int ielem=0; ielem<nb_local; ielem++)
      pair<int,int> distant_numbering = _explicit_mapping.getDistantNumbering(ielem);
      int iproc = distant_numbering.first;
      int ncomp = _topotarget->getNbComponents();
      for (int icomp=0; icomp<ncomp; icomp++)
          value[ielem*ncomp+icomp] = _recvbuffer[counters[iproc]*ncomp+icomp];
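  // The target field adopts the reordered array; ownership is transferred to it
  // (CPP_DEALLOC), so 'value' must not be deleted here.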
  _local_field->getField()->getArray()->useArray(value, true, DeallocType::CPP_DEALLOC, nb_local, _topotarget->getNbComponents());
void ExplicitCoincidentDEC::sendData()
  MESSAGE("sendData");
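  // Debug trace: dump the first entries of the send counts and displacements before the exchange.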
  for (int i=0; i<4; i++)
    cout << _sendcounts[i] << " ";
  for (int i=0; i<4; i++)
    cout << _senddispls[i] << " ";

  //MPI_COMM_WORLD is used instead of group because there is no
  //mechanism for creating the union group yet
  cout << "start AllToAll" << endl;
  _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                             _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);