1 // Copyright (C) 2007-2016 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 #include "CommInterface.hxx"
22 #include "Topology.hxx"
23 #include "BlockTopology.hxx"
24 #include "ComponentTopology.hxx"
25 #include "ParaFIELD.hxx"
26 #include "MPIProcessorGroup.hxx"
27 #include "StructuredCoincidentDEC.hxx"
28 #include "InterpKernelUtilities.hxx"
38 \anchor StructuredCoincidentDEC-det
39 \class StructuredCoincidentDEC
41 This class aims at \ref interpolation "remapping fields" that have identical
42 structured supports (=the same underlying mesh) but different parallel topologies
43 (=different sub-domains in the structured mesh). It can be used to couple
44 together multi-physics codes that operate on the same domain
45 with different partitioning. This can be useful for example if one of
46 the computations is much faster than the other. It can also be used
47 to couple together codes that share an interface that was generated
48 in the same manner (with identical global ids).
49 Also, this \ref para-dec "DEC" can be used for fields that have component topologies,
50 i.e., components that are scattered over several processors.
52 The remapping between the two supports is based on identity of global
53 ids, instead of geometrical considerations (as it is the case for
54 \ref InterpKernelDEC-det "InterpKernelDEC").
55 Therefore, beware that this \ref para-dec "DEC" can not be used
56 for coincident meshes if they do *not* have the exact same numbering.
58 With this %DEC no projection, and no interpolation of the field data is done, contrary
59 to what happens in \ref InterpKernelDEC-det "InterpKernelDEC". It is just
60 a matter of allocating the values from one side to the other, using directly the cell
63 As all the other DECs, its usage requires two phases :
64 - a setup phase during which the topologies are exchanged so that
65 the target side knows from which processors it should expect
67 - a send/recv phase during which the field data is actually transferred.
69 This example illustrates the sending of a field with
70 the \c StructuredCoincidentDEC :
73 StructuredCoincidentDEC dec(groupA, groupB);
74 dec.attachLocalField(field);
76 if (groupA.containsMyRank())
78 else if (groupB.containsMyRank())
83 Creating a ParaFIELD to be attached to the %DEC is done in exactly the same way as for
84 the other DECs, if only the partitioning of the support mesh differs.
86 fields also have different *component* topologies, creating the ParaFIELD
87 requires some more effort. See the \ref para-over "parallelism" section for more details.
91 StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0),
92 _send_counts(0),_recv_counts(0),
93 _send_displs(0),_recv_displs(0),
94 _recv_buffer(0),_send_buffer(0)
99 StructuredCoincidentDEC::~StructuredCoincidentDEC()
101 delete [] _send_buffer;
102 delete [] _recv_buffer;
103 delete []_send_displs;
104 delete [] _recv_displs;
105 delete [] _send_counts;
106 delete [] _recv_counts;
107 if (! _source_group->containsMyRank())
109 if(!_target_group->containsMyRank())
113 StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
114 DisjointDEC(local_group,distant_group),
115 _topo_source(0),_topo_target(0),
116 _send_counts(0),_recv_counts(0),
117 _send_displs(0),_recv_displs(0),
118 _recv_buffer(0),_send_buffer(0)
122 /*! Synchronization process for exchanging topologies
124 void StructuredCoincidentDEC::synchronizeTopology()
126 if (_source_group->containsMyRank())
127 _topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
128 if (_target_group->containsMyRank())
129 _topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());
131 // Transmitting source topology to target code
132 broadcastTopology(_topo_source,1000);
133 // Transmitting target topology to source code
134 broadcastTopology(_topo_target,2000);
135 if (_topo_source->getNbElements() != _topo_target->getNbElements())
136 throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
140 /*! Creates the arrays necessary for the data transfer
141 * and fills the send array with the values of the
144 void StructuredCoincidentDEC::prepareSourceDE()
146 ////////////////////////////////////
147 //Step 1 : _buffer array creation
149 if (!_topo_source->getProcGroup()->containsMyRank())
151 MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());
153 int myranksource = _topo_source->getProcGroup()->myRank();
155 vector <int>* target_arrays=new vector<int>[_topo_target->getProcGroup()->size()];
157 //cout<<" topotarget size"<< _topo_target->getProcGroup()->size()<<endl;
159 int nb_local = _topo_source-> getNbLocalElements();
160 for (int ielem=0; ielem< nb_local ; ielem++)
162 // cout <<"source local :"<<myranksource<<","<<ielem<<endl;
163 int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
164 // cout << "global "<<global<<endl;
165 pair<int,int> target_local =_topo_target->globalToLocal(global);
166 // cout << "target local : "<<target_local.first<<","<<target_local.second<<endl;
167 target_arrays[target_local.first].push_back(target_local.second);
170 int union_size=group->size();
172 _send_counts=new int[union_size];
173 _send_displs=new int[union_size];
174 _recv_counts=new int[union_size];
175 _recv_displs=new int[union_size];
177 for (int i=0; i< union_size; i++)
185 for (int iproc=0; iproc < _topo_target->getProcGroup()->size(); iproc++)
187 //converts the rank in target to the rank in union communicator
188 int unionrank=group->translateRank(_topo_target->getProcGroup(),iproc);
189 _send_counts[unionrank]=target_arrays[iproc].size();
192 for (int iproc=1; iproc<group->size();iproc++)
193 _send_displs[iproc]=_send_displs[iproc-1]+_send_counts[iproc-1];
195 _send_buffer = new double [nb_local ];
197 /////////////////////////////////////////////////////////////
198 //Step 2 : filling the _buffers with the source field values
200 int* counter=new int [_topo_target->getProcGroup()->size()];
202 for (int i=1; i<_topo_target->getProcGroup()->size(); i++)
203 counter[i]=counter[i-1]+target_arrays[i-1].size();
206 const double* value = _local_field->getField()->getArray()->getPointer();
207 //cout << "Nb local " << nb_local<<endl;
208 for (int ielem=0; ielem<nb_local ; ielem++)
210 int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
211 pair<int,int> target_local =_topo_target->globalToLocal(global);
212 //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
213 //cout <<"counter[]"<<counter[target_local.first]<<endl;
214 _send_buffer[counter[target_local.first]++]=value[ielem];
217 delete[] target_arrays;
223 * Creates the buffers for receiving the fields on the target side
225 void StructuredCoincidentDEC::prepareTargetDE()
227 if (!_topo_target->getProcGroup()->containsMyRank())
229 MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());
231 int myranktarget = _topo_target->getProcGroup()->myRank();
233 vector < vector <int> > source_arrays(_topo_source->getProcGroup()->size());
234 int nb_local = _topo_target-> getNbLocalElements();
235 for (int ielem=0; ielem< nb_local ; ielem++)
237 // cout <<"TS target local :"<<myranktarget<<","<<ielem<<endl;
238 int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
239 //cout << "TS global "<<global<<endl;
240 pair<int,int> source_local =_topo_source->globalToLocal(global);
241 // cout << "TS source local : "<<source_local.first<<","<<source_local.second<<endl;
242 source_arrays[source_local.first].push_back(source_local.second);
244 int union_size=group->size();
245 _recv_counts=new int[union_size];
246 _recv_displs=new int[union_size];
247 _send_counts=new int[union_size];
248 _send_displs=new int[union_size];
250 for (int i=0; i< union_size; i++)
256 for (int iproc=0; iproc < _topo_source->getProcGroup()->size(); iproc++)
258 //converts the rank in target to the rank in union communicator
259 int unionrank=group->translateRank(_topo_source->getProcGroup(),iproc);
260 _recv_counts[unionrank]=source_arrays[iproc].size();
262 for (int i=1; i<union_size; i++)
263 _recv_displs[i]=_recv_displs[i-1]+_recv_counts[i-1];
264 _recv_buffer=new double[nb_local];
271 * Synchronizing a topology so that all the
272 * group possesses it.
274 * \param topo Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
275 * \param tag Communication tag associated with this operation.
277 void StructuredCoincidentDEC::broadcastTopology(BlockTopology*& topo, int tag)
284 MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
286 // The master proc creates a send buffer containing
287 // a serialized topology
290 if (topo!=0 && topo->getProcGroup()->myRank()==0)
292 MESSAGE ("Master rank");
293 topo->serialize(serializer, size);
294 rank_master = group->translateRank(topo->getProcGroup(),0);
295 MESSAGE("Master rank world number is "<<rank_master);
296 MESSAGE("World Size is "<<group->size());
297 for (int i=0; i< group->size(); i++)
300 _comm_interface->send(&rank_master,1,MPI_INT, i,tag+i,*(group->getComm()));
305 MESSAGE(" rank "<<group->myRank()<< " waiting ...");
306 _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
307 MESSAGE(" rank "<<group->myRank()<< "received master rank"<<rank_master);
309 // The topology is broadcasted to all processsors in the group
310 _comm_interface->broadcast(&size, 1,MPI_INT,rank_master,*(group->getComm()));
312 int* buffer=new int[size];
313 if (topo!=0 && topo->getProcGroup()->myRank()==0)
314 copy(serializer, serializer+size, buffer);
315 _comm_interface->broadcast(buffer,size,MPI_INT,rank_master,*(group->getComm()));
317 // Processors which did not possess the source topology
320 BlockTopology* topotemp=new BlockTopology();
321 topotemp->unserialize(buffer, *_comm_interface);
332 MESSAGE (" rank "<<group->myRank()<< " unserialize is over");
338 void StructuredCoincidentDEC::recvData()
340 //MPI_COMM_WORLD is used instead of group because there is no
341 //mechanism for creating the union group yet
343 for (int i=0; i< 4; i++)
344 cout << _recv_counts[i]<<" ";
346 for (int i=0; i< 4; i++)
347 cout << _recv_displs[i]<<" ";
350 cout<<"start AllToAll"<<endl;
351 MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
352 _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
353 _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm);
354 cout<<"end AllToAll"<<endl;
356 int nb_local = _topo_target->getNbLocalElements();
357 //double* value=new double[nb_local];
358 double* value=const_cast<double*>(_local_field->getField()->getArray()->getPointer());
360 int myranktarget=_topo_target->getProcGroup()->myRank();
361 vector<int> counters(_topo_source->getProcGroup()->size());
363 for (int i=0; i<_topo_source->getProcGroup()->size()-1; i++)
365 MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
366 int worldrank=group->translateRank(_topo_source->getProcGroup(),i);
367 counters[i+1]=counters[i]+_recv_counts[worldrank];
371 for (int ielem=0; ielem<nb_local ; ielem++)
373 int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
374 pair<int,int> source_local =_topo_source->globalToLocal(global);
375 value[ielem]=_recv_buffer[counters[source_local.first]++];
379 //_local_field->getField()->setValue(value);
382 void StructuredCoincidentDEC::sendData()
384 MESSAGE ("sendData");
385 for (int i=0; i< 4; i++)
386 cout << _send_counts[i]<<" ";
388 for (int i=0; i< 4; i++)
389 cout << _send_displs[i]<<" ";
391 cout <<"start AllToAll"<<endl;
392 MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
393 _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
394 _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm);
395 cout<<"end AllToAll"<<endl;
398 /*! Prepares a DEC for data exchange
400 This method broadcasts the topologies from source to target
401 so that the target side can analyse from which processors it
402 is expected to receive data.
405 void StructuredCoincidentDEC::synchronize()
407 if (_source_group->containsMyRank())
409 synchronizeTopology();
412 else if (_target_group->containsMyRank())
414 synchronizeTopology();