// Copyright (C) 2007-2015  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//

#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "StructuredCoincidentDEC.hxx"
#include "InterpKernelUtilities.hxx"

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

using namespace std;

namespace ParaMEDMEM
{

  /*!
    \anchor StructuredCoincidentDEC-det
    \class StructuredCoincidentDEC

    This class is meant for remapping fields that have identical
    supports but different parallel topologies. It can be used to couple
    together multi-physics codes that operate on the same domain
    with different partitionings, which can be useful if one of
    the computations is much faster than the other. It can also be used
    to couple together codes that share an interface that was generated
    in the same manner (with identical global ids).
    Also, this \ref para-dec "DEC" can be used for fields that have component topologies,
    i.e., components that are scattered over several processors.

    The remapping between the two supports is based on the identity of global
    ids, instead of geometrical considerations as is the case for
    \ref NonCoincidentDEC-det "NonCoincidentDEC" and \ref InterpKernelDEC-det "InterpKernelDEC".
    Therefore, this \ref para-dec "DEC" must not be used
    for coincident meshes that do not share the same numbering.

    As with all the other DECs, its use consists of two phases :
    - a setup phase during which the topologies are exchanged so that
    the target side knows from which processors it should expect its data;
    - a send/recv phase during which the field data is actually transferred.

    This example illustrates the sending of a field with
    the \c StructuredCoincidentDEC :
    \code
    ...
    StructuredCoincidentDEC dec(groupA, groupB);
    dec.attachLocalField(field);
    dec.synchronize();
    if (groupA.containsMyRank())
      dec.recvData();
    else if (groupB.containsMyRank())
      dec.sendData();
    ...
    \endcode

    Creating a ParaFIELD to be attached to the DEC is done exactly as for the
    other DECs when the remapping only concerns similar meshes with different
    partitionings. When the fields also have different component topologies,
    creating the ParaFIELD requires some more effort; see the \ref para-over
    section for more details, and the sketch following this comment block.
  */
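
  /* A rough illustrative sketch of how the ParaFIELD mentioned above might be built on each
     side before being attached to the DEC. The mesh variable, the group names and the
     ON_CELLS/NO_TIME choices are assumptions based on typical ParaMEDMEM usage, not a
     prescribed recipe:

       // on the processors of groupA (and symmetrically on groupB)
       ParaMESH* paramesh = new ParaMESH(mesh, groupA, "source mesh");   // mesh: local MEDCouplingUMesh piece
       ComponentTopology comptopo;                                       // one component, not scattered
       ParaFIELD* field = new ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo);
  */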

  StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0),
                                                     _send_counts(0),_recv_counts(0),
                                                     _send_displs(0),_recv_displs(0),
                                                     _recv_buffer(0),_send_buffer(0)
  {
  }

  StructuredCoincidentDEC::~StructuredCoincidentDEC()
  {
    delete [] _send_buffer;
    delete [] _recv_buffer;
    delete [] _send_displs;
    delete [] _recv_displs;
    delete [] _send_counts;
    delete [] _recv_counts;
    // The topology of the remote side was allocated locally by broadcastTopology()
    // and is therefore owned (and deleted) by this DEC.
    if (! _source_group->containsMyRank())
      delete _topo_source;
    if (!_target_group->containsMyRank())
      delete _topo_target;
  }

  StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
    DisjointDEC(local_group,distant_group),
    _topo_source(0),_topo_target(0),
    _send_counts(0),_recv_counts(0),
    _send_displs(0),_recv_displs(0),
    _recv_buffer(0),_send_buffer(0)
  {
  }

  /*! Synchronization process for exchanging topologies
   */
  void StructuredCoincidentDEC::synchronizeTopology()
  {
    if (_source_group->containsMyRank())
      _topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
    if (_target_group->containsMyRank())
      _topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());

    // Transmitting source topology to target code
    broadcastTopology(_topo_source,1000);
    // Transmitting target topology to source code
    broadcastTopology(_topo_target,2000);
    if (_topo_source->getNbElements() != _topo_target->getNbElements())
      throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
  }

  /*! Creates the arrays necessary for the data transfer
   * and fills the send array with the values of the
   * source field
   */
  void StructuredCoincidentDEC::prepareSourceDE()
  {
    ////////////////////////////////////
    //Step 1 : _buffer array creation

    if (!_topo_source->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());

    int myranksource = _topo_source->getProcGroup()->myRank();

    vector<int>* target_arrays=new vector<int>[_topo_target->getProcGroup()->size()];

    //cout<<" topotarget size "<< _topo_target->getProcGroup()->size()<<endl;

    int nb_local = _topo_source->getNbLocalElements();
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        // cout <<"source local :"<<myranksource<<","<<ielem<<endl;
        int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
        // cout << "global "<<global<<endl;
        pair<int,int> target_local =_topo_target->globalToLocal(global);
        // cout << "target local : "<<target_local.first<<","<<target_local.second<<endl;
        target_arrays[target_local.first].push_back(target_local.second);
      }

    int union_size=group->size();

    _send_counts=new int[union_size];
    _send_displs=new int[union_size];
    _recv_counts=new int[union_size];
    _recv_displs=new int[union_size];

    for (int i=0; i< union_size; i++)
      {
        _send_counts[i]=0;
        _recv_counts[i]=0;
        _send_displs[i]=0;
        _recv_displs[i]=0;
      }

    for (int iproc=0; iproc < _topo_target->getProcGroup()->size(); iproc++)
      {
        //converts the rank in target to the rank in union communicator
        int unionrank=group->translateRank(_topo_target->getProcGroup(),iproc);
        _send_counts[unionrank]=target_arrays[iproc].size();
      }

    for (int iproc=1; iproc<group->size();iproc++)
      _send_displs[iproc]=_send_displs[iproc-1]+_send_counts[iproc-1];
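
    // A small worked example (illustrative only): with union_size==3 and
    // _send_counts=={2,0,3}, the loop above yields _send_displs=={0,2,2} :
    // rank 0's values occupy slots [0,2) of _send_buffer, rank 2's values
    // occupy slots [2,5), and nothing is sent to rank 1.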

    _send_buffer = new double[nb_local];

    /////////////////////////////////////////////////////////////
    //Step 2 : filling the _buffers with the source field values

    int* counter=new int[_topo_target->getProcGroup()->size()];
    counter[0]=0;
    for (int i=1; i<_topo_target->getProcGroup()->size(); i++)
      counter[i]=counter[i-1]+target_arrays[i-1].size();

    const double* value = _local_field->getField()->getArray()->getPointer();
    //cout << "Nb local " << nb_local<<endl;
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
        pair<int,int> target_local =_topo_target->globalToLocal(global);
        //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
        //cout <<"counter[]"<<counter[target_local.first]<<endl;
        _send_buffer[counter[target_local.first]++]=value[ielem];
      }
    delete[] target_arrays;
    delete[] counter;
    delete group;
  }

  /*!
   * Creates the buffers for receiving the fields on the target side
   */
  void StructuredCoincidentDEC::prepareTargetDE()
  {
    if (!_topo_target->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());

    int myranktarget = _topo_target->getProcGroup()->myRank();

    vector< vector<int> > source_arrays(_topo_source->getProcGroup()->size());
    int nb_local = _topo_target->getNbLocalElements();
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        // cout <<"TS target local :"<<myranktarget<<","<<ielem<<endl;
        int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
        //cout << "TS global "<<global<<endl;
        pair<int,int> source_local =_topo_source->globalToLocal(global);
        // cout << "TS source local : "<<source_local.first<<","<<source_local.second<<endl;
        source_arrays[source_local.first].push_back(source_local.second);
      }
    int union_size=group->size();
    _recv_counts=new int[union_size];
    _recv_displs=new int[union_size];
    _send_counts=new int[union_size];
    _send_displs=new int[union_size];

    for (int i=0; i< union_size; i++)
      {
        _send_counts[i]=0;
        _recv_counts[i]=0;
        _send_displs[i]=0;
        _recv_displs[i]=0;
      }
    for (int iproc=0; iproc < _topo_source->getProcGroup()->size(); iproc++)
      {
        //converts the rank in source to the rank in union communicator
        int unionrank=group->translateRank(_topo_source->getProcGroup(),iproc);
        _recv_counts[unionrank]=source_arrays[iproc].size();
      }
    for (int i=1; i<union_size; i++)
      _recv_displs[i]=_recv_displs[i-1]+_recv_counts[i-1];
    _recv_buffer=new double[nb_local];

    delete group;
  }
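
  // Note: a process that is only a target fills its receive counts/displacements and
  // _recv_buffer here; its send counts stay zeroed, so it contributes nothing to the
  // send side of the all-to-all exchange performed in sendData()/recvData().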

  /*!
   * Synchronizing a topology so that all the
   * group possesses it.
   *
   * \param topo Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
   * \param tag Communication tag associated with this operation.
   */
  void StructuredCoincidentDEC::broadcastTopology(BlockTopology*& topo, int tag)
  {
    MPI_Status status;
    int* serializer=0;
    int size;
    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
    // The master proc creates a send buffer containing
    // a serialized topology
    int rank_master;
    if (topo!=0 && topo->getProcGroup()->myRank()==0)
      {
        MESSAGE ("Master rank");
        topo->serialize(serializer, size);
        rank_master = group->translateRank(topo->getProcGroup(),0);
        MESSAGE("Master rank world number is "<<rank_master);
        MESSAGE("World Size is "<<group->size());
        for (int i=0; i< group->size(); i++)
          {
            if (i!=rank_master)
              _comm_interface->send(&rank_master,1,MPI_INT, i,tag+i,*(group->getComm()));
          }
      }
    else
      {
        MESSAGE(" rank "<<group->myRank()<< " waiting ...");
        _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
        MESSAGE(" rank "<<group->myRank()<< " received master rank "<<rank_master);
      }
    // The topology is broadcast to all processors in the group
    _comm_interface->broadcast(&size, 1,MPI_INT,rank_master,*(group->getComm()));
    int* buffer=new int[size];
    if (topo!=0 && topo->getProcGroup()->myRank()==0)
      copy(serializer, serializer+size, buffer);
    _comm_interface->broadcast(buffer,size,MPI_INT,rank_master,*(group->getComm()));
    // Processors which did not possess the source topology
    // unserialize it
    BlockTopology* topotemp=new BlockTopology();
    topotemp->unserialize(buffer, *_comm_interface);
    if (topo==0)
      topo=topotemp;
    else
      delete topotemp;
    // Memory cleaning
    delete[] buffer;
    if (serializer!=0)
      delete[] serializer;
    MESSAGE (" rank "<<group->myRank()<< " unserialize is over");
    delete group;
  }

  void StructuredCoincidentDEC::recvData()
  {
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet
    // debug trace of the exchange layout
    for (int i=0; i<_union_group->size(); i++)
      cout << _recv_counts[i]<<" ";
    cout << endl;
    for (int i=0; i<_union_group->size(); i++)
      cout << _recv_displs[i]<<" ";
    cout << endl;

    cout<<"start AllToAll"<<endl;
    MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
    _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
                               _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE, comm);
    cout<<"end AllToAll"<<endl;

    int nb_local = _topo_target->getNbLocalElements();
    //double* value=new double[nb_local];
    double* value=const_cast<double*>(_local_field->getField()->getArray()->getPointer());

    int myranktarget=_topo_target->getProcGroup()->myRank();
    // counters[i] points to the next unread slot of the section of _recv_buffer
    // that was received from source rank i
    vector<int> counters(_topo_source->getProcGroup()->size());
    counters[0]=0;
    for (int i=0; i<_topo_source->getProcGroup()->size()-1; i++)
      {
        MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
        int worldrank=group->translateRank(_topo_source->getProcGroup(),i);
        counters[i+1]=counters[i]+_recv_counts[worldrank];
        delete group;
      }

    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
        pair<int,int> source_local =_topo_source->globalToLocal(global);
        value[ielem]=_recv_buffer[counters[source_local.first]++];
      }

    //_local_field->getField()->setValue(value);
  }

  void StructuredCoincidentDEC::sendData()
  {
    MESSAGE ("sendData");
    // debug trace of the exchange layout
    for (int i=0; i<_union_group->size(); i++)
      cout << _send_counts[i]<<" ";
    cout << endl;
    for (int i=0; i<_union_group->size(); i++)
      cout << _send_displs[i]<<" ";
    cout << endl;

    cout <<"start AllToAll"<<endl;
    MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
    _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
                               _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE, comm);
    cout<<"end AllToAll"<<endl;
  }
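
  /* For reference, the exchange in sendData()/recvData() is a plain MPI_Alltoallv: every
     rank sends send_counts[q] doubles, starting at offset send_displs[q] of its send buffer,
     to each rank q, and receives recv_counts[q] doubles at offset recv_displs[q]. A minimal,
     self-contained sketch of that pattern (illustrative only, independent of the DEC API):

       #include <mpi.h>
       #include <vector>

       void redistribute(const std::vector<double>& send_buf,
                         const std::vector<int>& send_counts, const std::vector<int>& send_displs,
                         std::vector<double>& recv_buf,
                         const std::vector<int>& recv_counts, const std::vector<int>& recv_displs,
                         MPI_Comm comm)
       {
         // recv_buf must already be sized to the sum of recv_counts
         MPI_Alltoallv(const_cast<double*>(send_buf.data()), const_cast<int*>(send_counts.data()),
                       const_cast<int*>(send_displs.data()), MPI_DOUBLE,
                       recv_buf.data(), const_cast<int*>(recv_counts.data()),
                       const_cast<int*>(recv_displs.data()), MPI_DOUBLE, comm);
       }
  */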

  /*! Prepares a DEC for data exchange

    This method broadcasts the topologies from source to target
    so that the target side can analyse from which processors it
    is expected to receive data.
  */
  void StructuredCoincidentDEC::synchronize()
  {
    if (_source_group->containsMyRank())
      {
        synchronizeTopology();
        prepareSourceDE();
      }
    else if (_target_group->containsMyRank())
      {
        synchronizeTopology();
        prepareTargetDE();
      }
  }
}
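
/* An illustrative, self-contained driver sketch showing how the two processor groups might
   be built and the DEC driven on both sides. Group sizes and all variable names are
   assumptions, and the mesh/ParaFIELD construction (see the sketch near the class
   documentation) is elided:

     #include <mpi.h>
     #include <set>
     using namespace ParaMEDMEM;

     int main(int argc, char** argv)
     {
       MPI_Init(&argc, &argv);

       CommInterface interface;
       std::set<int> procsA, procsB;
       procsA.insert(0); procsA.insert(1);            // ranks 0-1 run code A
       procsB.insert(2); procsB.insert(3);            // ranks 2-3 run code B
       MPIProcessorGroup groupA(interface, procsA);
       MPIProcessorGroup groupB(interface, procsB);

       ParaFIELD* field = 0;
       // ... each side builds its ParaMESH/ParaFIELD on its own partition here ...

       StructuredCoincidentDEC dec(groupA, groupB);
       dec.attachLocalField(field);
       dec.synchronize();
       if (groupA.containsMyRank())
         dec.recvData();
       else if (groupB.containsMyRank())
         dec.sendData();

       MPI_Finalize();
       return 0;
     }
*/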