// Copyright (C) 2007-2014 CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "StructuredCoincidentDEC.hxx"
#include "InterpKernelUtilities.hxx"

#include <iostream>
#include <vector>
#include <algorithm>

using namespace std;
/*! \defgroup structuredcoincidentdec StructuredCoincidentDEC

This class is meant for remapping fields that have identical
supports with different parallel topologies. It can be used to couple
together multiphysics codes that operate on the same domain
with different partitionings, which can be useful if one of
the computations is much faster than the other. It can also be used
to couple together codes that share an interface that was generated
in the same manner (with identical global ids).
Also, this \ref dec can be used for fields that have component topologies,
i.e., components that are scattered over several processors.

The remapping between the two supports is based on identity of global
ids, instead of geometrical considerations as is the case for
\ref NonCoincidentDEC and \ref InterpKernelDEC. Therefore, this \ref dec must not be used
for coincident meshes that do not share the same numbering.

As with all the other DECs, its use consists of two phases:
- a setup phase during which the topologies are exchanged, so that
the target side knows from which processors it should expect data;
- a send/recv phase during which the field data is actually transferred.

This example illustrates the sending of a field with
the \c StructuredCoincidentDEC:
\code
StructuredCoincidentDEC dec(groupA, groupB);
dec.attachLocalField(field);
dec.synchronize();
if (groupA.containsMyRank())
  dec.recvData();
else if (groupB.containsMyRank())
  dec.sendData();
\endcode

Creating a ParaFIELD to be attached to the DEC is exactly the same as for
other DECs when the remapping concerns similar meshes
that only have different partitionings. When the fields
also have different component topologies, creating the ParaFIELD
requires some more effort. See the \ref parafield section for more details.
*/
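// A minimal sketch (not part of the original documentation), assuming a ParaMESH* "paramesh"
// built on the local structured support and the two groups "groupA"/"groupB" from the example
// above, of how a ParaFIELD with a simple component topology could be created and attached:
//
//   ComponentTopology comptopo(3);                        // 3 components, not scattered
//   ParaFIELD parafield(ON_CELLS, NO_TIME, paramesh, comptopo);
//   StructuredCoincidentDEC dec(groupA, groupB);
//   dec.attachLocalField(&parafield);
//   dec.synchronize();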
StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0),
                                                   _send_counts(0),_recv_counts(0),
                                                   _send_displs(0),_recv_displs(0),
                                                   _recv_buffer(0),_send_buffer(0)
{
}
StructuredCoincidentDEC::~StructuredCoincidentDEC()
{
  delete [] _send_buffer;
  delete [] _recv_buffer;
  delete [] _send_displs;
  delete [] _recv_displs;
  delete [] _send_counts;
  delete [] _recv_counts;
  // topologies received in broadcastTopology() are owned by this DEC;
  // topologies taken from the local field must not be deleted here
  if (! _source_group->containsMyRank())
    delete _topo_source;
  if (! _target_group->containsMyRank())
    delete _topo_target;
}
/*!
  \addtogroup structuredcoincidentdec
*/
StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
  DisjointDEC(local_group,distant_group),
  _topo_source(0),_topo_target(0),
  _send_counts(0),_recv_counts(0),
  _send_displs(0),_recv_displs(0),
  _recv_buffer(0),_send_buffer(0)
{
}
/*! Synchronization process for exchanging topologies
 */
void StructuredCoincidentDEC::synchronizeTopology()
{
  if (_source_group->containsMyRank())
    _topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
  if (_target_group->containsMyRank())
    _topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());

  // Transmitting source topology to target code
  broadcastTopology(_topo_source,1000);
  // Transmitting target topology to source code
  broadcastTopology(_topo_target,2000);
  if (_topo_source->getNbElements() != _topo_target->getNbElements())
    throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
}
/*! Creates the arrays necessary for the data transfer
 * and fills the send array with the values of the
 * source field.
 */
void StructuredCoincidentDEC::prepareSourceDE()
{
  ////////////////////////////////////
  //Step 1 : _buffer array creation

  if (!_topo_source->getProcGroup()->containsMyRank())
    return;
  MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());

  int myranksource = _topo_source->getProcGroup()->myRank();

  vector <int>* target_arrays=new vector<int>[_topo_target->getProcGroup()->size()];

  //cout<<" topotarget size"<< _topo_target->getProcGroup()->size()<<endl;

  int nb_local = _topo_source->getNbLocalElements();
  for (int ielem=0; ielem< nb_local ; ielem++)
    {
      // cout <<"source local :"<<myranksource<<","<<ielem<<endl;
      int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
      // cout << "global "<<global<<endl;
      pair<int,int> target_local =_topo_target->globalToLocal(global);
      // cout << "target local : "<<target_local.first<<","<<target_local.second<<endl;
      target_arrays[target_local.first].push_back(target_local.second);
    }

  int union_size=group->size();

  _send_counts=new int[union_size];
  _send_displs=new int[union_size];
  _recv_counts=new int[union_size];
  _recv_displs=new int[union_size];

  for (int i=0; i< union_size; i++)
    {
      _send_counts[i]=0;
      _send_displs[i]=0;
      _recv_counts[i]=0;
      _recv_displs[i]=0;
    }

  for (int iproc=0; iproc < _topo_target->getProcGroup()->size(); iproc++)
    {
      //converts the rank in the target group to the rank in the union communicator
      int unionrank=group->translateRank(_topo_target->getProcGroup(),iproc);
      _send_counts[unionrank]=target_arrays[iproc].size();
    }

  for (int iproc=1; iproc<group->size();iproc++)
    _send_displs[iproc]=_send_displs[iproc-1]+_send_counts[iproc-1];

  _send_buffer = new double [nb_local];

  /////////////////////////////////////////////////////////////
  //Step 2 : filling the _buffers with the source field values

  int* counter=new int [_topo_target->getProcGroup()->size()];
  counter[0]=0;
  for (int i=1; i<_topo_target->getProcGroup()->size(); i++)
    counter[i]=counter[i-1]+target_arrays[i-1].size();

  const double* value = _local_field->getField()->getArray()->getPointer();
  //cout << "Nb local " << nb_local<<endl;
  for (int ielem=0; ielem<nb_local ; ielem++)
    {
      int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
      pair<int,int> target_local =_topo_target->globalToLocal(global);
      //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
      //cout <<"counter[]"<<counter[target_local.first]<<endl;
      _send_buffer[counter[target_local.first]++]=value[ielem];
    }
  delete[] counter;
  delete[] target_arrays;
}
/*!
 * Creates the buffers for receiving the fields on the target side
 */
void StructuredCoincidentDEC::prepareTargetDE()
{
  if (!_topo_target->getProcGroup()->containsMyRank())
    return;
  MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());

  int myranktarget = _topo_target->getProcGroup()->myRank();

  vector < vector <int> > source_arrays(_topo_source->getProcGroup()->size());
  int nb_local = _topo_target->getNbLocalElements();
  for (int ielem=0; ielem< nb_local ; ielem++)
    {
      // cout <<"TS target local :"<<myranktarget<<","<<ielem<<endl;
      int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
      //cout << "TS global "<<global<<endl;
      pair<int,int> source_local =_topo_source->globalToLocal(global);
      // cout << "TS source local : "<<source_local.first<<","<<source_local.second<<endl;
      source_arrays[source_local.first].push_back(source_local.second);
    }
  int union_size=group->size();
  _recv_counts=new int[union_size];
  _recv_displs=new int[union_size];
  _send_counts=new int[union_size];
  _send_displs=new int[union_size];

  for (int i=0; i< union_size; i++)
    {
      _send_counts[i]=0;
      _send_displs[i]=0;
      _recv_counts[i]=0;
      _recv_displs[i]=0;
    }
  for (int iproc=0; iproc < _topo_source->getProcGroup()->size(); iproc++)
    {
      //converts the rank in the source group to the rank in the union communicator
      int unionrank=group->translateRank(_topo_source->getProcGroup(),iproc);
      _recv_counts[unionrank]=source_arrays[iproc].size();
    }
  for (int i=1; i<union_size; i++)
    _recv_displs[i]=_recv_displs[i-1]+_recv_counts[i-1];
  _recv_buffer=new double[nb_local];
}
/*!
 * Synchronizing a topology so that all the
 * processes in the group possess it.
 *
 * \param topo Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
 * \param tag Communication tag associated with this operation.
 */
void StructuredCoincidentDEC::broadcastTopology(BlockTopology*& topo, int tag)
{
  MPI_Status status;
  int* serializer=0;
  int size;

  MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);

  // The master proc creates a send buffer containing
  // a serialized topology
  int rank_master;

  if (topo!=0 && topo->getProcGroup()->myRank()==0)
    {
      MESSAGE ("Master rank");
      topo->serialize(serializer, size);
      rank_master = group->translateRank(topo->getProcGroup(),0);
      MESSAGE("Master rank world number is "<<rank_master);
      MESSAGE("World Size is "<<group->size());
      for (int i=0; i< group->size(); i++)
        {
          if (i != rank_master)
            _comm_interface->send(&rank_master,1,MPI_INT, i,tag+i,*(group->getComm()));
        }
    }
  else
    {
      MESSAGE(" rank "<<group->myRank()<< " waiting ...");
      _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
      MESSAGE(" rank "<<group->myRank()<< " received master rank "<<rank_master);
    }
  // The topology is broadcast to all processors in the group
  _comm_interface->broadcast(&size, 1,MPI_INT,rank_master,*(group->getComm()));

  int* buffer=new int[size];
  if (topo!=0 && topo->getProcGroup()->myRank()==0)
    copy(serializer, serializer+size, buffer);
  _comm_interface->broadcast(buffer,size,MPI_INT,rank_master,*(group->getComm()));

  // Processors which did not possess the source topology unserialize it
  BlockTopology* topotemp=new BlockTopology();
  topotemp->unserialize(buffer, *_comm_interface);
  if (topo==0)
    topo=topotemp;
  else
    delete topotemp;

  delete[] buffer;
  if (serializer!=0)
    delete[] serializer;
  MESSAGE (" rank "<<group->myRank()<< " unserialize is over");
  delete group;
}
void StructuredCoincidentDEC::recvData()
{
  //MPI_COMM_WORLD is used instead of group because there is no
  //mechanism for creating the union group yet
  MESSAGE("recvData");
  // debug traces of the receive counts and displacements
  for (int i=0; i< 4; i++)
    cout << _recv_counts[i]<<" ";
  cout << endl;
  for (int i=0; i< 4; i++)
    cout << _recv_displs[i]<<" ";
  cout << endl;

  cout<<"start AllToAll"<<endl;
  MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
  _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
                             _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE, comm);
  cout<<"end AllToAll"<<endl;

  int nb_local = _topo_target->getNbLocalElements();
  //double* value=new double[nb_local];
  double* value=const_cast<double*>(_local_field->getField()->getArray()->getPointer());

  int myranktarget=_topo_target->getProcGroup()->myRank();
  vector<int> counters(_topo_source->getProcGroup()->size());
  counters[0]=0;
  for (int i=0; i<_topo_source->getProcGroup()->size()-1; i++)
    {
      MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
      int worldrank=group->translateRank(_topo_source->getProcGroup(),i);
      counters[i+1]=counters[i]+_recv_counts[worldrank];
      delete group;
    }

  for (int ielem=0; ielem<nb_local ; ielem++)
    {
      int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
      pair<int,int> source_local =_topo_source->globalToLocal(global);
      value[ielem]=_recv_buffer[counters[source_local.first]++];
    }

  //_local_field->getField()->setValue(value);
}
void StructuredCoincidentDEC::sendData()
{
  MESSAGE ("sendData");
  // debug traces of the send counts and displacements
  for (int i=0; i< 4; i++)
    cout << _send_counts[i]<<" ";
  cout << endl;
  for (int i=0; i< 4; i++)
    cout << _send_displs[i]<<" ";
  cout << endl;

  cout <<"start AllToAll"<<endl;
  MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
  _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
                             _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE, comm);
  cout<<"end AllToAll"<<endl;
}
/*! Prepares a DEC for data exchange.
  This method broadcasts the topologies from source to target
  so that the target side can analyse from which processors it
  is expected to receive data.
*/
void StructuredCoincidentDEC::synchronize()
{
  if (_source_group->containsMyRank())
    {
      synchronizeTopology();
      prepareSourceDE();
    }
  else if (_target_group->containsMyRank())
    {
      synchronizeTopology();
      prepareTargetDE();
    }
}
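// A hedged end-to-end usage sketch (illustrative, not part of this file): splitting
// MPI_COMM_WORLD into the two processor groups and driving the DEC in the order described
// above (synchronize, then sendData/recvData). The rank sets and the ParaFIELD "parafield"
// are assumptions made for the example only.
//
//   set<int> procsA, procsB;
//   procsA.insert(0); procsA.insert(1);           // ranks 0-1 run code A
//   procsB.insert(2); procsB.insert(3);           // ranks 2-3 run code B
//   CommInterface comm_interface;
//   MPIProcessorGroup groupA(comm_interface, procsA);
//   MPIProcessorGroup groupB(comm_interface, procsB);
//
//   StructuredCoincidentDEC dec(groupA, groupB);
//   dec.attachLocalField(&parafield);             // parafield built on the local support
//   dec.synchronize();                            // exchange the block topologies
//   if (groupA.containsMyRank())
//     dec.recvData();                             // A receives the remapped field values
//   else if (groupB.containsMyRank())
//     dec.sendData();                             // B sends its local field values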