1 // Copyright (C) 2007-2024 CEA, EDF
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 #include "CommInterface.hxx"
22 #include "Topology.hxx"
23 #include "BlockTopology.hxx"
24 #include "ComponentTopology.hxx"
25 #include "ParaFIELD.hxx"
26 #include "MPIProcessorGroup.hxx"
27 #include "StructuredCoincidentDEC.hxx"
28 #include "InterpKernelUtilities.hxx"
// Default constructor: creates an empty DEC. Topologies are unattached (and not
// owned), and every send/recv count/displacement/buffer array is null; they are
// allocated later by prepareSourceDE()/prepareTargetDE() after synchronize().
37 StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(nullptr),_topo_target(nullptr),
38 _owns_topo_source(false), _owns_topo_target(false),
39 _send_counts(nullptr),_recv_counts(nullptr),
40 _send_displs(nullptr),_recv_displs(nullptr),
41 _recv_buffer(nullptr),_send_buffer(nullptr)
// Constructor from a local and a distant processor group: forwards the groups to
// the DisjointDEC base (which builds the union group used for the all-to-all
// exchanges) and leaves all topology pointers and communication arrays null, to
// be filled by synchronize()/prepareSourceDE()/prepareTargetDE().
45 StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
46 DisjointDEC(local_group,distant_group),
47 _topo_source(nullptr),_topo_target(nullptr),
48 _owns_topo_source(false), _owns_topo_target(false),
49 _send_counts(nullptr),_recv_counts(nullptr),
50 _send_displs(nullptr),_recv_displs(nullptr),
51 _recv_buffer(nullptr),_send_buffer(nullptr)
55 StructuredCoincidentDEC::~StructuredCoincidentDEC()
60 /** Destructor involves MPI operations: make sure this is accessible from a proper
61 * method for Python wrapping.
62 *
63 * Frees all communication buffers and count/displacement arrays, releases the
64 * topologies if this DEC owns them (i.e. they were created by
65 * broadcastTopology() rather than taken from the local field), and finally
66 * cleans up the base-class MPI state. Safe to call more than once: every
67 * pointer is reset to nullptr and every ownership flag to false.
68 */
63 void StructuredCoincidentDEC::release()
// deleting a nullptr is a no-op, so these are safe even before prepare*DE() ran
65 delete [] _send_buffer;
66 delete [] _recv_buffer;
67 delete [] _send_displs;
68 delete [] _recv_displs;
69 delete [] _send_counts;
70 delete [] _recv_counts;
// null the pointers so a second release() (or the destructor) does not double-free
71 _send_buffer = nullptr;
72 _recv_buffer = nullptr;
73 _send_displs = nullptr;
74 _recv_displs = nullptr;
75 _send_counts = nullptr;
76 _recv_counts = nullptr;
// topologies are deleted only when owned (created via broadcastTopology on the
// side that did not have them); otherwise they belong to the local field
78 if (_owns_topo_source)
80 if (_owns_topo_target)
82 _topo_source = nullptr;
83 _topo_target = nullptr;
84 _owns_topo_source = false;
85 _owns_topo_target = false;
// release MPI resources held by the base class (union group, etc.)
87 DisjointDEC::cleanInstance();
90 /*! Synchronization process for exchanging topologies.
91 *
92 * On ranks belonging to the source (resp. target) group the source (resp.
93 * target) BlockTopology is taken from the local field; on the other side it
94 * is created and filled by broadcastTopology(), and this DEC then owns it.
95 * After the exchange, both sides hold both topologies and their global element
96 * counts are checked for compatibility.
97 */
92 void StructuredCoincidentDEC::synchronizeTopology()
94 if (_source_group->containsMyRank())
// NOTE(review): dynamic_cast may return nullptr if the field's topology is not a
// BlockTopology — not visibly checked here; confirm callers guarantee the type.
95 _topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
97 _owns_topo_source = true; // _topo_source will be filled by broadcastTopology below
98 if (_target_group->containsMyRank())
99 _topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());
101 _owns_topo_target = true; // _topo_target will be filled by broadcastTopology below
103 // Transmitting source topology to target code (tags 1000+ reserved for this step)
104 MESSAGE ("Broadcast source topo ...");
105 broadcastTopology(_topo_source,1000);
107 // Transmitting target topology to source code (tags 2000+ reserved for this step)
108 MESSAGE ("Broadcast target topo ...");
109 broadcastTopology(_topo_target,2000);
// a coincident DEC requires source and target meshes to have the same cell count
110 if (_topo_source->getNbElements() != _topo_target->getNbElements())
111 throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
114 /*! Creates the arrays necessary for the data transfer
115 * and fills the send array with the values of the
116 * source field, ordered by destination (target) rank.
117 *
118 * Only meaningful on ranks of the source group. Builds _send_counts/_send_displs
119 * (indexed by union-communicator rank) and packs _send_buffer so that all values
120 * destined to the same target rank are contiguous, in target-local element order.
121 */
118 void StructuredCoincidentDEC::prepareSourceDE()
120 ////////////////////////////////////
121 //Step 1 : _buffer array creation
// ranks outside the source group have nothing to send
123 if (!_topo_source->getProcGroup()->containsMyRank())
// union group used to translate target-group ranks into union-communicator ranks
125 MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());
127 int myranksource = _topo_source->getProcGroup()->myRank();
// target_arrays[t] collects, for each target rank t, the target-local indices of
// the elements this source rank will send to it
129 vector <mcIdType>* target_arrays=new vector<mcIdType>[_topo_target->getProcGroup()->size()];
131 //cout<<" topotarget size"<< _topo_target->getProcGroup()->size()<<endl;
133 mcIdType nb_local = _topo_source-> getNbLocalElements();
// map each local source element to its owning target rank via the global numbering
134 for (mcIdType ielem=0; ielem< nb_local ; ielem++)
136 // cout <<"source local :"<<myranksource<<","<<ielem<<endl;
137 mcIdType global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
138 // cout << "global "<<global<<endl;
139 pair<int,mcIdType> target_local =_topo_target->globalToLocal(global);
140 // cout << "target local : "<<target_local.first<<","<<target_local.second<<endl;
141 target_arrays[target_local.first].push_back(target_local.second);
// count/displacement arrays are sized on the UNION communicator, since the
// all-to-all in sendData()/recvData() runs over the union group
144 std::size_t union_size=group->size();
146 _send_counts=new int[union_size];
147 _send_displs=new int[union_size];
148 _recv_counts=new int[union_size];
149 _recv_displs=new int[union_size];
// zero-initialize all four arrays (loop body elided in this view — TODO confirm)
151 for (std::size_t i=0; i< union_size; i++)
// fill the send counts, translating each target-group rank to its union rank
159 for (int iproc=0; iproc < _topo_target->getProcGroup()->size(); iproc++)
161 //converts the rank in target to the rank in union communicator
162 int unionrank=group->translateRank(_topo_target->getProcGroup(),iproc);
163 _send_counts[unionrank]=(int)target_arrays[iproc].size();
// classic exclusive prefix sum: displacement of rank i = displ(i-1) + count(i-1)
166 for (int iproc=1; iproc<group->size();iproc++)
167 _send_displs[iproc]=_send_displs[iproc-1]+_send_counts[iproc-1];
169 _send_buffer = new double [nb_local ];
171 /////////////////////////////////////////////////////////////
172 //Step 2 : filling the _buffers with the source field values
// counter[t] = running write position in _send_buffer for target rank t
// NOTE(review): counter[0] initialization is on an elided line — confirm it is
// set to 0 before this prefix-sum loop in the full source.
174 int* counter=new int [_topo_target->getProcGroup()->size()];
176 for (int i=1; i<_topo_target->getProcGroup()->size(); i++)
177 counter[i]=counter[i-1]+(int)target_arrays[i-1].size();
180 const double* value = _local_field->getField()->getArray()->getPointer();
181 //cout << "Nb local " << nb_local<<endl;
// second pass over local elements: scatter field values into _send_buffer,
// grouped by destination rank (counter advances within each rank's segment)
182 for (int ielem=0; ielem<nb_local ; ielem++)
184 mcIdType global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
185 pair<int,mcIdType> target_local =_topo_target->globalToLocal(global);
186 //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
187 //cout <<"counter[]"<<counter[target_local.first]<<endl;
188 _send_buffer[counter[target_local.first]++]=value[ielem];
// NOTE(review): 'counter' and 'group' deletions are not visible in this view —
// presumably freed on elided lines; verify no leak in the full source.
191 delete[] target_arrays;
197 * Creates the buffers for receiving the fields on the target side.
198 *
199 * Mirror of prepareSourceDE(): only meaningful on ranks of the target group.
200 * Builds _recv_counts/_recv_displs (indexed by union-communicator rank) and
201 * allocates _recv_buffer sized to the number of local target elements.
199 void StructuredCoincidentDEC::prepareTargetDE()
// ranks outside the target group have nothing to receive
201 if (!_topo_target->getProcGroup()->containsMyRank())
203 MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());
205 int myranktarget = _topo_target->getProcGroup()->myRank();
// source_arrays[s] collects, for each source rank s, the source-local indices of
// the elements this target rank will receive from it
207 vector < vector <mcIdType> > source_arrays(_topo_source->getProcGroup()->size());
208 mcIdType nb_local = _topo_target-> getNbLocalElements();
// map each local target element back to its owning source rank
209 for (mcIdType ielem=0; ielem< nb_local ; ielem++)
211 // cout <<"TS target local :"<<myranktarget<<","<<ielem<<endl;
212 mcIdType global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
213 //cout << "TS global "<<global<<endl;
214 pair<int,mcIdType> source_local =_topo_source->globalToLocal(global);
215 // cout << "TS source local : "<<source_local.first<<","<<source_local.second<<endl;
216 source_arrays[source_local.first].push_back(source_local.second);
// arrays sized on the union communicator, like on the source side
218 std::size_t union_size=group->size();
219 _recv_counts=new int[union_size];
220 _recv_displs=new int[union_size];
221 _send_counts=new int[union_size];
222 _send_displs=new int[union_size];
// zero-initialize all four arrays (loop body elided in this view — TODO confirm)
224 for (std::size_t i=0; i< union_size; i++)
// fill recv counts, translating source-group ranks into union ranks
230 for (int iproc=0; iproc < _topo_source->getProcGroup()->size(); iproc++)
232 //converts the rank in target to the rank in union communicator
233 int unionrank=group->translateRank(_topo_source->getProcGroup(),iproc);
234 _recv_counts[unionrank]=(int)source_arrays[iproc].size();
// exclusive prefix sum of counts -> displacements for MPI_Alltoallv
236 for (std::size_t i=1; i<union_size; i++)
237 _recv_displs[i]=_recv_displs[i-1]+_recv_counts[i-1];
238 _recv_buffer=new double[nb_local];
245 * Synchronizing a topology so that all the
246 * group possesses it.
247 *
248 * \param topo Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
249 * \param tag Communication tag associated with this operation.
250 *
251 * Protocol: rank 0 of the group that owns the topology ("master") serializes
252 * it, sends its own union-rank to every process point-to-point (tag+i), then
253 * the serialized size and buffer are broadcast from the master over the union
254 * communicator and deserialized on the other side.
251 void StructuredCoincidentDEC::broadcastTopology(BlockTopology*& topo, int tag)
// serialized image of the topology; only allocated on the master rank
255 mcIdType* serializer=0;
// union group over all processes taking part in the DEC
258 MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
260 // The master proc creates a send buffer containing a serialized topology
261 // and sends its union rank to every process so they know whom to listen to
263 if (topo!=0 && topo->getProcGroup()->myRank()==0)
265 MESSAGE ("Master rank");
266 topo->serialize(serializer, size);
267 rank_master = group->translateRank(topo->getProcGroup(),0);
268 MESSAGE("Master rank world number is "<<rank_master);
269 MESSAGE("World Size is "<<group->size());
// per-destination tag (tag+i) matches the tag+myRank used on the receive side
270 for (int i=0; i< group->size(); i++)
273 _comm_interface->send(&rank_master,1,MPI_INT, i,tag+i,*(group->getComm()));
// non-master ranks learn the master's union rank from the point-to-point message
278 MESSAGE(" rank "<<group->myRank()<< " waiting ...");
279 _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
280 MESSAGE(" rank "<<group->myRank()<< " received master rank "<<rank_master);
282 // The topology is broadcasted to all processors in the group:
283 // first the serialized size, then the payload itself
283 _comm_interface->broadcast(&size, 1,MPI_ID_TYPE,rank_master,*(group->getComm()));
285 mcIdType* buffer=new mcIdType[size];
// the master copies its serialized image into the broadcast buffer
286 if (topo!=0 && topo->getProcGroup()->myRank()==0)
287 copy(serializer, serializer+size, buffer);
288 _comm_interface->broadcast(buffer,(int)size,MPI_ID_TYPE,rank_master,*(group->getComm()));
290 // Processors which did not possess the source topology unserialize it
// NOTE(review): cleanup of 'buffer', 'serializer' and 'group' is not visible in
// this view — presumably on elided lines; verify no leak in the full source.
291 BlockTopology* topotemp=new BlockTopology();
292 topotemp->unserialize(buffer, *_comm_interface);
303 MESSAGE (" rank "<<group->myRank()<< " unserialize is over");
// Receives the field data on the target side: participates in the all-to-all
// exchange over the union communicator, then unpacks _recv_buffer into the
// local field array in target-local element order.
309 void StructuredCoincidentDEC::recvData()
311 //MPI_COMM_WORLD is used instead of group because there is no
312 //mechanism for creating the union group yet
// NOTE(review): debug dumps below print exactly 4 entries regardless of the
// actual union size — out-of-bounds read if the union has fewer than 4 ranks,
// and incomplete output if it has more; looks like leftover debugging code.
314 for (int i=0; i< 4; i++)
315 cout << _recv_counts[i]<<" ";
317 for (int i=0; i< 4; i++)
318 cout << _recv_displs[i]<<" ";
321 cout<<"start AllToAll"<<endl;
// variable-count all-to-all: each rank sends its packed _send_buffer segments
// and receives into _recv_buffer according to the counts/displs built in prepare*DE()
322 MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
323 _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
324 _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm);
325 cout<<"end AllToAll"<<endl;
327 mcIdType nb_local = _topo_target->getNbLocalElements();
328 //double* value=new double[nb_local];
// write directly into the local field's array
329 double* value=const_cast<double*>(_local_field->getField()->getArray()->getPointer());
331 int myranktarget=_topo_target->getProcGroup()->myRank();
// counters[s] = read position in _recv_buffer for data received from source rank s
332 vector<int> counters(_topo_source->getProcGroup()->size());
334 for (int i=0; i<_topo_source->getProcGroup()->size()-1; i++)
// NOTE(review): a MPIProcessorGroup is newed on every loop iteration with no
// visible delete — likely a per-iteration leak (and the group is loop-invariant,
// so it could be hoisted); confirm against the full source.
336 MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
337 int worldrank=group->translateRank(_topo_source->getProcGroup(),i);
338 counters[i+1]=counters[i]+_recv_counts[worldrank];
// unpack: for each local element, find which source rank owns it and pull the
// next value from that rank's segment of _recv_buffer
342 for (mcIdType ielem=0; ielem<nb_local ; ielem++)
344 mcIdType global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
345 pair<int,mcIdType> source_local =_topo_source->globalToLocal(global);
346 value[ielem]=_recv_buffer[counters[source_local.first]++];
350 //_local_field->getField()->setValue(value);
// Sends the field data from the source side: joins the same variable-count
// all-to-all as recvData(), pushing the _send_buffer packed by prepareSourceDE().
353 void StructuredCoincidentDEC::sendData()
355 MESSAGE ("sendData");
// NOTE(review): debug dumps print exactly 4 entries regardless of union size —
// out-of-bounds read when fewer than 4 ranks; leftover debugging code.
356 for (int i=0; i< 4; i++)
357 cout << _send_counts[i]<<" ";
359 for (int i=0; i< 4; i++)
360 cout << _send_displs[i]<<" ";
362 cout <<"start AllToAll"<<endl;
// collective over the union communicator; must be matched by recvData() on the
// target ranks (both call the same allToAllV)
363 MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
364 _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
365 _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm);
366 cout<<"end AllToAll"<<endl;
369 /*! Prepares a DEC for data exchange.
370 *
371 This method broadcasts the topologies from source to target
372 so that the target side can analyse from which processors it
373 is expected to receive data.
374 Both the source and the target branches start by exchanging topologies;
375 the remainder of each branch is past this view (presumably the prepare*DE
376 calls) — confirm against the full source.
376 void StructuredCoincidentDEC::synchronize()
378 if (_source_group->containsMyRank())
380 synchronizeTopology();
383 else if (_target_group->containsMyRank())
385 synchronizeTopology();