// Copyright (C) 2007-2015  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "ExplicitCoincidentDEC.hxx"
#include "ExplicitMapping.hxx"
#include "InterpKernelUtilities.hxx"
using namespace std;

namespace ParaMEDMEM
{
  /*!
   * \anchor ExplicitCoincidentDEC-det
   * \class ExplicitCoincidentDEC
   *
   * Data Exchange Channel for fields whose supports coincide but whose parallel
   * distribution is described, on each side, by an ExplicitTopology. The source
   * and target processor groups exchange their topologies during synchronize(),
   * build an explicit element-to-element mapping, and then transfer the field
   * values with sendData()/recvData().
   */
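  /* A minimal usage sketch (added for illustration, not taken from the original
   * sources). The names source_group, target_group and parafield are assumptions:
   * each process is supposed to belong to exactly one of the two groups, and
   * parafield is a ParaFIELD built on an ExplicitTopology.
   *
   *   ExplicitCoincidentDEC dec;
   *   dec.attachLocalField(parafield);          // base-class DEC method
   *   dec.synchronize();                        // exchange topologies, build mapping
   *   if (source_group->containsMyRank())
   *     {
   *       dec.prepareSourceDE();                // pack the send buffer
   *       dec.sendData();
   *     }
   *   else
   *     {
   *       dec.prepareTargetDE();                // allocate the receive buffer
   *       dec.recvData();
   *     }
   *
   * Note that synchronize() below does not call prepareSourceDE()/prepareTargetDE()
   * itself, so the calling code is expected to do so before the exchange.
   */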
  ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0)
  {
  }
  ExplicitCoincidentDEC::~ExplicitCoincidentDEC()
  {
  }
  /*! Synchronization process for exchanging topologies
   */
  void ExplicitCoincidentDEC::synchronize()
  {
    if (_source_group->containsMyRank())
      {
        _toposource = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup = _toposource->getProcGroup()->createProcGroup();
        _targetgroup = _toposource->getProcGroup()->createComplementProcGroup();
      }
    if (_target_group->containsMyRank())
      {
        _topotarget = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup = _topotarget->getProcGroup()->createComplementProcGroup();
        _targetgroup = _topotarget->getProcGroup()->createProcGroup();
      }

    // Transmitting source topology to target code
    broadcastTopology(_toposource,_topotarget,1000);
    transferMappingToSource();
  }
  /*! Creates the arrays necessary for the data transfer
   * and fills the send array with the values of the
   * source field
   */
  void ExplicitCoincidentDEC::prepareSourceDE()
  {
    ////////////////////////////////////
    // Step 1 : buffer array creation

    if (!_toposource->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group = new MPIProcessorGroup(_sourcegroup->getCommInterface());

    // Warning : the size of the target side is implicitly deduced
    // from the size of MPI_COMM_WORLD
    int target_size = _toposource->getProcGroup()->getCommInterface().worldSize() - _toposource->getProcGroup()->size();

    vector<int>* target_arrays = new vector<int>[target_size];

    int nb_local = _toposource->getNbLocalElements();

    int union_size = group->size();

    _sendcounts = new int[union_size];
    _senddispls = new int[union_size];
    _recvcounts = new int[union_size];
    _recvdispls = new int[union_size];

    // nothing is received on the source side: all receive counts stay at zero
    for (int i=0; i<union_size; i++)
      {
        _sendcounts[i]=0;
        _senddispls[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }

    // per-destination number of values to send, as computed by the explicit mapping
    int* counts = _explicit_mapping.getCounts();
    for (int i=0; i<group->size(); i++)
      _sendcounts[i]=counts[i];

    // displacements are the exclusive prefix sum of the counts
    for (int iproc=1; iproc<group->size(); iproc++)
      _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];
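    // Illustrative example (added): with union_size==3 and counts=={4,0,2}, the
    // loops above give _sendcounts=={4,0,2} and _senddispls=={0,4,4}, i.e. the
    // values for each destination rank are packed back to back in _sendbuffer.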

    _sendbuffer = new double[nb_local * _toposource->getNbComponents()];

    /////////////////////////////////////////////////////////////
    // Step 2 : filling the buffers with the source field values

    int* counter = new int[target_size];
    counter[0]=0;
    for (int i=1; i<target_size; i++)
      counter[i]=counter[i-1]+target_arrays[i-1].size();

    const double* value = _local_field->getField()->getArray()->getPointer();

    // for each buffer slot, the position of the corresponding value in the field array
    int* bufferindex = _explicit_mapping.getBufferIndex();

    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int ncomp = _toposource->getNbComponents();
        for (int icomp=0; icomp<ncomp; icomp++)
          {
            _sendbuffer[ielem*ncomp+icomp]=value[bufferindex[ielem]*ncomp+icomp];
          }
      }
    delete[] counter;
    delete[] target_arrays;
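    // Added note: _sendbuffer is filled element-major, the getNbComponents() values
    // of one element being stored contiguously. The slot order is assumed to group
    // elements by destination rank, bufferindex giving for each slot the position of
    // its value in the field array, so that the blocks described by
    // _sendcounts/_senddispls line up with the allToAllV issued in sendData().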
  }

  /*!
   * Creates the buffers for receiving the fields on the target side
   */
  void ExplicitCoincidentDEC::prepareTargetDE()
  {
    if (!_topotarget->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group = new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface());

    // for each source subdomain, the local ids (on the source side) of the elements
    // that this process will receive from it
    vector< vector<int> > source_arrays(_sourcegroup->size());
    int nb_local = _topotarget->getNbLocalElements();
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        //pair<int,int> source_local =_distant_elems[ielem];
        pair<int,int> source_local = _explicit_mapping.getDistantNumbering(ielem);
        source_arrays[source_local.first].push_back(source_local.second);
      }
    int union_size = group->size();
    _recvcounts = new int[union_size];
    _recvdispls = new int[union_size];
    _sendcounts = new int[union_size];
    _senddispls = new int[union_size];

    // nothing is sent from the target side: all send counts stay at zero
    for (int i=0; i<union_size; i++)
      {
        _sendcounts[i]=0;
        _senddispls[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }

    for (int iproc=0; iproc<_sourcegroup->size(); iproc++)
      {
        //converts the rank in the source group to the rank in the union communicator
        int unionrank = group->translateRank(_sourcegroup,iproc);
        _recvcounts[unionrank]=source_arrays[iproc].size()*_topotarget->getNbComponents();
      }
    // displacements are the exclusive prefix sum of the counts
    for (int i=1; i<union_size; i++)
      _recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];

    _recvbuffer = new double[nb_local*_topotarget->getNbComponents()];
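    // Added note: the _recvcounts/_recvdispls computed here are the receive-side
    // counterpart of the _sendcounts/_senddispls built in prepareSourceDE(); both
    // feed the same MPI_DOUBLE allToAllV issued in sendData() and recvData().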
  }

  /*!
   * Synchronizes a topology so that all the processes of the
   * group possess it.
   *
   * \param toposend Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
   * \param toporecv Topology which is received.
   * \param tag Communication tag associated with this operation.
   */
  void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag)
  {
    MPI_Status status;

    int* serializer=0;
    int size;

    MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);

    // The send processors serialize the send topology
    // and send the buffers to the recv procs
    if (toposend != 0 && toposend->getProcGroup()->containsMyRank())
      {
        toposend->serialize(serializer, size);
        for (int iproc=0; iproc<group->size(); iproc++)
          {
            // itarget is the receiver's rank in the union group
            int itarget=iproc;
            if (!toposend->getProcGroup()->contains(itarget))
              {
                // first the buffer size, then the serialized topology itself
                _comm_interface->send(&size,1,MPI_INT, itarget,tag+itarget,*(group->getComm()));
                _comm_interface->send(serializer, size, MPI_INT, itarget, tag+itarget,*(group->getComm()));
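                // Added note: the tags match because itarget is the receiver's rank
                // in the union group, so tag+itarget here equals the tag+myworldrank
                // posted by the receiving branch below.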
              }
          }
      }
    else
      {
        vector<int> size2(group->size());
        int myworldrank = group->myRank();
        for (int iproc=0; iproc<group->size(); iproc++)
          {
            // isource is the sender's rank in the union group
            int isource = iproc;
            if (!toporecv->getProcGroup()->contains(isource))
              {
                int nbelem;
                _comm_interface->recv(&nbelem, 1, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status);
                int* buffer = new int[nbelem];
                _comm_interface->recv(buffer, nbelem, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status);

                ExplicitTopology* topotemp = new ExplicitTopology();
                topotemp->unserialize(buffer, *_comm_interface);
                delete[] buffer;

                // for every local element, record its local id on the sending process
                for (int ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
                  {
                    int global = toporecv->localToGlobal(ielem);
                    int sendlocal = topotemp->globalToLocal(global);
                    if (sendlocal != -1)
                      {
                        size2[iproc]++;
                        _explicit_mapping.pushBackElem(make_pair(iproc,sendlocal));
                      }
                  }
                delete topotemp;
              }
          }
      }
    MESSAGE(" rank "<<group->myRank()<< " broadcastTopology is over");
  }
  void ExplicitCoincidentDEC::transferMappingToSource()
  {
    MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);

    // sending the source->target mapping, which is stored by the target
    // in _distant_elems, from target to source
    if (_topotarget != 0 && _topotarget->getProcGroup()->containsMyRank())
      {
        int world_size = _topotarget->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union = new int[world_size];
        int* dummy_recv = new int[world_size];
        for (int i=0; i<world_size; i++)
          nb_transfer_union[i]=0;
        //converts the rank in the source group to the rank in the union communicator
        for (int i=0; i<_explicit_mapping.nbDistantDomains(); i++)
          {
            int unionrank = group->translateRank(_sourcegroup,_explicit_mapping.getDistantDomain(i));
            nb_transfer_union[unionrank]=_explicit_mapping.getNbDistantElems(i);
          }
        // every process learns how many elements it will exchange with every other one
        _comm_interface->allToAll(nb_transfer_union, 1, MPI_INT, dummy_recv, 1, MPI_INT, MPI_COMM_WORLD);

        int* sendbuffer = _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());

        int* sendcounts = new int[world_size];
        int* senddispls = new int[world_size];
        for (int i=0; i<world_size; i++)
          {
            sendcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              senddispls[i]=0;
            else
              senddispls[i]=senddispls[i-1]+sendcounts[i-1];
          }
        // the target processes receive nothing in this exchange
        int* recvcounts = new int[world_size];
        int* recvdispls = new int[world_size];
        int* dummyrecv = 0;
        for (int i=0; i<world_size; i++)
          {
            recvcounts[i]=0;
            recvdispls[i]=0;
          }
        _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_INT, dummyrecv, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
      }
    //receiving in the source subdomains the mapping sent by targets
    else
      {
        int world_size = _toposource->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union = new int[world_size];
        int* dummy_send = new int[world_size];
        for (int i=0; i<world_size; i++)
          dummy_send[i]=0;
        _comm_interface->allToAll(dummy_send, 1, MPI_INT, nb_transfer_union, 1, MPI_INT, MPI_COMM_WORLD);

        int total_size=0;
        for (int i=0; i<world_size; i++)
          total_size+=nb_transfer_union[i];
        int nbtarget = _targetgroup->size();
        int* targetranks = new int[nbtarget];
        for (int i=0; i<nbtarget; i++)
          targetranks[i]=group->translateRank(_targetgroup,i);
        int* mappingbuffer = new int[total_size*2];
        int* sendcounts = new int[world_size];
        int* senddispls = new int[world_size];
        int* recvcounts = new int[world_size];
        int* recvdispls = new int[world_size];
        int* dummysend = 0;
        for (int i=0; i<world_size; i++)
          {
            recvcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              recvdispls[i]=0;
            else
              recvdispls[i]=recvdispls[i-1]+recvcounts[i-1];
          }
        // the source processes send nothing in this exchange
        for (int i=0; i<world_size; i++)
          {
            sendcounts[i]=0;
            senddispls[i]=0;
          }
        _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_INT, mappingbuffer, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
        _explicit_mapping.unserialize(world_size,nb_transfer_union,nbtarget, targetranks, mappingbuffer);
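        // Added note: each mapped element is exchanged as a pair of integers (hence
        // the factor 2 in the counts and the total_size*2 buffer); the exact pairing
        // is whatever ExplicitMapping::serialize()/unserialize() agree on.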
      }
  }

  void ExplicitCoincidentDEC::recvData()
  {
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet
    cout<<"start AllToAll"<<endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);
    cout<<"end AllToAll"<<endl;

    int nb_local = _topotarget->getNbLocalElements();
    double* value = new double[nb_local*_topotarget->getNbComponents()];

    // counters[i] is the position, in number of elements, of the next value to read
    // from the block received from the i-th source subdomain in _recvbuffer
    vector<int> counters(_sourcegroup->size());
    counters[0]=0;
    MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);
    for (int i=0; i<_sourcegroup->size()-1; i++)
      {
        int worldrank = group->translateRank(_sourcegroup,i);
        // _recvcounts is expressed in numbers of doubles; counters is kept in
        // numbers of elements, hence the division by the number of components
        counters[i+1]=counters[i]+_recvcounts[worldrank]/_topotarget->getNbComponents();
      }
    delete group;

    // scatter the received values back to the local numbering of the target field
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        pair<int,int> distant_numbering = _explicit_mapping.getDistantNumbering(ielem);
        int iproc = distant_numbering.first;
        int ncomp = _topotarget->getNbComponents();
        for (int icomp=0; icomp<ncomp; icomp++)
          value[ielem*ncomp+icomp]=_recvbuffer[counters[iproc]*ncomp+icomp];
        counters[iproc]++;
      }
    _local_field->getField()->getArray()->useArray(value,true,CPP_DEALLOC,nb_local,_topotarget->getNbComponents());
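    // Added note: useArray() with ownership==true and CPP_DEALLOC hands the freshly
    // allocated "value" block over to the field's array, which becomes responsible
    // for releasing it with delete[]; no explicit delete of "value" is needed here.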
  }

  void ExplicitCoincidentDEC::sendData()
  {
    MESSAGE("sendData");
    // debug trace of the first send counts/displacements (assumes at least 4 union ranks)
    for (int i=0; i<4; i++)
      cout << _sendcounts[i]<<" ";
    cout << endl;
    for (int i=0; i<4; i++)
      cout << _senddispls[i]<<" ";
    cout << endl;
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet
    cout<<"start AllToAll"<<endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);