// Copyright (C) 2007-2013  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include <mpi.h>

#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "ExplicitCoincidentDEC.hxx"
#include "ExplicitMapping.hxx"
#include "InterpKernelUtilities.hxx"

using namespace std;

namespace ParaMEDMEM
{
  /*! \defgroup explicitcoincidentdec ExplicitCoincidentDEC

    This DEC exchanges field values between a source group and a target group of
    processes whose fields share the same explicit topology (the same global
    element numbering) but are distributed differently over the two disjoint
    groups of processes.
  */

  ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0)
  {
  }

  ExplicitCoincidentDEC::~ExplicitCoincidentDEC()
  {
  }

  /*!
    \addtogroup explicitcoincidentdec
    @{
  */
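  /*! A minimal usage sketch (hypothetical driver code; the constructor and the
   *  field-attachment call shown here are assumptions based on the generic DEC
   *  calling sequence, not something defined in this file):
   *  \code
   *  ExplicitCoincidentDEC dec;            // process groups are handled by the DEC base class
   *  dec.attachLocalField(parafield);      // parafield: a ParaFIELD built on an ExplicitTopology
   *  dec.synchronize();                    // exchange topologies, build the explicit mapping
   *  if (source_side)                      // source_side: placeholder for "my rank is in the source group"
   *    { dec.prepareSourceDE(); dec.sendData(); }
   *  else
   *    { dec.prepareTargetDE(); dec.recvData(); }
   *  \endcode
   */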
  /*! Synchronization process for exchanging topologies
   */
  void ExplicitCoincidentDEC::synchronize()
  {
    if (_source_group->containsMyRank())
      {
        _toposource = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup = _toposource->getProcGroup()->createProcGroup();
        _targetgroup = _toposource->getProcGroup()->createComplementProcGroup();
      }
    if (_target_group->containsMyRank())
      {
        _topotarget = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup = _topotarget->getProcGroup()->createComplementProcGroup();
        _targetgroup = _topotarget->getProcGroup()->createProcGroup();
      }

    // Transmitting source topology to target code
    broadcastTopology(_toposource, _topotarget, 1000);
    transferMappingToSource();
  }
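  // Overall exchange sequence implemented by this DEC (as can be read from the
  // methods below): synchronize() builds _explicit_mapping on both sides; the
  // prepare*DE() methods then size the counts/displacements and allocate the
  // send/receive buffers; finally sendData()/recvData() perform a single
  // MPI all-to-all-v over MPI_COMM_WORLD to move the field values.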
  /*! Creates the arrays necessary for the data transfer
   * and fills the send array with the values of the
   * source field
   */
  void ExplicitCoincidentDEC::prepareSourceDE()
  {
    ////////////////////////////////////
    // Step 1 : buffer array creation

    if (!_toposource->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group = new MPIProcessorGroup(_sourcegroup->getCommInterface());

    // Warning : the size of the target side is implicitly deduced
    // from the size of MPI_COMM_WORLD
    int target_size = _toposource->getProcGroup()->getCommInterface().worldSize() - _toposource->getProcGroup()->size();
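    // For illustration: with 5 processes in MPI_COMM_WORLD, 3 of which belong to
    // the source group, target_size = 5 - 3 = 2.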
    vector<int>* target_arrays = new vector<int>[target_size];

    int nb_local = _toposource->getNbLocalElements();

    int union_size = group->size();

    _sendcounts = new int[union_size];
    _senddispls = new int[union_size];
    _recvcounts = new int[union_size];
    _recvdispls = new int[union_size];

    for (int i=0; i<union_size; i++)
      {
        _sendcounts[i]=0;
        _senddispls[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }

    int* counts = _explicit_mapping.getCounts();
    for (int i=0; i<group->size(); i++)
      _sendcounts[i]=counts[i];

    for (int iproc=1; iproc<group->size(); iproc++)
      _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];
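    // The send displacements are the exclusive prefix sum of the send counts:
    // e.g. _sendcounts = {0, 6, 2} yields _senddispls = {0, 0, 6}.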
    _sendbuffer = new double[nb_local * _toposource->getNbComponents()];

    /////////////////////////////////////////////////////////////
    // Step 2 : filling the buffers with the source field values

    int* counter = new int[target_size];
    counter[0]=0;
    for (int i=1; i<target_size; i++)
      counter[i]=counter[i-1]+target_arrays[i-1].size();

    const double* value = _local_field->getField()->getArray()->getPointer();

    int* bufferindex = _explicit_mapping.getBufferIndex();

    // The send buffer is filled element-major: component icomp of the ielem-th
    // buffer slot is read from local element bufferindex[ielem] of the field.
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int ncomp = _toposource->getNbComponents();
        for (int icomp=0; icomp<ncomp; icomp++)
          {
            _sendbuffer[ielem*ncomp+icomp]=value[bufferindex[ielem]*ncomp+icomp];
          }
      }
    delete[] target_arrays;
    delete[] counter;
  }
  /*!
   * Creates the buffers for receiving the fields on the target side
   */
  void ExplicitCoincidentDEC::prepareTargetDE()
  {
    if (!_topotarget->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group = new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface());

    vector< vector<int> > source_arrays(_sourcegroup->size());
    int nb_local = _topotarget->getNbLocalElements();
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        //pair<int,int> source_local = _distant_elems[ielem];
        pair<int,int> source_local = _explicit_mapping.getDistantNumbering(ielem);
        source_arrays[source_local.first].push_back(source_local.second);
      }
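    // For illustration: if this target process owns 4 elements, 3 coming from
    // source subdomain 0 and 1 from source subdomain 1, then source_arrays holds
    // {3 local indices, 1 local index} and, with 2 components per field value,
    // the loop below produces receive counts of 6 and 2 at the union ranks of
    // those two source processes.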
    int union_size = group->size();
    _recvcounts = new int[union_size];
    _recvdispls = new int[union_size];
    _sendcounts = new int[union_size];
    _senddispls = new int[union_size];

    for (int i=0; i<union_size; i++)
      {
        _sendcounts[i]=0;
        _senddispls[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }
    for (int iproc=0; iproc<_sourcegroup->size(); iproc++)
      {
        // converts the rank in the source group to the rank in the union communicator
        int unionrank = group->translateRank(_sourcegroup, iproc);
        _recvcounts[unionrank] = source_arrays[iproc].size() * _topotarget->getNbComponents();
      }
    for (int i=1; i<union_size; i++)
      _recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];
    _recvbuffer = new double[nb_local * _topotarget->getNbComponents()];
  }
  /*!
   * Synchronizes a topology so that all the processes in the
   * group possess it.
   *
   * \param toposend Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
   * \param toporecv Topology which is received.
   * \param tag Communication tag associated with this operation.
   */
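  // Wire protocol (as implemented below): each sending process posts, towards each
  // process outside the sending group, two point-to-point messages -- first the size
  // of the serialized topology, then the serialized buffer itself. The send tag is
  // tag + <receiver's union rank>, which the receiving side matches with
  // tag + myworldrank.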
  void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag)
  {
    MPI_Status status;
    int* serializer=0;
    int size;
    MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);

    // The send processors serialize the send topology
    // and send the buffers to the recv procs
    if (toposend != 0 && toposend->getProcGroup()->containsMyRank())
      {
        toposend->serialize(serializer, size);
        for (int iproc=0; iproc<group->size(); iproc++)
          {
            int itarget = iproc;
            if (!toposend->getProcGroup()->contains(itarget))
              {
                _comm_interface->send(&size, 1, MPI_INT, itarget, tag+itarget, *(group->getComm()));
                _comm_interface->send(serializer, size, MPI_INT, itarget, tag+itarget, *(group->getComm()));
              }
          }
      }
    else
      {
        vector<int> size(group->size());
        int myworldrank = group->myRank();
        for (int iproc=0; iproc<group->size(); iproc++)
          {
            int isource = iproc;
            // receive only from processes that own the send topology
            if (!toporecv->getProcGroup()->contains(isource))
              {
                int nbelem;
                _comm_interface->recv(&nbelem, 1, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status);
                int* buffer = new int[nbelem];
                _comm_interface->recv(buffer, nbelem, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status);

                ExplicitTopology* topotemp = new ExplicitTopology();
                topotemp->unserialize(buffer, *_comm_interface);

                for (int ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
                  {
                    int global = toporecv->localToGlobal(ielem);
                    int sendlocal = topotemp->globalToLocal(global);
                    if (sendlocal != -1)
                      {
                        size[iproc]++;
                        _explicit_mapping.pushBackElem(make_pair(iproc,sendlocal));
                      }
                  }
              }
          }
      }
    MESSAGE (" rank "<<group->myRank()<< " broadcastTopology is over");
  }
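  /*!
   * Sends the element mapping built on the target side (stored in _explicit_mapping
   * during broadcastTopology) back to the source processes, using an all-to-all
   * exchange of counts followed by a sparse all-to-all-v over MPI_COMM_WORLD.
   */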
  void ExplicitCoincidentDEC::transferMappingToSource()
  {
    MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);

    // sending source->target mapping which is stored by target
    // in _distant_elems from target to source
    if (_topotarget != 0 && _topotarget->getProcGroup()->containsMyRank())
      {
        int world_size = _topotarget->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union = new int[world_size];
        int* dummy_recv = new int[world_size];
        for (int i=0; i<world_size; i++)
          nb_transfer_union[i]=0;

        // converts the rank in the source group to the rank in the union communicator
        for (int i=0; i<_explicit_mapping.nbDistantDomains(); i++)
          {
            int unionrank = group->translateRank(_sourcegroup, _explicit_mapping.getDistantDomain(i));
            nb_transfer_union[unionrank] = _explicit_mapping.getNbDistantElems(i);
          }
        // first exchange how many mapping entries each pair of processes will transfer
        _comm_interface->allToAll(nb_transfer_union, 1, MPI_INT, dummy_recv, 1, MPI_INT, MPI_COMM_WORLD);
        int* sendbuffer = _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());

        int* sendcounts = new int[world_size];
        int* senddispls = new int[world_size];
        for (int i=0; i<world_size; i++)
          {
            // two integers per transferred element (the serialized pair)
            sendcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              senddispls[i]=0;
            else
              senddispls[i]=senddispls[i-1]+sendcounts[i-1];
          }
        int* recvcounts = new int[world_size];
        int* recvdispls = new int[world_size];
        int* dummyrecv = 0;
        for (int i=0; i<world_size; i++)
          {
            recvcounts[i]=0;
            recvdispls[i]=0;
          }
        _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_INT, dummyrecv, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
      }
    // receiving in the source subdomains the mapping sent by targets
    else
      {
        int world_size = _toposource->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union = new int[world_size];
        int* dummy_send = new int[world_size];
        for (int i=0; i<world_size; i++)
          dummy_send[i]=0;
        _comm_interface->allToAll(dummy_send, 1, MPI_INT, nb_transfer_union, 1, MPI_INT, MPI_COMM_WORLD);

        int total_size=0;
        for (int i=0; i<world_size; i++)
          total_size += nb_transfer_union[i];
        int nbtarget = _targetgroup->size();
        int* targetranks = new int[nbtarget];
        for (int i=0; i<nbtarget; i++)
          targetranks[i] = group->translateRank(_targetgroup, i);
        int* mappingbuffer = new int[total_size*2];
        int* sendcounts = new int[world_size];
        int* senddispls = new int[world_size];
        int* recvcounts = new int[world_size];
        int* recvdispls = new int[world_size];
        int* dummysend = 0;
        for (int i=0; i<world_size; i++)
          {
            recvcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              recvdispls[i]=0;
            else
              recvdispls[i]=recvdispls[i-1]+recvcounts[i-1];
          }
        for (int i=0; i<world_size; i++)
          {
            sendcounts[i]=0;
            senddispls[i]=0;
          }
        _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_INT, mappingbuffer, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
        _explicit_mapping.unserialize(world_size, nb_transfer_union, nbtarget, targetranks, mappingbuffer);
      }
  }
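  /*!
   * Receives the field values from the source processes with a single all-to-all-v
   * and copies them into the target ParaFIELD, following the element ordering given
   * by _explicit_mapping.
   */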
  void ExplicitCoincidentDEC::recvData()
  {
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet

    cout << "start AllToAll" << endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);
    cout << "end AllToAll" << endl;
    int nb_local = _topotarget->getNbLocalElements();
    double* value = new double[nb_local * _topotarget->getNbComponents()];

    // counters[i] is the current read position in _recvbuffer for the i-th source process
    vector<int> counters(_sourcegroup->size());
    MPIProcessorGroup* group = new MPIProcessorGroup(*_comm_interface);
    for (int i=0; i<_sourcegroup->size()-1; i++)
      {
        int worldrank = group->translateRank(_sourcegroup, i);
        counters[i+1] = counters[i] + _recvcounts[worldrank];
      }
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        pair<int,int> distant_numbering = _explicit_mapping.getDistantNumbering(ielem);
        int iproc = distant_numbering.first;
        int ncomp = _topotarget->getNbComponents();
        for (int icomp=0; icomp<ncomp; icomp++)
          value[ielem*ncomp+icomp] = _recvbuffer[counters[iproc]*ncomp+icomp];
        counters[iproc]++;
      }
    _local_field->getField()->getArray()->useArray(value, true, CPP_DEALLOC, nb_local, _topotarget->getNbComponents());
  }
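  /*!
   * Sends the source field values prepared in prepareSourceDE() to the target
   * processes with a single all-to-all-v over MPI_COMM_WORLD.
   */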
  void ExplicitCoincidentDEC::sendData()
  {
    MESSAGE ("sendData");
    // debug output of the send counts and displacements
    for (int i=0; i<_comm_interface->worldSize(); i++)
      cout << _sendcounts[i] << " ";
    cout << endl;
    for (int i=0; i<_comm_interface->worldSize(); i++)
      cout << _senddispls[i] << " ";
    cout << endl;
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet
    cout << "start AllToAll" << endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);
    cout << "end AllToAll" << endl;
  }
  /*!
    @}
  */
}