// Copyright (C) 2007-2008  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "ExplicitCoincidentDEC.hxx"
#include "ExplicitMapping.hxx"
#include "InterpKernelUtilities.hxx"

using namespace std;

namespace ParaMEDMEM
{
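  /* Typical call sequence (a sketch for orientation only; the DEC
   * construction and field-attachment details come from the DEC base
   * class and are not shown in this file):
   *   1. both sides attach a ParaFIELD built on an ExplicitTopology;
   *   2. both sides call synchronize() to exchange topologies and
   *      build the explicit element mapping;
   *   3. the source side calls sendData() while the target side calls
   *      recvData(); the two calls meet in a matching MPI_Alltoallv.
   */
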
  ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0)
  {
  }

  ExplicitCoincidentDEC::~ExplicitCoincidentDEC()
  {
  }
  /*! Synchronization process for exchanging topologies
   */
  void ExplicitCoincidentDEC::synchronize()
  {
    if (_source_group->containsMyRank())
      {
        _toposource = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup = _toposource->getProcGroup()->createProcGroup();
        _targetgroup = _toposource->getProcGroup()->createComplementProcGroup();
      }
    if (_target_group->containsMyRank())
      {
        _topotarget = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup = _topotarget->getProcGroup()->createComplementProcGroup();
        _targetgroup = _topotarget->getProcGroup()->createProcGroup();
      }

    // Transmitting source topology to target code
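    // The exchange is two-way: broadcastTopology() sends the serialized
    // source topology to the ranks outside the source group, which use it
    // to build the explicit element mapping; transferMappingToSource()
    // then ships that mapping back so the source side can fill its send
    // buffers in the order the targets expect.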
    broadcastTopology(_toposource,_topotarget,1000);
    transferMappingToSource();
  }
  /*! Creates the arrays necessary for the data transfer
   * and fills the send array with the values of the
   * source field
   */
  void ExplicitCoincidentDEC::prepareSourceDE()
  {
    ////////////////////////////////////
    // Step 1 : buffer array creation

    if (!_toposource->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_sourcegroup->getCommInterface());
    // Warning : the size of the target side is implicitly deduced
    // from the size of MPI_COMM_WORLD
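    // For example (illustrative numbers): with 5 processes in
    // MPI_COMM_WORLD of which 3 belong to the source group,
    // target_size = 5 - 3 = 2.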
    int target_size = _toposource->getProcGroup()->getCommInterface().worldSize() - _toposource->getProcGroup()->size();

    vector<int>* target_arrays=new vector<int>[target_size];

    int nb_local = _toposource->getNbLocalElements();

    int union_size=group->size();

    _sendcounts=new int[union_size];
    _senddispls=new int[union_size];
    _recvcounts=new int[union_size];
    _recvdispls=new int[union_size];
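    // These four arrays feed the final MPI_Alltoallv: for each rank i of
    // the union communicator, _sendcounts[i]/_recvcounts[i] give the
    // number of values exchanged with rank i, and _senddispls[i]/
    // _recvdispls[i] the offsets of those values in _sendbuffer/_recvbuffer.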
    for (int i=0; i< union_size; i++)
      {
        _sendcounts[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }
    _senddispls[0]=0;

    int* counts=_explicit_mapping.getCounts();
    for (int i=0; i<group->size(); i++)
      _sendcounts[i]=counts[i];

    for (int iproc=1; iproc<group->size();iproc++)
      _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];

    _sendbuffer = new double [nb_local * _toposource->getNbComponents()];
    /////////////////////////////////////////////////////////////
    // Step 2 : filling the buffers with the source field values

    int* counter=new int [target_size];
    counter[0]=0;
    for (int i=1; i<target_size; i++)
      counter[i]=counter[i-1]+target_arrays[i-1].size();
    const double* value = _local_field->getField()->getArray()->getPointer();

    int* bufferindex= _explicit_mapping.getBufferIndex();
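    // bufferindex[ielem] is the local index, in the source field, of the
    // element that must occupy slot ielem of the send buffer; the copy
    // below is therefore a gather through that indirection, one tuple of
    // getNbComponents() values per element.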
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int ncomp = _toposource->getNbComponents();
        for (int icomp=0; icomp<ncomp; icomp++)
          {
            _sendbuffer[ielem*ncomp+icomp]=value[bufferindex[ielem]*ncomp+icomp];
          }
      }
    delete[] target_arrays;
    delete[] counter;
  }
  /*!
   * Creates the buffers for receiving the fields on the target side
   */
  void ExplicitCoincidentDEC::prepareTargetDE()
  {
    if (!_topotarget->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface());

    vector< vector<int> > source_arrays(_sourcegroup->size());
    int nb_local = _topotarget->getNbLocalElements();
    for (int ielem=0; ielem< nb_local ; ielem++)
      {
        //pair<int,int> source_local =_distant_elems[ielem];
        pair<int,int> source_local=_explicit_mapping.getDistantNumbering(ielem);
        source_arrays[source_local.first].push_back(source_local.second);
      }
    int union_size=group->size();
    _recvcounts=new int[union_size];
    _recvdispls=new int[union_size];
    _sendcounts=new int[union_size];
    _senddispls=new int[union_size];
    for (int i=0; i< union_size; i++)
      {
        _sendcounts[i]=0;
        _senddispls[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }
    for (int iproc=0; iproc < _sourcegroup->size(); iproc++)
      {
        // converts the rank in the source group to the rank in the union communicator
        int unionrank=group->translateRank(_sourcegroup,iproc);
        _recvcounts[unionrank]=source_arrays[iproc].size()*_topotarget->getNbComponents();
      }
    for (int i=1; i<union_size; i++)
      _recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];
    _recvbuffer=new double[nb_local*_topotarget->getNbComponents()];
  }
  /*!
   * Synchronizing a topology so that all the
   * processes of the group possess it.
   *
   * \param toposend Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
   * \param toporecv Topology which is received.
   * \param tag Communication tag associated with this operation.
   */
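  // Note on the tag scheme used below: a sender posts its messages with
  // tag 'tag+itarget' (the receiver's rank) and a receiver matches with
  // 'tag+myworldrank', so each (sender, receiver) pair uses a tag that
  // cannot collide with the messages destined for other ranks.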
  void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag)
  {
    MPI_Status status;

    int* serializer=0;
    int size;

    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
    // The send processors serialize the send topology
    // and send the buffers to the recv procs
    if (toposend !=0 && toposend->getProcGroup()->containsMyRank())
      {
        toposend->serialize(serializer, size);
        for (int iproc=0; iproc< group->size(); iproc++)
          {
            int itarget=iproc;
            if (!toposend->getProcGroup()->contains(itarget))
              {
                _comm_interface->send(&size,1,MPI_INT, itarget,tag+itarget,*(group->getComm()));
                _comm_interface->send(serializer, size, MPI_INT, itarget, tag+itarget,*(group->getComm()));
              }
          }
      }
    else
      {
        vector<int> size(group->size());
        int myworldrank=group->myRank();
        for (int iproc=0; iproc<group->size();iproc++)
          {
            int isource=iproc;
            if (!toporecv->getProcGroup()->contains(isource))
              {
                int nbelem;
                _comm_interface->recv(&nbelem, 1, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status);
                int* buffer = new int[nbelem];
                _comm_interface->recv(buffer, nbelem, MPI_INT, isource,tag+myworldrank, *(group->getComm()), &status);
                ExplicitTopology* topotemp=new ExplicitTopology();
                topotemp->unserialize(buffer, *_comm_interface);
                delete[] buffer;

                for (int ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
                  {
                    int global = toporecv->localToGlobal(ielem);
                    int sendlocal=topotemp->globalToLocal(global);
                    if (sendlocal!=-1)
                      {
                        size[iproc]++;
                        _explicit_mapping.pushBackElem(make_pair(iproc,sendlocal));
                      }
                  }
                delete topotemp;
              }
          }
      }
    MESSAGE (" rank "<<group->myRank()<< " broadcastTopology is over");
  }
  void ExplicitCoincidentDEC::transferMappingToSource()
  {
    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
    // Sending the source->target mapping, which is stored by the target
    // side (in _explicit_mapping), from the target procs to the source procs
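    // Wire format (as produced by ExplicitMapping::serialize below): each
    // mapped element contributes a pair of ints, which is why every count
    // passed to the MPI_Alltoallv is 2*nb_transfer_union[i].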
    if (_topotarget!=0 && _topotarget->getProcGroup()->containsMyRank())
      {
        int world_size = _topotarget->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union=new int[world_size];
        int* dummy_recv=new int[world_size];
        for (int i=0; i<world_size; i++)
          nb_transfer_union[i]=0;
        // converts the rank in the source group to the rank in the union communicator
        for (int i=0; i< _explicit_mapping.nbDistantDomains(); i++)
          {
            int unionrank=group->translateRank(_sourcegroup,_explicit_mapping.getDistantDomain(i));
            nb_transfer_union[unionrank]=_explicit_mapping.getNbDistantElems(i);
          }
        _comm_interface->allToAll(nb_transfer_union, 1, MPI_INT, dummy_recv, 1, MPI_INT, MPI_COMM_WORLD);
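        // The all-to-all of counts above lets every source rank learn, before
        // the variable-size exchange below, how many mapping entries it will
        // receive from each target rank; the targets themselves only receive
        // the dummy zeros sent by the sources.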
        int* sendbuffer= _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());

        int* sendcounts= new int [world_size];
        int* senddispls = new int [world_size];
        for (int i=0; i< world_size; i++)
          {
            sendcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              senddispls[i]=0;
            else
              senddispls[i]=senddispls[i-1]+sendcounts[i-1];
          }
        int* recvcounts=new int[world_size];
        int* recvdispls=new int[world_size];
        int* dummyrecv=0;
        for (int i=0; i <world_size; i++)
          {
            recvcounts[i]=0;
            recvdispls[i]=0;
          }
        _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_INT, dummyrecv, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
      }
    // receiving in the source subdomains the mapping sent by targets
    else
      {
        int world_size = _toposource->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union=new int[world_size];
        int* dummy_send=new int[world_size];
        for (int i=0; i<world_size; i++)
          dummy_send[i]=0;
        _comm_interface->allToAll(dummy_send, 1, MPI_INT, nb_transfer_union, 1, MPI_INT, MPI_COMM_WORLD);

        int total_size=0;
        for (int i=0; i< world_size; i++)
          total_size+=nb_transfer_union[i];
        int nbtarget = _targetgroup->size();
        int* targetranks = new int[nbtarget];
        for (int i=0; i<nbtarget; i++)
          targetranks[i]=group->translateRank(_targetgroup,i);
        int* mappingbuffer= new int [total_size*2];
        int* sendcounts= new int [world_size];
        int* senddispls = new int [world_size];
        int* recvcounts=new int[world_size];
        int* recvdispls=new int[world_size];
        for (int i=0; i< world_size; i++)
          {
            recvcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              recvdispls[i]=0;
            else
              recvdispls[i]=recvdispls[i-1]+recvcounts[i-1];
          }
        int* dummysend=0;
        for (int i=0; i <world_size; i++)
          {
            sendcounts[i]=0;
            senddispls[i]=0;
          }
        _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_INT, mappingbuffer, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
        _explicit_mapping.unserialize(world_size,nb_transfer_union,nbtarget, targetranks, mappingbuffer);
      }
  }
  void ExplicitCoincidentDEC::recvData()
  {
    // MPI_COMM_WORLD is used instead of the union group because there is
    // no mechanism for creating the union group yet
    cout<<"start AllToAll"<<endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);
    cout<<"end AllToAll"<<endl;

    int nb_local = _topotarget->getNbLocalElements();
    double* value=new double[nb_local*_topotarget->getNbComponents()];
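    // counters[i] holds the offset inside _recvbuffer of the first value
    // received from source proc i; the element loop below then uses
    // counters[iproc] as a running cursor into that per-source block.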
    vector<int> counters(_sourcegroup->size());
    counters[0]=0;
    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
    for (int i=0; i<_sourcegroup->size()-1; i++)
      {
        int worldrank=group->translateRank(_sourcegroup,i);
        counters[i+1]=counters[i]+_recvcounts[worldrank];
      }
    delete group;
    for (int ielem=0; ielem<nb_local ; ielem++)
      {
        pair<int,int> distant_numbering=_explicit_mapping.getDistantNumbering(ielem);
        int iproc=distant_numbering.first;
        int ncomp = _topotarget->getNbComponents();
        for (int icomp=0; icomp< ncomp; icomp++)
          value[ielem*ncomp+icomp]=_recvbuffer[counters[iproc]*ncomp+icomp];
        counters[iproc]++;
      }
    _local_field->getField()->getArray()->useArray(value,true,CPP_DEALLOC,nb_local,_topotarget->getNbComponents());
  }
  void ExplicitCoincidentDEC::sendData()
  {
    MESSAGE ("sendData");
    // debug output: dumps the first four send counts and displacements
    // (assumes a run with at least 4 processes)
    for (int i=0; i< 4; i++)
      cout << _sendcounts[i]<<" ";
    cout << endl;
    for (int i=0; i< 4; i++)
      cout << _senddispls[i]<<" ";
    cout << endl;
    // MPI_COMM_WORLD is used instead of the union group because there is
    // no mechanism for creating the union group yet
    cout<<"start AllToAll"<<endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE, MPI_COMM_WORLD);
  }
}