// Copyright (C) 2007-2024  CEA, EDF
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
20 #include "CommInterface.hxx"
21 #include "ProcessorGroup.hxx"
22 #include "MPIProcessorGroup.hxx"
23 #include "MPIAccessDEC.hxx"
24 #include "MxN_Mapping.hxx"
31 MxN_Mapping::MxN_Mapping(const ProcessorGroup& source_group, const ProcessorGroup& target_group,const DECOptions& dec_options)
32 : DECOptions(dec_options),
33 _union_group(source_group.fuse(target_group)),
34 _nb_comps(0), _sending_ids(), _recv_ids()
36 _access_DEC = new MPIAccessDEC(source_group,target_group,getAsynchronous());
37 _access_DEC->setTimeInterpolator(getTimeInterpolationMethod());
38 _send_proc_offsets.resize(_union_group->size()+1,0);
39 _recv_proc_offsets.resize(_union_group->size()+1,0);
43 MxN_Mapping::~MxN_Mapping()
51 Method registering a new element for correspondence with a distant element
52 \param distant_proc proc rank of the distant processor (in terms of the union group)
53 \param distant_element id of the element on the distant processor
55 void MxN_Mapping::addElementFromSource(int distant_proc, mcIdType distant_element)
57 _sending_ids.push_back(make_pair(distant_proc,distant_element));
58 for (int i=distant_proc; i<_union_group->size(); i++)
59 _send_proc_offsets[i+1]++;
62 void MxN_Mapping::initialize()
65 std::fill(_send_proc_offsets.begin(),_send_proc_offsets.end(),0);
68 void MxN_Mapping::prepareSendRecv()
70 CommInterface comm_interface=_union_group->getCommInterface();
71 // sending count pattern
72 int* nbsend=new int[_union_group->size()];
73 int* nbrecv=new int[_union_group->size()];
74 for (int i=0; i<_union_group->size(); i++)
76 nbsend[i]=_send_proc_offsets[i+1]-_send_proc_offsets[i];
79 MPIProcessorGroup* group = static_cast<MPIProcessorGroup*>(_union_group);
80 const MPI_Comm* comm=group->getComm();
81 comm_interface.allToAll(nbsend, 1, MPI_INT,
85 for (int i=0; i<_union_group->size(); i++)
87 for (int j=i+1;j<_union_group->size()+1; j++)
88 _recv_proc_offsets[j]+=nbrecv[i];
95 _recv_ids.resize(_recv_proc_offsets[_union_group->size()]);
98 if (_sending_ids.size()>0)
99 isendbuf = new mcIdType[_sending_ids.size()];
100 if (_recv_ids.size()>0)
101 irecvbuf = new mcIdType[_recv_ids.size()];
102 int* sendcounts = new int[_union_group->size()];
103 int* senddispls=new int[_union_group->size()];
104 int* recvcounts=new int[_union_group->size()];
105 int* recvdispls=new int[_union_group->size()];
106 for (int i=0; i< _union_group->size(); i++)
108 sendcounts[i]=_send_proc_offsets[i+1]-_send_proc_offsets[i];
109 senddispls[i]=_send_proc_offsets[i];
110 recvcounts[i]=_recv_proc_offsets[i+1]-_recv_proc_offsets[i];
111 recvdispls[i]=_recv_proc_offsets[i];
113 vector<int> offsets = _send_proc_offsets;
114 for (std::size_t i=0; i<_sending_ids.size();i++)
116 int iproc = _sending_ids[i].first;
117 isendbuf[offsets[iproc]]=_sending_ids[i].second;
120 comm_interface.allToAllV(isendbuf, sendcounts, senddispls, MPI_ID_TYPE,
121 irecvbuf, recvcounts, recvdispls, MPI_ID_TYPE,
124 for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++)
125 _recv_ids[i]=irecvbuf[i];
127 if (_sending_ids.size()>0)
129 if (_recv_ids.size()>0)
137 MCAuto<DataArrayIdType> MxN_Mapping::retrieveNonFetchedIdsTarget(mcIdType nbTuples) const
139 MCAuto<DataArrayIdType> ret(DataArrayIdType::New()); ret->alloc(0,1);
140 std::vector<bool> hitMachine(nbTuples,false);
141 for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++)
143 mcIdType recvId(_recv_ids[i]);
144 hitMachine[recvId] = true;
146 for( mcIdType ituple = 0 ; ituple < nbTuples ; ++ituple )
148 if( ! hitMachine[ituple] )
149 ret->pushBackSilent( ituple );
154 /*! Exchanging field data between two groups of processes
156 * \param field MEDCoupling field containing the values to be sent
158 * The ids that were defined by addElementFromSource method
161 void MxN_Mapping::sendRecv(double* sendfield, MEDCouplingFieldDouble& field) const
163 CommInterface comm_interface=_union_group->getCommInterface();
164 const MPIProcessorGroup* group = static_cast<const MPIProcessorGroup*>(_union_group);
166 int nbcomp=(int)field.getArray()->getNumberOfComponents();
169 if (_sending_ids.size() >0)
170 sendbuf = new double[_sending_ids.size()*nbcomp];
171 if (_recv_ids.size()>0)
172 recvbuf = new double[_recv_ids.size()*nbcomp];
173 int* sendcounts = new int[_union_group->size()];
174 int* senddispls=new int[_union_group->size()];
175 int* recvcounts=new int[_union_group->size()];
176 int* recvdispls=new int[_union_group->size()];
178 for (int i=0; i< _union_group->size(); i++)
180 sendcounts[i]=nbcomp*(_send_proc_offsets[i+1]-_send_proc_offsets[i]);
181 senddispls[i]=nbcomp*(_send_proc_offsets[i]);
182 recvcounts[i]=nbcomp*(_recv_proc_offsets[i+1]-_recv_proc_offsets[i]);
183 recvdispls[i]=nbcomp*(_recv_proc_offsets[i]);
185 //building the buffer of the elements to be sent
186 vector<int> offsets = _send_proc_offsets;
188 for (int i=0; i<(int)_sending_ids.size();i++)
190 int iproc = _sending_ids[i].first;
191 for (int icomp=0; icomp<nbcomp; icomp++)
192 sendbuf[offsets[iproc]*nbcomp+icomp]=sendfield[i*nbcomp+icomp];
196 //communication phase
197 switch (getAllToAllMethod())
201 const MPI_Comm* comm = group->getComm();
202 comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
203 recvbuf, recvcounts, recvdispls, MPI_DOUBLE,
208 _access_DEC->allToAllv(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
209 recvbuf, recvcounts, recvdispls, MPI_DOUBLE);
213 //setting the received values in the field
214 DataArrayDouble *fieldArr=field.getArray();
215 double* recvptr=recvbuf;
216 for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++)
218 mcIdType recvId(_recv_ids[i]);
219 for (int icomp=0; icomp<nbcomp; icomp++)
221 double temp = fieldArr->getIJ(recvId,icomp);
222 fieldArr->setIJ(recvId,icomp,temp+*recvptr);
226 if (sendbuf!=0 && getAllToAllMethod()== Native)
237 /*! Exchanging field data between two groups of processes
239 * \param field MEDCoupling field containing the values to be sent
241 * The ids that were defined by addElementFromSource method
244 void MxN_Mapping::reverseSendRecv(double* recvfield, MEDCouplingFieldDouble& field) const
246 CommInterface comm_interface=_union_group->getCommInterface();
247 const MPIProcessorGroup* group = static_cast<const MPIProcessorGroup*>(_union_group);
249 int nbcomp=(int)field.getArray()->getNumberOfComponents();
252 if (_recv_ids.size() >0)
253 sendbuf = new double[_recv_ids.size()*nbcomp];
254 if (_sending_ids.size()>0)
255 recvbuf = new double[_sending_ids.size()*nbcomp];
257 int* sendcounts = new int[_union_group->size()];
258 int* senddispls=new int[_union_group->size()];
259 int* recvcounts=new int[_union_group->size()];
260 int* recvdispls=new int[_union_group->size()];
262 for (int i=0; i< _union_group->size(); i++)
264 sendcounts[i]=nbcomp*(_recv_proc_offsets[i+1]-_recv_proc_offsets[i]);
265 senddispls[i]=nbcomp*(_recv_proc_offsets[i]);
266 recvcounts[i]=nbcomp*(_send_proc_offsets[i+1]-_send_proc_offsets[i]);
267 recvdispls[i]=nbcomp*(_send_proc_offsets[i]);
269 //building the buffer of the elements to be sent
270 vector<int> offsets = _recv_proc_offsets;
271 DataArrayDouble *fieldArr=field.getArray();
272 for (int iproc=0; iproc<_union_group->size();iproc++)
273 for (int i=_recv_proc_offsets[iproc]; i<_recv_proc_offsets[iproc+1]; i++)
275 for (int icomp=0; icomp<nbcomp; icomp++)
276 sendbuf[i*nbcomp+icomp]=fieldArr->getIJ(_recv_ids[i],icomp);
279 //communication phase
280 switch (getAllToAllMethod())
284 const MPI_Comm* comm = group->getComm();
285 comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
286 recvbuf, recvcounts, recvdispls, MPI_DOUBLE,
291 _access_DEC->allToAllv(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
292 recvbuf, recvcounts, recvdispls, MPI_DOUBLE);
296 //setting the received values in the field
297 double* recvptr=recvbuf;
298 for (int i=0; i< _send_proc_offsets[_union_group->size()]; i++)
300 for (int icomp=0; icomp<nbcomp; icomp++)
302 recvfield[i*nbcomp+icomp]=*recvptr;
306 if (sendbuf!=0 && getAllToAllMethod() == Native)
// Pretty-printer for the AllToAllMethod enum value.
// NOTE(review): this definition is truncated in the visible chunk — the
// 'case' labels, 'break's and the final 'return f;' are not shown here;
// only annotating, not reconstructing.
316 ostream & operator<< (ostream & f ,const AllToAllMethod & alltoallmethod )
318 switch (alltoallmethod)
// printed when the method is PointToPoint (MPIAccessDEC-based exchange)
324 f << " PointToPoint ";
// fallback text for any unrecognized enum value
327 f << " UnknownAllToAllMethod ";