// Copyright (C) 2007-2016  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
// Author : Anthony Geay (CEA/DEN)
#include "OverlapMapping.hxx"
#include "MPIProcessorGroup.hxx"

#include "MEDCouplingFieldDouble.hxx"
#include "MCAuto.hxx"

#include "InterpKernelAutoPtr.hxx"

#include <algorithm>
#include <set>
#include <sstream>

using namespace std;
using namespace MEDCoupling;
OverlapMapping::OverlapMapping(const ProcessorGroup& group, const OverlapElementLocator & loc):
    _group(group),_locator(loc)
{
}

/*!
 * Keeps the link between a given proc holding source mesh data, and the corresponding cell IDs.
 */
void OverlapMapping::keepTracksOfSourceIds(int procId, DataArrayInt *ids)
{
  ids->incrRef(); // the MCAuto stored in _sent_src_ids takes ownership of this reference
  _sent_src_ids[procId] = ids;
}

/*!
 * Same as keepTracksOfSourceIds() but for target mesh data.
 */
void OverlapMapping::keepTracksOfTargetIds(int procId, DataArrayInt *ids)
{
  ids->incrRef(); // same ownership convention as in keepTracksOfSourceIds()
  _sent_trg_ids[procId] = ids;
}
/*!
 * This method stores in the local members the contribution coming from a matrix in format
 * Target(rows)/Source(cols) for a source procId 'srcProcId' and for a target procId 'trgProcId'.
 * All IDs received here (source and target) are in the format of local IDs.
 *
 * @param srcIds is null if the source mesh is on the local proc
 * @param trgIds is null if the target mesh is on the local proc
 *
 * One of the 2 is necessarily null (the two can be null together)
 */
void OverlapMapping::addContributionST(const std::vector< SparseDoubleVec >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId)
{
  _matrixes_st.push_back(matrixST);
  _source_proc_id_st.push_back(srcProcId);
  _target_proc_id_st.push_back(trgProcId);
  if(srcIds)  // source mesh part is remote <=> srcProcId != myRank
    {
      _nb_of_rcv_src_ids[srcProcId] = srcIds->getNumberOfTuples();
    }
  else        // source mesh part is local
    {
      std::set<int> s;
      // For all source IDs (=col indices) in the sparse matrix:
      for(std::vector< SparseDoubleVec >::const_iterator it1=matrixST.begin();it1!=matrixST.end();it1++)
        for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
          s.insert((*it2).first);
      vector<int> v(s.begin(), s.end()); // turn set into vector
      _src_ids_zip_comp[trgProcId] = v;
    }
}
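/* Illustration (hypothetical values, not part of the library): SparseDoubleVec is a sparse
 * row keyed by source cell ID, so the 2x3 Target(rows)/Source(cols) matrix
 *        s0   s1   s2
 *   t0 [ 0.4  0.6  0.  ]
 *   t1 [ 0.   0.2  0.8 ]
 * would reach addContributionST() as
 *   std::vector< SparseDoubleVec > m(2);
 *   m[0][0]=0.4; m[0][1]=0.6;
 *   m[1][1]=0.2; m[1][2]=0.8;
 * and, in the local-source case, _src_ids_zip_comp[trgProcId] would become {0,1,2}.
 */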
/*!
 * This method is in charge of sending matrices in AlltoAll mode.
 *
 * 'procsToSendField' gives the list of procs field data has to be sent to.
 * See OverlapElementLocator::computeBoundingBoxesAndTodoList()
 *
 * After the call of this method, 'this' contains the matrixST for all source cells of the current proc
 */
void OverlapMapping::prepare(const std::vector< int >& procsToSendField, int nbOfTrgElems)
{
  CommInterface commInterface=_group.getCommInterface();
  const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
  const MPI_Comm *comm=group->getComm();
  int grpSize=_group.size();
  INTERP_KERNEL::AutoPtr<int> nbsend=new int[grpSize];
  INTERP_KERNEL::AutoPtr<int> nbsend2=new int[grpSize];
  INTERP_KERNEL::AutoPtr<int> nbsend3=new int[grpSize];
  std::fill<int *>(nbsend,nbsend+grpSize,0);
  int myProcId=_group.myRank();
  for(std::size_t i=0;i<_matrixes_st.size();i++)
    if(_source_proc_id_st[i]==myProcId)
      nbsend[_target_proc_id_st[i]]=_matrixes_st[i].size();
  INTERP_KERNEL::AutoPtr<int> nbrecv=new int[grpSize];
  commInterface.allToAll(nbsend,1,MPI_INT,nbrecv,1,MPI_INT,*comm);
  //first exchanging offsets+ids_source
  INTERP_KERNEL::AutoPtr<int> nbrecv1=new int[grpSize];
  INTERP_KERNEL::AutoPtr<int> nbrecv2=new int[grpSize];
  int *tmp=0;
  serializeMatrixStep0ST(nbrecv,
                         tmp,nbsend2,nbsend3,
                         nbrecv1,nbrecv2);
  INTERP_KERNEL::AutoPtr<int> bigArr=tmp;
  INTERP_KERNEL::AutoPtr<int> bigArrRecv=new int[nbrecv2[grpSize-1]+nbrecv1[grpSize-1]];
  commInterface.allToAllV(bigArr,nbsend2,nbsend3,MPI_INT,
                          bigArrRecv,nbrecv1,nbrecv2,MPI_INT,
                          *comm);// sending ids of sparse matrix (n+1 elems)
  //second phase: exchange target ids
  std::fill<int *>(nbsend2,nbsend2+grpSize,0);
  INTERP_KERNEL::AutoPtr<int> nbrecv3=new int[grpSize];
  INTERP_KERNEL::AutoPtr<int> nbrecv4=new int[grpSize];
  double *tmp2=0;
  int lgthOfArr=serializeMatrixStep1ST(nbrecv,bigArrRecv,nbrecv1,nbrecv2,
                                       tmp,tmp2,
                                       nbsend2,nbsend3,nbrecv3,nbrecv4);
  INTERP_KERNEL::AutoPtr<int> bigArr2=tmp;
  INTERP_KERNEL::AutoPtr<double> bigArrD2=tmp2;
  INTERP_KERNEL::AutoPtr<int> bigArrRecv2=new int[lgthOfArr];
  INTERP_KERNEL::AutoPtr<double> bigArrDRecv2=new double[lgthOfArr];
  commInterface.allToAllV(bigArr2,nbsend2,nbsend3,MPI_INT,
                          bigArrRecv2,nbrecv3,nbrecv4,MPI_INT,
                          *comm);
  commInterface.allToAllV(bigArrD2,nbsend2,nbsend3,MPI_DOUBLE,
                          bigArrDRecv2,nbrecv3,nbrecv4,MPI_DOUBLE,
                          *comm);
  //finishing
  unserializationST(nbOfTrgElems,nbrecv,bigArrRecv,nbrecv1,nbrecv2,
                    bigArrRecv2,bigArrDRecv2,nbrecv3,nbrecv4);

  //finish to fill _the_matrix_st with already in place matrix in _matrixes_st (local computation)
  finishToFillFinalMatrixST();

  //updating _src_ids_zip_recv with received matrix
  fillSourceIdsZipReceivedForMultiply();
  // Prepare proc list for future field data exchange (multiply()):
  _proc_ids_to_send_vector_st = procsToSendField;
  // Make some space on local proc:
  _matrixes_st.clear();
}
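/* Sketch of the wire format used by prepare() (one slot per destination proc):
 *   round 1 (allToAll)   : number of matrix rows computed locally for each proc
 *   round 2 (allToAllV)  : for each matrix, its n+1 cumulated row sizes (CSR-style offsets)
 *   rounds 3/4 (allToAllV): the column IDs (int) and the weights (double), both of
 *                           length "last offset", aligned one-to-one.
 */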
///*! Compute denominators for ExtensiveConservation interp.
// * TO BE REVISED: needs another communication since some bits are held non locally
// */
//void OverlapMapping::computeDenoGlobConstraint()
//{
//  _the_deno_st.clear();
//  std::size_t sz1=_the_matrix_st.size();
//  _the_deno_st.resize(sz1);
//  for(std::size_t i=0;i<sz1;i++)
//    {
//      std::size_t sz2=_the_matrix_st[i].size();
//      _the_deno_st[i].resize(sz2);
//      for(std::size_t j=0;j<sz2;j++)
//        {
//          double sum=0.;
//          SparseDoubleVec& mToFill=_the_deno_st[i][j];
//          const SparseDoubleVec& m=_the_matrix_st[i][j];
//          for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
//            sum+=(*it).second;
//          for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
//            mToFill[(*it).first]=sum;
//        }
//    }
//  printDenoMatrix();
//}
///*! Compute integral denominators
// * TO BE REVISED: needs another communication since some source areas are held non locally
// */
//void OverlapMapping::computeDenoIntegral()
//{
//  _the_deno_st.clear();
//  std::size_t sz1=_the_matrix_st.size();
//  _the_deno_st.resize(sz1);
//  for(std::size_t i=0;i<sz1;i++)
//    {
//      std::size_t sz2=_the_matrix_st[i].size();
//      _the_deno_st[i].resize(sz2);
//      for(std::size_t j=0;j<sz2;j++)
//        {
//          SparseDoubleVec& mToFill=_the_deno_st[i][j];
//          for(SparseDoubleVec::const_iterator it=mToFill.begin();it!=mToFill.end();it++)
//            mToFill[(*it).first] = sourceAreas;  // TO BE REVISED: sourceAreas is held non locally
//        }
//    }
//  printDenoMatrix();
//}
/*! Compute rev integral denominators
 */
void OverlapMapping::computeDenoRevIntegral(const DataArrayDouble & targetAreas)
{
  _the_deno_st.clear();
  std::size_t sz1=_the_matrix_st.size();
  _the_deno_st.resize(sz1);
  const double * targetAreasP = targetAreas.getConstPointer();
  for(std::size_t i=0;i<sz1;i++)
    {
      std::size_t sz2=_the_matrix_st[i].size();
      _the_deno_st[i].resize(sz2);
      for(std::size_t j=0;j<sz2;j++)
        {
          SparseDoubleVec& mToFill=_the_deno_st[i][j];
          SparseDoubleVec& mToIterate=_the_matrix_st[i][j];
          for(SparseDoubleVec::const_iterator it=mToIterate.begin();it!=mToIterate.end();it++)
            mToFill[(*it).first] = targetAreasP[j];
        }
    }
//  printDenoMatrix();
}
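/* With this choice every entry of row j of the denominator matrix holds the target cell
 * area A_t(j), so multiply() ends up computing (a sketch, one component):
 *   tgt[j] = ( sum_k w_jk * src[k] ) / A_t(j)
 */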
/*!
 * Compute denominators for ConservativeVolumic interp.
 */
void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg)
{
  int myProcId=_group.myRank();
  _the_deno_st.clear();
  std::size_t sz1=_the_matrix_st.size();
  _the_deno_st.resize(sz1);
  std::vector<double> deno(nbOfTuplesTrg);
  // Fills in the vector indexed by target cell ID:
  for(std::size_t i=0;i<sz1;i++)
    {
      const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
      int curSrcId=_the_matrix_st_source_proc_id[i];
      map < int, MCAuto<DataArrayInt> >::const_iterator isItem1 = _sent_trg_ids.find(curSrcId);
      int rowId=0;
      if(isItem1==_sent_trg_ids.end() || curSrcId==myProcId) // Local computation: simple, because rowId of mat are directly target cell ids.
        {
          for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
            for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
              deno[rowId]+=(*it2).second;
        }
      else // matrix was received, remote computation
        {
          const DataArrayInt *trgIds = (*isItem1).second;
          const int *trgIds2=trgIds->getConstPointer();
          for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
            for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
              deno[trgIds2[rowId]]+=(*it2).second;
        }
    }
  // Broadcast the vector into a structure similar to the initial sparse matrix of numerators:
  for(std::size_t i=0;i<sz1;i++)
    {
      int rowId=0;
      const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
      int curSrcId=_the_matrix_st_source_proc_id[i];
      map < int, MCAuto<DataArrayInt> >::const_iterator isItem1 = _sent_trg_ids.find(curSrcId);
      std::vector< SparseDoubleVec >& denoM=_the_deno_st[i];
      denoM.resize(mat.size());
      if(isItem1==_sent_trg_ids.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids.
        {
          for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
            for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
              denoM[rowId][(*it2).first]=deno[rowId];
        }
      else
        {
          const DataArrayInt *trgIds = (*isItem1).second;
          const int *trgIds2=trgIds->getConstPointer();
          for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
            for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
              denoM[rowId][(*it2).first]=deno[trgIds2[rowId]];
        }
    }
//  printDenoMatrix();
}
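/* Here the denominator of a target row is the sum of the interpolation weights hitting
 * that target cell (accumulated across all held matrices). Sketch with made-up numbers:
 * if target cell j receives weights {0.2, 0.6}, then deno[j]=0.8 and multiply() yields
 *   tgt[j] = (0.2*src[a] + 0.6*src[b]) / 0.8
 * i.e. a conservative weighted average.
 */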
/*!
 * This method performs step #0/3 in the serialization process.
 * \param count specifies the nb of elems to send to the corresponding proc id. Size equal to _group.size().
 * \param offsets tells for a proc i where to start the serialize#0 matrix. Size equal to _group.size().
 * \param nbOfElemsSrc of size _group.size(). Comes from the previous all2all call. Tells how many srcIds per proc the matrix for the current proc contains.
 */
void OverlapMapping::serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigArr, int *count, int *offsets,
                                            int *countForRecv, int *offsetsForRecv) const
{
  int grpSize=_group.size();
  std::fill<int *>(count,count+grpSize,0);
  int szz=0;
  int myProcId=_group.myRank();
  for(std::size_t i=0;i<_matrixes_st.size();i++)
    {
      if(_source_proc_id_st[i]==myProcId)// && _target_proc_id_st[i]!=myProcId
        {
          count[_target_proc_id_st[i]]=_matrixes_st[i].size()+1;
          szz+=_matrixes_st[i].size()+1;
        }
    }
  bigArr=new int[szz];
  offsets[0]=0;
  for(int i=1;i<grpSize;i++)
    offsets[i]=offsets[i-1]+count[i-1];
  for(std::size_t i=0;i<_matrixes_st.size();i++)
    {
      if(_source_proc_id_st[i]==myProcId)
        {
          int start=offsets[_target_proc_id_st[i]];
          int *work=bigArr+start;
          *work=0;
          const std::vector< SparseDoubleVec >& mat=_matrixes_st[i];
          for(std::vector< SparseDoubleVec >::const_iterator it=mat.begin();it!=mat.end();it++,work++)
            work[1]=work[0]+(*it).size();
        }
    }
  //
  for(int i=0;i<grpSize;i++)
    {
      if(nbOfElemsSrc[i]>0)
        countForRecv[i]=nbOfElemsSrc[i]+1;
      else
        countForRecv[i]=0;
      if(i>0)
        offsetsForRecv[i]=offsetsForRecv[i-1]+countForRecv[i-1];
      else
        offsetsForRecv[i]=0;
    }
}
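/* Step #0 ships, for each destination proc, the cumulated row sizes of the local matrix
 * prefixed by 0 (hence the "+1"). E.g. (made-up sizes) a 3-row matrix with row sizes
 * (2,0,3) is serialized as [0,2,2,5]; the last value, 5, is the number of (colId,weight)
 * pairs that steps #1/#2 will ship.
 */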
/*!
 * This method performs step#1 and step#2/3. It returns the size of the array expected from the allToAllV.
 * It is where the locally computed matrices are serialized to be sent to the adequate final proc.
 */
int OverlapMapping::serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *recvStep0, const int *countStep0, const int *offsStep0,
                                           int *&bigArrI, double *&bigArrD, int *count, int *offsets,
                                           int *countForRecv, int *offsForRecv) const
{
  int grpSize=_group.size();
  int myProcId=_group.myRank();
  int szz=0;
  offsForRecv[0]=0;
  for(int i=0;i<grpSize;i++)
    {
      if(nbOfElemsSrc[i]!=0)
        countForRecv[i]=recvStep0[offsStep0[i]+nbOfElemsSrc[i]];
      else
        countForRecv[i]=0;
      szz+=countForRecv[i];
      if(i>0)
        offsForRecv[i]=offsForRecv[i-1]+countForRecv[i-1];
    }
  //
  std::fill(count,count+grpSize,0);
  offsets[0]=0;
  int fullLgth=0;
  for(std::size_t i=0;i<_matrixes_st.size();i++)
    {
      if(_source_proc_id_st[i]==myProcId)
        {
          const std::vector< SparseDoubleVec >& mat=_matrixes_st[i];
          int lgthToSend=0;
          for(std::vector< SparseDoubleVec >::const_iterator it=mat.begin();it!=mat.end();it++)
            lgthToSend+=(*it).size();
          count[_target_proc_id_st[i]]=lgthToSend;
          fullLgth+=lgthToSend;
        }
    }
  for(int i=1;i<grpSize;i++)
    offsets[i]=offsets[i-1]+count[i-1];
  //
  bigArrI=new int[fullLgth];
  bigArrD=new double[fullLgth];
  // feeding arrays
  fullLgth=0;
  for(std::size_t i=0;i<_matrixes_st.size();i++)
    {
      if(_source_proc_id_st[i]==myProcId)
        {
          const std::vector< SparseDoubleVec >& mat=_matrixes_st[i];
          for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
            {
              int j=0;
              for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++,j++)
                {
                  bigArrI[fullLgth+j]=(*it2).first;
                  bigArrD[fullLgth+j]=(*it2).second;
                }
              fullLgth+=(*it1).size();
            }
        }
    }
  return szz;
}
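/* The (colId,weight) pairs are flattened row by row into the twin arrays bigArrI/bigArrD,
 * aligned with the step #0 offsets. Continuing the made-up example above (offsets
 * [0,2,2,5]): row 0 occupies slots 0-1, row 1 is empty, row 2 occupies slots 2-4.
 */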
/*!
 * This is the last step after all2Alls for matrix exchange.
 * _the_matrix_st is the final matrix:
 * - The first entry is srcId in current proc.
 * - The second is the pseudo id of source proc (correspondence with true id is in attribute _the_matrix_st_source_proc_id and _the_matrix_st_source_ids)
 * - the third is the srcId in the pseudo source proc
 */
void OverlapMapping::unserializationST(int nbOfTrgElems,
                                       const int *nbOfElemsSrcPerProc,//first all2all
                                       const int *bigArrRecv, const int *bigArrRecvCounts, const int *bigArrRecvOffs,//2nd all2all
                                       const int *bigArrRecv2, const double *bigArrDRecv2, const int *bigArrRecv2Count, const int *bigArrRecv2Offs)//3rd and 4th all2alls
{
  _the_matrix_st.clear();
  _the_matrix_st_source_proc_id.clear();
  //
  int grpSize=_group.size();
  for(int i=0;i<grpSize;i++)
    if(nbOfElemsSrcPerProc[i]!=0)
      _the_matrix_st_source_proc_id.push_back(i);
  int nbOfPseudoProcs=_the_matrix_st_source_proc_id.size();//_the_matrix_st_target_proc_id.size() contains number of matrix fetched remotely whose sourceProcId==myProcId
  _the_matrix_st.resize(nbOfPseudoProcs);
  //
  int j=0;
  for(int i=0;i<grpSize;i++)
    if(nbOfElemsSrcPerProc[i]!=0)
      {
        _the_matrix_st[j].resize(nbOfElemsSrcPerProc[i]);
        for(int k=0;k<nbOfElemsSrcPerProc[i];k++)
          {
            int offs=bigArrRecv[bigArrRecvOffs[i]+k];
            int lgthOfMap=bigArrRecv[bigArrRecvOffs[i]+k+1]-offs;
            for(int l=0;l<lgthOfMap;l++)
              _the_matrix_st[j][k][bigArrRecv2[bigArrRecv2Offs[i]+offs+l]]=bigArrDRecv2[bigArrRecv2Offs[i]+offs+l];
          }
        j++;
      }
}
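/* Resulting indexing (as filled just above): _the_matrix_st[p][r][c] = weight, where p is
 * the pseudo proc index (real rank = _the_matrix_st_source_proc_id[p]), r the row within
 * the matrix received from that proc, and c the column ID as numbered by the sender.
 */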
/*!
 * This method should be called when all remote matrices with sourceProcId==thisProcId have been retrieved and are
 * in 'this->_the_matrix_st' and 'this->_the_matrix_st_target_proc_id' and 'this->_the_matrix_st_target_ids'.
 * This method finishes the job of filling 'this->_the_matrix_st' and 'this->_the_matrix_st_target_proc_id'
 * by putting candidates in 'this->_matrixes_st' into them (i.e. local computation results).
 */
void OverlapMapping::finishToFillFinalMatrixST()
{
  int myProcId=_group.myRank();
  int sz=_matrixes_st.size();
  int nbOfEntryToAdd=0;
  for(int i=0;i<sz;i++)
    if(_source_proc_id_st[i]!=myProcId)
      nbOfEntryToAdd++;
  if(nbOfEntryToAdd==0)
    return ;
  int oldNbOfEntry=_the_matrix_st.size();
  int newNbOfEntry=oldNbOfEntry+nbOfEntryToAdd;
  _the_matrix_st.resize(newNbOfEntry);
  int j=oldNbOfEntry;
  for(int i=0;i<sz;i++)
    if(_source_proc_id_st[i]!=myProcId)
      {
        const std::vector<SparseDoubleVec >& mat=_matrixes_st[i];
        _the_matrix_st[j]=mat;
        _the_matrix_st_source_proc_id.push_back(_source_proc_id_st[i]);
        j++;
      }
}
/*!
 * This method performs a transpose multiply of 'fieldInput' and puts the result into 'fieldOutput'.
 * 'fieldInput' is expected to be the sourcefield and 'fieldOutput' the targetfield.
 */
void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput, double default_val) const
{
  int nbOfCompo=fieldInput->getNumberOfComponents();//to improve same number of components to test
  CommInterface commInterface=_group.getCommInterface();
  const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
  const MPI_Comm *comm=group->getComm();
  int grpSize=_group.size();
  int myProcID=_group.myRank();
  //
  INTERP_KERNEL::AutoPtr<int> nbsend=new int[grpSize];
  INTERP_KERNEL::AutoPtr<int> nbsend2=new int[grpSize];
  INTERP_KERNEL::AutoPtr<int> nbrecv=new int[grpSize];
  INTERP_KERNEL::AutoPtr<int> nbrecv2=new int[grpSize];
  fill<int *>(nbsend,nbsend+grpSize,0);
  fill<int *>(nbrecv,nbrecv+grpSize,0);
  nbsend2[0]=0;
  nbrecv2[0]=0;
  vector<double> valsToSend;

  /*
   * FIELD VALUE EXCHANGE:
   * We call the 'BB source IDs' (bounding box source IDs) the set of source cell IDs transmitted just based on the bounding box information.
   * This is potentially bigger than what is finally in the interp matrix and this is stored in _sent_src_ids.
   * We call 'interp source IDs' the set of source cell IDs with non null entries in the interp matrix. This is a sub-set of the above.
   */
  for(int procID=0;procID<grpSize;procID++)
    {
      /* SENDING part: compute field values to be SENT (and how many of them)
       *   - for all proc 'procID' in group
       *      * if procID == myProcID, send nothing
       *      * elif 'procID' in _proc_ids_to_send_vector_st (computed from the BB intersection)
       *        % if myProcID computed the job (myProcID, procID)
       *             => send only 'interp source IDs' field values (i.e. IDs stored in _src_ids_zip_comp)
       *        % else (=we just sent mesh data to procID, but have never seen the matrix, i.e. matrix was computed remotely by procID)
       *             => send 'BB source IDs' set of field values (i.e. IDs stored in _sent_src_ids)
       */
      if (procID == myProcID)
        nbsend[procID] = 0;
      else
        if(find(_proc_ids_to_send_vector_st.begin(),_proc_ids_to_send_vector_st.end(),procID)!=_proc_ids_to_send_vector_st.end())
          {
            MCAuto<DataArrayDouble> vals;
            if(_locator.isInMyTodoList(myProcID, procID))
              {
                map<int, vector<int> >::const_iterator isItem11 = _src_ids_zip_comp.find(procID);
                if (isItem11 == _src_ids_zip_comp.end())
                  throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _src_ids_zip_comp!");
                const vector<int> & v = (*isItem11).second;
                int sz=v.size();
                vals=fieldInput->getArray()->selectByTupleId(&(v[0]),&(v[0])+sz);
              }
            else
              {
                map < int, MCAuto<DataArrayInt> >::const_iterator isItem11 = _sent_src_ids.find( procID );
                if (isItem11 == _sent_src_ids.end())
                  throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _sent_src_ids!");
                vals=fieldInput->getArray()->selectByTupleId(*(*isItem11).second);
              }
            nbsend[procID] = vals->getNbOfElems();
            valsToSend.insert(valsToSend.end(),vals->getConstPointer(),vals->getConstPointer()+nbsend[procID]);
          }

      /* RECEIVE: compute number of field values to be RECEIVED
       *   - for all proc 'procID' in group
       *      * if procID == myProcID, rcv nothing
       *      * elif 'procID' in _proc_ids_to_recv_vector_st (computed from BB intersec)
       *        % if myProcID computed the job (procID, myProcID)
       *             => receive full set ('BB source IDs') of field data from proc #procID which has never seen the matrix
       *                i.e. prepare to receive the numb in _nb_of_rcv_src_ids
       *        % else (=we did NOT compute the job, hence procID has, and knows the matrix)
       *             => receive 'interp source IDs' set of field values
       */
      const std::vector< int > & _proc_ids_to_recv_vector_st = _the_matrix_st_source_proc_id;
      if (procID == myProcID)
        nbrecv[procID] = 0;
      else
        if(find(_proc_ids_to_recv_vector_st.begin(),_proc_ids_to_recv_vector_st.end(),procID)!=_proc_ids_to_recv_vector_st.end())
          {
            if(_locator.isInMyTodoList(procID, myProcID))
              {
                map <int,int>::const_iterator isItem11 = _nb_of_rcv_src_ids.find(procID);
                if (isItem11 == _nb_of_rcv_src_ids.end())
                  throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _nb_of_rcv_src_ids!");
                nbrecv[procID] = (*isItem11).second*nbOfCompo; // number of tuples times number of components
              }
            else
              {
                map<int, vector<int> >::const_iterator isItem11 = _src_ids_zip_recv.find(procID);
                if (isItem11 == _src_ids_zip_recv.end())
                  throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _src_ids_zip_recv!");
                nbrecv[procID] = (*isItem11).second.size()*nbOfCompo;
              }
          }
    }
  // Compute offsets in the sending/receiving array.
  for(int i=1;i<grpSize;i++)
    {
      nbsend2[i]=nbsend2[i-1]+nbsend[i-1];
      nbrecv2[i]=nbrecv2[i-1]+nbrecv[i-1];
    }
  INTERP_KERNEL::AutoPtr<double> bigArr=new double[nbrecv2[grpSize-1]+nbrecv[grpSize-1]];

#ifdef DEC_DEBUG
  stringstream scout;
  scout << "(" << myProcID << ") nbsend :" << nbsend[0] << "," << nbsend[1] << "," << nbsend[2] << "\n";
  scout << "(" << myProcID << ") nbrecv :" << nbrecv[0] << "," << nbrecv[1] << "," << nbrecv[2] << "\n";
  scout << "(" << myProcID << ") valsToSend: ";
  for (std::size_t iii=0; iii<valsToSend.size(); iii++)
    scout << ", " << valsToSend[iii];
  cout << scout.str() << "\n";
#endif

  /*
   * *********************** ALL-TO-ALL
   */
  commInterface.allToAllV(&valsToSend[0],nbsend,nbsend2,MPI_DOUBLE,
                          bigArr,nbrecv,nbrecv2,MPI_DOUBLE,*comm);
#ifdef DEC_DEBUG
  MPI_Barrier(MPI_COMM_WORLD);
  scout << "(" << myProcID << ") bigArray: ";
  for (int iii=0; iii<nbrecv2[grpSize-1]+nbrecv[grpSize-1]; iii++)
    scout << ", " << bigArr[iii];
  cout << scout.str() << "\n";
#endif

  /*
   * TARGET FIELD COMPUTATION (matrix-vec computation)
   */
  fieldOutput->getArray()->fillWithZero();
  INTERP_KERNEL::AutoPtr<double> tmp=new double[nbOfCompo];

  // By default field value set to default value - so mark which cells are hit
  INTERP_KERNEL::AutoPtr<bool> hit_cells = new bool[fieldOutput->getNumberOfTuples()];
  std::fill<bool *>(hit_cells,hit_cells+fieldOutput->getNumberOfTuples(),false);

  for(vector<int>::const_iterator itProc=_the_matrix_st_source_proc_id.begin(); itProc != _the_matrix_st_source_proc_id.end();itProc++)
  // For each source processor corresponding to a locally held matrix:
    {
      int srcProcID = *itProc;
      int id = distance(_the_matrix_st_source_proc_id.begin(),itProc);
      const vector< SparseDoubleVec >& mat =_the_matrix_st[id];
      const vector< SparseDoubleVec >& deno = _the_deno_st[id];

      /* FINAL MULTIPLICATION
       *   * if srcProcID == myProcID, local multiplication without any mapping
       *      => for all target cell ID 'tgtCellID'
       *        => for all src cell ID 'srcCellID' in the sparse vector
       *          => tgtFieldLocal[tgtCellID] += srcFieldLocal[srcCellID] * matrix[tgtCellID][srcCellID] / deno[tgtCellID][srcCellID]
       */
      if (srcProcID == myProcID)
        {
          int nbOfTrgTuples=mat.size();
          double * targetBase = fieldOutput->getArray()->getPointer();
          for(int j=0; j<nbOfTrgTuples; j++)
            {
              const SparseDoubleVec& mat1=mat[j];
              const SparseDoubleVec& deno1=deno[j];
              SparseDoubleVec::const_iterator it5=deno1.begin();
              const double * localSrcField = fieldInput->getArray()->getConstPointer();
              double * targetPt = targetBase+j*nbOfCompo;
              for(SparseDoubleVec::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it5++)
                {
                  // Apply the multiplication for all components:
                  double ratio = (*it3).second/(*it5).second;
                  transform(localSrcField+((*it3).first)*nbOfCompo,
                            localSrcField+((*it3).first+1)*nbOfCompo,
                            (double *)tmp,
                            bind2nd(multiplies<double>(),ratio) );
                  // Accumulate with current value:
                  transform((double *)tmp,(double *)tmp+nbOfCompo,targetPt,targetPt,plus<double>());
                  hit_cells[j] = true;
                }
            }
        }

      if(nbrecv[srcProcID]<=0) // also covers the preceding 'if'
        continue;

      /* * if something was received
       *   % if received matrix (=we didn't compute the job), this means that :
       *      1. we sent part of our targetIDs to srcProcID before, so that srcProcId can do the computation.
       *      2. srcProcID has sent us only the 'interp source IDs' field values
       *      => invert _src_ids_zip_recv -> 'revert_zip'
       *      => for all target cell ID 'tgtCellID'
       *        => mappedTgtID = _sent_trg_ids[srcProcID][tgtCellID]
       *        => for all src cell ID 'srcCellID' in the sparse vector
       *          => idx = revert_zip[srcCellID]
       *          => tgtFieldLocal[mappedTgtID] += rcvValue[srcProcID][idx] * matrix[tgtCellID][srcCellID] / deno[tgtCellID][srcCellID]
       */
      if(!_locator.isInMyTodoList(srcProcID, myProcID))
        {
          // invert _src_ids_zip_recv
          map<int,int> revert_zip;
          map<int, vector<int> >::const_iterator it11= _src_ids_zip_recv.find(srcProcID);
          if (it11 == _src_ids_zip_recv.end())
            throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _src_ids_zip_recv!");
          const vector<int> & vec = (*it11).second;
          int newId=0;
          for(vector<int>::const_iterator it=vec.begin();it!=vec.end();it++,newId++)
            revert_zip[*it]=newId;
          map < int, MCAuto<DataArrayInt> >::const_iterator isItem24 = _sent_trg_ids.find(srcProcID);
          if (isItem24 == _sent_trg_ids.end())
            throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _sent_trg_ids!");
          const DataArrayInt *tgrIdsDA = (*isItem24).second;
          const int *tgrIds = tgrIdsDA->getConstPointer();

          int nbOfTrgTuples=mat.size();
          double * targetBase = fieldOutput->getArray()->getPointer();
          for(int j=0;j<nbOfTrgTuples;j++)
            {
              const SparseDoubleVec& mat1=mat[j];
              const SparseDoubleVec& deno1=deno[j];
              SparseDoubleVec::const_iterator it5=deno1.begin();
              double * targetPt = targetBase+tgrIds[j]*nbOfCompo;
              for(SparseDoubleVec::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it5++)
                {
                  map<int,int>::const_iterator it4=revert_zip.find((*it3).first);
                  if(it4==revert_zip.end())
                    throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in revert_zip!");
                  double ratio = (*it3).second/(*it5).second;
                  transform(bigArr+nbrecv2[srcProcID]+((*it4).second)*nbOfCompo,
                            bigArr+nbrecv2[srcProcID]+((*it4).second+1)*nbOfCompo,
                            (double *)tmp,
                            bind2nd(multiplies<double>(),ratio) );
                  transform((double *)tmp,(double *)tmp+nbOfCompo,targetPt,targetPt,plus<double>());
                  hit_cells[tgrIds[j]] = true;
                }
            }
        }
      else
        /* % else (=we computed the job and we received the 'BB source IDs' set of source field values)
         *    => for all target cell ID 'tgtCellID'
         *      => for all src cell ID 'srcCellID' in the sparse vector
         *        => tgtFieldLocal[tgtCellID] += rcvValue[srcProcID][srcCellID] * matrix[tgtCellID][srcCellID] / deno[tgtCellID][srcCellID]
         */
        {
          // Same loop as in the case srcProcID == myProcID, except that instead of working on local field data, we work on bigArr
          int nbOfTrgTuples=mat.size();
          double * targetBase = fieldOutput->getArray()->getPointer();
          for(int j=0;j<nbOfTrgTuples;j++)
            {
              const SparseDoubleVec& mat1=mat[j];
              const SparseDoubleVec& deno1=deno[j];
              SparseDoubleVec::const_iterator it5=deno1.begin();
              double * targetPt = targetBase+j*nbOfCompo;
              for(SparseDoubleVec::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it5++)
                {
                  // Apply the multiplication for all components:
                  double ratio = (*it3).second/(*it5).second;
                  transform(bigArr+nbrecv2[srcProcID]+((*it3).first)*nbOfCompo,
                            bigArr+nbrecv2[srcProcID]+((*it3).first+1)*nbOfCompo,
                            (double *)tmp,
                            bind2nd(multiplies<double>(),ratio));
                  // Accumulate with current value:
                  transform((double *)tmp,(double *)tmp+nbOfCompo,targetPt,targetPt,plus<double>());
                  hit_cells[j] = true;
                }
            }
        }
    }

  // Fill in default values for cells which haven't been hit:
  int i=0;
  for(bool * hit_cells_ptr=hit_cells; i< fieldOutput->getNumberOfTuples(); hit_cells_ptr++,i++)
    if (!(*hit_cells_ptr))
      {
        double * targetPt=fieldOutput->getArray()->getPointer();
        fill(targetPt+i*nbOfCompo, targetPt+(i+1)*nbOfCompo, default_val);
      }
}
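/* Numerical sketch of the loop above (made-up values, one component): if the row of target
 * cell j holds weights {(k1,w1),(k2,w2)} and denominators {d1,d2}, the code computes
 *   tgt[j] = (w1/d1)*src[k1] + (w2/d2)*src[k2]
 * where src[] is either the local source field (srcProcID == myProcID) or the slice of
 * bigArr received from srcProcID, indexed directly ('BB source IDs' case) or through
 * revert_zip ('interp source IDs' case).
 */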
/*!
 * This method performs a transpose multiply of 'fieldInput' and puts the result into 'fieldOutput'.
 * 'fieldInput' is expected to be the targetfield and 'fieldOutput' the sourcefield.
 */
void OverlapMapping::transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput)
{
}
/*!
 * This method should be called immediately after _the_matrix_st has been filled with the remotely computed matrices
 * put on this proc for Matrix-Vector.
 * It fills _src_ids_zip_recv (see member doc).
 */
void OverlapMapping::fillSourceIdsZipReceivedForMultiply()
{
  /* When it is called, only the bits received from other processors (i.e. the remotely executed jobs) are in the
     big matrix _the_matrix_st. */

  CommInterface commInterface=_group.getCommInterface();
  int myProcId=_group.myRank();
  int nbOfMatrixReceived=_the_matrix_st_source_proc_id.size();
  for(int i=0;i<nbOfMatrixReceived;i++)
    {
      int curSrcProcId=_the_matrix_st_source_proc_id[i];
      if(curSrcProcId!=myProcId) // if equal, data has been populated by addContributionST()
        {
          const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
          std::set<int> s;
          for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
            for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
              s.insert((*it2).first);
          vector<int> vec(s.begin(),s.end());
          _src_ids_zip_recv[curSrcProcId] = vec;
        }
    }
}
void OverlapMapping::printTheMatrix() const
{
  CommInterface commInterface=_group.getCommInterface();
  const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
  const MPI_Comm *comm=group->getComm();
  int grpSize=_group.size();
  int myProcId=_group.myRank();
  std::stringstream oscerr;
  int nbOfMat=_the_matrix_st.size();
  oscerr << "(" << myProcId << ") I hold " << nbOfMat << " matrix(ces) : "<< std::endl;
  for(int i=0;i<nbOfMat;i++)
    {
      oscerr << "   - Matrix #" << i << " coming from source proc #" << _the_matrix_st_source_proc_id[i] << ":\n";
      const std::vector< SparseDoubleVec >& locMat=_the_matrix_st[i];
      int j=0;
      for(std::vector< SparseDoubleVec >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++, j++)
        {
          oscerr << " Target Cell #" << j;
          for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
            oscerr << " (" << (*it2).first << "," << (*it2).second << "), ";
          oscerr << std::endl;
        }
    }
  oscerr << "*********" << std::endl;

  // Hope this will be flushed in one go:
  std::cerr << oscerr.str() << std::endl;
//  MPI_Barrier(MPI_COMM_WORLD);
}
void OverlapMapping::printMatrixesST() const
{
  CommInterface commInterface=_group.getCommInterface();
  const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
  const MPI_Comm *comm=group->getComm();
  int grpSize=_group.size();
  int myProcId=_group.myRank();
  std::stringstream oscerr;
  int nbOfMat=_matrixes_st.size();
  oscerr << "(" << myProcId << ") I hold " << nbOfMat << " LOCAL matrix(ces) : "<< std::endl;
  for(int i=0;i<nbOfMat;i++)
    {
      oscerr << "   - Matrix #" << i << ": (source proc #" << _source_proc_id_st[i] << " / tgt proc #" << _target_proc_id_st[i] << "):\n";
      const std::vector< SparseDoubleVec >& locMat=_matrixes_st[i];
      int j=0;
      for(std::vector< SparseDoubleVec >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++, j++)
        {
          oscerr << " Target Cell #" << j;
          for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
            oscerr << " (" << (*it2).first << "," << (*it2).second << "), ";
          oscerr << std::endl;
        }
    }
  oscerr << "*********" << std::endl;

  // Hope this will be flushed in one go:
  std::cerr << oscerr.str() << std::endl;
}
void OverlapMapping::printDenoMatrix() const
{
  CommInterface commInterface=_group.getCommInterface();
  const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
  const MPI_Comm *comm=group->getComm();
  int grpSize=_group.size();
  int myProcId=_group.myRank();
  std::stringstream oscerr;
  int nbOfMat=_the_deno_st.size();
  oscerr << "(" << myProcId << ") I hold " << nbOfMat << " DENOMINATOR matrix(ces) : "<< std::endl;
  for(int i=0;i<nbOfMat;i++)
    {
      oscerr << "   - Matrix #" << i << " coming from source proc #" << _the_matrix_st_source_proc_id[i] << ":\n";
      const std::vector< SparseDoubleVec >& locMat=_the_deno_st[i];
      int j=0;
      for(std::vector< SparseDoubleVec >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++, j++)
        {
          oscerr << " Target Cell #" << j;
          for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
            oscerr << " (" << (*it2).first << "," << (*it2).second << "), ";
          oscerr << std::endl;
        }
    }
  oscerr << "*********" << std::endl;

  // Hope this will be flushed in one go:
  std::cerr << oscerr.str() << std::endl;
}