-// Copyright (C) 2007-2015 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2016 CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
#include "MPIProcessorGroup.hxx"
#include "MEDCouplingFieldDouble.hxx"
-#include "MEDCouplingAutoRefCountObjectPtr.hxx"
+#include "MCAuto.hxx"
#include "InterpKernelAutoPtr.hxx"
#include <numeric>
#include <algorithm>
-using namespace ParaMEDMEM;
+using namespace MEDCoupling;
OverlapMapping::OverlapMapping(const ProcessorGroup& group, const OverlapElementLocator & loc):
_group(group),_locator(loc)
void OverlapMapping::keepTracksOfSourceIds(int procId, DataArrayInt *ids)
{
ids->incrRef();
- _sent_src_ids_st2.push_back(ids);
- _sent_src_proc_st2.push_back(procId);
+ _sent_src_ids[procId] = ids;
}
/*!
void OverlapMapping::keepTracksOfTargetIds(int procId, DataArrayInt *ids)
{
ids->incrRef();
- _sent_trg_ids_st2.push_back(ids);
- _sent_trg_proc_st2.push_back(procId);
+ _sent_trg_ids[procId] = ids;
}
/*!
_source_proc_id_st.push_back(srcProcId);
_target_proc_id_st.push_back(trgProcId);
if(srcIds) // source mesh part is remote <=> srcProcId != myRank
- {
- _rcv_src_ids_proc_st2.push_back(srcProcId);
- _nb_of_rcv_src_ids_proc_st2.push_back(srcIds->getNumberOfTuples());
- }
+ _nb_of_rcv_src_ids[srcProcId] = srcIds->getNumberOfTuples();
else // source mesh part is local
{
std::set<int> s;
for(std::vector< SparseDoubleVec >::const_iterator it1=matrixST.begin();it1!=matrixST.end();it1++)
for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
s.insert((*it2).first);
- _src_ids_zip_proc_st2.push_back(trgProcId);
- _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1);
- _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end());
+ vector<int> v(s.begin(), s.end()); // turn set into vector
+ _src_ids_zip_comp[trgProcId] = v;
}
}
//finishing
unserializationST(nbOfTrgElems,nbrecv,bigArrRecv,nbrecv1,nbrecv2,
bigArrRecv2,bigArrDRecv2,nbrecv3,nbrecv4);
- //updating _src_ids_zip_st2 and _src_ids_zip_st2 with received matrix.
- updateZipSourceIdsForMultiply();
+
//finish to fill _the_matrix_st with already in place matrix in _matrixes_st (local computation)
finishToFillFinalMatrixST();
- // Prepare proc lists for future field data exchange (mutliply()):
- _proc_ids_to_recv_vector_st = _the_matrix_st_source_proc_id;
+
+ //updating _src_ids_zip_recv with received matrix.
+ fillSourceIdsZipReceivedForMultiply();
+ // Prepare proc list for future field data exchange (multiply()):
_proc_ids_to_send_vector_st = procsToSendField;
// Make some space on local proc:
_matrixes_st.clear();
#endif
}
-///**
-// * Fill the members _proc_ids_to_send_vector_st and _proc_ids_to_recv_vector_st
-// * indicating for each proc to which proc it should send its source field data
-// * and from which proc it should receive source field data.
-// *
-// * Should be called once finishToFillFinalMatrixST() has been executed, i.e. when the
-// * local matrices are completely ready.
+///*!
+// * Compute denominators for ExtensiveConservation interp.
+// * TO BE REVISED: needs another communication since some bits are held non locally
// */
-//void OverlapMapping::fillProcToSendRcvForMultiply(const std::vector< int >& procsToSendField)
+//void OverlapMapping::computeDenoGlobConstraint()
//{
-//// _proc_ids_to_send_vector_st.clear();
-// _proc_ids_to_recv_vector_st.clear();
-// // Receiving side is easy - just inspect non-void terms in the final matrix
-// int i=0;
-// std::vector< std::vector< SparseDoubleVec > >::const_iterator it;
-// std::vector< SparseDoubleVec >::const_iterator it2;
-// for(it=_the_matrix_st.begin(); it!=_the_matrix_st.end(); it++, i++)
-// _proc_ids_to_recv_vector_st.push_back(_the_matrix_st_source_proc_id[i]);
-//
-// // Sending side was computed in OverlapElementLocator::computeBoundingBoxesAndTodoList()
-// _proc_ids_to_send_vector_st = procsToSendField;
+// _the_deno_st.clear();
+// std::size_t sz1=_the_matrix_st.size();
+// _the_deno_st.resize(sz1);
+// for(std::size_t i=0;i<sz1;i++)
+// {
+// std::size_t sz2=_the_matrix_st[i].size();
+// _the_deno_st[i].resize(sz2);
+// for(std::size_t j=0;j<sz2;j++)
+// {
+// double sum=0;
+// SparseDoubleVec& mToFill=_the_deno_st[i][j];
+// const SparseDoubleVec& m=_the_matrix_st[i][j];
+// for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
+// sum+=(*it).second;
+// for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
+// mToFill[(*it).first]=sum;
+// }
+// }
+// printDenoMatrix();
//}
-/*!
- * Compute denominators for IntegralGlobConstraint interp.
- */
-void OverlapMapping::computeDenoGlobConstraint()
+///*! Compute integral denominators
+// * TO BE REVISED: needs another communication since some source areas are held non locally
+// */
+//void OverlapMapping::computeDenoIntegral()
+//{
+// _the_deno_st.clear();
+// std::size_t sz1=_the_matrix_st.size();
+// _the_deno_st.resize(sz1);
+// for(std::size_t i=0;i<sz1;i++)
+// {
+// std::size_t sz2=_the_matrix_st[i].size();
+// _the_deno_st[i].resize(sz2);
+// for(std::size_t j=0;j<sz2;j++)
+// {
+// SparseDoubleVec& mToFill=_the_deno_st[i][j];
+// for(SparseDoubleVec::const_iterator it=mToFill.begin();it!=mToFill.end();it++)
+// mToFill[(*it).first] = sourceAreas;
+// }
+// }
+// printDenoMatrix();
+//}
+
+/*! Compute rev integral denominators
+ */
+void OverlapMapping::computeDenoRevIntegral(const DataArrayDouble & targetAreas)
{
_the_deno_st.clear();
std::size_t sz1=_the_matrix_st.size();
_the_deno_st.resize(sz1);
+ const double * targetAreasP = targetAreas.getConstPointer();
for(std::size_t i=0;i<sz1;i++)
{
std::size_t sz2=_the_matrix_st[i].size();
_the_deno_st[i].resize(sz2);
for(std::size_t j=0;j<sz2;j++)
{
- double sum=0;
SparseDoubleVec& mToFill=_the_deno_st[i][j];
- const SparseDoubleVec& m=_the_matrix_st[i][j];
- for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
- sum+=(*it).second;
- for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
- mToFill[(*it).first]=sum;
+ SparseDoubleVec& mToIterate=_the_matrix_st[i][j];
+ for(SparseDoubleVec::const_iterator it=mToIterate.begin();it!=mToIterate.end();it++)
+ mToFill[(*it).first] = targetAreasP[j];
}
}
+// printDenoMatrix();
}
+
/*!
 * Compute denominators for ConservativeVolumic interp.
*/
std::size_t sz1=_the_matrix_st.size();
_the_deno_st.resize(sz1);
std::vector<double> deno(nbOfTuplesTrg);
+ // Fills in the vector indexed by target cell ID:
for(std::size_t i=0;i<sz1;i++)
{
const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
int curSrcId=_the_matrix_st_source_proc_id[i];
- std::vector<int>::iterator isItem1=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),curSrcId);
+ map < int, MCAuto<DataArrayInt> >::const_iterator isItem1 = _sent_trg_ids.find(curSrcId);
int rowId=0;
- if(isItem1==_sent_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids.
+ if(isItem1==_sent_trg_ids.end() || curSrcId==myProcId) // Local computation: simple, because rowId of mat are directly target cell ids.
{
for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
deno[rowId]+=(*it2).second;
}
- else
- {//item0 of step2 main algo. More complicated.
- std::vector<int>::iterator fnd=isItem1;//std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId);
- int locId=std::distance(_sent_trg_proc_st2.begin(),fnd);
- const DataArrayInt *trgIds=_sent_trg_ids_st2[locId];
+ else // matrix was received, remote computation
+ {
+ const DataArrayInt *trgIds = (*isItem1).second;
const int *trgIds2=trgIds->getConstPointer();
for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
deno[trgIds2[rowId]]+=(*it2).second;
}
}
- //
+ // Broadcast the vector into a structure similar to the initial sparse matrix of numerators:
for(std::size_t i=0;i<sz1;i++)
{
int rowId=0;
const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
int curSrcId=_the_matrix_st_source_proc_id[i];
- std::vector<int>::iterator isItem1=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),curSrcId);
+ map < int, MCAuto<DataArrayInt> >::const_iterator isItem1 = _sent_trg_ids.find(curSrcId);
std::vector< SparseDoubleVec >& denoM=_the_deno_st[i];
denoM.resize(mat.size());
- if(isItem1==_sent_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids.
+ if(isItem1==_sent_trg_ids.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids.
{
int rowId=0;
for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
}
else
{
- std::vector<int>::iterator fnd=isItem1;
- int locId=std::distance(_sent_trg_proc_st2.begin(),fnd);
- const DataArrayInt *trgIds=_sent_trg_ids_st2[locId];
+ const DataArrayInt *trgIds = (*isItem1).second;
const int *trgIds2=trgIds->getConstPointer();
for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
* This is the last step after all2Alls for matrix exchange.
* _the_matrix_st is the final matrix :
* - The first entry is srcId in current proc.
- * - The second is the pseudo id of source proc (correspondance with true id is in attribute _the_matrix_st_source_proc_id and _the_matrix_st_source_ids)
+ * - The second is the pseudo id of source proc (correspondence with true id is in attribute _the_matrix_st_source_proc_id and _the_matrix_st_source_ids)
* - the third is the srcId in the pseudo source proc
*/
void OverlapMapping::unserializationST(int nbOfTrgElems,
}
}
+
/*!
* This method performs a transpose multiply of 'fieldInput' and put the result into 'fieldOutput'.
* 'fieldInput' is expected to be the sourcefield and 'fieldOutput' the targetfield.
/*
* FIELD VALUE XCHGE:
* We call the 'BB source IDs' (bounding box source IDs) the set of source cell IDs transmitted just based on the bounding box information.
- * This is potentially bigger than what is finally in the interp matrix and this is stored in _sent_src_ids_st2.
+ * This is potentially bigger than what is finally in the interp matrix and this is stored in _sent_src_ids.
* We call 'interp source IDs' the set of source cell IDs with non null entries in the interp matrix. This is a sub-set of the above.
*/
for(int procID=0;procID<grpSize;procID++)
* * if procID == myProcID, send nothing
* * elif 'procID' in _proc_ids_to_send_vector_st (computed from the BB intersection)
* % if myProcID computed the job (myProcID, procID)
- * => send only 'interp source IDs' field values (i.e. IDs stored in _src_ids_zip_proc_st2)
+ * => send only 'interp source IDs' field values (i.e. IDs stored in _src_ids_zip_comp)
* % else (=we just sent mesh data to procID, but have never seen the matrix, i.e. matrix was computed remotely by procID)
- * => send 'BB source IDs' set of field values (i.e. IDs stored in _sent_src_ids_st2)
+ * => send 'BB source IDs' set of field values (i.e. IDs stored in _sent_src_ids)
*/
if (procID == myProcID)
nbsend[procID] = 0;
else
if(find(_proc_ids_to_send_vector_st.begin(),_proc_ids_to_send_vector_st.end(),procID)!=_proc_ids_to_send_vector_st.end())
{
- MEDCouplingAutoRefCountObjectPtr<DataArrayDouble> vals;
+ MCAuto<DataArrayDouble> vals;
if(_locator.isInMyTodoList(myProcID, procID))
{
- vector<int>::const_iterator isItem11 = find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),procID);
- if (isItem11 == _src_ids_zip_proc_st2.end())
- throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _src_ids_zip_proc_st2!");
- int id=distance(_src_ids_zip_proc_st2.begin(),isItem11);
- int sz=_src_ids_zip_st2[id].size();
- vals=fieldInput->getArray()->selectByTupleId(&(_src_ids_zip_st2[id])[0],&(_src_ids_zip_st2[id])[0]+sz);
+ map<int, vector<int> >::const_iterator isItem11 = _src_ids_zip_comp.find(procID);
+ if (isItem11 == _src_ids_zip_comp.end())
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _src_ids_zip_comp!");
+ const vector<int> & v = (*isItem11).second;
+ int sz = v.size();
+ vals=fieldInput->getArray()->selectByTupleId(&(v[0]),&(v[0])+sz);
}
else
{
- vector<int>::const_iterator isItem11 = find(_sent_src_proc_st2.begin(),_sent_src_proc_st2.end(),procID );
- if (isItem11 == _sent_src_proc_st2.end())
- throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _sent_src_proc_st2!");
- int id=distance(_sent_src_proc_st2.begin(),isItem11);
- vals=fieldInput->getArray()->selectByTupleId(*_sent_src_ids_st2[id]);
+ map < int, MCAuto<DataArrayInt> >::const_iterator isItem11 = _sent_src_ids.find( procID );
+ if (isItem11 == _sent_src_ids.end())
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _sent_src_ids!");
+ vals=fieldInput->getArray()->selectByTupleId(*(*isItem11).second);
}
nbsend[procID] = vals->getNbOfElems();
valsToSend.insert(valsToSend.end(),vals->getConstPointer(),vals->getConstPointer()+nbsend[procID]);
* * elif 'procID' in _proc_ids_to_recv_vector_st (computed from BB intersec)
* % if myProcID computed the job (procID, myProcID)
* => receive full set ('BB source IDs') of field data from proc #procID which has never seen the matrix
- * i.e. prepare to receive the numb in _nb_of_rcv_src_ids_proc_st2
+ * i.e. prepare to receive the number stored in _nb_of_rcv_src_ids
* % else (=we did NOT compute the job, hence procID has, and knows the matrix)
* => receive 'interp source IDs' set of field values
*/
+ const std::vector< int > & _proc_ids_to_recv_vector_st = _the_matrix_st_source_proc_id;
if (procID == myProcID)
nbrecv[procID] = 0;
else
{
if(_locator.isInMyTodoList(procID, myProcID))
{
- vector<int>::const_iterator isItem11 = find(_rcv_src_ids_proc_st2.begin(),_rcv_src_ids_proc_st2.end(),procID);
- if (isItem11 == _rcv_src_ids_proc_st2.end())
- throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _rcv_src_ids_proc_st2!");
- int id=distance(_rcv_src_ids_proc_st2.begin(),isItem11);
- nbrecv[procID] = _nb_of_rcv_src_ids_proc_st2[id];
+ map <int,int>::const_iterator isItem11 = _nb_of_rcv_src_ids.find(procID);
+ if (isItem11 == _nb_of_rcv_src_ids.end())
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _nb_of_rcv_src_ids!");
+ nbrecv[procID] = (*isItem11).second;
}
else
{
- vector<int>::const_iterator isItem11 = find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),procID);
- if (isItem11 == _src_ids_zip_proc_st2.end())
- throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _src_ids_zip_proc_st2!");
- int id=distance(_src_ids_zip_proc_st2.begin(),isItem11);
- nbrecv[procID] = _src_ids_zip_st2[id].size()*nbOfCompo;
+ map<int, vector<int> >::const_iterator isItem11 = _src_ids_zip_recv.find(procID);
+ if (isItem11 == _src_ids_zip_recv.end())
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _src_ids_zip_recv!");
+ nbrecv[procID] = (*isItem11).second.size()*nbOfCompo;
}
}
}
scout << "(" << myProcID << ") valsToSend: ";
for (int iii=0; iii<valsToSend.size(); iii++)
scout << ", " << valsToSend[iii];
+ cout << scout.str() << "\n";
#endif
/*
* % if received matrix (=we didn't compute the job), this means that :
* 1. we sent part of our targetIDs to srcProcID before, so that srcProcId can do the computation.
* 2. srcProcID has sent us only the 'interp source IDs' field values
- * => invert _src_ids_zip_st2 -> 'revert_zip'
+ * => invert _src_ids_zip_recv -> 'revert_zip'
* => for all target cell ID 'tgtCellID'
- * => mappedTgtID = _sent_trg_ids_st2[srcProcID][tgtCellID]
+ * => mappedTgtID = _sent_trg_ids[srcProcID][tgtCellID]
* => for all src cell ID 'srcCellID' in the sparse vector
* => idx = revert_zip[srcCellID]
* => tgtFieldLocal[mappedTgtID] += rcvValue[srcProcID][idx] * matrix[tgtCellID][srcCellID] / deno[tgtCellID][srcCellID]
*/
if(!_locator.isInMyTodoList(srcProcID, myProcID))
{
- // invert _src_ids_zip_proc_st2
+ // invert _src_ids_zip_recv
map<int,int> revert_zip;
- vector< int >::const_iterator it11= find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),srcProcID);
- if (it11 == _src_ids_zip_proc_st2.end())
- throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _src_ids_zip_proc_st2!");
- int id1=distance(_src_ids_zip_proc_st2.begin(),it11);
+ map<int, vector<int> >::const_iterator it11= _src_ids_zip_recv.find(srcProcID);
+ if (it11 == _src_ids_zip_recv.end())
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _src_ids_zip_recv!");
+ const vector<int> & vec = (*it11).second;
int newId=0;
- for(vector<int>::const_iterator it=_src_ids_zip_st2[id1].begin();it!=_src_ids_zip_st2[id1].end();it++,newId++)
+ for(vector<int>::const_iterator it=vec.begin();it!=vec.end();it++,newId++)
revert_zip[*it]=newId;
- vector<int>::const_iterator isItem24 = find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),srcProcID);
- if (isItem24 == _sent_trg_proc_st2.end())
- throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _sent_trg_proc_st2!");
- int id2 = distance(_sent_trg_proc_st2.begin(),isItem24);
- const DataArrayInt *tgrIdsDA=_sent_trg_ids_st2[id2];
+ map < int, MCAuto<DataArrayInt> >::const_iterator isItem24 = _sent_trg_ids.find(srcProcID);
+ if (isItem24 == _sent_trg_ids.end())
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _sent_trg_ids!");
+ const DataArrayInt *tgrIdsDA = (*isItem24).second;
const int *tgrIds = tgrIdsDA->getConstPointer();
int nbOfTrgTuples=mat.size();
/*!
* This method should be called immediately after _the_matrix_st has been filled with remote computed matrix
* put in this proc for Matrix-Vector.
- * It finishes the filling _src_ids_zip_st2 and _src_ids_zip_proc_st2 (see member doc)
+ * It fills _src_ids_zip_recv (see member doc)
*/
-void OverlapMapping::updateZipSourceIdsForMultiply()
+void OverlapMapping::fillSourceIdsZipReceivedForMultiply()
{
/* When it is called, only the bits received from other processors (i.e. the remotely executed jobs) are in the
big matrix _the_matrix_st. */
if(curSrcProcId!=myProcId) // if =, data has been populated by addContributionST()
{
const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
- _src_ids_zip_proc_st2.push_back(curSrcProcId);
- _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1);
std::set<int> s;
for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
s.insert((*it2).first);
- _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end());
+ vector<int> vec(s.begin(),s.end());
+ _src_ids_zip_recv[curSrcProcId] = vec;
}
}
}