From: abn
Date: Tue, 1 Dec 2015 14:50:26 +0000 (+0100)
Subject: OverlapDEC: new load balancing algo (option setWorkSharingAlgo(int)) plus some refactoring.
X-Git-Tag: V8_0_0a1~7^2~1
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=494244ba3ab85191db9e0b33fb8a1381cd7c5738;p=modules%2Fmed.git

OverlapDEC: new load balancing algo (option setWorkSharingAlgo(int)) plus some refactoring.
---
diff --git a/src/ParaMEDMEM/OverlapDEC.cxx b/src/ParaMEDMEM/OverlapDEC.cxx index 4fcfdc49f..9e5221dea 100644 --- a/src/ParaMEDMEM/OverlapDEC.cxx +++ b/src/ParaMEDMEM/OverlapDEC.cxx @@ -209,6 +209,7 @@ namespace ParaMEDMEM The method in charge to perform this is : ParaMEDMEM::OverlapMapping::prepare. */ OverlapDEC::OverlapDEC(const std::set& procIds, const MPI_Comm& world_comm): + _load_balancing_algo(1), _own_group(true),_interpolation_matrix(0), _locator(0), _source_field(0),_own_source_field(false), _target_field(0),_own_target_field(false), @@ -284,7 +285,7 @@ namespace ParaMEDMEM if (_target_field->getField()->getNumberOfComponents() != _source_field->getField()->getNumberOfComponents()) throw INTERP_KERNEL::Exception("OverlapDEC::synchronize(): source and target field have different number of components!"); delete _interpolation_matrix; - _locator = new OverlapElementLocator(_source_field,_target_field,*_group, getBoundingBoxAdjustmentAbs()); + _locator = new OverlapElementLocator(_source_field,_target_field,*_group, getBoundingBoxAdjustmentAbs(), _load_balancing_algo); _interpolation_matrix=new OverlapInterpolationMatrix(_source_field,_target_field,*_group,*this,*this, *_locator); _locator->copyOptions(*this); _locator->exchangeMeshes(*_interpolation_matrix); @@ -297,7 +298,7 @@ namespace ParaMEDMEM const DataArrayInt *srcIds=_locator->getSourceIds((*it).first); const MEDCouplingPointSet *trg=_locator->getTargetMesh((*it).second); const DataArrayInt *trgIds=_locator->getTargetIds((*it).second); - _interpolation_matrix->addContribution(src,srcIds,srcMeth,(*it).first,trg,trgIds,trgMeth,(*it).second); + _interpolation_matrix->computeLocalIntersection(src,srcIds,srcMeth,(*it).first,trg,trgIds,trgMeth,(*it).second); } _interpolation_matrix->prepare(_locator->getProcsToSendFieldData()); _interpolation_matrix->computeSurfacesAndDeno(); diff --git a/src/ParaMEDMEM/OverlapDEC.hxx b/src/ParaMEDMEM/OverlapDEC.hxx index 0fcba0971..a1f29cf22 100644 --- a/src/ParaMEDMEM/OverlapDEC.hxx +++ b/src/ParaMEDMEM/OverlapDEC.hxx @@ -50,7 +50,10 @@ namespace ParaMEDMEM bool isInGroup() const; void setDefaultValue(double val) {_default_field_value = val;} + void setWorkSharingAlgo(int method) { _load_balancing_algo = method; } private: + int _load_balancing_algo; // 0 means initial algo from Antho, 1 means Adrien's algo.
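As an illustration (not part of the patch): setWorkSharingAlgo() above is the only user-visible API change; the stored flag (default 1, i.e. the new algorithm) is handed to the OverlapElementLocator when synchronize() builds it. Below is a minimal sketch of how a caller could select the algorithm, assuming the usual OverlapDEC sequence used in the ParaMEDMEM tests (attachSourceLocalField / attachTargetLocalField / synchronize / sendRecvData); the function name and the parafieldS/parafieldT arguments are hypothetical.

    #include <set>
    #include "OverlapDEC.hxx"
    #include "ParaFIELD.hxx"

    // Sketch only: assumes MPI is already initialized and parafieldS/parafieldT are
    // ParaFIELD objects built on this rank (as in the ParaMEDMEMTest cases).
    void runOverlapExchange(ParaMEDMEM::ParaFIELD *parafieldS, ParaMEDMEM::ParaFIELD *parafieldT)
    {
      std::set<int> procs;
      for (int i = 0; i < 3; i++)
        procs.insert(i);                        // ranks taking part in the DEC
      ParaMEDMEM::OverlapDEC dec(procs);
      dec.setWorkSharingAlgo(1);                // 0: original algorithm, 1: new algorithm (the default)
      dec.attachSourceLocalField(parafieldS);
      dec.attachTargetLocalField(parafieldT);
      dec.synchronize();                        // builds the OverlapElementLocator with the chosen algo
      dec.sendRecvData(true);                   // source -> target transfer
    }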
Make your choice :-)) + bool _own_group; OverlapInterpolationMatrix* _interpolation_matrix; OverlapElementLocator* _locator; diff --git a/src/ParaMEDMEM/OverlapElementLocator.cxx b/src/ParaMEDMEM/OverlapElementLocator.cxx index 0410526bf..93984ccce 100644 --- a/src/ParaMEDMEM/OverlapElementLocator.cxx +++ b/src/ParaMEDMEM/OverlapElementLocator.cxx @@ -42,7 +42,7 @@ namespace ParaMEDMEM const int OverlapElementLocator::START_TAG_MESH_XCH = 1140; OverlapElementLocator::OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, - const ProcessorGroup& group, double epsAbs) + const ProcessorGroup& group, double epsAbs, int workSharingAlgo) : _local_source_field(sourceField), _local_target_field(targetField), _local_source_mesh(0), @@ -56,7 +56,17 @@ namespace ParaMEDMEM if(_local_target_field) _local_target_mesh=_local_target_field->getSupport()->getCellMesh(); _comm=getCommunicator(); - computeBoundingBoxesAndTodoList(); + + computeBoundingBoxesAndInteractionList(); + if (workSharingAlgo == 0) + computeTodoList_original(); + else + if(workSharingAlgo == 1) + computeTodoList_new(); + else + throw INTERP_KERNEL::Exception("OverlapElementLocator::OverlapElementLocator(): invalid algorithm selected!"); + + fillProcToSend(); } OverlapElementLocator::~OverlapElementLocator() @@ -70,7 +80,7 @@ namespace ParaMEDMEM return group->getComm(); } - void OverlapElementLocator::computeBoundingBoxesAndTodoList() + void OverlapElementLocator::computeBoundingBoxesAndInteractionList() { CommInterface comm_interface=_group.getCommInterface(); const MPIProcessorGroup* group=static_cast (&_group); @@ -115,30 +125,30 @@ namespace ParaMEDMEM _proc_pairs.resize(_group.size()); for(int i=0;i<_group.size();i++) for(int j=0;j<_group.size();j++) - { if(intersectsBoundingBox(i,j)) - { _proc_pairs[i].push_back(j); - } - } + } + + void OverlapElementLocator::computeTodoList_original() + { // OK now let's assigning as balanced as possible, job to each proc of group - std::vector< std::vector< ProcCouple > > pairsToBeDonePerProc(_group.size()); + _all_todo_lists.resize(_group.size()); int i=0; for(std::vector< std::vector< int > >::const_iterator it1=_proc_pairs.begin();it1!=_proc_pairs.end();it1++,i++) for(std::vector< int >::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) { - if(pairsToBeDonePerProc[i].size()<=pairsToBeDonePerProc[*it2].size())//it includes the fact that i==*it2 - pairsToBeDonePerProc[i].push_back(ProcCouple(i,*it2)); + if(_all_todo_lists[i].size()<=_all_todo_lists[*it2].size())//it includes the fact that i==*it2 + _all_todo_lists[i].push_back(ProcCouple(i,*it2)); else - pairsToBeDonePerProc[*it2].push_back(ProcCouple(i,*it2)); + _all_todo_lists[*it2].push_back(ProcCouple(i,*it2)); } //Keeping todo list of current proc. _to_do_list contains a set of pair where at least _group.myRank() appears once. //This proc will be in charge to perform interpolation of any of element of '_to_do_list' //If _group.myRank()==myPair.first, current proc should fetch target mesh of myPair.second (if different from _group.myRank()). //If _group.myRank()==myPair.second, current proc should fetch source mesh of myPair.second. - + int myProcId=_group.myRank(); - _to_do_list=pairsToBeDonePerProc[myProcId]; + _to_do_list=_all_todo_lists[myProcId]; #ifdef DEC_DEBUG std::stringstream scout; @@ -147,14 +157,134 @@ namespace ParaMEDMEM scout << "(" << (*itdbg).first << "," << (*itdbg).second << ")"; std::cout << scout.str() << "\n"; #endif + } + /* More efficient (?) 
work sharing algorithm: a job (i,j) is initially assigned twice: to proc#i and to proc#j. + * Then try to reduce as much as possible the variance of the num of jobs per proc: + * - take the most loaded proc i, + * + select the job (i,j) for which proc#j is the less loaded + * + remove this job from proc#i + * - repeat until no more duplicates are found + */ + void OverlapElementLocator::computeTodoList_new() + { + using namespace std; + int infinity = std::numeric_limits::max(); + // Initialisation + int grp_size = _group.size(); + vector < map > full_set(grp_size ); + int srcProcID = 0; + for(vector< vector< int > >::const_iterator it = _proc_pairs.begin(); it != _proc_pairs.end(); it++, srcProcID++) + for (vector< int >::const_iterator it2=(*it).begin(); it2 != (*it).end(); it2++) + { + // Here a pair of the form (i,i) is added only once! + int tgtProcID = *it2; + ProcCouple cpl = make_pair(srcProcID, tgtProcID); + full_set[srcProcID][cpl] = -1; + full_set[tgtProcID][cpl] = -1; + } + int procID = 0; + vector < map > ::iterator itVector; + map::iterator itMap; + for(itVector = full_set.begin(); itVector != full_set.end(); itVector++, procID++) + for (itMap=(*itVector).begin(); itMap != (*itVector).end(); itMap++) + { + const ProcCouple & cpl = (*itMap).first; + if (cpl.first == cpl.second) + // special case - this couple can not be removed in the future + (*itMap).second = infinity; + else + { + if(cpl.first == procID) + (*itMap).second = full_set[cpl.second].size(); + else // cpl.second == srcProcID + (*itMap).second = full_set[cpl.first].size(); + } + } + INTERP_KERNEL::AutoPtr proc_valid = new bool[grp_size]; + fill((bool *)proc_valid, proc_valid+grp_size, true); + + // Now the algo: + while (find((bool *)proc_valid, proc_valid+grp_size, true) != proc_valid+grp_size) + { + // Most loaded proc: + int max_sz = -1, max_id = -1; + for(itVector = full_set.begin(), procID=0; itVector != full_set.end(); itVector++, procID++) + { + int sz = (*itVector).size(); + if (proc_valid[procID] && sz > max_sz) + { + max_sz = (*itVector).size(); + max_id = procID; + } + } + + // Nothing more to do: + if (max_sz == -1) + break; + // For this proc, job with less loaded second proc: + int min_sz = infinity; + map & max_map = full_set[max_id]; + ProcCouple hit_cpl = make_pair(-1,-1); + for(itMap=max_map.begin(); itMap != max_map.end(); itMap++) + if ((*itMap).second < min_sz) + hit_cpl = (*itMap).first; + if (hit_cpl.first == -1) + { + // Plouf. Current proc 'max_id' can not be reduced. 
Invalidate it: + proc_valid[max_id] = false; + continue; + } + // Remove item from proc 'max_id' + full_set[max_id].erase(hit_cpl); + // And mark it as not removable on the other side: + if (hit_cpl.first == max_id) + full_set[hit_cpl.second][hit_cpl] = infinity; + else // hit_cpl.second == max_id + full_set[hit_cpl.first][hit_cpl] = infinity; + + // Now update all counts of valid maps: + procID = 0; + for(itVector = full_set.begin(); itVector != full_set.end(); itVector++, procID++) + if(proc_valid[procID] && procID != max_id) + for(itMap = (*itVector).begin(); itMap != (*itVector).end(); itMap++) + { + const ProcCouple & cpl = (*itMap).first; + // Unremovable item: + if ((*itMap).second == infinity) + continue; + if (cpl.first == max_id || cpl.second == max_id) + (*itMap).second--; + } + } + // Final formatting - extract remaining keys in each map: + int myProcId=_group.myRank(); + _all_todo_lists.resize(grp_size); + procID = 0; + for(itVector = full_set.begin(); itVector != full_set.end(); itVector++, procID++) + for(itMap = (*itVector).begin(); itMap != (*itVector).end(); itMap++) + _all_todo_lists[procID].push_back((*itMap).first); + _to_do_list=_all_todo_lists[myProcId]; + +#ifdef DEC_DEBUG + std::stringstream scout; + scout << "(" << myProcId << ") my TODO list is: "; + for (std::vector< ProcCouple >::const_iterator itdbg=_to_do_list.begin(); itdbg!=_to_do_list.end(); itdbg++) + scout << "(" << (*itdbg).first << "," << (*itdbg).second << ")"; + std::cout << scout.str() << "\n"; +#endif + } + + void OverlapElementLocator::fillProcToSend() + { // Feeding now '_procs_to_send*'. The same id can appear twice. The second parameter in the pair indicates what // to send: true=source, false=target + int myProcId=_group.myRank(); _procs_to_send_mesh.clear(); _procs_to_send_field.clear(); for(int i=_group.size()-1;i>=0;i--) { - const std::vector< ProcCouple >& anRemoteProcToDoList=pairsToBeDonePerProc[i]; + const std::vector< ProcCouple >& anRemoteProcToDoList=_all_todo_lists[i]; for(std::vector< ProcCouple >::const_iterator it=anRemoteProcToDoList.begin();it!=anRemoteProcToDoList.end();it++) { if((*it).first==myProcId) @@ -170,6 +300,7 @@ namespace ParaMEDMEM } } + /*! * The aim of this method is to perform the communication to get data corresponding to '_to_do_list' attribute.
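To make the work-sharing heuristic above easier to follow outside of the MEDCoupling types, here is a standalone sketch of the same idea (every off-diagonal job (i,j) starts on both proc i and proc j; the duplicate is then repeatedly removed from the most loaded proc, choosing the job whose partner proc is least loaded). This is illustrative only and assumes nothing beyond the standard library; the committed computeTodoList_new() additionally caches the partner loads in the map values and invalidates procs that cannot be reduced further.

    #include <limits>
    #include <map>
    #include <utility>
    #include <vector>

    typedef std::pair<int, int> ProcCouple;

    // procPairs[i] lists the target procs whose bounding boxes interact with source proc i
    // (the role played by _proc_pairs). Returns one todo-list per proc.
    std::vector< std::vector<ProcCouple> > shareWork(const std::vector< std::vector<int> >& procPairs)
    {
      const int n = static_cast<int>(procPairs.size());
      // For each proc: job -> "the twin copy still exists on the partner proc".
      std::vector< std::map<ProcCouple, bool> > jobs(n);
      for (int i = 0; i < n; i++)
        for (std::size_t k = 0; k < procPairs[i].size(); k++)
          {
            const int j = procPairs[i][k];
            const ProcCouple cpl(i, j);
            if (i == j)
              jobs[i][cpl] = false;                         // (i,i) can never be dropped
            else
              { jobs[i][cpl] = true; jobs[j][cpl] = true; } // duplicated on both procs
          }
      while (true)
        {
          // Most loaded proc still holding a removable (duplicated) job:
          int maxId = -1;
          std::size_t maxSz = 0;
          for (int p = 0; p < n; p++)
            {
              bool removable = false;
              for (std::map<ProcCouple, bool>::const_iterator it = jobs[p].begin(); it != jobs[p].end(); ++it)
                if ((*it).second) { removable = true; break; }
              if (removable && jobs[p].size() > maxSz)
                { maxSz = jobs[p].size(); maxId = p; }
            }
          if (maxId == -1)
            break;                                          // no duplicate left anywhere
          // Among its removable jobs, pick the one whose partner proc is least loaded:
          std::size_t minPartnerSz = std::numeric_limits<std::size_t>::max();
          ProcCouple hit(-1, -1);
          int partner = -1;
          for (std::map<ProcCouple, bool>::const_iterator it = jobs[maxId].begin(); it != jobs[maxId].end(); ++it)
            {
              if (!(*it).second)
                continue;
              const int other = ((*it).first.first == maxId) ? (*it).first.second : (*it).first.first;
              if (jobs[other].size() < minPartnerSz)
                { minPartnerSz = jobs[other].size(); hit = (*it).first; partner = other; }
            }
          jobs[maxId].erase(hit);                           // drop the duplicate here ...
          jobs[partner][hit] = false;                       // ... and pin the remaining copy there
        }
      std::vector< std::vector<ProcCouple> > todo(n);
      for (int p = 0; p < n; p++)
        for (std::map<ProcCouple, bool>::const_iterator it = jobs[p].begin(); it != jobs[p].end(); ++it)
          todo[p].push_back((*it).first);
      return todo;
    }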
* The principle is the following : if proc n1 and n2 need to perform a cross sending with n1 #include +//#define DEC_DEBUG + namespace ParaMEDMEM { class ParaFIELD; @@ -43,7 +45,8 @@ namespace ParaMEDMEM class OverlapElementLocator : public INTERP_KERNEL::InterpolationOptions { public: - OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group,double epsAbs); + OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group, + double epsAbs, int workSharingAlgo); virtual ~OverlapElementLocator(); const MPI_Comm *getCommunicator() const; void exchangeMeshes(OverlapInterpolationMatrix& matrix); @@ -57,7 +60,10 @@ namespace ParaMEDMEM const DataArrayInt *getTargetIds(int procId) const; bool isInMyTodoList(int i, int j) const; private: - void computeBoundingBoxesAndTodoList(); + void computeBoundingBoxesAndInteractionList(); + void computeTodoList_original(); + void computeTodoList_new(); + void fillProcToSend(); bool intersectsBoundingBox(int i, int j) const; void sendLocalMeshTo(int procId, bool sourceOrTarget, OverlapInterpolationMatrix& matrix) const; void receiveRemoteMeshFrom(int procId, bool sourceOrTarget); @@ -78,9 +84,11 @@ namespace ParaMEDMEM MEDCouplingPointSet *_local_target_mesh; /*! of size _group.size(). Contains for each source proc i, the ids of proc j the targets interact with. - This vector is common for all procs in _group. */ + This vector is common for all procs in _group. This is the full list of jobs to do the interp. */ std::vector< std::vector< int > > _proc_pairs; - //! list of interpolation couples to be done by this proc only. This is a simple extraction of the member _pairsToBeDonePerProc + //! todo lists per proc + std::vector< std::vector< ProcCouple > > _all_todo_lists; + //! list of interpolation couples to be done by this proc only. This is a simple extraction of the member above _all_todo_lists std::vector< ProcCouple > _to_do_list; //! list of procs the local proc will have to send mesh data to: std::vector< Proc_SrcOrTgt > _procs_to_send_mesh; diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx index 7b84d60a4..060783833 100644 --- a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx +++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx @@ -77,12 +77,14 @@ namespace ParaMEDMEM // TODO? Merge with MEDCouplingRemapper::prepareInterpKernelOnlyUU() ? /**! + * Local run (on this proc) of the sequential interpolation algorithm. 
+ * + * @param srcIds is null if the source mesh is on the local proc + * @param trgIds is null if the target mesh is on the local proc + * + * One of the 2 is necessarily null (the two can be null together) */ - void OverlapInterpolationMatrix::addContribution(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId, + void OverlapInterpolationMatrix::computeLocalIntersection(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId, const MEDCouplingPointSet *trg, const DataArrayInt *trgIds, const std::string& trgMeth, int trgProcId) { std::string interpMethod(srcMeth); diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx index de7fb905b..d652ad024 100644 --- a/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx +++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx @@ -52,7 +52,7 @@ namespace ParaMEDMEM void keepTracksOfTargetIds(int procId, DataArrayInt *ids); - void addContribution(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId, + void computeLocalIntersection(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId, const MEDCouplingPointSet *trg, const DataArrayInt *trgIds, const std::string& trgMeth, int trgProcId); void prepare(const std::vector< int > & procsToSendField); diff --git a/src/ParaMEDMEM/OverlapMapping.cxx b/src/ParaMEDMEM/OverlapMapping.cxx index 1ef168ff4..f66dee02c 100644 --- a/src/ParaMEDMEM/OverlapMapping.cxx +++ b/src/ParaMEDMEM/OverlapMapping.cxx @@ -42,8 +42,7 @@ OverlapMapping::OverlapMapping(const ProcessorGroup& group, const OverlapElement void OverlapMapping::keepTracksOfSourceIds(int procId, DataArrayInt *ids) { ids->incrRef(); - _sent_src_ids_st2.push_back(ids); - _sent_src_proc_st2.push_back(procId); + _sent_src_ids[procId] = ids; } /*! @@ -52,8 +51,7 @@ void OverlapMapping::keepTracksOfSourceIds(int procId, DataArrayInt *ids) void OverlapMapping::keepTracksOfTargetIds(int procId, DataArrayInt *ids) { ids->incrRef(); - _sent_trg_ids_st2.push_back(ids); - _sent_trg_proc_st2.push_back(procId); + _sent_trg_ids[procId] = ids; } /*!
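A note on the refactoring pattern visible just above and applied throughout OverlapMapping in this commit: pairs of parallel vectors (one holding the data, one holding the proc ID it belongs to, queried with std::find + std::distance) are replaced by a single std::map keyed by the proc ID. A schematic before/after, for illustration only; DataArrayInt is only forward-declared here, and the real members hold reference-counted pointers.

    #include <algorithm>
    #include <map>
    #include <vector>

    class DataArrayInt;                     // stand-in for ParaMEDMEM::DataArrayInt

    // Before: two vectors kept in step (e.g. _sent_src_ids_st2 / _sent_src_proc_st2).
    DataArrayInt *lookupOld(const std::vector<int>& sentProcs,
                            const std::vector<DataArrayInt *>& sentIds, int procId)
    {
      std::vector<int>::const_iterator it = std::find(sentProcs.begin(), sentProcs.end(), procId);
      if (it == sentProcs.end())
        return 0;
      return sentIds[std::distance(sentProcs.begin(), it)];
    }

    // After: a single map keyed by proc ID (e.g. _sent_src_ids / _sent_trg_ids).
    DataArrayInt *lookupNew(const std::map<int, DataArrayInt *>& sentIds, int procId)
    {
      std::map<int, DataArrayInt *>::const_iterator it = sentIds.find(procId);
      return it == sentIds.end() ? 0 : (*it).second;
    }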
@@ -72,10 +70,7 @@ void OverlapMapping::addContributionST(const std::vector< SparseDoubleVec >& mat _source_proc_id_st.push_back(srcProcId); _target_proc_id_st.push_back(trgProcId); if(srcIds) // source mesh part is remote <=> srcProcId != myRank - { - _rcv_src_ids_proc_st2.push_back(srcProcId); - _nb_of_rcv_src_ids_proc_st2.push_back(srcIds->getNumberOfTuples()); - } + _nb_of_rcv_src_ids[srcProcId] = srcIds->getNumberOfTuples(); else // source mesh part is local { std::set s; @@ -83,9 +78,8 @@ void OverlapMapping::addContributionST(const std::vector< SparseDoubleVec >& mat for(std::vector< SparseDoubleVec >::const_iterator it1=matrixST.begin();it1!=matrixST.end();it1++) for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) s.insert((*it2).first); - _src_ids_zip_proc_st2.push_back(trgProcId); - _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1); - _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end()); + vector v(s.begin(), s.end()); // turn set into vector + _src_ids_zip_comp[trgProcId] = v; } } @@ -152,10 +146,12 @@ void OverlapMapping::prepare(const std::vector< int >& procsToSendField, int nbO //finishing unserializationST(nbOfTrgElems,nbrecv,bigArrRecv,nbrecv1,nbrecv2, bigArrRecv2,bigArrDRecv2,nbrecv3,nbrecv4); - //updating _src_ids_zip_st2 and _src_ids_zip_st2 with received matrix. - updateZipSourceIdsForMultiply(); + //finish to fill _the_matrix_st with already in place matrix in _matrixes_st (local computation) finishToFillFinalMatrixST(); + + //updating _src_ids_zip_st2 and _src_ids_zip_st2 with received matrix. + fillSourceIdsZipReceivedForMultiply(); // Prepare proc list for future field data exchange (mutliply()): _proc_ids_to_send_vector_st = procsToSendField; // Make some space on local proc: @@ -255,9 +251,9 @@ void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg) { const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i]; int curSrcId=_the_matrix_st_source_proc_id[i]; - std::vector::iterator isItem1=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),curSrcId); + map < int, MEDCouplingAutoRefCountObjectPtr >::const_iterator isItem1 = _sent_trg_ids.find(curSrcId); int rowId=0; - if(isItem1==_sent_trg_proc_st2.end() || curSrcId==myProcId) // Local computation: simple, because rowId of mat are directly target cell ids. + if(isItem1==_sent_trg_ids.end() || curSrcId==myProcId) // Local computation: simple, because rowId of mat are directly target cell ids. 
{ for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) @@ -265,9 +261,7 @@ void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg) } else // matrix was received, remote computation { - std::vector::iterator fnd=isItem1;//std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId); - int locId=std::distance(_sent_trg_proc_st2.begin(),fnd); - const DataArrayInt *trgIds=_sent_trg_ids_st2[locId]; + const DataArrayInt *trgIds = (*isItem1).second; const int *trgIds2=trgIds->getConstPointer(); for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) @@ -280,10 +274,10 @@ void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg) int rowId=0; const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i]; int curSrcId=_the_matrix_st_source_proc_id[i]; - std::vector::iterator isItem1=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),curSrcId); + map < int, MEDCouplingAutoRefCountObjectPtr >::const_iterator isItem1 = _sent_trg_ids.find(curSrcId); std::vector< SparseDoubleVec >& denoM=_the_deno_st[i]; denoM.resize(mat.size()); - if(isItem1==_sent_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids. + if(isItem1==_sent_trg_ids.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids. { int rowId=0; for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) @@ -292,9 +286,7 @@ void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg) } else { - std::vector::iterator fnd=isItem1; - int locId=std::distance(_sent_trg_proc_st2.begin(),fnd); - const DataArrayInt *trgIds=_sent_trg_ids_st2[locId]; + const DataArrayInt *trgIds = (*isItem1).second; const int *trgIds2=trgIds->getConstPointer(); for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) @@ -516,7 +508,7 @@ void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCoupl /* * FIELD VALUE XCHGE: * We call the 'BB source IDs' (bounding box source IDs) the set of source cell IDs transmitted just based on the bounding box information. - * This is potentially bigger than what is finally in the interp matrix and this is stored in _sent_src_ids_st2. + * This is potentially bigger than what is finally in the interp matrix and this is stored in _sent_src_ids. * We call 'interp source IDs' the set of source cell IDs with non null entries in the interp matrix. This is a sub-set of the above. */ for(int procID=0;procID send only 'interp source IDs' field values (i.e. IDs stored in _src_ids_zip_proc_st2) + * => send only 'interp source IDs' field values (i.e. IDs stored in _src_ids_zip_comp) * % else (=we just sent mesh data to procID, but have never seen the matrix, i.e. matrix was computed remotely by procID) - * => send 'BB source IDs' set of field values (i.e. IDs stored in _sent_src_ids_st2) + * => send 'BB source IDs' set of field values (i.e. 
IDs stored in _sent_src_ids) */ if (procID == myProcID) nbsend[procID] = 0; @@ -538,20 +530,19 @@ void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCoupl MEDCouplingAutoRefCountObjectPtr vals; if(_locator.isInMyTodoList(myProcID, procID)) { - vector::const_iterator isItem11 = find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),procID); - if (isItem11 == _src_ids_zip_proc_st2.end()) - throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _src_ids_zip_proc_st2!"); - int id=distance(_src_ids_zip_proc_st2.begin(),isItem11); - int sz=_src_ids_zip_st2[id].size(); - vals=fieldInput->getArray()->selectByTupleId(&(_src_ids_zip_st2[id])[0],&(_src_ids_zip_st2[id])[0]+sz); + map >::const_iterator isItem11 = _src_ids_zip_comp.find(procID); + if (isItem11 == _src_ids_zip_comp.end()) + throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _src_ids_zip_comp!"); + const vector & v = (*isItem11).second; + int sz = v.size(); + vals=fieldInput->getArray()->selectByTupleId(&(v[0]),&(v[0])+sz); } else { - vector::const_iterator isItem11 = find(_sent_src_proc_st2.begin(),_sent_src_proc_st2.end(),procID ); - if (isItem11 == _sent_src_proc_st2.end()) - throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _sent_src_proc_st2!"); - int id=distance(_sent_src_proc_st2.begin(),isItem11); - vals=fieldInput->getArray()->selectByTupleId(*_sent_src_ids_st2[id]); + map < int, MEDCouplingAutoRefCountObjectPtr >::const_iterator isItem11 = _sent_src_ids.find( procID ); + if (isItem11 == _sent_src_ids.end()) + throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: SEND: unexpected end iterator in _sent_src_ids!"); + vals=fieldInput->getArray()->selectByTupleId(*(*isItem11).second); } nbsend[procID] = vals->getNbOfElems(); valsToSend.insert(valsToSend.end(),vals->getConstPointer(),vals->getConstPointer()+nbsend[procID]); @@ -563,7 +554,7 @@ void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCoupl * * elif 'procID' in _proc_ids_to_recv_vector_st (computed from BB intersec) * % if myProcID computed the job (procID, myProcID) * => receive full set ('BB source IDs') of field data from proc #procID which has never seen the matrix - * i.e. prepare to receive the numb in _nb_of_rcv_src_ids_proc_st2 + * i.e. 
prepare to receive the numb in _nb_of_rcv_src_ids * % else (=we did NOT compute the job, hence procID has, and knows the matrix) * => receive 'interp source IDs' set of field values */ @@ -575,19 +566,17 @@ void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCoupl { if(_locator.isInMyTodoList(procID, myProcID)) { - vector::const_iterator isItem11 = find(_rcv_src_ids_proc_st2.begin(),_rcv_src_ids_proc_st2.end(),procID); - if (isItem11 == _rcv_src_ids_proc_st2.end()) - throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _rcv_src_ids_proc_st2!"); - int id=distance(_rcv_src_ids_proc_st2.begin(),isItem11); - nbrecv[procID] = _nb_of_rcv_src_ids_proc_st2[id]; + map ::const_iterator isItem11 = _nb_of_rcv_src_ids.find(procID); + if (isItem11 == _nb_of_rcv_src_ids.end()) + throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _nb_of_rcv_src_ids!"); + nbrecv[procID] = (*isItem11).second; } else { - vector::const_iterator isItem11 = find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),procID); - if (isItem11 == _src_ids_zip_proc_st2.end()) - throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _src_ids_zip_proc_st2!"); - int id=distance(_src_ids_zip_proc_st2.begin(),isItem11); - nbrecv[procID] = _src_ids_zip_st2[id].size()*nbOfCompo; + map >::const_iterator isItem11 = _src_ids_zip_recv.find(procID); + if (isItem11 == _src_ids_zip_recv.end()) + throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: RCV: unexpected end iterator in _src_ids_zip_recv!"); + nbrecv[procID] = (*isItem11).second.size()*nbOfCompo; } } } @@ -606,6 +595,7 @@ void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCoupl scout << "(" << myProcID << ") valsToSend: "; for (int iii=0; iii invert _src_ids_zip_st2 -> 'revert_zip' + * => invert _src_ids_zip_recv -> 'revert_zip' * => for all target cell ID 'tgtCellID' - * => mappedTgtID = _sent_trg_ids_st2[srcProcID][tgtCellID] + * => mappedTgtID = _sent_trg_ids[srcProcID][tgtCellID] * => for all src cell ID 'srcCellID' in the sparse vector * => idx = revert_zip[srcCellID] * => tgtFieldLocal[mappedTgtID] += rcvValue[srcProcID][idx] * matrix[tgtCellID][srcCellID] / deno[tgtCellID][srcCellID] */ if(!_locator.isInMyTodoList(srcProcID, myProcID)) { - // invert _src_ids_zip_proc_st2 + // invert _src_ids_zip_recv map revert_zip; - vector< int >::const_iterator it11= find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),srcProcID); - if (it11 == _src_ids_zip_proc_st2.end()) - throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _src_ids_zip_proc_st2!"); - int id1=distance(_src_ids_zip_proc_st2.begin(),it11); + map >::const_iterator it11= _src_ids_zip_recv.find(srcProcID); + if (it11 == _src_ids_zip_recv.end()) + throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _src_ids_zip_recv!"); + const vector & vec = (*it11).second; int newId=0; - for(vector::const_iterator it=_src_ids_zip_st2[id1].begin();it!=_src_ids_zip_st2[id1].end();it++,newId++) + for(vector::const_iterator it=vec.begin();it!=vec.end();it++,newId++) revert_zip[*it]=newId; - vector::const_iterator isItem24 = find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),srcProcID); - if (isItem24 == _sent_trg_proc_st2.end()) - throw 
INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _sent_trg_proc_st2!"); - int id2 = distance(_sent_trg_proc_st2.begin(),isItem24); - const DataArrayInt *tgrIdsDA=_sent_trg_ids_st2[id2]; + map < int, MEDCouplingAutoRefCountObjectPtr >::const_iterator isItem24 = _sent_trg_ids.find(srcProcID); + if (isItem24 == _sent_trg_ids.end()) + throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error: MULTIPLY: unexpected end iterator in _sent_trg_ids!"); + const DataArrayInt *tgrIdsDA = (*isItem24).second; const int *tgrIds = tgrIdsDA->getConstPointer(); int nbOfTrgTuples=mat.size(); @@ -778,9 +767,9 @@ void OverlapMapping::transposeMultiply(const MEDCouplingFieldDouble *fieldInput, /*! * This method should be called immediately after _the_matrix_st has been filled with remote computed matrix * put in this proc for Matrix-Vector. - * It finishes the filling _src_ids_zip_st2 and _src_ids_zip_proc_st2 (see member doc) + * It fills _src_ids_zip_recv (see member doc) */ -void OverlapMapping::updateZipSourceIdsForMultiply() +void OverlapMapping::fillSourceIdsZipReceivedForMultiply() { /* When it is called, only the bits received from other processors (i.e. the remotely executed jobs) are in the big matrix _the_matrix_st. */ @@ -794,13 +783,12 @@ void OverlapMapping::updateZipSourceIdsForMultiply() if(curSrcProcId!=myProcId) // if =, data has been populated by addContributionST() { const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i]; - _src_ids_zip_proc_st2.push_back(curSrcProcId); - _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1); std::set s; for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++) for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) s.insert((*it2).first); - _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end()); + vector vec(s.begin(),s.end()); + _src_ids_zip_recv[curSrcProcId] = vec; } } } diff --git a/src/ParaMEDMEM/OverlapMapping.hxx b/src/ParaMEDMEM/OverlapMapping.hxx index 58dab05c4..ab9cb3139 100644 --- a/src/ParaMEDMEM/OverlapMapping.hxx +++ b/src/ParaMEDMEM/OverlapMapping.hxx @@ -34,14 +34,14 @@ namespace ParaMEDMEM class DataArrayInt; class MEDCouplingFieldDouble; - typedef std::map SparseDoubleVec; + using namespace std; + typedef map SparseDoubleVec; /*! * Internal class, not part of the public API. * * Used by the impl of OverlapInterpolationMatrix, plays an equivalent role than what the NxM_Mapping * does for the InterpolationMatrix. 
- * */ class OverlapMapping { @@ -50,8 +50,8 @@ namespace ParaMEDMEM OverlapMapping(const ProcessorGroup& group, const OverlapElementLocator& locator); void keepTracksOfSourceIds(int procId, DataArrayInt *ids); void keepTracksOfTargetIds(int procId, DataArrayInt *ids); - void addContributionST(const std::vector< SparseDoubleVec >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId); - void prepare(const std::vector< int >& procsToSendField, int nbOfTrgElems); + void addContributionST(const vector< SparseDoubleVec >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId); + void prepare(const vector< int >& procsToSendField, int nbOfTrgElems); void computeDenoConservativeVolumic(int nbOfTuplesTrg); // void computeDenoIntegralGlobConstraint(); // void computeDenoIntegral(); @@ -60,7 +60,6 @@ namespace ParaMEDMEM void multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput, double default_val) const; void transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput); private: -// void fillProcToSendRcvForMultiply(const std::vector< int >& procsToSendField); void serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigArr, int *count, int *offsets, int *countForRecv, int *offsetsForRecv) const; int serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *recvStep0, const int *countStep0, const int *offsStep0, @@ -69,7 +68,7 @@ namespace ParaMEDMEM void unserializationST(int nbOfTrgElems, const int *nbOfElemsSrcPerProc, const int *bigArrRecv, const int *bigArrRecvCounts, const int *bigArrRecvOffs, const int *bigArrRecv2, const double *bigArrDRecv2, const int *bigArrRecv2Count, const int *bigArrRecv2Offs); void finishToFillFinalMatrixST(); - void updateZipSourceIdsForMultiply(); + void fillSourceIdsZipReceivedForMultiply(); #ifdef DEC_DEBUG void printMatrixesST() const; @@ -80,58 +79,51 @@ namespace ParaMEDMEM const ProcessorGroup &_group; const OverlapElementLocator& _locator; - /**! Vector of DAInt of cell identifiers. The 2 following class members work in pair. For a proc ID i, - * first member gives an old2new map for the local part of the source mesh that has been sent to proc#i, just based on the + /**! Map of DAInt of cell identifiers. For a proc ID i, + * gives an old2new map for the local part of the source mesh that has been sent to proc#i, just based on the * bounding box computation (this is potentially a larger set than what is finally in the interp matrix). * Second member gives proc ID. */ - std::vector< MEDCouplingAutoRefCountObjectPtr > _sent_src_ids_st2; - //! see above _sent_src_ids_st2 - std::vector< int > _sent_src_proc_st2; - - //! See _src_ids_st2 and _sent_src_proc_st2. Same for target mesh. - std::vector< MEDCouplingAutoRefCountObjectPtr > _sent_trg_ids_st2; - //! See _src_ids_st2 and _sent_src_proc_st2. Same for target mesh. - std::vector< int > _sent_trg_proc_st2; + map < int, MEDCouplingAutoRefCountObjectPtr > _sent_src_ids; + //! See _sent_src_ids. Same for target mesh. + map < int, MEDCouplingAutoRefCountObjectPtr > _sent_trg_ids; /**! Vector of matrixes (partial interpolation ratios), result of the LOCAL interpolator run. * Indexing shared with _source_proc_id_st, and _target_proc_id_st. */ - std::vector< std::vector< SparseDoubleVec > > _matrixes_st; + vector< vector< SparseDoubleVec > > _matrixes_st; //! 
See _matrixes_st - vec of source proc IDs - std::vector< int > _source_proc_id_st; + vector< int > _source_proc_id_st; //! See _matrixes_st - vec of target proc IDs - std::vector< int > _target_proc_id_st; - - /**! Vector of remote proc IDs from which this proc received cell IDs of the source mesh. - * Indexing shared with _nb_of_rcv_src_ids_proc_st2 */ - std::vector< int > _rcv_src_ids_proc_st2; - /**! Number of received source mesh IDs at mesh data exchange. See _src_ids_proc_st2 above. - Counting the number of IDs suffices, as we just need this to prepare the receive when doing the final vector matrix multiplication */ - std::vector< int > _nb_of_rcv_src_ids_proc_st2; - - /**! Specifies for each (target) remote proc ID (given in _src_ids_zip_proc_st2 below) the corresponding - * source cell IDs to use. Same indexing as _src_ids_zip_proc_st2. Sorted. - * On a given proc, and after updateZipSourceIdsForMultiply(), this member contains exactly the same set of source cell IDs as what is given - * in the locally held interpolation matrices. - * IMPORTANT: as a consequence cell IDs in _src_ids_zip_st2 are *remote* identifiers. */ - std::vector< std::vector > _src_ids_zip_st2; - //! Vector of remote proc ID to which the local source mapping above corresponds. See _src_ids_zip_st2 above. - std::vector< int > _src_ids_zip_proc_st2; + vector< int > _target_proc_id_st; + + /**! Number of received source mesh IDs at mesh data exchange. + Counting the number of IDs suffices, as we just need this to prepare the receive side, when doing the final vector matrix multiplication. + First dimension is the remote proc ID from which we received. */ + map _nb_of_rcv_src_ids; + + /**! Specifies for each (target) remote proc ID (first dim of the map) the corresponding + * source cell IDs to use. + * This information is stored from the *locally* COMPuted matrices, and corresponds hence to field value that will need to + * sent later on, if this matrix bit itself is sent aways. */ + map > _src_ids_zip_comp; + + /**! Same idea as _src_ids_zip_comp above, but for RECEIVED matrix. */ + map > _src_ids_zip_recv; /**! THE matrix for matrix-vector product. The first dimension is indexed in the set of target procs * that interacts with local source mesh. The second dim is the target cell ID. - * Same indexing as _the_matrix_st_source_proc_id */ - std::vector< std::vector< SparseDoubleVec > > _the_matrix_st; + * Same indexing as _the_matrix_st_source_proc_id and _the_deno_st. + * We don't use a map here to be more efficient in the final matrix-vector computation which requires the joint + * taversal of _the_matrix_st and _the_deno_st. + * This matrix is filled after receival from other procs, contrary to _matrixes_st which contains local computations.*/ + vector< vector< SparseDoubleVec > > _the_matrix_st; //! See _the_matrix_st above. List of source proc IDs contributing to _the_matrix_st - std::vector< int > _the_matrix_st_source_proc_id; + vector< int > _the_matrix_st_source_proc_id; + // Denominators (computed from the numerator matrix). As for _the_matrix_st it is paired with _the_matrix_st_source_proc_id + vector< vector< SparseDoubleVec > > _the_deno_st; //! Proc IDs to which data will be sent (originating this current proc) for matrix-vector computation - std::vector< int > _proc_ids_to_send_vector_st; - //! 
Proc IDs from which data will be received (on this current proc) for matrix-vector computation - //std::vector< int > _proc_ids_to_recv_vector_st; // directly equal to _the_matrix_st_source_proc_id - - // Denominators (computed from the numerator matrix). As for _the_matrix_st it is paired with _the_matrix_st_source_proc_id - std::vector< std::vector< SparseDoubleVec > > _the_deno_st; + vector< int > _proc_ids_to_send_vector_st; }; } diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx index 9d6d1f141..4272b9985 100644 --- a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx @@ -48,7 +48,9 @@ class ParaMEDMEMTest : public CppUnit::TestFixture CPPUNIT_TEST(testInterpKernelDECPartialProcs); // 3 procs CPPUNIT_TEST(testInterpKernelDEC3DSurfEmptyBBox); // 3 procs CPPUNIT_TEST(testOverlapDEC1); // 3 procs + CPPUNIT_TEST(testOverlapDEC1_bis); // 3 procs CPPUNIT_TEST(testOverlapDEC2); // 3 procs + CPPUNIT_TEST(testOverlapDEC2_bis); // 3 procs CPPUNIT_TEST(testOverlapDEC3); // 2 procs CPPUNIT_TEST(testOverlapDEC4); // 2 procs @@ -109,8 +111,9 @@ public: void testInterpKernelDECPartialProcs(); void testInterpKernelDEC3DSurfEmptyBBox(); void testOverlapDEC1(); + void testOverlapDEC1_bis(); void testOverlapDEC2(); -// void testOverlapDEC2_bis(); + void testOverlapDEC2_bis(); void testOverlapDEC3(); // void testOverlapDEC3_bis(); void testOverlapDEC4(); diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx index 187d3df3f..995c79c87 100644 --- a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx @@ -311,128 +311,6 @@ void prepareData1(int rank, NatureOfField nature, } } -/*! Test case from the official doc of the OverlapDEC. - * WARNING: bounding boxes are tweaked here to make the case more interesting (i.e. to avoid an all to all exchange - * between all procs). - */ -void ParaMEDMEMTest::testOverlapDEC1() -{ - int size, rank; - MPI_Comm_size(MPI_COMM_WORLD,&size); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - // char hostname[256]; - // printf("(%d) PID %d on localhost ready for attach\n", rank, getpid()); - // fflush(stdout); - -// if (rank == 1) -// { -// int i=1, j=0; -// while (i!=0) -// j=2; -// } - - if (size != 3) return ; - int nproc = 3; - std::set procs; - for (int i=0; igetArray()->getIJ(0,0),1e-12); - } - if(rank==1) - { - CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,mcfieldT->getArray()->getIJ(0,0),1e-12); - } - if(rank==2) - { - CPPUNIT_ASSERT_DOUBLES_EQUAL(10.5,mcfieldT->getArray()->getIJ(0,0),1e-12); - } - - mcfieldS->decrRef(); - mcfieldT->decrRef(); - - MPI_Barrier(MPI_COMM_WORLD); -} - -/*! - * Same as testOverlapDEC1() but with regular bounding boxes. If you're looking for a nice debug case, - * testOverlapDEC1() is identical in terms of geometry and field values, and more appropriate. 
- */ -void ParaMEDMEMTest::testOverlapDEC2() -{ - int size, rank; - MPI_Comm_size(MPI_COMM_WORLD,&size); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - - if (size != 3) return ; - int nproc = 3; - std::set procs; - for (int i=0; igetArray()->getIJ(0,0),1e-12); - } - if(rank==1) - { - CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,mcfieldT->getArray()->getIJ(0,0),1e-12); - } - if(rank==2) - { - CPPUNIT_ASSERT_DOUBLES_EQUAL(10.5,mcfieldT->getArray()->getIJ(0,0),1e-12); - } - - mcfieldS->decrRef(); - mcfieldT->decrRef(); - - MPI_Barrier(MPI_COMM_WORLD); -} - void prepareData2_buildOneSquare(MEDCouplingUMesh* & meshS_0, MEDCouplingUMesh* & meshT_0) { const double coords[10] = {0.0,0.0, 0.0,1.0, 1.0,1.0, 1.0,0.0, 0.5,0.5}; @@ -574,6 +452,103 @@ void prepareData2(int rank, ProcessorGroup * grp, NatureOfField nature, meshT_0->decrRef(); } +/*! Test case from the official doc of the OverlapDEC. + * WARNING: bounding boxes might be tweaked here to make the case more interesting (i.e. to avoid an all to all exchange + * between all procs). + */ +void testOverlapDEC_generic(int workSharingAlgo, double bbAdj) +{ + int size, rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // char hostname[256]; + // printf("(%d) PID %d on localhost ready for attach\n", rank, getpid()); + // fflush(stdout); + +// if (rank == 0) +// { +// int i=1, j=0; +// while (i!=0) +// j=2; +// } + + if (size != 3) return ; + int nproc = 3; + std::set procs; + for (int i=0; igetArray()->getIJ(0,0),1e-12); + } + if(rank==1) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,mcfieldT->getArray()->getIJ(0,0),1e-12); + } + if(rank==2) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(10.5,mcfieldT->getArray()->getIJ(0,0),1e-12); + } + + mcfieldS->decrRef(); + mcfieldT->decrRef(); + + MPI_Barrier(MPI_COMM_WORLD); +} + +void ParaMEDMEMTest::testOverlapDEC1() +{ + /*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * HACK ON BOUNDING BOX TO MAKE THIS CASE SIMPLE AND USABLE IN DEBUG + * Bounding boxes are slightly smaller than should be, thus localizing the work to be done + * and avoiding every proc talking to everyone else. + * Obviously this is NOT a good idea to do this in production code :-) + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + */ + testOverlapDEC_generic(0,-1.0e-12); +} + +void ParaMEDMEMTest::testOverlapDEC1_bis() +{ + /*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * Same BB hack as above + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + */ + testOverlapDEC_generic(1,-1.0e-12); +} + +/*! + * Same as testOverlapDEC1() but with regular bounding boxes. If you're looking for a nice debug case, + * testOverlapDEC1() is identical in terms of geometry and field values, and more appropriate. + */ +void ParaMEDMEMTest::testOverlapDEC2() +{ + testOverlapDEC_generic(0,1.0e-12); +} + +void ParaMEDMEMTest::testOverlapDEC2_bis() +{ + testOverlapDEC_generic(1,1.0e-12); +} + + /*! Test focused on the mapping of cell IDs. * (i.e. when only part of the source/target mesh is transmitted) */ @@ -716,6 +691,5 @@ void ParaMEDMEMTest::testOverlapDEC4() meshT->decrRef(); MPI_Barrier(MPI_COMM_WORLD); - }