From: ageay
Date: Mon, 24 Aug 2009 14:23:18 +0000 (+0000)
Subject: P0->P1 parallel.
X-Git-Tag: V5_1_main_FINAL~361
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=1092e41877185d0134b7ddd1077407ae4a6cdf2f;p=tools%2Fmedcoupling.git

P0->P1 parallel.
---

diff --git a/src/ParaMEDMEM/ElementLocator.cxx b/src/ParaMEDMEM/ElementLocator.cxx
index 16d2ef66c..7a7237fb9 100644
--- a/src/ParaMEDMEM/ElementLocator.cxx
+++ b/src/ParaMEDMEM/ElementLocator.cxx
@@ -46,6 +46,7 @@ namespace ParaMEDMEM
   {
     _union_group = _local_group.fuse(distant_group);
     _computeBoundingBoxes();
+    _comm=getCommunicator();
   }

   ElementLocator::~ElementLocator()
@@ -60,6 +61,11 @@ namespace ParaMEDMEM
     return group->getComm();
   }

+  NatureOfField ElementLocator::getLocalNature() const
+  {
+    return _local_para_field.getField()->getNature();
+  }
+
   // ==========================================================================
   // Procedure for exchanging mesh between a distant proc and a local processor
   // param idistantrank proc id on distant group
@@ -91,25 +97,6 @@ namespace ParaMEDMEM
     if(send_mesh)
       send_mesh->decrRef();
-#if 0
-    int* distant_ids_send=0;
-    //send_mesh contains null pointer if elems is empty
-    MEDCouplingPointSet* send_mesh = _local_cell_mesh->buildPartOfMySelf(&elems[0],&elems[elems.size()],false);
-    // Constituting an array containing the ids of the elements that are
-    // going to be sent to the distant subdomain.
-    // This array enables the correct redistribution of the data when the
-    // interpolated field is transmitted to the target array
-
-    if (elems.size()>0)
-      {
-        distant_ids_send = new int[elems.size()];
-        std::copy(elems.begin(),elems.end(),distant_ids_send);
-      }
-    _exchangeMesh(send_mesh, distant_mesh, idistantrank, distant_ids_send, distant_ids);
-    delete[] distant_ids_send;
-    if(send_mesh)
-      send_mesh->decrRef();
-#endif
   }

   void ElementLocator::exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth)
@@ -253,4 +240,399 @@ namespace ParaMEDMEM
     v1Distant->decrRef();
     v2Distant->decrRef();
   }
+
+  /*!
+   * connected with ElementLocator::sendPolicyToWorkingSideL
+   */
+  void ElementLocator::recvPolicyFromLazySideW(std::vector<int>& policy)
+  {
+    policy.resize(_distant_proc_ids.size());
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int toRecv;
+        comm.recv((void *)&toRecv,1,MPI_INT,*iter,1120,*_comm,&status);
+        policy[procId]=toRecv;
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvFromWorkingSideL
+   */
+  void ElementLocator::sendSumToLazySideW(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& eltIds=distantLocEltIds[procId];
+        const vector<double>& valued=partialSumRelToDistantIds[procId];
+        int lgth=eltIds.size();
+        comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm);
+        comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1115,*_comm);
+        comm.send((void *)&valued[0],lgth,MPI_DOUBLE,*iter,1116,*_comm);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::sendToWorkingSideL
+   */
+  void ElementLocator::recvSumFromLazySideW(std::vector< std::vector<double> >& globalSumRelToDistantIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        std::vector<double>& vec=globalSumRelToDistantIds[procId];
+        comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvLocalIdsFromWorkingSideL
+   */
+  void ElementLocator::sendLocalIdsToLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& eltIds=distantLocEltIds[procId];
+        int lgth=eltIds.size();
+        comm.send(&lgth,1,MPI_INT,*iter,1121,*_comm);
+        comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1122,*_comm);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::sendGlobalIdsToWorkingSideL
+   */
+  void ElementLocator::recvGlobalIdsFromLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds, std::vector< std::vector<int> >& globalIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    globalIds.resize(_distant_proc_ids.size());
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& vec=distantLocEltIds[procId];
+        std::vector<int>& global=globalIds[procId];
+        global.resize(vec.size());
+        comm.recv(&global[0],vec.size(),MPI_INT,*iter,1123,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::sendCandidatesGlobalIdsToWorkingSideL
+   */
+  void ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector<int> >& globalIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    globalIds.resize(_distant_proc_ids.size());
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        std::vector<int>& global=globalIds[procId];
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1132,*_comm,&status);
+        global.resize(lgth);
+        comm.recv(&global[0],lgth,MPI_INT,*iter,1133,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvSumFromWorkingSideL
+   */
+  void ElementLocator::sendPartialSumToLazyProcsW(const std::vector<int>& distantGlobIds, const std::vector<double>& sum)
+  {
+    int procId=0;
+    CommInterface comm;
+    int lgth=distantGlobIds.size();
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        comm.send(&lgth,1,MPI_INT,*iter,1124,*_comm);
+        comm.send((void*)&distantGlobIds[0],lgth,MPI_INT,*iter,1125,*_comm);
+        comm.send((void*)&sum[0],lgth,MPI_DOUBLE,*iter,1126,*_comm);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvCandidatesForAddElementsL
+   */
+  void ElementLocator::sendCandidatesForAddElementsW(const std::vector<int>& distantGlobIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    int lgth=distantGlobIds.size();
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        comm.send(&lgth,1,MPI_INT,*iter,1128,*_comm);
+        comm.send((void*)&distantGlobIds[0],lgth,MPI_INT,*iter,1129,*_comm);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::sendAddElementsToWorkingSideL
+   */
+  void ElementLocator::recvAddElementsFromLazyProcsW(std::vector<std::vector<int> >& elementsToAdd)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    int lgth=_distant_proc_ids.size();
+    elementsToAdd.resize(lgth);
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int locLgth;
+        std::vector<int>& eltToFeed=elementsToAdd[procId];
+        comm.recv(&locLgth,1,MPI_INT,*iter,1130,*_comm,&status);
+        eltToFeed.resize(locLgth);
+        comm.recv(&eltToFeed[0],locLgth,MPI_INT,*iter,1131,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvPolicyFromLazySideW
+   */
+  int ElementLocator::sendPolicyToWorkingSideL()
+  {
+    CommInterface comm;
+    int toSend;
+    DataArrayInt *isCumulative=_local_para_field.returnCumulativeGlobalNumbering();
+    if(isCumulative)
+      {
+        toSend=CUMULATIVE_POLICY;
+        isCumulative->decrRef();
+      }
+    else
+      toSend=NO_POST_TREATMENT_POLICY;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++)
+      comm.send(&toSend,1,MPI_INT,*iter,1120,*_comm);
+    return toSend;
+  }
+
+  /*!
+   * connected with ElementLocator::sendSumToLazySideW
+   */
+  void ElementLocator::recvFromWorkingSideL()
+  {
+    _values_added.resize(_local_para_field.getField()->getNumberOfTuples());
+    int procId=0;
+    CommInterface comm;
+    _ids_per_working_proc.resize(_distant_proc_ids.size());
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status);
+        vector<int>& ids=_ids_per_working_proc[procId];
+        ids.resize(lgth);
+        vector<double> values(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status);
+        comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status);
+        for(int i=0;i<lgth;i++)
+          _values_added[ids[i]]+=values[i];
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvSumFromLazySideW
+   */
+  void ElementLocator::sendToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double> valsToSend(ids.size());
+        vector<double>::iterator iter3=valsToSend.begin();
+        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++)
+          *iter3=_values_added[*iter2];
+        comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm);
+        //ids.clear();
+      }
+    //_ids_per_working_proc.clear();
+  }
+
+  /*!
+   * connected with ElementLocator::sendLocalIdsToLazyProcsW
+   */
+  void ElementLocator::recvLocalIdsFromWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    _ids_per_working_proc.resize(_distant_proc_ids.size());
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int lgth;
+        vector<int>& ids=_ids_per_working_proc[procId];
+        comm.recv(&lgth,1,MPI_INT,*iter,1121,*_comm,&status);
+        ids.resize(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1122,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvGlobalIdsFromLazyProcsW
+   */
+  void ElementLocator::sendGlobalIdsToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& ids=_ids_per_working_proc[procId];
+        vector<int> valsToSend(ids.size());
+        vector<int>::iterator iter1=valsToSend.begin();
+        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
+          *iter1=globalIdsC[*iter2];
+        comm.send(&valsToSend[0],ids.size(),MPI_INT,*iter,1123,*_comm);
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
+
+  /*!
+   * connected with ElementLocator::sendPartialSumToLazyProcsW
+   */
+  void ElementLocator::recvSumFromWorkingSideL()
+  {
+    int procId=0;
+    int wProcSize=_distant_proc_ids.size();
+    CommInterface comm;
+    _ids_per_working_proc.resize(wProcSize);
+    _values_per_working_proc.resize(wProcSize);
+    MPI_Status status;
+    std::map<int,double> sums;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1124,*_comm,&status);
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double>& vals=_values_per_working_proc[procId];
+        ids.resize(lgth);
+        vals.resize(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1125,*_comm,&status);
+        comm.recv(&vals[0],lgth,MPI_DOUBLE,*iter,1126,*_comm,&status);
+        vector<int>::const_iterator iter1=ids.begin();
+        vector<double>::const_iterator iter2=vals.begin();
+        for(;iter1!=ids.end();iter1++,iter2++)
+          sums[*iter1]+=*iter2;
+      }
+    //assign sum to prepare sending to working side
+    for(procId=0;procId<wProcSize;procId++)
+      {
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double>& vals=_values_per_working_proc[procId];
+        vector<int>::const_iterator iter1=ids.begin();
+        vector<double>::iterator iter2=vals.begin();
+        for(;iter1!=ids.end();iter1++,iter2++)
+          *iter2=sums[*iter1];
+        ids.clear();
+      }
+  }
+
+  /*!
+   * For each working proc Wi, this method computes and stores in _ids_per_working_proc3
+   * the local ids of the nodes that exist on this lazy proc and interact with Wi through
+   * another lazy proc, \b but are not yet in interaction with Wi on this proc.
+   * The computation is performed here; sendAddElementsToWorkingSideL is only in charge of sending the
+   * precomputed _ids_per_working_proc3 attribute.
+   * connected with ElementLocator::sendCandidatesForAddElementsW
+   */
+  void ElementLocator::recvCandidatesForAddElementsL()
+  {
+    int procId=0;
+    int wProcSize=_distant_proc_ids.size();
+    CommInterface comm;
+    _ids_per_working_proc3.resize(wProcSize);
+    MPI_Status status;
+    std::map<int,double> sums;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    int nbElts=globalIds->getNumberOfTuples();
+    std::set<int> globalIdsS(globalIdsC,globalIdsC+nbElts);
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& ids0=_ids_per_working_proc[procId];
+        int lgth0=ids0.size();
+        std::set<int> elts0;
+        for(int i=0;i<lgth0;i++)
+          elts0.insert(globalIdsC[ids0[i]]);
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1128,*_comm,&status);
+        vector<int> ids(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1129,*_comm,&status);
+        set<int> ids1(ids.begin(),ids.end());
+        ids.clear();
+        set<int> tmp5,tmp6;
+        set_intersection(globalIdsS.begin(),globalIdsS.end(),ids1.begin(),ids1.end(),inserter(tmp5,tmp5.begin()));
+        set_difference(tmp5.begin(),tmp5.end(),elts0.begin(),elts0.end(),inserter(tmp6,tmp6.begin()));
+        std::vector<int>& ids2=_ids_per_working_proc3[procId];
+        ids2.resize(tmp6.size());
+        std::copy(tmp6.begin(),tmp6.end(),ids2.begin());
+        //global->local
+        for(std::vector<int>::iterator iter2=ids2.begin();iter2!=ids2.end();iter2++)
+          *iter2=std::find(globalIdsC,globalIdsC+nbElts,*iter2)-globalIdsC;
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
+
+  /*!
+   * connected with ElementLocator::recvAddElementsFromLazyProcsW
+   */
+  void ElementLocator::sendAddElementsToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& vals=_ids_per_working_proc3[procId];
+        int size=vals.size();
+        comm.send((void *)&size,1,MPI_INT,*iter,1130,*_comm);
+        comm.send((void *)&vals[0],size,MPI_INT,*iter,1131,*_comm);
+      }
+  }
+
+  /*!
+   * This method sends to working side Wi only the nodes in interaction with Wi \b and located on the boundary, to reduce the number of ids exchanged.
+   * connected with ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW
+   */
+  void ElementLocator::sendCandidatesGlobalIdsToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    std::vector<int> candidates;
+    _local_para_field.getSupport()->getCellMesh()->findBoundaryNodes(candidates);
+    for(std::vector<int>::iterator iter1=candidates.begin();iter1!=candidates.end();iter1++)
+      (*iter1)=globalIdsC[*iter1];
+    std::set<int> candidatesS(candidates.begin(),candidates.end());
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& ids=_ids_per_working_proc[procId];
+        vector<int> valsToSend(ids.size());
+        vector<int>::iterator iter1=valsToSend.begin();
+        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
+          *iter1=globalIdsC[*iter2];
+        std::set<int> tmp2(valsToSend.begin(),valsToSend.end());
+        std::vector<int> tmp3;
+        set_intersection(candidatesS.begin(),candidatesS.end(),tmp2.begin(),tmp2.end(),std::back_insert_iterator< std::vector<int> >(tmp3));
+        int lgth=tmp3.size();
+        comm.send(&lgth,1,MPI_INT,*iter,1132,*_comm);
+        comm.send(&tmp3[0],lgth,MPI_INT,*iter,1133,*_comm);
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
 }
diff --git a/src/ParaMEDMEM/ElementLocator.hxx b/src/ParaMEDMEM/ElementLocator.hxx
index 1acbf84f6..1dbf51a71 100644
--- a/src/ParaMEDMEM/ElementLocator.hxx
+++ b/src/ParaMEDMEM/ElementLocator.hxx
@@ -20,6 +20,7 @@
 #define __ELEMENTLOCATOR_HXX__

 #include "InterpolationOptions.hxx"
+#include "MEDCouplingNatureOfField.hxx"
 #include <mpi.h>
 #include <vector>
@@ -46,6 +47,30 @@ namespace ParaMEDMEM
     void exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth);
     const std::vector<int>& getDistantProcIds() const { return _distant_proc_ids; }
     const MPI_Comm *getCommunicator() const;
+    NatureOfField getLocalNature() const;
+    //Working side methods
+    void recvPolicyFromLazySideW(std::vector<int>& policy);
+    void sendSumToLazySideW(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds);
+    void recvSumFromLazySideW(std::vector< std::vector<double> >& globalSumRelToDistantIds);
+    void sendCandidatesForAddElementsW(const std::vector<int>& distantGlobIds);
+    void recvAddElementsFromLazyProcsW(std::vector<std::vector<int> >& elementsToAdd);
+    //
+    void sendLocalIdsToLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds);
+    void recvGlobalIdsFromLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds, std::vector< std::vector<int> >& globalIds);
+    void recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector<int> >& globalIds);
+    void sendPartialSumToLazyProcsW(const std::vector<int>& distantGlobIds, const std::vector<double>& sum);
+    //Lazy side methods
+    int sendPolicyToWorkingSideL();
+    void recvFromWorkingSideL();
+    void sendToWorkingSideL();
+    //
+    void recvLocalIdsFromWorkingSideL();
+    void sendGlobalIdsToWorkingSideL();
+    void sendCandidatesGlobalIdsToWorkingSideL();
+    //
+    void recvSumFromWorkingSideL();
+    void recvCandidatesForAddElementsL();
+    void sendAddElementsToWorkingSideL();
   private:
     void _computeBoundingBoxes();
     bool _intersectsBoundingBox(int irank);
@@ -63,6 +88,15 @@ namespace ParaMEDMEM
     const ProcessorGroup& _local_group;
     ProcessorGroup* _union_group;
     std::vector<int> _distant_proc_ids;
+    const MPI_Comm *_comm;
+    //Attributes only used by lazy side
+    std::vector<double> _values_added;
+    std::vector< std::vector<int> > _ids_per_working_proc;
+    std::vector< std::vector<int> >
_ids_per_working_proc3; + std::vector< std::vector > _values_per_working_proc; + public: + static const int CUMULATIVE_POLICY=3; + static const int NO_POST_TREATMENT_POLICY=7; }; } diff --git a/src/ParaMEDMEM/GlobalizerMesh.cxx b/src/ParaMEDMEM/GlobalizerMesh.cxx deleted file mode 100644 index 909215846..000000000 --- a/src/ParaMEDMEM/GlobalizerMesh.cxx +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (C) 2007-2008 CEA/DEN, EDF R&D -// -// This library is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 2.1 of the License. -// -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -// -// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com -// -#include "GlobalizerMesh.hxx" -#include "MEDCouplingFieldDouble.hxx" -#include "CommInterface.hxx" - -using namespace std; - -namespace ParaMEDMEM -{ - GlobalizerMesh::GlobalizerMesh(const MPI_Comm *comm, MEDCouplingFieldDouble *localField):_comm(comm),_local_field(localField) - { - if(_local_field) - _local_field->incrRef(); - } - - GlobalizerMesh::~GlobalizerMesh() - { - if(_local_field) - _local_field->decrRef(); - } - - NatureOfField GlobalizerMesh::getLocalNature() const - { - return _local_field->getNature(); - } - - GlobalizerMeshWorkingSide::GlobalizerMeshWorkingSide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField, - const std::string& distantMeth, const std::vector& lazyProcs):GlobalizerMesh(comm,localField),_distant_method(distantMeth),_lazy_procs(lazyProcs) - { - } - - GlobalizerMeshWorkingSide::~GlobalizerMeshWorkingSide() - { - } - - const std::vector& GlobalizerMeshWorkingSide::getProcIdsInInteraction() const - { - return _lazy_procs; - } - - /*! - * connected with GlobalizerMeshLazySide::recvFromWorkingSide - */ - void GlobalizerMeshWorkingSide::sendSumToLazySide(const std::vector< std::vector >& distantLocEltIds, const std::vector< std::vector >& partialSumRelToDistantIds) - { - int procId=0; - CommInterface comm; - for(vector::const_iterator iter=_lazy_procs.begin();iter!=_lazy_procs.end();iter++,procId++) - { - const vector& eltIds=distantLocEltIds[procId]; - const vector& valued=partialSumRelToDistantIds[procId]; - int lgth=eltIds.size(); - comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm); - comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1115,*_comm); - comm.send((void *)&valued[0],lgth,MPI_DOUBLE,*iter,1116,*_comm); - } - } - - /*! 
- * connected with GlobalizerMeshLazySide::sendToWorkingSide - */ - void GlobalizerMeshWorkingSide::recvSumFromLazySide(std::vector< std::vector >& globalSumRelToDistantIds) - { - int procId=0; - CommInterface comm; - MPI_Status status; - for(vector::const_iterator iter=_lazy_procs.begin();iter!=_lazy_procs.end();iter++,procId++) - { - std::vector& vec=globalSumRelToDistantIds[procId]; - comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status); - } - } - - GlobalizerMeshLazySide::GlobalizerMeshLazySide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField, const std::vector& computeProcs):GlobalizerMesh(comm,localField),_compute_procs(computeProcs) - { - } - - GlobalizerMeshLazySide::~GlobalizerMeshLazySide() - { - } - - /*! - * connected with GlobalizerMeshWorkingSide::sendSumToLazySide - */ - void GlobalizerMeshLazySide::recvFromWorkingSide() - { - _values_added.resize(_local_field->getNumberOfTuples()); - int procId=0; - CommInterface comm; - _ids_per_working_proc.resize(_compute_procs.size()); - MPI_Status status; - for(vector::const_iterator iter=_compute_procs.begin();iter!=_compute_procs.end();iter++,procId++) - { - int lgth; - comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status); - vector& ids=_ids_per_working_proc[procId]; - ids.resize(lgth); - vector values(lgth); - comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status); - comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status); - for(int i=0;i::const_iterator iter=_compute_procs.begin();iter!=_compute_procs.end();iter++,procId++) - { - vector& ids=_ids_per_working_proc[procId]; - vector valsToSend(ids.size()); - vector::iterator iter3=valsToSend.begin(); - for(vector::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++) - *iter3=_values_added[*iter2]; - comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm); - ids.clear(); - } - _ids_per_working_proc.clear(); - } -} - diff --git a/src/ParaMEDMEM/GlobalizerMesh.hxx b/src/ParaMEDMEM/GlobalizerMesh.hxx deleted file mode 100644 index 3436545a8..000000000 --- a/src/ParaMEDMEM/GlobalizerMesh.hxx +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2007-2008 CEA/DEN, EDF R&D -// -// This library is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 2.1 of the License. -// -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -// -// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com -// -#ifndef __GLOBALIZERMESH_HXX__ -#define __GLOBALIZERMESH_HXX__ - -#include "MEDCouplingNatureOfField.hxx" - -#include -#include -#include - -namespace ParaMEDMEM -{ - class MEDCouplingFieldDouble; - - class GlobalizerMesh - { - protected: - GlobalizerMesh(const MPI_Comm *comm, MEDCouplingFieldDouble *localField); - public: - NatureOfField getLocalNature() const; - virtual ~GlobalizerMesh(); - protected: - const MPI_Comm *_comm; - MEDCouplingFieldDouble *_local_field; - }; - - class GlobalizerMeshWorkingSide : public GlobalizerMesh - { - public: - GlobalizerMeshWorkingSide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField, const std::string& distantMeth, const std::vector& lazyProcs); - ~GlobalizerMeshWorkingSide(); - // - const std::vector& getProcIdsInInteraction() const; - void sendSumToLazySide(const std::vector< std::vector >& distantLocEltIds, const std::vector< std::vector >& partialSumRelToDistantIds); - void recvSumFromLazySide(std::vector< std::vector >& globalSumRelToDistantIds); - private: - std::string _distant_method; - std::vector _lazy_procs; - }; - - class GlobalizerMeshLazySide : public GlobalizerMesh - { - public: - GlobalizerMeshLazySide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField, const std::vector& computeProcs); - ~GlobalizerMeshLazySide(); - void recvFromWorkingSide(); - void sendToWorkingSide(); - private: - std::vector _compute_procs; - std::vector _values_added; - std::vector< std::vector > _ids_per_working_proc; - }; -} - -#endif diff --git a/src/ParaMEDMEM/InterpolationMatrix.cxx b/src/ParaMEDMEM/InterpolationMatrix.cxx index d75d5428c..6edb3d40e 100644 --- a/src/ParaMEDMEM/InterpolationMatrix.cxx +++ b/src/ParaMEDMEM/InterpolationMatrix.cxx @@ -30,7 +30,9 @@ #include "MEDCouplingNormalizedUnstructuredMesh.txx" #include "InterpolationOptions.hxx" #include "NormalizedUnstructuredMesh.hxx" -#include "GlobalizerMesh.hxx" +#include "ElementLocator.hxx" + +#include // class InterpolationMatrix // This class enables the storage of an interpolation matrix Wij mapping @@ -95,7 +97,7 @@ namespace ParaMEDMEM void InterpolationMatrix::addContribution ( MEDCouplingPointSet& distant_support, int iproc_distant, - int* distant_elems, + const int* distant_elems, const std::string& srcMeth, const std::string& targetMeth) { @@ -148,112 +150,135 @@ namespace ParaMEDMEM { throw INTERP_KERNEL::Exception("no interpolator exists for these mesh and space dimensions "); } - - int source_size=surfaces.size(); bool needTargetSurf=isSurfaceComputationNeeded(targetMeth); - MEDCouplingFieldDouble *target_triangle_surf; + MEDCouplingFieldDouble *target_triangle_surf=0; if(needTargetSurf) target_triangle_surf = distant_support.getMeasureField(); + fillDSFromVM(iproc_distant,distant_elems,surfaces,target_triangle_surf); + if(needTargetSurf) + target_triangle_surf->decrRef(); + } + + void InterpolationMatrix::fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map >& values, MEDCouplingFieldDouble *surf) + { //loop over the elements to build the interpolation //matrix structures + int source_size=values.size(); for (int ielem=0; ielem < source_size; ielem++) { - _row_offsets[ielem+1] += surfaces[ielem].size(); - 
//_source_indices.push_back(make_pair(iproc_distant, ielem)); - - for (map::const_iterator iter = surfaces[ielem].begin(); - iter != surfaces[ielem].end(); - iter++) + _row_offsets[ielem+1] += values[ielem].size(); + for(map::const_iterator iter=values[ielem].begin();iter!=values[ielem].end();iter++) { - //surface of the target triangle - double surf; - if(needTargetSurf) - surf = target_triangle_surf->getIJ(iter->first,0); - + int localId; + if(distant_elems) + localId=distant_elems[iter->first]; + else + localId=iter->first; //locating the (iproc, itriangle) pair in the list of columns - map,int >::iterator iter2 = _col_offsets.find(make_pair(iproc_distant,iter->first)); + map,int >::iterator iter2 = _col_offsets.find(make_pair(iproc_distant,localId)); int col_id; if (iter2 == _col_offsets.end()) { //(iproc, itriangle) is not registered in the list //of distant elements - col_id =_col_offsets.size(); - _col_offsets.insert(make_pair(make_pair(iproc_distant,iter->first),col_id)); - _mapping.addElementFromSource(iproc_distant, - distant_elems[iter->first]); - //target_volume.push_back(surf); + _col_offsets.insert(make_pair(make_pair(iproc_distant,localId),col_id)); + _mapping.addElementFromSource(iproc_distant,localId); } else { col_id = iter2->second; } - //the non zero coefficient is stored //ielem is the row, //col_id is the number of the column //iter->second is the value of the coefficient - if(needTargetSurf) - _target_volume[ielem].push_back(surf); + if(surf) + { + double surface = surf->getIJ(iter->first,0); + _target_volume[ielem].push_back(surface); + } _coeffs[ielem].push_back(make_pair(col_id,iter->second)); } } - if(needTargetSurf) - target_triangle_surf->decrRef(); } - void InterpolationMatrix::finishContributionW(GlobalizerMeshWorkingSide& globalizer) + void InterpolationMatrix::serializeMe(std::vector< std::vector< std::map > >& data1, std::vector& data2) const { - NatureOfField nature=globalizer.getLocalNature(); + data1.clear(); + data2.clear(); + const std::vector >& sendingIds=_mapping.getSendingIds(); + std::set procsS; + for(std::vector >::const_iterator iter1=sendingIds.begin();iter1!=sendingIds.end();iter1++) + procsS.insert((*iter1).first); + data1.resize(procsS.size()); + data2.resize(procsS.size()); + std::copy(procsS.begin(),procsS.end(),data2.begin()); + std::map fastProcAcc; + int id=0; + for(std::set::const_iterator iter2=procsS.begin();iter2!=procsS.end();iter2++,id++) + fastProcAcc[*iter2]=id; + int nbOfSrcElt=_coeffs.size(); + for(std::vector< std::vector< std::map > >::iterator iter3=data1.begin();iter3!=data1.end();iter3++) + (*iter3).resize(nbOfSrcElt); + id=0; + for(std::vector< std::vector< std::pair > >::const_iterator iter4=_coeffs.begin();iter4!=_coeffs.end();iter4++,id++) + { + for(std::vector< std::pair >::const_iterator iter5=(*iter4).begin();iter5!=(*iter4).end();iter5++) + { + const std::pair& elt=sendingIds[(*iter5).first]; + data1[fastProcAcc[elt.first]][id][elt.second]=(*iter5).second; + } + } + } + + void InterpolationMatrix::initialize() + { + int lgth=_coeffs.size(); + _row_offsets.clear(); _row_offsets.resize(lgth+1); + _coeffs.clear(); _coeffs.resize(lgth); + _target_volume.clear(); _target_volume.resize(lgth); + _col_offsets.clear(); + _mapping.initialize(); + } + + void InterpolationMatrix::finishContributionW(ElementLocator& elementLocator) + { + NatureOfField nature=elementLocator.getLocalNature(); switch(nature) { case ConservativeVolumic: - computeConservVolDenoW(globalizer); + computeConservVolDenoW(elementLocator); break; case 
Integral:
-        computeIntegralDenoW(globalizer);
+        computeIntegralDenoW(elementLocator);
         break;
       case IntegralGlobConstraint:
-        computeGlobConstraintDenoW(globalizer);
+        computeGlobConstraintDenoW(elementLocator);
         break;
       default:
         throw INTERP_KERNEL::Exception("Not recognized nature of field. Change nature of Field.");
         break;
       }
-    /*for(map<pair<int,int>,int>::iterator iter=_col_offsets.begin();iter!=_col_offsets.end();iter++)
-      if((*iter).second==9)
-        cout << (*iter).first.first << " ** " << (*iter).first.second << endl;
-      cout << "--> " << _col_offsets[make_pair(4,74)] << endl;
-      for(vector<vector<pair<int,double> > >::iterator iter3=_coeffs.begin();iter3!=_coeffs.end();iter3++)
-        for(vector<pair<int,double> >::iterator iter4=(*iter3).begin();iter4!=(*iter3).end();iter4++)
-          if((*iter4).first==569)
-            cout << " __ " << iter3-_coeffs.begin() << "___" << (*iter4).second << endl;
-      ostringstream st; st << "deno_" << _deno_multiply[0][0];
-      ofstream fs(st.str().c_str());
-      for(vector<vector<double> >::iterator iter1=_deno_multiply.begin();iter1!=_deno_multiply.end();iter1++)
-        {
-          for(vector<double>::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++)
-            fs << *iter2 << " ";
-          fs << endl;
-        }*/
   }

-  void InterpolationMatrix::finishContributionL(GlobalizerMeshLazySide& globalizer)
+  void InterpolationMatrix::finishContributionL(ElementLocator& elementLocator)
   {
-    NatureOfField nature=globalizer.getLocalNature();
+    NatureOfField nature=elementLocator.getLocalNature();
     switch(nature)
       {
       case ConservativeVolumic:
-        computeConservVolDenoL(globalizer);
+        computeConservVolDenoL(elementLocator);
         break;
       case Integral:
-        computeIntegralDenoL(globalizer);
+        computeIntegralDenoL(elementLocator);
         break;
       case IntegralGlobConstraint:
-        computeGlobConstraintDenoL(globalizer);
+        //this is not a bug : doing like ConservativeVolumic
+        computeConservVolDenoL(elementLocator);
         break;
       default:
         throw INTERP_KERNEL::Exception("Not recognized nature of field. Change nature of Field.");
         break;
       }
   }

-  void InterpolationMatrix::computeConservVolDenoW(GlobalizerMeshWorkingSide& globalizer)
+  void InterpolationMatrix::computeConservVolDenoW(ElementLocator& elementLocator)
   {
-    computeGlobalRowSum(globalizer,_deno_multiply);
     computeGlobalColSum(_deno_reverse_multiply);
+    computeGlobalRowSum(elementLocator,_deno_multiply,_deno_reverse_multiply);
   }

-  void InterpolationMatrix::computeConservVolDenoL(GlobalizerMeshLazySide& globalizer)
+  void InterpolationMatrix::computeConservVolDenoL(ElementLocator& elementLocator)
   {
-    globalizer.recvFromWorkingSide();
-    globalizer.sendToWorkingSide();
+    int pol1=elementLocator.sendPolicyToWorkingSideL();
+    if(pol1==ElementLocator::NO_POST_TREATMENT_POLICY)
+      {
+        elementLocator.recvFromWorkingSideL();
+        elementLocator.sendToWorkingSideL();
+      }
+    else if(pol1==ElementLocator::CUMULATIVE_POLICY)
+      {
+        //ask the lazy side to deduce ids possibly missing on the working side and to send them back.
+        elementLocator.recvLocalIdsFromWorkingSideL();
+        elementLocator.sendCandidatesGlobalIdsToWorkingSideL();
+        elementLocator.recvCandidatesForAddElementsL();
+        elementLocator.sendAddElementsToWorkingSideL();
+        //The working side has updated its possibly missing ids and now updates its global ids with the lazy side procs contribution
+        elementLocator.recvLocalIdsFromWorkingSideL();
+        elementLocator.sendGlobalIdsToWorkingSideL();
+        //like no post treatment
+        elementLocator.recvFromWorkingSideL();
+        elementLocator.sendToWorkingSideL();
+      }
+    else
+      throw INTERP_KERNEL::Exception("Not managed policy detected on lazy side : not implemented !");
   }

-  void InterpolationMatrix::computeIntegralDenoW(GlobalizerMeshWorkingSide& globalizer)
+  void InterpolationMatrix::computeIntegralDenoW(ElementLocator& elementLocator)
   {
     MEDCouplingFieldDouble *source_triangle_surf = _source_support->getMeasureField();
     _deno_multiply.resize(_coeffs.size());
@@ -291,32 +336,58 @@ namespace ParaMEDMEM
   /*!
    * Nothing to do because surface computation is on working side.
    */
-  void InterpolationMatrix::computeIntegralDenoL(GlobalizerMeshLazySide& globalizer)
+  void InterpolationMatrix::computeIntegralDenoL(ElementLocator& elementLocator)
   {
   }

-  void InterpolationMatrix::computeGlobConstraintDenoW(GlobalizerMeshWorkingSide& globalizer)
+  void InterpolationMatrix::computeGlobConstraintDenoW(ElementLocator& elementLocator)
   {
     computeGlobalColSum(_deno_multiply);
-    computeGlobalRowSum(globalizer,_deno_reverse_multiply);
-  }
-
-  void InterpolationMatrix::computeGlobConstraintDenoL(GlobalizerMeshLazySide& globalizer)
-  {
-    globalizer.recvFromWorkingSide();
-    globalizer.sendToWorkingSide();
+    computeGlobalRowSum(elementLocator,_deno_reverse_multiply,_deno_multiply);
   }

-  void InterpolationMatrix::computeGlobalRowSum(GlobalizerMeshWorkingSide& globalizer, std::vector<std::vector<double> >& denoStrorage)
+  void InterpolationMatrix::computeGlobalRowSum(ElementLocator& elementLocator, std::vector<std::vector<double> >& denoStrorage, std::vector<std::vector<double> >& denoStrorageInv)
   {
     //stores id in distant procs sorted by lazy procs connected with
     vector< vector<int> > rowsPartialSumI;
+    //stores, for each connected lazy proc, whether global info is available and, if so, the policy
+    vector<int> policyPartial;
     //stores the corresponding values.
     vector< vector<double> > rowsPartialSumD;
-    computeLocalRowSum(globalizer.getProcIdsInInteraction(),rowsPartialSumI,rowsPartialSumD);
-    globalizer.sendSumToLazySide(rowsPartialSumI,rowsPartialSumD);
-    globalizer.recvSumFromLazySide(rowsPartialSumD);
-    divideByGlobalRowSum(globalizer.getProcIdsInInteraction(),rowsPartialSumI,rowsPartialSumD,denoStrorage);
+    elementLocator.recvPolicyFromLazySideW(policyPartial);
+    int pol1=mergePolicies(policyPartial);
+    if(pol1==ElementLocator::NO_POST_TREATMENT_POLICY)
+      {
+        computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD);
+        elementLocator.sendSumToLazySideW(rowsPartialSumI,rowsPartialSumD);
+        elementLocator.recvSumFromLazySideW(rowsPartialSumD);
+      }
+    else if(pol1==ElementLocator::CUMULATIVE_POLICY)
+      {
+        //updateWithNewAdditionnalElements(addingElements);
+        //stores, for each connected lazy proc, the ids in global mode if they exist (regarding policyPartial). This array has exactly the size of rowsPartialSumI,
+        //if policyPartial has CUMULATIVE_POLICY in each.
+ vector< vector > globalIdsPartial; + computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD); + elementLocator.sendLocalIdsToLazyProcsW(rowsPartialSumI); + elementLocator.recvCandidatesGlobalIdsFromLazyProcsW(globalIdsPartial); + std::vector< std::vector > addingElements; + findAdditionnalElements(elementLocator,addingElements,rowsPartialSumI,globalIdsPartial); + addGhostElements(elementLocator.getDistantProcIds(),addingElements); + rowsPartialSumI.clear(); + globalIdsPartial.clear(); + computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD); + elementLocator.sendLocalIdsToLazyProcsW(rowsPartialSumI); + elementLocator.recvGlobalIdsFromLazyProcsW(rowsPartialSumI,globalIdsPartial); + // + elementLocator.sendSumToLazySideW(rowsPartialSumI,rowsPartialSumD); + elementLocator.recvSumFromLazySideW(rowsPartialSumD); + mergeRowSum3(globalIdsPartial,rowsPartialSumD); + mergeCoeffs(elementLocator.getDistantProcIds(),rowsPartialSumI,globalIdsPartial,denoStrorageInv); + } + else + throw INTERP_KERNEL::Exception("Not managed policy detected : not implemented !"); + divideByGlobalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD,denoStrorage); } /*! @@ -345,15 +416,225 @@ namespace ParaMEDMEM resPerProcI[id].push_back((*iter2).second); resPerProcD[id].push_back(res[iter2-mapping.begin()]); } - /* - for(map, int >::const_iterator iter2=_col_offsets.begin();iter2!=_col_offsets.end();iter2++) + } + + /*! + * This method is only usable when CUMULATIVE_POLICY detected. This method finds elements ids (typically nodes) lazy side that + * are not present in columns of 'this' and that should regarding cumulative merge of elements regarding their global ids. + */ + void InterpolationMatrix::findAdditionnalElements(ElementLocator& elementLocator, std::vector >& elementsToAdd, + const std::vector >& resPerProcI, const std::vector >& globalIdsPartial) + { + std::set globalIds; + int nbLazyProcs=globalIdsPartial.size(); + for(int i=0;i tmp(globalIds.size()); + std::copy(globalIds.begin(),globalIds.end(),tmp.begin()); + globalIds.clear(); + elementLocator.sendCandidatesForAddElementsW(tmp); + elementLocator.recvAddElementsFromLazyProcsW(elementsToAdd); + } + + void InterpolationMatrix::addGhostElements(const std::vector& distantProcs, const std::vector >& elementsToAdd) + { + std::vector< std::vector< std::map > > data1; + std::vector data2; + serializeMe(data1,data2); + initialize(); + int nbOfDistProcs=distantProcs.size(); + for(int i=0;i::iterator,bool> isIns=procsSet.insert((*iter2).first.first); - if(isIns.second) - id=std::find(distantProcs.begin(),distantProcs.end(),(*iter2).first.first)-distantProcs.begin(); - resPerProcI[id].push_back((*iter2).first.second); - resPerProcD[id].push_back(res[(*iter2).second]); - }*/ + int procId=distantProcs[i]; + const std::vector& eltsForThisProc=elementsToAdd[i]; + if(!eltsForThisProc.empty()) + { + std::vector::iterator iter1=std::find(data2.begin(),data2.end(),procId); + std::map *toFeed=0; + if(iter1!=data2.end()) + {//to test + int rank=iter1-data2.begin(); + toFeed=&(data1[rank].back()); + } + else + { + iter1=std::lower_bound(data2.begin(),data2.end(),procId); + int rank=iter1-data2.begin(); + data2.insert(iter1,procId); + std::vector< std::map > tmp(data1.front().size()); + data1.insert(data1.begin()+rank,tmp); + toFeed=&(data1[rank].back()); + } + for(std::vector::const_iterator iter2=eltsForThisProc.begin();iter2!=eltsForThisProc.end();iter2++) + (*toFeed)[*iter2]=0.; + } + } + // 
+ nbOfDistProcs=data2.size(); + for(int j=0;j& policyPartial) + { + if(policyPartial.empty()) + return ElementLocator::NO_POST_TREATMENT_POLICY; + int ref=policyPartial[0]; + std::vector::const_iterator iter1=std::find_if(policyPartial.begin(),policyPartial.end(),std::bind2nd(std::not_equal_to(),ref)); + if(iter1!=policyPartial.end()) + { + std::ostringstream msg; msg << "Incompatible policies between lazy procs each other : proc # " << iter1-policyPartial.begin(); + throw INTERP_KERNEL::Exception(msg.str().c_str()); + } + return ref; + } + + /*! + * This method introduce global ids aspects in computed 'rowsPartialSumD'. + * As precondition rowsPartialSumD.size()==policyPartial.size()==globalIdsPartial.size(). Foreach i in [0;rowsPartialSumD.size() ) rowsPartialSumD[i].size()==globalIdsPartial[i].size() + * @param rowsPartialSumD : in parameter, Partial row sum computed for each lazy procs connected with. + * @param rowsPartialSumI : in parameter, Corresponding local ids for each lazy procs connected with. + * @param globalIdsPartial : in parameter, the global numbering, of elements connected with. + * @param globalIdsLazySideInteraction : out parameter, constituted from all global ids of lazy procs connected with. + * @para sumCorresponding : out parameter, relative to 'globalIdsLazySideInteraction' + */ + void InterpolationMatrix::mergeRowSum(const std::vector< std::vector >& rowsPartialSumD, const std::vector< std::vector >& globalIdsPartial, + std::vector& globalIdsLazySideInteraction, std::vector& sumCorresponding) + { + std::map sumToReturn; + int nbLazyProcs=rowsPartialSumD.size(); + for(int i=0;i& rowSumOfP=rowsPartialSumD[i]; + const std::vector& globalIdsOfP=globalIdsPartial[i]; + std::vector::const_iterator iter1=rowSumOfP.begin(); + std::vector::const_iterator iter2=globalIdsOfP.begin(); + for(;iter1!=rowSumOfP.end();iter1++,iter2++) + sumToReturn[*iter2]+=*iter1; + } + // + int lgth=sumToReturn.size(); + globalIdsLazySideInteraction.resize(lgth); + sumCorresponding.resize(lgth); + std::vector::iterator iter3=globalIdsLazySideInteraction.begin(); + std::vector::iterator iter4=sumCorresponding.begin(); + for(std::map::const_iterator iter5=sumToReturn.begin();iter5!=sumToReturn.end();iter5++,iter3++,iter4++) + { + *iter3=(*iter5).first; + *iter4=(*iter5).second; + } + } + + /*! 
+ * This method simply reorganize the result contained in 'sumCorresponding' computed by lazy side into 'rowsPartialSumD' with help of 'globalIdsPartial' and 'globalIdsLazySideInteraction' + * + * @param globalIdsPartial : in parameter, global ids sorted by lazy procs + * @param rowsPartialSumD : in/out parameter, with exactly the same size as 'globalIdsPartial' + * @param globalIdsLazySideInteraction : in parameter that represents ALL the global ids of every lazy procs in interaction + * @param sumCorresponding : in parameter with same size as 'globalIdsLazySideInteraction' that stores the corresponding sum of 'globalIdsLazySideInteraction' + */ + void InterpolationMatrix::mergeRowSum2(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD, + const std::vector& globalIdsLazySideInteraction, const std::vector& sumCorresponding) + { + std::map acc; + std::vector::const_iterator iter1=globalIdsLazySideInteraction.begin(); + std::vector::const_iterator iter2=sumCorresponding.begin(); + for(;iter1!=globalIdsLazySideInteraction.end();iter1++,iter2++) + acc[*iter1]=*iter2; + // + int nbLazyProcs=globalIdsPartial.size(); + for(int i=0;i& tmp1=globalIdsPartial[i]; + std::vector& tmp2=rowsPartialSumD[i]; + std::vector::const_iterator iter3=tmp1.begin(); + std::vector::iterator iter4=tmp2.begin(); + for(;iter3!=tmp1.end();iter3++,iter4++) + *iter4=acc[*iter3]; + } + } + + void InterpolationMatrix::mergeRowSum3(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD) + { + std::map sum; + std::vector< std::vector >::const_iterator iter1=globalIdsPartial.begin(); + std::vector< std::vector >::iterator iter2=rowsPartialSumD.begin(); + for(;iter1!=globalIdsPartial.end();iter1++,iter2++) + { + std::vector::const_iterator iter3=(*iter1).begin(); + std::vector::const_iterator iter4=(*iter2).begin(); + for(;iter3!=(*iter1).end();iter3++,iter4++) + sum[*iter3]+=*iter4; + } + iter2=rowsPartialSumD.begin(); + for(iter1=globalIdsPartial.begin();iter1!=globalIdsPartial.end();iter1++,iter2++) + { + std::vector::const_iterator iter3=(*iter1).begin(); + std::vector::iterator iter4=(*iter2).begin(); + for(;iter3!=(*iter1).end();iter3++,iter4++) + *iter4=sum[*iter3]; + } + } + + /*! + * This method updates this->_coeffs attribute in order to take into account hidden (because having the same global number) similar nodes in _coeffs array. + * If in this->_coeffs two distant element id have the same global id their values will be replaced for each by the sum of the two. 
+ * @param procsInInteraction input parameter : specifies the procId in absolute of distant lazy procs in interaction with + * @param rowsPartialSumI input parameter : local ids of distant lazy procs elements in interaction with + * @param globalIdsPartial input parameter : global ids of distant lazy procs elements in interaction with + */ + void InterpolationMatrix::mergeCoeffs(const std::vector& procsInInteraction, const std::vector< std::vector >& rowsPartialSumI, + const std::vector >& globalIdsPartial, std::vector >& denoStrorageInv) + { + //preparing fast access structures + std::map procT; + int localProcId=0; + for(std::vector::const_iterator iter1=procsInInteraction.begin();iter1!=procsInInteraction.end();iter1++,localProcId++) + procT[*iter1]=localProcId; + int size=procsInInteraction.size(); + std::vector > localToGlobal(size); + for(int i=0;i& myLocalToGlobal=localToGlobal[i]; + const std::vector& locals=rowsPartialSumI[i]; + const std::vector& globals=globalIdsPartial[i]; + std::vector::const_iterator iter3=locals.begin(); + std::vector::const_iterator iter4=globals.begin(); + for(;iter3!=locals.end();iter3++,iter4++) + myLocalToGlobal[*iter3]=*iter4; + } + // + const vector >& mapping=_mapping.getSendingIds(); + std::map globalIdVal; + //accumulate for same global id on lazy part. + for(vector > >::iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++) + for(vector >::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++) + { + const std::pair& distantLocalLazyId=mapping[(*iter2).first]; + int localLazyProcId=procT[distantLocalLazyId.first]; + int globalDistantLazyId=localToGlobal[localLazyProcId][distantLocalLazyId.second]; + globalIdVal[globalDistantLazyId]+=(*iter2).second; + } + //perform merge + std::vector >::iterator iter3=denoStrorageInv.begin(); + for(vector > >::iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++,iter3++) + { + double val=(*iter3).back(); + (*iter3).resize((*iter1).size()); + std::vector::iterator iter4=(*iter3).begin(); + for(vector >::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++,iter4++) + { + const std::pair& distantLocalLazyId=mapping[(*iter2).first]; + int localLazyProcId=procT[distantLocalLazyId.first]; + int globalDistantLazyId=localToGlobal[localLazyProcId][distantLocalLazyId.second]; + double newVal=globalIdVal[globalDistantLazyId]; + if((*iter2).second!=0.) 
+ (*iter4)=val*newVal/(*iter2).second; + else + (*iter4)=std::numeric_limits::max(); + (*iter2).second=newVal; + } + } } void InterpolationMatrix::divideByGlobalRowSum(const std::vector& distantProcs, const std::vector >& resPerProcI, @@ -361,18 +642,13 @@ namespace ParaMEDMEM { map fastSums; int procId=0; - const vector >& mapping=_mapping.getSendingIds(); - map< int, map > distIdToLocId; - for(map< pair,int >::iterator iter8=_col_offsets.begin();iter8!=_col_offsets.end();iter8++) - distIdToLocId[(*iter8).first.first][mapping[(*iter8).second].second]=(*iter8).first.second; - for(vector::const_iterator iter1=distantProcs.begin();iter1!=distantProcs.end();iter1++,procId++) { const std::vector& currentProcI=resPerProcI[procId]; const std::vector& currentProcD=resPerProcD[procId]; vector::const_iterator iter3=currentProcD.begin(); for(vector::const_iterator iter2=currentProcI.begin();iter2!=currentProcI.end();iter2++,iter3++) - fastSums[_col_offsets[std::make_pair(*iter1,distIdToLocId[*iter1][*iter2])]]=*iter3; + fastSums[_col_offsets[std::make_pair(*iter1,*iter2)]]=*iter3; } deno.resize(_coeffs.size()); vector >::iterator iter6=deno.begin(); @@ -399,6 +675,17 @@ namespace ParaMEDMEM } } + void InterpolationMatrix::resizeGlobalColSum(std::vector >& denoStrorage) + { + vector >::iterator iter2=denoStrorage.begin(); + for(vector > >::const_iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++,iter2++) + { + double val=(*iter2).back(); + (*iter2).resize((*iter1).size()); + std::fill((*iter2).begin(),(*iter2).end(),val); + } + } + // ================================================================== // The call to this method updates the arrays on the target side // so that they know which amount of data from which processor they diff --git a/src/ParaMEDMEM/InterpolationMatrix.hxx b/src/ParaMEDMEM/InterpolationMatrix.hxx index 53a2829d0..e0dfceab3 100644 --- a/src/ParaMEDMEM/InterpolationMatrix.hxx +++ b/src/ParaMEDMEM/InterpolationMatrix.hxx @@ -26,8 +26,7 @@ namespace ParaMEDMEM { - class GlobalizerMeshWorkingSide; - class GlobalizerMeshLazySide; + class ElementLocator; class InterpolationMatrix : public INTERP_KERNEL::InterpolationOptions, public DECOptions @@ -43,27 +42,41 @@ namespace ParaMEDMEM virtual ~InterpolationMatrix(); void addContribution(MEDCouplingPointSet& distant_support, int iproc_distant, - int* distant_elems, const std::string& srcMeth, const std::string& targetMeth); - void finishContributionW(GlobalizerMeshWorkingSide& globalizer); - void finishContributionL(GlobalizerMeshLazySide& globalizer); + const int* distant_elems, const std::string& srcMeth, const std::string& targetMeth); + void finishContributionW(ElementLocator& elementLocator); + void finishContributionL(ElementLocator& elementLocator); void multiply(MEDCouplingFieldDouble& field) const; void transposeMultiply(MEDCouplingFieldDouble& field)const; void prepare(); int getNbRows() const { return _row_offsets.size(); } MPIAccessDEC* getAccessDEC() { return _mapping.getAccessDEC(); } private: - void computeConservVolDenoW(GlobalizerMeshWorkingSide& globalizer); - void computeIntegralDenoW(GlobalizerMeshWorkingSide& globalizer); - void computeGlobConstraintDenoW(GlobalizerMeshWorkingSide& globalizer); - void computeConservVolDenoL(GlobalizerMeshLazySide& globalizer); - void computeIntegralDenoL(GlobalizerMeshLazySide& globalizer); - void computeGlobConstraintDenoL(GlobalizerMeshLazySide& globalizer); + void computeConservVolDenoW(ElementLocator& elementLocator); + void computeIntegralDenoW(ElementLocator& 
elementLocator); + void computeGlobConstraintDenoW(ElementLocator& elementLocator); + void computeConservVolDenoL(ElementLocator& elementLocator); + void computeIntegralDenoL(ElementLocator& elementLocator); void computeLocalColSum(std::vector& res) const; void computeLocalRowSum(const std::vector& distantProcs, std::vector >& resPerProcI, std::vector >& resPerProcD) const; - void computeGlobalRowSum(GlobalizerMeshWorkingSide& globalizer, std::vector >& denoStrorage); + void computeGlobalRowSum(ElementLocator& elementLocator, std::vector >& denoStrorage, std::vector >& denoStrorageInv); void computeGlobalColSum(std::vector >& denoStrorage); + void resizeGlobalColSum(std::vector >& denoStrorage); + void fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map >& values, MEDCouplingFieldDouble *surf); + void serializeMe(std::vector< std::vector< std::map > >& data1, std::vector& data2) const; + void initialize(); + void findAdditionnalElements(ElementLocator& elementLocator, std::vector >& elementsToAdd, + const std::vector >& resPerProcI, const std::vector >& globalIdsPartial); + void addGhostElements(const std::vector& distantProcs, const std::vector >& elementsToAdd); + int mergePolicies(const std::vector& policyPartial); + void mergeRowSum(const std::vector< std::vector >& rowsPartialSumD, const std::vector< std::vector >& globalIdsPartial, + std::vector& globalIdsLazySideInteraction, std::vector& sumCorresponding); + void mergeRowSum2(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD, + const std::vector& globalIdsLazySideInteraction, const std::vector& sumCorresponding); + void mergeRowSum3(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD); + void mergeCoeffs(const std::vector& procsInInteraction, const std::vector< std::vector >& rowsPartialSumI, + const std::vector >& globalIdsPartial, std::vector >& denoStrorageInv); void divideByGlobalRowSum(const std::vector& distantProcs, const std::vector >& resPerProcI, const std::vector >& resPerProcD, std::vector >& deno); private: diff --git a/src/ParaMEDMEM/IntersectionDEC.cxx b/src/ParaMEDMEM/IntersectionDEC.cxx index 852ae39cf..b080ab51f 100644 --- a/src/ParaMEDMEM/IntersectionDEC.cxx +++ b/src/ParaMEDMEM/IntersectionDEC.cxx @@ -28,7 +28,6 @@ #include "InterpolationMatrix.hxx" #include "IntersectionDEC.hxx" #include "ElementLocator.hxx" -#include "GlobalizerMesh.hxx" namespace ParaMEDMEM { @@ -161,7 +160,7 @@ namespace ParaMEDMEM for (int i=0; i<_target_group->size(); i++) { // int idistant_proc = (i+_source_group->myRank())%_target_group->size(); - int idistant_proc=i; + int idistant_proc=i; //gathers pieces of the target meshes that can intersect the local mesh locator.exchangeMesh(idistant_proc,distant_mesh,distant_ids); @@ -178,8 +177,7 @@ namespace ParaMEDMEM distant_ids=0; } } - GlobalizerMeshWorkingSide globalizer(locator.getCommunicator(),_local_field->getField(),distantMeth,locator.getDistantProcIds()); - _interpolation_matrix->finishContributionW(globalizer); + _interpolation_matrix->finishContributionW(locator); } if (_target_group->containsMyRank()) @@ -207,8 +205,7 @@ namespace ParaMEDMEM distant_ids=0; } } - GlobalizerMeshLazySide globalizer(locator.getCommunicator(),_local_field->getField(),locator.getDistantProcIds()); - _interpolation_matrix->finishContributionL(globalizer); + _interpolation_matrix->finishContributionL(locator); } _interpolation_matrix->prepare(); } diff --git 
a/src/ParaMEDMEM/Makefile.am b/src/ParaMEDMEM/Makefile.am
index f164a3c7e..b2d0e0bd5 100644
--- a/src/ParaMEDMEM/Makefile.am
+++ b/src/ParaMEDMEM/Makefile.am
@@ -54,7 +54,6 @@ InterpolationMatrix.hxx\
 IntersectionDEC.hxx\
 ExplicitCoincidentDEC.hxx\
 ElementLocator.hxx\
-GlobalizerMesh.hxx\
 ExplicitMapping.hxx\
 ICoCoField.hxx \
 ICoCoMEDField.hxx \
@@ -75,7 +74,6 @@ StructuredCoincidentDEC.cxx\
 ExplicitCoincidentDEC.cxx\
 IntersectionDEC.cxx\
 ElementLocator.cxx\
-GlobalizerMesh.cxx\
 MPIAccessDEC.cxx \
 TimeInterpolator.cxx \
 LinearTimeInterpolator.cxx\
diff --git a/src/ParaMEDMEM/MxN_Mapping.cxx b/src/ParaMEDMEM/MxN_Mapping.cxx
index e1fd35aa2..a74d64089 100644
--- a/src/ParaMEDMEM/MxN_Mapping.cxx
+++ b/src/ParaMEDMEM/MxN_Mapping.cxx
@@ -57,6 +57,12 @@ namespace ParaMEDMEM
       _send_proc_offsets[i+1]++;
   }

+  void MxN_Mapping::initialize()
+  {
+    _sending_ids.clear();
+    std::fill(_send_proc_offsets.begin(),_send_proc_offsets.end(),0);
+  }
+
   void MxN_Mapping::prepareSendRecv()
   {
     CommInterface comm_interface=_union_group->getCommInterface();
@@ -288,7 +294,6 @@ namespace ParaMEDMEM
     delete[] recvdispls;
   }

-
   ostream & operator<< (ostream & f ,const AllToAllMethod & alltoallmethod )
   {
     switch (alltoallmethod)
diff --git a/src/ParaMEDMEM/MxN_Mapping.hxx b/src/ParaMEDMEM/MxN_Mapping.hxx
index e87a05297..198859e5a 100644
--- a/src/ParaMEDMEM/MxN_Mapping.hxx
+++ b/src/ParaMEDMEM/MxN_Mapping.hxx
@@ -43,7 +43,9 @@ namespace ParaMEDMEM
     void reverseSendRecv(double* field, MEDCouplingFieldDouble& field) const ;
     //
-    const std::vector<std::pair<int,int> >& getSendingIds() const { return _sending_ids; }//tmp
+    const std::vector<std::pair<int,int> >& getSendingIds() const { return _sending_ids; }
+    const std::vector<int>& getSendProcsOffsets() const { return _send_proc_offsets; }
+    void initialize();
     MPIAccessDEC* getAccessDEC(){ return _access_DEC; }
   private :
diff --git a/src/ParaMEDMEM/ParaFIELD.cxx b/src/ParaMEDMEM/ParaFIELD.cxx
index 8750126f9..6afb23911 100644
--- a/src/ParaMEDMEM/ParaFIELD.cxx
+++ b/src/ParaMEDMEM/ParaFIELD.cxx
@@ -164,6 +164,44 @@ namespace ParaMEDMEM
     delete data_channel;
   }

+
+  /*!
+   * This method returns, if it exists, an array with a single component and as many tuples as _field has.
+   * For every element on which this->_field lies, this array gives its global number, provided this->_field is nodal.
+   * For example, if _field is a nodal field, the returned array holds the nodal global numbers.
+   * The content of this method is used to inform the working side that data received by the lazy side has to be accumulated.
+ */ + DataArrayInt* ParaFIELD::returnCumulativeGlobalNumbering() const + { + if(!_field) + return 0; + TypeOfField type=_field->getTypeOfField(); + switch(type) + { + case ON_CELLS: + return 0; + case ON_NODES: + return _support->getGlobalNumberingNodeDA(); + default: + return 0; + } + } + + DataArrayInt* ParaFIELD::returnGlobalNumbering() const + { + if(!_field) + return 0; + TypeOfField type=_field->getTypeOfField(); + switch(type) + { + case ON_CELLS: + return _support->getGlobalNumberingCellDA(); + case ON_NODES: + return _support->getGlobalNumberingNodeDA(); + default: + return 0; + } + } int ParaFIELD::nbComponents() const { diff --git a/src/ParaMEDMEM/ParaFIELD.hxx b/src/ParaMEDMEM/ParaFIELD.hxx index 5b89e2e8f..a9bcf57b8 100644 --- a/src/ParaMEDMEM/ParaFIELD.hxx +++ b/src/ParaMEDMEM/ParaFIELD.hxx @@ -23,11 +23,12 @@ namespace ParaMEDMEM { - + class DataArrayInt; class ParaMESH; class ProcessorGroup; class MEDCouplingFieldDouble; class ComponentTopology; + class Topology; class ParaFIELD { @@ -42,6 +43,8 @@ namespace ParaMEDMEM void synchronizeTarget( ParaMEDMEM::ParaFIELD* source_field); void synchronizeSource( ParaMEDMEM::ParaFIELD* target_field); MEDCouplingFieldDouble* getField() const { return _field; } + DataArrayInt* returnCumulativeGlobalNumbering() const; + DataArrayInt* returnGlobalNumbering() const; Topology* getTopology() const { return _topology; } ParaMESH* getSupport() const { return _support; } int nbComponents() const; diff --git a/src/ParaMEDMEM/ParaMESH.cxx b/src/ParaMEDMEM/ParaMESH.cxx index 9bc8c6266..7f59d9120 100644 --- a/src/ParaMEDMEM/ParaMESH.cxx +++ b/src/ParaMEDMEM/ParaMESH.cxx @@ -78,6 +78,30 @@ namespace ParaMEDMEM } } + void ParaMESH::setNodeGlobal(DataArrayInt *nodeGlobal) + { + if(nodeGlobal!=_node_global) + { + if(_node_global) + _node_global->decrRef(); + _node_global=nodeGlobal; + if(_node_global) + _node_global->incrRef(); + } + } + + void ParaMESH::setCellGlobal(DataArrayInt *cellGlobal) + { + if(cellGlobal!=_cell_global) + { + if(_cell_global) + _cell_global->decrRef(); + _cell_global=cellGlobal; + if(_cell_global) + _cell_global->incrRef(); + } + } + ParaMESH::~ParaMESH() { if(_cell_mesh) @@ -91,8 +115,6 @@ namespace ParaMEDMEM _cell_global->decrRef(); if(_face_global) _face_global->decrRef(); - if(_node_global) - _node_global->decrRef(); delete _explicit_topology; } diff --git a/src/ParaMEDMEM/ParaMESH.hxx b/src/ParaMEDMEM/ParaMESH.hxx index dc433c1d0..f713459dd 100644 --- a/src/ParaMEDMEM/ParaMESH.hxx +++ b/src/ParaMEDMEM/ParaMESH.hxx @@ -45,15 +45,20 @@ namespace ParaMEDMEM const ProcessorGroup& proc_group, const std::string& name); virtual ~ParaMESH(); + void setNodeGlobal(DataArrayInt *nodeGlobal); + void setCellGlobal(DataArrayInt *cellGlobal); Topology* getTopology() const { return _explicit_topology; } bool isStructured() const { return _cell_mesh->isStructured(); } MEDCouplingPointSet *getCellMesh() const { return _cell_mesh; } MEDCouplingPointSet *getFaceMesh() const { return _face_mesh; } BlockTopology* getBlockTopology() const { return _block_topology; } - const int* getGlobalNumberingNode() const { return _node_global->getConstPointer(); } - const int* getGlobalNumberingFace() const { return _face_global->getConstPointer(); } - const int* getGlobalNumberingCell() const { return _cell_global->getConstPointer(); } + DataArrayInt* getGlobalNumberingNodeDA() const { if(_node_global) _node_global->incrRef(); return _node_global; } + DataArrayInt* getGlobalNumberingFaceDA() const { if(_face_global) _face_global->incrRef(); return 
diff --git a/src/ParaMEDMEM/ParaMESH.cxx b/src/ParaMEDMEM/ParaMESH.cxx
index 9bc8c6266..7f59d9120 100644
--- a/src/ParaMEDMEM/ParaMESH.cxx
+++ b/src/ParaMEDMEM/ParaMESH.cxx
@@ -78,6 +78,30 @@ namespace ParaMEDMEM
       }
   }
 
+  void ParaMESH::setNodeGlobal(DataArrayInt *nodeGlobal)
+  {
+    if(nodeGlobal!=_node_global)
+      {
+        if(_node_global)
+          _node_global->decrRef();
+        _node_global=nodeGlobal;
+        if(_node_global)
+          _node_global->incrRef();
+      }
+  }
+
+  void ParaMESH::setCellGlobal(DataArrayInt *cellGlobal)
+  {
+    if(cellGlobal!=_cell_global)
+      {
+        if(_cell_global)
+          _cell_global->decrRef();
+        _cell_global=cellGlobal;
+        if(_cell_global)
+          _cell_global->incrRef();
+      }
+  }
+
   ParaMESH::~ParaMESH()
   {
     if(_cell_mesh)
@@ -91,8 +115,6 @@ namespace ParaMEDMEM
       _cell_global->decrRef();
     if(_face_global)
       _face_global->decrRef();
-    if(_node_global)
-      _node_global->decrRef();
     delete _explicit_topology;
   }
diff --git a/src/ParaMEDMEM/ParaMESH.hxx b/src/ParaMEDMEM/ParaMESH.hxx
index dc433c1d0..f713459dd 100644
--- a/src/ParaMEDMEM/ParaMESH.hxx
+++ b/src/ParaMEDMEM/ParaMESH.hxx
@@ -45,15 +45,20 @@ namespace ParaMEDMEM
              const ProcessorGroup& proc_group, const std::string& name);
 
     virtual ~ParaMESH();
+    void setNodeGlobal(DataArrayInt *nodeGlobal);
+    void setCellGlobal(DataArrayInt *cellGlobal);
     Topology* getTopology() const { return _explicit_topology; }
     bool isStructured() const { return _cell_mesh->isStructured(); }
     MEDCouplingPointSet *getCellMesh() const { return _cell_mesh; }
     MEDCouplingPointSet *getFaceMesh() const { return _face_mesh; }
     BlockTopology* getBlockTopology() const { return _block_topology; }
-    const int* getGlobalNumberingNode() const { return _node_global->getConstPointer(); }
-    const int* getGlobalNumberingFace() const { return _face_global->getConstPointer(); }
-    const int* getGlobalNumberingCell() const { return _cell_global->getConstPointer(); }
+    DataArrayInt* getGlobalNumberingNodeDA() const { if(_node_global) _node_global->incrRef(); return _node_global; }
+    DataArrayInt* getGlobalNumberingFaceDA() const { if(_face_global) _face_global->incrRef(); return _face_global; }
+    DataArrayInt* getGlobalNumberingCellDA() const { if(_cell_global) _cell_global->incrRef(); return _cell_global; }
+    const int* getGlobalNumberingNode() const { if(_node_global) return _node_global->getConstPointer(); return 0; }
+    const int* getGlobalNumberingFace() const { if(_face_global) return _face_global->getConstPointer(); return 0; }
+    const int* getGlobalNumberingCell() const { if(_cell_global) return _cell_global->getConstPointer(); return 0; }
 
   private:
     //mesh object underlying the ParaMESH object
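setNodeGlobal()/setCellGlobal() follow the usual shared-ownership convention of the library: the mesh takes its own reference, so a caller that created the array with New() still has to drop its creation reference. A sketch of that hand-off, with a hypothetical helper and purely illustrative ids:

  #include "ParaMESH.hxx"
  #include "MEDCouplingMemArray.hxx"

  // Sketch: attach a node global numbering to an existing ParaMESH.
  void attachNodeNumbering(ParaMEDMEM::ParaMESH *pmesh)
  {
    ParaMEDMEM::DataArrayInt *da=ParaMEDMEM::DataArrayInt::New();
    da->alloc(3,1);                 // 3 tuples (nodes), 1 component
    int *p=da->getPointer();
    p[0]=4; p[1]=2; p[2]=5;         // illustrative global ids
    pmesh->setNodeGlobal(da);       // ParaMESH incrRef's the array internally
    da->decrRef();                  // drop the creation reference; pmesh keeps its own
  }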
diff --git a/src/ParaMEDMEM/Test/ParaMEDMEMTest_IntersectionDEC.cxx b/src/ParaMEDMEM/Test/ParaMEDMEMTest_IntersectionDEC.cxx
index 40b1c0518..8915ae507 100644
--- a/src/ParaMEDMEM/Test/ParaMEDMEMTest_IntersectionDEC.cxx
+++ b/src/ParaMEDMEM/Test/ParaMEDMEMTest_IntersectionDEC.cxx
@@ -33,6 +33,7 @@
 #include "MEDLoader.hxx"
 
 #include
+#include
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -854,6 +855,7 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
     }
   else
     {
+      const char targetMeshName[]="target mesh";
       if(rank==2)
        {
          double coords[10]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2 };
@@ -869,6 +871,12 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
          std::copy(coords,coords+10,myCoords->getPointer());
          mesh->setCoords(myCoords);
          myCoords->decrRef();
+         paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+         DataArrayInt *da=DataArrayInt::New();
+         const int globalNumberingP2[5]={0,1,2,3,4};
+         da->useArray(globalNumberingP2,false,CPP_DEALLOC,5,1);
+         paramesh->setNodeGlobal(da);
+         da->decrRef();
        }
       if(rank==3)
        {
@@ -884,6 +892,12 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
          std::copy(coords,coords+6,myCoords->getPointer());
          mesh->setCoords(myCoords);
          myCoords->decrRef();
+         paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+         DataArrayInt *da=DataArrayInt::New();
+         const int globalNumberingP3[3]={4,2,5};
+         da->useArray(globalNumberingP3,false,CPP_DEALLOC,3,1);
+         paramesh->setNodeGlobal(da);
+         da->decrRef();
        }
       if(rank==4)
        {
@@ -900,9 +914,14 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
          std::copy(coords,coords+12,myCoords->getPointer());
          mesh->setCoords(myCoords);
          myCoords->decrRef();
+         paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+         DataArrayInt *da=DataArrayInt::New();
+         const int globalNumberingP4[6]={3,6,7,4,8,5};
+         da->useArray(globalNumberingP4,false,CPP_DEALLOC,6,1);
+         paramesh->setNodeGlobal(da);
+         da->decrRef();
        }
       ParaMEDMEM::ComponentTopology comptopo;
-      paramesh=new ParaMESH(mesh,*target_group,"target mesh");
       parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
       parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
       parafieldP0->getField()->setNature(ConservativeVolumic);
@@ -917,6 +936,16 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
       dec.synchronize();
       dec.setForcedRenormalization(false);
       dec.sendData();
+      dec.recvData();
+      const double *valueP0=parafieldP0->getField()->getArray()->getPointer();
+      if(rank==0)
+        {
+          CPPUNIT_ASSERT_DOUBLES_EQUAL(34.42857143,valueP0[0],1e-7);
+        }
+      if(rank==1)
+        {
+          CPPUNIT_ASSERT_DOUBLES_EQUAL(44.,valueP0[0],1e-7);
+        }
     }
   else
     {
@@ -925,10 +954,32 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
       dec.synchronize();
       dec.setForcedRenormalization(false);
       dec.recvData();
-      /*const double *res=parafield->getField()->getArray()->getConstPointer();
-      const double *expected=targetResults[rank-nproc_source];
-      CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
-      CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);*/
+      const double *res=parafieldP1->getField()->getArray()->getConstPointer();
+      if(rank==2)
+        {
+          const double expectP2[5]={39.0, 31.0, 31.0, 47.0, 39.0};
+          CPPUNIT_ASSERT_EQUAL(5,parafieldP1->getField()->getNumberOfTuples());
+          CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+          for(int kk=0;kk<5;kk++)
+            CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP2[kk],res[kk],1e-12);
+        }
+      if(rank==3)
+        {
+          const double expectP3[3]={39.0, 31.0, 31.0};
+          CPPUNIT_ASSERT_EQUAL(3,parafieldP1->getField()->getNumberOfTuples());
+          CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+          for(int kk=0;kk<3;kk++)
+            CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP3[kk],res[kk],1e-12);
+        }
+      if(rank==4)
+        {
+          const double expectP4[6]={47.0, 47.0, 47.0, 39.0, 39.0, 31.0};
+          CPPUNIT_ASSERT_EQUAL(6,parafieldP1->getField()->getNumberOfTuples());
+          CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+          for(int kk=0;kk<6;kk++)
+            CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP4[kk],res[kk],1e-12);
+        }
+      dec.sendData();
     }
   //
   delete parafieldP0;
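The expected arrays encode what the patch is after: a node duplicated across target subdomains (global id 4, for instance, appears on ranks 2, 3 and 4) must carry one consistent value once contributions are accumulated by global number on the lazy side. A sketch of that invariant as a standalone check; the helper and its inputs are illustrative, not taken from the test:

  #include <cassert>
  #include <cmath>
  #include <map>

  // Sketch: nodes sharing a global id must hold identical values across subdomains.
  // 'seen' accumulates globalId->value pairs over successive per-rank calls.
  void checkSharedNodesConsistent(std::map<int,double>& seen,
                                  const int *globalIds, const double *values, int nbNodes)
  {
    for(int i=0;i<nbNodes;i++)
      {
        std::map<int,double>::const_iterator it=seen.find(globalIds[i]);
        if(it!=seen.end())
          assert(std::fabs(it->second-values[i])<1e-12); // same global node, same value
        else
          seen[globalIds[i]]=values[i];
      }
  }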