From: ageay
Date: Tue, 5 Apr 2011 09:38:12 +0000 (+0000)
Subject: *** empty log message ***
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=b0a8fcb2ca2e2d185b8cd33fe3a2db8ba816b263;p=tools%2Fmedcoupling.git

---

diff --git a/src/ParaMEDMEM/OverlapDEC.cxx b/src/ParaMEDMEM/OverlapDEC.cxx
index 7a62893ca..4ac0dfdf0 100644
--- a/src/ParaMEDMEM/OverlapDEC.cxx
+++ b/src/ParaMEDMEM/OverlapDEC.cxx
@@ -93,14 +93,22 @@ of proc #m. In this case proc#k computes part of mesh A in boundingbox B
 of proc#m. It implies that the corresponding cellIds or nodeIds of the corresponding part are sent to proc #m too.

-   Let's consider the couple (k,m) in TODO list. This couple is treated by either k or m as seen \ref ParaMEDMEMOverlapDECAlgoStep2 "here".
+   Let's consider the couple (k,m) in the TODO list. This couple is treated by either k or m, as seen \ref ParaMEDMEMOverlapDECAlgoStep2 "here in Step2".

-   As it will be dealt in Step 6, at the end for final matrix-vector computation the result matrix of the couple (k,m) anywhere it is computed (proc#k or proc#m)
+   As will be dealt with in Step 6, for the final matrix-vector computation, the result matrix of the couple (k,m), wherever it is computed (on proc #k or proc #m),
    it will be stored in \b proc#m.

-   - If proc#k is in charge of this couple (k,m) target ids (cells or nodes) of mesh in proc#m are renumbered, because proc#m has stripped
-     its target mesh to avoid big amount of data. In this case has it is finally proc#m in charge of the matrix, proc#m keeps preciously the
-     target ids sent to proc#k. No problem will appear for source ids because no restriction done.
+   - If proc #k is in charge of this couple (k,m) (it performs the matrix computation), the target ids (cells or nodes) of the mesh on proc #m are renumbered, because proc #m has stripped
+     its target mesh to reduce the amount of data to transfer. In this case, as proc #m is finally in charge of the matrix, proc #k must carefully keep the
+     source ids that will have to be sent to proc #m. No problem will appear for matrix assembling on proc #m regarding the source ids, because no restriction was done on them.
+     Concerning the source id field values to be sent for the matrix-vector computation, proc #k will know precisely which ones to send to proc #m.
+     This is handled by OverlapMapping::keepTracksOfTargetIds on proc #m.
+
+   - If proc #m is in charge of this couple (k,m) (it performs the matrix computation), the source ids (cells or nodes) of the mesh on proc #k are renumbered, because proc #k has stripped
+     its source mesh to reduce the amount of data to transfer. In this case, as proc #m is finally in charge of the matrix, proc #m receives the source ids
+     from the remote proc #k, so the matrix is directly correct : no renumbering will be needed in \ref ParaMEDMEMOverlapDECAlgoStep5 "Step 5". But proc #k must
+     keep track of the ids it sent to proc #m for the matrix-vector computation.
+     This is handled by OverlapMapping::keepTracksOfSourceIds on proc #k.

    This step is performed by the ParaMEDMEM::OverlapElementLocator::exchangeMeshes method.

@@ -124,6 +132,27 @@
    For a proc #k, it is necessary to fetch the info of all the matrices built in \ref ParaMEDMEMOverlapDECAlgoStep4 "Step4" whose first element in the pair is equal to k.

+   After this step, once ParaMEDMEM::OverlapMapping::prepare has been called, the matrix repartition is the following :
+
+   - proc#0 : (0,0),(1,0),(2,0)
+   - proc#1 : (0,1),(2,1)
+   - proc#2 : (1,2),(2,2)
+
+   Tuple (2,1), computed on proc 2, is stored on proc 1 at the end of "prepare". So it is an example of item 0 in \ref ParaMEDMEMOverlapDECAlgoStep2 "Step2".
+   Tuple (0,1), computed on proc 1, is stored on proc 1 too. So it is an example of item 1 in \ref ParaMEDMEMOverlapDECAlgoStep2 "Step2".
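+
+   For the field-value exchanges of Step 6, "prepare" also records on each proc whom to send vector pieces to and whom to receive them from. A sketch of the rule it implements (with grpSize standing for the group size) :
+
+   \code
+   _proc_ids_to_send_vector_st=procsInInteraction[myProcId];
+   for(int p=0;p<grpSize;p++)
+     if(std::find(procsInInteraction[p].begin(),procsInInteraction[p].end(),myProcId)!=procsInInteraction[p].end())
+       _proc_ids_to_recv_vector_st.push_back(p);
+   \endcode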
+
+   ParaMEDMEM::OverlapMapping::_proc_ids_to_send_vector_st will contain :
+
+   - Proc#0 : 0,1
+   - Proc#1 : 0,2
+   - Proc#2 : 0,1,2
+
+   ParaMEDMEM::OverlapMapping::_proc_ids_to_recv_vector_st will contain :
+
+   - Proc#0 : 0,1,2
+   - Proc#1 : 0,2
+   - Proc#2 : 1,2
+
+   The method in charge of performing this is ParaMEDMEM::OverlapMapping::prepare.
 */
 namespace ParaMEDMEM
@@ -191,7 +220,7 @@ namespace ParaMEDMEM
     _interpolation_matrix=new OverlapInterpolationMatrix(_source_field,_target_field,*_group,*this,*this);
     OverlapElementLocator locator(_source_field,_target_field,*_group);
     locator.copyOptions(*this);
-    locator.exchangeMeshes();
+    locator.exchangeMeshes(*_interpolation_matrix);
     std::vector< std::pair<int,bool> > jobs=locator.getToDoList();
     std::string srcMeth=locator.getSourceMethod();
     std::string trgMeth=locator.getTargetMethod();
diff --git a/src/ParaMEDMEM/OverlapElementLocator.cxx b/src/ParaMEDMEM/OverlapElementLocator.cxx
index af751cdca..628350f3c 100644
--- a/src/ParaMEDMEM/OverlapElementLocator.cxx
+++ b/src/ParaMEDMEM/OverlapElementLocator.cxx
@@ -27,6 +27,7 @@
 #include "ParaMESH.hxx"
 #include "ProcessorGroup.hxx"
 #include "MPIProcessorGroup.hxx"
+#include "OverlapInterpolationMatrix.hxx"
 #include "MEDCouplingFieldDouble.hxx"
 #include "MEDCouplingFieldDiscretization.hxx"
 #include "DirectedBoundingBox.hxx"
@@ -156,7 +157,7 @@
   * The aim of this method is to perform the communication to get data corresponding to '_to_do_list' attribute.
   * The principle is the following : if proc n1 and n2 need to perform a cross sending with n1<n2, n1 sends first.
       for(std::vector< std::pair<int,bool> >::const_iterator it2=_procs_to_send.begin();it2!=_procs_to_send.end();it2++)
-        sendLocalMeshTo((*it2).first,(*it2).second);
+        sendLocalMeshTo((*it2).first,(*it2).second,matrix);
     //fetching remaining meshes
     for(std::vector< std::pair<int,bool> >::const_iterator it=toDoListForFetchRemaining.begin();it!=toDoListForFetchRemaining.end();it++)
       {
@@ -266,8 +267,10 @@
   /*!
    * This method sends the local source to proc 'procId' if 'sourceOrTarget'==True.
    * This method sends the local target to proc 'procId' if 'sourceOrTarget'==False.
+   *
+   * This method also prepares the matrix, for matrix assembling and for the future matrix-vector computation.
    */
-  void OverlapElementLocator::sendLocalMeshTo(int procId, bool sourceOrTarget) const
+  void OverlapElementLocator::sendLocalMeshTo(int procId, bool sourceOrTarget, OverlapInterpolationMatrix& matrix) const
   {
     vector<int> elems;
     //int myProcId=_group.myRank();
@@ -288,7 +291,11 @@
       }
     local_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment(),elems);
     DataArrayInt *idsToSend;
-    MEDCouplingPointSet *send_mesh=(MEDCouplingPointSet *)field->getField()->buildSubMeshData(&elems[0],&elems[elems.size()],idsToSend);
+    MEDCouplingPointSet *send_mesh=static_cast<MEDCouplingPointSet *>(field->getField()->buildSubMeshData(&elems[0],&elems[elems.size()],idsToSend));
+    if(sourceOrTarget)
+      matrix.keepTracksOfSourceIds(procId,idsToSend);//Case#1 in Step2 of main algorithm.
+    else
+      matrix.keepTracksOfTargetIds(procId,idsToSend);//Case#0 in Step2 of main algorithm.
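+    //The ids recorded by the two calls above are reused in Step 6 of the main algorithm :
+    //on the source side they select the field values to ship away, on the target side they
+    //make it possible to put the received rows back at the right local target tuple ids.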
    sendMesh(procId,send_mesh,idsToSend);
    send_mesh->decrRef();
    idsToSend->decrRef();
diff --git a/src/ParaMEDMEM/OverlapElementLocator.hxx b/src/ParaMEDMEM/OverlapElementLocator.hxx
index 4490be609..754ab6e7c 100644
--- a/src/ParaMEDMEM/OverlapElementLocator.hxx
+++ b/src/ParaMEDMEM/OverlapElementLocator.hxx
@@ -35,15 +35,15 @@ namespace ParaMEDMEM
   class ParaFIELD;
   class ProcessorGroup;
   class ParaSUPPORT;
-  class InterpolationMatrix;
-
+  class OverlapInterpolationMatrix;
+
   class OverlapElementLocator : public INTERP_KERNEL::InterpolationOptions
   {
   public:
     OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group);
     virtual ~OverlapElementLocator();
     const MPI_Comm *getCommunicator() const;
-    void exchangeMeshes();
+    void exchangeMeshes(OverlapInterpolationMatrix& matrix);
     std::vector< std::pair<int,bool> > getToDoList() const { return _to_do_list; }
     std::vector< std::vector< int > > getProcsInInteraction() const { return _proc_pairs; }
     std::string getSourceMethod() const;
@@ -55,7 +55,7 @@
   private:
     void computeBoundingBoxes();
     bool intersectsBoundingBox(int i, int j) const;
-    void sendLocalMeshTo(int procId, bool sourceOrTarget) const;
+    void sendLocalMeshTo(int procId, bool sourceOrTarget, OverlapInterpolationMatrix& matrix) const;
     void receiveRemoteMesh(int procId, bool sourceOrTarget);
     void sendMesh(int procId, const MEDCouplingPointSet *mesh, const DataArrayInt *idsToSend) const;
     void receiveMesh(int procId, MEDCouplingPointSet* &mesh, DataArrayInt *&ids) const;
diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx
index f08c2e664..edc3f3e0e 100644
--- a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx
+++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx
@@ -61,6 +61,16 @@ namespace ParaMEDMEM
     _target_volume.resize(nbelems);
   }
 
+  void OverlapInterpolationMatrix::keepTracksOfSourceIds(int procId, DataArrayInt *ids)
+  {
+    _mapping.keepTracksOfSourceIds(procId,ids);
+  }
+
+  void OverlapInterpolationMatrix::keepTracksOfTargetIds(int procId, DataArrayInt *ids)
+  {
+    _mapping.keepTracksOfTargetIds(procId,ids);
+  }
+
   OverlapInterpolationMatrix::~OverlapInterpolationMatrix()
   {
   }
@@ -203,42 +213,7 @@
                const DataArrayInt *srcIds, int srcProc,
                const DataArrayInt *trgIds, int trgProc)
   {
-    //computing matrix with real ids for target
-    int sz=res.size();
-    const int *trgIds2=0;
-    const int *srcIds2=0;
-    int nbTrgIds=_target_field->getField()->getNumberOfTuplesExpected();
-    int nbSrcIds=_source_field->getField()->getNumberOfTuplesExpected();
-    std::vector< std::map<int,double> > res1(sz);
-    INTERP_KERNEL::AutoPtr<int> tmp2=new int[nbTrgIds];
-    INTERP_KERNEL::AutoPtr<int> tmp3=new int[nbSrcIds];
-    if(trgIds)
-      trgIds2=trgIds->getConstPointer();
-    else
-      {
-        trgIds2=tmp2;
-        for(int i=0;i<nbTrgIds;i++)
-          tmp2[i]=i;
-      }
-    if(srcIds)
-      srcIds2=srcIds->getConstPointer();
-    else
-      {
-        srcIds2=tmp3;
-        for(int i=0;i<nbSrcIds;i++)
-          tmp3[i]=i;
-      }
-    for(int i=0;i<sz;i++)
-      {
-        std::map<int,double>& m=res1[i];
-        const std::map<int,double>& ref=res[i];
-        for(std::map<int,double>::const_iterator it=ref.begin();it!=ref.end();it++)
-          {
-            m[srcIds2[(*it).first]]=(*it).second;
-          }
-      }
-    //dealing source ids
-    _mapping.addContributionST(res1,srcIds2,trgIds2,nbTrgIds,srcProc,trgProc);
+    _mapping.addContributionST(res,srcIds,srcProc,trgIds,trgProc);
   }
 
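The block removed above performed the "local ids to real ids" renumbering inline before handing the matrix over; OverlapMapping::addContributionST now receives the raw DataArrayInt ids and keeps that bookkeeping itself. For reference, a minimal standalone sketch of the remapping pattern (a hypothetical helper, not part of the MEDCoupling API):

  #include <map>
  #include <vector>

  // Remap the column ids of a row-major sparse matrix through an id table:
  // entry (row,col) becomes (row,ids[col]) -- what the removed code did with
  // srcIds2 before delegating this responsibility to OverlapMapping.
  std::vector< std::map<int,double> > remapColumns(const std::vector< std::map<int,double> >& mat,
                                                   const std::vector<int>& ids)
  {
    std::vector< std::map<int,double> > res(mat.size());
    for(std::size_t i=0;i<mat.size();i++)
      for(std::map<int,double>::const_iterator it=mat[i].begin();it!=mat[i].end();it++)
        res[i][ids[(*it).first]]=(*it).second;
    return res;
  }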
  /*!
@@ -255,8 +230,10 @@ namespace ParaMEDMEM
 
   void OverlapInterpolationMatrix::computeDeno()
   {
-    if(_target_field->getField()->getNature()==IntegralGlobConstraint)
-      _mapping.computeDenoGlobConstraint();
+    if(_target_field->getField()->getNature()==ConservativeVolumic)
+      _mapping.computeDenoConservativeVolumic(_target_field->getField()->getNumberOfTuplesExpected());
+    else
+      throw INTERP_KERNEL::Exception("Policy not implemented yet : only ConservativeVolumic defined !");
   }
 
   void OverlapInterpolationMatrix::multiply()
diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx
index 1f84e7ced..eca4e8c32 100644
--- a/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx
+++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx
@@ -41,6 +41,10 @@
                                const DECOptions& dec_opt,
                                const InterpolationOptions& i_opt);
 
+    void keepTracksOfSourceIds(int procId, DataArrayInt *ids);
+
+    void keepTracksOfTargetIds(int procId, DataArrayInt *ids);
+
     void addContribution(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId,
                          const MEDCouplingPointSet *trg, const DataArrayInt *trgIds, const std::string& trgMeth, int trgProcId);
 
diff --git a/src/ParaMEDMEM/OverlapMapping.cxx b/src/ParaMEDMEM/OverlapMapping.cxx
index 830d026b8..dfc078c0e 100644
--- a/src/ParaMEDMEM/OverlapMapping.cxx
+++ b/src/ParaMEDMEM/OverlapMapping.cxx
@@ -34,21 +34,53 @@ OverlapMapping::OverlapMapping(const ProcessorGroup& group):_group(group)
 {
 }
 
+/*!
+ * This method keeps track of the source ids, to know in step 6 of the main algorithm which tuple ids to send away.
+ * This method implements item #1 of the step 2 algorithm.
+ */
+void OverlapMapping::keepTracksOfSourceIds(int procId, DataArrayInt *ids)
+{
+  ids->incrRef();
+  _src_ids_st2.push_back(ids);
+  _src_proc_st2.push_back(procId);
+}
+
+/*!
+ * This method keeps track of the target ids, to know them in step 6 of the main algorithm.
+ * This method implements item #0 of the step 2 algorithm.
+ */
+void OverlapMapping::keepTracksOfTargetIds(int procId, DataArrayInt *ids)
+{
+  ids->incrRef();
+  _trg_ids_st2.push_back(ids);
+  _trg_proc_st2.push_back(procId);
+}
+
 /*!
  * This method stores a matrix given in Target(rows)/Source(cols) format, for a source procId 'srcProcId' and a target procId 'trgProcId'.
  * All ids (source and target) are given as local ids.
  */
-void OverlapMapping::addContributionST(const std::vector< std::map<int,double> >& matrixST, const int *srcIds, const int *trgIds, int trgIdsLgth, int srcProcId, int trgProcId)
+void OverlapMapping::addContributionST(const std::vector< std::map<int,double> >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId)
 {
   int nbOfRows=matrixST.size();
   _matrixes_st.push_back(matrixST);
-  //_source_ids_st.resize(_source_ids_st.size()+1);
-  //_source_ids_st.back().insert(_source_ids_st.back().end(),srcIds,srcIds+nbOfRows);
   _source_proc_id_st.push_back(srcProcId);
-  //
-  _target_ids_st.resize(_target_ids_st.size()+1);
-  _target_ids_st.back().insert(_target_ids_st.back().end(),trgIds,trgIds+trgIdsLgth);
   _target_proc_id_st.push_back(trgProcId);
+  if(srcIds)
+    {//item#1 of step2 algorithm in proc m : only to know in advance the nb of recv ids [ (0,1) computed on proc1 and Matrix-Vector on proc1 ]
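+      //A non null srcIds means that the ids were kept while receiving the remote source mesh :
+      //this proc owns the matrix, and only the number of those ids matters here, to size the
+      //receive buffers of the future matrix-vector exchange (see OverlapMapping::multiply).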
+      _nb_of_src_ids_proc_st2.push_back(srcIds->getNumberOfTuples());
+      _src_ids_proc_st2.push_back(srcProcId);
+    }
+  else
+    {//item#0 of step2 algorithm in proc k
+      std::set<int> s;
+      for(std::vector< std::map<int,double> >::const_iterator it1=matrixST.begin();it1!=matrixST.end();it1++)
+        for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+          s.insert((*it2).first);
+      _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1);
+      _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end());
+      _src_ids_zip_proc_st2.push_back(trgProcId);
+    }
 }
 
 /*!
@@ -56,8 +88,7 @@
  * In 'procsInInteraction', a proc with id i is in interaction with the procs listed in procsInInteraction[i].
  *
  * This method is in charge of sending the matrices in AlltoAll mode.
- * After the call of this method 'this' contains the matrixST for all source elements of the current proc and
- * matrixTS for all target elements of current proc.
+ * After the call of this method, 'this' contains the matrixST for all source elements of the current proc.
 */
 void OverlapMapping::prepare(const std::vector< std::vector<int> >& procsInInteraction, int nbOfTrgElems)
 {
@@ -70,11 +101,15 @@
   INTERP_KERNEL::AutoPtr<int> nbsend3=new int[grpSize];
   std::fill(nbsend,nbsend+grpSize,0);
   int myProcId=_group.myRank();
+  _proc_ids_to_recv_vector_st.clear();
+  int curProc=0;
+  for(std::vector< std::vector<int> >::const_iterator it1=procsInInteraction.begin();it1!=procsInInteraction.end();it1++,curProc++)
+    if(std::find((*it1).begin(),(*it1).end(),myProcId)!=(*it1).end())
+      _proc_ids_to_recv_vector_st.push_back(curProc);
+  _proc_ids_to_send_vector_st=procsInInteraction[myProcId];
   for(std::size_t i=0;i<_matrixes_st.size();i++)
-    {
-      if(_source_proc_id_st[i]!=myProcId)
-        nbsend[_source_proc_id_st[i]]=_matrixes_st[i].size();
-    }
+    if(_source_proc_id_st[i]==myProcId)
+      nbsend[_target_proc_id_st[i]]=_matrixes_st[i].size();
   INTERP_KERNEL::AutoPtr<int> nbrecv=new int[grpSize];
   commInterface.allToAll(nbsend,1,MPI_INT,nbrecv,1,MPI_INT,*comm);
   //exchanging matrix
@@ -90,7 +125,7 @@
   INTERP_KERNEL::AutoPtr<int> bigArrRecv=new int[nbrecv2[grpSize-1]+nbrecv1[grpSize-1]];
   commInterface.allToAllV(bigArr,nbsend2,nbsend3,MPI_INT,
                           bigArrRecv,nbrecv1,nbrecv2,MPI_INT,
-                          *comm);// sending ids of sparse matrix (n+1 elems) + src ids (n elems)
+                          *comm);// sending ids of sparse matrix (n+1 elems)
   //second phase : exchange target ids
   std::fill(nbsend2,nbsend2+grpSize,0);
   INTERP_KERNEL::AutoPtr<int> nbrecv3=new int[grpSize];
@@ -112,10 +147,11 @@
   //finishing
   unserializationST(nbOfTrgElems,nbrecv,bigArrRecv,nbrecv1,nbrecv2,
                     bigArrRecv2,bigArrDRecv2,nbrecv3,nbrecv4);
-  //finish to fill _the_matrix_st and _the_matrix_st_target_proc_id with already in place matrix in _matrixes_st
-  finishToFillFinalMatrixST(nbOfTrgElems);
-  //exchanging target ids for future sending
-  prepareIdsToSendST();
+  //updating _src_ids_zip_st2 and _src_ids_zip_proc_st2 with the received matrices
+  updateZipSourceIdsForFuture();
+  //finish to fill _the_matrix_st with the matrices already in place in _matrixes_st
+  finishToFillFinalMatrixST();
+  //printTheMatrix();
 }
 
 /*!
@@ -143,6 +179,72 @@ void OverlapMapping::computeDenoGlobConstraint()
     }
 }
 
+/*!
+ * Compute denominators.
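+ * Sketch of the intent, inferred from the implementation below : for each target tuple t living on this proc,
+ * deno(t) = sum over s of W(t,s), the sum running over every source tuple s contributing to t in every matrix
+ * stored on this proc. The matrix-vector product in OverlapMapping::multiply then uses the normalized weights W(t,s)/deno(t).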
+ */
+void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg)
+{
+  CommInterface commInterface=_group.getCommInterface();
+  const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup *>(&_group);
+  const MPI_Comm *comm=group->getComm();
+  int myProcId=_group.myRank();
+  //
+  _the_deno_st.clear();
+  std::size_t sz1=_the_matrix_st.size();
+  _the_deno_st.resize(sz1);
+  std::vector<double> deno(nbOfTuplesTrg);
+  for(std::size_t i=0;i<sz1;i++)
+    {
+      const std::vector< std::map<int,double> >& mat=_the_matrix_st[i];
+      int curSrcId=_the_matrix_st_source_proc_id[i];
+      std::vector<int>::iterator isItem1=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId);
+      int rowId=0;
+      if(isItem1==_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because the rowIds of mat are directly target ids.
+        {
+          for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+            for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+              deno[rowId]+=(*it2).second;
+        }
+      else
+        {//item0 of step2 main algo. More complicated.
+          std::vector<int>::iterator fnd=isItem1;//std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId);
+          int locId=std::distance(_trg_proc_st2.begin(),fnd);
+          const DataArrayInt *trgIds=_trg_ids_st2[locId];
+          const int *trgIds2=trgIds->getConstPointer();
+          for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+            for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+              deno[trgIds2[rowId]]+=(*it2).second;
+        }
+    }
+  //
+  for(std::size_t i=0;i<sz1;i++)
+    {
+      const std::vector< std::map<int,double> >& mat=_the_matrix_st[i];
+      int curSrcId=_the_matrix_st_source_proc_id[i];
+      std::vector<int>::iterator isItem1=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId);
+      std::vector< std::map<int,double> >& denoM=_the_deno_st[i];
+      denoM.resize(mat.size());
+      if(isItem1==_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because the rowIds of mat are directly target ids.
+        {
+          int rowId=0;
+          for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+            for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+              denoM[rowId][(*it2).first]=deno[rowId];
+        }
+      else
+        {
+          int rowId=0;
+          std::vector<int>::iterator fnd=isItem1;
+          int locId=std::distance(_trg_proc_st2.begin(),fnd);
+          const DataArrayInt *trgIds=_trg_ids_st2[locId];
+          const int *trgIds2=trgIds->getConstPointer();
+          for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+            for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+              denoM[rowId][(*it2).first]=deno[trgIds2[rowId]];
+        }
+    }
+}
+
 /*!
 * This method performs step #0/3 in serialization process.
 * \param count specifies the nb of elems to send to the corresponding proc id ; its size equals _group.size().
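 * For instance, a matrix whose three rows have sizes {2,0,3} is announced through the offset
 * array {0,2,2,5}, i.e. matrix.size()+1 integers, as built below via work[1]=work[0]+(*it).size().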
@@ -158,10 +260,10 @@ void OverlapMapping::serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigAr int myProcId=_group.myRank(); for(std::size_t i=0;i<_matrixes_st.size();i++) { - if(_source_proc_id_st[i]!=myProcId) + if(_source_proc_id_st[i]==myProcId)// && _target_proc_id_st[i]!=myProcId { - count[_source_proc_id_st[i]]=2*_matrixes_st[i].size()+1; - szz+=2*_matrixes_st[i].size()+1; + count[_target_proc_id_st[i]]=_matrixes_st[i].size()+1; + szz+=_matrixes_st[i].size()+1; } } bigArr=new int[szz]; @@ -170,15 +272,14 @@ void OverlapMapping::serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigAr offsets[i]=offsets[i-1]+count[i-1]; for(std::size_t i=0;i<_matrixes_st.size();i++) { - if(_source_proc_id_st[i]!=myProcId) + if(_source_proc_id_st[i]==myProcId) { - int start=offsets[_source_proc_id_st[i]]; + int start=offsets[_target_proc_id_st[i]]; int *work=bigArr+start; *work=0; const std::vector< std::map >& mat=_matrixes_st[i]; for(std::vector< std::map >::const_iterator it=mat.begin();it!=mat.end();it++,work++) work[1]=work[0]+(*it).size(); - std::copy(_source_ids_st[i].begin(),_source_ids_st[i].end(),work+1); } } // @@ -186,7 +287,7 @@ void OverlapMapping::serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigAr for(int i=0;i0) - countForRecv[i]=2*nbOfElemsSrc[i]+1; + countForRecv[i]=nbOfElemsSrc[i]+1; else countForRecv[i]=0; if(i>0) @@ -221,13 +322,13 @@ int OverlapMapping::serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *r int fullLgth=0; for(std::size_t i=0;i<_matrixes_st.size();i++) { - if(_source_proc_id_st[i]!=myProcId) + if(_source_proc_id_st[i]==myProcId) { const std::vector< std::map >& mat=_matrixes_st[i]; int lgthToSend=0; for(std::vector< std::map >::const_iterator it=mat.begin();it!=mat.end();it++) lgthToSend+=(*it).size(); - count[_source_proc_id_st[i]]=lgthToSend; + count[_target_proc_id_st[i]]=lgthToSend; fullLgth+=lgthToSend; } } @@ -240,7 +341,7 @@ int OverlapMapping::serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *r fullLgth=0; for(std::size_t i=0;i<_matrixes_st.size();i++) { - if(_source_proc_id_st[i]!=myProcId) + if(_source_proc_id_st[i]==myProcId) { const std::vector< std::map >& mat=_matrixes_st[i]; for(std::vector< std::map >::const_iterator it1=mat.begin();it1!=mat.end();it1++) @@ -278,35 +379,19 @@ void OverlapMapping::unserializationST(int nbOfTrgElems, if(nbOfElemsSrcPerProc[i]!=0) _the_matrix_st_source_proc_id.push_back(i); int nbOfPseudoProcs=_the_matrix_st_source_proc_id.size();//_the_matrix_st_target_proc_id.size() contains number of matrix fetched remotely whose sourceProcId==myProcId - _the_matrix_st_source_ids.resize(nbOfPseudoProcs); _the_matrix_st.resize(nbOfPseudoProcs); - for(int i=0;i sourceIdsZip;// this zip is to reduce amount of data to send/rexcv on transposeMultiply target ids transfert + _the_matrix_st[j].resize(nbOfElemsSrcPerProc[i]); for(int k=0;k old2newSrcIds; - int newNbTrg=0; - for(std::set::const_iterator it=sourceIdsZip.begin();it!=sourceIdsZip.end();it++,newNbTrg++) - old2newSrcIds[*it]=newNbTrg; - for(int k=0;k_the_matrix_st_target_ids'. * This method finish the job of filling 'this->_the_matrix_st' and 'this->_the_matrix_st_target_proc_id' by putting candidates in 'this->_matrixes_st' into them. 
*/ -void OverlapMapping::finishToFillFinalMatrixST(int nbOfTrgElems) +void OverlapMapping::finishToFillFinalMatrixST() { int myProcId=_group.myRank(); int sz=_matrixes_st.size(); int nbOfEntryToAdd=0; for(int i=0;i >& mat=_matrixes_st[i]; - const std::vector& srcIds=_source_ids_st[i]; - int sz=srcIds.size();//assert srcIds.size()==mat.size() - for(int k=0;k& m2=mat[k]; - for(std::map::const_iterator it=m2.begin();it!=m2.end();it++) - _the_matrix_st[j][(*it).first][srcIds[k]]=(*it).second; - } - _the_matrix_st_source_ids[j].insert(_the_matrix_st_source_ids[j].end(),_source_ids_st[i].begin(),_source_ids_st[i].end()); + _the_matrix_st[j]=mat; + _the_matrix_st_source_proc_id.push_back(_source_proc_id_st[i]); j++; } + _matrixes_st.clear(); } /*! @@ -410,111 +483,196 @@ void OverlapMapping::prepareIdsToSendST() * This method performs a transpose multiply of 'fieldInput' and put the result into 'fieldOutput'. * 'fieldInput' is expected to be the sourcefield and 'fieldOutput' the targetfield. */ -void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) -{ - -} - -/*! - * This method performs a transpose multiply of 'fieldInput' and put the result into 'fieldOutput'. - * 'fieldInput' is expected to be the targetfield and 'fieldOutput' the sourcefield. - */ -void OverlapMapping::transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) +void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) const { -#if 0 - CommInterface commInterface=_group.getCommInterface(); - const MPIProcessorGroup *group=static_cast(&_group); - const MPI_Comm *comm=group->getComm(); - int grpSize=_group.size(); - INTERP_KERNEL::AutoPtr nbsend=new int[grpSize]; - std::fill(nbsend,nbsend+grpSize,0); - for(std::size_t i=0;i<_the_matrix_st.size();i++) - nbsend[_the_matrix_st_target_proc_id[i]]=_the_matrix_st_target_ids[i].size(); - INTERP_KERNEL::AutoPtr nbrecv=new int[grpSize]; - commInterface.allToAll(nbsend,1,MPI_INT,nbrecv,1,MPI_INT,*comm); - int nbOfCompo=fieldInput->getNumberOfComponents();//to improve same number of components - std::transform((int *)nbsend,(int *)nbsend+grpSize,(int *)nbsend,std::bind2nd(std::multiplies(),nbOfCompo)); - std::transform((int *)nbrecv,(int *)nbrecv+grpSize,(int *)nbrecv,std::bind2nd(std::multiplies(),nbOfCompo)); - INTERP_KERNEL::AutoPtr nbsend2=new int[grpSize]; - nbsend2[0]=0; - for(int i=1;i nbsend3=new double[szToSend]; - double *work=nbsend3; - for(std::size_t i=0;i<_the_matrix_st.size();i++) - { - MEDCouplingAutoRefCountObjectPtr ptr=fieldInput->getArray()->selectByTupleId(&_target_ids_st[i][0],&(_target_ids_st[i][0])+_target_ids_st[i].size()); - std::copy(ptr->getConstPointer(),ptr->getConstPointer()+nbsend[_target_proc_id_st[i]],work+nbsend2[_target_proc_id_st[i]]); - } - INTERP_KERNEL::AutoPtr nbrecv3=new int[grpSize]; - nbrecv3[0]=0; - for(int i=1;i nbrecv2=new double[szToFetch]; -#endif -#if 0 - // int nbOfCompo=fieldInput->getNumberOfComponents();//to improve same number of components to test CommInterface commInterface=_group.getCommInterface(); const MPIProcessorGroup *group=static_cast(&_group); const MPI_Comm *comm=group->getComm(); int grpSize=_group.size(); + int myProcId=_group.myRank(); // INTERP_KERNEL::AutoPtr nbsend=new int[grpSize]; - std::fill(nbsend,nbsend+grpSize,0); INTERP_KERNEL::AutoPtr nbsend2=new int[grpSize]; - for(int i=0;i nbsend3=new double[szToSend]; INTERP_KERNEL::AutoPtr nbrecv=new int[grpSize]; - 
INTERP_KERNEL::AutoPtr<int> nbrecv3=new int[grpSize];
+  INTERP_KERNEL::AutoPtr<int> nbrecv2=new int[grpSize];
+  std::fill(nbsend,nbsend+grpSize,0);
   std::fill(nbrecv,nbrecv+grpSize,0);
-  for(std::size_t i=0;i<_the_matrix_st_source_proc_id.size();i++)
-    nbrecv[_the_matrix_st_source_proc_id[i]]=_the_matrix_st_target_ids[i].size()*nbOfCompo;
-  nbrecv3[0]=0;
-  for(int i=1;i<grpSize;i++)
-    nbrecv3[i]=nbrecv3[i-1]+nbrecv[i-1];
+  int nbOfCompo=fieldInput->getNumberOfComponents();
+  std::vector<double> valsToSend;
+  for(int i=0;i<grpSize;i++)
+    {
+      if(std::find(_proc_ids_to_send_vector_st.begin(),_proc_ids_to_send_vector_st.end(),i)!=_proc_ids_to_send_vector_st.end())
+        {
+          std::vector<int>::const_iterator isItem1=std::find(_src_proc_st2.begin(),_src_proc_st2.end(),i);
+          MEDCouplingAutoRefCountObjectPtr<DataArrayDouble> vals;
+          if(isItem1!=_src_proc_st2.end())//item1 of step2 main algo
+            {
+              int id=std::distance(_src_proc_st2.begin(),isItem1);
+              vals=fieldInput->getArray()->selectByTupleId(_src_ids_st2[id]->getConstPointer(),_src_ids_st2[id]->getConstPointer()+_src_ids_st2[id]->getNumberOfTuples());
+            }
+          else
+            {//item0 of step2 main algo
+              int id=std::distance(_src_ids_zip_proc_st2.begin(),std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i));
+              vals=fieldInput->getArray()->selectByTupleId(&(_src_ids_zip_st2[id])[0],&(_src_ids_zip_st2[id])[0]+_src_ids_zip_st2[id].size());
+            }
+          nbsend[i]=vals->getNbOfElems();
+          valsToSend.insert(valsToSend.end(),vals->getConstPointer(),vals->getConstPointer()+nbsend[i]);
+        }
+      if(std::find(_proc_ids_to_recv_vector_st.begin(),_proc_ids_to_recv_vector_st.end(),i)!=_proc_ids_to_recv_vector_st.end())
+        {
+          std::vector<int>::const_iterator isItem0=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i);
+          if(isItem0==_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ]
+            {
+              std::vector<int>::const_iterator it1=std::find(_src_ids_proc_st2.begin(),_src_ids_proc_st2.end(),i);
+              if(it1!=_src_ids_proc_st2.end())
+                {
+                  int id=std::distance(_src_ids_proc_st2.begin(),it1);
+                  nbrecv[i]=_nb_of_src_ids_proc_st2[id]*nbOfCompo;
+                }
+              else if(i==myProcId)
+                {
+                  nbrecv[i]=fieldInput->getNumberOfTuplesExpected()*nbOfCompo;
+                }
+              else
+                throw INTERP_KERNEL::Exception("Plouff ! send email to anthony.geay@cea.fr !");
"); + } + else + {//item0 of step2 main algo [ (2,1) computed on proc2 but Matrix-Vector on proc1 ] [(1,0) computed on proc1 but Matrix-Vector on proc0] + int id=std::distance(_src_ids_zip_proc_st2.begin(),std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i)); + nbrecv[i]=_src_ids_zip_st2[id].size()*nbOfCompo; + } + } } - INTERP_KERNEL::AutoPtr nbrecv2=new double[szToRecv]; - commInterface.allToAllV(nbsend3,nbsend,nbsend2,MPI_DOUBLE, - nbrecv2,nbrecv,nbrecv3,MPI_DOUBLE, - *comm); - // deserialization + for(int i=1;i bigArr=new double[nbrecv2[grpSize-1]+nbrecv[grpSize-1]]; + commInterface.allToAllV(&valsToSend[0],nbsend,nbsend2,MPI_DOUBLE, + bigArr,nbrecv,nbrecv2,MPI_DOUBLE,*comm); fieldOutput->getArray()->fillWithZero(); - INTERP_KERNEL::AutoPtr workZone=new double[nbOfCompo]; - for(std::size_t i=0;i<_the_matrix_st.size();i++) + INTERP_KERNEL::AutoPtr tmp=new double[nbOfCompo]; + for(int i=0;igetArray()->getPointer(); - int sourceProcId=_the_matrix_st_source_proc_id[i]; - const std::vector >& m=_the_matrix_st[i]; - const std::vector >& deno=_the_deno_st[i]; - const double *vecOnTargetProcId=((const double *)nbrecv2)+nbrecv3[sourceProcId]; - std::size_t nbOfIds=m.size(); - for(std::size_t j=0;j0) { - const std::map& m2=m[j]; - const std::map& deno2=deno[j]; - for(std::map::const_iterator it=m2.begin();it!=m2.end();it++) + double *pt=fieldOutput->getArray()->getPointer(); + std::vector::const_iterator it=std::find(_the_matrix_st_source_proc_id.begin(),_the_matrix_st_source_proc_id.end(),i); + if(it==_the_matrix_st_source_proc_id.end()) + throw INTERP_KERNEL::Exception("Big problem !"); + int id=std::distance(_the_matrix_st_source_proc_id.begin(),it); + const std::vector< std::map >& mat=_the_matrix_st[id]; + const std::vector< std::map >& deno=_the_deno_st[id]; + std::vector::const_iterator isItem0=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i); + if(isItem0==_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ] { - std::map::const_iterator it2=deno2.find((*it).first); - std::transform(vecOnTargetProcId+((*it).first*nbOfCompo),vecOnTargetProcId+((*it).first+1)*nbOfCompo,(double *)workZone,std::bind2nd(std::multiplies(),(*it).second)); - std::transform((double *)workZone,(double *)workZone+nbOfCompo,(double *)workZone,std::bind2nd(std::multiplies(),1./(*it2).second)); - std::transform((double *)workZone,(double *)workZone+nbOfCompo,res,res,std::plus()); + int nbOfTrgTuples=mat.size(); + for(int j=0;j& mat1=mat[j]; + const std::map& deno1=deno[j]; + std::map::const_iterator it4=deno1.begin(); + for(std::map::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it4++) + { + std::transform(bigArr+nbrecv2[i]+((*it3).first)*nbOfCompo,bigArr+nbrecv2[i]+((*it3).first+1)*(nbOfCompo),(double *)tmp,std::bind2nd(std::multiplies(),(*it3).second/(*it4).second)); + std::transform((double *)tmp,(double *)tmp+nbOfCompo,pt,pt,std::plus()); + } + } + } + else + {//item0 of step2 main algo [ (2,1) computed on proc2 but Matrix-Vector on proc1 ] + double *pt=fieldOutput->getArray()->getPointer(); + std::map zipCor; + int id=std::distance(_src_ids_zip_proc_st2.begin(),std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i)); + const std::vector zipIds=_src_ids_zip_st2[id]; + int newId=0; + for(std::vector::const_iterator it=zipIds.begin();it!=zipIds.end();it++,newId++) + zipCor[*it]=newId; + int id2=std::distance(_trg_proc_st2.begin(),std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i)); + const DataArrayInt 
+              const DataArrayInt *tgrIds=_trg_ids_st2[id2];
+              const int *tgrIds2=tgrIds->getConstPointer();
+              int nbOfTrgTuples=mat.size();
+              for(int j=0;j<nbOfTrgTuples;j++)
+                {
+                  const std::map<int,double>& mat1=mat[j];
+                  const std::map<int,double>& deno1=deno[j];
+                  std::map<int,double>::const_iterator it5=deno1.begin();
+                  for(std::map<int,double>::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it5++)
+                    {
+                      std::map<int,int>::const_iterator it4=zipCor.find((*it3).first);
+                      if(it4==zipCor.end())
+                        throw INTERP_KERNEL::Exception("Hmmmmm send e mail to anthony.geay@cea.fr !");
+                      std::transform(bigArr+nbrecv2[i]+((*it4).second)*nbOfCompo,bigArr+nbrecv2[i]+((*it4).second+1)*(nbOfCompo),(double *)tmp,std::bind2nd(std::multiplies<double>(),(*it3).second/(*it5).second));
+                      std::transform((double *)tmp,(double *)tmp+nbOfCompo,pt+tgrIds2[j]*nbOfCompo,pt+tgrIds2[j]*nbOfCompo,std::plus<double>());
+                    }
+                }
+            }
+        }
+    }
+}
+
+/*!
+ * This method performs a transpose multiply of 'fieldInput' and puts the result into 'fieldOutput'.
+ * 'fieldInput' is expected to be the targetfield and 'fieldOutput' the sourcefield.
+ */
+void OverlapMapping::transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput)
+{
+}
+
+/*!
+ * This method should be called immediately after _the_matrix_st has been filled with the remotely computed matrices put on this proc for the matrix-vector computation.
+ * For these matrices, this method computes the minimal set of source ids corresponding to each source proc id.
+ */
+void OverlapMapping::updateZipSourceIdsForFuture()
+{
+  CommInterface commInterface=_group.getCommInterface();
+  const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup *>(&_group);
+  const MPI_Comm *comm=group->getComm();
+  int grpSize=_group.size();
+  int myProcId=_group.myRank();
+  int nbOfMatrixReceived=_the_matrix_st_source_proc_id.size();
+  for(int i=0;i<nbOfMatrixReceived;i++)
+    {
+      int curSrcProcId=_the_matrix_st_source_proc_id[i];
+      if(curSrcProcId!=myProcId)
+        {
+          const std::vector< std::map<int,double> >& mat=_the_matrix_st[i];
+          _src_ids_zip_proc_st2.push_back(curSrcProcId);
+          _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1);
+          std::set<int> s;
+          for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
+            for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+              s.insert((*it2).first);
+          _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end());
+        }
+    }
+}
+
+// #include <iostream>
+
+// void OverlapMapping::printTheMatrix() const
+// {
+//   CommInterface commInterface=_group.getCommInterface();
+//   const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup *>(&_group);
+//   const MPI_Comm *comm=group->getComm();
+//   int grpSize=_group.size();
+//   int myProcId=_group.myRank();
+//   std::cerr << "I am proc #" << myProcId << std::endl;
+//   int nbOfMat=_the_matrix_st.size();
+//   std::cerr << "I do manage " << nbOfMat << " matrix : "<< std::endl;
+//   for(int i=0;i<nbOfMat;i++)
+//     {
+//       const std::vector< std::map<int,double> >& locMat=_the_matrix_st[i];
+//       for(std::vector< std::map<int,double> >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++)
+//         {
+//           for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+//             std::cerr << "(" << (*it2).first << "," << (*it2).second << "), ";
+//           std::cerr << std::endl;
+//         }
+//     }
+//   std::cerr << "*********" << std::endl;
+// }
diff --git a/src/ParaMEDMEM/OverlapMapping.hxx b/src/ParaMEDMEM/OverlapMapping.hxx
index 0bcd76f12..3ac4abdb6 100644
--- a/src/ParaMEDMEM/OverlapMapping.hxx
+++ b/src/ParaMEDMEM/OverlapMapping.hxx
@@ -20,23 +20,29 @@
 #ifndef __OVERLAPMAPPING_HXX__
 #define __OVERLAPMAPPING_HXX__
 
+#include "MEDCouplingAutoRefCountObjectPtr.hxx"
+
 #include <vector>
 #include <map>
 
 namespace ParaMEDMEM
 {
   class ProcessorGroup;
+  class DataArrayInt;
   class MEDCouplingFieldDouble;
 
   class OverlapMapping
   {
   public:
     OverlapMapping(const ProcessorGroup& group);
-    void addContributionST(const std::vector< std::map<int,double> >& matrixST, const int *srcIds, const int *trgIds, int trgIdsLgth, int srcProcId, int trgProcId);
+    void keepTracksOfSourceIds(int procId, DataArrayInt *ids);
+    void keepTracksOfTargetIds(int procId, DataArrayInt *ids);
+    void addContributionST(const std::vector< std::map<int,double> >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId);
     void prepare(const std::vector< std::vector<int> >& procsInInteraction, int nbOfTrgElems);
+    void computeDenoConservativeVolumic(int nbOfTuplesTrg);
     void computeDenoGlobConstraint();
     //
-    void multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput);
+    void multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) const;
     void transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput);
   private:
     void serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigArr, int *count, int *offsets,
@@ -46,10 +52,21 @@
                                 int *countForRecv, int *offsForRecv) const;
     void unserializationST(int nbOfTrgElems, const int *nbOfElemsSrcPerProc, const int *bigArrRecv, const int *bigArrRecvCounts, const int *bigArrRecvOffs,
                            const int *bigArrRecv2, const double *bigArrDRecv2, const int *bigArrRecv2Count, const int *bigArrRecv2Offs);
-    void finishToFillFinalMatrixST(int nbOfTrgElems);
+    void finishToFillFinalMatrixST();
     void prepareIdsToSendST();
+    void updateZipSourceIdsForFuture();
+    //void printTheMatrix() const;
   private:
     const ProcessorGroup &_group;
+    //! vector of ids
+    std::vector< MEDCouplingAutoRefCountObjectPtr<DataArrayInt> > _src_ids_st2;//item #1
+    std::vector< int > _src_proc_st2;//item #1
+    std::vector< MEDCouplingAutoRefCountObjectPtr<DataArrayInt> > _trg_ids_st2;//item #0
+    std::vector< int > _trg_proc_st2;//item #0
+    std::vector< int > _nb_of_src_ids_proc_st2;//item #1
+    std::vector< int > _src_ids_proc_st2;//item #1
+    std::vector< std::vector<int> > _src_ids_zip_st2;//same size as _src_ids_zip_proc_st2. Sorted. For each proc, specifies the corresponding source ids to send. This is for item #0 of Step 2 of the main algorithm.
+    std::vector< int > _src_ids_zip_proc_st2;
     //! vector of matrices ; the first entry corresponds to the source proc id in _source_ids_st
     std::vector< std::vector< std::map<int,double> > > _matrixes_st;
     std::vector< std::vector<int> > _source_ids_st;
@@ -61,7 +78,10 @@
     std::vector< int > _the_matrix_st_source_proc_id;
     std::vector< std::vector<int> > _the_matrix_st_source_ids;
     std::vector< std::vector< std::map<int,double> > > _the_deno_st;
-    //! this attribute is of size _group.size(); for each procId in _group _target_ids_to_send_st[procId] contains tupleId to send abroad
+    //! this attribute stores the ids of the procs that wait for data from this proc for the matrix-vector computation
+    std::vector< int > _proc_ids_to_send_vector_st;
+    std::vector< int > _proc_ids_to_recv_vector_st;
+    //!
this attribute is of size _group.size(); for each procId in _group _source_ids_to_send_st[procId] contains tupleId to send abroad std::vector< std::vector > _source_ids_to_send_st; }; } diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx index 05a0b3275..ea2128b5f 100644 --- a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx @@ -47,7 +47,7 @@ class ParaMEDMEMTest : public CppUnit::TestFixture CPPUNIT_TEST(testInterpKernelDEC2DM1D_P0P0); CPPUNIT_TEST(testInterpKernelDECPartialProcs); CPPUNIT_TEST(testInterpKernelDEC3DSurfEmptyBBox); - //CPPUNIT_TEST(testOverlapDEC1); + CPPUNIT_TEST(testOverlapDEC1); CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D); CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D); diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx index e4391a4da..5a41497be 100644 --- a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx @@ -81,7 +81,7 @@ void ParaMEDMEMTest::testOverlapDEC1() ParaMEDMEM::ComponentTopology comptopo; parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh"); parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo); - parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//ConservativeVolumic IntegralGlobConstraint + parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint double *valsS=parafieldS->getField()->getArray()->getPointer(); valsS[0]=7.; valsS[1]=8.; // @@ -98,7 +98,7 @@ void ParaMEDMEMTest::testOverlapDEC1() meshT->finishInsertingCells(); parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh"); parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo); - parafieldT->getField()->setNature(ParaMEDMEM::IntegralGlobConstraint);//ConservativeVolumic + parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint double *valsT=parafieldT->getField()->getArray()->getPointer(); valsT[0]=7.; } @@ -122,7 +122,7 @@ void ParaMEDMEMTest::testOverlapDEC1() ParaMEDMEM::ComponentTopology comptopo; parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh"); parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo); - parafieldS->getField()->setNature(ParaMEDMEM::IntegralGlobConstraint);//ConservativeVolumic + parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint double *valsS=parafieldS->getField()->getArray()->getPointer(); valsS[0]=9.; valsS[1]=11.; // @@ -139,7 +139,7 @@ void ParaMEDMEMTest::testOverlapDEC1() meshT->finishInsertingCells(); parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh"); parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo); - parafieldT->getField()->setNature(ParaMEDMEM::IntegralGlobConstraint); + parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint double *valsT=parafieldT->getField()->getArray()->getPointer(); valsT[0]=8.; } @@ -162,7 +162,7 @@ void ParaMEDMEMTest::testOverlapDEC1() ParaMEDMEM::ComponentTopology comptopo; parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh"); parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo); - 
parafieldS->getField()->setNature(ParaMEDMEM::IntegralGlobConstraint);//ConservativeVolumic + parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint double *valsS=parafieldS->getField()->getArray()->getPointer(); valsS[0]=10.; // @@ -179,7 +179,7 @@ void ParaMEDMEMTest::testOverlapDEC1() meshT->finishInsertingCells(); parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh"); parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo); - parafieldT->getField()->setNature(ParaMEDMEM::IntegralGlobConstraint); + parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint double *valsT=parafieldT->getField()->getArray()->getPointer(); valsT[0]=9.; } @@ -188,20 +188,18 @@ void ParaMEDMEMTest::testOverlapDEC1() dec.synchronize(); dec.sendRecvData(true); // - /*if(rank==0) + if(rank==0) { - CPPUNIT_ASSERT_DOUBLES_EQUAL(7.5,parafieldS->getField()->getArray()->getIJ(0,0),1e-12); - CPPUNIT_ASSERT_DOUBLES_EQUAL(8.,parafieldS->getField()->getArray()->getIJ(0,1),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(8.75,parafieldT->getField()->getArray()->getIJ(0,0),1e-12); } if(rank==1) { - CPPUNIT_ASSERT_DOUBLES_EQUAL(8.,parafieldS->getField()->getArray()->getIJ(0,0),1e-12); - CPPUNIT_ASSERT_DOUBLES_EQUAL(8.,parafieldS->getField()->getArray()->getIJ(0,1),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12); } if(rank==2) { - CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,parafieldS->getField()->getArray()->getIJ(0,0),1e-12); - }*/ + CPPUNIT_ASSERT_DOUBLES_EQUAL(10.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12); + } delete parafieldS; delete parafieldT; delete parameshS;