SALOME platform Git repositories - tools/medcoupling.git/commitdiff
P0->P1 parallel.
author ageay <ageay>
Mon, 24 Aug 2009 14:23:18 +0000 (14:23 +0000)
committer ageay <ageay>
Mon, 24 Aug 2009 14:23:18 +0000 (14:23 +0000)
15 files changed:
src/ParaMEDMEM/ElementLocator.cxx
src/ParaMEDMEM/ElementLocator.hxx
src/ParaMEDMEM/GlobalizerMesh.cxx [deleted file]
src/ParaMEDMEM/GlobalizerMesh.hxx [deleted file]
src/ParaMEDMEM/InterpolationMatrix.cxx
src/ParaMEDMEM/InterpolationMatrix.hxx
src/ParaMEDMEM/IntersectionDEC.cxx
src/ParaMEDMEM/Makefile.am
src/ParaMEDMEM/MxN_Mapping.cxx
src/ParaMEDMEM/MxN_Mapping.hxx
src/ParaMEDMEM/ParaFIELD.cxx
src/ParaMEDMEM/ParaFIELD.hxx
src/ParaMEDMEM/ParaMESH.cxx
src/ParaMEDMEM/ParaMESH.hxx
src/ParaMEDMEM/Test/ParaMEDMEMTest_IntersectionDEC.cxx

diff --git a/src/ParaMEDMEM/ElementLocator.cxx b/src/ParaMEDMEM/ElementLocator.cxx
index 16d2ef66c7c09c77f50fd915073fe69b0ceaaf2c..7a7237fb9b774f5c9094902f5d23614e83ae59c4 100644 (file)
@@ -46,6 +46,7 @@ namespace ParaMEDMEM
   { 
     _union_group = _local_group.fuse(distant_group);
     _computeBoundingBoxes();
+    _comm=getCommunicator();
   }
 
   ElementLocator::~ElementLocator()
@@ -60,6 +61,11 @@ namespace ParaMEDMEM
     return group->getComm();
   }
 
+  NatureOfField ElementLocator::getLocalNature() const
+  {
+    return _local_para_field.getField()->getNature();
+  }
+
   // ==========================================================================
   // Procedure for exchanging mesh between a distant proc and a local processor
   // param idistantrank  proc id on distant group
@@ -91,25 +97,6 @@ namespace ParaMEDMEM
     
     if(send_mesh)
       send_mesh->decrRef();
-#if 0
-    int* distant_ids_send=0;
-    //send_mesh contains null pointer if elems is empty
-    MEDCouplingPointSet* send_mesh = _local_cell_mesh->buildPartOfMySelf(&elems[0],&elems[elems.size()],false);
-    // Constituting an array containing the ids of the elements that are 
-    // going to be sent to the distant subdomain.
-    // This array  enables the correct redistribution of the data when the
-    // interpolated field is transmitted to the target array
-
-    if (elems.size()>0)
-      {
-        distant_ids_send = new int[elems.size()];
-        std::copy(elems.begin(),elems.end(),distant_ids_send);
-      }
-    _exchangeMesh(send_mesh, distant_mesh, idistantrank, distant_ids_send, distant_ids);
-    delete[] distant_ids_send;
-    if(send_mesh)
-      send_mesh->decrRef();
-#endif
   }
 
   void ElementLocator::exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth)
@@ -253,4 +240,399 @@ namespace ParaMEDMEM
     v1Distant->decrRef();
     v2Distant->decrRef();
   }
+  
+  /*!
+   * connected with ElementLocator::sendPolicyToWorkingSideL
+   */
+  void ElementLocator::recvPolicyFromLazySideW(std::vector<int>& policy)
+  {
+    policy.resize(_distant_proc_ids.size());
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int toRecv;
+        comm.recv((void *)&toRecv,1,MPI_INT,*iter,1120,*_comm,&status);
+        policy[procId]=toRecv;
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvFromWorkingSideL
+   */
+  void ElementLocator::sendSumToLazySideW(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& eltIds=distantLocEltIds[procId];
+        const vector<double>& valued=partialSumRelToDistantIds[procId];
+        int lgth=eltIds.size();
+        comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm);
+        comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1115,*_comm);
+        comm.send((void *)&valued[0],lgth,MPI_DOUBLE,*iter,1116,*_comm);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::sendToWorkingSideL
+   */
+  void ElementLocator::recvSumFromLazySideW(std::vector< std::vector<double> >& globalSumRelToDistantIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        std::vector<double>& vec=globalSumRelToDistantIds[procId];
+        comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvLocalIdsFromWorkingSideL
+   */
+  void ElementLocator::sendLocalIdsToLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& eltIds=distantLocEltIds[procId];
+        int lgth=eltIds.size();
+        comm.send(&lgth,1,MPI_INT,*iter,1121,*_comm);
+        comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1122,*_comm);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::sendGlobalIdsToWorkingSideL
+   */
+  void ElementLocator::recvGlobalIdsFromLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds, std::vector< std::vector<int> >& globalIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    globalIds.resize(_distant_proc_ids.size());
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& vec=distantLocEltIds[procId];
+        std::vector<int>& global=globalIds[procId];
+        global.resize(vec.size());
+        comm.recv(&global[0],vec.size(),MPI_INT,*iter,1123,*_comm,&status);
+      }
+  }
+  
+  /*!
+   * connected with ElementLocator::sendCandidatesGlobalIdsToWorkingSideL
+   */
+  void ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector<int> >& globalIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    globalIds.resize(_distant_proc_ids.size());
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        std::vector<int>& global=globalIds[procId];
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1132,*_comm,&status);
+        global.resize(lgth);
+        comm.recv(&global[0],lgth,MPI_INT,*iter,1133,*_comm,&status);
+      }
+  }
+  
+  /*!
+   * connected with ElementLocator::recvSumFromWorkingSideL
+   */
+  void ElementLocator::sendPartialSumToLazyProcsW(const std::vector<int>& distantGlobIds, const std::vector<double>& sum)
+  {
+    int procId=0;
+    CommInterface comm;
+    int lgth=distantGlobIds.size();
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        comm.send(&lgth,1,MPI_INT,*iter,1124,*_comm);
+        comm.send((void*)&distantGlobIds[0],lgth,MPI_INT,*iter,1125,*_comm);
+        comm.send((void*)&sum[0],lgth,MPI_DOUBLE,*iter,1126,*_comm);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvCandidatesForAddElementsL
+   */
+  void ElementLocator::sendCandidatesForAddElementsW(const std::vector<int>& distantGlobIds)
+  {
+    int procId=0;
+    CommInterface comm;
+    int lgth=distantGlobIds.size();
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        comm.send(&lgth,1,MPI_INT,*iter,1128,*_comm);
+        comm.send((void*)&distantGlobIds[0],lgth,MPI_INT,*iter,1129,*_comm);
+      }
+  }
+  
+  /*!
+   * connected with ElementLocator::sendAddElementsToWorkingSideL
+   */
+  void ElementLocator::recvAddElementsFromLazyProcsW(std::vector<std::vector<int> >& elementsToAdd)
+  {
+    int procId=0;
+    CommInterface comm;
+    MPI_Status status;
+    int lgth=_distant_proc_ids.size();
+    elementsToAdd.resize(lgth);
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int locLgth;
+        std::vector<int>& eltToFeed=elementsToAdd[procId];
+        comm.recv(&locLgth,1,MPI_INT,*iter,1130,*_comm,&status);
+        eltToFeed.resize(locLgth);
+        comm.recv(&eltToFeed[0],locLgth,MPI_INT,*iter,1131,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvPolicyFromLazySideW
+   */
+  int ElementLocator::sendPolicyToWorkingSideL()
+  {
+    CommInterface comm;
+    int toSend;
+    DataArrayInt *isCumulative=_local_para_field.returnCumulativeGlobalNumbering();
+    if(isCumulative)
+      {
+        toSend=CUMULATIVE_POLICY;
+        isCumulative->decrRef();
+      }
+    else
+      toSend=NO_POST_TREATMENT_POLICY;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++)
+      comm.send(&toSend,1,MPI_INT,*iter,1120,*_comm);
+    return toSend;
+  }
+
+  /*!
+   * connected with ElementLocator::sendSumToLazySideW
+   */
+  void ElementLocator::recvFromWorkingSideL()
+  {
+    _values_added.resize(_local_para_field.getField()->getNumberOfTuples());
+    int procId=0;
+    CommInterface comm;
+    _ids_per_working_proc.resize(_distant_proc_ids.size());
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status);
+        vector<int>& ids=_ids_per_working_proc[procId];
+        ids.resize(lgth);
+        vector<double> values(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status);
+        comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status);
+        for(int i=0;i<lgth;i++)
+          _values_added[ids[i]]+=values[i];
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvSumFromLazySideW
+   */
+  void ElementLocator::sendToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double> valsToSend(ids.size());
+        vector<double>::iterator iter3=valsToSend.begin();
+        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++)
+          *iter3=_values_added[*iter2];
+        comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm);
+        //ids.clear();
+      }
+    //_ids_per_working_proc.clear();
+  }
+
+  /*!
+   * connected with ElementLocator::sendLocalIdsToLazyProcsW
+   */
+  void ElementLocator::recvLocalIdsFromWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    _ids_per_working_proc.resize(_distant_proc_ids.size());
+    MPI_Status status;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int lgth;
+        vector<int>& ids=_ids_per_working_proc[procId];
+        comm.recv(&lgth,1,MPI_INT,*iter,1121,*_comm,&status);
+        ids.resize(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1122,*_comm,&status);
+      }
+  }
+
+  /*!
+   * connected with ElementLocator::recvGlobalIdsFromLazyProcsW
+   */
+  void ElementLocator::sendGlobalIdsToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& ids=_ids_per_working_proc[procId];
+        vector<int> valsToSend(ids.size());
+        vector<int>::iterator iter1=valsToSend.begin();
+        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
+          *iter1=globalIdsC[*iter2];
+        comm.send(&valsToSend[0],ids.size(),MPI_INT,*iter,1123,*_comm);
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
+
+  /*!
+   * connected with ElementLocator::sendPartialSumToLazyProcsW
+   */
+  void ElementLocator::recvSumFromWorkingSideL()
+  {
+    int procId=0;
+    int wProcSize=_distant_proc_ids.size();
+    CommInterface comm;
+    _ids_per_working_proc.resize(wProcSize);
+    _values_per_working_proc.resize(wProcSize);
+    MPI_Status status;
+    std::map<int,double> sums;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1124,*_comm,&status);
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double>& vals=_values_per_working_proc[procId];
+        ids.resize(lgth);
+        vals.resize(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1125,*_comm,&status);
+        comm.recv(&vals[0],lgth,MPI_DOUBLE,*iter,1126,*_comm,&status);
+        vector<int>::const_iterator iter1=ids.begin();
+        vector<double>::const_iterator iter2=vals.begin();
+        for(;iter1!=ids.end();iter1++,iter2++)
+          sums[*iter1]+=*iter2;
+      }
+    //assign sum to prepare sending to working side
+    for(procId=0;procId<wProcSize;procId++)
+      {
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double>& vals=_values_per_working_proc[procId];
+        vector<int>::const_iterator iter1=ids.begin();
+        vector<double>::iterator iter2=vals.begin();
+        for(;iter1!=ids.end();iter1++,iter2++)
+          *iter2=sums[*iter1];
+        ids.clear();
+      }
+  }
+
+  /*!
+   * For each working proc Wi, computes and pushes into _ids_per_working_proc3,
+   * if any, the local ids of nodes that are in interaction with another lazy proc,
+   * that exist on this proc \b but have no interaction with this proc.
+   * The computation is performed here; sendAddElementsToWorkingSideL is only in charge
+   * of sending the precomputed _ids_per_working_proc3 attribute.
+   * connected with ElementLocator::sendCandidatesForAddElementsW
+   */
+  void ElementLocator::recvCandidatesForAddElementsL()
+  {
+    int procId=0;
+    int wProcSize=_distant_proc_ids.size();
+    CommInterface comm;
+    _ids_per_working_proc3.resize(wProcSize);
+    MPI_Status status;
+    std::map<int,double> sums;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    int nbElts=globalIds->getNumberOfTuples();
+    std::set<int> globalIdsS(globalIdsC,globalIdsC+nbElts);
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& ids0=_ids_per_working_proc[procId];
+        int lgth0=ids0.size();
+        std::set<int> elts0;
+        for(int i=0;i<lgth0;i++)
+          elts0.insert(globalIdsC[ids0[i]]);
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1128,*_comm,&status);
+        vector<int> ids(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1129,*_comm,&status);
+        set<int> ids1(ids.begin(),ids.end());
+        ids.clear();
+        set<int> tmp5,tmp6;
+        set_intersection(globalIdsS.begin(),globalIdsS.end(),ids1.begin(),ids1.end(),inserter(tmp5,tmp5.begin()));
+        set_difference(tmp5.begin(),tmp5.end(),elts0.begin(),elts0.end(),inserter(tmp6,tmp6.begin()));
+        std::vector<int>& ids2=_ids_per_working_proc3[procId];
+        ids2.resize(tmp6.size());
+        std::copy(tmp6.begin(),tmp6.end(),ids2.begin());
+        //global->local
+        for(std::vector<int>::iterator iter2=ids2.begin();iter2!=ids2.end();iter2++)
+          *iter2=std::find(globalIdsC,globalIdsC+nbElts,*iter2)-globalIdsC;
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
+
+  /*!
+   * connected with ElementLocator::recvAddElementsFromLazyProcsW
+   */
+  void ElementLocator::sendAddElementsToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& vals=_ids_per_working_proc3[procId];
+        int size=vals.size();
+        comm.send((void *)&size,1,MPI_INT,*iter,1130,*_comm);
+        comm.send((void *)&vals[0],size,MPI_INT,*iter,1131,*_comm);
+      }
+  }
+
+  /*!
+   * This method sends to each working side proc Wi only the nodes in interaction with Wi \b and located on the boundary, to reduce the number of candidates exchanged.
+   * connected with ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW
+   */
+  void ElementLocator::sendCandidatesGlobalIdsToWorkingSideL()
+  { 
+    int procId=0;
+    CommInterface comm;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    std::vector<int> candidates;
+    _local_para_field.getSupport()->getCellMesh()->findBoundaryNodes(candidates);
+    for(std::vector<int>::iterator iter1=candidates.begin();iter1!=candidates.end();iter1++)
+      (*iter1)=globalIdsC[*iter1];
+    std::set<int> candidatesS(candidates.begin(),candidates.end());
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& ids=_ids_per_working_proc[procId];
+        vector<int> valsToSend(ids.size());
+        vector<int>::iterator iter1=valsToSend.begin();
+        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
+          *iter1=globalIdsC[*iter2];
+        std::set<int> tmp2(valsToSend.begin(),valsToSend.end());
+        std::vector<int> tmp3;
+        set_intersection(candidatesS.begin(),candidatesS.end(),tmp2.begin(),tmp2.end(),std::back_insert_iterator< std::vector<int> >(tmp3));
+        int lgth=tmp3.size();
+        comm.send(&lgth,1,MPI_INT,*iter,1132,*_comm);
+        comm.send(&tmp3[0],lgth,MPI_INT,*iter,1133,*_comm);
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
 }
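The new method names carry a W/L suffix: W methods run on the working (computation) side, L methods on the lazy side, and each "connected with" pair posts MPI calls on the same tags (1120 for the policy, 1114-1116 for the partial sums, 1117 for the reply, 1121-1133 for the id exchanges). A minimal sketch of how a caller is expected to drive the pairs in lockstep; the driver function and the isWorkingSide flag are illustrative assumptions, not part of this commit:

    #include <vector>
    #include "ElementLocator.hxx"

    // Hypothetical driver: each working-side call below must be matched by the
    // lazy-side call named in its comment, since both use the same MPI tags.
    void syncDenominators(ParaMEDMEM::ElementLocator& locator, bool isWorkingSide)
    {
      if(isWorkingSide)
        {
          std::vector<int> policy;
          locator.recvPolicyFromLazySideW(policy);   // <- sendPolicyToWorkingSideL, tag 1120
          // ...compute partial row sums per lazy proc, then:
          // locator.sendSumToLazySideW(ids,sums);   // -> recvFromWorkingSideL, tags 1114-1116
          // locator.recvSumFromLazySideW(sums);     // <- sendToWorkingSideL, tag 1117
        }
      else
        {
          locator.sendPolicyToWorkingSideL();        // tag 1120
          locator.recvFromWorkingSideL();            // tags 1114-1116
          locator.sendToWorkingSideL();              // tag 1117
        }
    }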
diff --git a/src/ParaMEDMEM/ElementLocator.hxx b/src/ParaMEDMEM/ElementLocator.hxx
index 1acbf84f60b294b79412162178cc2418f6cea6ac..1dbf51a71eed6d31ed1c457ea79eee9af88c91a4 100644 (file)
@@ -20,6 +20,7 @@
 #define __ELEMENTLOCATOR_HXX__
 
 #include "InterpolationOptions.hxx"
+#include "MEDCouplingNatureOfField.hxx"
 
 #include <mpi.h>
 #include <vector>
@@ -46,6 +47,30 @@ namespace ParaMEDMEM
     void exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth);
     const std::vector<int>& getDistantProcIds() const { return _distant_proc_ids; }
     const MPI_Comm *getCommunicator() const;
+    NatureOfField getLocalNature() const;
+    //Working side methods
+    void recvPolicyFromLazySideW(std::vector<int>& policy);
+    void sendSumToLazySideW(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds);
+    void recvSumFromLazySideW(std::vector< std::vector<double> >& globalSumRelToDistantIds);
+    void sendCandidatesForAddElementsW(const std::vector<int>& distantGlobIds);
+    void recvAddElementsFromLazyProcsW(std::vector<std::vector<int> >& elementsToAdd);
+    //
+    void sendLocalIdsToLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds);
+    void recvGlobalIdsFromLazyProcsW(const std::vector< std::vector<int> >& distantLocEltIds, std::vector< std::vector<int> >& globalIds);
+    void recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector<int> >& globalIds);
+    void sendPartialSumToLazyProcsW(const std::vector<int>& distantGlobIds, const std::vector<double>& sum);
+    //Lazy side methods
+    int sendPolicyToWorkingSideL();
+    void recvFromWorkingSideL();
+    void sendToWorkingSideL();
+    //
+    void recvLocalIdsFromWorkingSideL();
+    void sendGlobalIdsToWorkingSideL();
+    void sendCandidatesGlobalIdsToWorkingSideL();
+    //
+    void recvSumFromWorkingSideL();
+    void recvCandidatesForAddElementsL();
+    void sendAddElementsToWorkingSideL();
   private:
     void _computeBoundingBoxes();
     bool _intersectsBoundingBox(int irank);
@@ -63,6 +88,15 @@ namespace ParaMEDMEM
     const ProcessorGroup& _local_group;
     ProcessorGroup* _union_group;
     std::vector<int> _distant_proc_ids;
+    const MPI_Comm *_comm;
+    //Attributes only used by lazy side
+    std::vector<double> _values_added;
+    std::vector< std::vector<int> > _ids_per_working_proc;
+    std::vector< std::vector<int> > _ids_per_working_proc3;
+    std::vector< std::vector<double> > _values_per_working_proc;
+  public:
+    static const int CUMULATIVE_POLICY=3;
+    static const int NO_POST_TREATMENT_POLICY=7;
   };
 
 }
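On the lazy side the new attributes form a small accumulation state: _values_added is indexed by local tuple id, while _ids_per_working_proc[p] remembers which local ids working proc p contributed, so that sendToWorkingSideL can scatter the accumulated sums back in the order each proc expects. A condensed sketch of that bookkeeping, with assumed standalone names (valuesAdded, idsPerWorkingProc):

    #include <cstddef>
    #include <vector>

    // Sketch of the accumulation recvFromWorkingSideL performs: contributions to
    // the same local id from several working procs add up, and the ids are kept
    // so the summed values can be sent back per proc afterwards (tag 1117).
    void accumulate(std::vector<double>& valuesAdded,
                    std::vector<std::vector<int> >& idsPerWorkingProc,
                    int procId,
                    const std::vector<int>& ids,
                    const std::vector<double>& vals)
    {
      idsPerWorkingProc[procId]=ids;       // remembered for the reply
      for(std::size_t i=0;i<ids.size();i++)
        valuesAdded[ids[i]]+=vals[i];      // cross-proc sum per local id
    }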
diff --git a/src/ParaMEDMEM/GlobalizerMesh.cxx b/src/ParaMEDMEM/GlobalizerMesh.cxx
deleted file mode 100644 (file)
index 9092158..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-//  Copyright (C) 2007-2008  CEA/DEN, EDF R&D
-//
-//  This library is free software; you can redistribute it and/or
-//  modify it under the terms of the GNU Lesser General Public
-//  License as published by the Free Software Foundation; either
-//  version 2.1 of the License.
-//
-//  This library is distributed in the hope that it will be useful,
-//  but WITHOUT ANY WARRANTY; without even the implied warranty of
-//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-//  Lesser General Public License for more details.
-//
-//  You should have received a copy of the GNU Lesser General Public
-//  License along with this library; if not, write to the Free Software
-//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
-//
-//  See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "GlobalizerMesh.hxx"
-#include "MEDCouplingFieldDouble.hxx"
-#include "CommInterface.hxx"
-
-using namespace std;
-
-namespace ParaMEDMEM
-{
-  GlobalizerMesh::GlobalizerMesh(const MPI_Comm *comm, MEDCouplingFieldDouble *localField):_comm(comm),_local_field(localField)
-  {
-    if(_local_field)
-      _local_field->incrRef();
-  }
-  
-  GlobalizerMesh::~GlobalizerMesh()
-  {
-    if(_local_field)
-      _local_field->decrRef();
-  }
-
-  NatureOfField GlobalizerMesh::getLocalNature() const
-  {
-    return _local_field->getNature();
-  }
-
-  GlobalizerMeshWorkingSide::GlobalizerMeshWorkingSide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField,
-                                                       const std::string& distantMeth, const std::vector<int>& lazyProcs):GlobalizerMesh(comm,localField),_distant_method(distantMeth),_lazy_procs(lazyProcs)
-  {
-  }
-
-  GlobalizerMeshWorkingSide::~GlobalizerMeshWorkingSide()
-  {
-  }
-
-  const std::vector<int>& GlobalizerMeshWorkingSide::getProcIdsInInteraction() const
-  {
-    return _lazy_procs;
-  }
-
-  /*!
-   * connected with GlobalizerMeshLazySide::recvFromWorkingSide
-   */
-  void GlobalizerMeshWorkingSide::sendSumToLazySide(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds)
-  {
-    int procId=0;
-    CommInterface comm;
-    for(vector<int>::const_iterator iter=_lazy_procs.begin();iter!=_lazy_procs.end();iter++,procId++)
-      {
-        const vector<int>& eltIds=distantLocEltIds[procId];
-        const vector<double>& valued=partialSumRelToDistantIds[procId];
-        int lgth=eltIds.size();
-        comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm);
-        comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1115,*_comm);
-        comm.send((void *)&valued[0],lgth,MPI_DOUBLE,*iter,1116,*_comm);
-      }
-  }
-
-  /*!
-   * connected with GlobalizerMeshLazySide::sendToWorkingSide
-   */
-  void GlobalizerMeshWorkingSide::recvSumFromLazySide(std::vector< std::vector<double> >& globalSumRelToDistantIds)
-  {
-    int procId=0;
-    CommInterface comm;
-    MPI_Status status;
-    for(vector<int>::const_iterator iter=_lazy_procs.begin();iter!=_lazy_procs.end();iter++,procId++)
-      {
-        std::vector<double>& vec=globalSumRelToDistantIds[procId];
-        comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status);
-      }
-  }
-
-  GlobalizerMeshLazySide::GlobalizerMeshLazySide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField, const std::vector<int>& computeProcs):GlobalizerMesh(comm,localField),_compute_procs(computeProcs)
-  {
-  }
-
-  GlobalizerMeshLazySide::~GlobalizerMeshLazySide()
-  {
-  }
-
-  /*!
-   * connected with GlobalizerMeshWorkingSide::sendSumToLazySide
-   */
-  void GlobalizerMeshLazySide::recvFromWorkingSide()
-  {
-    _values_added.resize(_local_field->getNumberOfTuples());
-    int procId=0;
-    CommInterface comm;
-    _ids_per_working_proc.resize(_compute_procs.size());
-    MPI_Status status;
-    for(vector<int>::const_iterator iter=_compute_procs.begin();iter!=_compute_procs.end();iter++,procId++)
-      {
-        int lgth;
-        comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status);
-        vector<int>& ids=_ids_per_working_proc[procId];
-        ids.resize(lgth);
-        vector<double> values(lgth);
-        comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status);
-        comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status);
-        for(int i=0;i<lgth;i++)
-          _values_added[ids[i]]+=values[i];
-      }
-  }
-
-  /*!
-   * connected with GlobalizerMeshWorkingSide::recvSumFromLazySide
-   */
-  void GlobalizerMeshLazySide::sendToWorkingSide()
-  {
-    int procId=0;
-    CommInterface comm;
-    for(vector<int>::const_iterator iter=_compute_procs.begin();iter!=_compute_procs.end();iter++,procId++)
-      {
-        vector<int>& ids=_ids_per_working_proc[procId];
-        vector<double> valsToSend(ids.size());
-        vector<double>::iterator iter3=valsToSend.begin();
-        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++)
-          *iter3=_values_added[*iter2];
-        comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm);
-        ids.clear();
-      }
-    _ids_per_working_proc.clear();
-  }
-}
-
diff --git a/src/ParaMEDMEM/GlobalizerMesh.hxx b/src/ParaMEDMEM/GlobalizerMesh.hxx
deleted file mode 100644 (file)
index 3436545..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-//  Copyright (C) 2007-2008  CEA/DEN, EDF R&D
-//
-//  This library is free software; you can redistribute it and/or
-//  modify it under the terms of the GNU Lesser General Public
-//  License as published by the Free Software Foundation; either
-//  version 2.1 of the License.
-//
-//  This library is distributed in the hope that it will be useful,
-//  but WITHOUT ANY WARRANTY; without even the implied warranty of
-//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-//  Lesser General Public License for more details.
-//
-//  You should have received a copy of the GNU Lesser General Public
-//  License along with this library; if not, write to the Free Software
-//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
-//
-//  See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#ifndef __GLOBALIZERMESH_HXX__
-#define __GLOBALIZERMESH_HXX__
-
-#include "MEDCouplingNatureOfField.hxx"
-
-#include <mpi.h>
-#include <string>
-#include <vector>
-
-namespace ParaMEDMEM
-{
-  class MEDCouplingFieldDouble;
-
-  class GlobalizerMesh
-  {
-  protected:
-    GlobalizerMesh(const MPI_Comm *comm, MEDCouplingFieldDouble *localField);
-  public:
-    NatureOfField getLocalNature() const;
-    virtual ~GlobalizerMesh();
-  protected:
-    const MPI_Comm *_comm;
-    MEDCouplingFieldDouble *_local_field;
-  };
-
-  class GlobalizerMeshWorkingSide : public GlobalizerMesh
-  {
-  public:
-    GlobalizerMeshWorkingSide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField, const std::string& distantMeth, const std::vector<int>& lazyProcs);
-    ~GlobalizerMeshWorkingSide();
-    //
-    const std::vector<int>& getProcIdsInInteraction() const;
-    void sendSumToLazySide(const std::vector< std::vector<int> >& distantLocEltIds, const std::vector< std::vector<double> >& partialSumRelToDistantIds);
-    void recvSumFromLazySide(std::vector< std::vector<double> >& globalSumRelToDistantIds);
-  private:
-    std::string _distant_method;
-    std::vector<int> _lazy_procs;
-  };
-
-  class GlobalizerMeshLazySide : public GlobalizerMesh
-  {
-  public:
-    GlobalizerMeshLazySide(const MPI_Comm *comm, MEDCouplingFieldDouble *localField, const std::vector<int>& computeProcs);
-    ~GlobalizerMeshLazySide();
-    void recvFromWorkingSide();
-    void sendToWorkingSide();
-  private:
-    std::vector<int> _compute_procs;
-    std::vector<double> _values_added;
-    std::vector< std::vector<int> > _ids_per_working_proc;
-  };
-}
-
-#endif
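The deleted GlobalizerMesh hierarchy is absorbed by ElementLocator; the method bodies move over essentially unchanged, only renamed with the W/L suffix convention. The correspondence, as read from this commit:

    // old (removed)                                      new (in ElementLocator)
    // GlobalizerMesh::getLocalNature                 ->  getLocalNature
    // GlobalizerMeshWorkingSide::sendSumToLazySide   ->  sendSumToLazySideW
    // GlobalizerMeshWorkingSide::recvSumFromLazySide ->  recvSumFromLazySideW
    // GlobalizerMeshLazySide::recvFromWorkingSide    ->  recvFromWorkingSideL
    // GlobalizerMeshLazySide::sendToWorkingSide      ->  sendToWorkingSideL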
diff --git a/src/ParaMEDMEM/InterpolationMatrix.cxx b/src/ParaMEDMEM/InterpolationMatrix.cxx
index d75d5428ce502c691c0894714793e673fa5d5438..6edb3d40e1bc35706676133103803481342cf059 100644 (file)
@@ -30,7 +30,9 @@
 #include "MEDCouplingNormalizedUnstructuredMesh.txx"
 #include "InterpolationOptions.hxx"
 #include "NormalizedUnstructuredMesh.hxx"
-#include "GlobalizerMesh.hxx"
+#include "ElementLocator.hxx"
+
+#include <algorithm>
+#include <limits>
 
 // class InterpolationMatrix
 // This class enables the storage of an interpolation matrix Wij mapping 
@@ -95,7 +97,7 @@ namespace ParaMEDMEM
 
   void InterpolationMatrix::addContribution ( MEDCouplingPointSet& distant_support,
                                               int iproc_distant,
-                                              int* distant_elems,
+                                              const int* distant_elems,
                                               const std::string& srcMeth,
                                               const std::string& targetMeth)
   {
@@ -148,112 +150,135 @@ namespace ParaMEDMEM
       {
         throw INTERP_KERNEL::Exception("no interpolator exists for these mesh and space dimensions ");
       }
-  
-    int source_size=surfaces.size();
     bool needTargetSurf=isSurfaceComputationNeeded(targetMeth);
 
-    MEDCouplingFieldDouble *target_triangle_surf;
+    MEDCouplingFieldDouble *target_triangle_surf=0;
     if(needTargetSurf)
       target_triangle_surf = distant_support.getMeasureField();
+    fillDSFromVM(iproc_distant,distant_elems,surfaces,target_triangle_surf);
 
+    if(needTargetSurf)
+      target_triangle_surf->decrRef();
+  }
+
+  void InterpolationMatrix::fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map<int,double> >& values, MEDCouplingFieldDouble *surf)
+  {
     //loop over the elements to build the interpolation
     //matrix structures
+    int source_size=values.size();
     for (int ielem=0; ielem < source_size; ielem++) 
       {
-        _row_offsets[ielem+1] += surfaces[ielem].size();
-        //_source_indices.push_back(make_pair(iproc_distant, ielem));
-
-        for (map<int,double>::const_iterator iter = surfaces[ielem].begin();
-             iter != surfaces[ielem].end();
-             iter++)
+        _row_offsets[ielem+1] += values[ielem].size();
+        for(map<int,double>::const_iterator iter=values[ielem].begin();iter!=values[ielem].end();iter++)
           {
-            //surface of the target triangle
-            double surf;
-            if(needTargetSurf)
-              surf = target_triangle_surf->getIJ(iter->first,0);
-
+            int localId;
+            if(distant_elems)
+              localId=distant_elems[iter->first];
+            else
+              localId=iter->first;
             //locating the (iproc, itriangle) pair in the list of columns
-            map<pair<int,int>,int >::iterator iter2 = _col_offsets.find(make_pair(iproc_distant,iter->first));
+            map<pair<int,int>,int >::iterator iter2 = _col_offsets.find(make_pair(iproc_distant,localId));
             int col_id;
 
             if (iter2 == _col_offsets.end())
               {
                 //(iproc, itriangle) is not registered in the list
                 //of distant elements
-
                 col_id =_col_offsets.size();
-                _col_offsets.insert(make_pair(make_pair(iproc_distant,iter->first),col_id));
-                _mapping.addElementFromSource(iproc_distant,
-                                              distant_elems[iter->first]);
-                //target_volume.push_back(surf);
+                _col_offsets.insert(make_pair(make_pair(iproc_distant,localId),col_id));
+                _mapping.addElementFromSource(iproc_distant,localId);
               }
             else 
               {
                 col_id = iter2->second;
               }
-
             //the non zero coefficient is stored 
             //ielem is the row,
             //col_id is the number of the column
             //iter->second is the value of the coefficient
-            if(needTargetSurf)
-              _target_volume[ielem].push_back(surf);
+            if(surf)
+              {
+                double surface = surf->getIJ(iter->first,0);
+                _target_volume[ielem].push_back(surface);
+              }
             _coeffs[ielem].push_back(make_pair(col_id,iter->second));
           }
       }
-    if(needTargetSurf)
-      target_triangle_surf->decrRef();
   }
 
-  void InterpolationMatrix::finishContributionW(GlobalizerMeshWorkingSide& globalizer)
+  void InterpolationMatrix::serializeMe(std::vector< std::vector< std::map<int,double> > >& data1, std::vector<int>& data2) const
   {
-    NatureOfField nature=globalizer.getLocalNature();
+    data1.clear();
+    data2.clear();
+    const std::vector<std::pair<int,int> >& sendingIds=_mapping.getSendingIds();
+    std::set<int> procsS;
+    for(std::vector<std::pair<int,int> >::const_iterator iter1=sendingIds.begin();iter1!=sendingIds.end();iter1++)
+      procsS.insert((*iter1).first);
+    data1.resize(procsS.size());
+    data2.resize(procsS.size());
+    std::copy(procsS.begin(),procsS.end(),data2.begin());
+    std::map<int,int> fastProcAcc;
+    int id=0;
+    for(std::set<int>::const_iterator iter2=procsS.begin();iter2!=procsS.end();iter2++,id++)
+      fastProcAcc[*iter2]=id;
+    int nbOfSrcElt=_coeffs.size();
+    for(std::vector< std::vector< std::map<int,double> > >::iterator iter3=data1.begin();iter3!=data1.end();iter3++)
+      (*iter3).resize(nbOfSrcElt);
+    id=0;
+    for(std::vector< std::vector< std::pair<int,double> > >::const_iterator iter4=_coeffs.begin();iter4!=_coeffs.end();iter4++,id++)
+      {
+        for(std::vector< std::pair<int,double> >::const_iterator iter5=(*iter4).begin();iter5!=(*iter4).end();iter5++)
+          {
+            const std::pair<int,int>& elt=sendingIds[(*iter5).first];
+            data1[fastProcAcc[elt.first]][id][elt.second]=(*iter5).second;
+          }
+      }
+  }
+
+  void InterpolationMatrix::initialize()
+  {
+    int lgth=_coeffs.size();
+    _row_offsets.clear(); _row_offsets.resize(lgth+1);
+    _coeffs.clear(); _coeffs.resize(lgth);
+    _target_volume.clear(); _target_volume.resize(lgth);
+    _col_offsets.clear();
+    _mapping.initialize();
+  }
+
+  void InterpolationMatrix::finishContributionW(ElementLocator& elementLocator)
+  {
+    NatureOfField nature=elementLocator.getLocalNature();
     switch(nature)
       {
       case ConservativeVolumic:
-        computeConservVolDenoW(globalizer);
+        computeConservVolDenoW(elementLocator);
         break;
       case Integral:
-        computeIntegralDenoW(globalizer);
+        computeIntegralDenoW(elementLocator);
         break;
       case IntegralGlobConstraint:
-        computeGlobConstraintDenoW(globalizer);
+        computeGlobConstraintDenoW(elementLocator);
         break;
       default:
         throw INTERP_KERNEL::Exception("Not recognized nature of field. Change nature of Field.");
         break;
       }
-    /*for(map<pair<int,int>,int>::iterator iter=_col_offsets.begin();iter!=_col_offsets.end();iter++)
-          if((*iter).second==9)
-            cout << (*iter).first.first << " ** " << (*iter).first.second << endl;
-    cout << "--> " << _col_offsets[make_pair(4,74)] << endl;
-    for(vector<vector<pair<int,double> > >::iterator iter3=_coeffs.begin();iter3!=_coeffs.end();iter3++)
-      for(vector<pair<int,double> >::iterator iter4=(*iter3).begin();iter4!=(*iter3).end();iter4++)
-        if((*iter4).first==569)
-          cout << " __ " << iter3-_coeffs.begin() << "___" << (*iter4).second << endl; 
-    ostringstream st; st << "deno_" << _deno_multiply[0][0];
-    ofstream fs(st.str().c_str());
-    for(vector<vector<double> >::iterator iter1=_deno_multiply.begin();iter1!=_deno_multiply.end();iter1++)
-      {
-        for(vector<double>::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++)
-          fs << *iter2 << " ";
-        fs << endl;
-        }*/
   }
 
-  void InterpolationMatrix::finishContributionL(GlobalizerMeshLazySide& globalizer)
+  void InterpolationMatrix::finishContributionL(ElementLocator& elementLocator)
   {
-    NatureOfField nature=globalizer.getLocalNature();
+    NatureOfField nature=elementLocator.getLocalNature();
     switch(nature)
       {
       case ConservativeVolumic:
-        computeConservVolDenoL(globalizer);
+        computeConservVolDenoL(elementLocator);
         break;
       case Integral:
-        computeIntegralDenoL(globalizer);
+        computeIntegralDenoL(elementLocator);
         break;
       case IntegralGlobConstraint:
-        computeGlobConstraintDenoL(globalizer);
+        //this is not a bug: handled exactly like ConservativeVolumic
+        computeConservVolDenoL(elementLocator);
         break;
       default:
         throw INTERP_KERNEL::Exception("Not recognized nature of field. Change nature of Field.");
@@ -261,19 +286,39 @@ namespace ParaMEDMEM
       }
   }
   
-  void InterpolationMatrix::computeConservVolDenoW(GlobalizerMeshWorkingSide& globalizer)
+  void InterpolationMatrix::computeConservVolDenoW(ElementLocator& elementLocator)
   {
-    computeGlobalRowSum(globalizer,_deno_multiply);
     computeGlobalColSum(_deno_reverse_multiply);
+    computeGlobalRowSum(elementLocator,_deno_multiply,_deno_reverse_multiply);
   }
   
-  void InterpolationMatrix::computeConservVolDenoL(GlobalizerMeshLazySide& globalizer)
+  void InterpolationMatrix::computeConservVolDenoL(ElementLocator& elementLocator)
   {
-    globalizer.recvFromWorkingSide();
-    globalizer.sendToWorkingSide();
+    int pol1=elementLocator.sendPolicyToWorkingSideL();
+    if(pol1==ElementLocator::NO_POST_TREATMENT_POLICY)
+      {
+        elementLocator.recvFromWorkingSideL();
+        elementLocator.sendToWorkingSideL();
+      }
+    else if(pol1==ElementLocator::CUMULATIVE_POLICY)
+      {
+        //ask the lazy side to deduce ids possibly missing on the working side and to send them back.
+        elementLocator.recvLocalIdsFromWorkingSideL();
+        elementLocator.sendCandidatesGlobalIdsToWorkingSideL();
+        elementLocator.recvCandidatesForAddElementsL();
+        elementLocator.sendAddElementsToWorkingSideL();
+        //The working side has updated its possibly missing ids; now update its global ids with the lazy side procs' contribution.
+        elementLocator.recvLocalIdsFromWorkingSideL();
+        elementLocator.sendGlobalIdsToWorkingSideL();
+        //like no post treatment
+        elementLocator.recvFromWorkingSideL();
+        elementLocator.sendToWorkingSideL();
+      }
+    else
+      throw INTERP_KERNEL::Exception("Not managed policy detected on lazy side : not implemented !");
   }
 
-  void InterpolationMatrix::computeIntegralDenoW(GlobalizerMeshWorkingSide& globalizer)
+  void InterpolationMatrix::computeIntegralDenoW(ElementLocator& elementLocator)
   {
     MEDCouplingFieldDouble *source_triangle_surf = _source_support->getMeasureField();
     _deno_multiply.resize(_coeffs.size());
@@ -291,32 +336,58 @@ namespace ParaMEDMEM
   /*!
    * Nothing to do because surface computation is on working side.
    */
-  void InterpolationMatrix::computeIntegralDenoL(GlobalizerMeshLazySide& globalizer)
+  void InterpolationMatrix::computeIntegralDenoL(ElementLocator& elementLocator)
   {
   }
 
-  void InterpolationMatrix::computeGlobConstraintDenoW(GlobalizerMeshWorkingSide& globalizer)
+  void InterpolationMatrix::computeGlobConstraintDenoW(ElementLocator& elementLocator)
   {
     computeGlobalColSum(_deno_multiply);
-    computeGlobalRowSum(globalizer,_deno_reverse_multiply);
-  }
-
-  void InterpolationMatrix::computeGlobConstraintDenoL(GlobalizerMeshLazySide& globalizer)
-  {
-    globalizer.recvFromWorkingSide();
-    globalizer.sendToWorkingSide();
+    computeGlobalRowSum(elementLocator,_deno_reverse_multiply,_deno_multiply);
   }
 
-  void InterpolationMatrix::computeGlobalRowSum(GlobalizerMeshWorkingSide& globalizer, std::vector<std::vector<double> >& denoStrorage)
+  void InterpolationMatrix::computeGlobalRowSum(ElementLocator& elementLocator, std::vector<std::vector<double> >& denoStrorage, std::vector<std::vector<double> >& denoStrorageInv)
   {
     //stores id in distant procs sorted by lazy procs connected with
     vector< vector<int> > rowsPartialSumI;
+    //stores, for each lazy proc connected with, whether global info is available and, if so, the policy
+    vector<int> policyPartial;
     //stores the corresponding values.
     vector< vector<double> > rowsPartialSumD;
-    computeLocalRowSum(globalizer.getProcIdsInInteraction(),rowsPartialSumI,rowsPartialSumD);
-    globalizer.sendSumToLazySide(rowsPartialSumI,rowsPartialSumD);
-    globalizer.recvSumFromLazySide(rowsPartialSumD);
-    divideByGlobalRowSum(globalizer.getProcIdsInInteraction(),rowsPartialSumI,rowsPartialSumD,denoStrorage);
+    elementLocator.recvPolicyFromLazySideW(policyPartial);
+    int pol1=mergePolicies(policyPartial);
+    if(pol1==ElementLocator::NO_POST_TREATMENT_POLICY)
+      {
+        computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD);
+        elementLocator.sendSumToLazySideW(rowsPartialSumI,rowsPartialSumD);
+        elementLocator.recvSumFromLazySideW(rowsPartialSumD);
+      }
+    else if(pol1==ElementLocator::CUMULATIVE_POLICY)
+      {
+        //updateWithNewAdditionnalElements(addingElements);
+        //stores, for each lazy proc connected with, the ids in global mode if available (regarding policyPartial). This array has exactly the size of rowsPartialSumI,
+        //if policyPartial holds CUMULATIVE_POLICY in each entry.
+        vector< vector<int> > globalIdsPartial;
+        computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD);
+        elementLocator.sendLocalIdsToLazyProcsW(rowsPartialSumI);
+        elementLocator.recvCandidatesGlobalIdsFromLazyProcsW(globalIdsPartial);
+        std::vector< std::vector<int> > addingElements;
+        findAdditionnalElements(elementLocator,addingElements,rowsPartialSumI,globalIdsPartial);
+        addGhostElements(elementLocator.getDistantProcIds(),addingElements);
+        rowsPartialSumI.clear();
+        globalIdsPartial.clear();
+        computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD);
+        elementLocator.sendLocalIdsToLazyProcsW(rowsPartialSumI);
+        elementLocator.recvGlobalIdsFromLazyProcsW(rowsPartialSumI,globalIdsPartial);
+        //
+        elementLocator.sendSumToLazySideW(rowsPartialSumI,rowsPartialSumD);
+        elementLocator.recvSumFromLazySideW(rowsPartialSumD);
+        mergeRowSum3(globalIdsPartial,rowsPartialSumD);
+        mergeCoeffs(elementLocator.getDistantProcIds(),rowsPartialSumI,globalIdsPartial,denoStrorageInv);
+      }
+    else
+      throw INTERP_KERNEL::Exception("Not managed policy detected : not implemented !");
+    divideByGlobalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD,denoStrorage);
   }
 
   /*!
@@ -345,15 +416,225 @@ namespace ParaMEDMEM
         resPerProcI[id].push_back((*iter2).second);
         resPerProcD[id].push_back(res[iter2-mapping.begin()]);
       }
-    /*
-    for(map<pair<int,int>, int >::const_iterator iter2=_col_offsets.begin();iter2!=_col_offsets.end();iter2++)
+  }
+
+  /*!
+   * This method is only usable when CUMULATIVE_POLICY is detected. It finds element ids (typically nodes) on the lazy side that
+   * are not present in the columns of 'this' but that should be, given the cumulative merge of elements based on their global ids.
+   */
+  void InterpolationMatrix::findAdditionnalElements(ElementLocator& elementLocator, std::vector<std::vector<int> >& elementsToAdd,
+                                                    const std::vector<std::vector<int> >& resPerProcI, const std::vector<std::vector<int> >& globalIdsPartial)
+  {
+    std::set<int> globalIds;
+    int nbLazyProcs=globalIdsPartial.size();
+    for(int i=0;i<nbLazyProcs;i++)
+      globalIds.insert(globalIdsPartial[i].begin(),globalIdsPartial[i].end());
+    std::vector<int> tmp(globalIds.size());
+    std::copy(globalIds.begin(),globalIds.end(),tmp.begin());
+    globalIds.clear();
+    elementLocator.sendCandidatesForAddElementsW(tmp);
+    elementLocator.recvAddElementsFromLazyProcsW(elementsToAdd);
+  }
+
+  void InterpolationMatrix::addGhostElements(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& elementsToAdd)
+  {
+    std::vector< std::vector< std::map<int,double> > > data1;
+    std::vector<int> data2;
+    serializeMe(data1,data2);
+    initialize();
+    int nbOfDistProcs=distantProcs.size();
+    for(int i=0;i<nbOfDistProcs;i++)
       {
-        std::pair<set<int>::iterator,bool> isIns=procsSet.insert((*iter2).first.first);
-        if(isIns.second)
-          id=std::find(distantProcs.begin(),distantProcs.end(),(*iter2).first.first)-distantProcs.begin();
-        resPerProcI[id].push_back((*iter2).first.second);
-        resPerProcD[id].push_back(res[(*iter2).second]);
-        }*/
+        int procId=distantProcs[i];
+        const std::vector<int>& eltsForThisProc=elementsToAdd[i];
+        if(!eltsForThisProc.empty())
+          {
+            std::vector<int>::iterator iter1=std::find(data2.begin(),data2.end(),procId);
+            std::map<int,double> *toFeed=0;
+            if(iter1!=data2.end())
+              {//to test
+                int rank=iter1-data2.begin();
+                toFeed=&(data1[rank].back());
+              }
+            else
+              {
+                iter1=std::lower_bound(data2.begin(),data2.end(),procId);
+                int rank=iter1-data2.begin();
+                data2.insert(iter1,procId);
+                std::vector< std::map<int,double> > tmp(data1.front().size());
+                data1.insert(data1.begin()+rank,tmp);
+                toFeed=&(data1[rank].back());
+              }
+            for(std::vector<int>::const_iterator iter2=eltsForThisProc.begin();iter2!=eltsForThisProc.end();iter2++)
+              (*toFeed)[*iter2]=0.;
+          }
+      }
+    //
+    nbOfDistProcs=data2.size();
+    for(int j=0;j<nbOfDistProcs;j++)
+      fillDSFromVM(data2[j],0,data1[j],0);
+  }
+
+  int InterpolationMatrix::mergePolicies(const std::vector<int>& policyPartial)
+  {
+    if(policyPartial.empty())
+      return ElementLocator::NO_POST_TREATMENT_POLICY;
+    int ref=policyPartial[0];
+    std::vector<int>::const_iterator iter1=std::find_if(policyPartial.begin(),policyPartial.end(),std::bind2nd(std::not_equal_to<int>(),ref));
+    if(iter1!=policyPartial.end())
+      {
+        std::ostringstream msg; msg << "Incompatible policies among lazy procs : first mismatch at proc # " << iter1-policyPartial.begin();
+        throw INTERP_KERNEL::Exception(msg.str().c_str());
+      }
+    return ref;
+  }
+
+  /*!
+   * This method introduces global-id awareness into the computed 'rowsPartialSumD'.
+   * As a precondition, rowsPartialSumD.size()==policyPartial.size()==globalIdsPartial.size(), and for each i in [0;rowsPartialSumD.size()) rowsPartialSumD[i].size()==globalIdsPartial[i].size().
+   * @param rowsPartialSumD : in parameter, partial row sums computed for each lazy proc connected with.
+   * @param globalIdsPartial : in parameter, the global numbering of the elements connected with.
+   * @param globalIdsLazySideInteraction : out parameter, built from all global ids of the lazy procs connected with.
+   * @param sumCorresponding : out parameter, relative to 'globalIdsLazySideInteraction'.
+   */
+  void InterpolationMatrix::mergeRowSum(const std::vector< std::vector<double> >& rowsPartialSumD, const std::vector< std::vector<int> >& globalIdsPartial,
+                                        std::vector<int>& globalIdsLazySideInteraction, std::vector<double>& sumCorresponding)
+  {
+    std::map<int,double> sumToReturn;
+    int nbLazyProcs=rowsPartialSumD.size();
+    for(int i=0;i<nbLazyProcs;i++)
+      {
+        const std::vector<double>& rowSumOfP=rowsPartialSumD[i];
+        const std::vector<int>& globalIdsOfP=globalIdsPartial[i];
+        std::vector<double>::const_iterator iter1=rowSumOfP.begin();
+        std::vector<int>::const_iterator iter2=globalIdsOfP.begin();
+        for(;iter1!=rowSumOfP.end();iter1++,iter2++)
+          sumToReturn[*iter2]+=*iter1;
+      }
+    //
+    int lgth=sumToReturn.size();
+    globalIdsLazySideInteraction.resize(lgth);
+    sumCorresponding.resize(lgth);
+    std::vector<int>::iterator iter3=globalIdsLazySideInteraction.begin();
+    std::vector<double>::iterator iter4=sumCorresponding.begin();
+    for(std::map<int,double>::const_iterator iter5=sumToReturn.begin();iter5!=sumToReturn.end();iter5++,iter3++,iter4++)
+      {
+        *iter3=(*iter5).first;
+        *iter4=(*iter5).second;
+      }
+  }
+
+  /*!
+   * This method simply reorganizes the result contained in 'sumCorresponding', computed by the lazy side, into 'rowsPartialSumD' with the help of 'globalIdsPartial' and 'globalIdsLazySideInteraction'.
+   *
+   * @param globalIdsPartial : in parameter, global ids sorted by lazy procs
+   * @param rowsPartialSumD : in/out parameter, with exactly the same size as 'globalIdsPartial'
+   * @param globalIdsLazySideInteraction : in parameter, all the global ids of every lazy proc in interaction
+   * @param sumCorresponding : in parameter with the same size as 'globalIdsLazySideInteraction', storing the sums corresponding to those global ids
+   */
+  void InterpolationMatrix::mergeRowSum2(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD,
+                                         const std::vector<int>& globalIdsLazySideInteraction, const std::vector<double>& sumCorresponding)
+  {
+    std::map<int,double> acc;
+    std::vector<int>::const_iterator iter1=globalIdsLazySideInteraction.begin();
+    std::vector<double>::const_iterator iter2=sumCorresponding.begin();
+    for(;iter1!=globalIdsLazySideInteraction.end();iter1++,iter2++)
+      acc[*iter1]=*iter2;
+    //
+    int nbLazyProcs=globalIdsPartial.size();
+    for(int i=0;i<nbLazyProcs;i++)
+      {
+        const std::vector<int>& tmp1=globalIdsPartial[i];
+        std::vector<double>& tmp2=rowsPartialSumD[i];
+        std::vector<int>::const_iterator iter3=tmp1.begin();
+        std::vector<double>::iterator iter4=tmp2.begin();
+        for(;iter3!=tmp1.end();iter3++,iter4++)
+          *iter4=acc[*iter3];
+      }
+  }
+  
+  void InterpolationMatrix::mergeRowSum3(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD)
+  {
+    std::map<int,double> sum;
+    std::vector< std::vector<int> >::const_iterator iter1=globalIdsPartial.begin();
+    std::vector< std::vector<double> >::iterator iter2=rowsPartialSumD.begin();
+    for(;iter1!=globalIdsPartial.end();iter1++,iter2++)
+      {
+        std::vector<int>::const_iterator iter3=(*iter1).begin();
+        std::vector<double>::const_iterator iter4=(*iter2).begin();
+        for(;iter3!=(*iter1).end();iter3++,iter4++)
+          sum[*iter3]+=*iter4;
+      }
+    iter2=rowsPartialSumD.begin();
+    for(iter1=globalIdsPartial.begin();iter1!=globalIdsPartial.end();iter1++,iter2++)
+      {
+        std::vector<int>::const_iterator iter3=(*iter1).begin();
+        std::vector<double>::iterator iter4=(*iter2).begin();
+        for(;iter3!=(*iter1).end();iter3++,iter4++)
+          *iter4=sum[*iter3];
+      }
+  }
+
+  /*!
+   * This method updates the this->_coeffs attribute so as to take into account nodes hidden in the _coeffs array because they share the same global number.
+   * If two distant element ids in this->_coeffs have the same global id, each of their values is replaced by the sum of the two.
+   * @param procsInInteraction input parameter : the absolute proc ids of the distant lazy procs in interaction with this proc
+   * @param rowsPartialSumI input parameter : local ids of distant lazy procs elements in interaction with
+   * @param globalIdsPartial input parameter : global ids of distant lazy procs elements in interaction with
+   */
+  void InterpolationMatrix::mergeCoeffs(const std::vector<int>& procsInInteraction, const std::vector< std::vector<int> >& rowsPartialSumI,
+                                        const std::vector<std::vector<int> >& globalIdsPartial, std::vector<std::vector<double> >& denoStrorageInv)
+  {
+    //preparing fast access structures
+    std::map<int,int> procT;
+    int localProcId=0;
+    for(std::vector<int>::const_iterator iter1=procsInInteraction.begin();iter1!=procsInInteraction.end();iter1++,localProcId++)
+      procT[*iter1]=localProcId;
+    int size=procsInInteraction.size();
+    std::vector<std::map<int,int> > localToGlobal(size);
+    for(int i=0;i<size;i++)
+      {
+        std::map<int,int>& myLocalToGlobal=localToGlobal[i];
+        const std::vector<int>& locals=rowsPartialSumI[i];
+        const std::vector<int>& globals=globalIdsPartial[i];
+        std::vector<int>::const_iterator iter3=locals.begin();
+        std::vector<int>::const_iterator iter4=globals.begin();
+        for(;iter3!=locals.end();iter3++,iter4++)
+          myLocalToGlobal[*iter3]=*iter4;
+      }
+    //
+    const vector<std::pair<int,int> >& mapping=_mapping.getSendingIds();
+    std::map<int,double> globalIdVal;
+    //accumulate for same global id on lazy part.
+    for(vector<vector<pair<int,double> > >::iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++)
+      for(vector<pair<int,double> >::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++)
+        {
+          const std::pair<int,int>& distantLocalLazyId=mapping[(*iter2).first];
+          int localLazyProcId=procT[distantLocalLazyId.first];
+          int globalDistantLazyId=localToGlobal[localLazyProcId][distantLocalLazyId.second];
+          globalIdVal[globalDistantLazyId]+=(*iter2).second;
+        }
+    //perform merge
+    std::vector<std::vector<double> >::iterator iter3=denoStrorageInv.begin();
+    for(vector<vector<pair<int,double> > >::iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++,iter3++)
+      {
+        double val=(*iter3).back();
+        (*iter3).resize((*iter1).size());
+        std::vector<double>::iterator iter4=(*iter3).begin();
+        for(vector<pair<int,double> >::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++,iter4++)
+          {
+            const std::pair<int,int>& distantLocalLazyId=mapping[(*iter2).first];
+            int localLazyProcId=procT[distantLocalLazyId.first];
+            int globalDistantLazyId=localToGlobal[localLazyProcId][distantLocalLazyId.second];
+            double newVal=globalIdVal[globalDistantLazyId];
+            if((*iter2).second!=0.)
+              (*iter4)=val*newVal/(*iter2).second;
+            else
+              (*iter4)=std::numeric_limits<double>::max();
+            (*iter2).second=newVal;
+          }
+      }
   }
 
   void InterpolationMatrix::divideByGlobalRowSum(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& resPerProcI,
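For a P1 (node-located) field the same lazy-side node may be reached through several lazy procs under different local ids but a single global id; mergeRowSum3 collapses these duplicates with a two-pass accumulation over a map. A condensed restatement with standalone names (globalIds and vals standing in for globalIdsPartial and rowsPartialSumD):

    #include <cstddef>
    #include <map>
    #include <vector>

    // Pass 1 accumulates every occurrence of a global id; pass 2 rewrites each
    // entry with the accumulated total, so all duplicates agree on the sum.
    void mergeByGlobalId(const std::vector<std::vector<int> >& globalIds,
                         std::vector<std::vector<double> >& vals)
    {
      std::map<int,double> sum;
      for(std::size_t p=0;p<globalIds.size();p++)
        for(std::size_t i=0;i<globalIds[p].size();i++)
          sum[globalIds[p][i]]+=vals[p][i];
      for(std::size_t p=0;p<globalIds.size();p++)
        for(std::size_t i=0;i<globalIds[p].size();i++)
          vals[p][i]=sum[globalIds[p][i]];
    }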
@@ -361,18 +642,13 @@ namespace ParaMEDMEM
   {
     map<int,double> fastSums;
     int procId=0;
-    const vector<std::pair<int,int> >& mapping=_mapping.getSendingIds();
-    map< int, map<int,int> > distIdToLocId;
-    for(map< pair<int,int>,int >::iterator iter8=_col_offsets.begin();iter8!=_col_offsets.end();iter8++)
-          distIdToLocId[(*iter8).first.first][mapping[(*iter8).second].second]=(*iter8).first.second;
-
     for(vector<int>::const_iterator iter1=distantProcs.begin();iter1!=distantProcs.end();iter1++,procId++)
       {
         const std::vector<int>& currentProcI=resPerProcI[procId];
         const std::vector<double>& currentProcD=resPerProcD[procId];
         vector<double>::const_iterator iter3=currentProcD.begin();
         for(vector<int>::const_iterator iter2=currentProcI.begin();iter2!=currentProcI.end();iter2++,iter3++)
-          fastSums[_col_offsets[std::make_pair(*iter1,distIdToLocId[*iter1][*iter2])]]=*iter3;
+          fastSums[_col_offsets[std::make_pair(*iter1,*iter2)]]=*iter3;
       }
     deno.resize(_coeffs.size());
     vector<vector<double> >::iterator iter6=deno.begin();
@@ -399,6 +675,17 @@ namespace ParaMEDMEM
       }
   }
 
+  void InterpolationMatrix::resizeGlobalColSum(std::vector<std::vector<double> >& denoStrorage)
+  {
+    vector<vector<double> >::iterator iter2=denoStrorage.begin();
+    for(vector<vector<pair<int,double> > >::const_iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++,iter2++)
+      {
+        double val=(*iter2).back();
+        (*iter2).resize((*iter1).size());
+        std::fill((*iter2).begin(),(*iter2).end(),val);
+      }
+  }
+
   // ==================================================================
   // The call to this method updates the arrays on the target side
   //   so that they know which amount of data from which processor they 
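resizeGlobalColSum relies on a small invariant: on entry each row of denoStrorage holds a single accumulated column sum in its last slot, and the method merely replicates that scalar across the whole row. The idiom in isolation, with illustrative names:

  #include <vector>
  #include <algorithm>

  // Sketch: replicate the accumulated value (kept in the last slot) so that
  // every coefficient of the sparse row sees the same denominator.
  // Precondition: 'row' is non-empty.
  void broadcastRowDenominator(std::vector<double>& row, std::size_t rowLength)
  {
    double val=row.back();
    row.resize(rowLength);
    std::fill(row.begin(),row.end(),val);
  }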
index 53a2829d078427c2327d510e7199eb70a5e64147..e0dfceab320ccb74537db97881606d143a2ecc9c 100644 (file)
@@ -26,8 +26,7 @@
 
 namespace ParaMEDMEM
 {
-  class GlobalizerMeshWorkingSide;
-  class GlobalizerMeshLazySide;
+  class ElementLocator;
 
   class InterpolationMatrix : public INTERP_KERNEL::InterpolationOptions,
                               public DECOptions
@@ -43,27 +42,41 @@ namespace ParaMEDMEM
     
     virtual ~InterpolationMatrix();
     void addContribution(MEDCouplingPointSet& distant_support, int iproc_distant,
-                         int* distant_elems, const std::string& srcMeth, const std::string& targetMeth);
-    void finishContributionW(GlobalizerMeshWorkingSide& globalizer);
-    void finishContributionL(GlobalizerMeshLazySide& globalizer);
+                         const int* distant_elems, const std::string& srcMeth, const std::string& targetMeth);
+    void finishContributionW(ElementLocator& elementLocator);
+    void finishContributionL(ElementLocator& elementLocator);
     void multiply(MEDCouplingFieldDouble& field) const;
     void transposeMultiply(MEDCouplingFieldDouble& field)const;
     void prepare();
     int getNbRows() const { return _row_offsets.size(); }
     MPIAccessDEC* getAccessDEC() { return _mapping.getAccessDEC(); }
   private:
-    void computeConservVolDenoW(GlobalizerMeshWorkingSide& globalizer);
-    void computeIntegralDenoW(GlobalizerMeshWorkingSide& globalizer);
-    void computeGlobConstraintDenoW(GlobalizerMeshWorkingSide& globalizer);
-    void computeConservVolDenoL(GlobalizerMeshLazySide& globalizer);
-    void computeIntegralDenoL(GlobalizerMeshLazySide& globalizer);
-    void computeGlobConstraintDenoL(GlobalizerMeshLazySide& globalizer);
+    void computeConservVolDenoW(ElementLocator& elementLocator);
+    void computeIntegralDenoW(ElementLocator& elementLocator);
+    void computeGlobConstraintDenoW(ElementLocator& elementLocator);
+    void computeConservVolDenoL(ElementLocator& elementLocator);
+    void computeIntegralDenoL(ElementLocator& elementLocator);
     
     void computeLocalColSum(std::vector<double>& res) const;
     void computeLocalRowSum(const std::vector<int>& distantProcs, std::vector<std::vector<int> >& resPerProcI,
                             std::vector<std::vector<double> >& resPerProcD) const;
-    void computeGlobalRowSum(GlobalizerMeshWorkingSide& globalizer, std::vector<std::vector<double> >& denoStrorage);
+    void computeGlobalRowSum(ElementLocator& elementLocator, std::vector<std::vector<double> >& denoStrorage, std::vector<std::vector<double> >& denoStrorageInv);
     void computeGlobalColSum(std::vector<std::vector<double> >& denoStrorage);
+    void resizeGlobalColSum(std::vector<std::vector<double> >& denoStrorage);
+    void fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map<int,double> >& values, MEDCouplingFieldDouble *surf);
+    void serializeMe(std::vector< std::vector< std::map<int,double> > >& data1, std::vector<int>& data2) const;
+    void initialize();
+    void findAdditionnalElements(ElementLocator& elementLocator, std::vector<std::vector<int> >& elementsToAdd,
+                                 const std::vector<std::vector<int> >& resPerProcI, const std::vector<std::vector<int> >& globalIdsPartial);
+    void addGhostElements(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& elementsToAdd);
+    int mergePolicies(const std::vector<int>& policyPartial);
+    void mergeRowSum(const std::vector< std::vector<double> >& rowsPartialSumD, const std::vector< std::vector<int> >& globalIdsPartial,
+                     std::vector<int>& globalIdsLazySideInteraction, std::vector<double>& sumCorresponding);
+    void mergeRowSum2(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD,
+                      const std::vector<int>& globalIdsLazySideInteraction, const std::vector<double>& sumCorresponding);
+    void mergeRowSum3(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD);
+    void mergeCoeffs(const std::vector<int>& procsInInteraction, const std::vector< std::vector<int> >& rowsPartialSumI,
+                     const std::vector<std::vector<int> >& globalIdsPartial, std::vector<std::vector<double> >& denoStrorageInv);
     void divideByGlobalRowSum(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& resPerProcI,
                               const std::vector<std::vector<double> >& resPerProcD, std::vector<std::vector<double> >& deno);
   private:
index 852ae39cf513b98b5f16aaaf8e36928da1c3486f..b080ab51f4e056a3161bf156881bed4f0f069e94 100644 (file)
@@ -28,7 +28,6 @@
 #include "InterpolationMatrix.hxx"
 #include "IntersectionDEC.hxx"
 #include "ElementLocator.hxx"
-#include "GlobalizerMesh.hxx"
 
 namespace ParaMEDMEM
 {  
@@ -161,7 +160,7 @@ namespace ParaMEDMEM
         for (int i=0; i<_target_group->size(); i++)
           {
             //        int idistant_proc = (i+_source_group->myRank())%_target_group->size();
-            int       idistant_proc=i;
+            int idistant_proc=i;
 
             //gathers pieces of the target meshes that can intersect the local mesh
             locator.exchangeMesh(idistant_proc,distant_mesh,distant_ids);
@@ -178,8 +177,7 @@ namespace ParaMEDMEM
                 distant_ids=0;
               }
           }
-        GlobalizerMeshWorkingSide globalizer(locator.getCommunicator(),_local_field->getField(),distantMeth,locator.getDistantProcIds());
-        _interpolation_matrix->finishContributionW(globalizer);
+        _interpolation_matrix->finishContributionW(locator);
       }
 
     if (_target_group->containsMyRank())
@@ -207,8 +205,7 @@ namespace ParaMEDMEM
                 distant_ids=0;
               }
           }
-        GlobalizerMeshLazySide globalizer(locator.getCommunicator(),_local_field->getField(),locator.getDistantProcIds());
-        _interpolation_matrix->finishContributionL(globalizer);
+        _interpolation_matrix->finishContributionL(locator);
       }
     _interpolation_matrix->prepare();
   }
index f164a3c7ee83a6a96253b7261d8fe70cff27dd65..b2d0e0bd5d7bcaf531c5aa8de3977fe7e121a736 100644 (file)
@@ -54,7 +54,6 @@ InterpolationMatrix.hxx\
 IntersectionDEC.hxx\
 ExplicitCoincidentDEC.hxx\
 ElementLocator.hxx\
-GlobalizerMesh.hxx\
 ExplicitMapping.hxx\
 ICoCoField.hxx \
 ICoCoMEDField.hxx \
@@ -75,7 +74,6 @@ StructuredCoincidentDEC.cxx\
 ExplicitCoincidentDEC.cxx\
 IntersectionDEC.cxx\
 ElementLocator.cxx\
-GlobalizerMesh.cxx\
 MPIAccessDEC.cxx \
 TimeInterpolator.cxx \
 LinearTimeInterpolator.cxx\
index e1fd35aa26b9096ad6f4e8daa4f4c655ac4a75a5..a74d64089c3ffa30f754f3ff764b05c50da8de26 100644 (file)
@@ -57,6 +57,12 @@ namespace ParaMEDMEM
       _send_proc_offsets[i+1]++;
   }
 
+  void MxN_Mapping::initialize()
+  {
+    _sending_ids.clear();
+    std::fill(_send_proc_offsets.begin(),_send_proc_offsets.end(),0);
+  }
+
   void MxN_Mapping::prepareSendRecv()
   {
     CommInterface comm_interface=_union_group->getCommInterface();
@@ -288,7 +294,6 @@ namespace ParaMEDMEM
     delete[] recvdispls;
   }
 
-
   ostream & operator<< (ostream & f ,const AllToAllMethod & alltoallmethod )
   {
     switch (alltoallmethod)
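The new initialize() resets _sending_ids and zero-fills _send_proc_offsets so that a mapping can be rebuilt from scratch. The offsets array appears to follow the usual CSR-style convention (one slot per processor plus a terminating total, incremented as ids are appended, cf. _send_proc_offsets[i+1]++ above); assuming that convention, the equivalent construction from per-processor counts is:

  #include <vector>

  // Hypothetical helper: turn per-processor send counts into CSR-style
  // offsets, so the ids for processor p live in [offsets[p], offsets[p+1]).
  std::vector<int> countsToOffsets(const std::vector<int>& counts)
  {
    std::vector<int> offsets(counts.size()+1,0);
    for(std::size_t p=0;p<counts.size();p++)
      offsets[p+1]=offsets[p]+counts[p];
    return offsets;
  }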
index e87a05297039d2a79417cf48ea77a3a29d55659c..198859e5af1e3681e49a6184c3b8aea0a5a7fbf3 100644 (file)
@@ -43,7 +43,9 @@ namespace ParaMEDMEM
    void reverseSendRecv(double* recvfield, MEDCouplingFieldDouble& field) const;
  
     //
-    const std::vector<std::pair<int,int> >& getSendingIds() const { return _sending_ids; }//tmp
+    const std::vector<std::pair<int,int> >& getSendingIds() const { return _sending_ids; }
+    const std::vector<int>& getSendProcsOffsets() const { return _send_proc_offsets; }
+    void initialize();
 
     MPIAccessDEC* getAccessDEC(){ return _access_DEC; }
   private :
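getSendProcsOffsets is exposed so that callers such as InterpolationMatrix can slice _sending_ids per distant processor. Assuming the CSR-style layout sketched above, a hypothetical traversal of the (distant proc, distant local id) pairs destined to processor p reads:

  #include <vector>
  #include <utility>

  // Sketch: visit every pair that getSendingIds() stores for processor p.
  // Precondition: 0 <= p and p+1 < sendProcOffsets.size().
  template<class Visitor>
  void forEachIdSentTo(const std::vector<std::pair<int,int> >& sendingIds,
                       const std::vector<int>& sendProcOffsets,
                       int p, Visitor visit)
  {
    for(int k=sendProcOffsets[p];k<sendProcOffsets[p+1];k++)
      visit(sendingIds[k]);
  }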
index 8750126f989203917053b47776fefe76145a8ce0..6afb239116282fc122739a9c113948c66a195cbd 100644 (file)
@@ -164,6 +164,44 @@ namespace ParaMEDMEM
   
     delete data_channel;
   }
+
+  /*!
+   * This method returns, if it exists, an array with a single component and as many tuples as _field has.
+   * For every element on which this->_field lies, the array gives its global number, provided this->_field is nodal.
+   * For example, if _field is a nodal field, the returned array holds the global node numbers.
+   * The result is used to tell the working side to accumulate the data received from the lazy side.
+   */
+  DataArrayInt* ParaFIELD::returnCumulativeGlobalNumbering() const
+  {
+    if(!_field)
+      return 0;
+    TypeOfField type=_field->getTypeOfField();
+    switch(type)
+      {
+      case ON_CELLS:
+        return 0;
+      case ON_NODES:
+        return _support->getGlobalNumberingNodeDA();
+      default:
+        return 0;
+      }
+  }
+
+  DataArrayInt* ParaFIELD::returnGlobalNumbering() const
+  {
+    if(!_field)
+      return 0;
+    TypeOfField type=_field->getTypeOfField();
+    switch(type)
+      {
+      case ON_CELLS:
+        return _support->getGlobalNumberingCellDA();
+      case ON_NODES:
+        return _support->getGlobalNumberingNodeDA();
+      default:
+        return 0;
+      }
+  }
   
   int ParaFIELD::nbComponents() const
   {
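Both accessors return an array whose reference count has already been incremented by the underlying ParaMESH getters, and a null pointer when no numbering applies (notably ON_CELLS in the cumulative variant, where no accumulation is needed). An illustrative consumer, assuming the header names used elsewhere in this directory:

  #include "ParaFIELD.hxx"
  #include "MEDCouplingMemArray.hxx"

  // Sketch: consume the global numbering of a ParaFIELD and release the
  // reference that the getter has already taken on our behalf.
  void useGlobalNumbering(const ParaMEDMEM::ParaFIELD *parafield)
  {
    ParaMEDMEM::DataArrayInt *glob=parafield->returnGlobalNumbering();
    if(!glob)
      return;                      // field type without a global numbering
    const int *ids=glob->getConstPointer();
    (void)ids;                     // ... use the ids here ...
    glob->decrRef();               // balance the incrRef done by the getter
  }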
index 5b89e2e8fb68d2339fb9708e1cbe1a1bdca0fe91..a9bcf57b88d4a63af70f9de4f070cef4d7f89c2a 100644 (file)
 
 namespace ParaMEDMEM
 {
-
+  class DataArrayInt;
   class ParaMESH;
   class ProcessorGroup;
   class MEDCouplingFieldDouble;
   class ComponentTopology;
+  class Topology;
 
   class ParaFIELD
   {
@@ -42,6 +43,8 @@ namespace ParaMEDMEM
     void synchronizeTarget( ParaMEDMEM::ParaFIELD* source_field);
     void synchronizeSource( ParaMEDMEM::ParaFIELD* target_field);
     MEDCouplingFieldDouble* getField() const { return _field; }
+    DataArrayInt* returnCumulativeGlobalNumbering() const;
+    DataArrayInt* returnGlobalNumbering() const;
     Topology* getTopology() const { return _topology; }
     ParaMESH* getSupport() const  { return _support; }
     int nbComponents() const;
index 9bc8c6266ea46e863ca71f6402edb54fe807a7e8..7f59d9120e97c050472208726cb1e52be8be6ce4 100644 (file)
@@ -78,6 +78,30 @@ namespace ParaMEDMEM
       }
   }
 
+  void ParaMESH::setNodeGlobal(DataArrayInt *nodeGlobal)
+  {
+    if(nodeGlobal!=_node_global)
+      {
+        if(_node_global)
+          _node_global->decrRef();
+        _node_global=nodeGlobal;
+        if(_node_global)
+          _node_global->incrRef();
+      }
+  }
+
+  void ParaMESH::setCellGlobal(DataArrayInt *cellGlobal)
+  {
+    if(cellGlobal!=_cell_global)
+      {
+        if(_cell_global)
+          _cell_global->decrRef();
+        _cell_global=cellGlobal;
+        if(_cell_global)
+          _cell_global->incrRef();
+      }
+  }
+
   ParaMESH::~ParaMESH()
   {
     if(_cell_mesh)
@@ -91,8 +115,6 @@ namespace ParaMEDMEM
       _cell_global->decrRef();
     if(_face_global)
       _face_global->decrRef();
-    if(_node_global)
-      _node_global->decrRef();
     delete _explicit_topology;
   }
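Both setters implement the usual reference-counted assignment: incrRef the incoming array, decrRef the replaced one, and tolerate null pointers as well as self-assignment. The updated test below exercises exactly this contract; reduced to its essentials (names illustrative):

  #include "ParaMESH.hxx"
  #include "MEDCouplingMemArray.hxx"
  #include <algorithm>

  // Sketch of the ownership handshake around setNodeGlobal.
  void attachNodeNumbering(ParaMEDMEM::ParaMESH *paramesh,
                           const int *ids, int nbOfNodes)
  {
    ParaMEDMEM::DataArrayInt *da=ParaMEDMEM::DataArrayInt::New();
    da->alloc(nbOfNodes,1);
    std::copy(ids,ids+nbOfNodes,da->getPointer());
    paramesh->setNodeGlobal(da);   // ParaMESH takes its own reference
    da->decrRef();                 // drop ours; the array stays alive
  }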
 
index dc433c1d0ceea3895336ef8ef0c4a8ec60c3cc5b..f713459dd241fc5906027018b265d92f8434abbf 100644 (file)
@@ -45,15 +45,20 @@ namespace ParaMEDMEM
               const ProcessorGroup& proc_group, const std::string& name);
 
     virtual ~ParaMESH();
+    void setNodeGlobal(DataArrayInt *nodeGlobal);
+    void setCellGlobal(DataArrayInt *cellGlobal);
     Topology* getTopology() const { return _explicit_topology; }
     bool isStructured() const { return _cell_mesh->isStructured(); }
     MEDCouplingPointSet *getCellMesh() const { return _cell_mesh; }
     MEDCouplingPointSet *getFaceMesh() const { return _face_mesh; }
     BlockTopology* getBlockTopology() const { return _block_topology; }
 
-    const int* getGlobalNumberingNode() const { return _node_global->getConstPointer(); } 
-    const int* getGlobalNumberingFace() const { return _face_global->getConstPointer(); } 
-    const int* getGlobalNumberingCell() const { return _cell_global->getConstPointer(); } 
+    DataArrayInt* getGlobalNumberingNodeDA() const { if(_node_global) _node_global->incrRef(); return _node_global; }
+    DataArrayInt* getGlobalNumberingFaceDA() const { if(_face_global) _face_global->incrRef(); return _face_global; }
+    DataArrayInt* getGlobalNumberingCellDA() const { if(_cell_global) _cell_global->incrRef(); return _cell_global; }
+    const int* getGlobalNumberingNode() const { if(_node_global) return _node_global->getConstPointer(); return 0; }
+    const int* getGlobalNumberingFace() const { if(_face_global) return _face_global->getConstPointer(); return 0; }
+    const int* getGlobalNumberingCell() const { if(_cell_global) return _cell_global->getConstPointer(); return 0; }
 
   private:
     //mesh object underlying the ParaMESH object
index 40b1c05187f4689ed966b1a33ad692960d96ccc3..8915ae5071f992f5392aef14b0bd0ca9a5d61612 100644 (file)
@@ -33,6 +33,7 @@
 #include "MEDLoader.hxx"
  
 #include <string>
+#include <iterator>
 
// use this define to enable lines whose execution leads to a segmentation fault
 #define ENABLE_FAULTS
@@ -854,6 +855,7 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
     }
   else
     {
+      const char targetMeshName[]="target mesh";
       if(rank==2)
         {
           double coords[10]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2 };
@@ -869,6 +871,12 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
           std::copy(coords,coords+10,myCoords->getPointer());
           mesh->setCoords(myCoords);
           myCoords->decrRef();
+          paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+          DataArrayInt *da=DataArrayInt::New();
+          const int globalNumberingP2[5]={0,1,2,3,4};
+          da->useArray(globalNumberingP2,false,CPP_DEALLOC,5,1);
+          paramesh->setNodeGlobal(da);
+          da->decrRef();
         }
       if(rank==3)
         {
@@ -884,6 +892,12 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
           std::copy(coords,coords+6,myCoords->getPointer());
           mesh->setCoords(myCoords);
           myCoords->decrRef();
+          paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+          DataArrayInt *da=DataArrayInt::New();
+          const int globalNumberingP3[3]={4,2,5};
+          da->useArray(globalNumberingP3,false,CPP_DEALLOC,3,1);
+          paramesh->setNodeGlobal(da);
+          da->decrRef();
         }
       if(rank==4)
         {
@@ -900,9 +914,14 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
           std::copy(coords,coords+12,myCoords->getPointer());
           mesh->setCoords(myCoords);
           myCoords->decrRef();
+          paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+          DataArrayInt *da=DataArrayInt::New();
+          const int globalNumberingP4[6]={3,6,7,4,8,5};
+          da->useArray(globalNumberingP4,false,CPP_DEALLOC,6,1);
+          paramesh->setNodeGlobal(da);
+          da->decrRef();
         }
       ParaMEDMEM::ComponentTopology comptopo;
-      paramesh=new ParaMESH(mesh,*target_group,"target mesh");
       parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
       parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
       parafieldP0->getField()->setNature(ConservativeVolumic);
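Each target rank wires its global node numbering in through DataArrayInt::useArray with ownership=false: the array object then merely aliases the caller's buffer and will never deallocate it (so the buffer must outlive every use of the array, and the CPP_DEALLOC tag is moot). Condensed from the rank==3 block above, assuming the same surrounding test scope (paramesh as in the test):

  const int globalIds[3]={4,2,5};                // global node ids of this rank
  DataArrayInt *da=DataArrayInt::New();
  da->useArray(globalIds,false,CPP_DEALLOC,3,1); // non-owning alias
  paramesh->setNodeGlobal(da);
  da->decrRef();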
@@ -917,6 +936,16 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
       dec.synchronize();
       dec.setForcedRenormalization(false);
       dec.sendData();
+      dec.recvData();
+      const double *valueP0=parafieldP0->getField()->getArray()->getPointer();
+      if(rank==0)
+        {
+          CPPUNIT_ASSERT_DOUBLES_EQUAL(34.42857143,valueP0[0],1e-7);
+        }
+      if(rank==1)
+        {
+          CPPUNIT_ASSERT_DOUBLES_EQUAL(44.,valueP0[0],1e-7);
+        }
     }
   else
     {
@@ -925,10 +954,32 @@ void ParaMEDMEMTest::testIntersectionDECNonOverlapp_2D_P0P1P1P0()
       dec.synchronize();
       dec.setForcedRenormalization(false);
       dec.recvData();
-      /*const double *res=parafield->getField()->getArray()->getConstPointer();
-      const double *expected=targetResults[rank-nproc_source];
-      CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
-      CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);*/
+      const double *res=parafieldP1->getField()->getArray()->getConstPointer();
+      if(rank==2)
+        {
+          const double expectP2[5]={39.0, 31.0, 31.0, 47.0, 39.0};
+          CPPUNIT_ASSERT_EQUAL(5,parafieldP1->getField()->getNumberOfTuples());
+          CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+          for(int kk=0;kk<5;kk++)
+            CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP2[kk],res[kk],1e-12);
+        }
+      if(rank==3)
+        {
+          const double expectP3[3]={39.0, 31.0, 31.0};
+          CPPUNIT_ASSERT_EQUAL(3,parafieldP1->getField()->getNumberOfTuples());
+          CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+          for(int kk=0;kk<3;kk++)
+            CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP3[kk],res[kk],1e-12);
+        }
+      if(rank==4)
+        {
+          const double expectP4[6]={47.0, 47.0, 47.0, 39.0, 39.0, 31.0};
+          CPPUNIT_ASSERT_EQUAL(6,parafieldP1->getField()->getNumberOfTuples());
+          CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+          for(int kk=0;kk<6;kk++)
+            CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP4[kk],res[kk],1e-12);
+        }
+      dec.sendData();
     }
   //
   delete parafieldP0;