[ParaMEDMEM] Adding SWIG interface for OverlapDEC + py tests of DEC
author     abn <adrien.bruneton@cea.fr>
           Fri, 12 Feb 2021 20:56:46 +0000 (21:56 +0100)
committer  abn <adrien.bruneton@cea.fr>
           Fri, 5 Mar 2021 15:22:13 +0000 (16:22 +0100)
+ added a 'release()' method to several classes (gives the Python wrapping a chance
to perform MPI clean-up without having to wait for the destructor) -- see the usage sketch below
+ added Python tests
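
A minimal sketch of the intended usage from Python, based on the tests added in this commit
(test_InterpKernelDEC.py / test_OverlapDEC.py remain the reference; assumes a run on 4 MPI processes):

    from medcoupling import *
    from mpi4py import MPI

    interface = CommInterface()
    source_group = MPIProcessorGroup(interface, [0, 1])
    target_group = MPIProcessorGroup(interface, [2, 3])
    dec = InterpKernelDEC(source_group, target_group)
    # ... attachLocalField() / synchronize() / sendData() or recvData() ...

    # Free the MPI resources explicitly, before MPI.Finalize(), instead of relying on the
    # (non-deterministic) destruction of the Python proxies:
    dec.release()
    source_group.release()
    target_group.release()
    MPI.COMM_WORLD.Barrier()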

25 files changed:
src/ParaMEDMEM/BlockTopology.cxx
src/ParaMEDMEM/BlockTopology.hxx
src/ParaMEDMEM/DisjointDEC.cxx
src/ParaMEDMEM/DisjointDEC.hxx
src/ParaMEDMEM/ExplicitTopology.hxx
src/ParaMEDMEM/InterpKernelDEC.cxx
src/ParaMEDMEM/InterpKernelDEC.hxx
src/ParaMEDMEM/MPIProcessorGroup.cxx
src/ParaMEDMEM/MPIProcessorGroup.hxx
src/ParaMEDMEM/OverlapDEC.cxx
src/ParaMEDMEM/OverlapDEC.hxx
src/ParaMEDMEM/OverlapInterpolationMatrix.cxx
src/ParaMEDMEM/ParaFIELD.cxx
src/ParaMEDMEM/ParaFIELD.hxx
src/ParaMEDMEM/ParaMESH.cxx
src/ParaMEDMEM/ParaMESH.hxx
src/ParaMEDMEM/StructuredCoincidentDEC.cxx
src/ParaMEDMEM/StructuredCoincidentDEC.hxx
src/ParaMEDMEM_Swig/CMakeLists.txt
src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py [new file with mode: 0644]
src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py
src/ParaMEDMEM_Swig/test_OverlapDEC.py [new file with mode: 0644]
src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py

index 82831b74ff19df6b45575af822692533ff0220d5..e139bfa5e1fec4a5ca5e5ca55ceb33ca3586c2e0 100644 (file)
@@ -41,7 +41,7 @@ namespace MEDCoupling
   BlockTopology::BlockTopology() :
     _dimension(0), _nb_procs_per_dim(0),
     _local_array_indices(0), _cycle_type(0),
-    _proc_group(NULL),_nb_elems(0),
+    _proc_group(nullptr),_nb_elems(0),
     _owns_processor_group(false)
   {}
 
@@ -168,9 +168,18 @@ namespace MEDCoupling
   }
 
   BlockTopology::~BlockTopology()
+  {
+    release();
+  }
+
+  /** The destructor involves MPI operations: keep the clean-up in a dedicated
+   * method so that the Python wrapping can call it explicitly.
+   */
+  void BlockTopology::release()
   {
     if (_owns_processor_group)
       delete _proc_group;
+    _proc_group = nullptr;
   }
 
   //!converts a pair <subdomainid,local> to a global number
@@ -317,11 +326,8 @@ namespace MEDCoupling
   void BlockTopology::unserialize(const mcIdType* serializer,const CommInterface& comm_interface)
   {
     const mcIdType* ptr_serializer=serializer;
-    cout << "unserialize..."<<endl;
     _dimension=(int)*(ptr_serializer++);
-    cout << "dimension "<<_dimension<<endl;
     _nb_elems=*(ptr_serializer++);
-    cout << "nbelems "<<_nb_elems<<endl;
     _nb_procs_per_dim.resize(_dimension);
     _cycle_type.resize(_dimension);
     _local_array_indices.resize(_dimension);
@@ -337,9 +343,10 @@ namespace MEDCoupling
     mcIdType size_comm=*(ptr_serializer++);
     for (int i=0; i<size_comm; i++)
       procs.insert((int)*(ptr_serializer++));
-    cout << "unserialize..."<<procs.size()<<endl;
+
+    if (_owns_processor_group)
+      delete _proc_group;
     _proc_group=new MPIProcessorGroup(comm_interface,procs);
     _owns_processor_group=true;
-    //TODO manage memory ownership of _proc_group  
   }
 }
index 0a85e5303c0401f04e2e4b751f36523935cddc92..b10624852e3ecbac2c26f73594b2281a31555a19 100644 (file)
@@ -50,6 +50,8 @@ namespace MEDCoupling
     BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo);
     BlockTopology(const ProcessorGroup& group, mcIdType nb_elem);
     virtual ~BlockTopology();
+    void release();
+
     //!Retrieves the number of elements for a given topology
     mcIdType getNbElements()const { return _nb_elems; }
     mcIdType getNbLocalElements() const;
index 178f296f65cb37019683590b913a922bb7822b1a..3d6be30b3cd20adfaec633959ced803c0848af99 100644 (file)
@@ -173,7 +173,7 @@ namespace MEDCoupling
       {
         delete _local_field;
       }
-    _local_field=0;
+    _local_field=nullptr;
     _owns_field=false;
     if(_owns_groups)
       {
@@ -181,12 +181,13 @@ namespace MEDCoupling
         delete _target_group;
       }
     _owns_groups=false;
-    _source_group=0;
-    _target_group=0;
+    _source_group=nullptr;
+    _target_group=nullptr;
     delete _union_group;
-    _union_group=0;
+    _union_group=nullptr;
     if (_union_comm != MPI_COMM_NULL)
       _comm_interface->commFree(&_union_comm);
+    _union_comm = MPI_COMM_NULL;
   }
 
   /**
index 289d618dded6d65c915ee59805baa6a7cd9866bb..8f7e73ba34e2db752dee214d7e5a7b9107229bab 100644 (file)
@@ -79,18 +79,20 @@ namespace MEDCoupling
     DisjointDEC &operator=(const DisjointDEC& s);
     DisjointDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
                 const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    virtual ~DisjointDEC();
+
     void setNature(NatureOfField nature);
     void attachLocalField( MEDCouplingFieldDouble *field);
     void attachLocalField(const ParaFIELD *field, bool ownPt=false);
     void attachLocalField(const ICoCo::MEDField *field);
-    
+
     virtual void prepareSourceDE() = 0;
     virtual void prepareTargetDE() = 0;
     virtual void recvData() = 0;
     virtual void sendData() = 0;
     void sendRecvData(bool way=true);
     virtual void synchronize() = 0;
-    virtual ~DisjointDEC();
+
     virtual void computeProcGroup() { }
     void renormalizeTargetField(bool isWAbs);
     //
index d815f3d7157df818d7470ab4ce95c6d86c24e67a..ef8b7deea456eabceeef165e1ef5aa41df2a2459 100644 (file)
@@ -47,7 +47,7 @@ namespace MEDCoupling
     ExplicitTopology( const ExplicitTopology& topo, int nbcomponents);
     ExplicitTopology(const ParaMESH &mesh);
     virtual ~ExplicitTopology();
-    
+
     inline mcIdType getNbElements()const;
     inline mcIdType getNbLocalElements() const;
     const ProcessorGroup* getProcGroup()const { return _proc_group; }
index d379bf206c9efe4fc088d76e7a24351890441746..16a26546ed929f8e6d2b271f321d7c23e7a430f4 100644 (file)
@@ -71,9 +71,17 @@ namespace MEDCoupling
 
   InterpKernelDEC::~InterpKernelDEC()
   {
-    if (_interpolation_matrix !=0)
+    release();
+  }
+
+  void InterpKernelDEC::release()
+  {
+    if (_interpolation_matrix != nullptr)
       delete _interpolation_matrix;
-  } 
+    _interpolation_matrix = nullptr;
+    DisjointDEC::cleanInstance();
+  }
+
 
   /*! 
     \brief Synchronization process for exchanging topologies.
index b92a663c8c5f7aa57f3e7119c17a8286a64dfbf4..f68477502cc7c0acce2ae73ba5f6f74af5178145 100644 (file)
@@ -133,6 +133,8 @@ namespace MEDCoupling
     InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
                     const MPI_Comm& world_comm=MPI_COMM_WORLD);
     virtual ~InterpKernelDEC();
+    void release();
+
     void synchronize();
     void recvData();
     void recvData(double time);
index e800181c56fbc24f81185ca49955d9e4de243d83..aeb68c1679e6bce894286c3842a514f887c30ef9 100644 (file)
@@ -181,10 +181,20 @@ namespace MEDCoupling
 
   MPIProcessorGroup::~MPIProcessorGroup()
   {
-    _comm_interface.groupFree(&_group);
+    release();
+  }
+
+  /** The destructor involves MPI operations: keep the clean-up in a dedicated
+   * method so that the Python wrapping can call it explicitly.
+   */
+  void MPIProcessorGroup::release()
+  {
+    if (_group != MPI_GROUP_EMPTY)
+      _comm_interface.groupFree(&_group);
+    _group = MPI_GROUP_EMPTY;
     if (_comm!=_world_comm && _comm !=MPI_COMM_NULL)
       _comm_interface.commFree(&_comm);
-  
+    _comm = MPI_COMM_NULL;
   }
 
   /*! Translation of the rank id between two processor groups. This method translates rank \a rank
index 6feec77504ec0ae19579ee36274024f18fe3b48b..e73f21066a154b809a78a63fef8fcd251dd285e2 100644 (file)
@@ -38,6 +38,8 @@ namespace MEDCoupling
     MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD);
     MPIProcessorGroup(const MPIProcessorGroup& other);
     virtual ~MPIProcessorGroup();
+    void release();
+
     virtual MPIProcessorGroup *deepCopy() const;
     virtual ProcessorGroup* fuse (const ProcessorGroup&) const;
     void intersect (ProcessorGroup&) { }
index ae6a4f0cca29a12cedaa8599fd9299e633b92c55..2be96a941eabf865d6c10d18698350e56ba1e05f 100644 (file)
@@ -59,20 +59,40 @@ namespace MEDCoupling
   }
 
   OverlapDEC::~OverlapDEC()
+  {
+    release();
+  }
+
+  /** The destructor involves MPI operations: keep the clean-up in a dedicated
+   * method so that the Python wrapping can call it explicitly.
+   */
+  void OverlapDEC::release()
   {
     if(_own_group)
-      delete _group;
+      {
+        delete _group;
+        _group = nullptr;
+      }
     if(_own_source_field)
-      delete _source_field;
+      {
+        delete _source_field;
+        _source_field = nullptr;
+      }
     if(_own_target_field)
-      delete _target_field;
+      {
+        delete _target_field;
+        _target_field = nullptr;
+      }
     delete _interpolation_matrix;
+    _interpolation_matrix = nullptr;
     delete _locator;
+    _locator = nullptr;
     if (_comm != MPI_COMM_NULL)
       {
         MEDCoupling::CommInterface comm;
         comm.commFree(&_comm);
       }
+    _comm = MPI_COMM_NULL;
   }
 
   void OverlapDEC::sendRecvData(bool way)
index 942653f1c469409230bd804a5d952f8f41bda3ab..ef0fe77a0a903e0afef85cb3b006120a33cc02c7 100644 (file)
@@ -224,6 +224,8 @@ namespace MEDCoupling
   public:
     OverlapDEC(const std::set<int>& procIds,const MPI_Comm& world_comm=MPI_COMM_WORLD);
     virtual ~OverlapDEC();
+    void release();
+
     void sendRecvData(bool way=true);
     void sendData();
     void recvData();
index 0b835c41b99af56588ff03814f30f7c1aea7eab4..17c2308cbab18d892c5f333a4048e51b074316e9 100644 (file)
@@ -260,7 +260,7 @@ namespace MEDCoupling
     if(_target_field->getField()->getNature()==IntensiveMaximum)
       _mapping.computeDenoConservativeVolumic(_target_field->getField()->getNumberOfTuplesExpected());
     else
-      throw INTERP_KERNEL::Exception("OverlapDEC: Policy not implemented yet: only IntensiveMaximum!");
+      throw INTERP_KERNEL::Exception("OverlapDEC: Policy not set (did you call setNature()?) or not implemented yet: only IntensiveMaximum!");
 //      {
 //      if(_target_field->getField()->getNature()==IntensiveConservation)
 //        {
index aa1cb94afa2d756ac525e817f9ac0899d743e428..7957222ab2ea07f7d3afab882c5260ae13e4898f 100644 (file)
@@ -122,12 +122,28 @@ namespace MEDCoupling
   }
 
   ParaFIELD::~ParaFIELD()
+  {
+    release();
+  }
+
+  /** The destructor involves MPI operations: keep the clean-up in a dedicated
+   * method so that the Python wrapping can call it explicitly.
+   */
+  void ParaFIELD::release()
   {
     if(_field)
-      _field->decrRef();
+      {
+        _field->decrRef();
+        _field = nullptr;
+      }
+
     if(_own_support)
-      delete _support;
+      {
+        delete _support;
+        _support = nullptr;
+      }
     delete _topology;
+    _topology = nullptr;
   }
 
   void ParaFIELD::synchronizeTarget(ParaFIELD* source_field)
index 1dba2f4219cb9b2f6c20044daf142023e2b817cf..606b479751b1ff06c1f7313495bfb032875566e3 100644 (file)
@@ -39,6 +39,7 @@ namespace MEDCoupling
     ParaFIELD(TypeOfField type, TypeOfTimeDiscretization td, ParaMESH* mesh, const ComponentTopology& component_topology); 
     ParaFIELD(MEDCouplingFieldDouble* field, ParaMESH *sup, const ProcessorGroup& group);
     virtual ~ParaFIELD();
+    void release();
 
     void synchronizeTarget( MEDCoupling::ParaFIELD* source_field);
     void synchronizeSource( MEDCoupling::ParaFIELD* target_field);
index d4c1045f4587caf5e6261e041d24f0533366dd90..c9a251916588ed5d186754ea548667dc5a8d20f5 100644 (file)
@@ -76,9 +76,19 @@ namespace MEDCoupling
   }
 
   ParaMESH::~ParaMESH()
+  {
+    release();
+  }
+
+  /** The destructor involves MPI operations: keep the clean-up in a dedicated
+   * method so that the Python wrapping can call it explicitly.
+   */
+  void ParaMESH::release()
   {
     delete _block_topology;
     delete _explicit_topology;
+    _block_topology = nullptr;
+    _explicit_topology = nullptr;
   }
 
 }
index 7955bd42e453ec4a99747ca6e121f501fb734622..534e534106574954951cf6d9317bfe2e0c9ed578 100644 (file)
@@ -56,6 +56,8 @@ namespace MEDCoupling
               const ProcessorGroup& proc_group ) ;
 
     virtual ~ParaMESH();
+    void release();
+
     void setNodeGlobal(DataArrayIdType *nodeGlobal);
     void setCellGlobal(DataArrayIdType *cellGlobal);
     Topology* getTopology() const { return _explicit_topology; }
@@ -80,7 +82,7 @@ namespace MEDCoupling
     int _my_domain_id;
 
     //global topology of the cells
-    MEDCoupling::BlockTopology* _block_topology;
+    BlockTopology* _block_topology;
     Topology*  _explicit_topology;
     // pointers to global numberings
     MCAuto<DataArrayIdType> _node_global;
index 9408c267eae27b432e6bcafbc05debb1dbc64e78..1a1bdd501cf8e675ea5037e50ad83f048a44e70d 100644 (file)
@@ -34,35 +34,57 @@ using namespace std;
 namespace MEDCoupling
 {
 
-  StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0),
-                                                     _send_counts(0),_recv_counts(0),
-                                                     _send_displs(0),_recv_displs(0),
-                                                     _recv_buffer(0),_send_buffer(0)
+  StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(nullptr),_topo_target(nullptr),
+                                                     _owns_topo_source(false), _owns_topo_target(false),
+                                                     _send_counts(nullptr),_recv_counts(nullptr),
+                                                     _send_displs(nullptr),_recv_displs(nullptr),
+                                                     _recv_buffer(nullptr),_send_buffer(nullptr)
   {
   }
 
+  StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
+      DisjointDEC(local_group,distant_group),
+      _topo_source(nullptr),_topo_target(nullptr),
+      _owns_topo_source(false), _owns_topo_target(false),
+      _send_counts(nullptr),_recv_counts(nullptr),
+      _send_displs(nullptr),_recv_displs(nullptr),
+      _recv_buffer(nullptr),_send_buffer(nullptr)
+  {
+  }
 
   StructuredCoincidentDEC::~StructuredCoincidentDEC()
+  {
+    release();
+  }
+
+  /** The destructor involves MPI operations: keep the clean-up in a dedicated
+   * method so that the Python wrapping can call it explicitly.
+   */
+  void StructuredCoincidentDEC::release()
   {
     delete [] _send_buffer;
     delete [] _recv_buffer;
-    delete []_send_displs;
+    delete [] _send_displs;
     delete [] _recv_displs;
     delete [] _send_counts;
     delete [] _recv_counts;
-    if (! _source_group->containsMyRank())
+    _send_buffer = nullptr;
+    _recv_buffer = nullptr;
+    _send_displs = nullptr;
+    _recv_displs = nullptr;
+    _send_counts = nullptr;
+    _recv_counts = nullptr;
+
+    if (_owns_topo_source)
       delete _topo_source;
-    if(!_target_group->containsMyRank())
+    if (_owns_topo_target)
       delete _topo_target;
-  }
+    _topo_source = nullptr;
+    _topo_target = nullptr;
+    _owns_topo_source = false;
+    _owns_topo_target = false;
 
-  StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
-      DisjointDEC(local_group,distant_group),
-      _topo_source(0),_topo_target(0),
-      _send_counts(0),_recv_counts(0),
-      _send_displs(0),_recv_displs(0),
-      _recv_buffer(0),_send_buffer(0)
-  {
+    DisjointDEC::cleanInstance();
   }
 
   /*! Synchronization process for exchanging topologies
@@ -71,16 +93,22 @@ namespace MEDCoupling
   {
     if (_source_group->containsMyRank())
       _topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
+    else
+      _owns_topo_source = true;  // _topo_source will be filled by broadcastTopology below
     if (_target_group->containsMyRank())
       _topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());
+    else
+      _owns_topo_target = true;  // _topo_target will be filled by broadcastTopology below
 
     // Transmitting source topology to target code
+    MESSAGE ("Broadcast source topo ...");
     broadcastTopology(_topo_source,1000);
+
     // Transmitting target topology to source code
+    MESSAGE ("Broadcast target topo ...");
     broadcastTopology(_topo_target,2000);
     if (_topo_source->getNbElements() != _topo_target->getNbElements())
       throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
-
   }
 
   /*! Creates the arrays necessary for the data transfer
@@ -229,8 +257,7 @@ namespace MEDCoupling
 
     MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
 
-    // The master proc creates a send buffer containing
-    // a serialized topology
+    // The master proc creates a send buffer containing a serialized topology
     int rank_master;
 
     if (topo!=0 && topo->getProcGroup()->myRank()==0)
@@ -250,7 +277,7 @@ namespace MEDCoupling
       {
         MESSAGE(" rank "<<group->myRank()<< " waiting ...");
         _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
-        MESSAGE(" rank "<<group->myRank()<< "received master rank"<<rank_master);
+        MESSAGE(" rank "<<group->myRank()<< " received master rank "<<rank_master);
       }
     // The topology is broadcasted to all processors in the group
     _comm_interface->broadcast(&size, 1,MPI_ID_TYPE,rank_master,*(group->getComm()));
@@ -260,9 +287,7 @@ namespace MEDCoupling
       copy(serializer, serializer+size, buffer);
     _comm_interface->broadcast(buffer,(int)size,MPI_ID_TYPE,rank_master,*(group->getComm()));
 
-    // Processors which did not possess the source topology
-    // unserialize it
-
+    // Processors which did not possess the source topology unserialize it
     BlockTopology* topotemp=new BlockTopology();
     topotemp->unserialize(buffer, *_comm_interface);
 
@@ -360,5 +385,6 @@ namespace MEDCoupling
         synchronizeTopology();
         prepareTargetDE();
       }
+    MESSAGE ("sync OK");
   }
 }
index d90b2d6510e8cba865d108828e570ca58aa50b71..9e8b00c9f81adaf48e11cae721d2a10d2530b029 100644 (file)
@@ -87,6 +87,8 @@ namespace MEDCoupling
     StructuredCoincidentDEC();
     StructuredCoincidentDEC( ProcessorGroup& source, ProcessorGroup& target);
     virtual ~StructuredCoincidentDEC();
+    void release();
+
     void synchronize();
     void recvData();
     void sendData();
@@ -99,6 +101,10 @@ namespace MEDCoupling
 
     BlockTopology* _topo_source;
     BlockTopology* _topo_target;
+
+    bool _owns_topo_source;
+    bool _owns_topo_target;
+
     int* _send_counts;
     int* _recv_counts;
     int* _send_displs;
index 5a4649b08362e67f5d21e9e33db0e80994fc8eb5..71cc03a38bf6e5fb0b493d720af5b20a7974f360 100644 (file)
@@ -57,7 +57,7 @@ SET (SWIG_MODULE_ParaMEDMEM_EXTRA_DEPS
     ${medcoupling_HEADERS_HXX} ${medcoupling_HEADERS_TXX}
     ${interpkernel_HEADERS_HXX} ${interpkernel_HEADERS_TXX})
 
-IF(${CMAKE_VERSION} VERSION_LESS "3.8.0")     
+IF(${CMAKE_VERSION} VERSION_LESS "3.8.0")
   SWIG_ADD_MODULE(ParaMEDMEM python ParaMEDMEM.i)
 ELSE()
   SWIG_ADD_LIBRARY(ParaMEDMEM LANGUAGE python SOURCES ParaMEDMEM.i)
@@ -66,9 +66,52 @@ ENDIF()
 SWIG_LINK_LIBRARIES(ParaMEDMEM ${PYTHON_LIBRARIES} paramedmem)
 SWIG_CHECK_GENERATION(ParaMEDMEM)
 
+#
+# Tests
+#
+SALOME_ACCUMULATE_ENVIRONMENT(PYTHONPATH NOCHECK ${CMAKE_CURRENT_BINARY_DIR}/../PyWrapping)
+SALOME_ACCUMULATE_ENVIRONMENT(MED_RESOURCES_DIR NOCHECK ${CMAKE_SOURCE_DIR}/resources)
+SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env)
+
+#  -- some tests require 2, 3, 4 or 5 procs --
+# MPICH does not support --oversubscribe:
+IF(NOT ${MPIEXEC_EXECUTABLE} MATCHES "mpich")
+    SET(_oversub_opt "--oversubscribe")
+ENDIF()
+
+ADD_TEST(NAME PyPara_Basics_Proc2
+         COMMAND ${MPIEXEC} -np 2  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_BasicOperation.py)
+SET_TESTS_PROPERTIES(PyPara_Basics_Proc2 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ADD_TEST(NAME PyPara_InterpKernelDEC_Proc4
+         COMMAND ${MPIEXEC} -np 4  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
+SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
+ADD_TEST(NAME PyPara_InterpKernelDEC_Proc5
+         COMMAND ${MPIEXEC} -np 5  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
+SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
+
+#ADD_TEST(NAME PyPara_NonCoincidentDEC_Proc5
+#         COMMAND ${MPIEXEC} -np 5  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_NonCoincidentDEC.py)
+#SET_TESTS_PROPERTIES(PyPara_NonCoincidentDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ADD_TEST(NAME PyPara_StructuredCoincidentDEC_Proc4
+         COMMAND ${MPIEXEC} -np 4  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_StructuredCoincidentDEC.py)
+SET_TESTS_PROPERTIES(PyPara_StructuredCoincidentDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ADD_TEST(NAME PyPara_OverlapDEC_Proc4
+         COMMAND ${MPIEXEC} -np 4  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_OverlapDEC.py)
+SET_TESTS_PROPERTIES(PyPara_OverlapDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
+
+SET(_tst_scripts
+  test_InterpKernelDEC.py
+  test_NonCoincidentDEC.py
+  test_StructuredCoincidentDEC.py
+  test_OverlapDEC.py
+)
+
 INSTALL(TARGETS _ParaMEDMEM DESTINATION ${MEDCOUPLING_INSTALL_PYTHON})
 INSTALL(FILES ParaMEDMEM.i ParaMEDMEMCommon.i DESTINATION ${MEDCOUPLING_INSTALL_HEADERS})
-INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
+INSTALL(FILES ${_tst_scripts} DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
 SALOME_INSTALL_SCRIPTS(${CMAKE_CURRENT_BINARY_DIR}/ParaMEDMEM.py ${MEDCOUPLING_INSTALL_PYTHON} EXTRA_DPYS "${SWIG_MODULE_ParaMEDMEM_REAL_NAME}")
 
 INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
index 896f77469c751f2bcf95f3219ea7efb2c48d7712..d1537c9ebcbf0acce97bfed2ed366bfe9f1d8f7a 100644 (file)
@@ -30,6 +30,7 @@
 #include "InterpKernelDEC.hxx"
 #include "NonCoincidentDEC.hxx"
 #include "StructuredCoincidentDEC.hxx"
+#include "OverlapDEC.hxx"
 #include "ParaMESH.hxx"
 #include "ParaFIELD.hxx"
 #include "ICoCoMEDField.hxx"
@@ -54,6 +55,7 @@ using namespace ICoCo;
 %include "DisjointDEC.hxx"
 %include "InterpKernelDEC.hxx"
 %include "StructuredCoincidentDEC.hxx"
+%include "OverlapDEC.hxx"
 
 %include "ICoCoField.hxx"
 %rename(ICoCoMEDField) ICoCo::MEDField;
diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py b/src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py
new file mode 100644 (file)
index 0000000..a677a4d
--- /dev/null
@@ -0,0 +1,31 @@
+#  -*- coding: iso-8859-1 -*-
+# Copyright (C) 2007-2020  CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author : Anthony Geay (EDF R&D)
+
+def WriteInTmpDir(func):
+    def decoratedFunc(*args,**kwargs):
+        import tempfile,os
+        ret = None
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            os.chdir(tmpdirname)
+            ret = func(*args,**kwargs)
+            pass
+        return ret
+    return decoratedFunc
index 0e473a6e8654d258d7a7bfc57b5a2b610615c4a4..be4c1346a3634a445ba433dae5584aac326a085c 100755 (executable)
 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 #
 
-from ParaMEDMEM import *
-from MEDLoader import ReadUMeshFromFile
+from medcoupling import *
+from ParaMEDMEMTestTools import WriteInTmpDir
 import sys, os
 import unittest
 import math
 from mpi4py import MPI
 
 
-class ParaMEDMEMBasicsTest(unittest.TestCase):
-    def testInterpKernelDEC_2D(self):
+class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
+    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
+    """
+    def generateFullSource(self):
+        """ The complete source mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) """
+        msh  = self.generateFullTarget()
+        msh.simplexize(0)
+        msh.setName("src_mesh")
+        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
+        fld.setMesh(msh); fld.setName("source_F");
+        da = DataArrayDouble(msh.getNumberOfCells())
+        da.iota()
+        da *= 2
+        fld.setArray(da)
+        return msh, fld
+
+    def generateFullTarget(self):
+        """ The complete target mesh: 4 squares """
+        m1 = MEDCouplingCMesh("tgt_msh")
+        da = DataArrayDouble([0,1,2])
+        m1.setCoords(da, da)
+        msh = m1.buildUnstructured()
+        return msh
+
+    #
+    # Below, the two functions emulating the setup of a piece of the source and target mesh
+    # on each proc. Obviously, in real-world problems this comes from your code and is certainly
+    # not computed by cutting the full-size mesh again from scratch!
+    #
+    def getPartialSource(self, rank):
+        """ Will return an empty mesh piece for rank=2 and 3 """
+        msh, f = self.generateFullSource()
+        if rank == 0:
+            sub_m, sub_f = msh[0:4], f[0:4]
+        elif rank == 1:
+            sub_m, sub_f = msh[4:8], f[4:8]
+        sub_m.zipCoords()
+        return sub_m, sub_f
+
+    def getPartialTarget(self, rank):
+        """ One square for each rank """
+        msh = self.generateFullTarget()
+        if rank == 2:
+            sub_m = msh[[0,2]]
+        elif rank == 3:
+            sub_m = msh[[1,3]]
+        sub_m.zipCoords()
+        # Receiving side must prepare an empty field that will be filled by DEC:
+        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
+        da = DataArrayDouble(sub_m.getNumberOfCells())
+        fld.setArray(da)
+        fld.setName("tgt_F")
+        fld.setMesh(sub_m)
+        return sub_m, fld
+
+    @WriteInTmpDir
+    def testInterpKernelDEC_2D_py_1(self):
+        """ This test illustrates a basic use of the InterpKernelDEC.
+        Look at the C++ documentation of the class for more information.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+
+        # Define two processor groups
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, procs_source)
+        target_group = MPIProcessorGroup(interface, procs_target)
+        idec = InterpKernelDEC(source_group, target_group)
+
+        # Write out full size meshes/fields for inspection
+        if rank == 0:
+            _, fld = self.generateFullSource()
+            mshT = self.generateFullTarget()
+            WriteField("./source_field_FULL.med", fld, True)
+            WriteUMesh("./target_mesh_FULL.med", mshT, True)
+
+        MPI.COMM_WORLD.Barrier()  # really necessary??
+
+        #
+        # OK, let's go DEC !!
+        #
+        if source_group.containsMyRank():
+            _, fieldS = self.getPartialSource(rank)
+            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
+            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
+            idec.attachLocalField(fieldS)
+            idec.synchronize()
+            idec.sendData()
+
+        if target_group.containsMyRank():
+            mshT, fieldT = self.getPartialTarget(rank)
+            fieldT.setNature(IntensiveMaximum)
+            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
+            idec.attachLocalField(fieldT)
+            idec.synchronize()
+            idec.recvData()
+            # Now the actual checks:
+            if rank == 2:
+                self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0])
+            elif rank == 3:
+                self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0])
+
+        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
+        idec.release()
+        source_group.release()
+        target_group.release()
+        MPI.COMM_WORLD.Barrier()
+
+    @WriteInTmpDir
+    def test_InterpKernelDEC_2D_py_2(self):
+        """ More involved test using Para* objects.
+        """
         size = MPI.COMM_WORLD.size
         rank = MPI.COMM_WORLD.rank
         if size != 5:
-            raise RuntimeError("Expect MPI_COMM_WORLD size == 5")
+            print("Should be run on 5 procs!")
+            return
+
         print(rank)
         nproc_source = 3
         procs_source = list(range(nproc_source))
@@ -43,20 +162,12 @@ class ParaMEDMEMBasicsTest(unittest.TestCase):
         source_group = MPIProcessorGroup(interface, procs_source)
         dec = InterpKernelDEC(source_group, target_group)
 
-        mesh       =0
-        support    =0
-        paramesh   =0
-        parafield  =0
-        icocofield =0
-        data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
-        tmp_dir  = os.environ['TMP']
-
-        if not tmp_dir or len(tmp_dir)==0:
-            tmp_dir = "/tmp"
-            pass
+        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
+        if not os.path.isdir(data_dir):
+            data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1]
 
-        filename_xml1 = os.path.join(data_dir, "share/resources/med/square1_split")
-        filename_xml2 = os.path.join(data_dir, "share/resources/med/square2_split")
+        filename_xml1 = os.path.join(data_dir, "square1_split")
+        filename_xml2 = os.path.join(data_dir, "square2_split")
 
         MPI.COMM_WORLD.Barrier()
         if source_group.containsMyRank():
@@ -104,18 +215,16 @@ class ParaMEDMEMBasicsTest(unittest.TestCase):
             dec.sendData()
             pass
         ## end
-        interface = 0
-        target_group = 0
-        source_group = 0
-        dec = 0
-        mesh       =0
-        support    =0
-        paramesh   =0
-        parafield  =0
-        icocofield =0
+
+        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
+        parafield.release()
+        paramesh.release()
+        dec.release()
+        target_group.release()
+        source_group.release()
         MPI.COMM_WORLD.Barrier()
-        MPI.Finalize()
-        pass
-    pass
 
-unittest.main()
+if __name__ == "__main__":
+    unittest.main()
+    MPI.Finalize()
+
index 3d929d625c2819ad62fff37ed49ae8d1d2a63dd8..a4b0e7da19bc862532499248bdacbc87ea9c61c3 100755 (executable)
 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 #
 
-from ParaMEDMEM import *
-import sys, os
+from medcoupling import *
+from mpi4py import MPI
+import unittest
+import os
 
-MPI_Init(sys.argv)
+class ParaMEDMEM_DEC_Tests(unittest.TestCase):
+    def test_NonCoincidentDEC_py(self):
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
 
-size = MPI_Comm_size(MPI_COMM_WORLD)
-rank = MPI_Comm_rank(MPI_COMM_WORLD)
-if size != 5:
-    raise RuntimeError("Expect MPI_COMM_WORLD size == 5")
+        if size != 5:
+            raise RuntimeError("Expect MPI.MPI_COMM_WORLD size == 5")
 
-nproc_source = 3
-procs_source = list(range(nproc_source))
-procs_target = list(range(size - nproc_source + 1, size))
+        nproc_source = 3
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source + 1, size))
 
-interface = CommInterface()
+        interface = CommInterface()
 
-target_group = MPIProcessorGroup(interface, procs_target)
-source_group = MPIProcessorGroup(interface, procs_source)
+        target_group = MPIProcessorGroup(interface, procs_target)
+        source_group = MPIProcessorGroup(interface, procs_source)
 
-source_mesh= 0
-target_mesh= 0
-parasupport= 0
-mesh       = 0
-support    = 0
-field      = 0
-paramesh   = 0
-parafield  = 0
-icocofield = 0
+        dec = NonCoincidentDEC(source_group, target_group)
 
-dec = NonCoincidentDEC(source_group, target_group)
+        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
+        if not os.path.isdir(data_dir):
+            data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1]
+        tmp_dir  = os.environ.get('TMP', "")
+        if tmp_dir == '':
+            tmp_dir = "/tmp"
 
-data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
-tmp_dir  = os.environ['TMP']
-if tmp_dir == '':
-    tmp_dir = "/tmp"
-    pass
+        filename_xml1 = os.path.join(data_dir, "square1_split")
+        filename_xml2 = os.path.join(data_dir, "square2_split")
 
-filename_xml1 = data_dir + "/share/resources/med/square1_split"
-filename_xml2 = data_dir + "/share/resources/med/square2_split"
+        MPI.COMM_WORLD.Barrier()
 
-MPI_Barrier(MPI_COMM_WORLD)
+        if source_group.containsMyRank():
+            filename = filename_xml1 + str(rank+1) + ".med"
+            meshname = "Mesh_2_" + str(rank+1)
 
-if source_group.containsMyRank():
+            mesh = MESH(MED_DRIVER, filename, meshname)
+            support = SUPPORT(mesh, "all elements", MED_CELL)
+            paramesh = ParaMESH(mesh, source_group, "source mesh")
 
-    filename = filename_xml1 + str(rank+1) + ".med"
-    meshname = "Mesh_2_" + str(rank+1)
+            parasupport = UnstructuredParaSUPPORT( support, source_group)
+            comptopo = ComponentTopology()
 
-    mesh = MESH(MED_DRIVER, filename, meshname)
-    support = SUPPORT(mesh, "all elements", MED_CELL)
-    paramesh = ParaMESH(mesh, source_group, "source mesh")
+            parafield = ParaFIELD(parasupport, comptopo)
 
-    parasupport = UnstructuredParaSUPPORT( support, source_group)
-    comptopo = ComponentTopology()
+            nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS);
 
-    parafield = ParaFIELD(parasupport, comptopo)
+            value = [1.0]*nb_local
 
-    nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS);
+            parafield.getField().setValue(value)
+            icocofield = ICoCo_MEDField(paramesh,parafield)
+            dec.attachLocalField(icocofield,'P0')
+            pass
 
-    value = [1.0]*nb_local
+        if target_group.containsMyRank():
+            filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
+            meshname = "Mesh_3_" + str(rank - nproc_source + 1)
 
-    parafield.getField().setValue(value)
-    icocofield = ICoCo_MEDField(paramesh,parafield)
-    dec.attachLocalField(icocofield,'P0')
-    pass
+            mesh = MESH(MED_DRIVER, filename, meshname)
+            support = SUPPORT(mesh, "all elements", MED_CELL)
+            paramesh = ParaMESH(mesh, target_group, "target mesh")
 
-if target_group.containsMyRank():
+            parasupport = UnstructuredParaSUPPORT( support, target_group)
+            comptopo = ComponentTopology()
+            parafield = ParaFIELD(parasupport, comptopo)
 
-    filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
-    meshname = "Mesh_3_" + str(rank - nproc_source + 1)
+            nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS)
+            value = [0.0]*nb_local
 
-    mesh = MESH(MED_DRIVER, filename, meshname)
-    support = SUPPORT(mesh, "all elements", MED_CELL)
-    paramesh = ParaMESH(mesh, target_group, "target mesh")
+            parafield.getField().setValue(value)
+            icocofield = ICoCo_MEDField(paramesh,parafield)
 
-    parasupport = UnstructuredParaSUPPORT( support, target_group)
-    comptopo = ComponentTopology()
-    parafield = ParaFIELD(parasupport, comptopo)
+            dec.attachLocalField(icocofield, 'P0')
+            pass
 
-    nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS)
-    value = [0.0]*nb_local
+        field_before_int = [0.0]
+        field_after_int = [0.0]
 
-    parafield.getField().setValue(value)
-    icocofield = ICoCo_MEDField(paramesh,parafield)
+        if source_group.containsMyRank():
+            field_before_int = [parafield.getVolumeIntegral(1)]
+            MPI.MPI_Bcast(field_before_int, 1, MPI.MPI_DOUBLE, 0, MPI.MPI_COMM_WORLD);
+            dec.synchronize()
+            print("DEC usage")
+            dec.setForcedRenormalization(False)
+            dec.sendData()
+            pass
 
-    dec.attachLocalField(icocofield, 'P0')
-    pass
+        if target_group.containsMyRank():
+            MPI.MPI_Bcast(field_before_int, 1, MPI.MPI_DOUBLE, 0, MPI.MPI_COMM_WORLD)
+            dec.synchronize()
+            dec.setForcedRenormalization(False)
+            dec.recvData()
+            field_after_int = [parafield.getVolumeIntegral(1)]
+            pass
 
-field_before_int = [0.0]
-field_after_int = [0.0]
+        MPI.MPI_Bcast(field_before_int, 1, MPI.MPI_DOUBLE, 0, MPI.MPI_COMM_WORLD)
+        MPI.MPI_Bcast(field_after_int , 1, MPI.MPI_DOUBLE, size-1, MPI.MPI_COMM_WORLD)
 
-if source_group.containsMyRank():
+        epsilon = 1e-6
+        self.assertAlmostEqual(field_before_int[0], field_after_int[0], delta=epsilon)
 
-    field_before_int = [parafield.getVolumeIntegral(1)]
-    MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
-    dec.synchronize()
-    print("DEC usage")
-    dec.setForcedRenormalization(False)
+        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
+        dec.release()
+        target_group.release()
+        source_group.release()
 
-    dec.sendData()
-    pass
+        MPI.COMM_WORLD.Barrier()
+        MPI.Finalize()
 
-if target_group.containsMyRank():
+if __name__ == "__main__":
+    unittest.main()
 
-    MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD)
-    dec.synchronize()
-    dec.setForcedRenormalization(False)
-    dec.recvData()
-    field_after_int = [parafield.getVolumeIntegral(1)]
-    pass
-
-MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD)
-MPI_Bcast(field_after_int , 1, MPI_DOUBLE, size-1, MPI_COMM_WORLD)
-
-epsilon = 1e-6
-if abs(field_before_int[0] - field_after_int[0]) > epsilon:
-    print("Field before is not equal field after: %s != %s"%\
-          (field_before_int[0],field_after_int[0]))
-    pass
-
-
-MPI_Barrier(MPI_COMM_WORLD)
-MPI_Finalize()
-print("# End of testNonCoincidentDEC")
diff --git a/src/ParaMEDMEM_Swig/test_OverlapDEC.py b/src/ParaMEDMEM_Swig/test_OverlapDEC.py
new file mode 100644 (file)
index 0000000..eb7d1d6
--- /dev/null
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+from medcoupling import *
+from ParaMEDMEMTestTools import WriteInTmpDir
+import sys, os
+import unittest
+import math
+from mpi4py import MPI
+
+class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
+    """ This test illustrates a basic use of the OverlapDEC and shows notably that not all 
+    processors must possess a piece of the source and/or target mesh. 
+    Look at the C++ documentation of the class for more informations.
+    In this case, the source mesh is only stored on 2 procs, whereas the target is on 4.
+    Since only a single group of processor is defined in the setup, the 2 idle procs on the source side are just providing an empty mesh,
+    thus indicating that they don't participate in the source definition. 
+    
+    Main method is testOverlapDEC_2D_py_1()
+    """
+
+    def generateFullSource(self):
+        """ The complete source mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) """
+        msh  = self.generateFullTarget()
+        msh.simplexize(0)
+        msh.setName("src_mesh")
+        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
+        fld.setMesh(msh); fld.setName("source_F");
+        da = DataArrayDouble(msh.getNumberOfCells())
+        da.iota()
+        da *= 2
+        fld.setArray(da)
+        return msh, fld
+
+    def generateFullTarget(self):
+        """ The complete target mesh: 4 squares """
+        m1 = MEDCouplingCMesh("tgt_msh")
+        da = DataArrayDouble([0,1,2])
+        m1.setCoords(da, da)
+        msh = m1.buildUnstructured()
+        return msh
+
+    #
+    # Below, the two functions emulating the setup of a piece of the source and target mesh
+    # on each proc. Obviously, in real-world problems this comes from your code and is certainly
+    # not computed by cutting the full-size mesh again from scratch!
+    #
+    def getPartialSource(self, rank):
+        """ Will return an empty mesh piece for rank=2 and 3 """
+        msh, f = self.generateFullSource()
+        if rank in [2,3]:
+            sub_m, sub_f = msh[[]], f[[]]  # Little trick to select nothing in the mesh, thus producing an empty mesh
+        elif rank == 0:
+            sub_m, sub_f = msh[0:4], f[0:4]
+        elif rank == 1:
+            sub_m, sub_f = msh[4:8], f[4:8]
+        sub_m.zipCoords()
+        return sub_m, sub_f
+
+    def getPartialTarget(self, rank):
+        """ One square for each rank """
+        msh = self.generateFullTarget()
+        sub_m = msh[rank]
+        sub_m.zipCoords()
+        # Receiving side must prepare an empty field that will be filled by DEC:
+        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
+        da = DataArrayDouble(sub_m.getNumberOfCells())
+        fld.setArray(da)
+        fld.setName("tgt_F")
+        fld.setMesh(sub_m)
+        return sub_m, fld
+
+    @WriteInTmpDir
+    def testOverlapDEC_2D_py_1(self):
+        """ The main method of the test """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            raise RuntimeError("Should be run on 4 procs!")
+
+        # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
+        proc_group = list(range(size))   # No need for ProcessorGroup object here.
+        odec = OverlapDEC(proc_group)
+
+        # Write out full size meshes/fields for inspection
+        if rank == 0:
+            _, fld = self.generateFullSource()
+            mshT = self.generateFullTarget()
+            WriteField("./source_field_FULL.med", fld, True)
+            WriteUMesh("./target_mesh_FULL.med", mshT, True)
+
+        MPI.COMM_WORLD.Barrier()  # really necessary??
+
+        #
+        # OK, let's go DEC !!
+        #
+        _, fieldS = self.getPartialSource(rank)
+        fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
+        mshT, fieldT = self.getPartialTarget(rank)
+        fieldT.setNature(IntensiveMaximum)
+        if rank not in [2,3]:
+            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
+        WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
+
+        odec.attachSourceLocalField(fieldS)
+        odec.attachTargetLocalField(fieldT)
+        odec.synchronize()
+        odec.sendRecvData()
+
+        # Now the actual checks:
+        if rank == 0:
+            self.assertEqual(fieldT.getArray().getValues(), [1.0])
+        elif rank == 1:
+            self.assertEqual(fieldT.getArray().getValues(), [5.0])
+        elif rank == 2:
+            self.assertEqual(fieldT.getArray().getValues(), [9.0])
+        elif rank == 3:
+            self.assertEqual(fieldT.getArray().getValues(), [13.0])
+
+        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
+        odec.release()
+
+        MPI.COMM_WORLD.Barrier()
+
+if __name__ == "__main__":
+    unittest.main()
+    MPI.Finalize()
index f110cae6f6e7b1b919f1ac79aab80ffb86d0200a..11c5b7a16636cbdd96c4ab3cf496df90550ad225 100755 (executable)
 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 #
 
-from ParaMEDMEM import *
-import sys, os
+from medcoupling import *
+from ParaMEDMEMTestTools import WriteInTmpDir
+import os
 import unittest
 import math
+from mpi4py import MPI
+
+
+class ParaMEDMEM_SC_DEC_Tests(unittest.TestCase):
+    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
+    """
+
+    def generateFullMeshField(self):
+        """ The complete mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) 
+        Note that in this case, this is the **only** mesh for the whole problem.
+        """
+        m1 = MEDCouplingCMesh("tgt_msh")
+        da = DataArrayDouble([0,1,2])
+        m1.setCoords(da, da)
+        msh = m1.buildUnstructured()
+
+        msh.simplexize(0)
+        msh.setName("src_mesh")
+        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
+        fld.setMesh(msh); fld.setName("source_F");
+        da = DataArrayDouble(msh.getNumberOfCells())
+        da.iota()
+        da *= 2
+        fld.setArray(da)
+        return msh, fld
+
+    #
+    # Below, the function emulating the set up of a piece of the mesh being owned by
+    # a given processor.
+    #
+    def getPartialSource(self, rank):
+        msh, f = self.generateFullMeshField()
+        if rank == 0:
+            sub_ids = [0,1,4,5]
+        elif rank == 1:
+            sub_ids = [2,3,6,7]
+        sub_m, sub_f = msh[sub_ids], f[sub_ids]
+        sub_m.zipCoords()
+        return sub_m, sub_f
+
+    def getPartialTarget(self, rank):
+        msh, f = self.generateFullMeshField()
+        if rank == 2:
+            sub_ids = [0,1,2,3]
+        elif rank == 3:
+            sub_ids = [4,5,6,7]
+        sub_m, sub_f = msh[sub_ids], f[sub_ids]
+        sub_m.zipCoords()
+        return sub_m, sub_f
+
+    @WriteInTmpDir
+    def test_StructuredCoincidentDEC_py_1(self):
+        """ This test illustrates a basic use of the StructuredCoincidentDEC which allows to
+        resdistribute a field/mesh which is already scattered on several processors into a different configuration.
+        Look at the C++ documentation of the class for more informations. 
+        Note that in the case of the StructuredCoincidentDEC no interpolation whatsoever is performed. This is only
+        really a redistribution of the data among the processors.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+
+        # Define two processor groups
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, procs_source)
+        target_group = MPIProcessorGroup(interface, procs_target)
+
+        scdec = StructuredCoincidentDEC(source_group, target_group)
+
+        # Write out full size meshes/fields for inspection
+        if rank == 0:
+            _, fld = self.generateFullMeshField()
+            WriteField("./source_field_FULL.med", fld, True)
 
-class ParaMEDMEMBasicsTest2(unittest.TestCase):
-    def testStructuredCoincidentDEC(self):
-        MPI_Init(sys.argv)
         #
-        size = MPI_Comm_size(MPI_COMM_WORLD)
-        rank = MPI_Comm_rank(MPI_COMM_WORLD)
+        # OK, let's go DEC !!
         #
+        if source_group.containsMyRank():
+            _, fieldS = self.getPartialSource(rank)
+            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
+            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
+            scdec.attachLocalField(fieldS)
+            scdec.synchronize()
+            scdec.sendData()
+
+        if target_group.containsMyRank():
+            mshT, fieldT = self.getPartialTarget(rank)
+            fieldT.setNature(IntensiveMaximum)
+            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
+            scdec.attachLocalField(fieldT)
+            scdec.synchronize()
+            scdec.recvData()
+            # Now the actual checks:
+            if rank == 2:
+                self.assertEqual(fieldT.getArray().getValues(), [0.0, 2.0, 8.0, 10.0])
+            elif rank == 3:
+                self.assertEqual(fieldT.getArray().getValues(), [4.0, 6.0, 12.0, 14.0])
+
+        # Release the DEC (this involves MPI exchanges -- so it is better done before MPI.Finalize())
+        scdec.release()
+        source_group.release()
+        target_group.release()
+
+        MPI.COMM_WORLD.Barrier()
+
+    @WriteInTmpDir
+    def test_StructuredCoincidentDEC_py_2(self):
+        """ More involved tests using Para* objects ...
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+
         if size < 4:
-            raise RuntimeError("Expect MPI_COMM_WORLD size >= 4")
-        #
+            print("Should be run on >= 4 procs!")
+            return
+
         interface = CommInterface()
-        #
-        self_group   = MPIProcessorGroup(interface, rank, rank)
-        target_group = MPIProcessorGroup(interface, 3, size-1)
+
         source_group = MPIProcessorGroup(interface, 0, 2)
-        #
-        mesh      = 0
-        support   = 0
-        paramesh  = 0
-        parafield = 0
-        comptopo  = 0
-        icocofield= 0
-        #
-        data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
-        tmp_dir  = os.environ['TMP']
+        target_group = MPIProcessorGroup(interface, 3, size-1)
+        self_group = MPIProcessorGroup(interface, rank, rank)
+
+        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
+        if not os.path.isdir(data_dir):
+            data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1]
+        tmp_dir  = os.environ.get('TMP', "")
         if tmp_dir == '':
             tmp_dir = "/tmp"
-            pass
 
-        filename_xml1    = data_dir + "/share/resources/med/square1_split"
-        filename_2       = data_dir + "/share/resources/med/square1.med"
-        filename_seq_wr  = tmp_dir + "/"
-        filename_seq_med = tmp_dir + "/myWrField_seq_pointe221.med"
+        filename_xml1 = os.path.join(data_dir, "square1_split")
+        filename_2 = os.path.join(data_dir, "square1.med")
+        filename_seq_wr  = "."
+        filename_seq_med = os.path.join(".", "myWrField_seq_pointe221.med")
 
         dec = StructuredCoincidentDEC(source_group, target_group)
-        MPI_Barrier(MPI_COMM_WORLD)
+
+        MPI.COMM_WORLD.Barrier()
         if source_group.containsMyRank():
             filename = filename_xml1 + str(rank+1) + ".med"
             meshname = "Mesh_2_" + str(rank+1)
-            mesh=ReadUMeshFromFile(filename,meshname,0)
+            mesh = ReadUMeshFromFile(filename,meshname,0)
             paramesh=ParaMESH(mesh,source_group,"source mesh")
             comptopo=ComponentTopology(6)
             parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh,comptopo)
@@ -74,27 +181,24 @@ class ParaMEDMEMBasicsTest2(unittest.TestCase):
             for ielem in range(nb_local):
                 for icomp in range(6):
                     value.append(global_numbering[ielem]*6.0+icomp);
-                    pass
-                pass
             parafield.getField().setValues(value)
-            icocofield = ICoCoMEDField(mesh,parafield.getField())
+            icocofield = ICoCoMEDField(parafield.getField())
             dec.setMethod("P0")
             dec.attachLocalField(parafield)
             dec.synchronize()
             dec.sendData()
-            pass
 
         if target_group.containsMyRank():
             meshname2 = "Mesh_2"
-            mesh=ReadUMeshFromFile(filename_2, meshname2,0)
-            paramesh=ParaMESH(mesh, self_group, "target mesh")
-            comptopo=ComponentTopology(6,target_group)
-            parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
+            mesh = ReadUMeshFromFile(filename_2, meshname2,0)
+            paramesh = ParaMESH(mesh, self_group, "target mesh")
+            comptopo = ComponentTopology(6,target_group)
+            parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
             parafield.getField().setNature(IntensiveMaximum)
             nb_local=mesh.getNumberOfCells()
             value = [0.0]*(nb_local*comptopo.nbLocalComponents())
             parafield.getField().setValues(value)
-            icocofield = ICoCoMEDField(mesh,parafield.getField())
+            icocofield = ICoCoMEDField(parafield.getField())
             dec.setMethod("P0")
             dec.attachLocalField(parafield)
             dec.synchronize()
@@ -103,26 +207,18 @@ class ParaMEDMEMBasicsTest2(unittest.TestCase):
             for i in range(nb_local):
                 first=comptopo.firstLocalComponent()
                 for icomp in range(comptopo.nbLocalComponents()):
-                    self.assertTrue(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp]-
-                                              (float)(i*6+icomp+first))<1e-12)
-                    pass
-                pass
-            pass
-        comptopo=0
-        interface = 0
-        mesh       =0
-        support    =0
-        paramesh   =0
-        parafield  =0
-        icocofield =0
-        dec=0
-        self_group =0
-        target_group = 0
-        source_group = 0
-        MPI_Barrier(MPI_COMM_WORLD)
-        MPI_Finalize()
-        print("End of test StructuredCoincidentDEC")
-        pass
-
-
-unittest.main()
+                    self.assertTrue(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp]-(float)(i*6+icomp+first))<1e-12)
+
+        # Release the DEC (this involves MPI exchanges -- so it is better done before MPI.Finalize())
+        parafield.release()
+        paramesh.release()
+        dec.release()
+        target_group.release()
+        source_group.release()
+        self_group.release()
+
+        MPI.COMM_WORLD.Barrier()
+
+if __name__ == "__main__":
+    unittest.main()
+    MPI.Finalize()