From f8a705ad74272c8921e72fa06fd726e4dcf9b0a4 Mon Sep 17 00:00:00 2001 From: abn Date: Fri, 12 Feb 2021 21:56:46 +0100 Subject: [PATCH] [ParaMEDMEM] Adding SWIG interface for OverlapDEC + py tests of DEC + added 'release()' method for some classes (gives a chance to Python wrapping to call MPI clean up operations without having to wait for destructor) + added Python tests --- src/ParaMEDMEM/BlockTopology.cxx | 19 +- src/ParaMEDMEM/BlockTopology.hxx | 2 + src/ParaMEDMEM/DisjointDEC.cxx | 9 +- src/ParaMEDMEM/DisjointDEC.hxx | 6 +- src/ParaMEDMEM/ExplicitTopology.hxx | 2 +- src/ParaMEDMEM/InterpKernelDEC.cxx | 12 +- src/ParaMEDMEM/InterpKernelDEC.hxx | 2 + src/ParaMEDMEM/MPIProcessorGroup.cxx | 14 +- src/ParaMEDMEM/MPIProcessorGroup.hxx | 2 + src/ParaMEDMEM/OverlapDEC.cxx | 26 ++- src/ParaMEDMEM/OverlapDEC.hxx | 2 + src/ParaMEDMEM/OverlapInterpolationMatrix.cxx | 2 +- src/ParaMEDMEM/ParaFIELD.cxx | 20 +- src/ParaMEDMEM/ParaFIELD.hxx | 1 + src/ParaMEDMEM/ParaMESH.cxx | 10 + src/ParaMEDMEM/ParaMESH.hxx | 4 +- src/ParaMEDMEM/StructuredCoincidentDEC.cxx | 70 ++++-- src/ParaMEDMEM/StructuredCoincidentDEC.hxx | 6 + src/ParaMEDMEM_Swig/CMakeLists.txt | 47 +++- src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i | 2 + src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py | 31 +++ src/ParaMEDMEM_Swig/test_InterpKernelDEC.py | 171 +++++++++++--- src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py | 177 +++++++------- src/ParaMEDMEM_Swig/test_OverlapDEC.py | 126 ++++++++++ .../test_StructuredCoincidentDEC.py | 218 +++++++++++++----- 25 files changed, 749 insertions(+), 232 deletions(-) create mode 100644 src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py create mode 100644 src/ParaMEDMEM_Swig/test_OverlapDEC.py diff --git a/src/ParaMEDMEM/BlockTopology.cxx b/src/ParaMEDMEM/BlockTopology.cxx index 82831b74f..e139bfa5e 100644 --- a/src/ParaMEDMEM/BlockTopology.cxx +++ b/src/ParaMEDMEM/BlockTopology.cxx @@ -41,7 +41,7 @@ namespace MEDCoupling BlockTopology::BlockTopology() : _dimension(0), _nb_procs_per_dim(0), _local_array_indices(0), _cycle_type(0), - _proc_group(NULL),_nb_elems(0), + _proc_group(nullptr),_nb_elems(0), _owns_processor_group(false) {} @@ -168,9 +168,18 @@ namespace MEDCoupling } BlockTopology::~BlockTopology() + { + release(); + } + + /** Destructor involves MPI operations: make sure this is accessible from a proper + * method for Python wrapping. 
+ */ + void BlockTopology::release() { if (_owns_processor_group) delete _proc_group; + _proc_group = nullptr; } //!converts a pair to a global number @@ -317,11 +326,8 @@ namespace MEDCoupling void BlockTopology::unserialize(const mcIdType* serializer,const CommInterface& comm_interface) { const mcIdType* ptr_serializer=serializer; - cout << "unserialize..."<commFree(&_union_comm); + _union_comm = MPI_COMM_NULL; } /** diff --git a/src/ParaMEDMEM/DisjointDEC.hxx b/src/ParaMEDMEM/DisjointDEC.hxx index 289d618dd..8f7e73ba3 100644 --- a/src/ParaMEDMEM/DisjointDEC.hxx +++ b/src/ParaMEDMEM/DisjointDEC.hxx @@ -79,18 +79,20 @@ namespace MEDCoupling DisjointDEC &operator=(const DisjointDEC& s); DisjointDEC(const std::set& src_ids, const std::set& trg_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD); + virtual ~DisjointDEC(); + void setNature(NatureOfField nature); void attachLocalField( MEDCouplingFieldDouble *field); void attachLocalField(const ParaFIELD *field, bool ownPt=false); void attachLocalField(const ICoCo::MEDField *field); - + virtual void prepareSourceDE() = 0; virtual void prepareTargetDE() = 0; virtual void recvData() = 0; virtual void sendData() = 0; void sendRecvData(bool way=true); virtual void synchronize() = 0; - virtual ~DisjointDEC(); + virtual void computeProcGroup() { } void renormalizeTargetField(bool isWAbs); // diff --git a/src/ParaMEDMEM/ExplicitTopology.hxx b/src/ParaMEDMEM/ExplicitTopology.hxx index d815f3d71..ef8b7deea 100644 --- a/src/ParaMEDMEM/ExplicitTopology.hxx +++ b/src/ParaMEDMEM/ExplicitTopology.hxx @@ -47,7 +47,7 @@ namespace MEDCoupling ExplicitTopology( const ExplicitTopology& topo, int nbcomponents); ExplicitTopology(const ParaMESH &mesh); virtual ~ExplicitTopology(); - + inline mcIdType getNbElements()const; inline mcIdType getNbLocalElements() const; const ProcessorGroup* getProcGroup()const { return _proc_group; } diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx index d379bf206..16a26546e 100644 --- a/src/ParaMEDMEM/InterpKernelDEC.cxx +++ b/src/ParaMEDMEM/InterpKernelDEC.cxx @@ -71,9 +71,17 @@ namespace MEDCoupling InterpKernelDEC::~InterpKernelDEC() { - if (_interpolation_matrix !=0) + release(); + } + + void InterpKernelDEC::release() + { + if (_interpolation_matrix != nullptr) delete _interpolation_matrix; - } + _interpolation_matrix = nullptr; + DisjointDEC::cleanInstance(); + } + /*! \brief Synchronization process for exchanging topologies. diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx index b92a663c8..f68477502 100644 --- a/src/ParaMEDMEM/InterpKernelDEC.hxx +++ b/src/ParaMEDMEM/InterpKernelDEC.hxx @@ -133,6 +133,8 @@ namespace MEDCoupling InterpKernelDEC(const std::set& src_ids, const std::set& trg_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD); virtual ~InterpKernelDEC(); + void release(); + void synchronize(); void recvData(); void recvData(double time); diff --git a/src/ParaMEDMEM/MPIProcessorGroup.cxx b/src/ParaMEDMEM/MPIProcessorGroup.cxx index e800181c5..aeb68c167 100644 --- a/src/ParaMEDMEM/MPIProcessorGroup.cxx +++ b/src/ParaMEDMEM/MPIProcessorGroup.cxx @@ -181,10 +181,20 @@ namespace MEDCoupling MPIProcessorGroup::~MPIProcessorGroup() { - _comm_interface.groupFree(&_group); + release(); + } + + /** Destructor involves MPI operations: make sure this is accessible from a proper + * method for Python wrapping. 
+ */ + void MPIProcessorGroup::release() + { + if (_group != MPI_GROUP_EMPTY) + _comm_interface.groupFree(&_group); + _group = MPI_GROUP_EMPTY; if (_comm!=_world_comm && _comm !=MPI_COMM_NULL) _comm_interface.commFree(&_comm); - + _comm = MPI_COMM_NULL; } /*! Translation of the rank id between two processor groups. This method translates rank \a rank diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx index 6feec7750..e73f21066 100644 --- a/src/ParaMEDMEM/MPIProcessorGroup.hxx +++ b/src/ParaMEDMEM/MPIProcessorGroup.hxx @@ -38,6 +38,8 @@ namespace MEDCoupling MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD); MPIProcessorGroup(const MPIProcessorGroup& other); virtual ~MPIProcessorGroup(); + void release(); + virtual MPIProcessorGroup *deepCopy() const; virtual ProcessorGroup* fuse (const ProcessorGroup&) const; void intersect (ProcessorGroup&) { } diff --git a/src/ParaMEDMEM/OverlapDEC.cxx b/src/ParaMEDMEM/OverlapDEC.cxx index ae6a4f0cc..2be96a941 100644 --- a/src/ParaMEDMEM/OverlapDEC.cxx +++ b/src/ParaMEDMEM/OverlapDEC.cxx @@ -59,20 +59,40 @@ namespace MEDCoupling } OverlapDEC::~OverlapDEC() + { + release(); + } + + /** Destructor involves MPI operations: make sure this is accessible from a proper + * method for Python wrapping. + */ + void OverlapDEC::release() { if(_own_group) - delete _group; + { + delete _group; + _group = nullptr; + } if(_own_source_field) - delete _source_field; + { + delete _source_field; + _source_field = nullptr; + } if(_own_target_field) - delete _target_field; + { + delete _target_field; + _target_field = nullptr; + } delete _interpolation_matrix; + _interpolation_matrix = nullptr; delete _locator; + _locator = nullptr; if (_comm != MPI_COMM_NULL) { MEDCoupling::CommInterface comm; comm.commFree(&_comm); } + _comm = MPI_COMM_NULL; } void OverlapDEC::sendRecvData(bool way) diff --git a/src/ParaMEDMEM/OverlapDEC.hxx b/src/ParaMEDMEM/OverlapDEC.hxx index 942653f1c..ef0fe77a0 100644 --- a/src/ParaMEDMEM/OverlapDEC.hxx +++ b/src/ParaMEDMEM/OverlapDEC.hxx @@ -224,6 +224,8 @@ namespace MEDCoupling public: OverlapDEC(const std::set& procIds,const MPI_Comm& world_comm=MPI_COMM_WORLD); virtual ~OverlapDEC(); + void release(); + void sendRecvData(bool way=true); void sendData(); void recvData(); diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx index 0b835c41b..17c2308cb 100644 --- a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx +++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx @@ -260,7 +260,7 @@ namespace MEDCoupling if(_target_field->getField()->getNature()==IntensiveMaximum) _mapping.computeDenoConservativeVolumic(_target_field->getField()->getNumberOfTuplesExpected()); else - throw INTERP_KERNEL::Exception("OverlapDEC: Policy not implemented yet: only IntensiveMaximum!"); + throw INTERP_KERNEL::Exception("OverlapDEC: Policy not set (did you call setNature()?) or not implemented yet: only IntensiveMaximum!"); // { // if(_target_field->getField()->getNature()==IntensiveConservation) // { diff --git a/src/ParaMEDMEM/ParaFIELD.cxx b/src/ParaMEDMEM/ParaFIELD.cxx index aa1cb94af..7957222ab 100644 --- a/src/ParaMEDMEM/ParaFIELD.cxx +++ b/src/ParaMEDMEM/ParaFIELD.cxx @@ -122,12 +122,28 @@ namespace MEDCoupling } ParaFIELD::~ParaFIELD() + { + release(); + } + + /** Destructor involves MPI operations: make sure this is accessible from a proper + * method for Python wrapping. 
+ */ + void ParaFIELD::release() { if(_field) - _field->decrRef(); + { + _field->decrRef(); + _field = nullptr; + } + if(_own_support) - delete _support; + { + delete _support; + _support = nullptr; + } delete _topology; + _topology = nullptr; } void ParaFIELD::synchronizeTarget(ParaFIELD* source_field) diff --git a/src/ParaMEDMEM/ParaFIELD.hxx b/src/ParaMEDMEM/ParaFIELD.hxx index 1dba2f421..606b47975 100644 --- a/src/ParaMEDMEM/ParaFIELD.hxx +++ b/src/ParaMEDMEM/ParaFIELD.hxx @@ -39,6 +39,7 @@ namespace MEDCoupling ParaFIELD(TypeOfField type, TypeOfTimeDiscretization td, ParaMESH* mesh, const ComponentTopology& component_topology); ParaFIELD(MEDCouplingFieldDouble* field, ParaMESH *sup, const ProcessorGroup& group); virtual ~ParaFIELD(); + void release(); void synchronizeTarget( MEDCoupling::ParaFIELD* source_field); void synchronizeSource( MEDCoupling::ParaFIELD* target_field); diff --git a/src/ParaMEDMEM/ParaMESH.cxx b/src/ParaMEDMEM/ParaMESH.cxx index d4c1045f4..c9a251916 100644 --- a/src/ParaMEDMEM/ParaMESH.cxx +++ b/src/ParaMEDMEM/ParaMESH.cxx @@ -76,9 +76,19 @@ namespace MEDCoupling } ParaMESH::~ParaMESH() + { + release(); + } + + /** Destructor involves MPI operations: make sure this is accessible from a proper + * method for Python wrapping. + */ + void ParaMESH::release() { delete _block_topology; delete _explicit_topology; + _block_topology = nullptr; + _explicit_topology = nullptr; } } diff --git a/src/ParaMEDMEM/ParaMESH.hxx b/src/ParaMEDMEM/ParaMESH.hxx index 7955bd42e..534e53410 100644 --- a/src/ParaMEDMEM/ParaMESH.hxx +++ b/src/ParaMEDMEM/ParaMESH.hxx @@ -56,6 +56,8 @@ namespace MEDCoupling const ProcessorGroup& proc_group ) ; virtual ~ParaMESH(); + void release(); + void setNodeGlobal(DataArrayIdType *nodeGlobal); void setCellGlobal(DataArrayIdType *cellGlobal); Topology* getTopology() const { return _explicit_topology; } @@ -80,7 +82,7 @@ namespace MEDCoupling int _my_domain_id; //global topology of the cells - MEDCoupling::BlockTopology* _block_topology; + BlockTopology* _block_topology; Topology* _explicit_topology; // pointers to global numberings MCAuto _node_global; diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.cxx b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx index 9408c267e..1a1bdd501 100644 --- a/src/ParaMEDMEM/StructuredCoincidentDEC.cxx +++ b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx @@ -34,35 +34,57 @@ using namespace std; namespace MEDCoupling { - StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0), - _send_counts(0),_recv_counts(0), - _send_displs(0),_recv_displs(0), - _recv_buffer(0),_send_buffer(0) + StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(nullptr),_topo_target(nullptr), + _owns_topo_source(false), _owns_topo_target(false), + _send_counts(nullptr),_recv_counts(nullptr), + _send_displs(nullptr),_recv_displs(nullptr), + _recv_buffer(nullptr),_send_buffer(nullptr) { } + StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group): + DisjointDEC(local_group,distant_group), + _topo_source(nullptr),_topo_target(nullptr), + _owns_topo_source(false), _owns_topo_target(false), + _send_counts(nullptr),_recv_counts(nullptr), + _send_displs(nullptr),_recv_displs(nullptr), + _recv_buffer(nullptr),_send_buffer(nullptr) + { + } StructuredCoincidentDEC::~StructuredCoincidentDEC() + { + release(); + } + + /** Destructor involves MPI operations: make sure this is accessible from a proper + * method for Python wrapping. 
+   */
+  void StructuredCoincidentDEC::release()
   {
     delete [] _send_buffer;
     delete [] _recv_buffer;
-    delete []_send_displs;
+    delete [] _send_displs;
     delete [] _recv_displs;
     delete [] _send_counts;
     delete [] _recv_counts;
-    if (! _source_group->containsMyRank())
+    _send_buffer = nullptr;
+    _recv_buffer = nullptr;
+    _send_displs = nullptr;
+    _recv_displs = nullptr;
+    _send_counts = nullptr;
+    _recv_counts = nullptr;
+
+    if (_owns_topo_source)
       delete _topo_source;
-    if(!_target_group->containsMyRank())
+    if (_owns_topo_target)
       delete _topo_target;
-  }
+    _topo_source = nullptr;
+    _topo_target = nullptr;
+    _owns_topo_source = false;
+    _owns_topo_target = false;
 
-  StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
-    DisjointDEC(local_group,distant_group),
-    _topo_source(0),_topo_target(0),
-    _send_counts(0),_recv_counts(0),
-    _send_displs(0),_recv_displs(0),
-    _recv_buffer(0),_send_buffer(0)
-  {
+    DisjointDEC::cleanInstance();
   }
 
   /*! Synchronization process for exchanging topologies
@@ -71,16 +93,22 @@ namespace MEDCoupling
   {
     if (_source_group->containsMyRank())
       _topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
+    else
+      _owns_topo_source = true; // _topo_source will be filled by broadcastTopology below
     if (_target_group->containsMyRank())
       _topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());
+    else
+      _owns_topo_target = true; // _topo_target will be filled by broadcastTopology below
 
     // Transmitting source topology to target code
+    MESSAGE ("Broadcast source topo ...");
     broadcastTopology(_topo_source,1000);
+
     // Transmitting target topology to source code
+    MESSAGE ("Broadcast target topo ...");
     broadcastTopology(_topo_target,2000);
+
     if (_topo_source->getNbElements() != _topo_target->getNbElements())
       throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
-
   }
 
   /*!
Creates the arrays necessary for the data transfer @@ -229,8 +257,7 @@ namespace MEDCoupling MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface); - // The master proc creates a send buffer containing - // a serialized topology + // The master proc creates a send buffer containing a serialized topology int rank_master; if (topo!=0 && topo->getProcGroup()->myRank()==0) @@ -250,7 +277,7 @@ namespace MEDCoupling { MESSAGE(" rank "<myRank()<< " waiting ..."); _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status); - MESSAGE(" rank "<myRank()<< "received master rank"<myRank()<< " received master rank "<broadcast(&size, 1,MPI_ID_TYPE,rank_master,*(group->getComm())); @@ -260,9 +287,7 @@ namespace MEDCoupling copy(serializer, serializer+size, buffer); _comm_interface->broadcast(buffer,(int)size,MPI_ID_TYPE,rank_master,*(group->getComm())); - // Processors which did not possess the source topology - // unserialize it - + // Processors which did not possess the source topology unserialize it BlockTopology* topotemp=new BlockTopology(); topotemp->unserialize(buffer, *_comm_interface); @@ -360,5 +385,6 @@ namespace MEDCoupling synchronizeTopology(); prepareTargetDE(); } + MESSAGE ("sync OK"); } } diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.hxx b/src/ParaMEDMEM/StructuredCoincidentDEC.hxx index d90b2d651..9e8b00c9f 100644 --- a/src/ParaMEDMEM/StructuredCoincidentDEC.hxx +++ b/src/ParaMEDMEM/StructuredCoincidentDEC.hxx @@ -87,6 +87,8 @@ namespace MEDCoupling StructuredCoincidentDEC(); StructuredCoincidentDEC( ProcessorGroup& source, ProcessorGroup& target); virtual ~StructuredCoincidentDEC(); + void release(); + void synchronize(); void recvData(); void sendData(); @@ -99,6 +101,10 @@ namespace MEDCoupling BlockTopology* _topo_source; BlockTopology* _topo_target; + + bool _owns_topo_source; + bool _owns_topo_target; + int* _send_counts; int* _recv_counts; int* _send_displs; diff --git a/src/ParaMEDMEM_Swig/CMakeLists.txt b/src/ParaMEDMEM_Swig/CMakeLists.txt index 5a4649b08..71cc03a38 100644 --- a/src/ParaMEDMEM_Swig/CMakeLists.txt +++ b/src/ParaMEDMEM_Swig/CMakeLists.txt @@ -57,7 +57,7 @@ SET (SWIG_MODULE_ParaMEDMEM_EXTRA_DEPS ${medcoupling_HEADERS_HXX} ${medcoupling_HEADERS_TXX} ${interpkernel_HEADERS_HXX} ${interpkernel_HEADERS_TXX}) -IF(${CMAKE_VERSION} VERSION_LESS "3.8.0") +IF(${CMAKE_VERSION} VERSION_LESS "3.8.0") SWIG_ADD_MODULE(ParaMEDMEM python ParaMEDMEM.i) ELSE() SWIG_ADD_LIBRARY(ParaMEDMEM LANGUAGE python SOURCES ParaMEDMEM.i) @@ -66,9 +66,52 @@ ENDIF() SWIG_LINK_LIBRARIES(ParaMEDMEM ${PYTHON_LIBRARIES} paramedmem) SWIG_CHECK_GENERATION(ParaMEDMEM) +# +# Tests +# +SALOME_ACCUMULATE_ENVIRONMENT(PYTHONPATH NOCHECK ${CMAKE_CURRENT_BINARY_DIR}/../PyWrapping) +SALOME_ACCUMULATE_ENVIRONMENT(MED_RESOURCES_DIR NOCHECK ${CMAKE_SOURCE_DIR}/resources) +SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env) + +# -- some tests require 2, 3, 4 or 5 procs -- +# MPICH does not support --oversubscribe: +IF(NOT ${MPIEXEC_EXECUTABLE} MATCHES "mpich") + SET(_oversub_opt "--oversubscribe") +ENDIF() + +ADD_TEST(NAME PyPara_Basics_Proc2 + COMMAND ${MPIEXEC} -np 2 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_BasicOperation.py) +SET_TESTS_PROPERTIES(PyPara_Basics_Proc2 PROPERTIES ENVIRONMENT "${tests_env}") + +ADD_TEST(NAME PyPara_InterpKernelDEC_Proc4 + COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py) +SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc4 
PROPERTIES ENVIRONMENT "${tests_env}") +ADD_TEST(NAME PyPara_InterpKernelDEC_Proc5 + COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py) +SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}") + +#ADD_TEST(NAME PyPara_NonCoincidentDEC_Proc5 +# COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_NonCoincidentDEC.py) +#SET_TESTS_PROPERTIES(PyPara_NonCoincidentDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}") + +ADD_TEST(NAME PyPara_StructuredCoincidentDEC_Proc4 + COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_StructuredCoincidentDEC.py) +SET_TESTS_PROPERTIES(PyPara_StructuredCoincidentDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}") + +ADD_TEST(NAME PyPara_OverlapDEC_Proc4 + COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_OverlapDEC.py) +SET_TESTS_PROPERTIES(PyPara_OverlapDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}") + +SET(_tst_scripts + test_InterpKernelDEC.py + test_NonCoincidentDEC.py + test_StructuredCoincidentDEC.py + test_OverlapDEC.py +) + INSTALL(TARGETS _ParaMEDMEM DESTINATION ${MEDCOUPLING_INSTALL_PYTHON}) INSTALL(FILES ParaMEDMEM.i ParaMEDMEMCommon.i DESTINATION ${MEDCOUPLING_INSTALL_HEADERS}) -INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON}) +INSTALL(FILES ${_tst_scripts} DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON}) SALOME_INSTALL_SCRIPTS(${CMAKE_CURRENT_BINARY_DIR}/ParaMEDMEM.py ${MEDCOUPLING_INSTALL_PYTHON} EXTRA_DPYS "${SWIG_MODULE_ParaMEDMEM_REAL_NAME}") INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON}) diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i index 896f77469..d1537c9eb 100644 --- a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i +++ b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i @@ -30,6 +30,7 @@ #include "InterpKernelDEC.hxx" #include "NonCoincidentDEC.hxx" #include "StructuredCoincidentDEC.hxx" +#include "OverlapDEC.hxx" #include "ParaMESH.hxx" #include "ParaFIELD.hxx" #include "ICoCoMEDField.hxx" @@ -54,6 +55,7 @@ using namespace ICoCo; %include "DisjointDEC.hxx" %include "InterpKernelDEC.hxx" %include "StructuredCoincidentDEC.hxx" +%include "OverlapDEC.hxx" %include "ICoCoField.hxx" %rename(ICoCoMEDField) ICoCo::MEDField; diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py b/src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py new file mode 100644 index 000000000..a677a4d1c --- /dev/null +++ b/src/ParaMEDMEM_Swig/ParaMEDMEMTestTools.py @@ -0,0 +1,31 @@ +# -*- coding: iso-8859-1 -*- +# Copyright (C) 2007-2020 CEA/DEN, EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# Author : Anthony Geay (EDF R&D) + +def WriteInTmpDir(func): + def decaratedFunc(*args,**kwargs): + import tempfile,os + ret = None + with tempfile.TemporaryDirectory() as tmpdirname: + os.chdir(tmpdirname) + ret = func(*args,**kwargs) + pass + return ret + return decaratedFunc diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py index 0e473a6e8..be4c1346a 100755 --- a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py +++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py @@ -19,20 +19,139 @@ # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com # -from ParaMEDMEM import * -from MEDLoader import ReadUMeshFromFile +from medcoupling import * +from ParaMEDMEMTestTools import WriteInTmpDir import sys, os import unittest import math from mpi4py import MPI -class ParaMEDMEMBasicsTest(unittest.TestCase): - def testInterpKernelDEC_2D(self): +class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase): + """ See test_StructuredCoincidentDEC_py_1() for a quick start. + """ + def generateFullSource(self): + """ The complete source mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) """ + msh = self.generateFullTarget() + msh.simplexize(0) + msh.setName("src_mesh") + fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME) + fld.setMesh(msh); fld.setName("source_F"); + da = DataArrayDouble(msh.getNumberOfCells()) + da.iota() + da *= 2 + fld.setArray(da) + return msh, fld + + def generateFullTarget(self): + """ The complete target mesh: 4 squares """ + m1 = MEDCouplingCMesh("tgt_msh") + da = DataArrayDouble([0,1,2]) + m1.setCoords(da, da) + msh = m1.buildUnstructured() + return msh + + # + # Below, the two functions emulating the set up of a piece of the source and target mesh + # on each proc. Obviously in real world problems, this comes from your code and is certainly + # not computed by cuting again from scratch the full-size mesh!! + # + def getPartialSource(self, rank): + """ Will return an empty mesh piece for rank=2 and 3 """ + msh, f = self.generateFullSource() + if rank == 0: + sub_m, sub_f = msh[0:4], f[0:4] + elif rank == 1: + sub_m, sub_f = msh[4:8], f[4:8] + sub_m.zipCoords() + return sub_m, sub_f + + def getPartialTarget(self, rank): + """ One square for each rank """ + msh = self.generateFullTarget() + if rank == 2: + sub_m = msh[[0,2]] + elif rank == 3: + sub_m = msh[[1,3]] + sub_m.zipCoords() + # Receiving side must prepare an empty field that will be filled by DEC: + fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME) + da = DataArrayDouble(sub_m.getNumberOfCells()) + fld.setArray(da) + fld.setName("tgt_F") + fld.setMesh(sub_m) + return sub_m, fld + + @WriteInTmpDir + def testInterpKernelDEC_2D_py_1(self): + """ This test illustrates a basic use of the InterpKernelDEC. + Look at the C++ documentation of the class for more informations. 
+ """ + size = MPI.COMM_WORLD.size + rank = MPI.COMM_WORLD.rank + if size != 4: + print("Should be run on 4 procs!") + return + + # Define two processor groups + nproc_source = 2 + procs_source = list(range(nproc_source)) + procs_target = list(range(size - nproc_source, size)) + + interface = CommInterface() + source_group = MPIProcessorGroup(interface, procs_source) + target_group = MPIProcessorGroup(interface, procs_target) + idec = InterpKernelDEC(source_group, target_group) + + # Write out full size meshes/fields for inspection + if rank == 0: + _, fld = self.generateFullSource() + mshT = self.generateFullTarget() + WriteField("./source_field_FULL.med", fld, True) + WriteUMesh("./target_mesh_FULL.med", mshT, True) + + MPI.COMM_WORLD.Barrier() # really necessary?? + + # + # OK, let's go DEC !! + # + if source_group.containsMyRank(): + _, fieldS = self.getPartialSource(rank) + fieldS.setNature(IntensiveMaximum) # The only policy supported for now ... + WriteField("./source_field_part_%d.med" % rank, fieldS, True) + idec.attachLocalField(fieldS) + idec.synchronize() + idec.sendData() + + if target_group.containsMyRank(): + mshT, fieldT = self.getPartialTarget(rank) + fieldT.setNature(IntensiveMaximum) + WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True) + idec.attachLocalField(fieldT) + idec.synchronize() + idec.recvData() + # Now the actual checks: + if rank == 2: + self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0]) + elif rank == 3: + self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0]) + + # Release DEC (this involves MPI exchanges -- notably the release of the communicator -- so better be done before MPI.Finalize() + idec.release() + source_group.release() + target_group.release() + MPI.COMM_WORLD.Barrier() + + @WriteInTmpDir + def test_InterpKernelDEC_2D_py_2(self): + """ More involved test using Para* objects. 
+ """ size = MPI.COMM_WORLD.size rank = MPI.COMM_WORLD.rank if size != 5: - raise RuntimeError("Expect MPI_COMM_WORLD size == 5") + print("Should be run on 5 procs!") + return + print(rank) nproc_source = 3 procs_source = list(range(nproc_source)) @@ -43,20 +162,12 @@ class ParaMEDMEMBasicsTest(unittest.TestCase): source_group = MPIProcessorGroup(interface, procs_source) dec = InterpKernelDEC(source_group, target_group) - mesh =0 - support =0 - paramesh =0 - parafield =0 - icocofield =0 - data_dir = os.environ['MEDCOUPLING_ROOT_DIR'] - tmp_dir = os.environ['TMP'] - - if not tmp_dir or len(tmp_dir)==0: - tmp_dir = "/tmp" - pass + data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med") + if not os.path.isdir(data_dir): + data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1] - filename_xml1 = os.path.join(data_dir, "share/resources/med/square1_split") - filename_xml2 = os.path.join(data_dir, "share/resources/med/square2_split") + filename_xml1 = os.path.join(data_dir, "square1_split") + filename_xml2 = os.path.join(data_dir, "square2_split") MPI.COMM_WORLD.Barrier() if source_group.containsMyRank(): @@ -104,18 +215,16 @@ class ParaMEDMEMBasicsTest(unittest.TestCase): dec.sendData() pass ## end - interface = 0 - target_group = 0 - source_group = 0 - dec = 0 - mesh =0 - support =0 - paramesh =0 - parafield =0 - icocofield =0 + + # Some clean up that still needs MPI communication, so to be done before MPI_Finalize() + parafield.release() + paramesh.release() + dec.release() + target_group.release() + source_group.release() MPI.COMM_WORLD.Barrier() - MPI.Finalize() - pass - pass -unittest.main() +if __name__ == "__main__": + unittest.main() + MPI.Finalize() + diff --git a/src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py b/src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py index 3d929d625..a4b0e7da1 100755 --- a/src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py +++ b/src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py @@ -19,126 +19,119 @@ # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com # -from ParaMEDMEM import * -import sys, os +from medcoupling import * +from mpi4py import MPI +import unittest +import os -MPI_Init(sys.argv) +class ParaMEDMEM_DEC_Tests(unittest.TestCase): + def test_NonCoincidentDEC_py(self): + size = MPI.COMM_WORLD.size + rank = MPI.COMM_WORLD.rank -size = MPI_Comm_size(MPI_COMM_WORLD) -rank = MPI_Comm_rank(MPI_COMM_WORLD) -if size != 5: - raise RuntimeError("Expect MPI_COMM_WORLD size == 5") + if size != 5: + raise RuntimeError("Expect MPI.MPI_COMM_WORLD size == 5") -nproc_source = 3 -procs_source = list(range(nproc_source)) -procs_target = list(range(size - nproc_source + 1, size)) + nproc_source = 3 + procs_source = list(range(nproc_source)) + procs_target = list(range(size - nproc_source + 1, size)) -interface = CommInterface() + interface = CommInterface() -target_group = MPIProcessorGroup(interface, procs_target) -source_group = MPIProcessorGroup(interface, procs_source) + target_group = MPIProcessorGroup(interface, procs_target) + source_group = MPIProcessorGroup(interface, procs_source) -source_mesh= 0 -target_mesh= 0 -parasupport= 0 -mesh = 0 -support = 0 -field = 0 -paramesh = 0 -parafield = 0 -icocofield = 0 + dec = NonCoincidentDEC(source_group, target_group) -dec = NonCoincidentDEC(source_group, target_group) + data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med") + if not os.path.isdir(data_dir): + data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1] + tmp_dir 
= os.environ.get('TMP', "") + if tmp_dir == '': + tmp_dir = "/tmp" -data_dir = os.environ['MEDCOUPLING_ROOT_DIR'] -tmp_dir = os.environ['TMP'] -if tmp_dir == '': - tmp_dir = "/tmp" - pass + filename_xml1 = os.path.join(data_dir, "square1_split") + filename_xml2 = os.path.join(data_dir, "square2_split") -filename_xml1 = data_dir + "/share/resources/med/square1_split" -filename_xml2 = data_dir + "/share/resources/med/square2_split" + MPI.COMM_WORLD.Barrier() -MPI_Barrier(MPI_COMM_WORLD) + if source_group.containsMyRank(): + filename = filename_xml1 + str(rank+1) + ".med" + meshname = "Mesh_2_" + str(rank+1) -if source_group.containsMyRank(): + mesh = MESH(MED_DRIVER, filename, meshname) + support = SUPPORT(mesh, "all elements", MED_CELL) + paramesh = ParaMESH(mesh, source_group, "source mesh") - filename = filename_xml1 + str(rank+1) + ".med" - meshname = "Mesh_2_" + str(rank+1) + parasupport = UnstructuredParaSUPPORT( support, source_group) + comptopo = ComponentTopology() - mesh = MESH(MED_DRIVER, filename, meshname) - support = SUPPORT(mesh, "all elements", MED_CELL) - paramesh = ParaMESH(mesh, source_group, "source mesh") + parafield = ParaFIELD(parasupport, comptopo) - parasupport = UnstructuredParaSUPPORT( support, source_group) - comptopo = ComponentTopology() + nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS); - parafield = ParaFIELD(parasupport, comptopo) + value = [1.0]*nb_local - nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS); + parafield.getField().setValue(value) + icocofield = ICoCo_MEDField(paramesh,parafield) + dec.attachLocalField(icocofield,'P0') + pass - value = [1.0]*nb_local + if target_group.containsMyRank(): + filename = filename_xml2 + str(rank - nproc_source + 1) + ".med" + meshname = "Mesh_3_" + str(rank - nproc_source + 1) - parafield.getField().setValue(value) - icocofield = ICoCo_MEDField(paramesh,parafield) - dec.attachLocalField(icocofield,'P0') - pass + mesh = MESH(MED_DRIVER, filename, meshname) + support = SUPPORT(mesh, "all elements", MED_CELL) + paramesh = ParaMESH(mesh, target_group, "target mesh") -if target_group.containsMyRank(): + parasupport = UnstructuredParaSUPPORT( support, target_group) + comptopo = ComponentTopology() + parafield = ParaFIELD(parasupport, comptopo) - filename = filename_xml2 + str(rank - nproc_source + 1) + ".med" - meshname = "Mesh_3_" + str(rank - nproc_source + 1) + nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS) + value = [0.0]*nb_local - mesh = MESH(MED_DRIVER, filename, meshname) - support = SUPPORT(mesh, "all elements", MED_CELL) - paramesh = ParaMESH(mesh, target_group, "target mesh") + parafield.getField().setValue(value) + icocofield = ICoCo_MEDField(paramesh,parafield) - parasupport = UnstructuredParaSUPPORT( support, target_group) - comptopo = ComponentTopology() - parafield = ParaFIELD(parasupport, comptopo) + dec.attachLocalField(icocofield, 'P0') + pass - nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS) - value = [0.0]*nb_local + field_before_int = [0.0] + field_after_int = [0.0] - parafield.getField().setValue(value) - icocofield = ICoCo_MEDField(paramesh,parafield) + if source_group.containsMyRank(): + field_before_int = [parafield.getVolumeIntegral(1)] + MPI.MPI_Bcast(field_before_int, 1, MPI.MPI_DOUBLE, 0, MPI.MPI_COMM_WORLD); + dec.synchronize() + print("DEC usage") + dec.setForcedRenormalization(False) + dec.sendData() + pass - dec.attachLocalField(icocofield, 'P0') - pass + if target_group.containsMyRank(): + MPI.MPI_Bcast(field_before_int, 1, MPI.MPI_DOUBLE, 0, 
MPI.MPI_COMM_WORLD) + dec.synchronize() + dec.setForcedRenormalization(False) + dec.recvData() + field_after_int = [parafield.getVolumeIntegral(1)] + pass -field_before_int = [0.0] -field_after_int = [0.0] + MPI.MPI_Bcast(field_before_int, 1, MPI.MPI_DOUBLE, 0, MPI.MPI_COMM_WORLD) + MPI.MPI_Bcast(field_after_int , 1, MPI.MPI_DOUBLE, size-1, MPI.MPI_COMM_WORLD) -if source_group.containsMyRank(): + epsilon = 1e-6 + self.assertDoubleEquals(field_before_int[0], field_after_int[0], epsilon) - field_before_int = [parafield.getVolumeIntegral(1)] - MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); - dec.synchronize() - print("DEC usage") - dec.setForcedRenormalization(False) + # Some clean up that still needs MPI communication, so to be done before MPI_Finalize() + dec.release() + target_group.release() + source_group.release() - dec.sendData() - pass + MPI.COMM_WORLD.Barrier() + MPI.Finalize() -if target_group.containsMyRank(): +if __name__ == "__main__": + unittest.main() - MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD) - dec.synchronize() - dec.setForcedRenormalization(False) - dec.recvData() - field_after_int = [parafield.getVolumeIntegral(1)] - pass - -MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD) -MPI_Bcast(field_after_int , 1, MPI_DOUBLE, size-1, MPI_COMM_WORLD) - -epsilon = 1e-6 -if abs(field_before_int[0] - field_after_int[0]) > epsilon: - print("Field before is not equal field after: %s != %s"%\ - (field_before_int[0],field_after_int[0])) - pass - - -MPI_Barrier(MPI_COMM_WORLD) -MPI_Finalize() -print("# End of testNonCoincidentDEC") diff --git a/src/ParaMEDMEM_Swig/test_OverlapDEC.py b/src/ParaMEDMEM_Swig/test_OverlapDEC.py new file mode 100644 index 000000000..eb7d1d610 --- /dev/null +++ b/src/ParaMEDMEM_Swig/test_OverlapDEC.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +from medcoupling import * +from ParaMEDMEMTestTools import WriteInTmpDir +import sys, os +import unittest +import math +from mpi4py import MPI + +class ParaMEDMEM_O_DEC_Tests(unittest.TestCase): + """ This test illustrates a basic use of the OverlapDEC and shows notably that not all + processors must possess a piece of the source and/or target mesh. + Look at the C++ documentation of the class for more informations. + In this case, the source mesh is only stored on 2 procs, whereas the target is on 4. + Since only a single group of processor is defined in the setup, the 2 idle procs on the source side are just providing an empty mesh, + thus indicating that they don't participate in the source definition. + + Main method is testOverlapDEC_2D_py_1() + """ + + def generateFullSource(self): + """ The complete source mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) """ + msh = self.generateFullTarget() + msh.simplexize(0) + msh.setName("src_mesh") + fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME) + fld.setMesh(msh); fld.setName("source_F"); + da = DataArrayDouble(msh.getNumberOfCells()) + da.iota() + da *= 2 + fld.setArray(da) + return msh, fld + + def generateFullTarget(self): + """ The complete target mesh: 4 squares """ + m1 = MEDCouplingCMesh("tgt_msh") + da = DataArrayDouble([0,1,2]) + m1.setCoords(da, da) + msh = m1.buildUnstructured() + return msh + + # + # Below, the two functions emulating the set up of a piece of the source and target mesh + # on each proc. Obviously in real world problems, this comes from your code and is certainly + # not computed by cuting again from scratch the full-size mesh!! 
+ # + def getPartialSource(self, rank): + """ Will return an empty mesh piece for rank=2 and 3 """ + msh, f = self.generateFullSource() + if rank in [2,3]: + sub_m, sub_f = msh[[]], f[[]] # Little trick to select nothing in the mesh, thus producing an empty mesh + elif rank == 0: + sub_m, sub_f = msh[0:4], f[0:4] + elif rank == 1: + sub_m, sub_f = msh[4:8], f[4:8] + sub_m.zipCoords() + return sub_m, sub_f + + def getPartialTarget(self, rank): + """ One square for each rank """ + msh = self.generateFullTarget() + sub_m = msh[rank] + sub_m.zipCoords() + # Receiving side must prepare an empty field that will be filled by DEC: + fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME) + da = DataArrayDouble(sub_m.getNumberOfCells()) + fld.setArray(da) + fld.setName("tgt_F") + fld.setMesh(sub_m) + return sub_m, fld + + @WriteInTmpDir + def testOverlapDEC_2D_py_1(self): + """ The main method of the test """ + size = MPI.COMM_WORLD.size + rank = MPI.COMM_WORLD.rank + if size != 4: + raise RuntimeError("Should be run on 4 procs!") + + # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups. + proc_group = list(range(size)) # No need for ProcessorGroup object here. + odec = OverlapDEC(proc_group) + + # Write out full size meshes/fields for inspection + if rank == 0: + _, fld = self.generateFullSource() + mshT = self.generateFullTarget() + WriteField("./source_field_FULL.med", fld, True) + WriteUMesh("./target_mesh_FULL.med", mshT, True) + + MPI.COMM_WORLD.Barrier() # really necessary?? + + # + # OK, let's go DEC !! + # + _, fieldS = self.getPartialSource(rank) + fieldS.setNature(IntensiveMaximum) # The only policy supported for now ... + mshT, fieldT = self.getPartialTarget(rank) + fieldT.setNature(IntensiveMaximum) + if rank not in [2,3]: + WriteField("./source_field_part_%d.med" % rank, fieldS, True) + WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True) + + odec.attachSourceLocalField(fieldS) + odec.attachTargetLocalField(fieldT) + odec.synchronize() + odec.sendRecvData() + + # Now the actual checks: + if rank == 0: + self.assertEqual(fieldT.getArray().getValues(), [1.0]) + elif rank == 1: + self.assertEqual(fieldT.getArray().getValues(), [5.0]) + elif rank == 2: + self.assertEqual(fieldT.getArray().getValues(), [9.0]) + elif rank == 3: + self.assertEqual(fieldT.getArray().getValues(), [13.0]) + + # Release DEC (this involves MPI exchanges -- notably the release of the communicator -- so better be done before MPI.Finalize() + odec.release() + + MPI.COMM_WORLD.Barrier() + +if __name__ == "__main__": + unittest.main() + MPI.Finalize() diff --git a/src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py b/src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py index f110cae6f..11c5b7a16 100755 --- a/src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py +++ b/src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py @@ -19,51 +19,158 @@ # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com # -from ParaMEDMEM import * -import sys, os +from medcoupling import * +from ParaMEDMEMTestTools import WriteInTmpDir +import os import unittest import math +from mpi4py import MPI + + +class ParaMEDMEM_SC_DEC_Tests(unittest.TestCase): + """ See test_StructuredCoincidentDEC_py_1() for a quick start. + """ + + def generateFullMeshField(self): + """ The complete mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) + Note that in this case, this is the **only** mesh for the whole problem. 
+ """ + m1 = MEDCouplingCMesh("tgt_msh") + da = DataArrayDouble([0,1,2]) + m1.setCoords(da, da) + msh = m1.buildUnstructured() + + msh.simplexize(0) + msh.setName("src_mesh") + fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME) + fld.setMesh(msh); fld.setName("source_F"); + da = DataArrayDouble(msh.getNumberOfCells()) + da.iota() + da *= 2 + fld.setArray(da) + return msh, fld + + # + # Below, the function emulating the set up of a piece of the mesh being owned by + # a given processor. + # + def getPartialSource(self, rank): + msh, f = self.generateFullMeshField() + if rank == 0: + sub_ids = [0,1,4,5] + elif rank == 1: + sub_ids = [2,3,6,7] + sub_m, sub_f = msh[sub_ids], f[sub_ids] + sub_m.zipCoords() + return sub_m, sub_f + + def getPartialTarget(self, rank): + msh, f = self.generateFullMeshField() + if rank == 2: + sub_ids = [0,1,2,3] + elif rank == 3: + sub_ids = [4,5,6,7] + sub_m, sub_f = msh[sub_ids], f[sub_ids] + sub_m.zipCoords() + return sub_m, sub_f + + @WriteInTmpDir + def test_StructuredCoincidentDEC_py_1(self): + """ This test illustrates a basic use of the StructuredCoincidentDEC which allows to + resdistribute a field/mesh which is already scattered on several processors into a different configuration. + Look at the C++ documentation of the class for more informations. + Note that in the case of the StructuredCoincidentDEC no interpolation whatsoever is performed. This is only + really a redistribution of the data among the processors. + """ + size = MPI.COMM_WORLD.size + rank = MPI.COMM_WORLD.rank + if size != 4: + print("Should be run on 4 procs!") + return + + # Define two processor groups + nproc_source = 2 + procs_source = list(range(nproc_source)) + procs_target = list(range(size - nproc_source, size)) + + interface = CommInterface() + source_group = MPIProcessorGroup(interface, procs_source) + target_group = MPIProcessorGroup(interface, procs_target) + + scdec = StructuredCoincidentDEC(source_group, target_group) + + # Write out full size meshes/fields for inspection + if rank == 0: + _, fld = self.generateFullMeshField() + WriteField("./source_field_FULL.med", fld, True) -class ParaMEDMEMBasicsTest2(unittest.TestCase): - def testStructuredCoincidentDEC(self): - MPI_Init(sys.argv) # - size = MPI_Comm_size(MPI_COMM_WORLD) - rank = MPI_Comm_rank(MPI_COMM_WORLD) + # OK, let's go DEC !! # + if source_group.containsMyRank(): + _, fieldS = self.getPartialSource(rank) + fieldS.setNature(IntensiveMaximum) # The only policy supported for now ... + WriteField("./source_field_part_%d.med" % rank, fieldS, True) + scdec.attachLocalField(fieldS) + scdec.synchronize() + scdec.sendData() + + if target_group.containsMyRank(): + mshT, fieldT = self.getPartialTarget(rank) + fieldT.setNature(IntensiveMaximum) + WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True) + scdec.attachLocalField(fieldT) + scdec.synchronize() + scdec.recvData() + # Now the actual checks: + if rank == 2: + self.assertEqual(fieldT.getArray().getValues(), [0.0, 2.0, 8.0, 10.0]) + elif rank == 3: + self.assertEqual(fieldT.getArray().getValues(), [4.0, 6.0, 12.0, 14.0]) + + # Release DEC (this involves MPI exchanges -- so better be done before MPI.Finalize() + scdec.release() + source_group.release() + target_group.release() + + MPI.COMM_WORLD.Barrier() + + @WriteInTmpDir + def test_StructuredCoincidentDEC_py_2(self): + """ More involved tests using Para* objects ... 
+ """ + size = MPI.COMM_WORLD.size + rank = MPI.COMM_WORLD.rank + if size < 4: - raise RuntimeError("Expect MPI_COMM_WORLD size >= 4") - # + print("Should be run on >= 4 procs!") + return + interface = CommInterface() - # - self_group = MPIProcessorGroup(interface, rank, rank) - target_group = MPIProcessorGroup(interface, 3, size-1) + source_group = MPIProcessorGroup(interface, 0, 2) - # - mesh = 0 - support = 0 - paramesh = 0 - parafield = 0 - comptopo = 0 - icocofield= 0 - # - data_dir = os.environ['MEDCOUPLING_ROOT_DIR'] - tmp_dir = os.environ['TMP'] + target_group = MPIProcessorGroup(interface, 3, size-1) + self_group = MPIProcessorGroup(interface, rank, rank) + + data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med") + if not os.path.isdir(data_dir): + data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1] + tmp_dir = os.environ.get('TMP', "") if tmp_dir == '': tmp_dir = "/tmp" - pass - filename_xml1 = data_dir + "/share/resources/med/square1_split" - filename_2 = data_dir + "/share/resources/med/square1.med" - filename_seq_wr = tmp_dir + "/" - filename_seq_med = tmp_dir + "/myWrField_seq_pointe221.med" + filename_xml1 = os.path.join(data_dir, "square1_split") + filename_2 = os.path.join(data_dir, "square1.med") + filename_seq_wr = "." + filename_seq_med = os.path.join(".", "myWrField_seq_pointe221.med") dec = StructuredCoincidentDEC(source_group, target_group) - MPI_Barrier(MPI_COMM_WORLD) + + MPI.COMM_WORLD.Barrier() if source_group.containsMyRank(): filename = filename_xml1 + str(rank+1) + ".med" meshname = "Mesh_2_" + str(rank+1) - mesh=ReadUMeshFromFile(filename,meshname,0) + mesh = ReadUMeshFromFile(filename,meshname,0) paramesh=ParaMESH(mesh,source_group,"source mesh") comptopo=ComponentTopology(6) parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh,comptopo) @@ -74,27 +181,24 @@ class ParaMEDMEMBasicsTest2(unittest.TestCase): for ielem in range(nb_local): for icomp in range(6): value.append(global_numbering[ielem]*6.0+icomp); - pass - pass parafield.getField().setValues(value) - icocofield = ICoCoMEDField(mesh,parafield.getField()) + icocofield = ICoCoMEDField(parafield.getField()) dec.setMethod("P0") dec.attachLocalField(parafield) dec.synchronize() dec.sendData() - pass if target_group.containsMyRank(): meshname2 = "Mesh_2" - mesh=ReadUMeshFromFile(filename_2, meshname2,0) - paramesh=ParaMESH(mesh, self_group, "target mesh") - comptopo=ComponentTopology(6,target_group) - parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo) + mesh = ReadUMeshFromFile(filename_2, meshname2,0) + paramesh = ParaMESH(mesh, self_group, "target mesh") + comptopo = ComponentTopology(6,target_group) + parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo) parafield.getField().setNature(IntensiveMaximum) nb_local=mesh.getNumberOfCells() value = [0.0]*(nb_local*comptopo.nbLocalComponents()) parafield.getField().setValues(value) - icocofield = ICoCoMEDField(mesh,parafield.getField()) + icocofield = ICoCoMEDField(parafield.getField()) dec.setMethod("P0") dec.attachLocalField(parafield) dec.synchronize() @@ -103,26 +207,18 @@ class ParaMEDMEMBasicsTest2(unittest.TestCase): for i in range(nb_local): first=comptopo.firstLocalComponent() for icomp in range(comptopo.nbLocalComponents()): - self.assertTrue(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp]- - (float)(i*6+icomp+first))<1e-12) - pass - pass - pass - comptopo=0 - interface = 0 - mesh =0 - support =0 - paramesh =0 - parafield =0 - icocofield =0 - dec=0 - self_group =0 - target_group = 0 - 
source_group = 0
-        MPI_Barrier(MPI_COMM_WORLD)
-        MPI_Finalize()
-        print("End of test StructuredCoincidentDEC")
-        pass
-
-
-unittest.main()
+                    self.assertTrue(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp]-float(i*6+icomp+first))<1e-12)
+
+        # Release DEC (this involves MPI exchanges -- so better be done before MPI.Finalize())
+        parafield.release()
+        paramesh.release()
+        dec.release()
+        target_group.release()
+        source_group.release()
+        self_group.release()
+
+        MPI.COMM_WORLD.Barrier()
+
+if __name__ == "__main__":
+    unittest.main()
+    MPI.Finalize()
-- 
2.30.2
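
Usage note (not part of the patch): the release() methods added above are meant to be called explicitly from Python so that MPI resources (groups, communicators, interpolation matrices) are freed while MPI is still initialized, instead of waiting for garbage collection of the wrapped C++ destructors. A minimal sketch of the intended teardown order is given below; it only reuses names that appear in the tests of this patch (medcoupling, CommInterface, MPIProcessorGroup, InterpKernelDEC, mpi4py), assumes a 4-process run split 2/2 between source and target as in test_InterpKernelDEC.py, and elides the field attachment and exchange steps.

# Minimal sketch of the explicit clean-up pattern enabled by release();
# run with: mpiexec -np 4 python this_sketch.py
from medcoupling import *
from mpi4py import MPI

interface = CommInterface()
source_group = MPIProcessorGroup(interface, [0, 1])   # ranks holding the source field
target_group = MPIProcessorGroup(interface, [2, 3])   # ranks holding the target field
dec = InterpKernelDEC(source_group, target_group)

# ... attachLocalField(), synchronize(), sendData()/recvData() as in the tests above ...

# Explicit clean-up: free MPI resources while MPI is still initialized,
# in reverse order of construction, before MPI.Finalize().
dec.release()
target_group.release()
source_group.release()
MPI.COMM_WORLD.Barrier()
MPI.Finalize()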