From 11f2b35cf340da5e41f056890bec0a825c35e866 Mon Sep 17 00:00:00 2001
From: cconopoima
Date: Tue, 9 Jan 2024 15:45:14 +0000
Subject: [PATCH] [bos #38048] [EDF] (2023-T3) PARAMEDMEM Ergonomy.

---
 src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx  | 117 ++++++++++++++
 src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx  |  41 +++++
 src/ParaMEDMEM/CMakeLists.txt                 |   1 +
 src/ParaMEDMEM/InterpKernelDEC.cxx            |  37 +++++
 src/ParaMEDMEM/InterpKernelDEC.hxx            |   2 +
 src/ParaMEDMEM/MPIProcessorGroup.cxx          |  17 ++
 src/ParaMEDMEM/MPIProcessorGroup.hxx          |   1 +
 src/ParaMEDMEM/ProcessorGroup.hxx             |   6 +-
 src/ParaMEDMEMTest/CMakeLists.txt             |   1 +
 src/ParaMEDMEMTest/ParaMEDMEMTest.hxx         |   5 +-
 ...raMEDMEMTest_ByStringMPIProcessorGroup.cxx |  97 ++++++++++++
 src/ParaMEDMEM_Swig/CMakeLists.txt            |   6 +-
 .../CTestTestfileInstall.cmake.in             |   5 +
 src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i        |   4 +
 .../test_InterpKernelDEC_easy.py              | 147 ++++++++++++++++++
 15 files changed, 484 insertions(+), 3 deletions(-)
 create mode 100644 src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx
 create mode 100644 src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx
 create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
 create mode 100755 src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py

diff --git a/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx b/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx
new file mode 100644
index 000000000..49f39e8a0
--- /dev/null
+++ b/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx
@@ -0,0 +1,117 @@
+// Copyright (C) 2007-2023 CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "ByStringMPIProcessorGroup.hxx"
+
+#include <set>
+#include <map>
+#include <vector>
+#include "mpi.h"
+
+using namespace std;
+
+namespace MEDCoupling
+{
+  /*!
+   \class ByStringMPIProcessorGroup
+
+   ByStringMPIProcessorGroup is a derived version of MPIProcessorGroup.
+
+   Groups are formed from the MPI ranks sharing the same simCodeTag. Two trivial cases:
+   - all simCodeTag values are equal: a single group is formed from all the MPI ranks of the communicator;
+   - all simCodeTag values are different: n groups of one rank each are formed.
+  */
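+
+  /* For example (hypothetical tags, for illustration): on a 6-rank communicator where ranks
+     0-2 construct the group with simCodeTag "fluid" and ranks 3-5 with "solid", two disjoint
+     groups {0,1,2} and {3,4,5} are formed, and each rank ends up in the group of its own tag. */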
+
+  /*! Build and return a map between each distinct simCodeTag and the set of MPI rank ids
+      (within the passed communicator) sharing that same identifier.
+   \param interface CommInterface object giving access to the MPI communication layer
+   \param simCodeTag the string identifying the tag of the group
+   \param world_comm MPI communicator
+   \return map relating each unique simCodeTag to the set of MPI rank ids belonging to that group
+  */
+  static std::map<std::string,std::set<int>> DefineSetIdByStringName( const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm )
+  {
+    int size_world;
+    int rank_world;
+    interface.commSize(world_comm,&size_world);
+    interface.commRank(world_comm,&rank_world);
+
+    std::map<std::string,std::set<int>> myRanksSet;
+
+    std::vector<int> displacement(size_world, 0 );
+    std::vector<int> words_size(size_world);
+
+    int stringSize = (int) simCodeTag.size();
+    interface.allGather( &stringSize, 1, MPI_INT, words_size.data(), 1, MPI_INT, world_comm );
+
+    for (size_t rank = 1; rank < words_size.size(); rank++)
+      displacement[ rank ] = words_size[ rank - 1 ] + displacement[ rank - 1 ];
+
+    std::vector<char> globalnames( displacement[ size_world - 1 ] + words_size[ size_world - 1 ] );
+
+    interface.allGatherV( simCodeTag.c_str(), stringSize, MPI_CHAR, globalnames.data(),
+                          words_size.data(), displacement.data(), MPI_CHAR, world_comm );
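+
+    /* At this point every rank holds the concatenation of all tags. For example (assumed
+       3 ranks with tags "AA","B","AA"): words_size = {2,1,2}, displacement = {0,2,3} and
+       globalnames = "AABAA", which the loop below splits into { "AA" -> {0,2}, "B" -> {1} }. */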
+
+    for (int rank = 0; rank < size_world; rank++)
+    {
+      std::string strByRank( globalnames.data() + displacement[ rank ], words_size[ rank ] );
+      myRanksSet[ strByRank ].insert( rank );
+    }
+    return myRanksSet;
+  }
+
+  /*!
+   * Creates a processor group based on all the
+   processors of MPI_COMM_WORLD. This routine must be called by all processors in MPI_COMM_WORLD.
+   \param interface CommInterface object giving access to the MPI
+   communication layer
+  */
+  ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const CommInterface& interface):
+    MPIProcessorGroup(interface)
+  {
+  }
+
+  /*! Creates a processor group based on the simCodeTag passed.
+
+   \param interface CommInterface object giving access to the MPI
+   communication layer
+   \param simCodeTag the string identifying the tag of the group
+   \param world_comm MPI communicator
+  */
+  ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm ):
+    MPIProcessorGroup(interface, DefineSetIdByStringName( interface, simCodeTag, world_comm ), simCodeTag, world_comm )
+  {
+  }
+
+  ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other):
+    MPIProcessorGroup(other)
+  {
+  }
+
+  ByStringMPIProcessorGroup::~ByStringMPIProcessorGroup()
+  {
+  }
+
+  ByStringMPIProcessorGroup *ByStringMPIProcessorGroup::deepCopy() const
+  {
+    return new ByStringMPIProcessorGroup(*this);
+  }
+
+}
diff --git a/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx b/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx
new file mode 100644
index 000000000..21bf13fdb
--- /dev/null
+++ b/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx
@@ -0,0 +1,41 @@
+// Copyright (C) 2007-2023 CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __BYSTRINGMPIPROCESSORGROUP_HXX__
+#define __BYSTRINGMPIPROCESSORGROUP_HXX__
+
+#include "MPIProcessorGroup.hxx"
+
+namespace MEDCoupling
+{
+  class CommInterface;
+
+  class ByStringMPIProcessorGroup : public MPIProcessorGroup
+  {
+  public:
+    ByStringMPIProcessorGroup(const CommInterface& interface);
+    ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other);
+    virtual ~ByStringMPIProcessorGroup();
+    virtual ByStringMPIProcessorGroup *deepCopy() const;
+
+  };
+}
+
+#endif
diff --git a/src/ParaMEDMEM/CMakeLists.txt b/src/ParaMEDMEM/CMakeLists.txt
index 03b9373d8..1a8143112 100644
--- a/src/ParaMEDMEM/CMakeLists.txt
+++ b/src/ParaMEDMEM/CMakeLists.txt
@@ -51,6 +51,7 @@ SET(paramedmem_SOURCES
   InterpolationMatrix.cxx
   LinearTimeInterpolator.cxx
   MPIProcessorGroup.cxx
+  ByStringMPIProcessorGroup.cxx
   MxN_Mapping.cxx
   OverlapDEC.cxx
   OverlapElementLocator.cxx
diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx
index 18cf01e7c..6ef13d1e4 100644
--- a/src/ParaMEDMEM/InterpKernelDEC.cxx
+++ b/src/ParaMEDMEM/InterpKernelDEC.cxx
@@ -69,6 +69,43 @@ namespace MEDCoupling
   {
   }
 
+  /*!
+   * Creates an InterpKernelDEC from a string identifier for the source and target groups.
+   * The set of procs might not cover MPI_COMM_WORLD entirely
+   * (a sub-communicator holding the union of source and target procs is recreated internally).
+   */
+  InterpKernelDEC::InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group):
+    DisjointDEC(generic_group.getProcIDsByName(source_group),generic_group.getProcIDsByName(target_group)),
+    _interpolation_matrix(0)
+  {
+  }
+
+  /*!
+   * Splits the interaction group on the predefined token string "<->".
+   * The substring to the left of the token is the source group name, the one to the right the target group name.
+   */
+  static std::pair<std::string,std::string> GetGroupsName( const std::string& interaction_group )
+  {
+    const std::string delimiter = "<->";
+    size_t delimiter_position = interaction_group.find(delimiter);
+    if ( delimiter_position == std::string::npos )
+      throw INTERP_KERNEL::Exception( "No delimiter <-> found in the interaction group.");
+
+    std::string src = interaction_group.substr(0,delimiter_position);
+    std::string tgt = interaction_group.substr(delimiter_position+delimiter.size());
+    return std::make_pair(src,tgt);
+  }
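+
+  /* e.g. GetGroupsName("fluid<->solid") returns the pair ("fluid","solid"),
+     so "fluid" becomes the source group name and "solid" the target group name. */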
+
+  /*!
+   * Creates an InterpKernelDEC from a string defining an interaction.
+   * The source and target groups are obtained by splitting the string on the "<->" token.
+   * The constructor accepting a ProcessorGroup and two strings is reused.
+   */
+  InterpKernelDEC::InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group ):
+    InterpKernelDEC(generic_group,GetGroupsName(interaction_group).first,GetGroupsName(interaction_group).second)
+  {
+  }
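+
+  /* Typical use (sketch, assuming every rank passed its own tag when building the group):
+       CommInterface interface;
+       ByStringMPIProcessorGroup group(interface, myTag);   // myTag is e.g. "A", "B" or "C"
+       InterpKernelDEC dec(group, "B<->C");                 // same as InterpKernelDEC(group,"B","C")
+     All ranks of the "B" and "C" groups must make the same call. */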
+
   InterpKernelDEC::~InterpKernelDEC()
   {
     release();
diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx
index eaa87526e..32c1eb328 100644
--- a/src/ParaMEDMEM/InterpKernelDEC.hxx
+++ b/src/ParaMEDMEM/InterpKernelDEC.hxx
@@ -131,6 +131,8 @@ namespace MEDCoupling
     InterpKernelDEC();
     InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
     InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group);
+    InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group);
     virtual ~InterpKernelDEC();
     void release();
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.cxx b/src/ParaMEDMEM/MPIProcessorGroup.cxx
index 70d14565b..adcf52642 100644
--- a/src/ParaMEDMEM/MPIProcessorGroup.cxx
+++ b/src/ParaMEDMEM/MPIProcessorGroup.cxx
@@ -92,6 +92,23 @@ namespace MEDCoupling
   }
 
+  /*! Creates a processor group based on the processors included in \a proc_ids_by_name[simCodeTag].
+    This routine must be called by all processors in MPI_COMM_WORLD.
+
+    \param interface CommInterface object giving access to the MPI
+    communication layer
+    \param proc_ids_by_name a map relating names to the sets of ids to be integrated in the groups.
+    The id numbers are to be understood as MPI_COMM_WORLD ranks.
+    \param simCodeTag identifier of the group
+    \param world_comm MPI communicator
+  */
+  MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, std::map<std::string,std::set<int>> proc_ids_by_name, const std::string& simCodeTag, const MPI_Comm& world_comm):
+    ProcessorGroup(interface, proc_ids_by_name, simCodeTag), _world_comm(world_comm)
+  {
+    updateMPISpecificAttributes();
+  }
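+
+  /* For instance (illustrative values): with proc_ids_by_name == { "A" -> {0}, "B" -> {1,2} }
+     and simCodeTag == "B", the group is built on ranks {1,2} of world_comm. */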
+
   void MPIProcessorGroup::updateMPISpecificAttributes()
   {
     //Creation of a communicator
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx
index 09e0de56f..a817fa507 100644
--- a/src/ParaMEDMEM/MPIProcessorGroup.hxx
+++ b/src/ParaMEDMEM/MPIProcessorGroup.hxx
@@ -34,6 +34,7 @@ namespace MEDCoupling
   public:
     MPIProcessorGroup(const CommInterface& interface);
     MPIProcessorGroup(const CommInterface& interface, std::set<int> proc_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    MPIProcessorGroup(const CommInterface& interface, std::map<std::string,std::set<int>> proc_ids_by_name, const std::string& simCodeTag, const MPI_Comm& world_comm=MPI_COMM_WORLD);
     MPIProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids);
     MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD);
     MPIProcessorGroup(const MPIProcessorGroup& other);
diff --git a/src/ParaMEDMEM/ProcessorGroup.hxx b/src/ParaMEDMEM/ProcessorGroup.hxx
index 149b71472..88b4bfdec 100644
--- a/src/ParaMEDMEM/ProcessorGroup.hxx
+++ b/src/ParaMEDMEM/ProcessorGroup.hxx
@@ -43,6 +43,8 @@ namespace MEDCoupling
     ProcessorGroup (const ProcessorGroup& other): _comm_interface(other.getCommInterface()),_proc_ids(other._proc_ids) { }
     ProcessorGroup (const CommInterface& interface, int start, int end);
+    ProcessorGroup(const CommInterface& interface,std::map<std::string,std::set<int>> proc_ids_by_name,const std::string& simCodeTag):
+      _comm_interface(interface),_proc_ids_by_name(proc_ids_by_name),_proc_ids(proc_ids_by_name.at(simCodeTag)) { }
     virtual ~ProcessorGroup() { }
     virtual ProcessorGroup *deepCopy() const = 0;
     virtual ProcessorGroup* fuse (const ProcessorGroup&) const = 0;
@@ -55,9 +57,11 @@ namespace MEDCoupling
     virtual int translateRank(const ProcessorGroup*, int) const = 0;
     virtual ProcessorGroup* createComplementProcGroup() const = 0;
     virtual ProcessorGroup* createProcGroup() const = 0;
-    virtual const std::set<int>& getProcIDs()const { return _proc_ids; }
+    virtual const std::set<int>& getProcIDs()const { return _proc_ids; }
+    virtual const std::set<int>& getProcIDsByName( const std::string& simCodeTag ) const { return _proc_ids_by_name.at(simCodeTag); }
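+    // e.g. with _proc_ids_by_name == { "A" -> {0}, "B" -> {1,2} }, getProcIDsByName("B")
+    // returns {1,2}; an unknown name makes _proc_ids_by_name.at() throw std::out_of_range.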
   protected:
     const CommInterface _comm_interface;
+    std::map<std::string,std::set<int>> _proc_ids_by_name;
     std::set<int> _proc_ids;
   };
 }
diff --git a/src/ParaMEDMEMTest/CMakeLists.txt b/src/ParaMEDMEMTest/CMakeLists.txt
index 6afee105a..0f87dc85a 100644
--- a/src/ParaMEDMEMTest/CMakeLists.txt
+++ b/src/ParaMEDMEMTest/CMakeLists.txt
@@ -39,6 +39,7 @@ INCLUDE_DIRECTORIES(
 SET(ParaMEDMEMTest_SOURCES
   ParaMEDMEMTest.cxx
   ParaMEDMEMTest_MPIProcessorGroup.cxx
+  ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
   ParaMEDMEMTest_BlockTopology.cxx
   ParaMEDMEMTest_InterpKernelDEC.cxx
   ParaMEDMEMTest_StructuredCoincidentDEC.cxx
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
index db002331c..613cb05d1 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
@@ -55,7 +55,8 @@ class ParaMEDMEMTest : public CppUnit::TestFixture
   CPPUNIT_TEST(testOverlapDEC2_ter);        // 3 procs
   //  CPPUNIT_TEST(testOverlapDEC3);        // 2 procs
   //  CPPUNIT_TEST(testOverlapDEC4);        // 2 procs
-
+  CPPUNIT_TEST(testByStringMPIProcessorGroup_constructor);       // 1 and 2 procs
+  CPPUNIT_TEST(testByStringMPIProcessorGroup_stringconstructor); // 3 procs
   CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D);// 5 procs
   CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D);      // 5 procs
   CPPUNIT_TEST(testSynchronousEqualInterpKernelDEC_2D);                   // 5 procs
@@ -123,6 +124,8 @@ public:
   void testOverlapDEC3();
   //  void testOverlapDEC3_bis();
   void testOverlapDEC4();
+  void testByStringMPIProcessorGroup_constructor();
+  void testByStringMPIProcessorGroup_stringconstructor();
 #ifdef MED_ENABLE_FVM
   void testNonCoincidentDEC_2D();
   void testNonCoincidentDEC_3D();
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
new file mode 100644
index 000000000..8207031d5
--- /dev/null
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
@@ -0,0 +1,97 @@
+// Copyright (C) 2007-2023 CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "ByStringMPIProcessorGroup.hxx"
+
+#include <string>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+/*
+ * Check methods defined in ByStringMPIProcessorGroup.hxx
+ *
+ (+) ByStringMPIProcessorGroup(const CommInterface& interface);
+ (+) ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm );
+ (+) ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other );
+*/
+
+void ParaMEDMEMTest::testByStringMPIProcessorGroup_constructor()
+{
+  CommInterface comm_interface;
+  ByStringMPIProcessorGroup* group = new ByStringMPIProcessorGroup(comm_interface);
+  int size;
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+  CPPUNIT_ASSERT_EQUAL(size,group->size());
+  int size2;
+  const MPI_Comm* communicator=group->getComm();
+  MPI_Comm_size(*communicator, &size2);
+  CPPUNIT_ASSERT_EQUAL(size,size2);
+  delete group;
+}
+
+void ParaMEDMEMTest::testByStringMPIProcessorGroup_stringconstructor()
+{
+  int size, rankId;
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rankId);
+
+  if (size != 3)
+    return;
+
+  std::string myTag;
+  if ( rankId == 0 || rankId == 2 )
+    myTag = "group0";
+  else
+    myTag = "gr1";
+
+  CommInterface comm_interface;
+  ByStringMPIProcessorGroup * group = new ByStringMPIProcessorGroup(comm_interface,myTag,MPI_COMM_WORLD);
+  ByStringMPIProcessorGroup * copygroup = new ByStringMPIProcessorGroup(*group);
+  CPPUNIT_ASSERT(group);
+  CPPUNIT_ASSERT(copygroup);
+
+  std::set<int> ranksInGroup = group->getProcIDs();
+  std::set<int> ranksInCopiedGroup = copygroup->getProcIDs();
+  if ( rankId == 0 || rankId == 2 )
+  {
+    CPPUNIT_ASSERT_EQUAL( 2, (int)ranksInGroup.size() );
+    CPPUNIT_ASSERT_EQUAL( 2, (int)ranksInCopiedGroup.size() );
+  }
+  else
+  {
+    CPPUNIT_ASSERT_EQUAL( 1, (int)ranksInGroup.size() );
+    CPPUNIT_ASSERT_EQUAL( 1, (int)ranksInCopiedGroup.size() );
+  }
+  CPPUNIT_ASSERT( group->contains(rankId) );
+  CPPUNIT_ASSERT( copygroup->contains(rankId) );
+  delete group;
+  delete copygroup;
+}
diff --git a/src/ParaMEDMEM_Swig/CMakeLists.txt b/src/ParaMEDMEM_Swig/CMakeLists.txt
index c191cc18e..1ed0f03ba 100644
--- a/src/ParaMEDMEM_Swig/CMakeLists.txt
+++ b/src/ParaMEDMEM_Swig/CMakeLists.txt
@@ -91,6 +91,10 @@ IF(MEDCOUPLING_BUILD_PY_TESTS)
   ADD_TEST(NAME PyPara_InterpKernelDEC_Proc5
     COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
   SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
+
+  ADD_TEST(NAME PyPara_InterpKernelDEC_easy_Proc5
+    COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC_easy.py)
+  SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_easy_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
 
   #ADD_TEST(NAME PyPara_NonCoincidentDEC_Proc5
   #  COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_NonCoincidentDEC.py)
@@ -120,7 +124,7 @@ SALOME_INSTALL_SCRIPTS(${CMAKE_CURRENT_BINARY_DIR}/ParaMEDMEM.py ${MEDCOUPLING_I
 INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
 
 set(TEST_INSTALL_DIRECTORY ${MEDCOUPLING_INSTALL_TESTS}/ParaMEDMEM_Swig)
-install(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
+install(FILES test_InterpKernelDEC.py test_InterpKernelDEC_easy.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
 
 # export MPIEXEC and _oversub_opt to CTestTestfile.cmake of salome test mechanism
 configure_file(CTestTestfileInstall.cmake.in "CTestTestfileST.cmake" @ONLY)
 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CTestTestfileST.cmake DESTINATION ${TEST_INSTALL_DIRECTORY} RENAME CTestTestfile.cmake)
diff --git a/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in b/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
index a44a7d3e3..ede33242c 100644
--- a/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
+++ b/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
@@ -40,6 +40,11 @@
 set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
 add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC.py)
 set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
 
+set(tfile PyPara_InterpKernelDEC_easy_Proc5)
+set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
+add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC_easy.py)
+set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
+
 set(tfile PyPara_StructuredCoincidentDEC_Proc4)
 set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
 add_test(${TEST_NAME} ${MPIEXEC} -np 4 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_StructuredCoincidentDEC.py)
diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
index 624ffadd8..7a09cc4e2 100644
--- a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
+++ b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
@@ -26,6 +26,7 @@
 #include "ProcessorGroup.hxx"
 #include "Topology.hxx"
 #include "MPIProcessorGroup.hxx"
+#include "ByStringMPIProcessorGroup.hxx"
 #include "DEC.hxx"
 #include "InterpKernelDEC.hxx"
 #include "NonCoincidentDEC.hxx"
@@ -51,6 +52,7 @@ using namespace ICoCo;
 %include "ParaMESH.hxx"
 %include "ParaFIELD.hxx"
 %include "MPIProcessorGroup.hxx"
+%include "ByStringMPIProcessorGroup.hxx"
 %include "ComponentTopology.hxx"
 %include "DEC.hxx"
 %include "DisjointDEC.hxx"
@@ -293,6 +295,8 @@ namespace MEDCoupling
     InterpKernelDEC();
     InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
     InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids); // hide last optional parameter!
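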
+    InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group);
+    InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group);
     virtual ~InterpKernelDEC();
     void release();
diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py
new file mode 100755
index 000000000..48016da17
--- /dev/null
+++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+#  -*- coding: iso-8859-1 -*-
+# Copyright (C) 2007-2023 CEA, EDF
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+from medcoupling import *
+#from ParaMEDMEMTestTools import WriteInTmpDir
+import sys, os
+import unittest
+import math
+from mpi4py import MPI
+
+def ranksByGroup(groupString, jobPerWorldRank):
+    ranks=[]
+    for key,value in jobPerWorldRank.items():
+        if ( groupString == value ):
+            ranks.append(key)
+    return ranks
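+
+# e.g. ranksByGroup("B", {0:"A",1:"B",2:"B"}) returns [1, 2]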
+
+
+class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
+    def test_InterpKernelDEC_easy_comm_creation(self):
+        """
+        [EDF26706] : easy DEC creation from a ByStringMPIProcessorGroup and an interaction string.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 5:
+            print("Should be run on 5 procs!")
+            return
+        jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+        interface = CommInterface()
+        group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+        decBC = InterpKernelDEC(group,"B<->C")
+        decAC = InterpKernelDEC(group,"A<->C")
+        eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+        #
+        MPI.COMM_WORLD.Barrier()
+
+    def test_InterpKernelDEC_easy_comm_creation_2(self):
+        """
+        [EDF26706] : same scenario, but using the source/target two-string constructor.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 5:
+            print("Should be run on 5 procs!")
+            return
+        jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+        interface = CommInterface()
+        group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+        decBC = InterpKernelDEC(group,"B","C")
+        decAC = InterpKernelDEC(group,"A","C")
+        eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+        #
+        MPI.COMM_WORLD.Barrier()
+
+# Role of each rank in the tests above (from jobPerWorldRank): rank 0 is the only "A" proc and
+# sends through decAC; ranks 1 and 2 are "B" and send through decBC; ranks 3 and 4 are "C" and
+# receive from both DECs.
+
+def Easy_comm_creation_0(decBC,decAC):
+    """ Proc 0 of A """
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0,1]),DataArrayDouble([0,1])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([1.2]))
+    decAC.attachLocalField( field )
+    decAC.synchronize()
+    decAC.sendData()
+    pass
+
+def Easy_comm_creation_1(decBC,decAC):
+    """ Proc 0 of B """
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([2,3]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([2.3]))
+    decBC.attachLocalField( field )
+    decBC.synchronize()
+    decBC.sendData()
+    pass
+
+def Easy_comm_creation_2(decBC,decAC):
+    """ Proc 1 of B """
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([3,4]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([3.3]))
+    decBC.attachLocalField( field )
+    decBC.synchronize()
+    decBC.sendData()
+    pass
+
+def Easy_comm_creation_3(decBC,decAC):
+    """ Proc 0 of C """
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.5,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([0.]))
+    decBC.attachLocalField( field )
+    decAC.attachLocalField( field )
+    decBC.synchronize()
+    decAC.synchronize()
+    decBC.recvData()
+    print(field.getArray().getValues())
+    decAC.recvData()
+    print(field.getArray().getValues())
+    pass
+
+def Easy_comm_creation_4(decBC,decAC):
+    """ Proc 1 of C """
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.7,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([0.]))
+    decBC.attachLocalField( field )
+    decAC.attachLocalField( field )
+    decBC.synchronize()
+    decAC.synchronize()
+    decBC.recvData()
+    print(field.getArray().getValues())
+    decAC.recvData()
+    print(field.getArray().getValues())
+    pass
+
+if __name__ == "__main__":
+    unittest.main()
+    MPI.Finalize()
-- 
2.39.2