SALOME platform Git repositories - tools/medcoupling.git/commitdiff
[bos #38048] [EDF] (2023-T3) PARAMEDMEM Ergonomy.
author cconopoima <cesar.conopoima@gmail.com>
Tue, 9 Jan 2024 15:45:14 +0000 (15:45 +0000)
committer Anthony Geay <anthony.geay@edf.fr>
Mon, 11 Mar 2024 09:02:12 +0000 (10:02 +0100)
15 files changed:
src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx [new file with mode: 0644]
src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx [new file with mode: 0644]
src/ParaMEDMEM/CMakeLists.txt
src/ParaMEDMEM/InterpKernelDEC.cxx
src/ParaMEDMEM/InterpKernelDEC.hxx
src/ParaMEDMEM/MPIProcessorGroup.cxx
src/ParaMEDMEM/MPIProcessorGroup.hxx
src/ParaMEDMEM/ProcessorGroup.hxx
src/ParaMEDMEMTest/CMakeLists.txt
src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx [new file with mode: 0644]
src/ParaMEDMEM_Swig/CMakeLists.txt
src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py [new file with mode: 0755]

diff --git a/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx b/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx
new file mode 100644 (file)
index 0000000..49f39e8
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright (C) 2007-2023  CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "ByStringMPIProcessorGroup.hxx"
+
+#include <iostream>
+#include <set>
+#include <map>
+#include <vector>
+#include <algorithm>
+#include "mpi.h"
+
+using namespace std;
+
+
+namespace MEDCoupling
+{
+  /*!
+   \class ByStringMPIProcessorGroup
+
+   ByStringMPIProcessorGroup is a derived version of MPIProcessorGroup.
+
+   Groups are formed from the MPI ranks sharing the same simCodeTag. Two trivial cases:
+    - all simCodeTag values are equal: one group is formed from all MPI ranks in the communicator;
+    - all simCodeTag values are different: as many groups as MPI ranks are formed.
+  */
+
+
+  /*! Builds and returns a map associating each distinct simCodeTag with the set of MPI rank ids (in the passed communicator) sharing that identifier.
+    \param interface CommInterface object giving access to the MPI communication layer
+    \param simCodeTag the string identifying the tag for the group.
+    \param world_comm MPI communicator
+    \return map relating each unique simCodeTag to the set of MPI rank ids belonging to that group
+  */
+  static std::map<std::string,std::set<int>> DefineSetIdByStringName( const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm )
+  {
+    int size_world;
+    int rank_world;
+    interface.commSize(world_comm,&size_world);
+    interface.commRank(world_comm,&rank_world);
+
+    std::map<std::string,std::set<int>> myRanksSet;
+
+    std::vector<int> displacement(size_world, 0 );
+    std::vector<int> words_size(size_world);
+
+    int stringSize = (int) simCodeTag.size();
+    interface.allGather( &stringSize, 1, MPI_INT, words_size.data(), 1, MPI_INT, world_comm );
+
+    for (size_t rank = 1; rank < words_size.size(); rank++)
+      displacement[ rank ] = words_size[ rank - 1 ] + displacement[ rank - 1 ];
+
+    // buffer must hold all gathered tags: offset of the last one plus its length
+    std::vector<char> globalnames( displacement[size_world-1] + words_size[size_world-1] );
+
+    interface.allGatherV( simCodeTag.c_str(), stringSize, MPI_CHAR, globalnames.data(),
+                          words_size.data(), displacement.data(), MPI_CHAR, world_comm );
+
+    for (size_t rank = 0; rank < size_world; rank++)
+    {
+      std::string strByRank( &globalnames[displacement[ rank ]], words_size[ rank ] );
+      myRanksSet[ strByRank ].insert( (int)rank );
+    }
+    return myRanksSet;
+  }
+
+  /*! 
+   * Creates a processor group based on all the processors of
+   MPI_COMM_WORLD. This routine must be called by all processors in MPI_COMM_WORLD.
+   \param interface CommInterface object giving access to the MPI
+   communication layer
+  */
+  ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const CommInterface& interface):
+    MPIProcessorGroup(interface)
+  {
+  }
+
+  /*! Creates a processor group based on the simCodeTag passed.
+
+    \param interface CommInterface object giving access to the MPI
+    communication layer
+    \param simCodeTag the string identifying the tag for the group.
+    \param world_comm MPI communicator
+  */
+  ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm ):
+    MPIProcessorGroup(interface, DefineSetIdByStringName( interface, simCodeTag, world_comm ), simCodeTag, world_comm )
+  {
+  } 
+
+  ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other):
+    MPIProcessorGroup(other)
+  {
+  }
+
+  ByStringMPIProcessorGroup::~ByStringMPIProcessorGroup()
+  {
+  }
+
+  ByStringMPIProcessorGroup *ByStringMPIProcessorGroup::deepCopy() const
+  {
+    return new ByStringMPIProcessorGroup(*this);
+  }
+
+}
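
For illustration, a minimal pure-Python sketch of the map that DefineSetIdByStringName builds (no MPI involved; the per-rank tag assignment is a made-up example): each distinct simCodeTag is associated with the set of ranks that passed it.

    # Illustrative sketch (plain Python, no MPI); the per-rank tags are hypothetical.
    tag_per_rank = {0: "group0", 1: "gr1", 2: "group0"}   # rank -> simCodeTag
    ranks_by_tag = {}
    for rank, tag in tag_per_rank.items():
        ranks_by_tag.setdefault(tag, set()).add(rank)
    print(ranks_by_tag)   # {'group0': {0, 2}, 'gr1': {1}}: ranks sharing a tag form one group
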
diff --git a/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx b/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx
new file mode 100644 (file)
index 0000000..21bf13f
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright (C) 2007-2023  CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __BYSTRINGMPIPROCESSORGROUP_HXX__
+#define __BYSTRINGMPIPROCESSORGROUP_HXX__
+
+#include "MPIProcessorGroup.hxx"
+
+namespace MEDCoupling
+{
+  class CommInterface;
+
+  class ByStringMPIProcessorGroup : public MPIProcessorGroup
+  {
+  public:
+    ByStringMPIProcessorGroup(const CommInterface& interface);
+    ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other);
+    virtual ~ByStringMPIProcessorGroup();
+    virtual ByStringMPIProcessorGroup *deepCopy() const;
+    
+  };
+}
+
+#endif
diff --git a/src/ParaMEDMEM/CMakeLists.txt b/src/ParaMEDMEM/CMakeLists.txt
index a7be534fae01788d255c35276118f7e879217fb4..bdb966ce239cfcf951e853e96ddc7579edfc00ce 100644 (file)
@@ -51,6 +51,7 @@ SET(paramedmem_SOURCES
   InterpolationMatrix.cxx
   LinearTimeInterpolator.cxx
   MPIProcessorGroup.cxx
+  ByStringMPIProcessorGroup.cxx
   MxN_Mapping.cxx
   OverlapDEC.cxx
   OverlapElementLocator.cxx
diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx
index 3131a1a3361d25ac1f1c6b51e39a8a18d07d21c5..f4494f489a64877a22e5e44bb242c497bf17d12b 100644 (file)
@@ -69,6 +69,43 @@ namespace MEDCoupling
   {
   }
 
+  /*!
+   * Creates an InterpKernelDEC from string identifiers for the source and target groups.
+   * The set of procs might not entirely cover MPI_COMM_WORLD
+   * (a sub-communicator holding the union of source and target procs is recreated internally).
+   */
+  InterpKernelDEC::InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group):
+    DisjointDEC(generic_group.getProcIDsByName(source_group),generic_group.getProcIDsByName(target_group)),
+    _interpolation_matrix(0)
+  {
+  }
+  
+  /*!
+   * Splits the interaction group on the predefined token string "<->".
+   *  The string to the left of the token is the source group and the string to the right is the target group.
+   */
+  static std::pair<std::string,std::string> GetGroupsName( const std::string& interaction_group )
+  {
+    const std::string delimiter = "<->";
+    size_t delimiter_position = interaction_group.find(delimiter);
+    if ( delimiter_position == std::string::npos )
+      throw ( "No delimiter <-> found in the interaction group.");
+
+    std::string src = interaction_group.substr(0,delimiter_position);
+    std::string tgt = interaction_group.substr(delimiter_position+delimiter.size(),interaction_group.size());
+    return std::make_pair(src,tgt);
+  }
+
+  /*!
+   * Creates an InterpKernelDEC from a string defining an interaction.
+   *  The source and target groups are obtained by splitting the string on the "<->" token.
+   *  The constructor accepting a ProcessorGroup and two strings is reused.
+   */
+  InterpKernelDEC::InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group ):
+    InterpKernelDEC(generic_group,GetGroupsName(interaction_group).first,GetGroupsName(interaction_group).second)
+  {
+  }
+
   InterpKernelDEC::~InterpKernelDEC()
   {
     release();
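
A minimal usage sketch of the two new constructors, in Python (assuming a 2-process MPI run, the mpi4py module, and the SWIG bindings added below in ParaMEDMEMCommon.i; the tag names "A" and "B" are illustrative). It mirrors test_InterpKernelDEC_easy.py at the end of this commit.

    # Hedged sketch: both forms build the same source("A") / target("B") DEC.
    from mpi4py import MPI
    from medcoupling import CommInterface, ByStringMPIProcessorGroup, InterpKernelDEC

    rank = MPI.COMM_WORLD.rank
    my_tag = "A" if rank == 0 else "B"                 # assumed 2-process layout
    interface = CommInterface()
    group = ByStringMPIProcessorGroup(interface, my_tag)

    dec1 = InterpKernelDEC(group, "A", "B")            # explicit source and target tags
    dec2 = InterpKernelDEC(group, "A<->B")             # single interaction string, split on "<->"
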
diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx
index 3e949143205c7f666cda401baa8588b631deddf7..3a8803a2279d8837d9aef2eba8c7e36d95a785e8 100644 (file)
@@ -131,6 +131,8 @@ namespace MEDCoupling
     InterpKernelDEC();
     InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
     InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group);
+    InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group);
     virtual ~InterpKernelDEC();
     void release();
 
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.cxx b/src/ParaMEDMEM/MPIProcessorGroup.cxx
index 2ada0d7cf32424945870ab436565c71b870f2bf8..1e40fd65d9cef1e6d976ec2b63aebdc69b9001df 100644 (file)
@@ -92,6 +92,23 @@ namespace MEDCoupling
   }
 
 
+  /*! Creates a processor group based on the processors included in \a proc_ids_by_name[simCodeTag].
+    This routine must be called by all processors in MPI_COMM_WORLD.
+
+    \param interface CommInterface object giving access to the MPI
+    communication layer
+    \param proc_ids_by_name a map defining the relation between a name and the set of ids to be integrated in the group.
+    The id numbers are to be understood as MPI_COMM_WORLD ranks.
+    \param simCodeTag identifier of the group
+  */
+
+  MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, std::map<std::string,std::set<int>> proc_ids_by_name, const std::string& simCodeTag, const MPI_Comm& world_comm):
+    ProcessorGroup(interface, proc_ids_by_name, simCodeTag), _world_comm(world_comm)
+  {
+    updateMPISpecificAttributes();
+  }
+
+
   void MPIProcessorGroup::updateMPISpecificAttributes()
   {
     //Creation of a communicator 
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx
index cee8bc3910a04182d6a95069e111171fc10ea919..1dd035995c95973fac917ee6c98abd3d5a1c8720 100644 (file)
@@ -34,6 +34,7 @@ namespace MEDCoupling
   public:
     MPIProcessorGroup(const CommInterface& interface);
     MPIProcessorGroup(const CommInterface& interface, std::set<int> proc_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    MPIProcessorGroup(const CommInterface& interface, std::map<std::string,std::set<int>> proc_ids_by_name, const std::string& simCodeTag, const MPI_Comm& world_comm=MPI_COMM_WORLD);
     MPIProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids);
     MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD);
     MPIProcessorGroup(const MPIProcessorGroup& other);
diff --git a/src/ParaMEDMEM/ProcessorGroup.hxx b/src/ParaMEDMEM/ProcessorGroup.hxx
index 842ee090cfa732000f37900bc2a2d656432c6f74..7e5e71048c49fd6b21e627bcfad1f500db961f92 100644 (file)
@@ -43,6 +43,8 @@ namespace MEDCoupling
     ProcessorGroup (const ProcessorGroup& other):
       _comm_interface(other.getCommInterface()),_proc_ids(other._proc_ids) { }
     ProcessorGroup (const CommInterface& interface, int start, int end);
+    ProcessorGroup(const CommInterface& interface,std::map<std::string,std::set<int>> proc_ids_by_name,const std::string& simCodeTag):
+      _comm_interface(interface),_proc_ids_by_name(proc_ids_by_name),_proc_ids(proc_ids_by_name.at(simCodeTag)) { }
     virtual ~ProcessorGroup() { }
     virtual ProcessorGroup *deepCopy() const = 0;
     virtual ProcessorGroup* fuse (const ProcessorGroup&) const = 0;
@@ -55,9 +57,11 @@ namespace MEDCoupling
     virtual int translateRank(const ProcessorGroup*, int) const = 0;
     virtual ProcessorGroup* createComplementProcGroup() const = 0;
     virtual ProcessorGroup* createProcGroup() const = 0;
-    virtual const std::set<int>& getProcIDs()const  { return _proc_ids; } 
+    virtual const std::set<int>& getProcIDs()const  { return _proc_ids; }
+    virtual const std::set<int>& getProcIDsByName( const std::string& simCodeTag ) const { return _proc_ids_by_name.at(simCodeTag); }
   protected:
     const CommInterface _comm_interface;
+    std::map<std::string,std::set<int>> _proc_ids_by_name;
     std::set<int> _proc_ids;
   };
 }
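
Because ProcessorGroup now keeps the whole tag-to-ranks map (_proc_ids_by_name), a group built for one tag can be queried about any other tag through getProcIDsByName. A hedged Python sketch (assuming a 3-process run and that the method and its std::set<int> return type are exposed by the SWIG wrapper; the tags reuse those of the C++ test below):

    # Hedged sketch; the tag layout matches testByStringMPIProcessorGroup_stringconstructor.
    from mpi4py import MPI
    from medcoupling import CommInterface, ByStringMPIProcessorGroup

    rank = MPI.COMM_WORLD.rank
    my_tag = "group0" if rank in (0, 2) else "gr1"
    interface = CommInterface()
    group = ByStringMPIProcessorGroup(interface, my_tag)

    print(group.getProcIDsByName("group0"))            # expected to contain ranks 0 and 2
    print(group.getProcIDsByName("gr1"))               # expected to contain rank 1
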
diff --git a/src/ParaMEDMEMTest/CMakeLists.txt b/src/ParaMEDMEMTest/CMakeLists.txt
index 74a392d3ab5e3df7d82d94da2a7d8db8bfd03320..b06776ddfbc88eb28a116c173d36446a7545316c 100644 (file)
@@ -40,6 +40,7 @@ INCLUDE_DIRECTORIES(
 SET(ParaMEDMEMTest_SOURCES
   ParaMEDMEMTest.cxx
   ParaMEDMEMTest_MPIProcessorGroup.cxx
+  ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
   ParaMEDMEMTest_BlockTopology.cxx
   ParaMEDMEMTest_InterpKernelDEC.cxx
   ParaMEDMEMTest_StructuredCoincidentDEC.cxx
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
index 008c8a698cecc4b5993f3a6bcedcfdb7eae404dc..fb2b339ec7f2df06aa7a1920fef042178547027a 100644 (file)
@@ -55,7 +55,8 @@ class ParaMEDMEMTest : public CppUnit::TestFixture
   CPPUNIT_TEST(testOverlapDEC2_ter);                // 3 procs
 //  CPPUNIT_TEST(testOverlapDEC3);                    // 2 procs
 //  CPPUNIT_TEST(testOverlapDEC4);                    // 2 procs
-
+  CPPUNIT_TEST(testByStringMPIProcessorGroup_constructor);        // 1 and 2 procs
+  CPPUNIT_TEST(testByStringMPIProcessorGroup_stringconstructor);  // 3 procs
   CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D);// 5 procs
   CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D);      // 5 procs
   CPPUNIT_TEST(testSynchronousEqualInterpKernelDEC_2D);                   // 5 procs
@@ -123,6 +124,8 @@ public:
   void testOverlapDEC3();
 //  void testOverlapDEC3_bis();
   void testOverlapDEC4();
+  void testByStringMPIProcessorGroup_constructor();
+  void testByStringMPIProcessorGroup_stringconstructor();
 #ifdef MED_ENABLE_FVM
   void testNonCoincidentDEC_2D();
   void testNonCoincidentDEC_3D();
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
new file mode 100644 (file)
index 0000000..8207031
--- /dev/null
@@ -0,0 +1,97 @@
+// Copyright (C) 2007-2023  CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "ByStringMPIProcessorGroup.hxx"
+
+#include <string>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+
+using namespace std;
+using namespace MEDCoupling;
+/*
+ * Check methods defined in ByStringMPIProcessorGroup.hxx
+ *
+ (+) ByStringMPIProcessorGroup(const CommInterface& interface);
+ (+) ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm );
+ (+) ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other );
+*/
+void ParaMEDMEMTest::testByStringMPIProcessorGroup_constructor()
+{
+  CommInterface comm_interface;
+  ByStringMPIProcessorGroup* group = new ByStringMPIProcessorGroup(comm_interface);
+  int size;
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+  CPPUNIT_ASSERT_EQUAL(size,group->size());
+  int size2;
+  const MPI_Comm* communicator=group->getComm();
+  MPI_Comm_size(*communicator, &size2);
+  CPPUNIT_ASSERT_EQUAL(size,size2);
+  delete group;  
+}
+void ParaMEDMEMTest::testByStringMPIProcessorGroup_stringconstructor()
+{
+  int size, rankId;
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rankId);
+
+  if (size != 3)
+    return;
+  
+  std::string myTag;
+  if ( rankId == 0 || rankId == 2 ) 
+    myTag = "group0";
+  else
+    myTag = "gr1";
+
+  CommInterface comm_interface;
+  ByStringMPIProcessorGroup * group = new ByStringMPIProcessorGroup(comm_interface,myTag,MPI_COMM_WORLD);
+  ByStringMPIProcessorGroup * copygroup = new ByStringMPIProcessorGroup(*group);
+  CPPUNIT_ASSERT(group);
+  CPPUNIT_ASSERT(copygroup);
+
+  std::set<int> ranksInGroup = group->getProcIDs();
+  std::set<int> ranksInCopiedGroup = copygroup->getProcIDs();
+  if ( rankId == 0 || rankId == 2 )  
+  {
+    CPPUNIT_ASSERT_EQUAL( (int)ranksInGroup.size(), 2 );
+    CPPUNIT_ASSERT_EQUAL( (int)ranksInCopiedGroup.size(), 2 );
+  }
+  else
+  {
+    CPPUNIT_ASSERT_EQUAL( (int)ranksInGroup.size(), 1 );
+    CPPUNIT_ASSERT_EQUAL( (int)ranksInCopiedGroup.size(), 1 );
+  }  
+  CPPUNIT_ASSERT( group->contains(rankId) );
+  CPPUNIT_ASSERT( copygroup->contains(rankId) );
+  delete group;
+  delete copygroup;
+}
diff --git a/src/ParaMEDMEM_Swig/CMakeLists.txt b/src/ParaMEDMEM_Swig/CMakeLists.txt
index 8f7d913169ac6d02edf57e30dc14fdc3a5e58aeb..4e7f502b4376b9b1c079b434def21c32f31a3696 100644 (file)
@@ -91,6 +91,10 @@ IF(MEDCOUPLING_BUILD_PY_TESTS)
     ADD_TEST(NAME PyPara_InterpKernelDEC_Proc5
              COMMAND ${MPIEXEC} -np 5  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
     SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
+
+    ADD_TEST(NAME PyPara_InterpKernelDEC_easy_Proc5
+             COMMAND ${MPIEXEC} -np 5  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC_easy.py)
+    SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_easy_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
     
     #ADD_TEST(NAME PyPara_NonCoincidentDEC_Proc5
     #         COMMAND ${MPIEXEC} -np 5  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_NonCoincidentDEC.py)
@@ -120,7 +124,7 @@ SALOME_INSTALL_SCRIPTS(${CMAKE_CURRENT_BINARY_DIR}/ParaMEDMEM.py ${MEDCOUPLING_I
 INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
 
 set(TEST_INSTALL_DIRECTORY ${MEDCOUPLING_INSTALL_TESTS}/ParaMEDMEM_Swig)
-install(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
+install(FILES test_InterpKernelDEC.py test_InterpKernelDEC_easy.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
 # export MPIEXEC and _oversub_opt to CTestTestfile.cmake of salome test mechanism
 configure_file(CTestTestfileInstall.cmake.in "CTestTestfileST.cmake" @ONLY)
 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CTestTestfileST.cmake DESTINATION ${TEST_INSTALL_DIRECTORY} RENAME CTestTestfile.cmake)
diff --git a/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in b/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
index 163d10012b054f9065f74c4f4ead7c03b8a25e03..b7c7d19975bb3e7bad7010cff1ea26206b560344 100644 (file)
@@ -40,6 +40,11 @@ set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
 add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC.py)
 set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
 
+set(tfile PyPara_InterpKernelDEC_easy_Proc5)
+set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
+add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC_easy.py)
+set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
+
 set(tfile PyPara_StructuredCoincidentDEC_Proc4)
 set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
 add_test(${TEST_NAME} ${MPIEXEC} -np 4 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_StructuredCoincidentDEC.py)
diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
index dca30940787b3f12dfe9371a5809280dae2a7a13..3613e6e6cd82cad4a6f99d156ccccdbb485f91eb 100644 (file)
@@ -26,6 +26,7 @@
 #include "ProcessorGroup.hxx"
 #include "Topology.hxx"
 #include "MPIProcessorGroup.hxx"
+#include "ByStringMPIProcessorGroup.hxx"
 #include "DEC.hxx"
 #include "InterpKernelDEC.hxx"
 #include "NonCoincidentDEC.hxx"
@@ -51,6 +52,7 @@ using namespace ICoCo;
 %include "ParaMESH.hxx"
 %include "ParaFIELD.hxx"
 %include "MPIProcessorGroup.hxx"
+%include "ByStringMPIProcessorGroup.hxx"
 %include "ComponentTopology.hxx"
 %include "DEC.hxx"
 %include "DisjointDEC.hxx"
@@ -293,6 +295,8 @@ namespace MEDCoupling
       InterpKernelDEC();
       InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
       InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids); // hide last optional parameter!
+      InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group);
+      InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group);
       virtual ~InterpKernelDEC();
       void release();
 
diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py
new file mode 100755 (executable)
index 0000000..48016da
--- /dev/null
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+#  -*- coding: iso-8859-1 -*-
+# Copyright (C) 2007-2023  CEA, EDF
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+from medcoupling import *
+#from ParaMEDMEMTestTools import WriteInTmpDir
+import sys, os
+import unittest
+import math
+from mpi4py import MPI
+
+def ranksByGroup(groupString, jobPerWorldRank):
+    ranks=[]
+    for key,value in jobPerWorldRank.items():
+        if (groupString == value ):
+            ranks.append(key)
+    return ranks
+
+
+class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
+    def test_InterpKernelDEC_easy_comm_creation(self):
+        """
+        [EDF26706] :
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 5:
+            print("Should be run on 5 procs!")
+            return
+        jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+        interface = CommInterface()
+        group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+        decBC = InterpKernelDEC(group,"B<->C")
+        decAC = InterpKernelDEC(group,"A<->C")
+        eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+        #
+        MPI.COMM_WORLD.Barrier() 
+    
+    def test_InterpKernelDEC_easy_comm_creation_2(self):
+        """
+        [EDF26706] :
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 5:
+            print("Should be run on 5 procs!")
+            return
+        jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+        interface = CommInterface()
+        group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+        decBC = InterpKernelDEC(group,"B","C")
+        decAC = InterpKernelDEC(group,"A","C")
+        eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+        #
+        MPI.COMM_WORLD.Barrier()    
+
+def Easy_comm_creation_0(decBC,decAC):
+    """ Proc 0 of A"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0,1]),DataArrayDouble([0,1])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([1.2]))
+    decAC.attachLocalField( field )
+    decAC.synchronize()
+    decAC.sendData()
+    pass
+
+def Easy_comm_creation_1(decBC,decAC):
+    """ Proc 0 of B"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([2,3]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([2.3]))
+    decBC.attachLocalField( field )
+    decBC.synchronize()
+    decBC.sendData()
+    pass
+
+def Easy_comm_creation_2(decBC,decAC):
+    """ Proc 1 of B"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([3,4]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([3.3]))
+    decBC.attachLocalField( field )
+    decBC.synchronize()
+    decBC.sendData()
+    pass
+
+def Easy_comm_creation_3(decBC,decAC):
+    """ Proc 0 of C"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.5,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([0.]))
+    decBC.attachLocalField( field )
+    decAC.attachLocalField( field )
+    decBC.synchronize()
+    decAC.synchronize()
+    decBC.recvData()
+    print(field.getArray().getValues())
+    decAC.recvData()
+    print(field.getArray().getValues())
+    pass
+
+def Easy_comm_creation_4(decBC,decAC):
+    """ Proc 1 of C"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.7,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([0.]))
+    decBC.attachLocalField( field )
+    decAC.attachLocalField( field )
+    decBC.synchronize()
+    decAC.synchronize()
+    decBC.recvData()
+    print(field.getArray().getValues())
+    decAC.recvData()
+    print(field.getArray().getValues())
+    pass
+
+if __name__ == "__main__":
+    unittest.main()
+    MPI.Finalize()
+