SALOME platform Git repositories - tools/medcoupling.git/commitdiff
[bos #38048] [EDF] (2023-T3) PARAMEDMEM Ergonomy. Added initial files and definitions.
branch: kleontev/38048_PARAMEDMEM_Ergonomy
author Konstantin Leontev <Konstantin.LEONTEV@opencascade.com>
Wed, 13 Dec 2023 19:39:20 +0000 (19:39 +0000)
committer Konstantin Leontev <Konstantin.LEONTEV@opencascade.com>
Thu, 14 Dec 2023 16:00:38 +0000 (16:00 +0000)
src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx [new file with mode: 0644]
src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx [new file with mode: 0644]
src/ParaMEDMEM/CMakeLists.txt
src/ParaMEDMEM/InterpKernelDEC.cxx
src/ParaMEDMEM/InterpKernelDEC.hxx
src/ParaMEDMEM_Swig/CMakeLists.txt
src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py [new file with mode: 0644]

diff --git a/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx b/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx
new file mode 100644
index 0000000..b990a2c
--- /dev/null
+++ b/src/ParaMEDMEM/ByStringMPIProcessorGroup.cxx
@@ -0,0 +1,47 @@
+// Copyright (C) 2007-2023  CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "ByStringMPIProcessorGroup.hxx"
+
+#include "mpi.h"
+
+namespace MEDCoupling
+{
+  /*!
+   \anchor ByStringMPIProcessorGroup-det
+   \class ByStringMPIProcessorGroup
+
+    MPIProcessorGroup whose members are intended to be gathered by a common string tag (typically the name of the simulation code each processor runs) rather than by explicit rank ids.
+  */
+
+
+  /*! 
+   * Intended to gather the processors of \a world_comm that share the same \a simCodeTag; the body is left empty in this initial commit.
+  */
+  ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm):
+    MPIProcessorGroup(interface), _world_comm(world_comm)
+  {
+
+  }
+
+
+  ByStringMPIProcessorGroup::~ByStringMPIProcessorGroup()
+  {
+    release();
+  }
diff --git a/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx b/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx
new file mode 100644
index 0000000..00cd02a
--- /dev/null
+++ b/src/ParaMEDMEM/ByStringMPIProcessorGroup.hxx
@@ -0,0 +1,36 @@
+// Copyright (C) 2007-2023  CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __BYSTRINGMPIPROCESSORGROUP_HXX__
+#define __BYSTRINGMPIPROCESSORGROUP_HXX__
+
+#include "MPIProcessorGroup.hxx"
+
+namespace MEDCoupling
+{
+  class ByStringMPIProcessorGroup : public MPIProcessorGroup
+  {
+  public:
+    ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm = MPI_COMM_WORLD);
+
+    ~ByStringMPIProcessorGroup();
+  };
+}
+
+#endif
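
The intended use of the new class, as exercised by the Python test added at the end of this commit, is that every MPI rank passes the string tag of the code it runs and ranks sharing a tag end up in the same subgroup. A minimal sketch of that usage (assuming the SWIG binding of ByStringMPIProcessorGroup is exposed in the medcoupling module, which this commit does not yet wire up):

    from mpi4py import MPI
    from medcoupling import CommInterface, ByStringMPIProcessorGroup  # binding assumed available

    # 5 ranks split over three codes: A on rank 0, B on ranks 1-2, C on ranks 3-4
    jobPerWorldRank = {0: "A", 1: "B", 2: "B", 3: "C", 4: "C"}
    rank = MPI.COMM_WORLD.rank

    interface = CommInterface()
    # each rank declares only its own tag; the group is meant to gather ranks with equal tags
    group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
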
diff --git a/src/ParaMEDMEM/CMakeLists.txt b/src/ParaMEDMEM/CMakeLists.txt
index 03b9373d8a95df2741049c1a7c00762453ec2693..1a8143112a3dd3ef2fdd5c6cf2c526332feb430a 100644
--- a/src/ParaMEDMEM/CMakeLists.txt
+++ b/src/ParaMEDMEM/CMakeLists.txt
@@ -51,6 +51,7 @@ SET(paramedmem_SOURCES
   InterpolationMatrix.cxx
   LinearTimeInterpolator.cxx
   MPIProcessorGroup.cxx
+  ByStringMPIProcessorGroup.cxx
   MxN_Mapping.cxx
   OverlapDEC.cxx
   OverlapElementLocator.cxx
diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx
index 18cf01e7c7bab4cc71791b33edb24097a2b18dbf..aea25a8ac58e9d0a3ed56dbf8c3c33e9c862dfbd 100644
--- a/src/ParaMEDMEM/InterpKernelDEC.cxx
+++ b/src/ParaMEDMEM/InterpKernelDEC.cxx
@@ -69,6 +69,26 @@ namespace MEDCoupling
   {
   }
 
+  /*!
+   * Creates an InterpKernelDEC from a group built as a ByStringMPIProcessorGroup, using a single coupling tag (e.g. "B<->C") that names both coupled codes.
+   * TODO: not implemented yet
+   */
+  InterpKernelDEC::InterpKernelDEC(ProcessorGroup& group, const std::string& simCodeToBeCoupled):
+    DisjointDEC(),
+    _interpolation_matrix(0)
+  {
+  }
+
+  /*!
+   * Creates an InterpKernelDEC from a group built as a ByStringMPIProcessorGroup, with separate source and target code tags.
+   * TODO: not implemented yet
+   */
+  InterpKernelDEC::InterpKernelDEC(ProcessorGroup& group, const std::string& simCodeToBeCoupledSrc, const std::string& simCodeToBeCoupledTrg):
+    DisjointDEC(),
+    _interpolation_matrix(0)
+  {
+  }
+
   InterpKernelDEC::~InterpKernelDEC()
   {
     release();
diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx
index eaa87526efd6ad5022dcbac80ca587e729d09922..241085d074a1e3b1ed17f96d124e7990eb573119 100644
--- a/src/ParaMEDMEM/InterpKernelDEC.hxx
+++ b/src/ParaMEDMEM/InterpKernelDEC.hxx
@@ -131,6 +131,11 @@ namespace MEDCoupling
     InterpKernelDEC();
     InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
     InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+
+    // Constructors below expect a group built as a ByStringMPIProcessorGroup
+    InterpKernelDEC(ProcessorGroup& group, const std::string& simCodeToBeCoupled);
+    InterpKernelDEC(ProcessorGroup& group, const std::string& simCodeToBeCoupledSrc, const std::string& simCodeToBeCoupledTrg);
+
     virtual ~InterpKernelDEC();
     void release();
 
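
With such a group, the two new constructors are meant to let each code build a DEC from string tags instead of explicit rank sets. A hedged sketch continuing the group creation above and mirroring the test added below (the constructor bodies are still empty stubs in this commit):

    from medcoupling import (InterpKernelDEC, MEDCouplingCMesh, MEDCouplingFieldDouble,
                             DataArrayDouble, ON_CELLS, IntensiveMaximum)

    # one-cell mesh and field on the local side, as in the test below
    m = MEDCouplingCMesh(); m.setCoords(DataArrayDouble([0, 1]), DataArrayDouble([0, 1])); m = m.buildUnstructured()
    field = MEDCouplingFieldDouble(ON_CELLS)
    field.setNature(IntensiveMaximum)
    field.setMesh(m)
    field.setArray(DataArrayDouble([1.2]))

    decBC = InterpKernelDEC(group, "B<->C")      # single tag naming both coupled codes
    # decBC = InterpKernelDEC(group, "B", "C")   # or separate source and target code tags
    decBC.attachLocalField(field)
    decBC.synchronize()
    decBC.sendData()   # on the sending (B) side; the C side calls recvData() instead
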
diff --git a/src/ParaMEDMEM_Swig/CMakeLists.txt b/src/ParaMEDMEM_Swig/CMakeLists.txt
index c191cc18e2108db4cf7a960232070e38604f865b..8563d40d817d4f96dd40cc74b59c6b650427436b 100644
--- a/src/ParaMEDMEM_Swig/CMakeLists.txt
+++ b/src/ParaMEDMEM_Swig/CMakeLists.txt
@@ -90,6 +90,8 @@ IF(MEDCOUPLING_BUILD_PY_TESTS)
     SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
     ADD_TEST(NAME PyPara_InterpKernelDEC_Proc5
              COMMAND ${MPIEXEC} -np 5  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
+    ADD_TEST(NAME PyPara_InterpKernelDEC_easy_Proc5
+             COMMAND ${MPIEXEC} -np 5  ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC_easy.py)
     SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
     
     #ADD_TEST(NAME PyPara_NonCoincidentDEC_Proc5
@@ -107,6 +109,7 @@ ENDIF()
 
 SET(_tst_scripts
   test_InterpKernelDEC.py
+  test_InterpKernelDEC_easy.py
   test_NonCoincidentDEC.py
   test_StructuredCoincidentDEC.py
   test_OverlapDEC.py
@@ -117,10 +120,10 @@ INSTALL(FILES ParaMEDMEM.i ParaMEDMEMCommon.i DESTINATION ${MEDCOUPLING_INSTALL_
 INSTALL(FILES ${_tst_scripts} DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
 SALOME_INSTALL_SCRIPTS(${CMAKE_CURRENT_BINARY_DIR}/ParaMEDMEM.py ${MEDCOUPLING_INSTALL_PYTHON} EXTRA_DPYS "${SWIG_MODULE_ParaMEDMEM_REAL_NAME}")
 
-INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
+INSTALL(FILES test_InterpKernelDEC.py test_InterpKernelDEC_easy.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
 
 set(TEST_INSTALL_DIRECTORY ${MEDCOUPLING_INSTALL_TESTS}/ParaMEDMEM_Swig)
-install(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
+install(FILES test_InterpKernelDEC.py test_InterpKernelDEC_easy.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
 # export MPIEXEC and _oversub_opt to CTestTestfile.cmake of salome test mechanism
 configure_file(CTestTestfileInstall.cmake.in "CTestTestfileST.cmake" @ONLY)
 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CTestTestfileST.cmake DESTINATION ${TEST_INSTALL_DIRECTORY} RENAME CTestTestfile.cmake)
diff --git a/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in b/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
index a44a7d3e3476a275025e9c9912128b125dbe9295..275e9cde8229c5047eb0f642a38334da615a55d5 100644
--- a/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
+++ b/src/ParaMEDMEM_Swig/CTestTestfileInstall.cmake.in
@@ -40,6 +40,11 @@ set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
 add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC.py)
 set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
 
+set(tfile PyPara_InterpKernelDEC_easy_Proc5)
+set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
+add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC_easy.py)
+set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
+
 set(tfile PyPara_StructuredCoincidentDEC_Proc4)
 set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
 add_test(${TEST_NAME} ${MPIEXEC} -np 4 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_StructuredCoincidentDEC.py)
diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py
new file mode 100644
index 0000000..c863c8c
--- /dev/null
+++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC_easy.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+#  -*- coding: iso-8859-1 -*-
+# Copyright (C) 2007-2023  CEA, EDF
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+from medcoupling import *
+from ParaMEDMEMTestTools import WriteInTmpDir
+import sys, os
+import unittest
+import math
+from mpi4py import MPI
+
+
+class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
+    def test_InterpKernelDEC_easy_comm_creation(self):
+        """
+        [EDF26706] : group 5 procs by code tag (A/B/C) and couple them with InterpKernelDEC built from a single "X<->Y" tag.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 5:
+            print("Should be run on 5 procs!")
+            return
+        jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+        interface = CommInterface()
+        group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+        decBC = InterpKernelDEC(group,"B<->C")
+        decAC = InterpKernelDEC(group,"A<->C")
+        eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+        #
+        MPI.COMM_WORLD.Barrier()
+
+    def test_InterpKernelDEC_easy_comm_creation_2(self):
+        """
+        [EDF26706] : same scenario as above, but the InterpKernelDEC is built from separate source and target code tags.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 5:
+            print("Should be run on 5 procs!")
+            return
+        jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+        interface = CommInterface()
+        group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+        decBC = InterpKernelDEC(group,"B","C")
+        decAC = InterpKernelDEC(group,"A","C")
+        eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+        #
+        MPI.COMM_WORLD.Barrier()
+
+def Easy_comm_creation_0(decBC,decAC):
+    """ Proc 0 of A"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0,1]),DataArrayDouble([0,1])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([1.2]))
+    decAC.attachLocalField( field )
+    decAC.synchronize()
+    decAC.sendData()
+    pass
+
+def Easy_comm_creation_1(decBC,decAC):
+    """ Proc 0 of B"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([2,3]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([2.3]))
+    decBC.attachLocalField( field )
+    decBC.synchronize()
+    decBC.sendData()
+    pass
+
+def Easy_comm_creation_2(decBC,decAC):
+    """ Proc 1 of B"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([3,4]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([3.3]))
+    decBC.attachLocalField( field )
+    decBC.synchronize()
+    decBC.sendData()
+    pass
+
+def Easy_comm_creation_3(decBC,decAC):
+    """ Proc 0 of C"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.5,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([0.]))
+    decBC.attachLocalField( field )
+    decAC.attachLocalField( field )
+    decBC.synchronize()
+    decAC.synchronize()
+    decBC.recvData()
+    print(field.getArray().getValues())
+    decAC.recvData()
+    print(field.getArray().getValues())
+    pass
+
+def Easy_comm_creation_4(decBC,decAC):
+    """ Proc 1 of C"""
+    m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.7,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+    field = MEDCouplingFieldDouble(ON_CELLS)
+    field.setNature(IntensiveMaximum)
+    field.setMesh( m )
+    field.setArray( DataArrayDouble([0.]))
+    decBC.attachLocalField( field )
+    decAC.attachLocalField( field )
+    decBC.synchronize()
+    decAC.synchronize()
+    decBC.recvData()
+    print(field.getArray().getValues())
+    decAC.recvData()
+    print(field.getArray().getValues())
+    pass
+
+if __name__ == "__main__":
+    unittest.main()
+    MPI.Finalize()
+
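
As wired into the CMake files above, the new test is meant to run on exactly 5 MPI processes, e.g. mpiexec -np 5 python3 test_InterpKernelDEC_easy.py; with any other process count each test case prints a warning and returns without exercising the DECs.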