--- /dev/null
+// Copyright (C) 2007-2023 CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "ByStringMPIProcessorGroup.hxx"
+
+#include <iostream>
+#include <set>
+#include <vector>
+#include <algorithm>
+#include "mpi.h"
+
+using namespace std;
+
+
+namespace MEDCoupling
+{
+ /*!
+ \class ByStringMPIProcessorGroup
+
+     The ByStringMPIProcessorGroup class implements a derived version of MPIProcessorGroup.
+
+     Groups are formed from the MPI ranks sharing the same simCodeTag. Two trivial cases:
+     - if all simCodeTag values are equal, a single group is formed from all the MPI ranks of the communicator;
+     - if all simCodeTag values differ, as many single-rank groups are formed as there are MPI ranks.
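+
+     A minimal usage sketch (\a rank is assumed to be the caller's rank in the communicator, tag values are illustrative):
+     \code
+     CommInterface interface;
+     std::string myTag = ( rank < 2 ) ? "solverA" : "solverB";             // per-rank tag
+     ByStringMPIProcessorGroup group( interface, myTag, MPI_COMM_WORLD );  // ranks 0,1 form one group, the other ranks another
+     \endcode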
+ */
+
+
+  /*! Build and return a map relating each distinct simCodeTag to the set of MPI rank ids (in the passed communicator) that share that identifier.
+  \param interface CommInterface object giving access to the MPI communication layer
+  \param simCodeTag the string identifying the tag of the group the calling rank belongs to
+  \param world_comm MPI communicator
+  \return map relating each unique simCodeTag to the set of MPI rank ids belonging to that group
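+
+  For example (illustration only): with 3 ranks whose simCodeTag values are "A", "B" and "B",
+  the returned map is { "A" -> {0}, "B" -> {1,2} }.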
+ */
+ static std::map<std::string,std::set<int>> DefineSetIdByStringName( const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm )
+ {
+ int size_world;
+ int rank_world;
+ interface.commSize(world_comm,&size_world);
+ interface.commRank(world_comm,&rank_world);
+
+ std::map<std::string,std::set<int>> myRanksSet;
+
+ std::vector<int> displacement(size_world, 0 );
+ std::vector<int> words_size(size_world);
+
+ int stringSize = (int) simCodeTag.size();
+ interface.allGather( &stringSize, 1, MPI_INT, words_size.data(), 1, MPI_INT, world_comm );
+
+ for (size_t rank = 1; rank < words_size.size(); rank++)
+ displacement[ rank ] = words_size[ rank - 1 ] + displacement[ rank - 1 ];
+
+    // receive buffer sized to hold the concatenation of all gathered tag strings
+    std::vector<char> globalnames( displacement[ size_world - 1 ] + words_size[ size_world - 1 ] );
+
+    interface.allGatherV( simCodeTag.c_str(), stringSize, MPI_CHAR, globalnames.data(),
+                          words_size.data(), displacement.data(), MPI_CHAR, world_comm );
+
+    for (int rank = 0; rank < size_world; rank++)
+    {
+      std::string strByRank( &globalnames[ displacement[ rank ] ], words_size[ rank ] );
+      myRanksSet[ strByRank ].insert( rank );
+    }
+ return myRanksSet;
+ }
+
+ /*!
+   * Creates a processor group spanning all the processors of MPI_COMM_WORLD.
+   * This routine must be called by all processors of MPI_COMM_WORLD.
+ \param interface CommInterface object giving access to the MPI
+ communication layer
+ */
+ ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const CommInterface& interface):
+ MPIProcessorGroup(interface)
+ {
+ }
+
+  /*! Creates a processor group based on the simCodeTag passed.
+
+    \param interface CommInterface object giving access to the MPI
+    communication layer
+    \param simCodeTag the string identifying the tag of the group the calling rank belongs to
+    \param world_comm MPI communicator
+ */
+ ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm ):
+ MPIProcessorGroup(interface, DefineSetIdByStringName( interface, simCodeTag, world_comm ), simCodeTag, world_comm )
+ {
+ }
+
+ ByStringMPIProcessorGroup::ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other):
+ MPIProcessorGroup(other)
+ {
+ }
+
+ ByStringMPIProcessorGroup::~ByStringMPIProcessorGroup()
+ {
+ }
+
+ ByStringMPIProcessorGroup *ByStringMPIProcessorGroup::deepCopy() const
+ {
+ return new ByStringMPIProcessorGroup(*this);
+ }
+
+}
--- /dev/null
+// Copyright (C) 2007-2023 CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __BYSTRINGMPIPROCESSORGROUP_HXX__
+#define __BYSTRINGMPIPROCESSORGROUP_HXX__
+
+#include "MPIProcessorGroup.hxx"
+
+namespace MEDCoupling
+{
+ class CommInterface;
+
+ class ByStringMPIProcessorGroup : public MPIProcessorGroup
+ {
+ public:
+ ByStringMPIProcessorGroup(const CommInterface& interface);
+ ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+ ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other);
+ virtual ~ByStringMPIProcessorGroup();
+ virtual ByStringMPIProcessorGroup *deepCopy() const;
+
+ };
+}
+
+#endif
InterpolationMatrix.cxx
LinearTimeInterpolator.cxx
MPIProcessorGroup.cxx
+ ByStringMPIProcessorGroup.cxx
MxN_Mapping.cxx
OverlapDEC.cxx
OverlapElementLocator.cxx
{
}
+ /*!
+   * Creates an InterpKernelDEC from string identifiers of the source and target groups.
+   * The set of procs need not cover MPI_COMM_WORLD entirely
+   * (a sub-communicator holding the union of source and target procs is created internally).
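+   *
+   * A minimal usage sketch (\a interface is a CommInterface, \a myTag the per-rank tag; group names are illustrative):
+   * \code
+   * ByStringMPIProcessorGroup grp( interface, myTag );   // myTag is e.g. "B" or "C"
+   * InterpKernelDEC dec( grp, "B", "C" );                // ranks tagged "B" form the source side, "C" the target side
+   * \endcode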
+ */
+ InterpKernelDEC::InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group):
+ DisjointDEC(generic_group.getProcIDsByName(source_group),generic_group.getProcIDsByName(target_group)),
+ _interpolation_matrix(0)
+ {
+ }
+
+ /*!
+   * Splits the interaction string on the predefined delimiter "<->".
+   * The substring to the left of the delimiter is the source group name and the one to the right the target group name,
+   * e.g. "B<->C" yields the pair ("B","C").
+ */
+ static std::pair<std::string,std::string> GetGroupsName( const std::string& interaction_group )
+ {
+ const std::string delimiter = "<->";
+ size_t delimiter_position = interaction_group.find(delimiter);
+ if ( delimiter_position == std::string::npos )
+ throw ( "No delimiter <-> found in the interaction group.");
+
+ std::string src = interaction_group.substr(0,delimiter_position);
+    std::string tgt = interaction_group.substr(delimiter_position+delimiter.size());
+ return std::make_pair(src,tgt);
+ }
+
+ /*!
+   * Creates an InterpKernelDEC from a string defining an interaction.
+   * The source and target group names are obtained by splitting the string on the "<->" token.
+ * The constructor accepting a ProcessorGroup and two strings is reused.
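+   *
+   * A minimal usage sketch (\a interface is a CommInterface, \a myTag the per-rank tag; group names are illustrative):
+   * \code
+   * ByStringMPIProcessorGroup grp( interface, myTag );   // myTag is e.g. "B" or "C"
+   * InterpKernelDEC decBC( grp, "B<->C" );               // equivalent to InterpKernelDEC( grp, "B", "C" )
+   * \endcode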
+ */
+ InterpKernelDEC::InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group ):
+ InterpKernelDEC(generic_group,GetGroupsName(interaction_group).first,GetGroupsName(interaction_group).second)
+ {
+ }
+
InterpKernelDEC::~InterpKernelDEC()
{
release();
InterpKernelDEC();
InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+ InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group);
+ InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group);
virtual ~InterpKernelDEC();
void release();
}
+  /*! Creates a processor group made of the processors listed in \a proc_ids_by_name[simCodeTag].
+    This routine must be called by all processors in MPI_COMM_WORLD.
+
+    \param interface CommInterface object giving access to the MPI
+    communication layer
+    \param proc_ids_by_name a map relating a group name to the set of processor ids to be put in that group.
+    The ids are to be understood as MPI_COMM_WORLD ranks.
+    \param simCodeTag identifier of the group to create
+    \param world_comm communicator out of which the group communicator is built (MPI_COMM_WORLD by default)
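+
+    For example (illustration only), \a proc_ids_by_name = { "A" -> {0}, "B" -> {1,2} } together with
+    \a simCodeTag = "B" builds a group containing the MPI_COMM_WORLD ranks 1 and 2.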
+ */
+
+ MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, std::map<std::string,std::set<int>> proc_ids_by_name, const std::string& simCodeTag, const MPI_Comm& world_comm):
+ ProcessorGroup(interface, proc_ids_by_name, simCodeTag), _world_comm(world_comm)
+ {
+ updateMPISpecificAttributes();
+ }
+
+
void MPIProcessorGroup::updateMPISpecificAttributes()
{
//Creation of a communicator
public:
MPIProcessorGroup(const CommInterface& interface);
MPIProcessorGroup(const CommInterface& interface, std::set<int> proc_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+ MPIProcessorGroup(const CommInterface& interface, std::map<std::string,std::set<int>> proc_ids_by_name, const std::string& simCodeTag, const MPI_Comm& world_comm=MPI_COMM_WORLD);
MPIProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids);
MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD);
MPIProcessorGroup(const MPIProcessorGroup& other);
ProcessorGroup (const ProcessorGroup& other):
_comm_interface(other.getCommInterface()),_proc_ids(other._proc_ids) { }
ProcessorGroup (const CommInterface& interface, int start, int end);
+ ProcessorGroup(const CommInterface& interface,std::map<std::string,std::set<int>> proc_ids_by_name,const std::string& simCodeTag):
+ _comm_interface(interface),_proc_ids_by_name(proc_ids_by_name),_proc_ids(proc_ids_by_name.at(simCodeTag)) { }
virtual ~ProcessorGroup() { }
virtual ProcessorGroup *deepCopy() const = 0;
virtual ProcessorGroup* fuse (const ProcessorGroup&) const = 0;
virtual int translateRank(const ProcessorGroup*, int) const = 0;
virtual ProcessorGroup* createComplementProcGroup() const = 0;
virtual ProcessorGroup* createProcGroup() const = 0;
- virtual const std::set<int>& getProcIDs()const { return _proc_ids; }
+ virtual const std::set<int>& getProcIDs()const { return _proc_ids; }
+ virtual const std::set<int>& getProcIDsByName( const std::string& simCodeTag ) const { return _proc_ids_by_name.at(simCodeTag); }
protected:
const CommInterface _comm_interface;
+ std::map<std::string,std::set<int>> _proc_ids_by_name;
std::set<int> _proc_ids;
};
}
SET(ParaMEDMEMTest_SOURCES
ParaMEDMEMTest.cxx
ParaMEDMEMTest_MPIProcessorGroup.cxx
+ ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
ParaMEDMEMTest_BlockTopology.cxx
ParaMEDMEMTest_InterpKernelDEC.cxx
ParaMEDMEMTest_StructuredCoincidentDEC.cxx
CPPUNIT_TEST(testOverlapDEC2_ter); // 3 procs
// CPPUNIT_TEST(testOverlapDEC3); // 2 procs
// CPPUNIT_TEST(testOverlapDEC4); // 2 procs
-
+ CPPUNIT_TEST(testByStringMPIProcessorGroup_constructor); // 1 and 2 procs
+ CPPUNIT_TEST(testByStringMPIProcessorGroup_stringconstructor); // 3 procs
CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D);// 5 procs
CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D); // 5 procs
CPPUNIT_TEST(testSynchronousEqualInterpKernelDEC_2D); // 5 procs
void testOverlapDEC3();
// void testOverlapDEC3_bis();
void testOverlapDEC4();
+ void testByStringMPIProcessorGroup_constructor();
+ void testByStringMPIProcessorGroup_stringconstructor();
#ifdef MED_ENABLE_FVM
void testNonCoincidentDEC_2D();
void testNonCoincidentDEC_3D();
--- /dev/null
+// Copyright (C) 2007-2023 CEA, EDF
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "ByStringMPIProcessorGroup.hxx"
+
+#include <string>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+
+using namespace std;
+using namespace MEDCoupling;
+
+/*
+ * Check methods defined in ByStringMPIProcessorGroup.hxx
+ *
+ (+) ByStringMPIProcessorGroup(const CommInterface& interface);
+ (+) ByStringMPIProcessorGroup(const CommInterface& interface, const std::string& simCodeTag, const MPI_Comm& world_comm );
+ (+) ByStringMPIProcessorGroup(const ByStringMPIProcessorGroup& other );
+*/
+
+void ParaMEDMEMTest::testByStringMPIProcessorGroup_constructor()
+{
+ CommInterface comm_interface;
+ ByStringMPIProcessorGroup* group = new ByStringMPIProcessorGroup(comm_interface);
+ int size;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ CPPUNIT_ASSERT_EQUAL(size,group->size());
+ int size2;
+ const MPI_Comm* communicator=group->getComm();
+ MPI_Comm_size(*communicator, &size2);
+ CPPUNIT_ASSERT_EQUAL(size,size2);
+ delete group;
+}
+
+void ParaMEDMEMTest::testByStringMPIProcessorGroup_stringconstructor()
+{
+ int size, rankId;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rankId);
+
+ if (size != 3)
+ return;
+
+ std::string myTag;
+ if ( rankId == 0 || rankId == 2 )
+ myTag = "group0";
+ else
+ myTag = "gr1";
+
+ CommInterface comm_interface;
+ ByStringMPIProcessorGroup * group = new ByStringMPIProcessorGroup(comm_interface,myTag,MPI_COMM_WORLD);
+ ByStringMPIProcessorGroup * copygroup = new ByStringMPIProcessorGroup(*group);
+ CPPUNIT_ASSERT(group);
+ CPPUNIT_ASSERT(copygroup);
+
+ std::set<int> ranksInGroup = group->getProcIDs();
+  std::set<int> ranksInCopiedGroup = copygroup->getProcIDs();
+ if ( rankId == 0 || rankId == 2 )
+ {
+ CPPUNIT_ASSERT_EQUAL( (int)ranksInGroup.size(), 2 );
+ CPPUNIT_ASSERT_EQUAL( (int)ranksInCopiedGroup.size(), 2 );
+ }
+ else
+ {
+ CPPUNIT_ASSERT_EQUAL( (int)ranksInGroup.size(), 1 );
+ CPPUNIT_ASSERT_EQUAL( (int)ranksInCopiedGroup.size(), 1 );
+ }
+ CPPUNIT_ASSERT( group->contains(rankId) );
+ CPPUNIT_ASSERT( copygroup->contains(rankId) );
+ delete group;
+ delete copygroup;
+}
ADD_TEST(NAME PyPara_InterpKernelDEC_Proc5
COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ ADD_TEST(NAME PyPara_InterpKernelDEC_easy_Proc5
+ COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC_easy.py)
+ SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_easy_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
#ADD_TEST(NAME PyPara_NonCoincidentDEC_Proc5
# COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_NonCoincidentDEC.py)
INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
set(TEST_INSTALL_DIRECTORY ${MEDCOUPLING_INSTALL_TESTS}/ParaMEDMEM_Swig)
-install(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
+install(FILES test_InterpKernelDEC.py test_InterpKernelDEC_easy.py test_NonCoincidentDEC.py test_OverlapDEC.py test_StructuredCoincidentDEC.py ParaMEDMEMTestTools.py test_BasicOperation.py DESTINATION ${TEST_INSTALL_DIRECTORY})
# export MPIEXEC and _oversub_opt to CTestTestfile.cmake of salome test mechanism
configure_file(CTestTestfileInstall.cmake.in "CTestTestfileST.cmake" @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CTestTestfileST.cmake DESTINATION ${TEST_INSTALL_DIRECTORY} RENAME CTestTestfile.cmake)
add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC.py)
set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
+set(tfile PyPara_InterpKernelDEC_easy_Proc5)
+set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
+add_test(${TEST_NAME} ${MPIEXEC} -np 5 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_InterpKernelDEC_easy.py)
+set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${COMPONENT_NAME}" TIMEOUT ${TIMEOUT})
+
set(tfile PyPara_StructuredCoincidentDEC_Proc4)
set(TEST_NAME ${COMPONENT_NAME}_${TEST_NAMES}_${tfile})
add_test(${TEST_NAME} ${MPIEXEC} -np 4 ${_oversub_opt} -path "${PATH_FOR_PYTHON}" python3 test_StructuredCoincidentDEC.py)
#include "ProcessorGroup.hxx"
#include "Topology.hxx"
#include "MPIProcessorGroup.hxx"
+#include "ByStringMPIProcessorGroup.hxx"
#include "DEC.hxx"
#include "InterpKernelDEC.hxx"
#include "NonCoincidentDEC.hxx"
%include "ParaMESH.hxx"
%include "ParaFIELD.hxx"
%include "MPIProcessorGroup.hxx"
+%include "ByStringMPIProcessorGroup.hxx"
%include "ComponentTopology.hxx"
%include "DEC.hxx"
%include "DisjointDEC.hxx"
InterpKernelDEC();
InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids); // hide last optional parameter!
+ InterpKernelDEC(ProcessorGroup& generic_group, const std::string& source_group, const std::string& target_group);
+ InterpKernelDEC(ProcessorGroup& generic_group, const std::string& interaction_group);
virtual ~InterpKernelDEC();
void release();
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+# Copyright (C) 2007-2023 CEA, EDF
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+from medcoupling import *
+#from ParaMEDMEMTestTools import WriteInTmpDir
+import sys, os
+import unittest
+import math
+from mpi4py import MPI
+
+def ranksByGroup(groupString, jobPerWorldRank):
+ ranks=[]
+ for key,value in jobPerWorldRank.items():
+ if (groupString == value ):
+ ranks.append(key)
+ return ranks
+
+
+class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
+ def test_InterpKernelDEC_easy_comm_creation(self):
+ """
+        [EDF26706] : create two InterpKernelDEC objects from a ByStringMPIProcessorGroup and interaction strings ("B<->C", "A<->C").
+ """
+ size = MPI.COMM_WORLD.size
+ rank = MPI.COMM_WORLD.rank
+ if size != 5:
+ print("Should be run on 5 procs!")
+ return
+ jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+ interface = CommInterface()
+ group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+ decBC = InterpKernelDEC(group,"B<->C")
+ decAC = InterpKernelDEC(group,"A<->C")
+ eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+ #
+ MPI.COMM_WORLD.Barrier()
+
+ def test_InterpKernelDEC_easy_comm_creation_2(self):
+ """
+        [EDF26706] : same as test_InterpKernelDEC_easy_comm_creation, but using the (group, source, target) constructor.
+ """
+ size = MPI.COMM_WORLD.size
+ rank = MPI.COMM_WORLD.rank
+ if size != 5:
+ print("Should be run on 5 procs!")
+ return
+ jobPerWorldRank = {0:"A",1:"B",2:"B",3:"C",4:"C"}
+ interface = CommInterface()
+ group = ByStringMPIProcessorGroup(interface, jobPerWorldRank[rank])
+ decBC = InterpKernelDEC(group,"B","C")
+ decAC = InterpKernelDEC(group,"A","C")
+ eval("Easy_comm_creation_{}".format(rank))(decBC,decAC)
+ #
+ MPI.COMM_WORLD.Barrier()
+
+def Easy_comm_creation_0(decBC,decAC):
+ """ Proc 0 of A"""
+ m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0,1]),DataArrayDouble([0,1])) ; m = m.buildUnstructured()
+ field = MEDCouplingFieldDouble(ON_CELLS)
+ field.setNature(IntensiveMaximum)
+ field.setMesh( m )
+ field.setArray( DataArrayDouble([1.2]))
+ decAC.attachLocalField( field )
+ decAC.synchronize()
+ decAC.sendData()
+ pass
+
+def Easy_comm_creation_1(decBC,decAC):
+ """ Proc 0 of B"""
+ m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([2,3]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+ field = MEDCouplingFieldDouble(ON_CELLS)
+ field.setNature(IntensiveMaximum)
+ field.setMesh( m )
+ field.setArray( DataArrayDouble([2.3]))
+ decBC.attachLocalField( field )
+ decBC.synchronize()
+ decBC.sendData()
+ pass
+
+def Easy_comm_creation_2(decBC,decAC):
+ """ Proc 1 of B"""
+ m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([3,4]),DataArrayDouble([1,2])) ; m = m.buildUnstructured()
+ field = MEDCouplingFieldDouble(ON_CELLS)
+ field.setNature(IntensiveMaximum)
+ field.setMesh( m )
+ field.setArray( DataArrayDouble([3.3]))
+ decBC.attachLocalField( field )
+ decBC.synchronize()
+ decBC.sendData()
+ pass
+
+def Easy_comm_creation_3(decBC,decAC):
+ """ Proc 0 of C"""
+ m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.5,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+ field = MEDCouplingFieldDouble(ON_CELLS)
+ field.setNature(IntensiveMaximum)
+ field.setMesh( m )
+ field.setArray( DataArrayDouble([0.]))
+ decBC.attachLocalField( field )
+ decAC.attachLocalField( field )
+ decBC.synchronize()
+ decAC.synchronize()
+ decBC.recvData()
+ print(field.getArray().getValues())
+ decAC.recvData()
+ print(field.getArray().getValues())
+ pass
+
+def Easy_comm_creation_4(decBC,decAC):
+ """ Proc 1 of C"""
+ m = MEDCouplingCMesh() ; m.setCoords(DataArrayDouble([0.7,3.5]),DataArrayDouble([0,1.5])) ; m = m.buildUnstructured()
+ field = MEDCouplingFieldDouble(ON_CELLS)
+ field.setNature(IntensiveMaximum)
+ field.setMesh( m )
+ field.setArray( DataArrayDouble([0.]))
+ decBC.attachLocalField( field )
+ decAC.attachLocalField( field )
+ decBC.synchronize()
+ decAC.synchronize()
+ decBC.recvData()
+ print(field.getArray().getValues())
+ decAC.recvData()
+ print(field.getArray().getValues())
+ pass
+
+if __name__ == "__main__":
+ unittest.main()
+ MPI.Finalize()
+