BlockTopology::BlockTopology() :
_dimension(0), _nb_procs_per_dim(0),
_local_array_indices(0), _cycle_type(0),
- _proc_group(NULL),_nb_elems(0),
+ _proc_group(nullptr),_nb_elems(0),
_owns_processor_group(false)
{}
}
BlockTopology::~BlockTopology()
+ {
+ release();
+ }
+
+ /** The destructor involves MPI operations: make sure this clean-up is also reachable through
+ * a dedicated method (release()), so the Python wrapping can invoke it explicitly before MPI is finalized.
+ */
+ void BlockTopology::release()
{
if (_owns_processor_group)
delete _proc_group;
+ _proc_group = nullptr;
}
//!converts a pair <subdomainid,local> to a global number
void BlockTopology::unserialize(const mcIdType* serializer,const CommInterface& comm_interface)
{
const mcIdType* ptr_serializer=serializer;
- cout << "unserialize..."<<endl;
_dimension=(int)*(ptr_serializer++);
- cout << "dimension "<<_dimension<<endl;
_nb_elems=*(ptr_serializer++);
- cout << "nbelems "<<_nb_elems<<endl;
_nb_procs_per_dim.resize(_dimension);
_cycle_type.resize(_dimension);
_local_array_indices.resize(_dimension);
mcIdType size_comm=*(ptr_serializer++);
for (int i=0; i<size_comm; i++)
procs.insert((int)*(ptr_serializer++));
- cout << "unserialize..."<<procs.size()<<endl;
+
+ if (_owns_processor_group)
+ delete _proc_group;
_proc_group=new MPIProcessorGroup(comm_interface,procs);
_owns_processor_group=true;
- //TODO manage memory ownership of _proc_group
}
}
BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo);
BlockTopology(const ProcessorGroup& group, mcIdType nb_elem);
virtual ~BlockTopology();
+ void release();
+
//!Retrieves the number of elements for a given topology
mcIdType getNbElements()const { return _nb_elems; }
mcIdType getNbLocalElements() const;
{
delete _local_field;
}
- _local_field=0;
+ _local_field=nullptr;
_owns_field=false;
if(_owns_groups)
{
delete _target_group;
}
_owns_groups=false;
- _source_group=0;
- _target_group=0;
+ _source_group=nullptr;
+ _target_group=nullptr;
delete _union_group;
- _union_group=0;
+ _union_group=nullptr;
if (_union_comm != MPI_COMM_NULL)
_comm_interface->commFree(&_union_comm);
+ _union_comm = MPI_COMM_NULL;
}
/**
DisjointDEC &operator=(const DisjointDEC& s);
DisjointDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
const MPI_Comm& world_comm=MPI_COMM_WORLD);
+ virtual ~DisjointDEC();
+
void setNature(NatureOfField nature);
void attachLocalField( MEDCouplingFieldDouble *field);
void attachLocalField(const ParaFIELD *field, bool ownPt=false);
void attachLocalField(const ICoCo::MEDField *field);
-
+
virtual void prepareSourceDE() = 0;
virtual void prepareTargetDE() = 0;
virtual void recvData() = 0;
virtual void sendData() = 0;
void sendRecvData(bool way=true);
virtual void synchronize() = 0;
- virtual ~DisjointDEC();
+
virtual void computeProcGroup() { }
void renormalizeTargetField(bool isWAbs);
//
ExplicitTopology( const ExplicitTopology& topo, int nbcomponents);
ExplicitTopology(const ParaMESH &mesh);
virtual ~ExplicitTopology();
-
+
inline mcIdType getNbElements()const;
inline mcIdType getNbLocalElements() const;
const ProcessorGroup* getProcGroup()const { return _proc_group; }
InterpKernelDEC::~InterpKernelDEC()
{
- if (_interpolation_matrix !=0)
+ release();
+ }
+
+ void InterpKernelDEC::release()
+ {
+ if (_interpolation_matrix != nullptr)
delete _interpolation_matrix;
- }
+ _interpolation_matrix = nullptr;
+ DisjointDEC::cleanInstance();
+ }
+
/*!
\brief Synchronization process for exchanging topologies.
InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
const MPI_Comm& world_comm=MPI_COMM_WORLD);
virtual ~InterpKernelDEC();
+ void release();
+
void synchronize();
void recvData();
void recvData(double time);
MPIProcessorGroup::~MPIProcessorGroup()
{
- _comm_interface.groupFree(&_group);
+ release();
+ }
+
+ /** The destructor involves MPI operations: make sure this clean-up is also reachable through
+ * a dedicated method (release()), so the Python wrapping can invoke it explicitly before MPI is finalized.
+ */
+ void MPIProcessorGroup::release()
+ {
+ if (_group != MPI_GROUP_EMPTY)
+ _comm_interface.groupFree(&_group);
+ _group = MPI_GROUP_EMPTY;
if (_comm!=_world_comm && _comm !=MPI_COMM_NULL)
_comm_interface.commFree(&_comm);
-
+ _comm = MPI_COMM_NULL;
}
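+
+ // A minimal usage sketch (illustration only, not part of the API) of the call order intended
+ // for the Python wrapping; 'interface' and 'procs' stand for a CommInterface and a list of
+ // ranks, as in the Python tests further down in this change:
+ //   grp = MPIProcessorGroup(interface, procs)
+ //   ... use grp, e.g. hand it to an InterpKernelDEC ...
+ //   grp.release()    # frees the MPI group/communicator while MPI is still initialized
+ //   MPI.Finalize()   # (mpi4py will otherwise finalize automatically at exit)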
/*! Translation of the rank id between two processor groups. This method translates rank \a rank
MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD);
MPIProcessorGroup(const MPIProcessorGroup& other);
virtual ~MPIProcessorGroup();
+ void release();
+
virtual MPIProcessorGroup *deepCopy() const;
virtual ProcessorGroup* fuse (const ProcessorGroup&) const;
void intersect (ProcessorGroup&) { }
}
OverlapDEC::~OverlapDEC()
+ {
+ release();
+ }
+
+ /** The destructor involves MPI operations: make sure this clean-up is also reachable through
+ * a dedicated method (release()), so the Python wrapping can invoke it explicitly before MPI is finalized.
+ */
+ void OverlapDEC::release()
{
if(_own_group)
- delete _group;
+ {
+ delete _group;
+ _group = nullptr;
+ }
if(_own_source_field)
- delete _source_field;
+ {
+ delete _source_field;
+ _source_field = nullptr;
+ }
if(_own_target_field)
- delete _target_field;
+ {
+ delete _target_field;
+ _target_field = nullptr;
+ }
delete _interpolation_matrix;
+ _interpolation_matrix = nullptr;
delete _locator;
+ _locator = nullptr;
if (_comm != MPI_COMM_NULL)
{
MEDCoupling::CommInterface comm;
comm.commFree(&_comm);
}
+ _comm = MPI_COMM_NULL;
}
void OverlapDEC::sendRecvData(bool way)
public:
OverlapDEC(const std::set<int>& procIds,const MPI_Comm& world_comm=MPI_COMM_WORLD);
virtual ~OverlapDEC();
+ void release();
+
void sendRecvData(bool way=true);
void sendData();
void recvData();
if(_target_field->getField()->getNature()==IntensiveMaximum)
_mapping.computeDenoConservativeVolumic(_target_field->getField()->getNumberOfTuplesExpected());
else
- throw INTERP_KERNEL::Exception("OverlapDEC: Policy not implemented yet: only IntensiveMaximum!");
+ throw INTERP_KERNEL::Exception("OverlapDEC: Policy not set (did you call setNature()?) or not implemented yet: only IntensiveMaximum!");
// {
// if(_target_field->getField()->getNature()==IntensiveConservation)
// {
}
ParaFIELD::~ParaFIELD()
+ {
+ release();
+ }
+
+ /** The destructor involves MPI operations: make sure this clean-up is also reachable through
+ * a dedicated method (release()), so the Python wrapping can invoke it explicitly before MPI is finalized.
+ */
+ void ParaFIELD::release()
{
if(_field)
- _field->decrRef();
+ {
+ _field->decrRef();
+ _field = nullptr;
+ }
+
if(_own_support)
- delete _support;
+ {
+ delete _support;
+ _support = nullptr;
+ }
delete _topology;
+ _topology = nullptr;
}
void ParaFIELD::synchronizeTarget(ParaFIELD* source_field)
ParaFIELD(TypeOfField type, TypeOfTimeDiscretization td, ParaMESH* mesh, const ComponentTopology& component_topology);
ParaFIELD(MEDCouplingFieldDouble* field, ParaMESH *sup, const ProcessorGroup& group);
virtual ~ParaFIELD();
+ void release();
void synchronizeTarget( MEDCoupling::ParaFIELD* source_field);
void synchronizeSource( MEDCoupling::ParaFIELD* target_field);
}
ParaMESH::~ParaMESH()
+ {
+ release();
+ }
+
+ /** The destructor involves MPI operations: make sure this clean-up is also reachable through
+ * a dedicated method (release()), so the Python wrapping can invoke it explicitly before MPI is finalized.
+ */
+ void ParaMESH::release()
{
delete _block_topology;
delete _explicit_topology;
+ _block_topology = nullptr;
+ _explicit_topology = nullptr;
}
}
const ProcessorGroup& proc_group ) ;
virtual ~ParaMESH();
+ void release();
+
void setNodeGlobal(DataArrayIdType *nodeGlobal);
void setCellGlobal(DataArrayIdType *cellGlobal);
Topology* getTopology() const { return _explicit_topology; }
int _my_domain_id;
//global topology of the cells
- MEDCoupling::BlockTopology* _block_topology;
+ BlockTopology* _block_topology;
Topology* _explicit_topology;
// pointers to global numberings
MCAuto<DataArrayIdType> _node_global;
namespace MEDCoupling
{
- StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0),
- _send_counts(0),_recv_counts(0),
- _send_displs(0),_recv_displs(0),
- _recv_buffer(0),_send_buffer(0)
+ StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(nullptr),_topo_target(nullptr),
+ _owns_topo_source(false), _owns_topo_target(false),
+ _send_counts(nullptr),_recv_counts(nullptr),
+ _send_displs(nullptr),_recv_displs(nullptr),
+ _recv_buffer(nullptr),_send_buffer(nullptr)
{
}
+ StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
+ DisjointDEC(local_group,distant_group),
+ _topo_source(nullptr),_topo_target(nullptr),
+ _owns_topo_source(false), _owns_topo_target(false),
+ _send_counts(nullptr),_recv_counts(nullptr),
+ _send_displs(nullptr),_recv_displs(nullptr),
+ _recv_buffer(nullptr),_send_buffer(nullptr)
+ {
+ }
StructuredCoincidentDEC::~StructuredCoincidentDEC()
+ {
+ release();
+ }
+
+ /** The destructor involves MPI operations: make sure this clean-up is also reachable through
+ * a dedicated method (release()), so the Python wrapping can invoke it explicitly before MPI is finalized.
+ */
+ void StructuredCoincidentDEC::release()
{
delete [] _send_buffer;
delete [] _recv_buffer;
- delete []_send_displs;
+ delete [] _send_displs;
delete [] _recv_displs;
delete [] _send_counts;
delete [] _recv_counts;
- if (! _source_group->containsMyRank())
+ _send_buffer = nullptr;
+ _recv_buffer = nullptr;
+ _send_displs = nullptr;
+ _recv_displs = nullptr;
+ _send_counts = nullptr;
+ _recv_counts = nullptr;
+
+ if (_owns_topo_source)
delete _topo_source;
- if(!_target_group->containsMyRank())
+ if (_owns_topo_target)
delete _topo_target;
- }
+ _topo_source = nullptr;
+ _topo_target = nullptr;
+ _owns_topo_source = false;
+ _owns_topo_target = false;
- StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
- DisjointDEC(local_group,distant_group),
- _topo_source(0),_topo_target(0),
- _send_counts(0),_recv_counts(0),
- _send_displs(0),_recv_displs(0),
- _recv_buffer(0),_send_buffer(0)
- {
+ DisjointDEC::cleanInstance();
}
/*! Synchronization process for exchanging topologies
{
if (_source_group->containsMyRank())
_topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
+ else
+ _owns_topo_source = true; // _topo_source will be filled by broadcastTopology below
if (_target_group->containsMyRank())
_topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());
+ else
+ _owns_topo_target = true; // _topo_target will be filled by broadcastTopology below
// Transmitting source topology to target code
+ MESSAGE("Broadcast source topo ...");
broadcastTopology(_topo_source,1000);
+
// Transmitting target topology to source code
+ MESSAGE("Broadcast target topo ...");
broadcastTopology(_topo_target,2000);
if (_topo_source->getNbElements() != _topo_target->getNbElements())
throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
-
}
/*! Creates the arrays necessary for the data transfer
MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
- // The master proc creates a send buffer containing
- // a serialized topology
+ // The master proc creates a send buffer containing a serialized topology
int rank_master;
if (topo!=0 && topo->getProcGroup()->myRank()==0)
{
MESSAGE(" rank "<<group->myRank()<< " waiting ...");
_comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
- MESSAGE(" rank "<<group->myRank()<< "received master rank"<<rank_master);
+ MESSAGE(" rank "<<group->myRank()<< " received master rank "<<rank_master);
}
// The topology is broadcasted to all processors in the group
_comm_interface->broadcast(&size, 1,MPI_ID_TYPE,rank_master,*(group->getComm()));
copy(serializer, serializer+size, buffer);
_comm_interface->broadcast(buffer,(int)size,MPI_ID_TYPE,rank_master,*(group->getComm()));
- // Processors which did not possess the source topology
- // unserialize it
-
+ // Processors which did not possess the source topology unserialize it
BlockTopology* topotemp=new BlockTopology();
topotemp->unserialize(buffer, *_comm_interface);
synchronizeTopology();
prepareTargetDE();
}
+ MESSAGE("sync OK");
}
}
StructuredCoincidentDEC();
StructuredCoincidentDEC( ProcessorGroup& source, ProcessorGroup& target);
virtual ~StructuredCoincidentDEC();
+ void release();
+
void synchronize();
void recvData();
void sendData();
BlockTopology* _topo_source;
BlockTopology* _topo_target;
+
+ bool _owns_topo_source;
+ bool _owns_topo_target;
+
int* _send_counts;
int* _recv_counts;
int* _send_displs;
${medcoupling_HEADERS_HXX} ${medcoupling_HEADERS_TXX}
${interpkernel_HEADERS_HXX} ${interpkernel_HEADERS_TXX})
-IF(${CMAKE_VERSION} VERSION_LESS "3.8.0")
+IF(${CMAKE_VERSION} VERSION_LESS "3.8.0")
SWIG_ADD_MODULE(ParaMEDMEM python ParaMEDMEM.i)
ELSE()
SWIG_ADD_LIBRARY(ParaMEDMEM LANGUAGE python SOURCES ParaMEDMEM.i)
SWIG_LINK_LIBRARIES(ParaMEDMEM ${PYTHON_LIBRARIES} paramedmem)
SWIG_CHECK_GENERATION(ParaMEDMEM)
+#
+# Tests
+#
+SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env)
+
+# -- some tests require 2, 3, 4 or 5 procs --
+# MPICH does not support --oversubscribe:
+IF(NOT ${MPIEXEC_EXECUTABLE} MATCHES "mpich")
+ SET(_oversub_opt "--oversubscribe")
+ENDIF()
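+
+# (To run one of these tests by hand, outside of ctest, something along the lines of
+#  "mpirun -np 4 --oversubscribe python3 test_InterpKernelDEC.py" should do, provided
+#  the MEDCoupling environment -- PYTHONPATH, MEDCOUPLING_ROOT_DIR -- is set up first.)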
+
+ADD_TEST(NAME PyPara_Basics_Proc2
+ COMMAND ${MPIEXEC} -np 2 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_BasicOperation.py)
+SET_TESTS_PROPERTIES(PyPara_Basics_Proc2 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ADD_TEST(NAME PyPara_InterpKernelDEC_Proc4
+ COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
+SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
+ADD_TEST(NAME PyPara_InterpKernelDEC_Proc5
+ COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
+SET_TESTS_PROPERTIES(PyPara_InterpKernelDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
+
+#ADD_TEST(NAME PyPara_NonCoincidentDEC_Proc5
+# COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_NonCoincidentDEC.py)
+#SET_TESTS_PROPERTIES(PyPara_NonCoincidentDEC_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ADD_TEST(NAME PyPara_StructuredCoincidentDEC_Proc4
+ COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_StructuredCoincidentDEC.py)
+SET_TESTS_PROPERTIES(PyPara_StructuredCoincidentDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ADD_TEST(NAME PyPara_OverlapDEC_Proc4
+ COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_OverlapDEC.py)
+SET_TESTS_PROPERTIES(PyPara_OverlapDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
+
+SET(_tst_scripts
+ test_InterpKernelDEC.py
+ test_NonCoincidentDEC.py
+ test_StructuredCoincidentDEC.py
+ test_OverlapDEC.py
+)
+
INSTALL(TARGETS _ParaMEDMEM DESTINATION ${MEDCOUPLING_INSTALL_PYTHON})
INSTALL(FILES ParaMEDMEM.i ParaMEDMEMCommon.i DESTINATION ${MEDCOUPLING_INSTALL_HEADERS})
-INSTALL(FILES test_InterpKernelDEC.py test_NonCoincidentDEC.py test_StructuredCoincidentDEC.py DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
+INSTALL(FILES ${_tst_scripts} DESTINATION ${MEDCOUPLING_INSTALL_SCRIPT_PYTHON})
SALOME_INSTALL_SCRIPTS(${CMAKE_CURRENT_BINARY_DIR}/ParaMEDMEM.py ${MEDCOUPLING_INSTALL_PYTHON} EXTRA_DPYS "${SWIG_MODULE_ParaMEDMEM_REAL_NAME}")
#include "InterpKernelDEC.hxx"
#include "NonCoincidentDEC.hxx"
#include "StructuredCoincidentDEC.hxx"
+#include "OverlapDEC.hxx"
#include "ParaMESH.hxx"
#include "ParaFIELD.hxx"
#include "ICoCoMEDField.hxx"
%include "DisjointDEC.hxx"
%include "InterpKernelDEC.hxx"
%include "StructuredCoincidentDEC.hxx"
+%include "OverlapDEC.hxx"
%include "ICoCoField.hxx"
%rename(ICoCoMEDField) ICoCo::MEDField;
--- /dev/null
+# -*- coding: iso-8859-1 -*-
+# Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author : Anthony Geay (EDF R&D)
+
+def WriteInTmpDir(func):
+ def decoratedFunc(*args,**kwargs):
+ import tempfile,os
+ ret = None
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ os.chdir(tmpdirname)
+ ret = func(*args,**kwargs)
+ pass
+ return ret
+ return decoratedFunc
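+
+# Typical usage, as in the test modules of this directory (a sketch -- 'msh' stands for
+# whatever mesh/field the test builds):
+#
+#   class MyTests(unittest.TestCase):
+#       @WriteInTmpDir
+#       def test_something(self):
+#           WriteUMesh("./mesh.med", msh, True)   # written inside a throw-away directory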
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
-from ParaMEDMEM import *
-from MEDLoader import ReadUMeshFromFile
+import ParaMEDMEM as pm
+from medcoupling import ReadUMeshFromFile, WriteField, WriteUMesh
+from ParaMEDMEMTestTools import WriteInTmpDir
import sys, os
import unittest
import math
from mpi4py import MPI
-class ParaMEDMEMBasicsTest(unittest.TestCase):
- def testInterpKernelDEC_2D(self):
+class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
+ """ See test_StructuredCoincidentDEC_py_1() for a quick start.
+ """
+ def generateFullSource(self):
+ """ The complete source mesh: 4 squares, each divided into 2 diagonally (so 8 cells in total) """
+ msh = self.generateFullTarget()
+ msh.simplexize(0)
+ msh.setName("src_mesh")
+ fld = pm.MEDCouplingFieldDouble(pm.ON_CELLS, pm.ONE_TIME)
+ fld.setMesh(msh); fld.setName("source_F");
+ da = pm.DataArrayDouble(msh.getNumberOfCells())
+ da.iota()
+ da *= 2
+ fld.setArray(da)
+ return msh, fld
+
+ def generateFullTarget(self):
+ """ The complete target mesh: 4 squares """
+ m1 = pm.MEDCouplingCMesh("tgt_msh")
+ da = pm.DataArrayDouble([0,1,2])
+ m1.setCoords(da, da)
+ msh = m1.buildUnstructured()
+ return msh
+
+ #
+ # Below, the two functions emulating the set-up of a piece of the source and target mesh
+ # on each proc. Obviously, in real-world problems this comes from your code and is certainly
+ # not computed by cutting the full-size mesh from scratch again!
+ #
+ def getPartialSource(self, rank):
+ """ Will return a piece of the source mesh/field for ranks 0 and 1 (the source group) """
+ msh, f = self.generateFullSource()
+ if rank == 0:
+ sub_m, sub_f = msh[0:4], f[0:4]
+ elif rank == 1:
+ sub_m, sub_f = msh[4:8], f[4:8]
+ sub_m.zipCoords()
+ return sub_m, sub_f
+
+ def getPartialTarget(self, rank):
+ """ Two squares of the target mesh for each target rank (2 and 3) """
+ msh = self.generateFullTarget()
+ if rank == 2:
+ sub_m = msh[[0,2]]
+ elif rank == 3:
+ sub_m = msh[[1,3]]
+ sub_m.zipCoords()
+ # Receiving side must prepare an empty field that will be filled by DEC:
+ fld = pm.MEDCouplingFieldDouble(pm.ON_CELLS, pm.ONE_TIME)
+ da = pm.DataArrayDouble(sub_m.getNumberOfCells())
+ fld.setArray(da)
+ fld.setName("tgt_F")
+ fld.setMesh(sub_m)
+ return sub_m, fld
+
+ @WriteInTmpDir
+ def testInterpKernelDEC_2D_py_1(self):
+ """ This test illustrates a basic use of the InterpKernelDEC.
+ Look at the C++ documentation of the class for more information.
+ """
+ size = MPI.COMM_WORLD.size
+ rank = MPI.COMM_WORLD.rank
+ if size != 4:
+ print("Should be run on 4 procs!")
+ return
+
+ # Define two processor groups
+ nproc_source = 2
+ procs_source = list(range(nproc_source))
+ procs_target = list(range(size - nproc_source, size))
+
+ interface = pm.CommInterface()
+ source_group = pm.MPIProcessorGroup(interface, procs_source)
+ target_group = pm.MPIProcessorGroup(interface, procs_target)
+ idec = pm.InterpKernelDEC(source_group, target_group)
+
+ # Write out full size meshes/fields for inspection
+ if rank == 0:
+ _, fld = self.generateFullSource()
+ mshT = self.generateFullTarget()
+ WriteField("./source_field_FULL.med", fld, True)
+ WriteUMesh("./target_mesh_FULL.med", mshT, True)
+
+ MPI.COMM_WORLD.Barrier() # really necessary??
+
+ #
+ # OK, let's go DEC !!
+ #
+ if source_group.containsMyRank():
+ _, fieldS = self.getPartialSource(rank)
+ fieldS.setNature(pm.IntensiveMaximum) # The only policy supported for now ...
+ WriteField("./source_field_part_%d.med" % rank, fieldS, True)
+ idec.attachLocalField(fieldS)
+ idec.synchronize()
+ idec.sendData()
+
+ if target_group.containsMyRank():
+ mshT, fieldT = self.getPartialTarget(rank)
+ fieldT.setNature(pm.IntensiveMaximum)
+ WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
+ idec.attachLocalField(fieldT)
+ idec.synchronize()
+ idec.recvData()
+ # Now the actual checks:
+ if rank == 2:
+ self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0])
+ elif rank == 3:
+ self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0])
+
+ # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
+ idec.release()
+ source_group.release()
+ target_group.release()
+ MPI.COMM_WORLD.Barrier()
+
+ @WriteInTmpDir
+ def test_InterpKernelDEC_2D_py_2(self):
+ """ More involved test using Para* objects.
+ """
size = MPI.COMM_WORLD.size
rank = MPI.COMM_WORLD.rank
if size != 5:
- raise RuntimeError("Expect MPI_COMM_WORLD size == 5")
+ print("Should be run on 5 procs!")
+ return
+
print(rank)
nproc_source = 3
procs_source = list(range(nproc_source))
procs_target = list(range(size - nproc_source + 1, size))
- interface = CommInterface()
- target_group = MPIProcessorGroup(interface, procs_target)
- source_group = MPIProcessorGroup(interface, procs_source)
- dec = InterpKernelDEC(source_group, target_group)
+ interface = pm.CommInterface()
+ target_group = pm.MPIProcessorGroup(interface, procs_target)
+ source_group = pm.MPIProcessorGroup(interface, procs_source)
+ dec = pm.InterpKernelDEC(source_group, target_group)
- mesh =0
- support =0
- paramesh =0
- parafield =0
- icocofield =0
data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
- tmp_dir = os.environ['TMP']
-
- if not tmp_dir or len(tmp_dir)==0:
- tmp_dir = "/tmp"
- pass
filename_xml1 = os.path.join(data_dir, "share/resources/med/square1_split")
filename_xml2 = os.path.join(data_dir, "share/resources/med/square2_split")
filename = filename_xml1 + str(rank+1) + ".med"
meshname = "Mesh_2_" + str(rank+1)
mesh=ReadUMeshFromFile(filename,meshname,0)
- paramesh=ParaMESH(mesh,source_group,"source mesh")
- comptopo = ComponentTopology()
- parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
- parafield.getField().setNature(IntensiveMaximum)
+ paramesh=pm.ParaMESH(mesh,source_group,"source mesh")
+ comptopo = pm.ComponentTopology()
+ parafield = pm.ParaFIELD(pm.ON_CELLS,pm.NO_TIME,paramesh, comptopo)
+ parafield.getField().setNature(pm.IntensiveMaximum)
nb_local=mesh.getNumberOfCells()
value = [1.0]*nb_local
parafield.getField().setValues(value)
- icocofield = ICoCoMEDField(parafield.getField())
+ icocofield = pm.ICoCoMEDField(parafield.getField())
dec.attachLocalField(icocofield)
pass
else:
filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
meshname = "Mesh_3_" + str(rank - nproc_source + 1)
mesh=ReadUMeshFromFile(filename,meshname,0)
- paramesh=ParaMESH(mesh,target_group,"target mesh")
- comptopo = ComponentTopology()
- parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
- parafield.getField().setNature(IntensiveMaximum)
+ paramesh=pm.ParaMESH(mesh,target_group,"target mesh")
+ comptopo = pm.ComponentTopology()
+ parafield = pm.ParaFIELD(pm.ON_CELLS,pm.NO_TIME,paramesh, comptopo)
+ parafield.getField().setNature(pm.IntensiveMaximum)
nb_local=mesh.getNumberOfCells()
value = [0.0]*nb_local
parafield.getField().setValues(value)
- icocofield = ICoCoMEDField(parafield.getField())
+ icocofield = pm.ICoCoMEDField(parafield.getField())
dec.attachLocalField(icocofield)
pass
dec.sendData()
pass
## end
- interface = 0
- target_group = 0
- source_group = 0
- dec = 0
- mesh =0
- support =0
- paramesh =0
- parafield =0
- icocofield =0
+
+ # Some clean-up still needs MPI communication, so it has to be done before MPI_Finalize()
+ parafield.release()
+ paramesh.release()
+ dec.release()
+ target_group.release()
+ source_group.release()
MPI.COMM_WORLD.Barrier()
- MPI.Finalize()
- pass
- pass
-unittest.main()
+if __name__ == "__main__":
+ unittest.main()
+ MPI.Finalize()
+
#
from ParaMEDMEM import *
-import sys, os
+from mpi4py import MPI
+import unittest
+import os
-MPI_Init(sys.argv)
+class ParaMEDMEM_DEC_Tests(unittest.TestCase):
+ def test_NonCoincidentDEC_py(self):
+ size = MPI.COMM_WORLD.size
+ rank = MPI.COMM_WORLD.rank
-size = MPI_Comm_size(MPI_COMM_WORLD)
-rank = MPI_Comm_rank(MPI_COMM_WORLD)
-if size != 5:
- raise RuntimeError("Expect MPI_COMM_WORLD size == 5")
+ if size != 5:
+ raise RuntimeError("Expect MPI.COMM_WORLD size == 5")
-nproc_source = 3
-procs_source = list(range(nproc_source))
-procs_target = list(range(size - nproc_source + 1, size))
+ nproc_source = 3
+ procs_source = list(range(nproc_source))
+ procs_target = list(range(size - nproc_source + 1, size))
-interface = CommInterface()
+ interface = CommInterface()
-target_group = MPIProcessorGroup(interface, procs_target)
-source_group = MPIProcessorGroup(interface, procs_source)
+ target_group = MPIProcessorGroup(interface, procs_target)
+ source_group = MPIProcessorGroup(interface, procs_source)
-source_mesh= 0
-target_mesh= 0
-parasupport= 0
-mesh = 0
-support = 0
-field = 0
-paramesh = 0
-parafield = 0
-icocofield = 0
+ dec = NonCoincidentDEC(source_group, target_group)
-dec = NonCoincidentDEC(source_group, target_group)
+ data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
+ tmp_dir = os.environ['TMP']
+ if tmp_dir == '':
+ tmp_dir = "/tmp"
+ pass
-data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
-tmp_dir = os.environ['TMP']
-if tmp_dir == '':
- tmp_dir = "/tmp"
- pass
+ filename_xml1 = data_dir + "/share/resources/med/square1_split"
+ filename_xml2 = data_dir + "/share/resources/med/square2_split"
-filename_xml1 = data_dir + "/share/resources/med/square1_split"
-filename_xml2 = data_dir + "/share/resources/med/square2_split"
+ MPI.COMM_WORLD.Barrier()
-MPI_Barrier(MPI_COMM_WORLD)
+ if source_group.containsMyRank():
+ filename = filename_xml1 + str(rank+1) + ".med"
+ meshname = "Mesh_2_" + str(rank+1)
-if source_group.containsMyRank():
+ mesh = MESH(MED_DRIVER, filename, meshname)
+ support = SUPPORT(mesh, "all elements", MED_CELL)
+ paramesh = ParaMESH(mesh, source_group, "source mesh")
- filename = filename_xml1 + str(rank+1) + ".med"
- meshname = "Mesh_2_" + str(rank+1)
+ parasupport = UnstructuredParaSUPPORT( support, source_group)
+ comptopo = ComponentTopology()
- mesh = MESH(MED_DRIVER, filename, meshname)
- support = SUPPORT(mesh, "all elements", MED_CELL)
- paramesh = ParaMESH(mesh, source_group, "source mesh")
+ parafield = ParaFIELD(parasupport, comptopo)
- parasupport = UnstructuredParaSUPPORT( support, source_group)
- comptopo = ComponentTopology()
+ nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS);
- parafield = ParaFIELD(parasupport, comptopo)
+ value = [1.0]*nb_local
- nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS);
+ parafield.getField().setValue(value)
+ icocofield = ICoCo_MEDField(paramesh,parafield)
+ dec.attachLocalField(icocofield,'P0')
+ pass
- value = [1.0]*nb_local
+ if target_group.containsMyRank():
+ filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
+ meshname = "Mesh_3_" + str(rank - nproc_source + 1)
- parafield.getField().setValue(value)
- icocofield = ICoCo_MEDField(paramesh,parafield)
- dec.attachLocalField(icocofield,'P0')
- pass
+ mesh = MESH(MED_DRIVER, filename, meshname)
+ support = SUPPORT(mesh, "all elements", MED_CELL)
+ paramesh = ParaMESH(mesh, target_group, "target mesh")
-if target_group.containsMyRank():
+ parasupport = UnstructuredParaSUPPORT( support, target_group)
+ comptopo = ComponentTopology()
+ parafield = ParaFIELD(parasupport, comptopo)
- filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
- meshname = "Mesh_3_" + str(rank - nproc_source + 1)
+ nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS)
+ value = [0.0]*nb_local
- mesh = MESH(MED_DRIVER, filename, meshname)
- support = SUPPORT(mesh, "all elements", MED_CELL)
- paramesh = ParaMESH(mesh, target_group, "target mesh")
+ parafield.getField().setValue(value)
+ icocofield = ICoCo_MEDField(paramesh,parafield)
- parasupport = UnstructuredParaSUPPORT( support, target_group)
- comptopo = ComponentTopology()
- parafield = ParaFIELD(parasupport, comptopo)
+ dec.attachLocalField(icocofield, 'P0')
+ pass
- nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS)
- value = [0.0]*nb_local
+ field_before_int = [0.0]
+ field_after_int = [0.0]
- parafield.getField().setValue(value)
- icocofield = ICoCo_MEDField(paramesh,parafield)
+ if source_group.containsMyRank():
+ field_before_int = [parafield.getVolumeIntegral(1)]
+ field_before_int = MPI.COMM_WORLD.bcast(field_before_int, root=0)
+ dec.synchronize()
+ print("DEC usage")
+ dec.setForcedRenormalization(False)
+ dec.sendData()
+ pass
- dec.attachLocalField(icocofield, 'P0')
- pass
+ if target_group.containsMyRank():
+ field_before_int = MPI.COMM_WORLD.bcast(field_before_int, root=0)
+ dec.synchronize()
+ dec.setForcedRenormalization(False)
+ dec.recvData()
+ field_after_int = [parafield.getVolumeIntegral(1)]
+ pass
-field_before_int = [0.0]
-field_after_int = [0.0]
+ field_before_int = MPI.COMM_WORLD.bcast(field_before_int, root=0)
+ field_after_int = MPI.COMM_WORLD.bcast(field_after_int, root=size-1)
-if source_group.containsMyRank():
+ epsilon = 1e-6
+ self.assertAlmostEqual(field_before_int[0], field_after_int[0], delta=epsilon)
- field_before_int = [parafield.getVolumeIntegral(1)]
- MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
- dec.synchronize()
- print("DEC usage")
- dec.setForcedRenormalization(False)
+ # Some clean-up still needs MPI communication, so it has to be done before MPI_Finalize()
+ dec.release()
+ target_group.release()
+ source_group.release()
- dec.sendData()
- pass
+ MPI.COMM_WORLD.Barrier()
+ MPI.Finalize()
-if target_group.containsMyRank():
+if __name__ == "__main__":
+ unittest.main()
- MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD)
- dec.synchronize()
- dec.setForcedRenormalization(False)
- dec.recvData()
- field_after_int = [parafield.getVolumeIntegral(1)]
- pass
-
-MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD)
-MPI_Bcast(field_after_int , 1, MPI_DOUBLE, size-1, MPI_COMM_WORLD)
-
-epsilon = 1e-6
-if abs(field_before_int[0] - field_after_int[0]) > epsilon:
- print("Field before is not equal field after: %s != %s"%\
- (field_before_int[0],field_after_int[0]))
- pass
-
-
-MPI_Barrier(MPI_COMM_WORLD)
-MPI_Finalize()
-print("# End of testNonCoincidentDEC")
--- /dev/null
+#!/usr/bin/env python
+
+import ParaMEDMEM as pm
+from medcoupling import WriteField, WriteUMesh
+from ParaMEDMEMTestTools import WriteInTmpDir
+import sys, os
+import unittest
+import math
+from mpi4py import MPI
+
+class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
+ """ This test illustrates a basic use of the OverlapDEC and shows notably that not all
+ processors must possess a piece of the source and/or target mesh.
+ Look at the C++ documentation of the class for more information.
+ In this case, the source mesh is only stored on 2 procs, whereas the target is on 4.
+ Since only a single group of processors is defined in the setup, the 2 idle procs on the source side just provide an empty mesh,
+ thus indicating that they don't participate in the source definition.
+
+ Main method is testOverlapDEC_2D_py_1()
+ """
+
+ def generateFullSource(self):
+ """ The complete source mesh: 4 squares, each divided into 2 diagonally (so 8 cells in total) """
+ msh = self.generateFullTarget()
+ msh.simplexize(0)
+ msh.setName("src_mesh")
+ fld = pm.MEDCouplingFieldDouble(pm.ON_CELLS, pm.ONE_TIME)
+ fld.setMesh(msh); fld.setName("source_F");
+ da = pm.DataArrayDouble(msh.getNumberOfCells())
+ da.iota()
+ da *= 2
+ fld.setArray(da)
+ return msh, fld
+
+ def generateFullTarget(self):
+ """ The complete target mesh: 4 squares """
+ m1 = pm.MEDCouplingCMesh("tgt_msh")
+ da = pm.DataArrayDouble([0,1,2])
+ m1.setCoords(da, da)
+ msh = m1.buildUnstructured()
+ return msh
+
+ #
+ # Below, the two functions emulating the set-up of a piece of the source and target mesh
+ # on each proc. Obviously, in real-world problems this comes from your code and is certainly
+ # not computed by cutting the full-size mesh from scratch again!
+ #
+ def getPartialSource(self, rank):
+ """ Will return an empty mesh piece for rank=2 and 3 """
+ msh, f = self.generateFullSource()
+ if rank in [2,3]:
+ sub_m, sub_f = msh[[]], f[[]] # Little trick to select nothing in the mesh, thus producing an empty mesh
+ elif rank == 0:
+ sub_m, sub_f = msh[0:4], f[0:4]
+ elif rank == 1:
+ sub_m, sub_f = msh[4:8], f[4:8]
+ sub_m.zipCoords()
+ return sub_m, sub_f
+
+ def getPartialTarget(self, rank):
+ """ One square for each rank """
+ msh = self.generateFullTarget()
+ sub_m = msh[rank]
+ sub_m.zipCoords()
+ # Receiving side must prepare an empty field that will be filled by DEC:
+ fld = pm.MEDCouplingFieldDouble(pm.ON_CELLS, pm.ONE_TIME)
+ da = pm.DataArrayDouble(sub_m.getNumberOfCells())
+ fld.setArray(da)
+ fld.setName("tgt_F")
+ fld.setMesh(sub_m)
+ return sub_m, fld
+
+ @WriteInTmpDir
+ def testOverlapDEC_2D_py_1(self):
+ """ The main method of the test """
+ size = MPI.COMM_WORLD.size
+ rank = MPI.COMM_WORLD.rank
+ if size != 4:
+ raise RuntimeError("Should be run on 4 procs!")
+
+ # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
+ proc_group = list(range(size)) # No need for ProcessorGroup object here.
+ odec = pm.OverlapDEC(proc_group)
+
+ # Write out full size meshes/fields for inspection
+ if rank == 0:
+ _, fld = self.generateFullSource()
+ mshT = self.generateFullTarget()
+ WriteField("./source_field_FULL.med", fld, True)
+ WriteUMesh("./target_mesh_FULL.med", mshT, True)
+
+ MPI.COMM_WORLD.Barrier() # really necessary??
+
+ #
+ # OK, let's go DEC !!
+ #
+ _, fieldS = self.getPartialSource(rank)
+ fieldS.setNature(pm.IntensiveMaximum) # The only policy supported for now ...
+ mshT, fieldT = self.getPartialTarget(rank)
+ fieldT.setNature(pm.IntensiveMaximum)
+ if rank not in [2,3]:
+ WriteField("./source_field_part_%d.med" % rank, fieldS, True)
+ WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
+
+ odec.attachSourceLocalField(fieldS)
+ odec.attachTargetLocalField(fieldT)
+ odec.synchronize()
+ odec.sendRecvData()
+
+ # Now the actual checks:
+ if rank == 0:
+ self.assertEqual(fieldT.getArray().getValues(), [1.0])
+ elif rank == 1:
+ self.assertEqual(fieldT.getArray().getValues(), [5.0])
+ elif rank == 2:
+ self.assertEqual(fieldT.getArray().getValues(), [9.0])
+ elif rank == 3:
+ self.assertEqual(fieldT.getArray().getValues(), [13.0])
+
+ # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
+ odec.release()
+
+ MPI.COMM_WORLD.Barrier()
+
+if __name__ == "__main__":
+ unittest.main()
+ MPI.Finalize()
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
-from ParaMEDMEM import *
-import sys, os
+import ParaMEDMEM as pm
+from medcoupling import WriteField, WriteUMesh, ReadUMeshFromFile
+from ParaMEDMEMTestTools import WriteInTmpDir
+import os
import unittest
import math
+from mpi4py import MPI
+
+
+class ParaMEDMEM_SC_DEC_Tests(unittest.TestCase):
+ """ See test_StructuredCoincidentDEC_py_1() for a quick start.
+ """
+
+ def generateFullMeshField(self):
+ """ The complete mesh: 4 squares, each divided into 2 diagonally (so 8 cells in total)
+ Note that in this case, this is the **only** mesh for the whole problem.
+ """
+ m1 = pm.MEDCouplingCMesh("tgt_msh")
+ da = pm.DataArrayDouble([0,1,2])
+ m1.setCoords(da, da)
+ msh = m1.buildUnstructured()
+
+ msh.simplexize(0)
+ msh.setName("src_mesh")
+ fld = pm.MEDCouplingFieldDouble(pm.ON_CELLS, pm.ONE_TIME)
+ fld.setMesh(msh); fld.setName("source_F");
+ da = pm.DataArrayDouble(msh.getNumberOfCells())
+ da.iota()
+ da *= 2
+ fld.setArray(da)
+ return msh, fld
+
+ #
+ # Below, the function emulating the set up of a piece of the mesh being owned by
+ # a given processor.
+ #
+ def getPartialSource(self, rank):
+ msh, f = self.generateFullMeshField()
+ if rank == 0:
+ sub_ids = [0,1,4,5]
+ elif rank == 1:
+ sub_ids = [2,3,6,7]
+ sub_m, sub_f = msh[sub_ids], f[sub_ids]
+ sub_m.zipCoords()
+ return sub_m, sub_f
+
+ def getPartialTarget(self, rank):
+ msh, f = self.generateFullMeshField()
+ if rank == 2:
+ sub_ids = [0,1,2,3]
+ elif rank == 3:
+ sub_ids = [4,5,6,7]
+ sub_m, sub_f = msh[sub_ids], f[sub_ids]
+ sub_m.zipCoords()
+ return sub_m, sub_f
+
+ @WriteInTmpDir
+ def test_StructuredCoincidentDEC_py_1(self):
+ """ This test illustrates a basic use of the StructuredCoincidentDEC, which allows one to
+ redistribute a field/mesh that is already scattered over several processors into a different configuration.
+ Look at the C++ documentation of the class for more information.
+ Note that in the case of the StructuredCoincidentDEC no interpolation whatsoever is performed. This is only
+ really a redistribution of the data among the processors.
+ """
+ size = MPI.COMM_WORLD.size
+ rank = MPI.COMM_WORLD.rank
+ if size != 4:
+ print("Should be run on 4 procs!")
+ return
+
+ # Define two processor groups
+ nproc_source = 2
+ procs_source = list(range(nproc_source))
+ procs_target = list(range(size - nproc_source, size))
+
+ interface = pm.CommInterface()
+ source_group = pm.MPIProcessorGroup(interface, procs_source)
+ target_group = pm.MPIProcessorGroup(interface, procs_target)
+
+ scdec = pm.StructuredCoincidentDEC(source_group, target_group)
+
+ # Write out full size meshes/fields for inspection
+ if rank == 0:
+ _, fld = self.generateFullMeshField()
+ WriteField("./source_field_FULL.med", fld, True)
-class ParaMEDMEMBasicsTest2(unittest.TestCase):
- def testStructuredCoincidentDEC(self):
- MPI_Init(sys.argv)
#
- size = MPI_Comm_size(MPI_COMM_WORLD)
- rank = MPI_Comm_rank(MPI_COMM_WORLD)
+ # OK, let's go DEC !!
#
+ if source_group.containsMyRank():
+ _, fieldS = self.getPartialSource(rank)
+ fieldS.setNature(pm.IntensiveMaximum) # The only policy supported for now ...
+ WriteField("./source_field_part_%d.med" % rank, fieldS, True)
+ scdec.attachLocalField(fieldS)
+ scdec.synchronize()
+ scdec.sendData()
+
+ if target_group.containsMyRank():
+ mshT, fieldT = self.getPartialTarget(rank)
+ fieldT.setNature(pm.IntensiveMaximum)
+ WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
+ scdec.attachLocalField(fieldT)
+ scdec.synchronize()
+ scdec.recvData()
+ # Now the actual checks:
+ if rank == 2:
+ self.assertEqual(fieldT.getArray().getValues(), [0.0, 2.0, 8.0, 10.0])
+ elif rank == 3:
+ self.assertEqual(fieldT.getArray().getValues(), [4.0, 6.0, 12.0, 14.0])
+
+ # Release the DEC (this involves MPI exchanges -- so it is better done before MPI.Finalize())
+ scdec.release()
+ source_group.release()
+ target_group.release()
+
+ MPI.COMM_WORLD.Barrier()
+
+ @WriteInTmpDir
+ def test_StructuredCoincidentDEC_py_2(self):
+ """ More involved tests using Para* objects ...
+ """
+ size = MPI.COMM_WORLD.size
+ rank = MPI.COMM_WORLD.rank
+
if size < 4:
- raise RuntimeError("Expect MPI_COMM_WORLD size >= 4")
- #
- interface = CommInterface()
- #
- self_group = MPIProcessorGroup(interface, rank, rank)
- target_group = MPIProcessorGroup(interface, 3, size-1)
- source_group = MPIProcessorGroup(interface, 0, 2)
- #
- mesh = 0
- support = 0
- paramesh = 0
- parafield = 0
- comptopo = 0
- icocofield= 0
- #
+ print("Should be run on >= 4 procs!")
+ return
+
+ interface = pm.CommInterface()
+
+ source_group = pm.MPIProcessorGroup(interface, 0, 2)
+ target_group = pm.MPIProcessorGroup(interface, 3, size-1)
+ self_group = pm.MPIProcessorGroup(interface, rank, rank)
+
data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
- tmp_dir = os.environ['TMP']
- if tmp_dir == '':
- tmp_dir = "/tmp"
- pass
filename_xml1 = data_dir + "/share/resources/med/square1_split"
filename_2 = data_dir + "/share/resources/med/square1.med"
- filename_seq_wr = tmp_dir + "/"
- filename_seq_med = tmp_dir + "/myWrField_seq_pointe221.med"
+ filename_seq_wr = "."
+ filename_seq_med = "./myWrField_seq_pointe221.med"
+
+ dec = pm.StructuredCoincidentDEC(source_group, target_group)
- dec = StructuredCoincidentDEC(source_group, target_group)
- MPI_Barrier(MPI_COMM_WORLD)
+ MPI.COMM_WORLD.Barrier()
if source_group.containsMyRank():
filename = filename_xml1 + str(rank+1) + ".med"
meshname = "Mesh_2_" + str(rank+1)
- mesh=ReadUMeshFromFile(filename,meshname,0)
- paramesh=ParaMESH(mesh,source_group,"source mesh")
- comptopo=ComponentTopology(6)
- parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh,comptopo)
- parafield.getField().setNature(IntensiveMaximum)
+ mesh = ReadUMeshFromFile(filename,meshname,0)
+ paramesh=pm.ParaMESH(mesh,source_group,"source mesh")
+ comptopo=pm.ComponentTopology(6)
+ parafield=pm.ParaFIELD(pm.ON_CELLS,pm.NO_TIME,paramesh,comptopo)
+ parafield.getField().setNature(pm.IntensiveMaximum)
nb_local=mesh.getNumberOfCells()
global_numbering=paramesh.getGlobalNumberingCell2()
value = []
for ielem in range(nb_local):
for icomp in range(6):
value.append(global_numbering[ielem]*6.0+icomp);
- pass
- pass
parafield.getField().setValues(value)
- icocofield = ICoCoMEDField(mesh,parafield.getField())
+ icocofield = pm.ICoCoMEDField(parafield.getField())
dec.setMethod("P0")
dec.attachLocalField(parafield)
dec.synchronize()
dec.sendData()
- pass
if target_group.containsMyRank():
meshname2 = "Mesh_2"
- mesh=ReadUMeshFromFile(filename_2, meshname2,0)
- paramesh=ParaMESH(mesh, self_group, "target mesh")
- comptopo=ComponentTopology(6,target_group)
- parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
- parafield.getField().setNature(IntensiveMaximum)
+ mesh = ReadUMeshFromFile(filename_2, meshname2,0)
+ paramesh = pm.ParaMESH(mesh, self_group, "target mesh")
+ comptopo = pm.ComponentTopology(6,target_group)
+ parafield = pm.ParaFIELD(pm.ON_CELLS,pm.NO_TIME,paramesh, comptopo)
+ parafield.getField().setNature(pm.IntensiveMaximum)
nb_local=mesh.getNumberOfCells()
value = [0.0]*(nb_local*comptopo.nbLocalComponents())
parafield.getField().setValues(value)
- icocofield = ICoCoMEDField(mesh,parafield.getField())
+ icocofield = pm.ICoCoMEDField(parafield.getField())
dec.setMethod("P0")
dec.attachLocalField(parafield)
dec.synchronize()
for i in range(nb_local):
first=comptopo.firstLocalComponent()
for icomp in range(comptopo.nbLocalComponents()):
- self.assertTrue(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp]-
- (float)(i*6+icomp+first))<1e-12)
- pass
- pass
- pass
- comptopo=0
- interface = 0
- mesh =0
- support =0
- paramesh =0
- parafield =0
- icocofield =0
- dec=0
- self_group =0
- target_group = 0
- source_group = 0
- MPI_Barrier(MPI_COMM_WORLD)
- MPI_Finalize()
- print("End of test StructuredCoincidentDEC")
- pass
-
-
-unittest.main()
+ self.assertTrue(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp] - float(i*6+icomp+first)) < 1e-12)
+
+ # Release the DEC (this involves MPI exchanges -- so it is better done before MPI.Finalize())
+ parafield.release()
+ paramesh.release()
+ dec.release()
+ target_group.release()
+ source_group.release()
+ self_group.release()
+
+ MPI.COMM_WORLD.Barrier()
+
+if __name__ == "__main__":
+ unittest.main()
+ MPI.Finalize()