#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2023 CEA, EDF
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
from medcoupling import *
from ParaMEDMEMTestTools import WriteInTmpDir
import unittest
from mpi4py import MPI

class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
    """ This test illustrates a basic use of the OverlapDEC and notably shows that not all
    processors need to possess a piece of the source and/or target mesh.
    Look at the C++ documentation of the class for more information.
    Here the source mesh is stored on 2 procs only, whereas the target is spread over 4.
    Since only a single group of processors is defined in the setup, the 2 idle procs on the
    source side simply provide an empty mesh, thus indicating that they do not participate
    in the source definition.

    Main method is testOverlapDEC_2D_py_1()
    """
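
    # A typical invocation (an assumption -- adapt to your MPI launcher and to the name
    # under which this file is saved, e.g. test_OverlapDEC.py):
    #
    #   mpirun -np 4 python3 test_OverlapDEC.py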

    def generateFullSource(self):
        """ The complete source mesh: 4 squares each divided in 2 diagonally (so 8 cells in total) """
        msh = self.generateFullTarget()
        msh.simplexize(0)            # Cut each square into 2 triangles
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh); fld.setName("source_F")
        da = DataArrayDouble(msh.getNumberOfCells())
        da.iota()
        da *= 2                      # Triangle i carries the value 2*i
        fld.setArray(da)
        return msh, fld

    def generateFullTarget(self):
        """ The complete target mesh: 4 squares """
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0,1,2])
        m1.setCoords(da, da)         # 3 nodes per axis -> a 2x2 grid of unit squares
        msh = m1.buildUnstructured()
        return msh

    #
    # Below, the two functions emulating the setup of a piece of the source and target mesh
    # on each proc. Obviously in real world problems, this comes from your code and is certainly
    # not computed by cutting the full-size mesh again from scratch!!
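    # (For instance, each rank might read its own sub-domain from a pre-partitioned MED file;
    # the exact mechanism is application-specific and outside the scope of this test.)
    #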
    def getPartialSource(self, rank):
        """ Will return an empty mesh piece for rank=2 and 3 """
        msh, f = self.generateFullSource()
        if rank in [2,3]:
            sub_m, sub_f = msh[[]], f[[]]  # Little trick to select nothing in the mesh, thus producing an empty mesh
        elif rank == 0:
            sub_m, sub_f = msh[0:4], f[0:4]
        elif rank == 1:
            sub_m, sub_f = msh[4:8], f[4:8]
        sub_m.zipCoords()            # Discard the nodes not used by the selected cells
        return sub_m, sub_f

    def getPartialTarget(self, rank):
        """ One square for each rank """
        msh = self.generateFullTarget()
        sub_m = msh[rank]
        sub_m.zipCoords()
        # Receiving side must prepare an empty field that will be filled by the DEC:
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        da = DataArrayDouble(sub_m.getNumberOfCells())
        fld.setArray(da)
        fld.setMesh(sub_m); fld.setName("tgt_F")
        return sub_m, fld

    @WriteInTmpDir
    def testOverlapDEC_ctor(self):
        """ Test the various Python ctors """
        size = MPI.COMM_WORLD.size
        if size != 4:
            print("Should be run on 4 procs!")
            return
        # Define processor group
        proc_group = list(range(size))
        o1 = OverlapDEC.New(proc_group)
        # Should also work directly:
        o2 = OverlapDEC(proc_group)
        # With an iterable and a custom comm:
        o3 = OverlapDEC.New(proc_group, MPI.COMM_WORLD)
        # Also works directly, with the **hack** on the comm (MPI._addressof() returns the
        # address of the underlying MPI_Comm handle, which the raw ctor expects):
        o4 = OverlapDEC(proc_group, MPI._addressof(MPI.COMM_WORLD))
        # Passing the mpi4py communicator object itself to the raw ctor should fail:
        self.assertRaises(Exception, OverlapDEC, proc_group, MPI.COMM_WORLD)
        o4.release(); o3.release(); o2.release(); o1.release()

    @WriteInTmpDir
    def testOverlapDEC_2D_py_1(self):
        """ The main method of the test """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            raise RuntimeError("Should be run on 4 procs!")

        # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
        proc_group = list(range(size))   # No need for a ProcessorGroup object here.
        odec = OverlapDEC(proc_group)

        # Write out full size meshes/fields for inspection
        if rank == 0:
            _, fld = self.generateFullSource()
            mshT = self.generateFullTarget()
            WriteField("./source_field_FULL.med", fld, True)
            WriteUMesh("./target_mesh_FULL.med", mshT, True)

        MPI.COMM_WORLD.Barrier()  # really necessary??

        #
        # OK, let's go DEC !!
        #
        _, fieldS = self.getPartialSource(rank)
        fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
        mshT, fieldT = self.getPartialTarget(rank)
        fieldT.setNature(IntensiveMaximum)
        if rank not in [2,3]:
            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
        WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)

        odec.attachSourceLocalField(fieldS)
        odec.attachTargetLocalField(fieldT)
        odec.synchronize()    # Compute the interpolation matrix
        odec.sendRecvData()   # Perform the actual transfer

        # Now the actual checks:
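        # (Recall the source triangles carry the values 0, 2, 4, ..., 14. Target square k
        # overlaps triangles 2k and 2k+1, i.e. values 4k and 4k+2, whose area-weighted
        # average under the IntensiveMaximum policy is 4k+1 -- hence 1, 5, 9 and 13 on
        # ranks 0 to 3. This assumes simplexize(0) splits square i into triangles 2i and 2i+1.)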
        if rank == 0:
            self.assertEqual(fieldT.getArray().getValues(), [1.0])
        elif rank == 1:
            self.assertEqual(fieldT.getArray().getValues(), [5.0])
        elif rank == 2:
            self.assertEqual(fieldT.getArray().getValues(), [9.0])
        elif rank == 3:
            self.assertEqual(fieldT.getArray().getValues(), [13.0])

        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator --
        # so it is better done before MPI.Finalize()):
        odec.release()

        MPI.COMM_WORLD.Barrier()

if __name__ == "__main__":
    unittest.main()