# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2024 CEA, EDF
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#

from medcoupling import *
from ParaMEDMEMTestTools import WriteInTmpDir
import unittest
from mpi4py import MPI
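
# NOTE: these tests use MPI.COMM_WORLD and expect exactly 4 processes. They are meant to be
# launched through an MPI runner, typically something like (exact command depends on your setup):
#   mpirun -np 4 python3 <this_test_file.py>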

class ParaMEDMEM_O_DEC_Tests(unittest.TestCase):
    """ This test illustrates a basic use of the OverlapDEC and shows notably that not all
    processors must possess a piece of the source and/or target mesh.
    Look at the C++ documentation of the class for more information.
    In this case, the source mesh is only stored on 2 procs, whereas the target is on 4.
    Since only a single group of processors is defined in the setup, the 2 idle procs on the source
    side simply provide an empty mesh, thus indicating that they don't participate in the source definition.

    Main method is testOverlapDEC_2D_py_1()
    """

    def generateFullSource(self, nb_compo=1):
        """ The complete source mesh: 4 squares, each divided in 2 diagonally (so 8 cells in total) """
        msh = self.generateFullTarget()
        msh.simplexize(0)        # split each square into 2 triangles
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh); fld.setName("source_F")
        nc = msh.getNumberOfCells()
        da = DataArrayDouble(nc*nb_compo)
        da.rearrange(nb_compo)
        for c in range(nb_compo):
            da[:, c] = list(range(nc))
        da *= 2    # To compensate for the later division of the volume by 2 between target and source cells.
        fld.setArray(da)
        return msh, fld

    def generateFullTarget(self):
        """ The complete target mesh: 4 squares """
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0,1,2])
        m1.setCoords(da, da)
        msh = m1.buildUnstructured()
        return msh

    #
    # Below, the two functions emulating the set-up of a piece of the source and target meshes
    # on each proc. Obviously, in real-world problems this comes from your code and is certainly
    # not computed by re-cutting the full-size mesh from scratch!!
    #
    def getPartialSource(self, rank, nb_compo=1):
        """ Will return an empty mesh piece for rank=2 and 3 """
        msh, f = self.generateFullSource(nb_compo)
        if rank in [2, 3]:
            sub_m, sub_f = msh[[]], f[[]]   # Little trick to select nothing in the mesh, thus producing an empty mesh
        elif rank == 0:
            sub_m, sub_f = msh[0:4], f[0:4]
        elif rank == 1:
            sub_m, sub_f = msh[4:8], f[4:8]
        sub_m.zipCoords()   # drop the unused nodes
        return sub_m, sub_f

    def getPartialTarget(self, rank, nb_compo=1):
        """ One square for each rank """
        msh = self.generateFullTarget()
        sub_m = msh[rank]
        sub_m.zipCoords()   # drop the unused nodes
        # Receiving side must prepare an empty field that will be filled by the DEC:
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        da = DataArrayDouble(sub_m.getNumberOfCells(), nb_compo)
        fld.setArray(da)
        fld.setName("target_F")
        fld.setMesh(sub_m)
        return sub_m, fld
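
    # (In getPartialTarget() above, only the mesh support and the number of components of the field
    #  really matter: the array values themselves are overwritten by the DEC during sendRecvData().)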

    def testOverlapDEC_ctor(self):
        """ Test the various Python ctors """
        size = MPI.COMM_WORLD.size
        if size != 4:
            print("Should be run on 4 procs!")
            return
        # Define processor group
        proc_group = list(range(size))
        o1 = OverlapDEC.New(proc_group)
        # Should also work directly:
        o2 = OverlapDEC(proc_group)
        # With an iterable and a custom comm:
        o3 = OverlapDEC.New(proc_group, MPI.COMM_WORLD)
        # Also works directly with the **hack** on the comm:
        o4 = OverlapDEC(proc_group, MPI._addressof(MPI.COMM_WORLD))
        self.assertRaises(Exception, OverlapDEC, proc_group, MPI.COMM_WORLD)
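        # (The direct ctor expects the raw address of an MPI_Comm -- hence the MPI._addressof() hack
        #  above -- so passing the mpi4py communicator object itself is expected to raise.)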
        o4.release(); o3.release(); o2.release(); o1.release()

    @WriteInTmpDir
    def testOverlapDEC_2D_py_1(self):
        """ The main method of the test. """
        ncompo = 4   # Dummy field with 4 components
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            raise RuntimeError("Should be run on 4 procs!")

        for algo in range(3):
            # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
            proc_group = list(range(size))   # No need for a ProcessorGroup object here.
            odec = OverlapDEC(proc_group)
            odec.setWorkSharingAlgo(algo)    # Default is 1 - set here to test the various send/receive patterns
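            # (Values 0/1/2 select different internal strategies for sharing the interpolation work
            #  among the procs; see the C++ OverlapDEC documentation for the details of each one.)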

            # Write out full size meshes/fields for inspection
            if rank == 0:
                _, fld = self.generateFullSource(ncompo)
                mshT = self.generateFullTarget()
                WriteField("./source_field_FULL.med", fld, True)
                WriteUMesh("./target_mesh_FULL.med", mshT, True)

            MPI.COMM_WORLD.Barrier()   # really necessary??

            #
            # OK, let's go DEC !!
            #
            _, fieldS = self.getPartialSource(rank, ncompo)
            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
            mshT, fieldT = self.getPartialTarget(rank, ncompo)
            fieldT.setNature(IntensiveMaximum)
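            # (IntensiveMaximum means the field is intensive -- a temperature rather than a mass:
            #  each target cell receives, roughly speaking, a volume-weighted average of the
            #  overlapping source values, not their sum.)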
            if rank not in [2, 3]:
                WriteField("./source_field_part_%d.med" % rank, fieldS, True)
            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)

            odec.attachSourceLocalField(fieldS)
            odec.attachTargetLocalField(fieldT)
            odec.synchronize()
            odec.sendRecvData()

            # Now the actual checks:
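            # (On each rank, target square 'rank' is covered by source triangles 2*rank and 2*rank+1,
            #  whose values are 4*rank and 4*rank+2 -- cell index times 2. For an intensive field the
            #  result is their area-weighted average, i.e. 4*rank+1, hence 1, 5, 9, 13.)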
            ref_vals = [1.0, 5.0, 9.0, 13.0]
            self.assertEqual(fieldT.getArray().getValues(), [ref_vals[rank]]*ncompo)

            # Release the DEC (this involves MPI exchanges -- notably the release of the communicator --
            # so it is better done before MPI.Finalize())
            odec.release()

            MPI.COMM_WORLD.Barrier()

    def testOverlapDEC_2D_py_2(self):
        """ Test on a question that often comes back: should I re-synchronize() / re-attach() each time
        I want to send a new field?
        Answer:
          - you do not have to re-synchronize, but you can re-attach a new field, as long as it has the same support.
        WARNING: this differs in InterpKernelDEC ...
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            raise RuntimeError("Should be run on 4 procs!")

        # Define (single) processor group - note the difference with InterpKernelDEC which needs two groups.
        proc_group = list(range(size))   # No need for a ProcessorGroup object here.
        odec = OverlapDEC(proc_group)

        MPI.COMM_WORLD.Barrier()   # really necessary??

        #
        # OK, let's go DEC !!
        #
        _, fieldS = self.getPartialSource(rank)
        fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
        mshT, fieldT = self.getPartialTarget(rank)
        fieldT.setNature(IntensiveMaximum)

        mul = 1
        for t in range(3):   # Emulating a time loop ...
            if t == 0:
                odec.attachSourceLocalField(fieldS)   # only once!
                odec.attachTargetLocalField(fieldT)   # only once!
                odec.synchronize()                    # only once!
            else:
                das = fieldS.getArray()   # but we can still hack the underlying field values ...
                das *= 2                  # ... here, doubling the source values at each new step
                mul *= 2                  # keep track of the factor applied so far
            odec.sendRecvData()           # each time!

            # Now the actual checks:
            ref_vals = [1.0, 5.0, 9.0, 13.0]
            self.assertEqual(fieldT.getArray().getValues(), [ref_vals[rank]*mul])

        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator --
        # so it is better done before MPI.Finalize())
        odec.release()

        MPI.COMM_WORLD.Barrier()

if __name__ == "__main__":
    unittest.main()
    # tt = ParaMEDMEM_O_DEC_Tests()
    # tt.testOverlapDEC_2D_py_1()