# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2024 CEA, EDF
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
22 from medcoupling import *
23 from ParaMEDMEMTestTools import WriteInTmpDir
27 from mpi4py import MPI
class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
    """
    # NOTE(review): this file looks like a lossy extraction -- many original
    # source lines are missing inside the methods below (dropped guards,
    # returns, synchronize()/sendData() calls, docstring terminators).
    # Each suspected gap is flagged inline with a NOTE(review) comment;
    # none of the flagged code has been re-invented here.

    def generateFullSource(self):
        """ The complete source mesh: 4 squares each divided in 2 diagonally (so 8 cells in total) """
        # Reuse the target geometry and re-tag it as the source mesh.
        msh = self.generateFullTarget()
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh); fld.setName("source_F");
        da = DataArrayDouble(msh.getNumberOfCells())
        # NOTE(review): lines lost here -- presumably the diagonal split of the
        # squares, filling of 'da', fld.setArray(da) and "return msh, fld"
        # (callers unpack two values) -- TODO restore from upstream.

    def generateFullTarget(self):
        """ The complete target mesh: 4 squares """
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0,1,2])
        # NOTE(review): line(s) lost here -- presumably m1.setCoords(da, da)
        # to build the 2x2 Cartesian grid -- TODO confirm.
        msh = m1.buildUnstructured()
        # NOTE(review): "return msh" appears to be missing here.

    #
    # Below, the two functions emulating the set up of a piece of the source and target mesh
    # on each proc. Obviously in real world problems, this comes from your code and is certainly
    # not computed by cutting again from scratch the full-size mesh!!
    #
    def getPartialSource(self, rank):
        """ Will return an empty mesh piece for rank=2 and 3 """
        msh, f = self.generateFullSource()
        # NOTE(review): the per-rank dispatch (e.g. "if rank == 0:" /
        # "elif rank == 1:") around the two slices below was lost in
        # extraction; as written the second assignment shadows the first.
        sub_m, sub_f = msh[0:4], f[0:4]
        sub_m, sub_f = msh[4:8], f[4:8]
        # NOTE(review): trailing lines missing -- presumably coordinate
        # clean-up and "return sub_m, sub_f" (callers unpack two values).

    def getPartialTarget(self, rank):
        """ One square for each rank """
        msh = self.generateFullTarget()
        # NOTE(review): the per-rank extraction defining 'sub_m' was lost
        # in extraction -- 'sub_m' below is otherwise undefined.
        # Receiving side must prepare an empty field that will be filled by DEC:
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        da = DataArrayDouble(sub_m.getNumberOfCells())
        # NOTE(review): trailing lines missing -- presumably fld.setMesh(...),
        # fld.setArray(da) and "return sub_m, fld".

    def testInterpKernelDEC_ctor(self):
        """ Test the various Python ctors """
        size = MPI.COMM_WORLD.size
        # NOTE(review): the guard ("if size != 4:") and the early return around
        # the print below were lost in extraction.
        print("Should be run on 4 procs!")
        # Define two processor groups
        # NOTE(review): the definition of 'nproc_source' (presumably 2) is
        # missing -- l1/l2 below depend on it.
        l1, l2 = range(nproc_source), range(size - nproc_source, size)
        # Ctor flavour 1: factory method with two iterables of ranks.
        i1 = InterpKernelDEC.New(l1, l2)
        # Should also work directly:
        i2 = InterpKernelDEC(l1, l2)
        # Ctor flavour 2: explicit MPIProcessorGroup objects.
        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, list(l1))
        target_group = MPIProcessorGroup(interface, list(l2))
        i3 = InterpKernelDEC.New(source_group, target_group)
        # Should also work directly:
        i4 = InterpKernelDEC(source_group, target_group)
        # With 2 iterables and a custom comm:
        i5 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
        # Work directly with the **hack**
        i6 = InterpKernelDEC(l1, l2, MPI._addressof(MPI.COMM_WORLD))
        # Should fail with 2 proc groups **and** a communicator
        self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
        self.assertRaises(Exception, InterpKernelDEC, source_group, target_group, MPI.COMM_WORLD)
        # Free every DEC and group before MPI finalization:
        i6.release(); i5.release(); i4.release(); i3.release(); i2.release(); i1.release()
        source_group.release()
        target_group.release()

    def testInterpKernelDEC_2D_py_1(self):
        """ This test illustrates a basic use of the InterpKernelDEC.
        Look at the C++ documentation of the class for more information.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        # NOTE(review): the guard ("if size != 4:") and early return around the
        # print below were lost in extraction.
        print("Should be run on 4 procs!")
        # Define two processor groups
        # NOTE(review): the definition of 'nproc_source' (presumably 2) is missing.
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))
        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)
        idec = InterpKernelDEC(source_group, target_group)
        # Write out full size meshes/fields for inspection
        # NOTE(review): this dump was presumably restricted to one rank
        # ("if rank == 0:") -- the guard was lost in extraction.
        _, fld = self.generateFullSource()
        mshT = self.generateFullTarget()
        WriteField("./source_field_FULL.med", fld, True)
        WriteUMesh("./target_mesh_FULL.med", mshT, True)
        MPI.COMM_WORLD.Barrier() # really necessary??
        #
        # OK, let's go DEC !!
        #
        if source_group.containsMyRank():
            _, fieldS = self.getPartialSource(rank)
            fieldS.setNature(IntensiveMaximum) # The only policy supported for now ...
            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
            idec.attachLocalField(fieldS)
            # NOTE(review): "idec.synchronize()" / "idec.sendData()" appear to
            # be missing at the end of the source branch.
        if target_group.containsMyRank():
            mshT, fieldT = self.getPartialTarget(rank)
            fieldT.setNature(IntensiveMaximum)
            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
            idec.attachLocalField(fieldT)
            # NOTE(review): "idec.synchronize()" / "idec.recvData()" appear to
            # be missing before the checks.
            # Now the actual checks:
            # NOTE(review): the per-rank guards around the two assertions below
            # were lost; each expected list corresponds to one target rank.
            self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0])
            self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0])
        # Release DEC (this involves MPI exchanges -- notably the release of the communicator -- so better be done before MPI.Finalize()
        # NOTE(review): "idec.release()" appears to be missing here.
        source_group.release()
        target_group.release()
        MPI.COMM_WORLD.Barrier()

    def test_InterpKernelDEC_2D_py_2(self):
        """ More involved test using Para* objects.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        # NOTE(review): the guard ("if size != 5:") around the print below was
        # lost in extraction.
        print("Should be run on 5 procs!")
        # NOTE(review): the definition of 'nproc_source' (presumably 3) is missing.
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source + 1, size))
        interface = CommInterface()
        target_group = MPIProcessorGroup(interface, procs_target)
        source_group = MPIProcessorGroup(interface, procs_source)
        dec = InterpKernelDEC(source_group, target_group)
        # Locate the MED resource files shipped with the installation;
        # fall back to the MED_RESOURCES_DIR environment variable.
        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
        if not os.path.isdir(data_dir):
            data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1]
        filename_xml1 = os.path.join(data_dir, "square1_split")
        filename_xml2 = os.path.join(data_dir, "square2_split")
        MPI.COMM_WORLD.Barrier()
        if source_group.containsMyRank():
            # Source side: read this rank's pre-split mesh and attach a
            # constant field (value 1.0 on every cell).
            filename = filename_xml1 + str(rank+1) + ".med"
            meshname = "Mesh_2_" + str(rank+1)
            mesh=ReadUMeshFromFile(filename,meshname,0)
            paramesh=ParaMESH(mesh,source_group,"source mesh")
            comptopo = ComponentTopology()
            parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local=mesh.getNumberOfCells()
            value = [1.0]*nb_local
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.attachLocalField(icocofield)
        # NOTE(review): the "if target_group.containsMyRank():" header for the
        # target-side block below was lost in extraction -- TODO restore.
        filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
        meshname = "Mesh_3_" + str(rank - nproc_source + 1)
        mesh=ReadUMeshFromFile(filename,meshname,0)
        paramesh=ParaMESH(mesh,target_group,"target mesh")
        comptopo = ComponentTopology()
        parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
        parafield.getField().setNature(IntensiveMaximum)
        nb_local=mesh.getNumberOfCells()
        value = [0.0]*nb_local
        parafield.getField().setValues(value)
        icocofield = ICoCoMEDDoubleField(parafield.getField())
        dec.attachLocalField(icocofield)
        if source_group.containsMyRank():
            field_before_int = parafield.getVolumeIntegral(0,True)
            dec.setForcedRenormalization(False)
            # NOTE(review): "dec.synchronize()" and the sendData()/recvData()
            # round-trip appear to be missing between the two integrals.
            field_after_int=parafield.getVolumeIntegral(0,True);
            # The volume integral must be conserved by the exchange:
            self.assertTrue(math.fabs(field_after_int-field_before_int)<1e-8)
        # NOTE(review): the target-side branch header around the line below was
        # lost in extraction.
        dec.setForcedRenormalization(False)
        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
        # NOTE(review): "dec.release()" appears to be missing here.
        target_group.release()
        source_group.release()
        MPI.COMM_WORLD.Barrier()

    def testInterpKernelDEC_2D_py_3(self):
        """ Test on a question that often comes back: should I re-synchronize() / re-attach() each time that
        I want to send a new field?
        - you do not have to re-synchronize, but you can re-attach a new field, as long as it has the same support.
        WARNING: this differs in OverlapDEC ...
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        # NOTE(review): the guard ("if size != 4:") around the print below was
        # lost in extraction.
        print("Should be run on 4 procs!")
        # Define two processor groups
        # NOTE(review): the definition of 'nproc_source' (presumably 2) is missing.
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))
        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)
        idec = InterpKernelDEC(source_group, target_group)
        MPI.COMM_WORLD.Barrier() # really necessary??
        #
        # OK, let's go DEC !!
        #
        for t in range(3): # Emulating a time loop for example
            if source_group.containsMyRank():
                _, fieldS = self.getPartialSource(rank)
                fieldS.setNature(IntensiveMaximum) # The only policy supported for now ...
                fS2 = fieldS.deepCopy()
                fS2.setMesh(fieldS.getMesh())
                idec.attachLocalField(fS2) # each time, but same support!
                # NOTE(review): the "if t == 0:" guard around the line below
                # was lost in extraction ("only once!" implies it).
                idec.synchronize() # only once!
                # NOTE(review): per-time-step field scaling (defining the
                # multiplier checked below) appears to be missing.
                idec.sendData() # each time!
            if target_group.containsMyRank():
                mshT, fieldT = self.getPartialTarget(rank)
                fieldT.setNature(IntensiveMaximum)
                fT2 = fieldT.deepCopy()
                fT2.setMesh(fieldT.getMesh())
                idec.attachLocalField(fT2) # each time, but same support!
                # NOTE(review): the "if t == 0:" guard around the line below
                # was lost in extraction ("only once!" implies it).
                idec.synchronize() # only once!
                idec.recvData() # each time!
                # Now the actual checks:
                # NOTE(review): the definition of 'mul' and the per-rank guards
                # for the two assertions below were lost in extraction.
                self.assertEqual(fT2.getArray().getValues(), [1.0*mul, 9.0*mul])
                self.assertEqual(fT2.getArray().getValues(), [5.0*mul, 13.0*mul])
        # Release DEC (this involves MPI exchanges -- notably the release of the communicator -- so better be done before MPI.Finalize()
        # NOTE(review): "idec.release()" appears to be missing here.
        source_group.release()
        target_group.release()
        MPI.COMM_WORLD.Barrier()

    def test_InterpKernelDEC_default(self):
        """
        [EDF27375] : Put a default value when non intersecting case
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        # NOTE(review): the guard ("if size != 4:") around the print below was
        # lost in extraction.
        print("Should be run on 4 procs!")
        # NOTE(review): the definition of 'nproc_source' (presumably 2) is missing.
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))
        interface = CommInterface()
        target_group = MPIProcessorGroup(interface, procs_target)
        source_group = MPIProcessorGroup(interface, procs_source)
        dec = InterpKernelDEC(source_group, target_group)
        MPI.COMM_WORLD.Barrier()
        if source_group.containsMyRank():
            # Build this rank's source mesh via the module-level helpers
            # (Source_Proc_0 / Source_Proc_1 further down the file).
            mesh = eval("Source_Proc_{}".format(rank))()
            nb_local=mesh.getNumberOfCells()
            field = MEDCouplingFieldDouble(ON_CELLS)
            field.setNature(IntensiveMaximum)
            # NOTE(review): "field.setMesh(mesh)" appears to be missing here.
            arr = DataArrayDouble(nb_local) ; arr.iota() ; arr += rank
            # NOTE(review): "field.setArray(arr)" appears to be missing here.
            dec.attachLocalField(field)
            dec.synchronizeWithDefaultValue(-2000.0)
            # NOTE(review): the sendData()/recvData() calls and the per-rank
            # guards for the assertions below were lost in extraction; the
            # two pairs correspond to the two source ranks. Cells that fetched
            # nothing keep the default value -2000.
            self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6,0.6,-2000]),1e-12))
            self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([2])) )
            self.assertTrue(field.getArray().isEqual(DataArrayDouble([1.0,-2000]),1e-12))
            self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([1])) )
        # NOTE(review): the target-side branch header ("else:" or
        # "if target_group.containsMyRank():") for the block below was lost.
        mesh = eval("Target_Proc_{}".format(rank))()
        nb_local=mesh.getNumberOfCells()
        field = MEDCouplingFieldDouble(ON_CELLS)
        field.setNature(IntensiveMaximum)
        # NOTE(review): "field.setMesh(mesh)" appears to be missing here.
        arr = DataArrayDouble(nb_local) ; arr[:] = -20
        # NOTE(review): "field.setArray(arr)" appears to be missing here.
        dec.attachLocalField(field)
        dec.synchronizeWithDefaultValue(-1000.0)
        # NOTE(review): recvData()/sendData() calls and per-rank guards for the
        # assertions below were lost in extraction.
        # matrix S0 / T2 = [[(0,S0,1),(1,S0,1.5)]
        # IntensiveMaximum => [[(0,S0,1/2.5),(1,S0,1.5/2.5)]
        self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6]),1e-12))
        self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([])) )
        # matrix S1 / T3 = [[],[(0,S1,1.0)],[(0,S1,2.0)],[]]
        # IntensiveMaximum => [[],[(0,S1,1.0/1.0)],[(0,S1,2.0/2.0)],[]]
        self.assertTrue(field.getArray().isEqual(DataArrayDouble([-1000.0, 1.0, 1.0, -1000.0]),1e-8))
        self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([0,3])) )
        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
        # NOTE(review): "dec.release()" appears to be missing here.
        target_group.release()
        source_group.release()
        MPI.COMM_WORLD.Barrier()
392 coo = DataArrayDouble([(0,2),(2,2),(4,2),(0,4),(2,4),(4,4),(0,6),(2,6)])
393 m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
394 m.insertNextCell(NORM_QUAD4,[0,1,4,3])
395 m.insertNextCell(NORM_QUAD4,[1,2,5,4])
396 m.insertNextCell(NORM_QUAD4,[3,4,7,6])
400 coo = DataArrayDouble([(6,2),(8,2),(10,2),(6,4),(8,4),(10,4)])
401 m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
402 m.insertNextCell(NORM_QUAD4,[0,1,4,3])
403 m.insertNextCell(NORM_QUAD4,[1,2,5,4])
407 coo = DataArrayDouble([(1,0),(3.5,0),(1,3),(3.5,3)])
408 m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
409 m.insertNextCell(NORM_QUAD4,[0,1,3,2])
413 coo = DataArrayDouble([(6,0),(7,0),(8,0),(9,0),(10,0),
414 (6,1),(7,1),(9,1),(10,1),
417 m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
418 m.insertNextCell(NORM_QUAD4,[0,1,6,5])
419 m.insertNextCell(NORM_QUAD4,[1,2,10,9])
420 m.insertNextCell(NORM_QUAD4,[5,6,12,11])
421 m.insertNextCell(NORM_QUAD4,[3,4,8,7])
424 if __name__ == "__main__":