#!/usr/bin/env python
#  -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2024  CEA, EDF
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#

from medcoupling import *
from ParaMEDMEMTestTools import WriteInTmpDir
import sys, os
import unittest
import math
from mpi4py import MPI


class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
    """
    def generateFullSource(self):
        """ The complete source mesh: 4 squares, each split in two along a diagonal (so 8 triangular cells in total) """
        msh = self.generateFullTarget()
        msh.simplexize(0)
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh); fld.setName("source_F")
        da = DataArrayDouble(msh.getNumberOfCells())
        da.iota()
        da *= 2          # source cell values: 0, 2, 4, ..., 14
        fld.setArray(da)
        return msh, fld

    def generateFullTarget(self):
        """ The complete target mesh: 4 squares """
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0,1,2])
        m1.setCoords(da, da)
        msh = m1.buildUnstructured()
        return msh

    #
    # Below, the two functions emulating the set-up of a piece of the source and target mesh
    # on each proc. Obviously, in real-world problems this comes from your code and is certainly
    # not computed by cutting the full-size mesh from scratch again!
    #
    def getPartialSource(self, rank):
        """ Source mesh piece for rank 0 or 1 (only ever called on the source ranks) """
        msh, f = self.generateFullSource()
        if rank == 0:
            sub_m, sub_f = msh[0:4], f[0:4]
        elif rank == 1:
            sub_m, sub_f = msh[4:8], f[4:8]
        sub_m.zipCoords()
        return sub_m, sub_f

    def getPartialTarget(self, rank):
        """ Two squares for each target rank (ranks 2 and 3) """
        msh = self.generateFullTarget()
        if rank == 2:
            sub_m = msh[[0,2]]
        elif rank == 3:
            sub_m = msh[[1,3]]
        sub_m.zipCoords()
        # Receiving side must prepare an empty field that will be filled by the DEC:
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        da = DataArrayDouble(sub_m.getNumberOfCells())
        fld.setArray(da)
        fld.setName("tgt_F")
        fld.setMesh(sub_m)
        return sub_m, fld

    def testInterpKernelDEC_ctor(self):
        """ Test the various Python ctors """
        size = MPI.COMM_WORLD.size
        if size != 4:
            print("Should be run on 4 procs!")
            return
        # Define two processor groups
        nproc_source = 2
        l1, l2 = range(nproc_source), range(size - nproc_source, size)
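        # i.e. with size == 4: l1 = [0, 1] (source ranks) and l2 = [2, 3] (target ranks)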
        # With 2 iterables:
        i1 = InterpKernelDEC.New(l1, l2)
        # Should also work directly:
        i2 = InterpKernelDEC(l1, l2)
        # With 2 proc groups:
        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, list(l1))
        target_group = MPIProcessorGroup(interface, list(l2))
        i3 = InterpKernelDEC.New(source_group, target_group)
        # Should also work directly:
        i4 = InterpKernelDEC(source_group, target_group)
        # With 2 iterables and a custom comm:
        i5 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
        # Also works directly with the **hack** of passing the address of the communicator:
        i6 = InterpKernelDEC(l1, l2, MPI._addressof(MPI.COMM_WORLD))
        # Should fail with 2 proc groups **and** a communicator:
        self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
        self.assertRaises(Exception, InterpKernelDEC, source_group, target_group, MPI.COMM_WORLD)
        i6.release(); i5.release(); i4.release(); i3.release(); i2.release(); i1.release()
        source_group.release()
        target_group.release()

    @WriteInTmpDir
    def testInterpKernelDEC_2D_py_1(self):
        """ This test illustrates a basic use of the InterpKernelDEC.
        Look at the C++ documentation of the class for more information.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            print("Should be run on 4 procs!")
            return

        # Define two processor groups
        nproc_source = 2
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))

        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)
        idec = InterpKernelDEC(source_group, target_group)

        # Write out full-size meshes/fields for inspection
        if rank == 0:
            _, fld = self.generateFullSource()
            mshT = self.generateFullTarget()
            WriteField("./source_field_FULL.med", fld, True)
            WriteUMesh("./target_mesh_FULL.med", mshT, True)

        MPI.COMM_WORLD.Barrier()  # really necessary??

        #
        # OK, let's go DEC !!
        #
        if source_group.containsMyRank():
            _, fieldS = self.getPartialSource(rank)
            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
            idec.attachLocalField(fieldS)
            idec.synchronize()
            idec.sendData()

        if target_group.containsMyRank():
            mshT, fieldT = self.getPartialTarget(rank)
            fieldT.setNature(IntensiveMaximum)
            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
            idec.attachLocalField(fieldT)
            idec.synchronize()
            idec.recvData()
            # Now the actual checks:
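            # Expected values (a short worked example): each received target square covers
            # two equal-area source triangles -- rank 2 holds squares 0 and 2 (source values
            # (0,2) and (8,10)), rank 3 holds squares 1 and 3 (values (4,6) and (12,14)).
            # With the IntensiveMaximum nature the received value is the intersection-area
            # weighted average of the overlapped source cells, hence the values checked below.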
            if rank == 2:
                self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0])
            elif rank == 3:
                self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0])

        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
        idec.release()
        source_group.release()
        target_group.release()
        MPI.COMM_WORLD.Barrier()

    @WriteInTmpDir
    def test_InterpKernelDEC_2D_py_2(self):
        """ More involved test using Para* objects.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 5:
            print("Should be run on 5 procs!")
            return

        print(rank)
        nproc_source = 3
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source + 1, size))

        interface = CommInterface()
        target_group = MPIProcessorGroup(interface, procs_target)
        source_group = MPIProcessorGroup(interface, procs_source)
        dec = InterpKernelDEC(source_group, target_group)

        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
        if not os.path.isdir(data_dir):
            data_dir = os.environ.get('MED_RESOURCES_DIR', "::").split(":")[1]

        filename_xml1 = os.path.join(data_dir, "square1_split")
        filename_xml2 = os.path.join(data_dir, "square2_split")

        MPI.COMM_WORLD.Barrier()
        if source_group.containsMyRank():
            filename = filename_xml1 + str(rank+1) + ".med"
            meshname = "Mesh_2_" + str(rank+1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, source_group, "source mesh")
            comptopo = ComponentTopology()
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [1.0]*nb_local
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.attachLocalField(icocofield)
        else:
            filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
            meshname = "Mesh_3_" + str(rank - nproc_source + 1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, target_group, "target mesh")
            comptopo = ComponentTopology()
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [0.0]*nb_local
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.attachLocalField(icocofield)

        if source_group.containsMyRank():
            field_before_int = parafield.getVolumeIntegral(0, True)
            dec.synchronize()
            dec.setForcedRenormalization(False)
            dec.sendData()
            dec.recvData()
            field_after_int = parafield.getVolumeIntegral(0, True)
            # The send/recv round trip must conserve the volume integral of the field:
            self.assertTrue(math.fabs(field_after_int - field_before_int) < 1e-8)
        else:
            dec.synchronize()
            dec.setForcedRenormalization(False)
            dec.recvData()
            dec.sendData()

        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
        parafield.release()
        paramesh.release()
        dec.release()
        target_group.release()
        source_group.release()
        MPI.COMM_WORLD.Barrier()

    def testInterpKernelDEC_2D_py_3(self):
        """ Test on a question that often comes back: should I re-synchronize() / re-attach() each time
        I want to send a new field?
        Basic answer:
          - you do not have to re-synchronize, but you can re-attach a new field, as long as it has the same support.
        WARNING: this differs in OverlapDEC ...
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            print("Should be run on 4 procs!")
            return

        # Define two processor groups
        nproc_source = 2
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))

        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)
        idec = InterpKernelDEC(source_group, target_group)

        MPI.COMM_WORLD.Barrier()  # really necessary??

        #
        # OK, let's go DEC !!
        #
        mul = 1
        for t in range(3):  # Emulating a time loop, for example
            if source_group.containsMyRank():
                _, fieldS = self.getPartialSource(rank)
                fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
                fS2 = fieldS.deepCopy()
                fS2.setMesh(fieldS.getMesh())
                idec.attachLocalField(fS2)         # each time, but same support!
                if t == 0:
                    idec.synchronize()             # only once!
                das = fS2.getArray()
                das *= t+1
                idec.sendData()                    # each time!

            if target_group.containsMyRank():
                mshT, fieldT = self.getPartialTarget(rank)
                fieldT.setNature(IntensiveMaximum)
                fT2 = fieldT.deepCopy()
                fT2.setMesh(fieldT.getMesh())
                idec.attachLocalField(fT2)          # each time, but same support!
                if t == 0:
                    idec.synchronize()              # only once!
                idec.recvData()                     # each time!
                # Now the actual checks:
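                # The source array was scaled by (t+1) above, so the expected values are the
                # t == 0 results ([1.0, 9.0] on rank 2, [5.0, 13.0] on rank 3) times (t+1):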
                mul = t+1
                if rank == 2:
                    self.assertEqual(fT2.getArray().getValues(), [1.0*mul, 9.0*mul])
                elif rank == 3:
                    self.assertEqual(fT2.getArray().getValues(), [5.0*mul, 13.0*mul])

        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
        idec.release()
        source_group.release()
        target_group.release()
        MPI.COMM_WORLD.Barrier()

    def test_InterpKernelDEC_default(self):
        """
        [EDF27375] : Put a default value in cells of the non-intersecting case
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            print("Should be run on 4 procs!")
            return
        nproc_source = 2
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))

        interface = CommInterface()
        target_group = MPIProcessorGroup(interface, procs_target)
        source_group = MPIProcessorGroup(interface, procs_source)
        dec = InterpKernelDEC(source_group, target_group)
        #
        MPI.COMM_WORLD.Barrier()
        if source_group.containsMyRank():
            mesh = eval("Source_Proc_{}".format(rank))()
            nb_local = mesh.getNumberOfCells()
            field = MEDCouplingFieldDouble(ON_CELLS)
            field.setNature(IntensiveMaximum)
            field.setMesh(mesh)
            arr = DataArrayDouble(nb_local) ; arr.iota() ; arr += rank
            field.setArray(arr)
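            # i.e. rank 0 sends [0., 1., 2.] and rank 1 sends [1., 2.]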
            dec.attachLocalField(field)
            dec.synchronizeWithDefaultValue(-2000.0)
            dec.sendData()
            # target -> source
            dec.recvData()
            if rank == 0:
                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6,0.6,-2000]),1e-12))
                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([2])) )
            if rank == 1:
                self.assertTrue(field.getArray().isEqual(DataArrayDouble([1.0,-2000]),1e-12))
                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([1])) )
        else:
            mesh = eval("Target_Proc_{}".format(rank))()
            nb_local = mesh.getNumberOfCells()
            field = MEDCouplingFieldDouble(ON_CELLS)
            field.setNature(IntensiveMaximum)
            field.setMesh(mesh)
            arr = DataArrayDouble(nb_local) ; arr[:] = -20
            field.setArray(arr)
            dec.attachLocalField(field)
            dec.synchronizeWithDefaultValue(-1000.0)
            dec.recvData()
            if rank == 2:
                # matrix S0 / T2 = [[(0,S0,1),(1,S0,1.5)]]
                # IntensiveMaximum => [[(0,S0,1/2.5),(1,S0,1.5/2.5)]]
                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6]),1e-12))
                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([])) )
            if rank == 3:
                # matrix S1 / T3 = [[],[(0,S1,1.0)],[(0,S1,2.0)],[]]
                # IntensiveMaximum => [[],[(0,S1,1.0/1.0)],[(0,S1,2.0/2.0)],[]]
                self.assertTrue(field.getArray().isEqual(DataArrayDouble([-1000.0, 1.0, 1.0, -1000.0]),1e-8))
                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([0,3])) )
            # target -> source
            dec.sendData()

        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
        dec.release()
        target_group.release()
        source_group.release()
        MPI.COMM_WORLD.Barrier()

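#
# Helper meshes for test_InterpKernelDEC_default(): ranks 0 and 1 build small source quad
# meshes, ranks 2 and 3 build target quad meshes that only partially overlap them (hence
# the default values assigned to the non-fetched target/source cells in the test above).
#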
def Source_Proc_0():
    coo = DataArrayDouble([(0,2),(2,2),(4,2),(0,4),(2,4),(4,4),(0,6),(2,6)])
    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
    m.insertNextCell(NORM_QUAD4,[3,4,7,6])
    return m

def Source_Proc_1():
    coo = DataArrayDouble([(6,2),(8,2),(10,2),(6,4),(8,4),(10,4)])
    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
    return m

def Target_Proc_2():
    coo = DataArrayDouble([(1,0),(3.5,0),(1,3),(3.5,3)])
    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
    m.insertNextCell(NORM_QUAD4,[0,1,3,2])
    return m

def Target_Proc_3():
    coo = DataArrayDouble([(6,0),(7,0),(8,0),(9,0),(10,0),
                           (6,1),(7,1),(9,1),(10,1),
                           (7,3),(8,3),
                           (6,4),(7,4)])
    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
    m.insertNextCell(NORM_QUAD4,[0,1,6,5])
    m.insertNextCell(NORM_QUAD4,[1,2,10,9])
    m.insertNextCell(NORM_QUAD4,[5,6,12,11])
    m.insertNextCell(NORM_QUAD4,[3,4,8,7])
    return m

if __name__ == "__main__":
    unittest.main()
    MPI.Finalize()