diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
index d6e0ccb525602b35029d3632adcb04b590ae9cf1..49448968379c4d8f6752f9b38182673bdc88eab4 100755
--- a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
+++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #  -*- coding: iso-8859-1 -*-
-# Copyright (C) 2007-2021  CEA/DEN, EDF R&D
+# Copyright (C) 2007-2024  CEA, EDF
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -82,6 +82,37 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         fld.setMesh(sub_m)
         return sub_m, fld
 
+    def testInterpKernelDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define two processor groups
+        nproc_source = 2
+        l1, l2 = range(nproc_source), range(size - nproc_source, size)
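+        # l1 -> source ranks [0, 1], l2 -> target ranks [2, 3] (when run on 4 procs)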
+        # With 2 iterables:
+        i1 = InterpKernelDEC.New(l1, l2)
+        # Should also work directly:
+        i2 = InterpKernelDEC(l1, l2)
+        # With 2 proc groups:
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, list(l1))
+        target_group = MPIProcessorGroup(interface, list(l2))
+        i3 = InterpKernelDEC.New(source_group, target_group)
+        # Should also work directly:
+        i4 = InterpKernelDEC(source_group, target_group)
+        # With 2 iterables and a custom comm:
+        i5 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+        # Also works directly, using the raw communicator address (the **hack**):
+        i6 = InterpKernelDEC(l1, l2, MPI._addressof(MPI.COMM_WORLD))
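+        # (MPI._addressof() gives the address of the underlying MPI_Comm handle,
+        #  so the C++ side can be fed the native communicator directly)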
+        # Should fail with 2 proc groups **and** a communicator
+        self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
+        self.assertRaises(Exception, InterpKernelDEC, source_group, target_group, MPI.COMM_WORLD)
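+        # (mixing explicit processor groups with an extra communicator is not supported)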
+        i6.release(); i5.release(); i4.release(); i3.release(); i2.release(); i1.release()
+        source_group.release()
+        target_group.release()
+
     @WriteInTmpDir
     def testInterpKernelDEC_2D_py_1(self):
         """ This test illustrates a basic use of the InterpKernelDEC.
@@ -224,7 +255,172 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         source_group.release()
         MPI.COMM_WORLD.Barrier()
 
+    def testInterpKernelDEC_2D_py_3(self):
+        """ Test on a question that often comes back: should I re-synchronize() / re-attach() each time that
+        I want to send a new field? 
+        Basic answer: 
+          - you do not have to re-synchronize, but you can re-attach a new field, as long as it has the same support.
+        WARNING: this differs in OverlapDEC ...
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+
+        # Define two processor groups
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, procs_source)
+        target_group = MPIProcessorGroup(interface, procs_target)
+        idec = InterpKernelDEC(source_group, target_group)
+
+        MPI.COMM_WORLD.Barrier()  # really necessary??
+
+        #
+        # OK, let's go DEC !!
+        #
+        mul = 1
+        for t in range(3):  # Emulating a time loop for example
+            if source_group.containsMyRank():
+                _, fieldS = self.getPartialSource(rank)
+                fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
+                fS2 = fieldS.deepCopy()
+                fS2.setMesh(fieldS.getMesh())
+                idec.attachLocalField(fS2)         # each time, but same support!
+                if t == 0:
+                    idec.synchronize()             # only once!
+                das = fS2.getArray()
+                das *= t+1
+                idec.sendData()                    # each time!
+
+            if target_group.containsMyRank():
+                mshT, fieldT = self.getPartialTarget(rank)
+                fieldT.setNature(IntensiveMaximum)
+                fT2 = fieldT.deepCopy()
+                fT2.setMesh(fieldT.getMesh())
+                idec.attachLocalField(fT2)          # each time, but same support!
+                if t == 0:
+                    idec.synchronize()              # only once!
+                idec.recvData()                     # each time!
+                # Now the actual checks:
+                mul = t+1
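+                # the source array was scaled by (t+1) before sending, hence the factor 'mul'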
+                if rank == 2:
+                    self.assertEqual(fT2.getArray().getValues(), [1.0*mul, 9.0*mul])
+                elif rank == 3:
+                    self.assertEqual(fT2.getArray().getValues(), [5.0*mul, 13.0*mul])
+
+        # Release the DEC (this involves MPI exchanges, notably freeing the communicator, so it must be done before MPI.Finalize())
+        idec.release()
+        source_group.release()
+        target_group.release()
+        MPI.COMM_WORLD.Barrier()
+
+    def test_InterpKernelDEC_default(self):
+        """
+        [EDF27375] : Assign a default value to cells in the non-intersecting case
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        target_group = MPIProcessorGroup(interface, procs_target)
+        source_group = MPIProcessorGroup(interface, procs_source)
+        dec = InterpKernelDEC(source_group, target_group)
+        #
+        MPI.COMM_WORLD.Barrier()
+        if source_group.containsMyRank():
+            mesh = eval("Source_Proc_{}".format(rank))()
+            nb_local=mesh.getNumberOfCells()
+            field = MEDCouplingFieldDouble(ON_CELLS)
+            field.setNature(IntensiveMaximum)
+            field.setMesh(mesh)
+            arr = DataArrayDouble(nb_local) ; arr.iota() ; arr += rank
+            field.setArray(arr)
+            dec.attachLocalField(field)
+            dec.synchronizeWithDefaultValue(-2000.0)
+            dec.sendData()
+            # target -> source
+            dec.recvData()
+            if rank == 0:
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6,0.6,-2000]),1e-12))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([2])) )
+            if rank == 1:
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([1.0,-2000]),1e-12))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([1])) )
+        else:
+            mesh = eval("Target_Proc_{}".format(rank))()
+            nb_local=mesh.getNumberOfCells()
+            field = MEDCouplingFieldDouble(ON_CELLS)
+            field.setNature(IntensiveMaximum)
+            field.setMesh(mesh)
+            arr = DataArrayDouble(nb_local) ; arr[:] = -20
+            field.setArray(arr)
+            dec.attachLocalField(field)
+            dec.synchronizeWithDefaultValue(-1000.0)
+            dec.recvData()
+            if rank == 2:
+                # matrix S0 / T2 = [[(0,S0,1),(1,S0,1.5)]
+                # IntensiveMaximum => [[(0,S0,1/2.5),(1,S0,1.5/2.5)]
+                #
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6]),1e-12))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([])) )
+            if rank == 3:
+                # matrix S1 / T3 = [[],[(0,S1,1.0)],[(0,S1,2.0)],[]]
+                # IntensiveMaximum => [[],[(0,S1,1.0/1.0)],[(0,S1,2.0/2.0)],[]]
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([-1000.0, 1.0, 1.0, -1000.0]),1e-8))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([0,3])) )
+            # target -> source
+            dec.sendData()
+
+        # Some cleanup that still needs MPI communication, so it must be done before MPI.Finalize()
+        dec.release()
+        target_group.release()
+        source_group.release()
+        MPI.COMM_WORLD.Barrier()
+
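+# Helper meshes for test_InterpKernelDEC_default: source and target domains only partially
+# overlap, so some cells on each side intersect nothing and are expected to receive the
+# default value.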
+def Source_Proc_0():
+    coo = DataArrayDouble([(0,2),(2,2),(4,2),(0,4),(2,4),(4,4),(0,6),(2,6)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
+    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
+    m.insertNextCell(NORM_QUAD4,[3,4,7,6])
+    return m
+
+def Source_Proc_1():
+    coo = DataArrayDouble([(6,2),(8,2),(10,2),(6,4),(8,4),(10,4)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
+    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
+    return m
+
+def Target_Proc_2():
+    coo = DataArrayDouble([(1,0),(3.5,0),(1,3),(3.5,3)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,3,2])
+    return m
+
+def Target_Proc_3():
+    coo = DataArrayDouble([(6,0),(7,0),(8,0),(9,0),(10,0),
+                           (6,1),(7,1),(9,1),(10,1),
+                           (7,3),(8,3),
+                           (6,4),(7,4)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,6,5])
+    m.insertNextCell(NORM_QUAD4,[1,2,10,9])
+    m.insertNextCell(NORM_QUAD4,[5,6,12,11])
+    m.insertNextCell(NORM_QUAD4,[3,4,8,7])
+    return m
+
 if __name__ == "__main__":
     unittest.main()
     MPI.Finalize()
-