refactor!: remove adm_local/ directory
diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
index 3f241865f8488100fe6dcad80c6a857e1a805be6..49448968379c4d8f6752f9b38182673bdc88eab4 100755
--- a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
+++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #  -*- coding: iso-8859-1 -*-
-# Copyright (C) 2007-2014  CEA/DEN, EDF R&D
+# Copyright (C) 2007-2024  CEA, EDF
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 #
 
-from ParaMEDMEM import *
+from medcoupling import *
+from ParaMEDMEMTestTools import WriteInTmpDir
 import sys, os
 import unittest
 import math
+from mpi4py import MPI
 
-class ParaMEDMEMBasicsTest(unittest.TestCase):
-    def testInterpKernelDEC_2D(self):
-        MPI_Init(sys.argv)
-        size = MPI_Comm_size(MPI_COMM_WORLD)
-        rank = MPI_Comm_rank(MPI_COMM_WORLD)
+
+class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
+    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
+    """
+    def generateFullSource(self):
+        """ The complete source mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) """
+        msh  = self.generateFullTarget()
+        msh.simplexize(0)
+        msh.setName("src_mesh")
+        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
+        fld.setMesh(msh); fld.setName("source_F");
+        da = DataArrayDouble(msh.getNumberOfCells())
+        da.iota()
+        da *= 2
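+        # iota() then *2: the 8 source cells carry the values 0, 2, 4, ..., 14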
+        fld.setArray(da)
+        return msh, fld
+
+    def generateFullTarget(self):
+        """ The complete target mesh: 4 squares """
+        m1 = MEDCouplingCMesh("tgt_msh")
+        da = DataArrayDouble([0,1,2])
+        m1.setCoords(da, da)
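+        # x and y coordinates [0, 1, 2] -> a 2x2 grid of unit squares covering [0,2] x [0,2]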
+        msh = m1.buildUnstructured()
+        return msh
+
+    #
+    # Below, the two functions emulating the setup of a piece of the source and target mesh
+    # on each proc. Obviously in real-world problems this comes from your code and is certainly
+    # not computed by re-cutting the full-size mesh from scratch!!
+    #
+    def getPartialSource(self, rank):
+        """ Will return an empty mesh piece for rank=2 and 3 """
+        msh, f = self.generateFullSource()
+        if rank == 0:
+            sub_m, sub_f = msh[0:4], f[0:4]
+        elif rank == 1:
+            sub_m, sub_f = msh[4:8], f[4:8]
+        sub_m.zipCoords()
+        return sub_m, sub_f
+
+    def getPartialTarget(self, rank):
+        """ One square for each rank """
+        msh = self.generateFullTarget()
+        if rank == 2:
+            sub_m = msh[[0,2]]
+        elif rank == 3:
+            sub_m = msh[[1,3]]
+        sub_m.zipCoords()
+        # Receiving side must prepare an empty field that will be filled by DEC:
+        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
+        da = DataArrayDouble(sub_m.getNumberOfCells())
+        fld.setArray(da)
+        fld.setName("tgt_F")
+        fld.setMesh(sub_m)
+        return sub_m, fld
+
+    def testInterpKernelDEC_ctor(self):
+        """ Test the various Python ctors """
+        size = MPI.COMM_WORLD.size
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        # Define two processor groups
+        nproc_source = 2
+        l1, l2 = range(nproc_source), range(size - nproc_source, size)
+        # With 2 iterables:
+        i1 = InterpKernelDEC.New(l1, l2)
+        # Should also work directly:
+        i2 = InterpKernelDEC(l1, l2)
+        # With 2 proc groups:
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, list(l1))
+        target_group = MPIProcessorGroup(interface, list(l2))
+        i3 = InterpKernelDEC.New(source_group, target_group)
+        # Should also work directly:
+        i4 = InterpKernelDEC(source_group, target_group)
+        # With 2 iterables and a custom comm:
+        i5 = InterpKernelDEC.New(l1, l2, MPI.COMM_WORLD)
+        # Also works directly, using the **hack**:
+        i6 = InterpKernelDEC(l1, l2, MPI._addressof(MPI.COMM_WORLD))
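+        # (MPI._addressof() returns the address of the underlying MPI_Comm handle as an integer,
+        #  which is apparently what the SWIG layer expects here -- hence the "hack".)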
+        # Should fail with 2 proc groups **and** a communicator
+        self.assertRaises(InterpKernelException, InterpKernelDEC.New, source_group, target_group, MPI.COMM_WORLD)
+        self.assertRaises(Exception, InterpKernelDEC, source_group, target_group, MPI.COMM_WORLD)
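+        # (Presumably rejected because the processor groups already carry their communicator,
+        #  so also passing one explicitly would be ambiguous.)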
+        i6.release(); i5.release(); i4.release(); i3.release(); i2.release(); i1.release()
+        source_group.release()
+        target_group.release()
+
+    @WriteInTmpDir
+    def testInterpKernelDEC_2D_py_1(self):
+        """ This test illustrates a basic use of the InterpKernelDEC.
+        See the C++ documentation of the class for more information.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+
+        # Define two processor groups
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, procs_source)
+        target_group = MPIProcessorGroup(interface, procs_target)
+        idec = InterpKernelDEC(source_group, target_group)
+
+        # Write out full size meshes/fields for inspection
+        if rank == 0:
+            _, fld = self.generateFullSource()
+            mshT = self.generateFullTarget()
+            WriteField("./source_field_FULL.med", fld, True)
+            WriteUMesh("./target_mesh_FULL.med", mshT, True)
+
+        MPI.COMM_WORLD.Barrier()  # really necessary??
+
+        #
+        # OK, let's go DEC !!
+        #
+        if source_group.containsMyRank():
+            _, fieldS = self.getPartialSource(rank)
+            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
+            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
+            idec.attachLocalField(fieldS)
+            idec.synchronize()
+            idec.sendData()
+
+        if target_group.containsMyRank():
+            mshT, fieldT = self.getPartialTarget(rank)
+            fieldT.setNature(IntensiveMaximum)
+            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
+            idec.attachLocalField(fieldT)
+            idec.synchronize()
+            idec.recvData()
+            # Now the actual checks:
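+            # Each target square exactly covers two source triangles of equal area carrying the
+            # values (4k, 4k+2), so the intensive (IntensiveMaximum) transfer yields their
+            # average 4k+1: target squares 0..3 receive 1, 5, 9, 13 respectively.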
+            if rank == 2:
+                self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0])
+            elif rank == 3:
+                self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0])
+
+        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
+        idec.release()
+        source_group.release()
+        target_group.release()
+        MPI.COMM_WORLD.Barrier()
+
+    @WriteInTmpDir
+    def test_InterpKernelDEC_2D_py_2(self):
+        """ More involved test using Para* objects.
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
         if size != 5:
-            raise RuntimeError, "Expect MPI_COMM_WORLD size == 5"
-        print rank
+            print("Should be run on 5 procs!")
+            return
+
+        print(rank)
         nproc_source = 3
-        procs_source = range( nproc_source )
-        procs_target = range( size - nproc_source + 1, size)
-        
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source + 1, size))
+
         interface = CommInterface()
         target_group = MPIProcessorGroup(interface, procs_target)
         source_group = MPIProcessorGroup(interface, procs_source)
         dec = InterpKernelDEC(source_group, target_group)
-        
-        mesh       =0
-        support    =0
-        paramesh   =0
-        parafield  =0
-        icocofield =0
-        data_dir = os.environ['MED_ROOT_DIR']
-        tmp_dir  = os.environ['TMP']
-        
-        if not tmp_dir or len(tmp_dir)==0:
-            tmp_dir = "/tmp"
-            pass
-        
-        filename_xml1 = os.path.join(data_dir, "share/salome/resources/med/square1_split")
-        filename_xml2 = os.path.join(data_dir, "share/salome/resources/med/square2_split")
-        
-        MPI_Barrier(MPI_COMM_WORLD)
+
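+        # Locate the MED test data: prefer MEDCOUPLING_ROOT_DIR, otherwise fall back to the
+        # second ':'-separated entry of MED_RESOURCES_DIR.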
+        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
+        if not os.path.isdir(data_dir):
+            data_dir = os.environ.get('MED_RESOURCES_DIR',"::").split(":")[1]
+
+        filename_xml1 = os.path.join(data_dir, "square1_split")
+        filename_xml2 = os.path.join(data_dir, "square2_split")
+
+        MPI.COMM_WORLD.Barrier()
         if source_group.containsMyRank():
             filename = filename_xml1 + str(rank+1) + ".med"
             meshname = "Mesh_2_" + str(rank+1)
-            mesh=MEDLoader.ReadUMeshFromFile(filename,meshname,0)
+            mesh=ReadUMeshFromFile(filename,meshname,0)
             paramesh=ParaMESH(mesh,source_group,"source mesh")
             comptopo = ComponentTopology()
             parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
-            parafield.getField().setNature(ConservativeVolumic)
+            parafield.getField().setNature(IntensiveMaximum)
             nb_local=mesh.getNumberOfCells()
             value = [1.0]*nb_local
             parafield.getField().setValues(value)
-            icocofield = ICoCoMEDField(mesh,parafield.getField())
-            dec.setMethod("P0")
+            icocofield = ICoCoMEDDoubleField(parafield.getField())
             dec.attachLocalField(icocofield)
             pass
         else:
             filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
             meshname = "Mesh_3_" + str(rank - nproc_source + 1)
-            mesh=MEDLoader.ReadUMeshFromFile(filename,meshname,0)
+            mesh=ReadUMeshFromFile(filename,meshname,0)
             paramesh=ParaMESH(mesh,target_group,"target mesh")
             comptopo = ComponentTopology()
             parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo)
-            parafield.getField().setNature(ConservativeVolumic)
+            parafield.getField().setNature(IntensiveMaximum)
             nb_local=mesh.getNumberOfCells()
             value = [0.0]*nb_local
             parafield.getField().setValues(value)
-            icocofield = ICoCoMEDField(mesh,parafield.getField())
-            dec.setMethod("P0")
+            icocofield = ICoCoMEDDoubleField(parafield.getField())
             dec.attachLocalField(icocofield)
             pass
-        
+
         if source_group.containsMyRank():
             field_before_int = parafield.getVolumeIntegral(0,True)
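+            # The source field is sent to the target side and received back below; this round
+            # trip should preserve its volume integral (checked within 1e-8).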
             dec.synchronize()
@@ -95,7 +237,7 @@ class ParaMEDMEMBasicsTest(unittest.TestCase):
             dec.sendData()
             dec.recvData()
             field_after_int=parafield.getVolumeIntegral(0,True);
-            self.failUnless(math.fabs(field_after_int-field_before_int)<1e-8)
+            self.assertTrue(math.fabs(field_after_int-field_before_int)<1e-8)
             pass
         else:
             dec.synchronize()
@@ -104,18 +246,181 @@ class ParaMEDMEMBasicsTest(unittest.TestCase):
             dec.sendData()
             pass
         ## end
-        interface = 0
-        target_group = 0
-        source_group = 0
-        dec = 0
-        mesh       =0
-        support    =0
-        paramesh   =0
-        parafield  =0
-        icocofield =0
-        MPI_Barrier(MPI_COMM_WORLD)
-        MPI_Finalize()
-        pass
-    pass
-
-unittest.main()
+
+        # Some cleanup that still needs MPI communication, so it has to be done before MPI.Finalize()
+        parafield.release()
+        paramesh.release()
+        dec.release()
+        target_group.release()
+        source_group.release()
+        MPI.COMM_WORLD.Barrier()
+
+    def testInterpKernelDEC_2D_py_3(self):
+        """ Test on a question that often comes back: should I re-synchronize() / re-attach() each time that
+        I want to send a new field? 
+        Basic answer: 
+          - you do not have to re-synchronize, but you can re-attach a new field, as long as it has the same support.
+        WARNING: this differs in OverlapDEC ...
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+
+        # Define two processor groups
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        source_group = MPIProcessorGroup(interface, procs_source)
+        target_group = MPIProcessorGroup(interface, procs_target)
+        idec = InterpKernelDEC(source_group, target_group)
+
+        MPI.COMM_WORLD.Barrier()  # really necessary??
+
+        #
+        # OK, let's go DEC !!
+        #
+        mul = 1
+        for t in range(3):  # Emulating a time loop for example
+            if source_group.containsMyRank():
+                _, fieldS = self.getPartialSource(rank)
+                fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
+                fS2 = fieldS.deepCopy()
+                fS2.setMesh(fieldS.getMesh())
+                idec.attachLocalField(fS2)         # each time, but same support!
+                if t == 0:
+                    idec.synchronize()             # only once!
+                das = fS2.getArray()
+                das *= t+1
+                idec.sendData()                    # each time!
+
+            if target_group.containsMyRank():
+                mshT, fieldT = self.getPartialTarget(rank)
+                fieldT.setNature(IntensiveMaximum)
+                fT2 = fieldT.deepCopy()
+                fT2.setMesh(fieldT.getMesh())
+                idec.attachLocalField(fT2)          # each time, but same support!
+                if t == 0:
+                    idec.synchronize()              # only once!
+                idec.recvData()                     # each time!
+                # Now the actual checks:
+                mul = t+1
+                if rank == 2:
+                    self.assertEqual(fT2.getArray().getValues(), [1.0*mul, 9.0*mul])
+                elif rank == 3:
+                    self.assertEqual(fT2.getArray().getValues(), [5.0*mul, 13.0*mul])
+
+        # Release the DEC (this involves MPI exchanges -- notably the release of the communicator -- so it is better done before MPI.Finalize())
+        idec.release()
+        source_group.release()
+        target_group.release()
+        MPI.COMM_WORLD.Barrier()
+
+    def test_InterpKernelDEC_default(self):
+        """
+        [EDF27375]: put a default value in the non-intersecting case
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        target_group = MPIProcessorGroup(interface, procs_target)
+        source_group = MPIProcessorGroup(interface, procs_source)
+        dec = InterpKernelDEC(source_group, target_group)
+        #
+        MPI.COMM_WORLD.Barrier()
+        if source_group.containsMyRank():
+            mesh = eval("Source_Proc_{}".format(rank))()
+            nb_local=mesh.getNumberOfCells()
+            field = MEDCouplingFieldDouble(ON_CELLS)
+            field.setNature(IntensiveMaximum)
+            field.setMesh(mesh)
+            arr = DataArrayDouble(nb_local) ; arr.iota() ; arr += rank
+            field.setArray(arr)
+            dec.attachLocalField(field)
+            dec.synchronizeWithDefaultValue(-2000.0)
+            dec.sendData()
+            # target -> source
+            dec.recvData()
+            if rank == 0:
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6,0.6,-2000]),1e-12))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([2])) )
+            if rank == 1:
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([1.0,-2000]),1e-12))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([1])) )
+        else:
+            mesh = eval("Target_Proc_{}".format(rank))()
+            nb_local=mesh.getNumberOfCells()
+            field = MEDCouplingFieldDouble(ON_CELLS)
+            field.setNature(IntensiveMaximum)
+            field.setMesh(mesh)
+            arr = DataArrayDouble(nb_local) ; arr[:] = -20
+            field.setArray(arr)
+            dec.attachLocalField(field)
+            dec.synchronizeWithDefaultValue(-1000.0)
+            dec.recvData()
+            if rank == 2:
+                # matrix S0 / T2 = [[(0,S0,1.0),(1,S0,1.5)]]
+                # IntensiveMaximum => [[(0,S0,1/2.5),(1,S0,1.5/2.5)]]
+                #
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6]),1e-12))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([])) )
+            if rank == 3:
+                # matrix S1 / T3 = [[],[(0,S1,1.0)],[(0,S1,2.0)],[]]
+                # IntensiveMaximum => [[],[(0,S1,1.0/1.0)],[(0,S1,2.0/2.0)],[]]
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([-1000.0, 1.0, 1.0, -1000.0]),1e-8))
+                self.assertTrue( dec.retrieveNonFetchedIds().isEqual(DataArrayInt([0,3])) )
+            # target -> source
+            dec.sendData()
+
+        # Some cleanup that still needs MPI communication, so it has to be done before MPI.Finalize()
+        dec.release()
+        target_group.release()
+        source_group.release()
+        MPI.COMM_WORLD.Barrier()
+
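+# Helper meshes for test_InterpKernelDEC_default(): the source pieces (procs 0 and 1) and the
+# target pieces (procs 2 and 3) only partially overlap, so some cells on each side are never fetched.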
+def Source_Proc_0():
+    coo = DataArrayDouble([(0,2),(2,2),(4,2),(0,4),(2,4),(4,4),(0,6),(2,6)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
+    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
+    m.insertNextCell(NORM_QUAD4,[3,4,7,6])
+    return m
+
+def Source_Proc_1():
+    coo = DataArrayDouble([(6,2),(8,2),(10,2),(6,4),(8,4),(10,4)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
+    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
+    return m
+
+def Target_Proc_2():
+    coo = DataArrayDouble([(1,0),(3.5,0),(1,3),(3.5,3)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,3,2])
+    return m
+
+def Target_Proc_3():
+    coo = DataArrayDouble([(6,0),(7,0),(8,0),(9,0),(10,0),
+                           (6,1),(7,1),(9,1),(10,1),
+                           (7,3),(8,3),
+                           (6,4),(7,4)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,6,5])
+    m.insertNextCell(NORM_QUAD4,[1,2,10,9])
+    m.insertNextCell(NORM_QUAD4,[5,6,12,11])
+    m.insertNextCell(NORM_QUAD4,[3,4,8,7])
+    return m
+
+if __name__ == "__main__":
+    unittest.main()
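+    # Note: unittest.main() exits the interpreter by default, so the MPI.Finalize() below is
+    # normally not reached; mpi4py finalizes MPI automatically at exit anyway.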
+    MPI.Finalize()