#!/usr/bin/env python
#  -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2021  CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#

from medcoupling import *
from ParaMEDMEMTestTools import WriteInTmpDir
import sys, os
import unittest
import math
from mpi4py import MPI


class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
    """
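
    # These tests are meant to be launched through MPI, e.g. something like
    #   mpirun -np 4 python3 test_InterpKernelDEC.py
    # (the exact command depends on your MPI setup). The first test expects exactly
    # 4 processes and the second exactly 5; with any other communicator size the test
    # prints a message and returns without checking anything.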
    def generateFullSource(self):
        """ The complete source mesh: 4 squares, each split in two along a diagonal (so 8 triangle cells in total) """
        msh = self.generateFullTarget()
        msh.simplexize(0)
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh)
        fld.setName("source_F")
        da = DataArrayDouble(msh.getNumberOfCells())
        da.iota()
        da *= 2
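        # After iota() and the scaling, the source field holds [0., 2., 4., ..., 14.] on the
        # 8 triangles; the per-rank checks at the end of testInterpKernelDEC_2D_py_1 rely on this.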
        fld.setArray(da)
        return msh, fld

    def generateFullTarget(self):
        """ The complete target mesh: 4 squares """
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0,1,2])
        m1.setCoords(da, da)
        msh = m1.buildUnstructured()
        return msh

    #
    # The two functions below emulate the set up of a piece of the source and target mesh
    # on each proc. Obviously, in real world problems this comes from your code and is certainly
    # not computed by cutting the full-size mesh from scratch again!
    #
    def getPartialSource(self, rank):
        """ Source piece for ranks 0 and 1 (the source procs): first and second half of the
        full source mesh and field. Only called on the source side.
        """
        msh, f = self.generateFullSource()
        if rank == 0:
            sub_m, sub_f = msh[0:4], f[0:4]
        elif rank == 1:
            sub_m, sub_f = msh[4:8], f[4:8]
        sub_m.zipCoords()
        return sub_m, sub_f

    def getPartialTarget(self, rank):
        """ Target piece for ranks 2 and 3 (the target procs): two squares each """
        msh = self.generateFullTarget()
        if rank == 2:
            sub_m = msh[[0,2]]
        elif rank == 3:
            sub_m = msh[[1,3]]
        sub_m.zipCoords()
        # Receiving side must prepare an empty field that will be filled by the DEC:
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        da = DataArrayDouble(sub_m.getNumberOfCells())
        fld.setArray(da)
        fld.setName("tgt_F")
        fld.setMesh(sub_m)
        return sub_m, fld

    @WriteInTmpDir
    def testInterpKernelDEC_2D_py_1(self):
        """ This test illustrates a basic use of the InterpKernelDEC.
        Look at the C++ documentation of the class for more information.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            print("Should be run on 4 procs!")
            return

        # Define two processor groups
        nproc_source = 2
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))

        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)
        idec = InterpKernelDEC(source_group, target_group)
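        # The DEC couples the two groups: synchronize() computes the interpolation (remapping)
        # between the source and target supports, then sendData()/recvData() perform the
        # actual exchange of field values.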

        # Write out full size meshes/fields for inspection
        if rank == 0:
            _, fld = self.generateFullSource()
            mshT = self.generateFullTarget()
            WriteField("./source_field_FULL.med", fld, True)
            WriteUMesh("./target_mesh_FULL.med", mshT, True)

        MPI.COMM_WORLD.Barrier()  # make sure rank 0 is done writing the full-size files (probably not strictly needed)

        #
        # OK, let's go DEC !!
        #
        if source_group.containsMyRank():
            _, fieldS = self.getPartialSource(rank)
            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
            idec.attachLocalField(fieldS)
            idec.synchronize()
            idec.sendData()

        if target_group.containsMyRank():
            mshT, fieldT = self.getPartialTarget(rank)
            fieldT.setNature(IntensiveMaximum)
            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
            idec.attachLocalField(fieldT)
            idec.synchronize()
            idec.recvData()
            # Now the actual checks:
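            # With the IntensiveMaximum nature, each target square receives the intersection-area
            # weighted average of the two source triangles it covers, i.e. the mean of two
            # consecutive even values of the source field (e.g. (0+2)/2 = 1 on the first square).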
            if rank == 2:
                self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0])
            elif rank == 3:
                self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0])

        # Release the DEC. This involves MPI exchanges (notably the release of the communicator),
        # so it is better done before MPI.Finalize().
        idec.release()
        source_group.release()
        target_group.release()
        MPI.COMM_WORLD.Barrier()

    @WriteInTmpDir
    def test_InterpKernelDEC_2D_py_2(self):
        """ More involved test using Para* objects.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 5:
            print("Should be run on 5 procs!")
            return

        print(rank)
        nproc_source = 3
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source + 1, size))

        interface = CommInterface()
        target_group = MPIProcessorGroup(interface, procs_target)
        source_group = MPIProcessorGroup(interface, procs_source)
        dec = InterpKernelDEC(source_group, target_group)

        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
        if not os.path.isdir(data_dir):
            data_dir = os.environ.get('MED_RESOURCES_DIR', "::").split(":")[1]

        filename_xml1 = os.path.join(data_dir, "square1_split")
        filename_xml2 = os.path.join(data_dir, "square2_split")

        MPI.COMM_WORLD.Barrier()
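        # Each rank reads its own pre-split piece of the MED resource meshes: source ranks read
        # square1_split<rank+1>.med (mesh "Mesh_2_<rank+1>"), target ranks read square2_split<n>.med
        # (mesh "Mesh_3_<n>"), with n numbered from 1 within the target group.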
        if source_group.containsMyRank():
            filename = filename_xml1 + str(rank+1) + ".med"
            meshname = "Mesh_2_" + str(rank+1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, source_group, "source mesh")
            comptopo = ComponentTopology()
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [1.0]*nb_local
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDField(parafield.getField())
            dec.attachLocalField(icocofield)
        else:
            filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
            meshname = "Mesh_3_" + str(rank - nproc_source + 1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, target_group, "target mesh")
            comptopo = ComponentTopology()
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [0.0]*nb_local
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDField(parafield.getField())
            dec.attachLocalField(icocofield)

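        # Round trip on the source side: send the field to the target side, receive it back,
        # and check that the volume integral is preserved by the exchange.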
        if source_group.containsMyRank():
            field_before_int = parafield.getVolumeIntegral(0, True)
            dec.synchronize()
            dec.setForcedRenormalization(False)
            dec.sendData()
            dec.recvData()
            field_after_int = parafield.getVolumeIntegral(0, True)
            self.assertTrue(math.fabs(field_after_int - field_before_int) < 1e-8)
        else:
            dec.synchronize()
            dec.setForcedRenormalization(False)
            dec.recvData()
            dec.sendData()

        # Some cleanup that still needs MPI communication, so it must be done before MPI_Finalize()
        parafield.release()
        paramesh.release()
        dec.release()
        target_group.release()
        source_group.release()
        MPI.COMM_WORLD.Barrier()

if __name__ == "__main__":
    try:
        unittest.main()
    finally:
        # unittest.main() exits via sys.exit(), so finalize MPI in a finally block
        # to make sure it is actually reached.
        MPI.Finalize()