#!/usr/bin/env python
#  -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2021  CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#

from medcoupling import *
from ParaMEDMEMTestTools import WriteInTmpDir
import os
import unittest
import math
from mpi4py import MPI


class ParaMEDMEM_SC_DEC_Tests(unittest.TestCase):
    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
    """

    def generateFullMeshField(self):
        """ The complete mesh: 4 squares, each divided in 2 diagonally (so 8 triangular cells in total).
        Note that in this case, this is the **only** mesh for the whole problem.
        """
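        # Build a 2x2 Cartesian mesh on [0,2]x[0,2], convert it to an unstructured mesh and
        # split every quadrangle into two triangles.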
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0, 1, 2])
        m1.setCoords(da, da)
        msh = m1.buildUnstructured()

        msh.simplexize(0)
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh)
        fld.setName("source_F")
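        # Cell field whose value on cell i is 2*i, i.e. [0., 2., 4., ..., 14.] over the 8 cells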
        da = DataArrayDouble(msh.getNumberOfCells())
        da.iota()
        da *= 2
        fld.setArray(da)
        return msh, fld

    #
    # Below, the functions emulating the set-up of the piece of the mesh owned by
    # a given processor.
    #
    def getPartialSource(self, rank):
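        """ Source layout: rank 0 owns cells [0,1,4,5] of the full mesh, rank 1 owns cells [2,3,6,7]. """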
        msh, f = self.generateFullMeshField()
        if rank == 0:
            sub_ids = [0, 1, 4, 5]
        elif rank == 1:
            sub_ids = [2, 3, 6, 7]
        sub_m, sub_f = msh[sub_ids], f[sub_ids]
        sub_m.zipCoords()
        return sub_m, sub_f

    def getPartialTarget(self, rank):
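        """ Target layout: rank 2 owns cells [0,1,2,3] of the full mesh, rank 3 owns cells [4,5,6,7]. """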
        msh, f = self.generateFullMeshField()
        if rank == 2:
            sub_ids = [0, 1, 2, 3]
        elif rank == 3:
            sub_ids = [4, 5, 6, 7]
        sub_m, sub_f = msh[sub_ids], f[sub_ids]
        sub_m.zipCoords()
        return sub_m, sub_f

    @WriteInTmpDir
    def test_StructuredCoincidentDEC_py_1(self):
        """ This test illustrates a basic use of the StructuredCoincidentDEC, which allows to
        redistribute a field/mesh that is already scattered over several processors into a different configuration.
        Look at the C++ documentation of the class for more information.
        Note that in the case of the StructuredCoincidentDEC no interpolation whatsoever is performed. This is only
        really a redistribution of the data among the processors.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            print("Should be run on 4 procs!")
            return

        # Define two processor groups
        nproc_source = 2
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))

        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)

        scdec = StructuredCoincidentDEC(source_group, target_group)

        # Write out full size meshes/fields for inspection
        if rank == 0:
            _, fld = self.generateFullMeshField()
            WriteField("./source_field_FULL.med", fld, True)

        #
        # OK, let's go DEC !!
        #
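        # Source ranks: attach the local field, synchronize the exchange topology with the
        # target side, then send. Target ranks: attach and synchronize likewise, then receive.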
        if source_group.containsMyRank():
            _, fieldS = self.getPartialSource(rank)
            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
            scdec.attachLocalField(fieldS)
            scdec.synchronize()
            scdec.sendData()

        if target_group.containsMyRank():
            mshT, fieldT = self.getPartialTarget(rank)
            fieldT.setNature(IntensiveMaximum)
            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
            scdec.attachLocalField(fieldT)
            scdec.synchronize()
            scdec.recvData()
            # Now the actual checks:
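            # The DEC performs a pure redistribution of the flat data (no geometric matching),
            # so rank 2 should end up with rank 0's values and rank 3 with rank 1's.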
            if rank == 2:
                self.assertEqual(fieldT.getArray().getValues(), [0.0, 2.0, 8.0, 10.0])
            elif rank == 3:
                self.assertEqual(fieldT.getArray().getValues(), [4.0, 6.0, 12.0, 14.0])

        # Release the DEC (this involves MPI exchanges -- so it is better done before MPI.Finalize())
        scdec.release()
        source_group.release()
        target_group.release()

        MPI.COMM_WORLD.Barrier()

    @WriteInTmpDir
    def test_StructuredCoincidentDEC_py_2(self):
        """ A more involved test using the Para* objects (ParaMESH / ParaFIELD / ComponentTopology) ...
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank

        if size < 4:
            print("Should be run on >= 4 procs!")
            return

        interface = CommInterface()

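        # Three processor groups: the source group, the target group (the remaining ranks),
        # and a one-rank group holding only the local processor.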
        source_group = MPIProcessorGroup(interface, 0, 2)
        target_group = MPIProcessorGroup(interface, 3, size - 1)
        self_group = MPIProcessorGroup(interface, rank, rank)

        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
        if not os.path.isdir(data_dir):
            data_dir = os.environ.get('MED_RESOURCES_DIR', "::").split(":")[1]
        tmp_dir = os.environ.get('TMP', "")
        if tmp_dir == '':
            tmp_dir = "/tmp"

        filename_xml1 = os.path.join(data_dir, "square1_split")
        filename_2 = os.path.join(data_dir, "square1.med")
        filename_seq_wr = "."
        filename_seq_med = os.path.join(".", "myWrField_seq_pointe221.med")

        dec = StructuredCoincidentDEC(source_group, target_group)

        MPI.COMM_WORLD.Barrier()
        if source_group.containsMyRank():
            filename = filename_xml1 + str(rank + 1) + ".med"
            meshname = "Mesh_2_" + str(rank + 1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, source_group, "source mesh")
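            # On the source side every processor carries all 6 components of the field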
            comptopo = ComponentTopology(6)
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            global_numbering = paramesh.getGlobalNumberingCell2()
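            # Fill the field so that the cell with global id g carries the values 6*g, 6*g+1, ..., 6*g+5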
            value = []
            for ielem in range(nb_local):
                for icomp in range(6):
                    value.append(global_numbering[ielem] * 6.0 + icomp)
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.setMethod("P0")
            dec.attachLocalField(parafield)
            dec.synchronize()
            dec.sendData()

        if target_group.containsMyRank():
            meshname2 = "Mesh_2"
            mesh = ReadUMeshFromFile(filename_2, meshname2, 0)
            paramesh = ParaMESH(mesh, self_group, "target mesh")
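            # On the target side the 6 components are distributed over the processors of the group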
            comptopo = ComponentTopology(6, target_group)
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [0.0] * (nb_local * comptopo.nbLocalComponents())
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.setMethod("P0")
            dec.attachLocalField(parafield)
            dec.synchronize()
            dec.recvData()
            recv_value = parafield.getField().getArray().getValues()
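            # Each local cell i should have received, in its local component slot icomp, the value
            # 6*i + icomp + first, where first is the index of the first component stored locally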
            for i in range(nb_local):
                first = comptopo.firstLocalComponent()
                for icomp in range(comptopo.nbLocalComponents()):
                    self.assertTrue(math.fabs(recv_value[i * comptopo.nbLocalComponents() + icomp] - float(i * 6 + icomp + first)) < 1e-12)

        # Release the DEC (this involves MPI exchanges -- so it is better done before MPI.Finalize())
        parafield.release()
        paramesh.release()
        dec.release()
        target_group.release()
        source_group.release()
        self_group.release()

        MPI.COMM_WORLD.Barrier()

if __name__ == "__main__":
    unittest.main()
    MPI.Finalize()