# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2023  CEA, EDF
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
from medcoupling import *
from ParaMEDMEMTestTools import WriteInTmpDir
import os
import unittest
import math
from mpi4py import MPI

class ParaMEDMEM_SC_DEC_Tests(unittest.TestCase):
    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
    """
    def generateFullMeshField(self):
        """ The complete mesh: 4 squares, each divided in 2 diagonally (so 8 cells in total).
        Note that in this case, this is the **only** mesh for the whole problem.
        """
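        # Reading aid (based on MEDCoupling's simplexize(0) convention): quad i of the 2x2 grid
        # becomes triangles 2*i and 2*i+1, and the field holds 2 * (cell id), i.e. values
        # 0, 2, 4, ..., 14 over the 8 cells -- this is what the assertions in
        # test_StructuredCoincidentDEC_py_1() rely on.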
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0,1,2])
        m1.setCoords(da, da)
        msh = m1.buildUnstructured()
        msh.simplexize(0)
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh); fld.setName("source_F")
        da = DataArrayDouble(msh.getNumberOfCells())
        da.iota()
        da *= 2
        fld.setArray(da)
        return msh, fld

    # Below, the functions emulating the set up of a piece of the mesh being owned by
    # a given processor.
    def getPartialSource(self, rank):
        msh, f = self.generateFullMeshField()
        sub_ids = [0,1,4,5] if rank == 0 else [2,3,6,7]   # cells owned by source procs 0 and 1
        sub_m, sub_f = msh[sub_ids], f[sub_ids]
        sub_m.zipCoords()
        return sub_m, sub_f

    def getPartialTarget(self, rank):
        msh, f = self.generateFullMeshField()
        sub_ids = [0,1,2,3] if rank == 2 else [4,5,6,7]   # cells owned by target procs 2 and 3
        sub_m, sub_f = msh[sub_ids], f[sub_ids]
        sub_m.zipCoords()
        return sub_m, sub_f

    @WriteInTmpDir
    def test_StructuredCoincidentDEC_py_1(self):
        """ This test illustrates a basic use of the StructuredCoincidentDEC, which makes it possible to
        redistribute a field/mesh which is already scattered over several processors into a different configuration.
        Look at the C++ documentation of the class for more information.
        Note that in the case of the StructuredCoincidentDEC no interpolation whatsoever is performed. This is
        only really a redistribution of the data among the processors.
        """
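        # Sketch of the exchange below: procs 0 and 1 each attach half of the 8 cells (the source
        # layout), procs 2 and 3 each attach half of the cells (the target layout), and the DEC
        # ships the flat arrays of values from the first layout to the second, index by index.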
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            print("Should be run on 4 procs!")
            return
        # Define two processor groups
        nproc_source = 2
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))
        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)
        scdec = StructuredCoincidentDEC(source_group, target_group)
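        # Typical DEC lifecycle, as used below: attachLocalField() on each side, a collective
        # synchronize(), then sendData() on the source group / recvData() on the target group,
        # and finally release() before MPI finalization.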
        # Write out full size meshes/fields for inspection
        if rank == 0:
            _, fld = self.generateFullMeshField()
            WriteField("./source_field_FULL.med", fld, True)
        # OK, let's go DEC !!
        if source_group.containsMyRank():
            _, fieldS = self.getPartialSource(rank)
            fieldS.setNature(IntensiveMaximum)  # The only policy supported for now ...
            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
            scdec.attachLocalField(fieldS)
            scdec.synchronize()
            scdec.sendData()
        if target_group.containsMyRank():
            mshT, fieldT = self.getPartialTarget(rank)
            fieldT.setNature(IntensiveMaximum)
            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
            scdec.attachLocalField(fieldT)
            scdec.synchronize()
            scdec.recvData()
        # Now the actual checks:
        if rank == 2:
            self.assertEqual(fieldT.getArray().getValues(), [0.0, 2.0, 8.0, 10.0])
        elif rank == 3:
            self.assertEqual(fieldT.getArray().getValues(), [4.0, 6.0, 12.0, 14.0])
        # Release DEC (this involves MPI exchanges -- so better be done before MPI.Finalize())
        scdec.release()
        source_group.release()
        target_group.release()

        MPI.COMM_WORLD.Barrier()

    @WriteInTmpDir
    def test_StructuredCoincidentDEC_py_2(self):
        """ More involved tests using Para* objects ...
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size < 4:
            print("Should be run on >= 4 procs!")
            return
        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, 0, 2)
        target_group = MPIProcessorGroup(interface, 3, size-1)
        self_group = MPIProcessorGroup(interface, rank, rank)
        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
        if not os.path.isdir(data_dir):
            data_dir = os.environ.get('MED_RESOURCES_DIR', "::").split(":")[1]
        tmp_dir = os.environ.get('TMP', "")
        if tmp_dir == "":
            tmp_dir = "/tmp"

        filename_xml1 = os.path.join(data_dir, "square1_split")
        filename_2 = os.path.join(data_dir, "square1.med")
        filename_seq_wr = "."
        filename_seq_med = os.path.join(".", "myWrField_seq_pointe221.med")
        dec = StructuredCoincidentDEC(source_group, target_group)

        MPI.COMM_WORLD.Barrier()
        if source_group.containsMyRank():
            filename = filename_xml1 + str(rank+1) + ".med"
            meshname = "Mesh_2_" + str(rank+1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, source_group, "source mesh")
            comptopo = ComponentTopology(6)
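            # ComponentTopology(6): each source proc keeps all 6 components of its cells, whereas
            # the target side below uses ComponentTopology(6, target_group) to spread the
            # components themselves across the target procs.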
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            global_numbering = paramesh.getGlobalNumberingCell2()
            value = []
            for ielem in range(nb_local):
                for icomp in range(6):
                    value.append(global_numbering[ielem]*6.0 + icomp)
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.attachLocalField(parafield)
            dec.synchronize()
            dec.sendData()
        if target_group.containsMyRank():
            meshname2 = "Mesh_2"
            mesh = ReadUMeshFromFile(filename_2, meshname2, 0)
            paramesh = ParaMESH(mesh, self_group, "target mesh")
            comptopo = ComponentTopology(6, target_group)
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [0.0]*(nb_local*comptopo.nbLocalComponents())
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.attachLocalField(parafield)
            dec.synchronize()
            dec.recvData()
            # Check the received field against the pattern written by the source side
            recv_value = parafield.getField().getArray().getValues()
            for i in range(nb_local):
                first = comptopo.firstLocalComponent()
                for icomp in range(comptopo.nbLocalComponents()):
                    self.assertTrue(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp] - float(i*6+icomp+first)) < 1e-12)
        # Release DEC (this involves MPI exchanges -- so better be done before MPI.Finalize())
        dec.release()
        target_group.release()
        source_group.release()
        self_group.release()

        MPI.COMM_WORLD.Barrier()

if __name__ == "__main__":
    unittest.main()