# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2021 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
import math
import os
import unittest

from mpi4py import MPI

from medcoupling import *
from ParaMEDMEMTestTools import WriteInTmpDir
class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
    """ See test_StructuredCoincidentDEC_py_1() for a quick start.
    """

    def generateFullSource(self):
        """ The complete source mesh: 4 squares each divided in 2 diagonaly (so 8 cells in total) """
        msh = self.generateFullTarget()
        # NOTE(review): splitting each square into 2 triangles is required to
        # obtain the 8 cells announced in the docstring -- confirm simplexize arg.
        msh.simplexize(0)
        msh.setName("src_mesh")
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        fld.setMesh(msh); fld.setName("source_F");
        da = DataArrayDouble(msh.getNumberOfCells())
        # Cell i carries the value 2*i: these are the values checked on the
        # receiving side in testInterpKernelDEC_2D_py_1 ([1.0, 9.0] / [5.0, 13.0]).
        da.iota()
        da *= 2
        fld.setArray(da)
        return msh, fld

    def generateFullTarget(self):
        """ The complete target mesh: 4 squares """
        m1 = MEDCouplingCMesh("tgt_msh")
        da = DataArrayDouble([0, 1, 2])
        m1.setCoords(da, da)  # 2x2 Cartesian grid over [0,2]x[0,2] -> 4 square cells
        msh = m1.buildUnstructured()
        msh.setName("tgt_mesh")
        return msh

    #
    # Below, the two functions emulating the set up of a piece of the source and target mesh
    # on each proc. Obviously in real world problems, this comes from your code and is certainly
    # not computed by cutting again from scratch the full-size mesh!!
    #
    def getPartialSource(self, rank):
        """ Will return an empty mesh piece for rank=2 and 3 """
        msh, f = self.generateFullSource()
        if rank == 0:
            sub_m, sub_f = msh[0:4], f[0:4]
        elif rank == 1:
            sub_m, sub_f = msh[4:8], f[4:8]
        # Drop the nodes that no longer belong to any cell of this piece.
        sub_m.zipCoords()
        return sub_m, sub_f

    def getPartialTarget(self, rank):
        """ One square for each rank """
        msh = self.generateFullTarget()
        # Ranks 2 and 3 form the target group in the 4-proc test below.
        if rank == 2:
            sub_m = msh[[0, 2]]
        else:
            sub_m = msh[[1, 3]]
        sub_m.zipCoords()
        # Receiving side must prepare an empty field that will be filled by DEC:
        fld = MEDCouplingFieldDouble(ON_CELLS, ONE_TIME)
        da = DataArrayDouble(sub_m.getNumberOfCells())
        fld.setArray(da)
        fld.setName("tgt_F")
        fld.setMesh(sub_m)
        return sub_m, fld

    @WriteInTmpDir
    def testInterpKernelDEC_2D_py_1(self):
        """ This test illustrates a basic use of the InterpKernelDEC.
        Look at the C++ documentation of the class for more information.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 4:
            print("Should be run on 4 procs!")
            return

        # Define two processor groups
        nproc_source = 2
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source, size))

        interface = CommInterface()
        source_group = MPIProcessorGroup(interface, procs_source)
        target_group = MPIProcessorGroup(interface, procs_target)
        idec = InterpKernelDEC(source_group, target_group)

        # Write out full size meshes/fields for inspection
        if rank == 0:
            _, fld = self.generateFullSource()
            mshT = self.generateFullTarget()
            WriteField("./source_field_FULL.med", fld, True)
            WriteUMesh("./target_mesh_FULL.med", mshT, True)

        MPI.COMM_WORLD.Barrier()  # really necessary??

        #
        # OK, let's go DEC !!
        #
        if source_group.containsMyRank():
            _, fieldS = self.getPartialSource(rank)
            fieldS.setNature(IntensiveMaximum)   # The only policy supported for now ...
            WriteField("./source_field_part_%d.med" % rank, fieldS, True)
            idec.attachLocalField(fieldS)
            idec.synchronize()
            idec.sendData()

        if target_group.containsMyRank():
            mshT, fieldT = self.getPartialTarget(rank)
            fieldT.setNature(IntensiveMaximum)
            WriteUMesh("./target_mesh_part_%d.med" % rank, mshT, True)
            idec.attachLocalField(fieldT)
            idec.synchronize()
            idec.recvData()
            # Now the actual checks:
            if rank == 2:
                self.assertEqual(fieldT.getArray().getValues(), [1.0, 9.0])
            elif rank == 3:
                self.assertEqual(fieldT.getArray().getValues(), [5.0, 13.0])

        # Release DEC (this involves MPI exchanges -- notably the release of the communicator -- so better be done before MPI.Finalize()
        idec.release()
        source_group.release()
        target_group.release()
        MPI.COMM_WORLD.Barrier()

    @WriteInTmpDir
    def test_InterpKernelDEC_2D_py_2(self):
        """ More involved test using Para* objects.
        """
        size = MPI.COMM_WORLD.size
        rank = MPI.COMM_WORLD.rank
        if size != 5:
            print("Should be run on 5 procs!")
            return

        # NOTE(review): with 5 procs, ranks 0..2 are sources and, because of the
        # "+ 1" below, only ranks 4.. are targets -- rank 3 sits out; confirm
        # this asymmetry is intentional.
        nproc_source = 3
        procs_source = list(range(nproc_source))
        procs_target = list(range(size - nproc_source + 1, size))

        interface = CommInterface()
        target_group = MPIProcessorGroup(interface, procs_target)
        source_group = MPIProcessorGroup(interface, procs_source)
        dec = InterpKernelDEC(source_group, target_group)

        # Locate the MED resource files shipped with the MEDCoupling install.
        data_dir = os.path.join(os.environ['MEDCOUPLING_ROOT_DIR'], "share", "resources", "med")
        if not os.path.isdir(data_dir):
            data_dir = os.environ.get('MED_RESOURCES_DIR', "::").split(":")[1]

        filename_xml1 = os.path.join(data_dir, "square1_split")
        filename_xml2 = os.path.join(data_dir, "square2_split")

        MPI.COMM_WORLD.Barrier()
        if source_group.containsMyRank():
            # Each source proc reads its own pre-split piece of the source mesh.
            filename = filename_xml1 + str(rank + 1) + ".med"
            meshname = "Mesh_2_" + str(rank + 1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, source_group, "source mesh")
            comptopo = ComponentTopology()
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [1.0] * nb_local
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.attachLocalField(icocofield)

        if target_group.containsMyRank():
            # Target procs read their piece of the target mesh and prepare a
            # zero-valued receiving field.
            filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
            meshname = "Mesh_3_" + str(rank - nproc_source + 1)
            mesh = ReadUMeshFromFile(filename, meshname, 0)
            paramesh = ParaMESH(mesh, target_group, "target mesh")
            comptopo = ComponentTopology()
            parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
            parafield.getField().setNature(IntensiveMaximum)
            nb_local = mesh.getNumberOfCells()
            value = [0.0] * nb_local
            parafield.getField().setValues(value)
            icocofield = ICoCoMEDDoubleField(parafield.getField())
            dec.attachLocalField(icocofield)

        # Round-trip exchange: the volume integral of the source field must be
        # conserved after send + receive-back (checked to 1e-8).
        if source_group.containsMyRank():
            field_before_int = parafield.getVolumeIntegral(0, True)
            dec.synchronize()
            dec.setForcedRenormalization(False)
            dec.sendData()
            dec.recvData()
            field_after_int = parafield.getVolumeIntegral(0, True)
            self.assertTrue(math.fabs(field_after_int - field_before_int) < 1e-8)

        if target_group.containsMyRank():
            dec.synchronize()
            dec.setForcedRenormalization(False)
            dec.recvData()
            dec.sendData()

        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
        parafield.release()
        paramesh.release()
        dec.release()
        target_group.release()
        source_group.release()
        MPI.COMM_WORLD.Barrier()
227 if __name__ == "__main__":