From 52ee209e3bf07fc723d330e9726786654490c55f Mon Sep 17 00:00:00 2001 From: =?utf8?q?C=C3=A9dric=20Aguerre?= Date: Tue, 27 Oct 2015 14:23:34 +0100 Subject: [PATCH] add mpi part --- CMakeLists.txt | 29 +- cmake_files/FindMEDFile.cmake | 5 + cmake_files/FindMetis.cmake | 4 + cmake_files/FindParMetis.cmake | 39 + cmake_files/FindScotch.cmake | 4 + resources/BDC-714.sauv | Bin 0 -> 60608 bytes resources/Box1.med | Bin 0 -> 26788 bytes resources/Box1Moderate.med | Bin 0 -> 158788 bytes resources/Box2.med | Bin 0 -> 26612 bytes resources/Box2Moderate.med | Bin 0 -> 150100 bytes resources/Box3.med | Bin 0 -> 27908 bytes resources/BoxEvenSmaller1.med | Bin 0 -> 29476 bytes resources/BoxHexa1.med | Bin 0 -> 34036 bytes resources/BoxHexa2.med | Bin 0 -> 31124 bytes resources/BoxModSmall1.med | Bin 0 -> 51924 bytes resources/BoxModSmall2.med | Bin 0 -> 47956 bytes resources/BoxTetra2.med | Bin 0 -> 28196 bytes resources/CMakeLists.txt | 88 +- resources/ComplexIncludedTetra.med | Bin 0 -> 40140 bytes resources/ComplexIncludingTetra.med | Bin 0 -> 41636 bytes resources/CornerTetra.med | Bin 0 -> 25772 bytes resources/DegenEdgeXY.med | Bin 0 -> 25772 bytes resources/DegenFaceXYZ.med | Bin 0 -> 25772 bytes resources/DegenTranslatedInPlane.med | Bin 0 -> 25772 bytes resources/DividedGenTetra1.med | Bin 0 -> 57532 bytes resources/DividedGenTetra2.med | Bin 0 -> 27380 bytes resources/DividedUnitTetra.med | Bin 0 -> 26804 bytes resources/DividedUnitTetraSimpler.med | Bin 0 -> 26060 bytes resources/GenTetra1.med | Bin 0 -> 25836 bytes resources/GenTetra2.med | Bin 0 -> 25836 bytes resources/GeneralTetra.med | Bin 0 -> 25772 bytes resources/HalfstripOnly.med | Bin 0 -> 25772 bytes resources/HalfstripOnly2.med | Bin 0 -> 25772 bytes resources/MovedHexaBox1.med | Bin 0 -> 70308 bytes resources/MovedHexaBox2.med | Bin 0 -> 48948 bytes resources/NudgedDividedUnitTetra.med | Bin 0 -> 26804 bytes resources/NudgedDividedUnitTetraSimpler.med | Bin 0 -> 26060 bytes resources/NudgedSimpler.med | Bin 0 -> 25772 bytes resources/NudgedTetra.med | Bin 0 -> 25772 bytes resources/SimpleHalfstripOnly.med | Bin 0 -> 25772 bytes resources/SimpleIncludedTetra.med | Bin 0 -> 25772 bytes resources/SimpleIncludingTetra.med | Bin 0 -> 25772 bytes resources/TinyBox.med | Bin 0 -> 26612 bytes resources/TrickyTetra1.med | Bin 0 -> 25772 bytes resources/UnitTetra.med | Bin 0 -> 25772 bytes resources/UnitTetraDegenT.med | Bin 0 -> 25772 bytes resources/allPillesTest.sauv | 11246 ++++++++++++++++ resources/portico_3subs.sauv | 182 + src/INTERP_KERNELTest/CMakeLists.txt | 2 - src/INTERP_KERNELTest/Interpolation3DTest.cxx | 37 +- src/INTERP_KERNELTest/MeshTestToolkit.txx | 22 +- src/INTERP_KERNELTest/PerfTest.cxx | 48 +- .../TestInterpKernelUtils.cxx | 19 +- src/MEDCoupling/Test/CMakeLists.txt | 5 - src/MEDCoupling_Swig/CMakeLists.txt | 7 - src/MEDLoader/Swig/CMakeLists.txt | 16 +- .../Swig/MEDLoaderCouplingTrainingSession.py | 9 +- src/MEDLoader/Swig/SauvLoaderTest.py | 31 +- src/MEDLoader/Test/CMakeLists.txt | 4 - src/MEDLoader/Test/SauvLoaderTest.cxx | 18 +- src/MEDPartitioner/CMakeLists.txt | 2 +- src/MEDPartitioner/MEDPARTITIONER_metis.c | 20 +- src/MEDPartitioner/Test/CMakeLists.txt | 2 - .../Test/MEDPARTITIONERTest.cxx | 176 +- .../Test/MEDPARTITIONERTest.hxx | 9 +- .../Test/MEDPARTITIONERTestPara.cxx | 123 +- src/MEDPartitioner_Swig/CMakeLists.txt | 4 +- src/MEDPartitioner_Swig/MEDPartitionerTest.py | 4 +- src/ParaMEDLoader/CMakeLists.txt | 49 + src/ParaMEDLoader/ParaMEDFileMesh.cxx | 128 + src/ParaMEDLoader/ParaMEDFileMesh.hxx | 61 
+ src/ParaMEDLoader/ParaMEDLoader.cxx | 65 + src/ParaMEDLoader/ParaMEDLoader.hxx | 42 + src/ParaMEDMEM/BASICS_JR | 339 + src/ParaMEDMEM/BlockTopology.cxx | 336 + src/ParaMEDMEM/BlockTopology.hxx | 70 + src/ParaMEDMEM/CMakeLists.txt | 71 + src/ParaMEDMEM/CommInterface.cxx | 63 + src/ParaMEDMEM/CommInterface.hxx | 92 + src/ParaMEDMEM/ComponentTopology.cxx | 115 + src/ParaMEDMEM/ComponentTopology.hxx | 56 + src/ParaMEDMEM/DEC.cxx | 47 + src/ParaMEDMEM/DEC.hxx | 43 + src/ParaMEDMEM/DECOptions.hxx | 74 + src/ParaMEDMEM/DisjointDEC.cxx | 386 + src/ParaMEDMEM/DisjointDEC.hxx | 86 + src/ParaMEDMEM/ElementLocator.cxx | 718 + src/ParaMEDMEM/ElementLocator.hxx | 109 + src/ParaMEDMEM/ExplicitCoincidentDEC.cxx | 395 + src/ParaMEDMEM/ExplicitCoincidentDEC.hxx | 62 + src/ParaMEDMEM/ExplicitMapping.hxx | 176 + src/ParaMEDMEM/ExplicitTopology.cxx | 109 + src/ParaMEDMEM/ExplicitTopology.hxx | 92 + src/ParaMEDMEM/ICoCoField.cxx | 48 + src/ParaMEDMEM/ICoCoField.hxx | 43 + src/ParaMEDMEM/ICoCoMEDField.cxx | 62 + src/ParaMEDMEM/ICoCoMEDField.hxx | 46 + src/ParaMEDMEM/InterpKernelDEC.cxx | 280 + src/ParaMEDMEM/InterpKernelDEC.hxx | 57 + src/ParaMEDMEM/InterpolationMatrix.cxx | 973 ++ src/ParaMEDMEM/InterpolationMatrix.hxx | 103 + src/ParaMEDMEM/LinearTimeInterpolator.cxx | 54 + src/ParaMEDMEM/LinearTimeInterpolator.hxx | 47 + src/ParaMEDMEM/MPIAccess.cxx | 1088 ++ src/ParaMEDMEM/MPIAccess.hxx | 471 + src/ParaMEDMEM/MPIAccessDEC.cxx | 1054 ++ src/ParaMEDMEM/MPIAccessDEC.hxx | 179 + src/ParaMEDMEM/MPIProcessorGroup.cxx | 266 + src/ParaMEDMEM/MPIProcessorGroup.hxx | 60 + src/ParaMEDMEM/MxN_Mapping.cxx | 317 + src/ParaMEDMEM/MxN_Mapping.hxx | 66 + src/ParaMEDMEM/NonCoincidentDEC.cxx | 398 + src/ParaMEDMEM/NonCoincidentDEC.hxx | 70 + src/ParaMEDMEM/OverlapDEC.cxx | 262 + src/ParaMEDMEM/OverlapDEC.hxx | 60 + src/ParaMEDMEM/OverlapElementLocator.cxx | 369 + src/ParaMEDMEM/OverlapElementLocator.hxx | 92 + src/ParaMEDMEM/OverlapInterpolationMatrix.cxx | 315 + src/ParaMEDMEM/OverlapInterpolationMatrix.hxx | 126 + src/ParaMEDMEM/OverlapMapping.cxx | 673 + src/ParaMEDMEM/OverlapMapping.hxx | 90 + src/ParaMEDMEM/ParaFIELD.cxx | 223 + src/ParaMEDMEM/ParaFIELD.hxx | 66 + src/ParaMEDMEM/ParaGRID.cxx | 74 + src/ParaMEDMEM/ParaGRID.hxx | 51 + src/ParaMEDMEM/ParaMESH.cxx | 122 + src/ParaMEDMEM/ParaMESH.hxx | 82 + src/ParaMEDMEM/ProcessorGroup.cxx | 32 + src/ParaMEDMEM/ProcessorGroup.hxx | 60 + src/ParaMEDMEM/README_JR | 446 + src/ParaMEDMEM/StructuredCoincidentDEC.cxx | 416 + src/ParaMEDMEM/StructuredCoincidentDEC.hxx | 58 + src/ParaMEDMEM/TODO_JR | 50 + src/ParaMEDMEM/TimeInterpolator.cxx | 34 + src/ParaMEDMEM/TimeInterpolator.hxx | 51 + src/ParaMEDMEM/Topology.cxx | 31 + src/ParaMEDMEM/Topology.hxx | 40 + src/ParaMEDMEMTest/CMakeLists.txt | 122 + src/ParaMEDMEMTest/MPI2Connector.cxx | 153 + src/ParaMEDMEMTest/MPI2Connector.hxx | 48 + src/ParaMEDMEMTest/MPIAccessDECTest.cxx | 52 + src/ParaMEDMEMTest/MPIAccessDECTest.hxx | 102 + src/ParaMEDMEMTest/MPIAccessTest.cxx | 52 + src/ParaMEDMEMTest/MPIAccessTest.hxx | 105 + src/ParaMEDMEMTest/MPIMainTest.hxx | 105 + src/ParaMEDMEMTest/ParaMEDMEMTest.cxx | 132 + src/ParaMEDMEMTest/ParaMEDMEMTest.hxx | 188 + src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx | 125 + src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx | 130 + .../ParaMEDMEMTest_BlockTopology.cxx | 123 + .../ParaMEDMEMTest_FabienAPI.cxx | 199 + .../ParaMEDMEMTest_Gauthier1.cxx | 665 + src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx | 194 + .../ParaMEDMEMTest_InterpKernelDEC.cxx | 2283 ++++ .../ParaMEDMEMTest_MEDLoader.cxx | 399 + 
.../ParaMEDMEMTest_MPIProcessorGroup.cxx | 149 + .../ParaMEDMEMTest_NonCoincidentDEC.cxx | 256 + .../ParaMEDMEMTest_OverlapDEC.cxx | 212 + ...ParaMEDMEMTest_StructuredCoincidentDEC.cxx | 160 + src/ParaMEDMEMTest/TestMPIAccess.cxx | 30 + src/ParaMEDMEMTest/TestMPIAccessDEC.cxx | 30 + src/ParaMEDMEMTest/TestParaMEDMEM.cxx | 30 + src/ParaMEDMEMTest/test_AllToAllDEC.cxx | 170 + src/ParaMEDMEMTest/test_AllToAllTimeDEC.cxx | 267 + src/ParaMEDMEMTest/test_AllToAllvDEC.cxx | 212 + src/ParaMEDMEMTest/test_AllToAllvTimeDEC.cxx | 363 + .../test_AllToAllvTimeDoubleDEC.cxx | 337 + src/ParaMEDMEMTest/test_MPI_Access_Cancel.cxx | 325 + .../test_MPI_Access_Cyclic_ISend_IRecv.cxx | 270 + .../test_MPI_Access_Cyclic_Send_Recv.cxx | 187 + src/ParaMEDMEMTest/test_MPI_Access_IProbe.cxx | 172 + .../test_MPI_Access_ISendRecv.cxx | 216 + .../test_MPI_Access_ISend_IRecv.cxx | 221 + ...test_MPI_Access_ISend_IRecv_BottleNeck.cxx | 225 + .../test_MPI_Access_ISend_IRecv_Length.cxx | 234 + .../test_MPI_Access_ISend_IRecv_Length_1.cxx | 305 + src/ParaMEDMEMTest/test_MPI_Access_Probe.cxx | 144 + .../test_MPI_Access_SendRecv.cxx | 180 + .../test_MPI_Access_Send_Recv.cxx | 166 + .../test_MPI_Access_Send_Recv_Length.cxx | 190 + src/ParaMEDMEMTest/test_MPI_Access_Time.cxx | 290 + src/ParaMEDMEMTest/test_MPI_Access_Time_0.cxx | 471 + src/ParaMEDMEMTest/test_perf.cxx | 337 + src/ParaMEDMEM_Swig/CMakeLists.txt | 61 + src/ParaMEDMEM_Swig/ParaMEDMEM.i | 348 + src/ParaMEDMEM_Swig/ParaMEDMEM.typemap | 84 + src/ParaMEDMEM_Swig/test_InterpKernelDEC.py | 121 + src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py | 144 + .../test_StructuredCoincidentDEC.py | 128 + src/RENUMBER/testRenumbering.py | 18 +- src/RENUMBER_Swig/CMakeLists.txt | 4 +- 191 files changed, 37330 insertions(+), 348 deletions(-) create mode 100644 cmake_files/FindParMetis.cmake create mode 100644 resources/BDC-714.sauv create mode 100644 resources/Box1.med create mode 100644 resources/Box1Moderate.med create mode 100644 resources/Box2.med create mode 100644 resources/Box2Moderate.med create mode 100644 resources/Box3.med create mode 100644 resources/BoxEvenSmaller1.med create mode 100644 resources/BoxHexa1.med create mode 100644 resources/BoxHexa2.med create mode 100644 resources/BoxModSmall1.med create mode 100644 resources/BoxModSmall2.med create mode 100644 resources/BoxTetra2.med create mode 100644 resources/ComplexIncludedTetra.med create mode 100644 resources/ComplexIncludingTetra.med create mode 100644 resources/CornerTetra.med create mode 100644 resources/DegenEdgeXY.med create mode 100644 resources/DegenFaceXYZ.med create mode 100644 resources/DegenTranslatedInPlane.med create mode 100644 resources/DividedGenTetra1.med create mode 100644 resources/DividedGenTetra2.med create mode 100644 resources/DividedUnitTetra.med create mode 100644 resources/DividedUnitTetraSimpler.med create mode 100644 resources/GenTetra1.med create mode 100644 resources/GenTetra2.med create mode 100644 resources/GeneralTetra.med create mode 100644 resources/HalfstripOnly.med create mode 100644 resources/HalfstripOnly2.med create mode 100644 resources/MovedHexaBox1.med create mode 100644 resources/MovedHexaBox2.med create mode 100644 resources/NudgedDividedUnitTetra.med create mode 100644 resources/NudgedDividedUnitTetraSimpler.med create mode 100644 resources/NudgedSimpler.med create mode 100644 resources/NudgedTetra.med create mode 100644 resources/SimpleHalfstripOnly.med create mode 100644 resources/SimpleIncludedTetra.med create mode 100644 resources/SimpleIncludingTetra.med create mode 
100644 resources/TinyBox.med create mode 100644 resources/TrickyTetra1.med create mode 100644 resources/UnitTetra.med create mode 100644 resources/UnitTetraDegenT.med create mode 100644 resources/allPillesTest.sauv create mode 100644 resources/portico_3subs.sauv create mode 100644 src/ParaMEDLoader/CMakeLists.txt create mode 100644 src/ParaMEDLoader/ParaMEDFileMesh.cxx create mode 100644 src/ParaMEDLoader/ParaMEDFileMesh.hxx create mode 100644 src/ParaMEDLoader/ParaMEDLoader.cxx create mode 100644 src/ParaMEDLoader/ParaMEDLoader.hxx create mode 100644 src/ParaMEDMEM/BASICS_JR create mode 100644 src/ParaMEDMEM/BlockTopology.cxx create mode 100644 src/ParaMEDMEM/BlockTopology.hxx create mode 100644 src/ParaMEDMEM/CMakeLists.txt create mode 100644 src/ParaMEDMEM/CommInterface.cxx create mode 100644 src/ParaMEDMEM/CommInterface.hxx create mode 100644 src/ParaMEDMEM/ComponentTopology.cxx create mode 100644 src/ParaMEDMEM/ComponentTopology.hxx create mode 100644 src/ParaMEDMEM/DEC.cxx create mode 100644 src/ParaMEDMEM/DEC.hxx create mode 100644 src/ParaMEDMEM/DECOptions.hxx create mode 100644 src/ParaMEDMEM/DisjointDEC.cxx create mode 100644 src/ParaMEDMEM/DisjointDEC.hxx create mode 100644 src/ParaMEDMEM/ElementLocator.cxx create mode 100644 src/ParaMEDMEM/ElementLocator.hxx create mode 100644 src/ParaMEDMEM/ExplicitCoincidentDEC.cxx create mode 100644 src/ParaMEDMEM/ExplicitCoincidentDEC.hxx create mode 100644 src/ParaMEDMEM/ExplicitMapping.hxx create mode 100644 src/ParaMEDMEM/ExplicitTopology.cxx create mode 100644 src/ParaMEDMEM/ExplicitTopology.hxx create mode 100644 src/ParaMEDMEM/ICoCoField.cxx create mode 100644 src/ParaMEDMEM/ICoCoField.hxx create mode 100644 src/ParaMEDMEM/ICoCoMEDField.cxx create mode 100644 src/ParaMEDMEM/ICoCoMEDField.hxx create mode 100644 src/ParaMEDMEM/InterpKernelDEC.cxx create mode 100644 src/ParaMEDMEM/InterpKernelDEC.hxx create mode 100644 src/ParaMEDMEM/InterpolationMatrix.cxx create mode 100644 src/ParaMEDMEM/InterpolationMatrix.hxx create mode 100644 src/ParaMEDMEM/LinearTimeInterpolator.cxx create mode 100644 src/ParaMEDMEM/LinearTimeInterpolator.hxx create mode 100644 src/ParaMEDMEM/MPIAccess.cxx create mode 100644 src/ParaMEDMEM/MPIAccess.hxx create mode 100644 src/ParaMEDMEM/MPIAccessDEC.cxx create mode 100644 src/ParaMEDMEM/MPIAccessDEC.hxx create mode 100644 src/ParaMEDMEM/MPIProcessorGroup.cxx create mode 100644 src/ParaMEDMEM/MPIProcessorGroup.hxx create mode 100644 src/ParaMEDMEM/MxN_Mapping.cxx create mode 100644 src/ParaMEDMEM/MxN_Mapping.hxx create mode 100644 src/ParaMEDMEM/NonCoincidentDEC.cxx create mode 100644 src/ParaMEDMEM/NonCoincidentDEC.hxx create mode 100644 src/ParaMEDMEM/OverlapDEC.cxx create mode 100644 src/ParaMEDMEM/OverlapDEC.hxx create mode 100644 src/ParaMEDMEM/OverlapElementLocator.cxx create mode 100644 src/ParaMEDMEM/OverlapElementLocator.hxx create mode 100644 src/ParaMEDMEM/OverlapInterpolationMatrix.cxx create mode 100644 src/ParaMEDMEM/OverlapInterpolationMatrix.hxx create mode 100644 src/ParaMEDMEM/OverlapMapping.cxx create mode 100644 src/ParaMEDMEM/OverlapMapping.hxx create mode 100644 src/ParaMEDMEM/ParaFIELD.cxx create mode 100644 src/ParaMEDMEM/ParaFIELD.hxx create mode 100644 src/ParaMEDMEM/ParaGRID.cxx create mode 100644 src/ParaMEDMEM/ParaGRID.hxx create mode 100644 src/ParaMEDMEM/ParaMESH.cxx create mode 100644 src/ParaMEDMEM/ParaMESH.hxx create mode 100644 src/ParaMEDMEM/ProcessorGroup.cxx create mode 100644 src/ParaMEDMEM/ProcessorGroup.hxx create mode 100644 src/ParaMEDMEM/README_JR create mode 100644 
src/ParaMEDMEM/StructuredCoincidentDEC.cxx create mode 100644 src/ParaMEDMEM/StructuredCoincidentDEC.hxx create mode 100644 src/ParaMEDMEM/TODO_JR create mode 100644 src/ParaMEDMEM/TimeInterpolator.cxx create mode 100644 src/ParaMEDMEM/TimeInterpolator.hxx create mode 100644 src/ParaMEDMEM/Topology.cxx create mode 100644 src/ParaMEDMEM/Topology.hxx create mode 100644 src/ParaMEDMEMTest/CMakeLists.txt create mode 100644 src/ParaMEDMEMTest/MPI2Connector.cxx create mode 100644 src/ParaMEDMEMTest/MPI2Connector.hxx create mode 100644 src/ParaMEDMEMTest/MPIAccessDECTest.cxx create mode 100644 src/ParaMEDMEMTest/MPIAccessDECTest.hxx create mode 100644 src/ParaMEDMEMTest/MPIAccessTest.cxx create mode 100644 src/ParaMEDMEMTest/MPIAccessTest.hxx create mode 100644 src/ParaMEDMEMTest/MPIMainTest.hxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest.hxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_MEDLoader.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_NonCoincidentDEC.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx create mode 100644 src/ParaMEDMEMTest/ParaMEDMEMTest_StructuredCoincidentDEC.cxx create mode 100644 src/ParaMEDMEMTest/TestMPIAccess.cxx create mode 100644 src/ParaMEDMEMTest/TestMPIAccessDEC.cxx create mode 100644 src/ParaMEDMEMTest/TestParaMEDMEM.cxx create mode 100644 src/ParaMEDMEMTest/test_AllToAllDEC.cxx create mode 100644 src/ParaMEDMEMTest/test_AllToAllTimeDEC.cxx create mode 100644 src/ParaMEDMEMTest/test_AllToAllvDEC.cxx create mode 100644 src/ParaMEDMEMTest/test_AllToAllvTimeDEC.cxx create mode 100644 src/ParaMEDMEMTest/test_AllToAllvTimeDoubleDEC.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Cancel.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Cyclic_ISend_IRecv.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Cyclic_Send_Recv.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_IProbe.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_ISendRecv.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_BottleNeck.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length_1.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Probe.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_SendRecv.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Send_Recv.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Send_Recv_Length.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Time.cxx create mode 100644 src/ParaMEDMEMTest/test_MPI_Access_Time_0.cxx create mode 100644 src/ParaMEDMEMTest/test_perf.cxx create mode 100644 src/ParaMEDMEM_Swig/CMakeLists.txt create mode 100644 src/ParaMEDMEM_Swig/ParaMEDMEM.i create mode 100644 src/ParaMEDMEM_Swig/ParaMEDMEM.typemap create mode 100755 
src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
 create mode 100755 src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py
 create mode 100755 src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4bbaae984..ba75c06cb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -86,16 +86,10 @@ IF(NOT SALOME_MED_MICROMED)
   #FIND_PACKAGE(SalomeLibXml2)
   #SALOME_LOG_OPTIONAL_PACKAGE(LibXml2 SALOME_MED_ENABLE_PARTITIONER)
   IF(DEFINED ENV{LIBXML2_ROOT_DIR})
-    SET(CMAKE_PREFIX_PATH "$ENV{LIBXML2_ROOT_DIR}")
+    SET(LIBXML2_ROOT_DIR $ENV{LIBXML2_ROOT_DIR} CACHE PATH "Path to the LibXml2.")
+    LIST(APPEND CMAKE_PREFIX_PATH "${LIBXML2_ROOT_DIR}")
   ENDIF()
   FIND_PACKAGE(LibXml2)
-  MESSAGE("LIBXML2_FOUND: ${LIBXML2_FOUND}")
-  MESSAGE("LIBXML2_INCLUDE_DIR: ${LIBXML2_INCLUDE_DIR}")
-  MESSAGE("LIBXML2_LIBRARIES: ${LIBXML2_LIBRARIES}")
-  MESSAGE("LIBXML2_DEFINITIONS: ${LIBXML2_DEFINITIONS}")
-  MESSAGE("LIBXML2_XMLLINT_EXECUTABLE: ${LIBXML2_XMLLINT_EXECUTABLE}")
-  MESSAGE("LIBXML2_VERSION_STRING: ${LIBXML2_VERSION_STRING}")
-
   IF(SALOME_MED_PARTITIONER_METIS)
     #FIND_PACKAGE(SalomeMetis)
     #SALOME_LOG_OPTIONAL_PACKAGE(Metis SALOME_MED_PARTITIONER_METIS)
@@ -120,11 +114,17 @@ IF(SALOME_BUILD_TESTS)
 ENDIF(SALOME_BUILD_TESTS)
 
 IF(SALOME_USE_MPI)
-  FIND_PACKAGE(SalomeMPI REQUIRED)
+  #FIND_PACKAGE(SalomeMPI REQUIRED)
+  FIND_PACKAGE(MPI REQUIRED)
   ADD_DEFINITIONS("-DHAVE_MPI")
+  SET(MPI_INCLUDE_DIRS ${MPI_C_INCLUDE_PATH} ${MPI_CXX_INCLUDE_PATH})
+  SET(MPI_LIBRARIES ${MPI_C_LIBRARIES} ${MPI_CXX_LIBRARIES})
+  SET(MPI_DEFINITIONS "${MPI_CXX_COMPILE_FLAGS}")
+
   IF(SALOME_MED_PARTITIONER_PARMETIS)
-    FIND_PACKAGE(SalomeParMetis)
-    SALOME_LOG_OPTIONAL_PACKAGE(ParMetis SALOME_MED_PARTITIONER_PARMETIS)
+    #FIND_PACKAGE(SalomeParMetis)
+    FIND_PACKAGE(ParMetis)
+    #SALOME_LOG_OPTIONAL_PACKAGE(ParMetis SALOME_MED_PARTITIONER_PARMETIS)
     ADD_DEFINITIONS("-DMED_ENABLE_PARMETIS")
   ENDIF(SALOME_MED_PARTITIONER_PARMETIS)
 ENDIF(SALOME_USE_MPI)
@@ -133,7 +133,8 @@ IF(SALOME_MED_ENABLE_RENUMBER)
   #FIND_PACKAGE(SalomeBoost)
   #SALOME_LOG_OPTIONAL_PACKAGE(Boost SALOME_MED_ENABLE_RENUMBER)
   IF(DEFINED ENV{BOOST_ROOT_DIR})
-    SET(CMAKE_PREFIX_PATH "$ENV{BOOST_ROOT_DIR}")
+    SET(BOOST_ROOT_DIR $ENV{BOOST_ROOT_DIR} CACHE PATH "Path to the Boost.")
+    LIST(APPEND CMAKE_PREFIX_PATH "${BOOST_ROOT_DIR}")
   ENDIF()
   SET(Boost_USE_STATIC_LIBS OFF)
   SET(Boost_USE_MULTITHREADED ON)
@@ -152,7 +153,8 @@ IF(SALOME_BUILD_DOC)
   #SALOME_LOG_OPTIONAL_PACKAGE(Sphinx SALOME_BUILD_DOC)
   FIND_PACKAGE(Doxygen)
   IF(DEFINED ENV{GRAPHVIZ_ROOT_DIR})
-    SET(CMAKE_PREFIX_PATH "$ENV{GRAPHVIZ_ROOT_DIR}")
+    SET(GRAPHVIZ_ROOT_DIR $ENV{GRAPHVIZ_ROOT_DIR} CACHE PATH "Path to the Graphviz.")
+    LIST(APPEND CMAKE_PREFIX_PATH "${GRAPHVIZ_ROOT_DIR}")
   ENDIF()
   FIND_PACKAGE(Graphviz)
   FIND_PACKAGE(Sphinx)
@@ -245,6 +247,7 @@ IF(WIN32)
   ADD_DEFINITIONS("-D_USE_MATH_DEFINES")
 ENDIF(WIN32)
 
+#ADD_DEFINITIONS("-DMEDTOOL_ROOT_DIR=${CMAKE_INSTALL_PREFIX}")
 ADD_SUBDIRECTORY(src)
 
 #ADD_SUBDIRECTORY(adm_local)

diff --git a/cmake_files/FindMEDFile.cmake b/cmake_files/FindMEDFile.cmake
index 82b8eee53..546969240 100644
--- a/cmake_files/FindMEDFile.cmake
+++ b/cmake_files/FindMEDFile.cmake
@@ -50,5 +50,10 @@ ELSE(MEDFILE_F_LIBRARIES)
   SET(MEDFILE_LIBRARIES ${MEDFILE_C_LIBRARIES})
 ENDIF(MEDFILE_F_LIBRARIES)
 
+IF(NOT MEDFILE_INCLUDE_DIRS
+   OR (NOT MEDFILE_C_LIBRARIES AND NOT MEDFILE_F_LIBRARIES))
+  MESSAGE(FATAL_ERROR "MEDFile not found; please set MEDFILE_ROOT_DIR and check target directory.")
+ENDIF()
+
 INCLUDE(FindPackageHandleStandardArgs)
 FIND_PACKAGE_HANDLE_STANDARD_ARGS(MEDFile REQUIRED_VARS MEDFILE_INCLUDE_DIRS MEDFILE_LIBRARIES)
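
Note on the CMakeLists.txt hunk above: with SalomeMPI replaced by CMake's stock FindMPI module, the top-level file now republishes MPI_INCLUDE_DIRS, MPI_LIBRARIES and MPI_DEFINITIONS for the rest of the tree. A minimal sketch of how a subdirectory added by this patch could consume those variables is shown below; the target and source names (paramedmem_example, Example.cxx) are illustrative only and are not taken from this patch.

    # Sketch: consume the MPI_* variables set by the top-level CMakeLists.txt
    # (illustrative target/source names, not part of this patch).
    INCLUDE_DIRECTORIES(${MPI_INCLUDE_DIRS})
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MPI_DEFINITIONS}")   # MPI C++ compile flags
    ADD_LIBRARY(paramedmem_example SHARED Example.cxx)             # code guarded by -DHAVE_MPI
    TARGET_LINK_LIBRARIES(paramedmem_example ${MPI_LIBRARIES})     # MPI C and C++ libraries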
diff --git a/cmake_files/FindMetis.cmake b/cmake_files/FindMetis.cmake
index 5173cba6d..c4f627362 100644
--- a/cmake_files/FindMetis.cmake
+++ b/cmake_files/FindMetis.cmake
@@ -32,6 +32,10 @@ ENDIF(METIS_ROOT_DIR)
 FIND_LIBRARY(METIS_LIBRARIES metis)
 FIND_PATH(METIS_INCLUDE_DIRS metis.h)
 
+IF(NOT METIS_LIBRARIES OR NOT METIS_INCLUDE_DIRS)
+  MESSAGE(FATAL_ERROR "Metis not found; please set METIS_ROOT_DIR and check target directory.")
+ENDIF()
+
 INCLUDE(FindPackageHandleStandardArgs)
 FIND_PACKAGE_HANDLE_STANDARD_ARGS(Metis REQUIRED_VARS METIS_INCLUDE_DIRS METIS_LIBRARIES)
 FILE(READ ${METIS_INCLUDE_DIRS}/metis.h metis_h_content)

diff --git a/cmake_files/FindParMetis.cmake b/cmake_files/FindParMetis.cmake
new file mode 100644
index 000000000..7eb0701a3
--- /dev/null
+++ b/cmake_files/FindParMetis.cmake
@@ -0,0 +1,39 @@
+# Copyright (C) 2007-2015 CEA/DEN, EDF R&D, OPEN CASCADE
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+# ------
+
+MESSAGE(STATUS "Check for parmetis ...")
+
+SET(PARMETIS_ROOT_DIR $ENV{PARMETIS_ROOT_DIR} CACHE PATH "Path to the PARMETIS.")
+IF(PARMETIS_ROOT_DIR)
+  LIST(APPEND CMAKE_PREFIX_PATH "${PARMETIS_ROOT_DIR}")
+ENDIF(PARMETIS_ROOT_DIR)
+
+FIND_LIBRARY(PARMETIS_LIBRARIES parmetis)
+FIND_LIBRARY(PARMETIS_SEQ_LIBRARIES metis)
+SET(PARMETIS_LIBRARIES ${PARMETIS_LIBRARIES} ${PARMETIS_SEQ_LIBRARIES})
+FIND_PATH(PARMETIS_INCLUDE_DIRS parmetis.h)
+
+IF(NOT PARMETIS_LIBRARIES OR NOT PARMETIS_INCLUDE_DIRS)
+  MESSAGE(FATAL_ERROR "Parallel Metis not found; please set PARMETIS_ROOT_DIR and check target directory.")
+ENDIF()
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(ParMetis REQUIRED_VARS PARMETIS_INCLUDE_DIRS PARMETIS_LIBRARIES)

diff --git a/cmake_files/FindScotch.cmake b/cmake_files/FindScotch.cmake
index b63d30dcb..351c3e92e 100644
--- a/cmake_files/FindScotch.cmake
+++ b/cmake_files/FindScotch.cmake
@@ -31,5 +31,9 @@ FIND_LIBRARY(SCOTCH_ERR_LIBRARIES scotcherr)
 SET(SCOTCH_LIBRARIES ${SCOTCH_LIBRARIES} ${SCOTCH_ERR_LIBRARIES})
 FIND_PATH(SCOTCH_INCLUDE_DIRS scotch.h PATH_SUFFIXES "/scotch")
 
+IF(NOT SCOTCH_LIBRARIES OR NOT SCOTCH_ERR_LIBRARIES OR NOT SCOTCH_INCLUDE_DIRS)
+  MESSAGE(FATAL_ERROR "Scotch not found; please set SCOTCH_ROOT_DIR and check target directory.")
+ENDIF()
+
 INCLUDE(FindPackageHandleStandardArgs)
 FIND_PACKAGE_HANDLE_STANDARD_ARGS(Scotch REQUIRED_VARS SCOTCH_INCLUDE_DIRS SCOTCH_LIBRARIES)
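
The new FindParMetis.cmake mirrors the existing FindMetis.cmake: it honours the PARMETIS_ROOT_DIR environment variable, locates parmetis.h, and returns PARMETIS_LIBRARIES as libparmetis plus the sequential libmetis it depends on. A minimal sketch of how a partitioner CMakeLists.txt built under SALOME_MED_PARTITIONER_PARMETIS might consume these variables follows; the target and source names (medpartitioner_example, ExamplePartitioner.cxx) are illustrative and do not come from this patch.

    # Sketch: typical use of the variables exported by cmake_files/FindParMetis.cmake
    # (illustrative target/source names, not part of this patch).
    FIND_PACKAGE(ParMetis)                                   # runs FindParMetis.cmake
    IF(PARMETIS_FOUND)
      INCLUDE_DIRECTORIES(${PARMETIS_INCLUDE_DIRS})
      ADD_DEFINITIONS("-DMED_ENABLE_PARMETIS")
      ADD_LIBRARY(medpartitioner_example SHARED ExamplePartitioner.cxx)
      TARGET_LINK_LIBRARIES(medpartitioner_example ${PARMETIS_LIBRARIES} ${MPI_LIBRARIES})
    ENDIF(PARMETIS_FOUND)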
z7F8Ps*+&?jbhd(mf=kA3L=sU5)$4Am3<}mph}yXIaq4imwKcPCSKQj4cGkZ??dM827lTW_p@CIKik=@ zXXE-_J9m!R^^P+lsK6{QkcS*nm#r5agXYLGRh7WsnIChJID zvYyl@8%RU4ku)ZoNK>+zG$&g~OR|-;Cfi6`vYoUi-TCspzIM&czkS%+wE63Rj?#(j zEH9E>R zUc1Cxn|7|f@v`(G`$|8uzYHJ;${=#E3?YZgFmkwzAVDoGG(NcfNeDug#(Pw+~yJHh;4*N9L0AWIp+pEFc%k z+oU_639U^#E`Qg^JGAv@%`Nh?-C{r6Em1q)YnPa7)6R7%mdSE*g{&l3$!cgU zdUAujOKy}+PP!5rYCbTx~xcpor!S1oz!0v~05ESlfb}SLp*4BN^ z+WuR+NI%;}sh#h&OT@Kl=Ng;@NhKK>Ey>9g!W;#sk|)R~B{j+MIi4odNLn(Tq$i(| z3}i;hL_RB-$t;qU%qH2%9FmjFCArDxBoCQa@{#$a0QtNWB;EP)y}s^zbNT zFSrn5q%c`Tiju{oI9WnUlJ0yaur}?uYz)8FQYk;%mG-k;89&>VRXg8nmzZmla}6$s z@=}4UD3!>{QiZH4)yV2ngRCjF$QPtGSx4%U^`t)8KpK*bq%qk>dQMh=$|jJ``K=V+WB6)#9W)4Yw${}lGWrI zSxc^y_2dS5m)t0u$j$N|xka{;@5=||hq8_QNIoXF%MS7r`IOu#yU5+Lhukas$j{_+ z@(bBd9*{4|ujC-<&X@1?O=xX0f5C@vSdNfKh=&*Ge%Cojmi|$CV!PXm86^|>tYjv$ zNLDhNWG8b-PBNF|CZCf$WM0Wf=9dEG^HPv3Br#-RDMGsQ<$HZ?4$Z%P*!5ukLW-i8 z6emkaNwSobCd)`!(w)x))}|eojp5f?D(7dr@_x3f;AgvvYUg|H5_4^Gt|66BS*nm# zr5agXYLGRh7WsnIChJIDvYyl@8%RU4ku)ZoNK>+zG$&g~OR|-;Cfi6`vYoUiJ4i>e zlXNCulrH2;(v@`Q%lG;wv^JT)kZ$NMJ;GMXGC zW65zco}3`Bk`rYTIayvKr^r-tn!HX^y|Rz| zOg<;Skp1KV`I7uf4w8rDFnL6dlJ0!@Uf+b)Ci53^3}4G}@`RiuzmZeqX*ol>^O?}v zhAzQuQPk-Q|A$t!Y|{9b+_ugP`tN4Y`% zBsa-h@-z91+$MjOJLGS2m;7D+An(b2@`3zG{v{8|zvU76k35e3QyII2p}}BXICvq% zoiE?(o6y>1{zAh*J4XZ=DN$q+NlGS@XvTBrGoiJ~afSG~MnaSOx0XUv_}MO{pY2kq zo$s|v#I?i%n0Wy$u=QE+T$#I1;hF@!Qke}@a``K=YpY4XK zo$s|v%(cn6h7QAU89|PeQRFK!nj9lz$#F8CoFK1~6J-)PSzaTj$W(HgyiQJ+8RQ%C zCOK1Pk+WqEIalV9^W`mafh;87mUqZSvY2$|%lG;wv^JT)&?Q(Z%gE)jf?O%9$knoj zbmudnwaIaX`nyKf`ni_Y`Ppv0pY1lNo$s|v%(cn6hQ5o9vWeU*?~z+%EBU^BKz=CO z$dBY>a=YvxKao$#owAGEEqlnlvXA^sJ}1AB{p11plKe^zl859lc|?wq$K-4BxSSx} z`SQKK39U`$FZ3k7kyGSpIYXY6bL4rsK)Ump(Awm|$CV!PX?D?>5m%V7$<4MxaE1`C&{cLAz(E9Vec8Rz) z?Oem2MjA;=rjzvKGm?SKD4ED-B{P{tvXa>(JDEdrlDQ-|`JCh-^GZH4zZ4*!mx5#= zi6IM15wfThBa2H3vZRzEOG_EjoiE?(YuDWT+lQ?+<}a))%1L>$f>b0cNoBH%R3+W{ zOki!=aoHGtt)*&ywyW-EyBdDBtEqOr*Df*FCg&Pf3ol4*vX0ax>q&jGfixr=Nn^5! 
zG$or!bFziBBwI;qvW>JQ+ev$}gLEW2NoVp!=|a9FUCD0Jo$Mh!$zIZ%d|CRC?tJ-P zUt5>v-#%<@GJj!x(NFr517sjMNCuNbWGLy*XF_X}|xWFh&syhARM#pDuMN-mS-q&r`}*EgZH$^3<_z)D#~u9h|AT3JV~mkp#l zp9!r^jw{UHHS%tde`|Q7pY1mJ*>1Di`ChxkT$`M0*n8L_Tgmt31M)-JMt&q8liOtn z`H6f=?v!2RZrMZbm3`!A@;Uj1>?aS%m*iJ+kUS)Z$s=-8%(As4F!p`8VoFmW61@c?@j=U(BNOwLHTALhKn7?b}vY%_|il6PS`q}P#we!7p ziMcj8*RUUOO|Fwa$_?@-xk=uVpUGe3Hu59CksFL_A*Esw~5 zS$drzlyZ3}^nrE25HACaXwQvYJ#UYe-GfozDc;rX81!;n&)%?YmG9@3NSCB4a)r4QLx`jP!*0O`(`@AXY+Z8Cr112ISjlS5=EIZTF= zBV;7$&Syevlj92aca4nlb1l8%XS>mUwi}~%zSk}>*Cyv0J{IF-JUKyLB`3-xa%pzyY9CEJABj?Lo+CWlEdT?IZ7UrugT+bf;=hTkf-D{c}C8X=j1%;&X@1?O=xX0f8iJK zt$asbluP7gxk6r*?@4z)6Iz=bSGd1x$F2zmNpWd0&j;R$(?OfAe!#M6?7Oe^Ua&z;YN z)+WalZfnHa*|Bjhp66gx-D*|a*R52mPMp`cKJM=qf-=N^-Knh~+Bp5$e(Td}ORKq_ zPHkzmoxkn3wAv=YYD=rR*75dp-JRNAxBXoIcSJh_|14accWK+t2*OYtLLcwe@5B?S8WJvwo~*{+!zSv6^{w zYU{^p=G3XJAFG*Lr?!5qPOsYC|BPzu*Y+D5&uyPkwT)x@GO4z4tbSItjbn9Y)i#dR zSybCNR%cah+a{Z8cYk)(Hm>c@q1tVqQ?(t3y_QS09f#GqRoih`{haD7s`IF}F` zMOC}|i>bEbw*AFbyX{M;w)3$4B~{ybSY1lBorl$>Roi)3T}HK?ht*|O+j&@BPPMzg zylOiy+h0Mo+rFY|J5SqRNwuA))stOp|P;J-2>e{NysIH^hu7mBbtJtuBc)$ab5s_nYj{#L5p_N`Uh zb+r9$RNHm5x~*!vj#jr*ZP(H2_NwhVTHQglT}P`ss&@BxQf=4O_IFn8wtrEzU1!_h zMYUaLt6x%W*V*c>s_i;k-A%P!XREubw(D$l57qAeo~rG-+x}jv-S)jz+dSC*msQ(5 zSlvgp&4bl_RogsR-A}d6gVp_2+dNo3K()Jnpz7AzKS;IPez0nrC;R*%s%@UE9;({r z$?9RMZJw+iuDXNj5vpyTZ2w5r9aN7}ZS!XPUs3J0AFbNv(e{r~ZS!dLSk*RCVt+d8m%mTFrER?k*#>%i(cs%;%u zJy*4@1FPq$cK6R$ZR^7Jzopu3zd*IE6WhN~wXGAY-&Sqw#Oilck5IixwXGA|zgV@c z6RVe~cK0t;ZR^JNFH`NdU#{BLk?mihdW`Cos%;(F{#B}N9a+6vwXGwo*QmC2Wc6Cr z?*4VEZC%;^^{U|+TFiZ zwXHkb|GsLs{RgUT9oqg6RognWdYfunhgN^2+SZ}fAFH-?X!Um0whpb{q1xU5iE3My zw*OPrZu_08ZJpZwU8-%JTD@Dfty8P_sJ3-#^V?{WK((!7YyYKcTgO&^rP|i9)dyAEI=1?d>LscVt9JJvQEltm+8%O|{)8R$o_b_lea%s&@C^P;K{(?f*%&+y16%yN_)DE!B1( zS^cwWyN|5?MYY{WR^L{=L-nt!?LM;ocT~Ike^YJumF>T)+HL>4YP-*D{~xODKC}9s z>fNgEtG4^h_CHW<_nFmysc3RG`yZ;d`_A_Nt=et>NVVOEw*Md1b{|^(Shd}U z)WL-FKXxBlZ7&$R53LSSZTF$op{m{eVXE!EwEf|#-S!cx?LM{rk*e)JwK__*-KSP3 zQEm6B)k#&`eQI?w)pnm+9qqF}xoW#_ZGQ^Y*51y=j-S`%2xx=}MIw*Kbt0n!CGl{d!f^vPzvwRjbyj z7du{I?|9B;Y|m^u<2ikCR8H;LW*ix#*XLo)ul-ML5p|?E#eLuJ6=)mc+O^Q&urRwPG5HEo!Ya@9n@acNfwPi zUQzFOMKzw&m#%Cv@1V|RY|m`EC+Tc@UUkQ_SDo>kzB+d)7JtxU-a(7SKgr_W@tn=r zp4ntPr!Qyvo!Ya@9n@acNfwVkUUBbu#bd{F`f`^~3GblJW^B)Fx+m#udR}$MvsZ0A zr!OvjiTHz-@D5rc{z;bfj^}K~_RMBXgV^J8PSUA8yWBzTRh?wX_~VuIj#ttd&*{r^ zkCgHb>TJgL%%*#i&Zg&8cRYKQ@tnSFeU*woXesZYrQ)AtOd;=pvD>kI#}sn<@{Vge zJr4u>B%Pk~x_3MNqo9z+b^7A4XdLf}5EJ(iz;w_b$4LriR2ttoX23C=MtUAAKiHpX#i(i z131qjUiVxt=LPRh&*=8LJG#%KDJIUdiR~&*UvVG9G0t;IqdVIgzNtJrORUc`EGEuFjP3ZR?L50QfV0iOp6lg2y}Ua;qgz9FbZg_? 
zj(^%b&78hGU-4s~X`aVfjPnrF0M52f+j)$6U)R7+UtOF}a!1sY&3V7#^Higqcdhw6 z*9yDOFZUfzVdp^?JGyPVkC)hO@9XYqJFn{?3OmPDDE>2*2A>}+cQ z_xa@+-JV~%1DsECN3_?y+wm?s?c9N5uNN=GAKiJD#a`UP&bCH(o@L(GJp)^txa-S4 z$uqj|+pRdyHQL1=*m;&QfOA|t?+ZIGXmsaUrqP{k@9Un6Zm+wit>>E4m;2_D4&8IN z&f|~nJj*nIv#kN_aTWWHR@cjU!MoEly1nj>?(lHUm3-#d&jU&ndlcF6?Y; z0DES|dEIl-t&KaF)0Z`LUx@o~v+Ee=xyE+<({`R^I&HR{zTBs3VS8q|qdVI=D*GgN zFnis*9e1tidc}FT(P8}2ooAUwceXWvJ+s__oo%Nt?@rI?w$mNm=ebrm&U1}z@4!x9 zc3qqYnnriFHGn;{+|iwF@9Um{?R5rr`m)Choy9$OtG#Q@=ebrm&T}nx+ZjODE6&5s zc6z4VKFK{*d)>Poe_-di=JXZ&eH5K|&z&PJ;ylY@Pdj!y_VjGqJ?+?Sy>5M#w@=cD z&Q9-kywTa`Ic>JxfvpSY`$+q~G!3#MKT5#92sA_+^u%C{#Wc*rGT6^y>}Lt~{rD+d z#x3x30{h-OnD1wU(;+u_;0AMp1=m4ybV6?o#dypFKQ{~BfNj`|!#IPh;AbJh{CpsU z@3}+LA{z>zBzQyQ{C5UhlHD)>V>vd0pR0uMvj^wDImOSGLVm^rzO4=g-)o1a zM-CK3dGKa3v;{ijWemdv%m6+)6&z>SXwYBSLaYbp9!7s*^ymB=2>J`7zwl6`L}ugz{W*W@OMl_?7fyf9-&c+& zXJIkuFP#3u=`WoA!q4L-?(-)Nkw^{ti=e*<`irOn`ip3fo*02ipuY(Ei=e*<`ir2y zh!ePo+jz*IM$lg*{YBDWB>hFwUt|q5MrZT^{YBDWB>hFwUnKoS?!bPW##PW?B>hFv zUljdC(O*lB?7~6NUljdC(O(q(C8570X^|a8PyzIp zg#MDyUlRIDLVro7VjfmwE9fr?{UxElB=nbr{*pZAPg9a119F4@lG0yN`b$cGN$D@? z0F1^AECl@}rN5-~mz4gJ(qGc+xXYh~gd!#AFB$zMqrYVImyG_BwMJJA!FbSLGWttK zf63@C8T}Uo`ziS3!NWM^DgSH2p=>Uo`zi(_i#9?86CM z1pP(RUvm0OPJhYiFL^$cKn?Em5U?*O3;2jg$NvP!rn7C|5EKXPoxc?*MV3Yxltnpk zpEaekD!oInC0ky$%#QA-BJ(BYZY5;p)H9})FK~pqCb8sIA zwL~klhCSEXq8+#_mLCHNtle+Fa=XF4LskR-w`s0 z&hG->BzcZHzvr7x&cR&F!+g92o?FiE;oc_S!6Gci5-i0sEXNA0#44=D8t}XeT8H)E zIqf|EH_ zx4_pA=V$b{$zO2?zu_)^#~-+d`*?sq@fRNAZ#=?3ch-5Aq@( z@}mHrM?n-q3<{$NilP{bqXbH#6iTBE%Ay>~qXH_T5-Ot#s-haIqXufC7G6MY)InX; zLwz(rLo`BTG(l4|Lvyr1OSD33v_V_6Lwj^UM|47GyofG%30=_*-O&R*(F?utGWwt| z`k_AtU?2uzFos|#hG95HU?fK26^zChjKw&N#{|5JiI{}Rcnwo971QuKreg-)z?+zf zS(uGEn2ULskGHS@3-LDI!6Gci5-i0sEXNA0#44=D8mz@Stj7kti;dWX&3F%6uods) z1AK^W_y`|kJ9gj`e2Sgeh27YLz1WA(@HxJ~ejLD;_zDMc2#0Y5M{x{a<2X*>B)-8Z zoW>cP#W|eE1$>L|a1obq8CP%>-{S{d!*%?K8~6z~aSK1=7u?3LxP#wt7r)~V+{1l5 zz@PXF5Aio1;U7GX{ZbkdOokv7VF*VAA`yioNQz{LMslP;N~FRQcoM1c6rM&Jq(wTU z$1}))jL3v%kr`Q#71@v-Igk^%kQ>h-5Aq@(@}mHrM?n-q3<{$NilP{bqXbH#6iTBE z%Ay>~qXH_T5-Ot#s-haIqXufC7G6MY)InX;Lwz(rLo`BTG(l4|Lvyr1OSD33v_V_6 zLwj^UM|47GyofG%30=_*-O&R*(F?utGWwt|`k_AtU?2uzFos|#hG95HU?fK26^zCh zjKw&N#{|5JiI{}Rcnwo971QuKreg-)z?+zfS(uGEn2ULskGHS@3-LDI!6Gci5-i0s zEXNA0#44=D8mz@Stj7kti;dWX&3F%6uods)1AK^W_y`|kJ9gj`e2Sgeh27YLz1WA( z@HxJ~ejLD;_zDMc2#0Y5M{x{a<2X*>B)-8ZoW>cP#W|eE1$>L|a1obq8CP%>-{S{d z!*%?K8~6z~aSK1=7u?3LxP#wt7r)~V+{1l5z@PXF5Aio1;U7GXeH$GbOokv7VF*VA zA`yioNQz{LMslP;N~FRQcoM1c6rM&Jq(wTU$1}))jL3v%kr`Q#71@v-Igk^%kQ>h- z5Aq@(@}mHrM?n-q3<{$NilP{bqXbH#6iTBE%Ay>~qXH_T5-Ot#s-haIqXufC7G6MY z)InX;Lwz(rLo`BTG(l4|Lvyr1OSD33v_V_6Lwj^UM|47GyofG%30=_*-O&R*(F?ut zGWwt|`k_AtU?2uzFos|#hG95HU?fK26^zChjKw&N#{|5JiI{}Rcnwo971QuKreg-) zz?+zfS(uGEn2ULskGHS@3-LDI!6Gci5-i0sEXNA0#44=D8mz@Stj7kti;dWX&3F%6 zuods)1AK^W_y`|kJ9gj`e2Sgeh27YLz1WA(@HxJ~ejLD;_zDMc2#0Y5M{x{a<2X*> zB)-8ZoW>cP#W|eE1$>L|a1obq8CP%>-{S{d!*%?K8~6z~aSK1=7u?3LxP#wt7r)~V z+{1l5z@PXF5Aio1;U7HaXFNd&MhHT|3t{1iKqR7&1WAz$(MXOINQqQ<0#70}p2E{e zgS1G8^mqmtkP(^iEHWbtvLYL@BL{LK7jok{kb<{vj)WQp>jXJ1{dZ>>EXoyB=j3#J`W@wHUXo*&6 zjW%eDc4&_d=!j0}j2F=bFQF^Cp*wn@CwiebUPd4EML+b%01U(+48{-)#V`!V2#mxi zyn@jfgRvNg@tA;DF%gq68LwdqreYdi$8^lV8+a2lF$=RX2XiqG^YIoIU?JYdJ6MFp zSc0WkhUHj+l~{$@R3%25Ye1H$J4IklSY{w3Kf={s%yRaL3 zuowIA89v7s*pCDF5?|pU4&gA4;3$saYaGW3oWwUch0{2Lvp9$IxPWi*9WLS$F5?QW z;(PpnYq*XdaRWc$CT`(p{DRx~6?gC(?&5d+fqS@*2lx|z;UWIUBm9HMK|$d`2u27( z5e8lik3b}%kOWDQ4ADrA6iA6wcmhu%HJ-xLNQ1OUhxB*`8ITc~@GLST3$h{`vLgp_ zA{TPwIpjfJx#8^R$ z6~tIUj1|OKL5vl|SV4>x#8^Q#R(d>x49JK~covzF1zC{|*^vV|kqf!;9P%MQ3gCGZ 
zL?OhWFp9uEe(TFV?EDZ-I}`t-LXnE~R;=06v_h5tN7lJC=-#7jL7x4MecH81)v50B zI~zRv+xWEaaHi_9)NAv3_ILLA{4Q%+3@dkbjAws0r@wWcy2sF7*}5e;>8X2J|5+b< z>OM6`ADme_kEibIjF;C__m8f%yio_c^YeiKhUaA4!J%J5qPC|V@0@=#Z+)&|`XiAY zJ@o|Vdi3X-rBid^E~yW!R^nyyMC#so*X;B?kcP> zW%16z2i`B`txx7JyXe-g3VDlPxtXy_kqT3z7K9ASS>0Qgi#$-f?ICa7Z_xCta~pf> zl~)_zKXTSvJLlKlQ%_5=^WusM*SvL={41F=Z#`r|&jy?Cdg~2QF}ZFGbC1tAD6-Z~v|L+m!!V_fz2h34Fc!U-k7WaR0=Ae;9bb7kIz-A9=qQxPJoo&+$8f z@0Z-~um7XpF9q(O!2J`rfBy47ZwTB!f%_+L{{-$I{oo<+^S8jy-vU2>^Zpbr@bl}y z?~C~T*#F$$7X{wG1>U~}-oFLjzXjgE#XbiDf3NW0`}YcguRnpWKY_15fv-P-uRnpW zKlXQ$3{E{QflX`_sUG zFYbTWe=jcZ{w?tSE%5#=@cu3E{w?tSE%5#=@cu3E{w?tS?MUQ*`oHJv{moD8R^azP zf#3fGe*Y8r{ZHWcKY`!>#Qyi@6ZgL-&sQD$|CFd%r&8r=b=p^I+N6HnN=<4tY+9{- KOqT!5$NvE@rvbzO literal 0 HcmV?d00001 diff --git a/resources/Box1.med b/resources/Box1.med new file mode 100644 index 0000000000000000000000000000000000000000..d1687755a1a5ddc13297783b9c6ec29f3303a752 GIT binary patch literal 26788 zcmeHPU2IfE6rL>w3M{f!-4srF@T=6DD7x)=W!B*P%Ur#_vqE`CZD-bRB9>r4-a$6QOAscX#@}qFxHN5~Hh$ z+1%k8iq17YtP_KD{qod9n(v|OiS4zTA0wb{U0AI7z9mYHp5puw;`_ceGQ^)wu>%pWHWBR?`fLixGhnMd2c>W^_7Xgu`) zX=I4cM(E}gt`Wa8%Dh)ieO$`?X)0@4#r#>yZ)jwGjQF}Z^NR4O-OhZ{V560m=n^3S zF#bHFe=$7|NZVuE6e}6yKma!DG0%jQqb~B<=E~WeuqZ#9&nd8ZjBJiDus}!Z3P
mQ?F}O zn)Uz)v>a966G?xOH=TlwSNFlJ?p~1Hr0-FM)i!Wpy!L0F(`n%Ktj^a-b@+{!296zl zIpKBZPI{@6uXtXww<)nBv1Lc|Hm^Az-|B7JyfwZ#u3r88(jpZ-9#M7lWXHaleMF}b z=idG8@}*^5UdLM9)gj;za0oaA?iPXkwK4XN!^@}E6t*9F8x<*FIpx;I1j2n^8&B;Y z`Uo#!3IitQlv2Dlo~7*-H~Sh{*VuJn+M1`hrpOFz=5*S`*c0Ve7s`y*b9&gsoT{v= zJR9gA#`bx3caC=gcpyK_kLI2YrT$;vY4w!*#;azY<5ljsNzJpt`^GjiugSO{G4?-g zjf08WGmmRYwYRpl`G=nOJ3G*Oc_w6EW@BdWGcxAgzM#1^nvGOd=_T?kI(-JS z*n39riY5`AL@R88vEnJr|%H`>Q2J8>%(@bg^a98gf^s zt`Ov_UZ_hIonyo`CubU+KCk|R>X~ZPYK%}iM>k{qicS^oqdzwLUsE(G!dBwi+DX|3 z8Mw>Xnu4rpk%%Zhy9*vS{5{KLJ237tUc`>}c==@m!-R62O%-dr(APpG!N1ga#LPIY z6g)}-vQt`81U?x;n0F;89@Rl5gJQd$E%)J(5B?e&`t|Z*9b(DCxZb2C{;%b>e0}n@ zKQ5db8d`4J*ye6nBqBe_ryPm&7{^XXSV$Vy>cBzQS4dTq3;d z<`Lm@g#|?Tv_a-PV(%)*w=J0tU)OI>wVJmntJ~e7-8M@6FyznC1yR+bV>#E&bZ|ag z$@oCZyu#I>R~X(bOnPrwX|0qlw*LK@R$XN4KWf$gx3uVYu|vQi;1F;KI0R-Q0=9qV zCVz%E4n>#WtobniOw(2t$1cqIX9$Ec@z2=j?`$PC{<{y4GO#|2J}o7MahT2fx3jFX zd3pWTr_X$YvCYI{(fn^Y{@%j5vNpHtzz21Sm-XZ8_qqdI=jL-9!|v<$FfV$}@YX@~ zo0v;0>sl5#NUg=-HPeObUD2E>c@)*+~=tW)h9d$(?>OdZPWZodw1&4q`z#-rea0oaA z?jr)lTJOp_)L!qN*}K{|gedUoY02u+xkZ`x)rxBSMApV$2ErZ!x%8*4(kUQT`tI{JwDA*z@%4 z375g#aF&lT!v!m>v{>Ut>}ijiZsVIqc_oaSQsXgPVR%&7aCXfl1jP3vJ{|aT;Jbkz z2LwM4d_N%Z9f^$ifZ+duEapa$hYsilLATV0PYAN`B|#4edXbhGG9c80Zs> zF`NWEM)U{~=BSlKr3w@0a{S}UZa<;>E%&=qKcLieu|vQi;1F;KI0PI5<%fXnj~VWp zx{YD}m|F+A4)&qWA44FNi9g2v#zHJxtbL?$!_hji@OM-_O!=MZ69NU@=6#trN*PT!th}0xoagM z03QYX7MRz-i$4W^4){Od*N_-~4ai6y@*yX6q#U~N%@SlK#y!A`zXWx`BM&6y&<&mN zwSXW8{|X3c(Fffkhx?))WKb?Kc=6GQezX;`Qb%+`4mu%+7;?BbK6`wEXgyIQ5q^V| zAbOl=Em53kBheZncuv+4Z6LxYkxvpe5j7JPJ2-z#u@QCp90Cpjhk!%CAyB>u`~wYt B&rbjV literal 0 HcmV?d00001 diff --git a/resources/Box1Moderate.med b/resources/Box1Moderate.med new file mode 100644 index 0000000000000000000000000000000000000000..28d53d56728fa4a7d0a2a2c8f624322c64b4e623 GIT binary patch literal 158788 zcmeFY2T&DFw=PT+6an2!fJ>ilU(0!EnO+ednwCtNvT}+;iXAHM_fKx@WrATD|gQTAN$WpSNfp!|cJ% z!ob47!@&Hr@#}Bq_hBBz^z*`>_H2XacSpuwT^ZtjSA2fAtu0KPe^$xs;$$W6_w_%8 zjiaR#)!)j((Sl-PZ$V|SaHRS>n3yokKIYAzPX7Z-SBixN1G&mSu>4cbE>(l|*Mix} z0$IQ6DZh%J|LqT)W^es}Ix@)KLgeZE(23ogERSo6pPPm8?CD9K9w(gGFLW$E%R|;@ z&v3FIS^UJ=>^PlAPVD4_8QOo3n?<_>dCvAzDRyH2Gfw6HuW^&)Jvs8cH9J@Gxn%X< zj@h;TX%O?{I!=(2a6ae6ev3S3pELVDxqD9Rvvad*<^Q*Ns*-IU^7-MFPV8WHW}kik zY@29vV(+tcWM4qG6ULp`b+jGXJ;?g(bDVGg<8yS$2UklwvL}@NdX7BV-b21SgKW1q z%=Tyc2^Vp4{OqxSgvjh%{z}_FZ<2q1Xnc0=vq1CskDG^V&yJnF{tM>}f8+eWCC=yk zC!F*A+yx8_Q5+=V$b+0@t-^87oKN(Y@PF~pU>}d_BPA!Jv~bc zvq$)E8o^&^<=5T+Isbq5hB&#TSy(A@{`0l{D}HGHOEUf3Lx1zb|33WiuXO$gv;Pt2 zv-CbD`{&ND($a+bANb)vmw|NtUwvnJ>fbp3KY??$f65Q-|Losis^GsN_7^{7{E1h- z-~06suS@vNADw@f|2F9V!O9<9;;;OllR@PtG#E+ko0Ej1gNg0WHV+9imY+58-}iqO zq~!T^{KGgK$DcR;t8stC&0oh+EbRZ~xZk4e_gfhlOrsN29ioFnVglkqW-tEHasI6D zuQ>nf`^;?|{^k4reV$`K`_00Io&1@THp1+|OP(#{`G>}4ul-Lt{t7QwPv@V@`>*i& zXYHH}2|ufU;6-zE``Mm_mkW9Rfmdj3KveX~5DjtH@OW{P_%LyCU2zp%Jq-;#b#-wy z6%}o99c?vL9SsI*B^Lt&CrN@!Zb?uoQv&1JKQ)fkRr|)X6IW8%SF^oo%;q1fBd+{> z%in*0L*Q=+{0)J>A@E;8;6Lidv-VKA@IUYQuk4|u*@5ISt4{r`8~>lsjsLlSD0f!K zl1J3<@>h$2;g@dA{1aa)zwMeoa`>-fW>K4Q;@8MOeTI(xYG3NBa z`S0>q>;JC*KYL?#eY3P(D)67@KMRl9>;KA+_V?cSKkYkZ`KNZ{wcmX1mlpf)c>Rxd zqy2Bb_J{7Pf8#&nFLdzlqyOKlS(-T5*xOt9ds$GNW>^1bPyDgF{tC0d+7XVle{FaD zzX#F3SNt~w{)WKc5cnGce?#EEF9iPR#^U4@XJ!7iga3T4|H^JWzVz3aS^fHNyYat| z-S}VGZj21`e)(&EE5xj%^)p*#WMcYxg_(?~&Q=zP8~?P6NUSnwa4=}h{C1u*{*GZY z{vOZ!JC@D|5HU|I?-a z5e-I$U$MJi#KXYwk96}ZpXDF%@C)vL=;aR{|4J_gTYiCQ_Wo>a>i;i#A+vSkf?@iym${;enBFD#2Zio;ap#HDUb33ofySeLcNCC>-8bsMYO@w_zXbGhX>& zdAI${vrd2LpMKbpw}KA(AVckyemb_wDeZV47Jw5W-oZTY0&w?mhM{h7Al93p{$5ji?-Wzm-(T0gf3WM;*WdT`mR&U 
z=uoV@QLIo$N9GNeUQg2iynFLvd1hMxOsB%uUhoKnHI>zNPN4y`OX@O@S^&3mqDE|t zOyQF*K^@jI$LNM9b$gU8v1Rp+B^q+p;0wX#r{cCa<~h;3Uf3Rbjup0|3msv<>rtiCuHDmR;scBOw9tQ3=Y7(IkMOEM+2}-qLy>T`aqn&Xq5FWB?!_fUjCsB!LYrY zuCse%Fjjp#yWqlfF!pM954>3)f(von*XDi=!HXJ$18VC+aXxgzMxjrkkWt*{v^6#i z3a-8{>4x5Tn-ROJk?Mm3mosNXB7NcZ;ozR66hH9Do|kx@?~mT)q0frl=rFM!m$=hK zM@e8(<_}Kcj}MdFAYX3L!*db zbueTX+lE@|hhVGb?wX)G>qL2RI65VgPz6Z9!6z%q&5ofqb~En9o-n4 zBeI^L7qO1+_69l+ulwE_W)5maKL>l9HSW}SFEUTD$7jZ&x$I~7;cJQIMdT#nI6b_#3@%s&foyMnDa`MUFzrkbi_VQGPPV9faG$g1;yu`u`2C-#n<;PaIZ+L_MT70;gEukOnFy)PMqZZYD~i( zPE9D$+;CO@tY1Q;J8JaAe3qwppsDJnuvWe&#Qg;(qAI=c=sknlrDktL%}tSDdgg<@ zbB}aA8u5j1EyD&)7Jsljm z_0T`DOw|nbt{x_)LKa{SIpv_oWd-a0IV!yKY|u~TQVIBKi|F$)_2V1tVJ5fG;c%lP zE{1SeT`e?&u3y6L%zi7R%Y90cFtUg0^aLl(+8Io{PZ=u3P>}ZVTi4G0uCQ7s_`sEL zL-H|aK7|PnxYTJTmQQ+N8$-6fl$$R+PHnj47~>D=rzv6wD@p!A3lx?+K;kz&B>A?Z z5!Og}DAjGWgolvE!fjD>+`936rO6=ET;xVAXY2 zdj2<(@3s_$_3?OMD3n8GSD`hScGX0yeWKyWk1tIcY0d}|{T?$lN{4R#oI^r7=1}+Z zh#a`+f+ac;t9oBi5TGZfLzNZw8>@YxDkXI_{-z&P;^rKvc;k;x zyN3E&;^;6-{8+8aWQmJh%ggV*w8HisH)M0)+8}G?7si}^JIuZJ*ypBTJ$Tq3vOUIgs6Jk}x4+o3exW~#PS9l+NztJYzQqHte8K;$_D5WWH|BS=j%qyg z!3Md6%dYFauR?DPGyaNAJ|g3A>u^lGRO*c`F6=ZY)(O%`iBYNsJ{X|-8@ zj~kX3E3fSQ>W*6!_vr|3PXzAlexfboh1cf8PyLO(k+W#U_@K8Bt`N!MV+pklk3>7eGp{0!+0G3X{O3XQnFZxr!pv)`uslgTv=PDdO&|DDkiS^D+QjTXb zK9?5#>Vp)kO9LvN6v&KiF8t_61=IPVYGUOXjJ)@`QJqlal|87n)B;r*$a!;5dVU_k@=(LQZN}t1WTBaEd6;?qVkt zy*Svx+~$XUQG{#GS#K;d+_GnREEOq6np=HiJTZ}V(@0p=8NOG7%}cU8kRim=5qsVp z3MsKGH)MKY>V_Bl_Cgx!J~^x}v2lUH20PWLQ*O{mYqgpy;D|4V(`%czxgzV}{g2ig zeGv66Q^joB8O>kbM(r7M!3lQmPOwtZtgtn`d@GAW#P^>aoCN8sJM zEnX0voLa}S-U&-YSY6HCJ(1gddHbSy?$}`_5Ou$r0z3Y@xyd;$cz5QzgfYpdxCS*W z-rT0($?$_EC;F)H`QqUmKTXp6J$t^8g>JaJCv~t*&K=5O^A(;Ldf;{@&Pz}{QK9Cy zX-SwD$`sgKweZDRk=P$8Xl>TKk17*J%Vdb5`I`OmY#R?u|I~^hu=sf_jT+{ zS7PrJ9jOA^n+#F{5E6Mpk(((HL#@FQQCkAB@PYrud8A&u)8rw$R3<6k_PtC`tmOA=ooEHt%F`D7eqxGhP2U6i;XqB?j$Y z_;N03qE*lrlLxiGudwuoYtiGh3l~U!kZrwM@?HQIzI)>_^ezy1p!r?mM-cckl~}`t zLJ($jCu6I0C=`p|xxA>OKuw|R{;Kn&{IBS1G&t!7EYV;&pXFdb;X#B z5*@?)Y?fVG?uT=4CA*JKd!s$(L2$TP01WR$8ZdbWf|;jjTS80_(&H{W^=}VG$WEg? 
zwId;zIm46BoE3_7wYjz1FH_+)nEiyQh6dl0veSYG+%a{^?uTNn3%c^O!WX*H@GU`8 z!EpttZ*Mzy#(1kc;%t%%YxjGgyd?R=P=zP#jNQ~a8oZ#XZ<$fo?u~$DX#v^&KKRPt z{?3}o4^O|yPRa=SW6qU>RhRhan2r=P@;pGtMVaobV5tBMtzCaWvOECcavwVr!~(%1 zq3k-*HS$PN_xWo&{lQXOheY^*{)PXx%8Y3xY%hV;lGF5ZvDEI)C?0 zIu3Pu9i}R}K~DL-Z@Z2w#zellt())zqftV2t~w1pd9PkCObY?qI(fCjM*?tPq&(ui zG$|T9xu-lx{dMKyos-P+?hyH~WN73<5NsC|x~t!(p|(h3tMD6lSZ)luA3R0sUnQ4s zNY3}dd4EOuNq%ocRt=P|m+~R^@zlyx17FbrR8N^byo)1Lvw|)9GGC|lcN{{tB7liPj z>!jTV%Mc{*FG#TQrDNp_?g8ScKjuDLrE0I{ja3(f-(D_rNA|u+g(enK zkL2|T58W7m>WyBvqjG#P&QYh)QS66yvv1+eDLycJ^VX3+&l71^lxQ_;NW0F|ZF{|2 zFf!Jdj0hZc!(eIF>k1`59BmfMn@;dTa<-H-FDd^kmY;a|b)66He%$oheVZ>%a?M|J zufh+qdXW083=YS&n`SfbCm0ZjSgsIMlXW zIQBLWFD>mB64pUDnek%z#FHR2`ZC(`YXl>|BK3h3dY@YhMNd(QVt|9 zymf+vzwu>78MTUmag5yy{55e~PZtJ(v5~p{k+3J`)qX$PY3~h&@KxOMzNxJel z?Id&QOCT7=Z*I2@3B(2Qq@bM8$C9+~$ z5I$w5`{$pgqryn4UHw5IoObkDb-yC_Yn>Or3NIaAZGP|jFOzi1TGJhHoYYTLl7bSu zz0n)S!>8Qqg^sMF5yy{s!73+k*S=%En4*4+mJtuc?cF^4OA&xa7q2c0V)lWxzahth z6`p9by`oTfFaW~766?}-nc>s7XUj4^*}+6;YU#ikCp1YwE?xB$O|I@P; zw7t^-;J77pe8UT0)H&I|UC2tuq;Vyi`GNp=F0DON@!b@p5an=IwLts+IjU;@R;Zq7 z8Mv8d1M1l@an~w4bXI1+Ozm`l=L#R6VbYJX{@i`b8X*^4j~dH(u0(@2Q)<%FRq^}!okDB#!8aM-=i6|&!|?GHD*!GM3>ndxB< z?5?S#>n0o*znH zL&Zmjgk#P2&Pbcg+QXdgim&25we_z&&^6h{??T!;0TWWEX%B(lPb=e8FpP3eI*!3-pwb`r*NX!=-I@h`7H0 ziS8E%Tw>`k(_7#S-77IS`(<1(^l)s^N&^bAtfSN9C{(OhF6c`OcSVH{zex2a8g5Z@ zjzp3A9@95bCEqG{z;#&{(@hWP|2WR{oZjw+`%-oO+oRY2iyv89$vPK zhPBJ`*x~}H$hyNcWU_9!L=KBf`V)Z^~80gaY*5-@n-7TeUZ~Y)5b#3e5 zSARqi_jW5~(2*PXG{lQH0OjctjjKxnppquSd_+DF&o`VA?>-xdOCNX57ef%%?btBj z)f$9LS}WPxNj^I|*HUrq+hDv(FLvrD^&r+gnnR3}A-KBbwHI4JD6V>NR*yCYp*x@J z_TOw6-h>AJhvcH8GQLcG-35 z)Keomwmh@%3nlrS!SJ;<+1NlFufJs1b|(<|p-WGMJoiBHkkW0R({7-yRM`9dLNH>k zd|5Ro%@s>4A71;$<_*(L_2%a^eQ|^JE^RMKcMm+*w`$xb{dyOd>sHHIee8P1sb< zArKGCusg0J5FKYnuI~#Dg3-QF_1qgz7}=h&O8xAG@BImz4s!Tl zosh!ZkCMJ96zQ+OtLulYeOIKj9sR-Eo$$R&jgGT7bEPt_kb1+4m$pl6NxBnG-Yxr> z^zXgc%R~mBK$hm#v(k@GMHI}aX zvfdU2tK&|r-|h%qPl1cOMoGV!m68^(k{ezNcF11j^MKu?h0T7F&$7OAe5fwv0&0?b zwf9^KLeCrL#V}B@U372CgDEP`$kuU{O}OIJl{&_fw=_IuWl&=ocEi($-Pb#9%)k-J z7Gm=Gk@R z=1VD_?e;j>_9XuLLu({v-Wb~x?+QKr4ac(|x!|Ompe@T&TSPW-JnY$E3B{BS;98v)M|7R`UmP0Y zheZKf`2pB_oU`}?V<1FR&K?$y420xc-fNDX zfw-~6t&_nc2x4c}*UCEt(PzC$GxhgACBo7@frg=e@i?_Cg#&k7{p z>IjBXgzJF=*Fx~Bk|{UrqCYC)&yNhA41nK^*3Flzf)O-yT*Kcj2;xVR;_U20Fz<0_ zo3%(NJ`OJX)N+ROQ!y2^+#B-602j?OoV4SJ_^?YuHv^F4R-|3&5(qv9+OcCzftXu1 z-**{H5YF+iTwdf$>L>mO_pQDj1oeiyzB7fvh(4%dZ)+U_BVEbUSGt36zjCwend^ZV zf2_`7B^ijb#VjYNJ;CVkn>_z2BnX?hk7S!S1fzG`_E*~*|Zr+n`bey=RC^@>>A2->L zbzTZ|f~k*dh_R?Mw$24@xf}(#np0XWC%rJou|}Vxhvu?EFV8i`fogg z=&Z-~c)@R@*zlkl4HFJGMHbxlhh*h5QLZ#kT(wqZSo^>qEn@SF4Vt{6vqQf6p|1xP z)dp86rn-TXx}53hA{RJc9sBVkk^&6E(RqTZkB0v$J5oAvK~Q86w|ncPUn z;*Q;3MzY>`VDLOSv(66@SKFRGG$rlpfa^EQoc$3L7Vh+%Q-9_$@l4$>)EguXiMX`hGV`RL%G@dL;fh?p3zkl#KcRPlsM&SbK+?Zl+jNM@9i6JR zX7SHSf1u08rQW>WSjnfpuIDT1Cm#MbvWV2nx_ELrQk#8I@_PBTYoy+ulz#2TFezu> zH!GX4X6%9*2-5e(MruPp|4vwh# zA%5Sgy(Yr$;1c%LmA0j#uX)p2p4D^|Bx~-mctppQhi)eYs_EdG@0k!%?u^!m!fVH! 
z+(6;wdTFO>3;+7@e3g?_((m(p{rDpt&uW68rhb11rHqaTR(mTf zi~=xxHh5dm5?Aa=T>Y&#hJ4Nv**#}Sd~j~5mM!A*hPBuU>08sjxO7S<$;Q+hLqE)K z2;QJ$aN$#)nI;D~uoPPG4pGrsnm<=dgN!#6hEK_p@w%dQd)IJp_C(5?+~BwI0a!Ho zaeFf9k1!}b^mON83Vh8fV!KJX_;IW5)U%f^D3vGVtF^5$E%91w)SL7(G7Nn#-0lgB zx~z}U+i8gKKb3ZdN`fWKpZN7k+FmSE>-J7SrO9echtnsbgfnZ-UbelneGG?~SOg+qQ!*Q^$x{@KP}4r^gl2RZe~my09S z>-A%Y@};1;MBiLfE(dw*nG1R4ir9G2>ah<+1$~9wcUMu=K*@i4BZZ=YhU|llITS5O z?+Uw6PSJtab&th&$@tK*l~Z48wpn0;zvNL~tqn}CpeFH^1FX-C-%w&D;|6SI=fp{W za;|r7*+Nws8mfA;Sx7%}H?wxiXCG28Ho1DBm6ZE-m{{B%O@XwrY~iC(3T6yFg`P!I z(P^+F|K%tZXZ0`hb(4PJ-Fl-r??zn_p)0XqFq#H)9qR7UQ5rDQcEhA*-Hypo zGG43EKlgjIJ5H%D&R`sKM~0fioH?sK5U9Fo>%1`!7^u`Raj*7-pz_-k-Z4*%FXx{T zT#=Kw{df~02GC$#Wp`NM7pB)a|y0IBIWnj|L^{cYUDZ72js^vy?817#vkfvtd(%MFo(4(cYu>rf9cx7h%JDqj z?6v#^Eq;)mGc=Fj)^OKk?U-mRWfotm{YCQz0u^uHq8%$`~HZ9!lfGrM}!^OXzG@sZgDdp|C-m`o+<=!5UoX=Vzn$9J1BZlyqStf`Te zg^KSZ9}VYiCF2#t(wDecTv0pZuE)F86%)cI&BvyU2(Olq9dhMb#HMkSHx(!nd8JW7 z!4Cw93NshxcwuIUfABjq6fcbvEuR)%S*Q-PMS6GABJ~k{uCZCB%LJ}F=gG>yP$QUR zzBdQWXcIM!qkbilhD4R(n&cr#6GGvs&+Q6PO-!zT`{Cov_IF0?MH&-n1;g9737h4A0{LP*L;0%x>{jEh?GP}@H) z%2=R+Id3#8#M3pf?C$NTgGY4`+4-dE`#eLa>#UPHE@z5rd-+iJF;nDJ z%D%QL8q`x;zQ&l7_U?4>6i0_MMo+WF1Vp%iV-KtMDsq3$A3#OhN(#8tpG>I^Qs7Xb zS!EtU#X^(dKr7Pkf2vX9#(0=3G$cv(MaCB^R;T39!)e&`FlgmW9}SC4wNej6xZ%x; zsr!!y-5_H+Ij1AW9ks92rY*=}9LaTuu|!P0d)^|#qhR9g z5{D4OiIPhVgwcBky7@jQ8*9mjL_m(e?4%^)DAZD`g(g#6YNIsa?(Bz2t zm5*QKy>r4XpQN)NRyf0^d~WR7ZfBIm=JL0CyC6g6M5la*3n-1v6LoGBP<)QWY<)t( zvGXcRtlX(s;>f5N^o$Doxgle`zOJZrryjHFa>ceJPlZoWGDID4ew+m40W&{*x9{>Ic8lR`-+!aB_$k-IM5rotLd>*9IJe#m3cHGf4O z=|^MM%fFy}+XMm>t?eyv#^I&SUxP@$*S4y>xVv*4Q8nZnVzA5-Eh^?__93>=d0NVS zuiPAuUUz)q;k3t2o2w(VPJ2v5zPfnov;%&$7dYwda>UmQU*88LIiV>erE;GhY3H$Y z-O8wUhWT018JeC8VH-?#nXWol-M1H7zr_zUX!B_wi{FO4%N;$ zpK+Y$j)57ja-Z28x5fl+A5YJ;(A2RmdBlvpNC|x#svffyYm$0H=l+CDV=TB--`xH{ z1%|vEY|j+y;(5);j_?#~WH?Zcib(?scaKl(@zjRelAM;(9%~GW798^=^R&t)Sw_VU zIe>A?<89shZIIYt8j|2@3%w1N5B;_6(517)m1mhfHoeYU<3#GO=}&T!`^h-^^EfUJ zP9-N&zssj7i#wylx<=c8%O*J4lsQtW zgH|{#wSrDZxKEsj8QAvqDkf5mV54bK*tNtQjGJB#&Cqlaw&D2dFi{e&j+NnoT26Sp z_PBRFnOIemb8T9M-4f;PolW0+EOGqkOli$>c)Bk!yFbp{8pzVEhq6Xu9yZ~LVAbIEloO_!Zu)583XttqeVNI!8T;|s$? 
zODJ74Xxp48zoW?=Z7SfjSp6zjW`ofviPaYfs- zki)=`s6C&XTK7PYFxYpEZpx)gxT{KiJ1DMAs9h<4vQR*iP)KKewS-5V*ln77tch2Z zIDCmg`G~R-aYJ4uaITyJac;p#WH+A-kw47oXUric;qP*phcH|>0n9I^5 zzOfTZW#PwamwjxE<+k-|`_XG`&h8x%Rmg(PEUZU8#1>*cC-t1-#zpwX)V#gpz;aZw zyx(CxUmKf_?ZU$?z_PtEQ^xETxK&P1Sr|alB|FR2XIJe|a(jH`>%C6Uv2scL-s%E{ z@f@GBdNVwYH?wN%v_Ml*!`idmRvQq;FePR zRn8bXt_mAx-dH0)I=hCi(-PZ0VqnEna|mB>EunSVLcL`1g=5|JxE1XslHBcx0Y_^2 z%S2~n$Q3bFR=eO?G~Z_~bqWlkMK8RpQ6bKTH0}{nYc#%bR{eE%nF?OMkltjt3`E1t zY1-}zHKJ|X%Yndt6~gfLWN>1_a-w9FWqzNYJfZifOxb_ z#e{x!kBU>lLPGWji=m+d3&F)b8WQ-StI?XVss5bggmKcx6L+3nor{^4n;WZk@CewvdgfvY3hawmDD5q5Lx`R6G)7!ln^YR4SVb1>e-*--;F7H683iuIx8 z;WToi#swe_U|c2AVnUyFb+0 zA<&->>y4aY^L&yV{}KLqpF6pUT<2E9AX_Hl+?dd$8t<#d+a>{`CE8QQtIeW2`*nB_ z+uXH`c2Nk?ffXKA$HZ_Zs(+rMffQJ?L&{|YWyyVPzr|uy0h?*7rJqDP z918eEU*)WZ_>u^p7w>gZ>-C<-;%$PJ+~rrysx2@t&LXzl-5jT4-_BDxDhJ_)u$32{ zka)jtP{Op;3bmB2TfK@c(N}ft7A4XeEccIy-S}Y##q;JzZ|a(%y-oeQvW+RERMfYG zb?9TOV1>@LRXmt`H&1iuEmDBlJOy+o|Xv493#1uCUJMC2BQNY{P z%zFy76=3?q?f8-m6@+cR_9}FXIwD)wcAJoVFZXVRgmsQKsn;{470|VDPr~E;qV0On za!|A`xo3dQy*AQINclA|@-5MS)CgyLIcwN04N$|WNsfH_k>E z_cI)&)AKc9eL^O3<$85QY`U?q(@h<*TvtcClcm8{c&bL}xiTond{6onYvAm0*U=pe zK<`v*n}3lp%ne)DKD!|cYk7;M#a6=T@yfbvzS$TV-xif5*QpWlB~59j*9?e3<>Zuh z0>lsFzM!RUM#SBB$6`;3>Jc|@8u*kh)FS$&CgzB9s}bUc-E5&EiiGNd9V;ZnmJ;&Z z`U}5&6C{LMC#i2&PB%VkuJnpJJ7sKbx`t^(I4k`5YM%-dDo7>x)(ND@;FR-<*YBMc z;qppG<@kq+#Bp&4^+yX7h>zN=dt7W}h)kcg5r@SkiIqowbmaY5Oz^23WXylTORR|4 zo4PEYjc8x%yrqbFq_J~4?8T`C=%0w9oowTO`AAB!TPnBse!SbDq{>h?} zWXgN<#{mTyBx#sGi?dNcMoD^S+)gFfE7Z0yWU9hRuc+BwM-}E|3XsngZKAts!QHk1 zL!veL(}`>92869*oMf?p4so!kOO<7T22ro|LA!}dg%HW#llMYQjxdOsyGW2 zUPaU7CS)Q?KFcn+ZEWrzvG|9fHnCp%z}m-|%ZaS{U+uc?su9P!b%jEcb&2z=?4NX6 zREY6)0^+CAC5W$wCU(6|S0fJa_c~o?;U(7T>Pu5Px*MH0#Fw=%;(?iwJN? zL;M75$!LLJgDP>+lwG}LxiXQ+r+Mc#*D``r!{6S$mxEw6dR?Ya_oZ?FnM)HX!y<&O zjs%qna(t(ZKMnj@H%o@d{7Vxn z{j$h515b?_1X%@r1#u5x?c>RB&xkXZev%fmOR>m=KEx zuI(&7*|*CD%`Q`%o@8Et#Qo>57NyhRuD|;7fpjv@@xIG}b?NSqVUXrJMCM^1JS$}4 zBTmJWhpU&Ql{v%ih|tsZnGPtY^QrA6 zi&q|XLg(5h&Q6Lm*qpB1N_ar(8RZX`1yEd2F0MB>|A7k>*;!uiqfl_MIDgZD2Ndi- ze*5YTGGCzh(OTt_2UHBDo$9G`a>Z09%S^>pSNQ5LXAc*l!Fxn+z{!@3&%awcSMn$UZMQ7Hxc)IyxL?I8X9*53vnlX1U&%M`kwij&`i>^+yIMaCOH8P?6O z7pH+O*Dst&!VN7e58gjk>WaM2=i3+;xg+?*lGu4eD zPh>e!^|-#yk0wMMxPQ!yF`fxe*Z&y2eUU+!!&7g5K<>va>!*Y$C)!=d@xT@z>} zmHg;uQXtkC7G^RFsuE3pYkJ|NN$~kk-Sy35!w=$NzF&Ykj*M^}%$sA1hA%m-r=Hp1 zP=TU?L#{Q7jp}2Zh1Ic=T{5WAbK1DGNUx2x-xT^^F4Dt9Ezy~FZtYwlX()(G-CH#y zOe8+7zuNQC1ZyGthx1EzV+-{EQWf4|u^lb3toP{+*tphll%#(UF zcDP+K(3#_aO`f)ij2B4%!}a3+>*gAek95#apc{Zee|ztjaWm9q5eC)?HYhBz?w35} zfSf4LxsA`s{P7=yTiQk_Xj(ilH}9S+UZ+`GU`NjYT?0XW* zaBqPTK8sFGo0Zyt{)3$-oS%yHQyagJlJWK*8xEMyXp?@(Ile*}lNKm$;*U>$B1u#b zZt3q!2O9gB2IE9M)$t=Fdw@Sz3AVh|GAgMK=+bYRpGn3e2f6!>85TJ~*}Ns+f!uG_ zC#{7Rno+RTp=df@khC*J%3QfEX)s*#c+=rxJ>;tntKQ~YN)$#u6W`v!itiTpPTS6t z$B=eJG3SsAhAn*e7tdFKIp=8nD=Bg0W$4x?lJWb7eCN<eE;LeVM<@UjIbWQjUr$Eko=u^20qp5L19 zHP-|;z6bX(g=ynWi-lP7%TZ(LGU+%+DK(Tn?Jb&!F-7R!&vj<8%JBG9tFk-1sWGAr zX-uSE6SpN>+O!bxDYZF&Wy%@hp;8mqURaUes~B&j?{UDi_v`KE4%T?56>G{HLj_&< zi-O#Hd-yRF&G-%iY~KTqEfq7xLz(r-2fFRDaVH9O$>8u`N4Wemu^&By1$BLy3-VB^eEYh))>N_<#pf!I*@hb7G} zc)nP`Nd7A`>;;!*HL9v1Ow>K2)=m%3$M!IM%hQ9Sl@gId+7VwVw_L9Ho8xQyDT%T^ zGOlpB-nHYoC5+ZTn%72tpD;}KvCSlk z$gE5%-c~oY6c5UwHmtf}g0#0Ri+5~YSg(gv79+)$b#hP?*!W)OfgHY-oAoOsii3Si ztCg9L4stSA?0Owwiffw%?tfFVz~_y%@0+`g(EXBU`Gl$s3~sG-3f*dkreaN&lvX_y zoEqG?=rYMKmzwmdPw2vC^@Ldxp@=zdGy!4NW#~D7Ra&UP3#7U_$tzNL;71|$-y?~w6q__=30|mN^I=0sP4q2 z2-05mnCz?EN%}=@1ax;ZTUlVqbEcy%q+jY$gK|0d5eFR8QVQEk`k?|hY_=E6a6rcB zroA=A7FhFgJiD&S0(`yNuB*PfV(*Os8WR~my5qe)PHDa@QFh2wGr2hjaO!Ff!kt 
zwrhX6{Cw=6&pa3w6M@o;Ew@Id^l>Bb{U<3h|F=(Dwmo2pG;xHx#(5~)2JcEMFP@!n z0I#rX&J{%$ob7VCd6@i8#kIm|dfOf{&mqP^bw#r~^7QROoj!R&i^Zl$oyue= z(8CW6wuhO1ka?+`$`T@Dz5(#zE!FN`8;Hs;qLfEuzSQ*d15r1Q2g9CNl768ngtUu` z4pnr9Vkl$LrQ(laFkHF+Nfu`WcDV?Rr$|RaZqYVdi%XHPywqPajz|RiIS7|rk3cFz zO^U5;1P&eM>-&5+9F67)PG!_^ynOfkyi-RQrZS90r+va8ysRwkcvmP^WY6tdqb?8a z!421J0yI%SKenJ#&IqDHn&&kW%;0}QMf}Hek}v5gZogq{kFj3f^*3F}?}$$?s5+TS zf$BQ$p55ojd{^W7!g2525W~*l=t1T|^WJAqHL&wW?fy9z#FKqN4P+T+A-`W*&(D}0 zH%7Kc@$QOb|Z)!MImWLwpnu@HXWf(3VuQK8D4@X^9D24A!IO2FW zGo`H$N6xvP2-TS|)VN#Uf0-JFdtXoLY-bO{a1nQ@)~--+7{#)^T@Xsi^=a1Ua?i)D*W2GEvIv7cHI(KW_jxk3@GDh|I%^R%R1njlND&!4AAyv-r<$8 zK)zg>){9IVJgrYDEgyEkPHCgG^Kzt~J)(cH(2I(;^SYxOchazOllIb}EAE(Yt?6k+ z#>r3dsAnyc^nv3um8;yYe)!@nIQ^B(Q?$~|7Z~ymz&>Gq)mL$W==;9cu6-Aor`}O) z6DLUe0sHH>J~hxFzk_#hvJhwn1VgcxSHcJ;1O8P$NbO;GTW{O`>EyrCr&Mk=C_;UmTb3G#qQ3weY5X=bNXJp4%Z9p_PMt7fPIE&6FjJF&8BOj zOmu~HYdiTxnYP!fl^FLm(hOW~=D9jHF!nok=BUc?JS@kP`2Kf&O}iX68)n!C8qZf_ z6OK)4Zq9c6ad%VqFcaT-&64vABg|aev(+-4i89Xa<3e3N#u(R{T^5ZQ)56q9cXPvo zF|p?SmxBA|Ea17}#F^EeE{HbP!++dLnXa^H!;s5f=$W9fwfAF4>4h*`kgI!E7Z6;ugH0U`)J2c>$tdG`UvxQb^o>- zD?}P!AE&Fcn16Fm-M)YPUR|?)*wbfLPCSQP{KwD5gwNplaGigTA z579B)M>YKpk6(xK+)1k|0i(lBDW@{F6}UgIZTd&;GCYVjZ~Bd?Uj19F8THrZTgQLK zn%+TI_I@lFYwRYLOO@I##vJJ1Zu2|WXp?qcG5fa1UAh0-w_3k!p62c3RYw-Qh%$vP z-tY6SW1y+t*>Ud9G(INPuoa~&k2g2tPQ2;+B`(I4`dMe$qfuezn&0Nd`_@O8R0S*6 z>(?XH1Z7KR_4HDdxj5^z>%Ky<=FRP$H9yz)H_f}+Pv|!*$oy>5G^@kz7*jRXr92jg zsMi;&6&u3+pmkc0bg`bxeX!f22U(SJG*7ect9GqXlqoo?_u~Zqp0uZH_krG)wawza zfqim!bu%MMI>+sP(9qP}mukg?;$G&%v^fbOLEfg+o5uH-kMc3QQ@m;WW|yB?Io_*T zH21AkPpGosL&0X|#;Y^&{vN^RLU-5aFh{2_n9rLlC{m~=&YMa`vFMPP?=xm;@8TsJgMMpEY zYwKI?Mg5HB+@}+db&58H8oqdE`#jpT-qJI1|JP`9!>2`~-UlL0lL{_dDnvw^zHbI! z3qBETsx2r{;@vRr8{eBFZ?EG~#(ngJb#CPT0op>m>u6N zn>V9Id(Z0YW2}~2nIYV#pMOoRjE(N{994*Kfo|L%?Uwg)he*d@^WDZ~``FkJll<$m z#HmL@&GSah_I|n-W~RN2_y6m0xQYGIIP-1k`{&8d|)>lriR%7Y^p-OR$fHys!H zxS9`x=boAzTF2Pl8ueo$=jDBec5L~$lfOB5wStdJohZ|+eTt@8$iw2EuUs5?4&_*_ z?9UGFjxh1<52VS$b6;=IcOJU=Xpm{%q0L;IV^PLoe)5No$0Lo!vVGIXtO_^JY91NV z*1^X-N*uB3WNI(ty}M%FNA7-R*~&B5i;fI6qaI$mHM1)B$&~A}qv)(a^J!UJ!J=+% zW=Q>1k4umBH(m49Ytt;z-E8S}cX6&ro=bMzW!rsRq{&-w$nKd9qRf-je;qDx*VD9L z`E*FDiryw;VvV0CT5})N^FizD@tnf24JXPJKF@PP6<59}%JU1+Z8}}5Q;zo_yrC^I!8yK6I$)V-B6{TO|ee*A5@kCATNf%iCBV z^SiS<(ljodG1MVvh>%;_S1{{7lD2d4ikI4mB|j0}rpR=54-vKl*w8X^>g^^7+Pk z++RB?$8E<doI z&(B%=3Iv;KK6CwAdxn^qK3C@KWxEoZTb*(}A7-))z1piK_X|JEmhO4W!Cq#elh2Ip z?IX=%r-F9J(*&FF`=LWzlSi1;o$X^5U-2|^=GYxx%5$8pW*=Bz`bwY~w|Gjeaf|)U z+WB)kQ5oVJ?>WI%d6HZS!fV#kWyS+{|&ey%tmX`=tG$_f;D*zrAwWxR3Vr zHYM+!IQ*IMzhKg4-+4U$FyrCd25ounCgYw79{aej)9=Lj@iS`$8>=nfS1xT6Vj9Jq z4$pTw)MUQ#tbTfCu)INE&ZJkDW; zO^k7K8uhML_E?kKZD@%>Td8-8-FG;1-dFJ6^h|ROM4Q>|Po}7vJ(~L_3RP(}Aj()j z&-vqB(UPl`1#Cjz+ghiS@9SiO}@+pTIb=2nq#GiY|MiR-62 znw+;!u5Id4)f}z*aAV22rA_fwryAxcW^IlgU%z$Ncq=o%_o2k(8S0zPYnwFYGL#vZ zeQ)va%gUI6!*UKC{KU(24>|bqrn|4HwYuWbT$=*Sm@d6n7qf0|x-5AVn#(%OBwlUe zd!%}VX;QsQ5r-Bv%#ocFTXw5i(fDmyQ_*d>or!OlyHomTC*#)l!1;7J>zO+z9A?$z z`T5-u2|wTW@G_z07hUk1;caqN{84f2ejhWunad&XH$0cH!*|&dn*eioWt*47dA@H) znc_PD{)Fyefv0Pa#>LJIuBBcu-$eStDl+Ss=Z``72(>5{9+2*aIxq5Z=(((b3 z=K9jU`}2lGnoI>fzjx(+^gLruvhg(=rL4-T!dyTPo(rOfej zR)r2Mb}~^NTYjD5Q^u5V4|AOvA7JkI4cJ@$wzp{$SO3Ytx51`ENdKiPYIEPZyGz@^ zZOu&9&*%62JQ;2h6J3gC=Dw4O?Q+?ESm9tscC7Wf(0&)wcIx}_R@}EX!MXU0`9Gb^ zRJ%%N&s_~O-M=rnn?I|s8Q!~B`Hkx7q(RlZCq2}~6 zt84a~>X^gnf4nN)u#w3ZQR3*CbzUZBnfv=$m%L4hRIlGxO6_YRzkJ_qQ@N?hb}7y3 zD**xK=jxryE#sS+h!VG&Y+D*^b~MX(=**oEQ*Nq7s=)MNCdK83>#FAtH!Y$i1umKWFbY zT7ApMY_z^~!=t;S8J*L5(sRf9CMezI=S%uGGlP9+NA__FFcZ6!{`QUMu`(?Eb~k5e zgt2l<_jKthZ|y_xDWMX 
zqfW2#)H6;89=~1@?{0=yu9JSm@!Hfe+cz2g?9G~zi`Um3S;bWGd$8?%ah{*Oa(ia_ z1}?_A|B^Nrs2@G1ev0bE^|-j+uS@RB-^8rV-z{07hp(A2Ag9Hm)=f>VytBQFbqe6U zKUclR&1hzF#qJuh=1#CFlX_j;9-iMUV%^%cxLug}Vezh8L>}%_U%jTtVBUMP|Htd4 zeZEJSs_m>cU*x&46{p(7IlqZC?+%{6V$Jg$1@ruj^g0@0())L?EYdFA481;SaC`0# zo!=u>&K@uP%$pGt9Gg|=xvFvV7QISvHX(P*wV1&7c=t?qwC#r4=I8dH&M&UlHm$!l z4ehmuJa^dGzF-e;bEoh1fDQK>nlY_=Tpg7nz)U(Y*=KGQS95H}hTZPV-Ar_q)@4?O zH{w3&=WUMMr?$&K@VwjRJRcIOj%i|sulpACl zE1tvn7PsSVc7BH*ZCkT$#z>R!^GaMhe#bt|S>bG@G@N&=Id>(4dxZHkHojP+?|#N2 ze94Nei+s$DZSzW4HSsiYP8aT{OyAH1)tVoDv^3WvQkEXHBb%G)(;`)mo8KFo*z!&j z{T756zm}CVSX~Y@6CD@4d*;l2QQx*avDnM~i`5#|Z*0MR&^XeDVXa;w_BaAJ%IaGDlfz^+EP2$v+KYBIf_i3K}(l)LLxZF;VWv1=NeFfoIl zxj53(uNKc)EOoQnwySfXx#V`mrxniy*G?5+vGpqN`#BqTK44I&*`0gn)f${%9USd? zq2A_jQ#(`P=ifQ6I6pFb&2i5ojdRsb$!n&GHfGk={m%Jg%(Q-kPr9?7mu|dm_~2Zu z>DF;o=3E6`&AxHHHa#g?&z$p%zhtq_!&vrN{k7+M-X}Bv_NV^2{Y~lyP7!H(@!UbL z=+}+fg_?>sx082U5N=9D)cdmPR-}3R-M(M5+%YEqq)Ju4uW4ZNt^4WR#kd*Ov8kk(C_Ro7B5pEjC4b7QqUZh#& z_#h|~=f$=Kr`-*!A7HgxeCiAxU;m+T9e*W`<$u|8W%)w}{Iu5+|p!#2dA!#0Z8NW?yU#Ho^ zb*6-ul{@#~{Xf<6Sa%ENca2r6S?z9y7_ZYM`m7u0ZCsWuZ29AokGVYWS^V3qP0hHp zcl$+J@*a*ewiewBF@G+NT-{|wuz9vIZNJa10mk=8$z>JFdYK97PfzdCpX(rVS8nW8 zE5iKn%Nc)tRem$T?m_d?nX;PWclO34`hRvBaI1%BAOF3(;x;rsINH(1w8|CV?BR;? zrr`1d+1y%IquAu=x~5Cp*7q!q)Hh2XT#js7+QW>_~C_+fnMbXfvT#PzBlUYH<` zRO0@a>`qgBA9|a?UX=?EZqU?hT#(Va`>JLpBwsJjv>8H;<-B2=D%J}(S(o3kbxzIu zXtu4bGp~Gg(`o*m=8mTe8Lvr2H+#R}KI(d@ixkaM!St>CI%oBf_GZ_mniG!ZbT*N% z%6zQQp{^Nu&Mtb&+4?5x;-;Q1Rmi7%CzpEejg58nquo;XZekwpa=cn}wXcbu9d^0L zUEaSFk}~YGRiGJsAS8T~YmjL>Du0Grothi3OI{u}^+U}<`*f*JIft1d_CvbxC=Mo?9t6jCZ8MCzXqa4$knj2^4cRu0I`S9?>N&Oh#pATMl?%5&2 zD?8pVcWfO(rL+MB_9 z5|1^!XH0&_4SCi()a82DtoWzg|61K6YI(`;jm)ONvzd~WZ)^%X+|84L=e*xW?8&=& zyszo%z9~BKU{llZVZRp*KL;3>){SP~EX8~B0!;C1&6}HSw;D}c>c;a^L9vC`aeXW( zz&iN`&v3JR{Ps18ca3pzXr4AlObzoQY-fANH5JXxuyULB-z#D~{cQ(y%9+;uD4+e{ zqUvdO&$#hm?n8@QyF(u5O%rmO>uGsz*BHQkJ+3=O4l47oyxDQs;cla%B~55_);cZP z7cwQ2w>iDJOAeDN%6(mrv!9$&bj!Z;lXvpno$YR9UNh3!jE`U6-~hS ztYb!-8s=5nsF?Qql9~D|tGn2TSeso}TOD@bynbEA91nv#*D^hJ#s0OSVi8j=B7c*8 zjjNa`3sz4X(yM|=-~M^OFZQKO(f$F8*7PrIvM+5|=;;z`W9yXRYoi~fjq|m>Wf%3T zZa&_*(|+TC8fIUL__XtXR52?~@12mUN@-JUAjC{3IR3p|;{YiXPr^TF<90LR}!>vw$( znjLG7XTSg9aQ;}{`&B6>a(Rpy8DDMM>qapq-ADVr`JY9bChj+ySq+IcDRT4Y_tMek zY}VNQEBIY;WcraNW9P75rz~rq85e7QT0|8rnx}=y+tu7Q{ivl8S+wnR1c?C96!5# zjg8CEJJ$R-;4txgOHZ?~*QCgeoTuLj=-&5G#xS$7!RHQT62r~p9gjDa-OTmsT=kb< z%o1(pMXznRs80j))za73xu>t0`zU7WxfK2;XZn*Ra^7fW@-J!9b9GFldA|B=ieclN z&G5`UZg}#1$?R=ImVF2aF@rOIoap#2%9I#($l|sQ=TX5a&pLjqXvS=LW}R<+EfdiA z$L7F!p{D7M9+u@BR55ohmiOJBJJx(I-Qdyn`n+GDhSNr$q0uJgp*Y(fJjXP#=#9h; zN9%JP_wC>gYn@E51MgE_`q6H+`m;cKiW+20Sp$a%|)mY-Lg;yr(@H|Ou|#`|pY^}WzB z4RxdJz_cl+1(=?noDY{y>22=iJM_SV=dgSu-90ug4>MsW(p5{I-NRVeub&m?6KMkT zjF>xs^ViIAGxA={$aCEz!=|?m-LvpKBm*8ch;W|MVNs34Q#WIu48t$SnDz;cX{Kx-C}MderKhA z5s~$Xr-ylKlY8E(7eQvwf+CkQBt)7tpT}fM7#?kw-Wld}Ay>3%(`l~#^Lf#xz|CV9 zZ=PiR4yIT)gwG#m6}vWB?*LP|#J~e<-_Dd!eo=`Kesb_Sbn& zP2=5iYy=Dzp(`S9lLpu-gTziM)-mxQ^ znh_qgmsI2Zvz15A4VZ4{YjXV<9paO^nK|nG?CslP9>%TJ^2hCN*Ei8at~YFjw=-KbptnH!nepTbtIt>|wOi`!jU zl0VEmS^NCWk5iH6T)U2=D~#bfTc&!KLxzQ$&Rn@Q&M zvB2!}JlFGA*s|xJf=tUT<<@Rp8Dg^9jGNPNaELj!0C?$8K4z!SWnF?d50@Bv@&gQnmQ0T2ky zAP9n?IfOtcgh4n&KqN##G{itGw17Bh39XtH=>fQ_&THp3R!3fo{i z?0}uH3wFaE*bDn$KOBIAa0m{=5jYCR;5eLslW+=7!x=aW=ioeCfQxVmF2fbL3fJH| z+<=>K3vR<5xC{5-K0JVj@CY8m6L<>G;5qySFW@D-g4gf{-oiV04iznJ^1x z!yK3k^I$$KfQ7IK7Q+%)3d>+QtbjyV39Dc=tbw(#4%Wj4*a(|oGi-sauno4u4%i91 zU^nc6y|54V!vQ!5hu|8E!38&yRoPo1&4$i{`xCocvGF*YHa1E}*4Y&!n z;5OWWyKoQg!vlB-kKi#pfv4~cp2J`80$#!^cnxphExd#G@Bu!;C-@9s;46HC@9+bD 
zf)4x^kQ`D#N=OB%Aq`kUT1W@!Ap>NDOpqC}Kvu{G*})2OKu)lRT#y^`KwiiP`Jn(5 z1RJo0LQoj&pa>L&Vo)4PKuIVCrJ)Rzg>q0HDnLc31eKu*RE26#9qge7I6zHsgj(PP z2AshKYC|2U3$EY>^`Jg9fQH}>jlcsu!3!FLH#7ks@C83;3jPoPfzS+sAQ+lM2!uiy zghK>GLKH+p48%eUh=Z2U3R*)OXbbJ2J#>JM&!(cd!fRQi?M#C5w3*%rsOn`|n2`0l7mg|G+~!xC5u%V0UIfJ9gct6(*(fwiy>*24za2%BItY=Nz?4YtD$*a^E}H|&AE zun+db0XPVU;4mD4qi_t4!wEPEr{FZ4fwOQ9&cg+`2$$e8T!E`_4X(otxCyu5Hr#=` za1ZXo19%9J;4wUbr|=A(!(Z?MUcxJQ4R7Eryo2}f0Y1Vf_zYj*D}00R@B@B=PWmk% zIi!G;kP1>m8nA@4kPgyA2FM7RATwlvtdI?|gB9d}oL~*PAUEWJypRv_LjfoVHed^d zpfK1$5hx19pg5F(l28gtLm4Ow<)A!NfQnEFDnk{h3e})G*h39)fSTY4wZI7sID-q+ zhB{CeT)_?ML49Zd4Z$56fd_bk7c>TMXaYXq3x3cP{2>4Wp&0~0Ff@k{2!${RhX{y- zD2Rp_h=mpq2Q8r$w1zg&7TQ63=l~s|6Lf|y&=tBtcjy5BViPbhA}V}#=&@)025&nOok~i6{f*-m;p0k7R-h@Fc;>*d{_Vr zVG%5bC9o8h!E#suiLeq@!D?6ofByZ+z@H5K$-tis{K>$d4E)K!pA7uTz@H5K$-tis z{K>$7S_W!+dj48v58p<=)YbX7X885jME%JoC4ZN-Oy=SFJD#0h6+oAB^{<=F|N5`{ z|LWTG_$UAOQu^Dj|1UG?^L;()|MLI&^IQMZ^8a^!(&zutKN+PE znt^(CoxK#b0UgVYZa(>ke@*IE{N?$0*R87_KmX~U8dvBh8UAGfqnmX2*Z0?dfB)z7 zSx?Jc7;}a`WZ9>-C)?mVU^u?a*@7&wE<3 z!pTnmy__!B+k*O)pW$hlv|RZ!zn9ZJE5+fk)|Jo2{2%|c$w|)cjGZp)-T*sbX$NyMQogZc4^J7`wi+EUeoB#WBO5l&-yNBSP-lqP^f91u7<@GNs8PU2t*Ue1)+yA%z z_W1kKKkf4jx~b{^$es3w`#v{-lwJi=Wl>~dw=`y_WaiW#Q$0(OY>{) z{Qi&bOSSxES|m^YD*-9~o?BU{HYtB?pv|xUTKuX)orSenq+lb#=F5bUB|G$3Uukf#XBY#_7zvbWj-=uM)F>!wL zzy5F);Pbj!@?ZSZxY@6pB>zA9k7@R6%l`Ry^ncgQO~Z;60|&?e*`PP*c&X!)KSYBa zv;~dj!H@?u;vzuD%^pw_JYWcvhtv=PQD6lc$$db_?-Jk${lOW^L1V}PU7#nlf~wFC zLLm@JLOj$07l?#d=nE-f0HlH<;0ZbgmjWly_^b^rp*@5_9Att9-~*+>KpoJnDmKs+ zGJ`(X9W)nmgDo_HD$oJaK{u!bx=K<8>OweF1YhU}Eg%IHg#1tzT%jXq9jzffXs%R; zoZtryApr`28w>9rcfAaKw20CouM*hgcv9UZJ;|8h5C>W zT0?Wl3;H~@>2tK4zF*&?@6_1Q_vo{ODL8iD$0|0nI|Xl#uG zjR%cIjltqj6SV!QK{@jR<)$mdgVs~~B5B_yjWhLk1ZRi>>9!i16R&7vTH6}C$wC|dL_L;^?9w-Ffpqy)8rw8qCjSY<} zjlDV$0UEa&k6JH{QSECR(7rDNbs=eg=fchaSs^)CgT|W1w8oppnJWweE71PcxYGXD ze$^av0}E&dnr9ksc3=-0*Mp%8RDw(p3x%OA^nfBz5AuV?PzdPr^jWr`?@+$9&C0#D zS)Z%*i~?mU!+pO`d?XCsd7crnbYWyjO380)dgi@ewuK~)D6U2eWU?^y; zr2*x?F%$)jdjrZ(9?gVroYcR6KHs%l|2wXBjk*8s z=ez&A^Ra)|e^x$zukD}z{>i|f4E)K!pA7uTz@H5K|2YHy{I`RkdioXfU;q89za4zq z|GU3_hy3~5!GE8>9sFNizyEig6MxW;TK@g>Z-jS8e#h1R@A!@2tsk7f{^=L>zyIGq zUK1~W=wE-nCC!ib{p&uO2Q~-)b)Wyl)_>QyN!p+PJZ_HX_+5L{{+}>z{&(lYs@#88 zK9a`t|GdXQSEIO=RJZc4>tRXlfAqgk2Kulnx)!PT0d(g=pPoM+iLGax=McXSE>MbC z31WJ7*cCq`>H?3jb*)#|c(ZXqb1MD1fDdhYUqeY4OIr_A*A54PuJ!7Aq@J}7hC(c- zcM-fMrfZ3MCOiVo4hHnT2R+A~fW04E*C%yNcK|WH%b+djnQ0qzH>&qDR7Q2(H65z! 
zo4O{Oj_*lF+hpu3&>j0dnt*Nwy?Y@Qee$!+bK<)Gsq3{t#PvLSL(sbqw0>=fAI44% z_ShrPIGByC>#FylC;e&@8;|Nb^>`?St>>)wVCx+U4%j+W6a-uBCZKEFnmL(R7rjG5 z@31J1ulGb0pnV_qG*pM*1JDXv?{Ls_^po)4f}VZv4;P5*neQlKx~4r3e*l^e^lphL zXb*a?MQvh*Q0=oyEK?7^I&{V_kNSe%`A{7647$Ec@1$6ZTK(Ne@5@NZdS4@^e0)U1 zQQc-R1q!m<4q}Z#*N$`JHz(c^9RV5-dKZ8bZF=8CM^w-4>sj_5FpAh*LHq&enfcuK zCDH8oMbZ7J4RixP$VTi1sxfQDx~-(YFSc?qn7FR_YYgih95%!wv32d*7e51xBc^Nj zdXGSVRPQ$ELtO99(6jX}#Plq_13HJ8j{kZ;$SBmHRcO;Q_1f1JiM0idVGI1iwCh^x zcF?u?5ltuGLpR4yA425veyA$k)X+Eb% z=fGv`KvdWA?}OGq9`-;k+8PkkIB5l?i8Y23*tXCVJ3G{(pSB|ns`;FmSbEk`^F`a= z7^Z{%u5kn{Nt+9{_Ky#~-s|85n$w!YFNxnI9*y3>*WYDkqB_@VMO^c_4t5z-^ST|n z6V>|+G`EYOEr|u7`um0ELQYtRordqej%G$bp$Wtz;4!v0XdZ{)Z(!N~p%|n~!c}6q?vAo!yu=V$ss-QWUg_!1q=JpU` zn&17gRey4V>cLT1ODqkV60MESVY&LK=CdEM9H{1f3^;@Ke=sqhA5om%WAW zNZWLDCoy+y<+?1k@=%rdR?xd4wBPp-s|4Y&8!Cf4@l5nrUNpxwCp5=r;Rj-;2A#I# zU>h{Yn`5iSs7{{4C;aPOPny#kAp%Cxrenzf+U7ztP~N)0chI{mdVy+QKl*CE4}$fy zY5tdiICz3z7*@l0e0|PH(06E_PX*2MHY}^#p=QEf{IYNeTlK(_75qtm)jice)d1B3 z&G|RjyRmPedhbpQcGA4p+*ZGi#5MQpVr%`fV^5_`H9+s#SW8TEN826?{jhcaUo}+q zQe${Cw&t()`(MOvpr2tCw&uPy>!^903)LKKLYuxX6kFS``l)%Rd9Hb{xjqn95ZAhB zzSo2?^yvi6X`2rop!bE;LOT%83#tLnVFR(lXb`H}sdu*|r+qj66j=1PYJ6+{7X#HE zjmcD~YJujW4XXWg9&+RVKy?h){8H`Fc|v>|NT9h|99~BqPL0Z_}~Tu)CYEf z@~HKy20G?VpuIAL!Ww*6Q2iPSstKwGYgk5eq#3a#=u=n?stsXiG@OULtgGg~@;8Du z&410ad7zwR1g*2Sxhp7tcj?<34E`u+4}FQNK9nV{b(~CWA?(Ahg7${i*cIrL8NR|a zVyXuxv75jaY`Xj!*FI=DNXzJ zeN+?9!f|YkkNm9PKhOI_mep}db6@$ph$f(8SuO-!0R3PL+=Q~kr_e`rG#{ugXfElP zpm*Kq_~C@A4je_>;%nZ8VC&fCjj#DSo>+B!)rEoh9~5W(%AvNzmZL3DojCvmM0eq9d^CrA zkc!wcbOi0kLG>XI>IW-mt09|MYHZbN)wSBF#*4;A4EAf#_)i8Fu!3c}O2ARH#HeWjeJHfVgSHdxZO4xNr~1Do(Q z9>P$~#f`*tu23JlJoaboEUeRcY=hkd)w-%?oWa-JQ@zl++Aw&Btud@R-4*VD_L&nD z<#Up1##nqe=ntKsGt1h6BUEIW%ES)AQ~c9taR>u_w#LLF`eXssp}q9c*!W6YOYp~! z#?OIPL$g6RoB-7e)wV0}3VS6w7gR4)gO$^9^wYYA(ym&w5Q@PK+B8OQL2>N-^iw^V zk1FrAz!tkY?aM*+<2`-^+7peYUBA`xq2=)p5LaFCM^~YR(Dk%U#MU^z2!Z&jpEKb* z@vJOwkN%|X6smIx)s9>cfZrAEi)ugJLsdI87oM=Z&JBhV+XEAbIij7>lkkRiopV0{ zOBh7V9#k_tXj6^QxJ#sM4(NRAA{quk*s*9&+JB-B@DY?Vonxw=T*XhSADM}%hD;)^ z`PUBSWA}vt*w;|i7445MpktF4ETZ_iP}Ov z+Ep`J!!hhWuqUTBOKX4$mF^m}9hejv8|jM#OF=>)A2qR-H_%|-RQc?{Y~6CsOHi(94h;`!(7X#?vPZUvsGk?8bLSZ$p0C^YDEIh^daOB~}$%V>BLJ zfli0vwADaWlZq49SkdvNFn%N2E}`+DI;VBt4mq*UK?&l`@wJ_*30k*6;>n5CLyzE3 z#O{sV3)=_oW4oc2aFsUIiDATA!$WMHb1cO!!n&`akLJ6^+*{&(QBTy0*jDUlu+TOV zn?X#!O?1w-1D?{RTsCo$CqKUBvGoqMXzbwk?{*SOQ~ zBh_BjpC_~pfjyw_`9Qn&**8c|pVa6E`jtXY!zb*?^w|tA@E?Kdz%!@`tBI=yxP$6I zK4PiR2`r~|4972xmOwS1flOtdCs?hQllTyR>UxsaDzWo$K&*{7sK)46C*(?>ZlB3|dCxMKz-v zs$;Om?sHI0*oUevOu{~hhT&(0;!q0~66=E|ppWUJF`*iub=nUa$6L{gsOqrlplX2y zs^3F3ut&g7C`S8q;`+Q~(D6|$ss|@v1bza3I=BZJ zi79`oYiVfHF?R##TvR!Yg!#mm5-SIv@av$OD`(LusA}yiY#Y$Jg`?RZJGRD2pAC7KLMLT1c~uphcW2EJ40i%n>YN9~BEg%;S0K(@w!>aoUkNBU(&v(h#a zI|e+k-w>+>ssV*TH9)mVb4B$clD1jc{ZY-4NMfo5gnpgpswQN>zJq2414r=l!${D6 z);>zlGNVx&+6(eIn)8~=n)jOT6=+k9zKVW;GjNu+w{Q&G3pC#gK`^l&sB+N?y9mo& z1C9UrsE(l;n;WpTAMb%Plq9BkdmG&FtzZSnf6bR4+6}r_mM<8oL^=Es1NqYCovPDvz3H8n^GUH7_1wJD?id8n2a5 zE&mj|JlYL(?05l_Ky!H}ZTUcBLSs>5P2;gXzVfDHsK(~rtLJKae0VXBIum#JgTuM)p4{xw#N8r>;SY6dK+A@ZBd;sG^4Nb zpz+WZ=71l3r%l_eZCHh`aXSL7MNH%HI;wLtjm2)*8jqR_p0FPp;SU9k$w4gZf@(~v z?(6qVd#FfEwOYrB>-bCIDJ+1I#58B;(0(5@PBm`4Q7e3nXVsnw*cykbNy&-n7_a@S zbJjVi4>8Rn9h0Vm=G+l{PqY_p*-;<-OrU+Cv9OqUCl~U^LGeN;m=m#(3<=torhN5{G6 z*vj2t`ZmWOkETY$Q4iu#*u}85kF{^Fq03O!dTskCe2tO0a0VU_--vEPbJNxYJ%PUl zys$qLTaB&#q4B4AGXT|bQ2S{Hbimg2wx6^Wz~2j(iM2(Op>+_Azj|nI)R&mX{B&&P zMRRHiYKfkqZ6LO4$1U)LoA^3jRe&A%nv0I0^85=ZtZ*IFAsW%VehAt(IiMeHcR~IrsEe)752nvwa0GiE z%*L(5pff?&@(ZGm;2h}p#&^_`KHurD<4q>$gRk%NBQ^+IIaf`&4eN>D 
zB4$uWRO5OO?1Zu~l~`4H0o&+P3Bq7CzUr^)QAvE|vmo|FXbpk1twwc>RZdhBLLmxt zE~R7Iar*3qEG)a7SQl*Vn`V#!tbuO7&L{fPULCs^eNv+Pp#v-@UIoe!{|u*y-2#mV z<**Mlz&7+r1=+Bt64M83yn4fU-0t8`n=e`xG`?S=stx1mquNlB_F?E4RCVP7w$4kG zi#OQXmp9Pe_!f{2KL*}mYn|88b`Kh3Um;c(?TB9jn&an!RoJ_T&xb`&3Yx=a+EkO` z;2?dzfE9LC^fT=`7kCGUu$zF!Mht!l`e^@JV6Q-REYopUb?!0RA00!RzC+)c0Gr?j zG$cNPKJBpcV*6r`!`5%nr`VZ^6+|`mw9UQnRVTD9(V%fu4E7NV!JmbyMr*F=?^Lx> z8)8e)I8?`mE%ZB&ogN*C{vg&48esP&mJ@A?U!3Jtk9550imzju+Ej;X5G$fK>|JmT zzY~6WY&&cZbRvFb2m#z*HFhal9JCHL1RYRq^CE0lv?*=JAr-d%PE~_=DtxV5H1>J8 z1V8aJq9^F5F}nj>{k!5Tufb}Et}vIF_E&E-7piS|gspw5eXBWC7GLvE<5>BRBc^$# z@u73tI@s?)bz&MZT^m|Y-(~n7Xloclya|47Y}I}31D!Ly!mf_3TGs1~ccQ&IscOao{4Q{i*i3W+G{aU6iKkuj zQ1kU2XpUqsX^4j>MjXYPosXeZUg?Dyn^13p!)x1O0um8p~@tE1?=E z<)9wiAvTv-9c(*P-#M1pbVy4q3f1+bM%b^h+rT#LQ}k0!X@jj=Q5hYfWznB#BX~;R zouGc2e<^5dfri2cVtc>`+YQ>n6WVo6KR0$|+D@WN(W1ny;X53`*Rf0EBn!41v7g|9 zAB(SXvJlm^@g3MNurs3JsLlb_p~+C?p%kjImI8Yv_CDx~zZBKn(mYdL(=l@#eM0ez zL3en7pBlXdwP@GhfASO4{K-tLirS$qEXUV))I6{U)uz(K)4)|Igs-}D5!Gi-B{m)Z z3~1d>KuO~5@in#+P>ss~;uhF0=x+Q`*zFa=4&^`hpb2Qc-lt7-btrx`{sn9uFP7um zvF@sss)c9KH^jAH3Zbg86R=}I$8#rQs(Jc5)mCirji&R&KEXrm({PHoHxT-Dja;=% z#|d}nj;$Pp!(^xl^NIBZZT~s+Hd>v&dqFkOgP87ZE01bE6(XiFeHYa}Spwf^`wLA6 zGeE~8U2D?c1-$UzfX)#$XL8flkp3F`HneN}tJbIvjD#xq+hG>y*wg||32`i|ajf}Y z9CU4~2eFOVIR6=ynuDs}nuppZ5N(HQ|KGqqj}}1v(N@HD z3|2n$_lIHlHSz6fUxBUhsoJ3P70u%sumu%LR&{{Z}^IBO)MJK@o_Esl(@!_wn59BCN=?k6WRl^5Ss#yumE5CtsnY` zKFYOnRT1{VOX6G53TR!>_ud25M2*R%=sHlIRo^tv`(n?8W8i?VI-ioh%G*cmJj7JD z=cCU-busb7f*lyTNtQgDoB(@3P57j(* z06GR}Ja>b1_&TTS3^w?`K8NM<;_rdl#2TRbdq568rzq$%T+zw2eF63PhCdEf9V&xr zAE@6H>{qD9+D7aHUk&^V}z&OlG0 zvr+X?&TaA4w}1$)VGgKfsWv@9`=OiB-SpGG=>XrMJN^c6q3s!(9!h}b>vq@xT|vLAR?tVsrWW|R zPT3vpPHY+s#V!fDZ>Ac3ieQ&TEoqyG>X_k7>@{di>0GWax&loN&tVUJBZ%$APCzxc z{Lx#)tD`n(AZ;13Z=)K+;gFno0b=W6HfWiW#15d&wClG-BEF8r*HE2vRK(U;xB~A% zW8@rd+GqQ)Jz*erVKj)i6`aIgj20z+9lm0#mOTLNZ=H8srO!{$ysHn&m*zn}$PAvK z71{sUFM#zNA%YX53G27_{@<9|}lW@0O6H)vDdG{=-T)qu75`=AN-XHdSh zeeYWDeV=HgUk!oX6^a5?FH#(Pjit4zgbyJR%pVY)!qspJo7nQF(#FQ`P=LNQM z(;Kv&8atiQVxZ59ghim7d4i6mT4&`iKj=J6xtq)9Dt}sMVW2)zF&D% z4nGo`jji*F$uI!4FARM&K7+B9!x}ISTeV)tpyn_Gv>iI`D3{u&+6Luw8-2CEuhFjh zu#;G2RCygmNMkZ7pGo^XCo!$FYHU(YTVgA(y8fix{=|QeuQ{e%Re+)RC157@IQkqx zmAh4-oPC2!*vi{m^daawbv(a|?nfJ-!Nk9SE6gJnOHAih+3}S(2ck7cBD{e+ag6rSaAQbiTNlHs!21wsQ6iI~s1{D}R5Xi(msN zj|EVj5BA3PLFl|SYD1XXXb5uFgIkIwA5>?)8Ksi#*(t&dI9sfA0T#bY6_^Q#@!2)hXrB0DD_6tNil~m~ zT5nr83d-Aa+E$~=pZ1yZ_YQpoW8ogY_Q3^ch}{R8W1oVr;0;>;`Fyu>t6XXxDA#52 zQ=!VMzE648HnqY|1GTYrtwi}%Zuf(7`;z|EK-;BUc7*nzToyvL-pXeTehKVbkd)iS z*kfQdZOZRD(0pG*%opmyGyLt4lxO`$)Ox$qmUO?q@;aAg2f<0uy6AjGc|HKTeyIHK zAf`MgpPf+>WMAxm8|O_vgbtY~@QiP(C#- zx}l1x)-*(e(46RJ)P}Z8pj_%)LhCD4tqXyg^qYWAr0-Vjzp(!?k3)%_0G*F0m-%1^ zeU#6x*f;bY=qtDgeTi2E(SRZ8!&uK)J00+85f6zNm7n z-0HV&TH17uuk!^fY~@us^#dIzmDfrz9AEo4JF0vp$5u|?qmMz`u$1_ExDW0yi&zNg z82gC63qW~2i{B2E&x!DlIh{wlj-jW}CvXsyQ{{6Cd<4}nJ zHNJ0SYrW2)OQ0Vyt+#Tjxbo^p{1YVQZa=nir|Zm`%gSFl(E6SyuDqSc9}GHQP76`A z={Jtf6LnodIdvqa^;RyEa=49H3EE!}D-6nM1{elE@iiutTh+6EsPb#Dlk%#%atFNW zt8Gx8mE+nl5j!c*I)*8?I*-=U|>UbCU9p_&i2kd)VS*rA~HOUk42XGQEJx)YQ` z%`F|Po+w6JH}obk9e)p@UqI_M8@~fu4U}V@d+MC|8@BS<2vt6hq4V(5I_u5~NFX|a>m zU%9S@KNZ!u);8EdQoirfru=GslXBdR?uY4=l z`fZ?fPRg~83(9eJ+LYT1v@55F@Xz6EKWO|PCq4wfG4>qn9H_42DA$@B+74@C%JXwb z%JX(&%5g>*4#^-X-wD_kLDyZ9a=n=@O<4Xiapimts+`w>M)=CPa;WXOjjepEo@jri z<#Uwl@}O-wLfcwY=N|1q+oC+b0 z-{Lzb!yA14Uib?2v8&UjoUcJuQ#`;CTj$w#K{-~wyAoRfS}$FnaYHr6l5(wivI1Yf z$+bT&VLPF3SiUu?F{S)w1snP_pk3Rf?ehoaw+f8J)^;sN3!^DPIUd93D9=A&BmP6s zd8P840O>(_RzB0A#n8#9wnI5hT5qlI8T?srfOZ|nl~;`${eI7dYChJ-&V-JDq#S=D 
zro1Yrr9nAehbpgEQPr(wa0Nbsa;z~k7j%xHd7*r3Ta>#nV*a3<-zBE}DzBAL<>s-L zhit@p;48PSu$9yH_{Xu8zf%y4tvn{>a0h;Ra3WR&EZ`q=+Ml*KVoA9S!}bBy(Mz=H zTKQPMM|DxT{WUg;7lZep9BLe3{<;sYC5GmPL6t|H?;l2OiK)hDJ(bVNXdJ3sTB7w( z<#IWnlN0+iw(_WbpOj0jhw`aB?xs%$0%A?jvIn>-6iLII+fUO)>g;Cf!QSH;@upV1; zAupe!yr#leZj;7^wqp~1Pk4%59h6_K`*4;~jY|m{&pJjIMQha11I=+U?*qP9$^w+xU+$A6OXJQ&l%AxY2JeGl$*vjV$(41Hf@z^g>wG2v)%8Z5Gu9z~4^7JJ7uu|e4JCF7wC>8S*4vpj<+lSdN zu~jpbM_s$s`d!5yg({cIpUx?iqoUX;K>5?QDtBl8Zi_;d!*RsYqI+mpo$86Lb)8CF z`BcsL05`Fpp*2wDwKrM=)jdyzVJH2)@KrPKV=JdRH&MQ|PnKby0M&DCgN}uvXhl?c zO~q#@uiB<}^wW0bCH@AoUo@~dj+I;GSh;=1GRm#;cnDP< z%YyQl2nVr?zzWbh>9dqe)sx4db-#rMqRL}GY~?W{s`XR;c0*DQl|S7xdzqN>tT8r; z<&;O|?-RE2rJZowtEC%C)2yBD|Mt|7sl7A;V`c%enwLW@I7OTCn~ROWg3lDqD@cjm77V`iMnV%S)k8^+TZn%{@94S|j4-@r5A zBy8L~6U_e)(d%MQ2YLbL==|m*JAOuB45Q=L8`;<`#>aIvPDMZ-WaIUEPOxXUx!e*t zH@p+LcTVBwc+s(RZpQLH_QY^}^fs_@>;hZoy(b%IW9ZKY2g09WD*zkAx9~H5#?HFu zH_r-e#_)6C@A*Z?FgkY5p+9ZDR|;g~X#A#uOUR$V#_uBRvp$E7;Rg6)U>-Q{K+qV> zq3s$lp2KJ}&#%G8bO7u;&4abbj_=Pp7dY=r|rk9t0-Sm>%rH$5=+k(RoJ4 zavbsnFci4g%>!dOiar%!W4RaEy_FQW*3mINg*_Y?Q)B7e>(TDqKSDP?(eYb=y#n|Z zpSQ4k_Wc4IJNH#vWMk)fxd1r_hzC}I3UOIm7WORT4vxpS3UW#0+sK7!p8)*A#;Qo$RP=l3-c3s|t~q)KTNwII+C2l)!T#K1Ic*E^ zv1Yh#!+|++4n6@-rO$4#6`x`Bajp6z_jgRNoN<4|{u4g!kV}FOkRRfg6~7>4?-)hU zlc3Lphu~9?zMsR^oqxb8#%NBvIhr2%G}wath_-pipMke&uTGy+@F2!)54sm$k|}e;bh2_=o60oHuLj-G#4~MP67&n zU9=Yhf8y(Y-N(3#=$`@GGsds14F5sD1aLZhTH-qmeH(2fu>FqhJp3hm64a+{1pT{V zvwjBQ-v!}-D zS2FHm`enrSHMUpuJB>aVzKyK|JPF-1cmTW?eI)iAjM)gbzC5B|NA!>JIRz&N(?L9Z z&x2C*Yl>emoE=!ttV#1=3y5diZpLas+gW%&=2h61pl3q&J`{*wMo@!sN5W@ly8!<| z+jPd+j_#S+75^{bufQ_eSK>bkTX*CK&ISD(*ad#U-U+_W^GefJ8QpVbJ@O>_zrmQZ z(M!X_@Vg6Mp%=#QFXXNGtU>+&Y{UM9aqb|$1-x6d!si?8lj;8_W1e-6UiLr{Ni3H{)EzCns_h#>fFWBc}k?=J}u*{j9-z=;y!F-J`7~ zeg}BQIc%$G`y5`1Eweu82^n_>`Y8AV*z@xh_Gh36vUR*PM?*;fgKEpv5PzL=r&-U&)2b94l7JQ$!KjF3X-3~v+=RERX_-8_|N&8UbEZDr? 
zbV8m&U+?fC$UBkGfk9w7{_kQdL!Uj!-RZLjxdZ)j!Jgwyu&qY^6&?%DAg2To^qUDM zfUkk(_*P}ioS-ZA1=xz<|2J|`+O~nc=nv3a;qx2v4xZ_^$r{=cA-AUO0`}*~f75?7 zd=}d!co_N@v}FaE@ITKOgW-GNYw!f0o}euLd*KT*c!_^m`i_HBp?hbXf`1kC~~#`UGNM2wKoDHonE+ z?)be!du#maVDqm02-_dn+Q8pps|~Nk_b#?1=$GJW`2WLLW6&>y;mF_Mla03h$QNn< z7`yk#9@y@~ey^-ytVr6ulUBu^0>5(DE`oQ!NH76=Ieh%LjnwER8KQVSgsD2o%SshyKow_PY2U zX1sOSpJ4wJdjP0|JeIaAU<6R4_l#c?dmd1Mes5wc zg-a2j2iX1r=g_l&WVC;X z?)Oh4#)^Zym;Tjg8wQWU<~P_Ocpc;P!T%|81>`I6Big!veCRFcdky(2{bs;n*!;QL z4e$fw`@K2|pS8$-mo5Pn@!f?z2<)fLpBXja8AtG0k8Kt0KcZIz^%(CNxQaX#-vH!p z_+LeyiQj7MagpCau1??Kpcndg@L~Km0>4}LgDdpSi(f@-*Wk41rLoV%w+4MuV>^JY zIPw_yM;_<5W-aXg+$a`)Ip`Zndk1U_;1KK)*#7}1@Li2PEAk8c8iSm)56AX3=0_t;Y4&$HXmFE+N_a4&ogFxF&H3jHYUYj|cy+C$;`v`1n;sU2H)$H3kM{)V>nZ@1Y<#MZ23VY{62=$VVgz2GWf0I zxs|Z}3{OBm$T+cS+W_apzY}c>k&|L;NP8ua3;ask25bTFBih&EHx15)Ed}ju`g}s$Cj7b~7oe>bZ671QNxT0pQv&@K+!mYP zp9QdA2L-V$1O7~_G;MppTE^;3n?DSF}~aevm#_;iKr4L3-p`_>{#yA32<7q-E?w$m2j9mtJI38i z+jtNUxjVMjv|ooK&=VrpLH-o~xxjz-D2eS8xFGGbX)A=h9E?P62li^?IX8hn6DW(m zj=s6@SxnyH z<&eW^i;Z6}oCk!^|1@ojXv+a!VEdi+QS|+iegn{R!#~j$iSG~KGi(#^sf?Te^q{Q} z+y=iJ$cf>4@I3q$VE5PjO|@)h0))`FE{!p$iFeK^4Qli zPFM6fU zPDbo?(B~jW(0&2Dhiw9g&$#u$B*utG`%3!kgOA|{U^p<2$JEG5dI7< z3@4?n0X{iFZEWf3Uj!bFUfMSF$&B9*-~HH*V*3o+VNemh32hB&i-5miuHCVRft0jG z!g=wZ$aul{w8!T-@-M5ts;;V|#=4s`LwC%uLvFIX3oH z*b?xJ5HJI|2ppfbE%^I$>|^k4Z0~_T@L!5v342wZl@BxpjTmn;wzIUSLZ5|C5Bz&G z-VNlJ*jgfw!S)P&13U;@KIBvIHrl=cy%?hiW8H#Jptqy_7xXanMQ|N#8R(lFPK-VV z`)%Z8`1MB*Mb8b2VhhD@3S%Ecf1ma{=y#dl5qKWljJC?OHKyMZxF0?}X-kFtwaryoA^;iLEz#QqeFM<0aW zP~@}VP1;(+&3VR7{3nCW_;kY7m$r`hegy0$+gQ*w4V3=~Dv!6ul5W ziO{FRgV6r~`RUsPTN=h&0TwXFnzW|{%V;}JTYvh0LfbL)3Fw27e?@MKy$ybw>C+E; zC+yo8GX%bmeJS$y$f@yrAKic7IF7A6wkhyk?8WI<3(kuE8AuA&qEBG_Iq)m=M94en z1W<1Y#l( z6M>ir#6%z_0x=PYi9k#QVj>U|ftU!yL?9*t|8)dHLaV=a2x->fHP@{2FaEE4#k9sm zASMDa5r~OEOax*g@ZT{4wQE#q=)AK6ezhWCi@&_}Kl!g|;~F*e_~$?Tr)x)lDUkn? zgJ6Gwk^d&X{=fhFKi57E4g7cN|CTrduXLc{_0T|XCR<8n-S1OGE6{RlXyBnwGw$=5 zq9=Y(KvrPCGB-j4nQ*|hsDA#+NH(Bf(bb`W(fx{k^sj#Y4ryL+-nlX^;=ldpD@Q9Q zk`DcJE0burKQ!<>a2%hP_iCtrRv1w0Qks98XEF5TjNfxdXkc)>hJij`{lt~fz_03t z1g1bA`D18cnbIMFEs-6k;l=+vPFV)tlPe@}WaPib@pl>L^XwVu*N*5P`*mEh(%=75 z5E17x#a~G0zqV2QTH*i13P|v}{a^ow-&N)RHvm0j#g6^DL!7Ao$%!bxp2L4xU;p1& zuOp4$zlII?v(RfZz0RY4u<_|Ar}Mf zk^Mbb|K`Z&`}_Ej_zXk$Z!i3v)}F}a;PSxVoAvLd7J!Si`*+gC;qK`EZHIr`a036t zw!G?n zd%ON_cqyK$rb4S?^*c! 
z_5N*w&olqb-@TwKzOr-u3Uol8g6casPvJbbGmgJ6?!Gb?oNqn&YuLX@>4a<^>|qS& zX`Z@X$B~!9=9+&?wG#IC_KmlH@8Iut{}$EOFv>3o{W$g$@MhS*{dkC67j_)Suzx%H zSAfgGf55A;`}a&QkliEypc~WYurc!8T^HvRifj(}x3IgwEa2au8kbi1+(9;vjoBNp zYvB6(_eEQgtAae}{x1J4Y(>!*1J|GfdKT>Fv-5}pdNQ7WcV#WgjNTm{kIx0*-1>o^ zklmyDJJ+eeJyZ#-MZQOyf7|5RGy=V``}h3jwPP#+{_l&p7vBK0fP2gJ&x~ySNP_HK zi(q%I*?`v*_ndzd>Rxc3#?-&H%ZKh>>I#zJSBGc$H(bbH7+$Tx0+K&$yc_{yoZhxD`AV7zgKTeU1;@Kh85ZZE0a^M{^K8&xFY7X!CDE zjla+Bj2r;J!tQ!)Lbjgd=Q-}bG&H*|2hsg3oP<0WHczc3S@Ee08%NiFBNzi}g81~c z&id}w1#>tlJcB+3VgKHvKkVN@de%61_seqpoL?Q-Yro^Wm;BoR|K=buatdVqUDt*1 z9PHm9`*&={J`64l`!^E9ko}u<|K6cAav|^x`x~BrUivLXD-^{!T8>@O?19E!U*jR@=U#u%b@H1|%&uDDt!3AvYHTT13 z_})b~Pn*Eb!TBpYN7vq*whsRQTbuOhh&~j(0C0M5_6IFHSFtVeh6TtYvDY+Z{5oP)K_TvoOwxi9>iI^$=4Su3qi z*81|ayC-(zGaOrS^sUG{u&o2`g?-q~gD$XtKjPRGfn)gY#?$@271_4**wetC!@hq5 z##1-Xt@Dom3wmE*-n%c|Q`T?ueKYXybK+w&o`uofOWVL&^og+RewcB57w2i+@o$f< zH|F$Mcq9l%zK@@4uoKyR+887P^Fak{D`4NxyVQ^PmBHq^dPZdhR~fehZChaTbPsS3 zSp#|k&tTtSHrR=5?XCg4_k15?>z*)2T%#tyd)5#*A3mwzfnYaqpBi)D-}msnjLQRL z?@HEV|IX07`5s^Qg1#M*J=c8Rc-ZkfVE69k{;vd&0{Xh1!Ppi=`3}V9Ts*VQOKZp@ zWY@1TZPsM#m;ZYmy}=0DCZI0|?(b~a+&7Nr-yE4om5{BeInbTI^~~6R5Y^Xx+>JK( zp!?tT@cuUq_Ri6PF};IzLk>Z{4~Nt4TA9a#fM;zu?7d6-$LLp)T@!Q2I@uX4!ESDD zMRtA5qr30TKkqcwi4x#B_A0Prnj6*-_eV!?1-PERL-hJvcgzW6YK)9=Z}=$P1?|9oe47I2YJU5-{I0ci zXa*P$CSa3Y^Y4(Ir#bQ&V-;jfYmqhE8f%{G;~rT{dnlX|-8SzI=0pbU?zs`zE5h4= z-#zYY-*Xl4JINYoE*TH+3O>VH>X|kd_W5xc!@TH&J`k9X!+|;D*q&?NH~c%k?da|W z>uC{S{ci-%V+`Nv9PD~^MV<_spY^e=135uaboY?+tPI>o4Pf{6PqYUECnwzXWhUn?I{)_ub43pJUF{0g=F(3~td@7`+QP zh~64KDRM0ME_yg)?L{_!t^dZ~yz%|Ff{XaL2h4N#eoNZjXZ~I5LJ$tL55VR=w6!`ZXGel0zqf|C&HIt^TYM? zXA|*&-#E^t4{(mwiWk_8UlC+;`e%H+V+JFe7k{H?L^mJ(&b9Wt<~~;r0LvhA#(by* z{JUxQWH_?}2V@Y7$@lL#gcH`%I8&B79I{Y*3{>^Y9^uNJ3$P2LhF1}wsY}w${ z*j@9PAQ9ua*E3#BKIYp5*mX32R{_^L9(o_zy!U#ajs(uZSRX@nA3I0wr(yfQ ztuJ=-QnnUnN4|#LdRGn@r(D=P!>p73tYt3k#%dR`xiNsY#jtB_Z7YUsdo%n7!mf#P z9|C-5|2H;V^AKd$_*)Rf7{=OtuogB4jGuXE&e&F-Hgm7NK8#Tsz7Icy>ti=>tr>o2 z+@P=HUWPNm?wz=_+2440e(pi`d&0eA3`c>EU?pwGfqTQ;x=r8T@XLd2-ey6+4tw`- zZhrTeH;Z8NrVO$**1c>Ds=%wkP;9=xd#4bnjGhaG<6~|cEA!r%_GT>OW1X1?%op3; zkDk+$>E{@Jzn`Q3>zJUMGuBz}N5R1He7BSM=ZD=3uEkUIskHm8;Q!{FchGvsetW(h zHTFgHAF!K8#>{nW4y@Dtu)9XCv){#?VB_f?GiR+&-V+AHGlAz@bUqnZ{c191BG@~5 zaoYX%)psa*UHT@V&3CmfHp4awoAdUWJCNT3o}s?m$G{l-zu#dl`mV;yzM;UJoeZaE z9N*6x<(g%N*TBA;x#ROc!q>UFzo#I#fL%N5qxXK>Dg*Cs1wc4<*Dnt~KErclJnWfo zE?ZyRM~~6%TakA2#D4Dg)Y$wvjy}Gx`PCl23mcnM$ktcqU~ZPCUl_XU@BX-h{tA0t zByocTfc{9*n0)LP2{=2v8)e{^#QWElne&_qVm+)`cT?g;r)&%#GYvuYm-v`+J`GfNuirz#Z z&jR(4--Tz>J{{R_oxb2Iw(`K*KN!3BOY`1b+KRmlx@+wjBa910{iHh;`2z;5o5y@$B&Y2m8Cd#QPDo%Iag2CqZkMw|6190Xw>gUz2Q`Ew}G<2cwn+dlzk z(cKHi+h;b$CclrpF+2qQ0R4=&cj6kbd!!-k{QTcR=|X!wbl1#~xXy!r zLGKB>NBe^A=*G@{)fL%s*8=zB6!bQJJIVfwz=Zo za;{xr`+3g$o;yJFT&-os%saUIus?90+vg19G^Nehjz+h=5wOn9g{^6Rr`$&W23R-N zqd$gw(8sg7mj1vswRRK*NAZcaTSMKK#@2dv2UMo7=j$Zcy&M-fU)RgKmTM7Cdto>m za1D)}Ipn$FTpe>R{k^l#M|Pg(uXUsph(tFp_Tb}t`90?TxQ*TQwifpX=B{^p_onYY z0ok=N-g97c*ZQH&Ievhi7H$fx0reQO73_DDe!D;}Y|ZFj7TJAe?i`}seKZk00q{Oz zjI1}^@!bOamNB=#hrd8K52^tB&W3%i^Ysi5r``MeUi6yCalkF?o~iC@=W6ZnTr_U} z%(W-zhwgKXso#p$)k^vzt9#$|y@G-9cl^xQmodgr_Na>@LoRz3(de4ci9QHAX_^ZfW@?@fvvB8Uvvf56ZdEj z>>Yq>G6%gVcHi4PEC8HuAJ}}(j~qEDYIsE&;uw+Rf|O=&q}^Y7x5c?Y>Hi zJOQ>AETPSQ#?kyWmwdM0hrWw_Hqy2o9tW3%z5f}%-01yb*SiHU|6KoXv0KyK7nMLx z?AEVI!0&;r*t{S9iJvj(jotmw3Oqsg?Dl=!OC?}yKyA1Uy7^(fy@{moPs)k$@mtRQ;=QgNauMvl#{k%T+6&C-`n0=`T>BmB!24Eo4RCMz?BB83 z&)7N7?QjU}e3AoyW)OkC8R)+eG+|8Z!(3#M7I>eS0+!IndTG3^m8pR9w5H}pUQa*o zHD`gbH5a`Hm`^i+cTwl!9_&oDN-Mw-GKVw%1b`QJG=D|)l9Dj4j zyRCWrCHRLnbE_9HFWrlN$9M*eK|Th1z6a9g8E+tYN7@ 
z8{`Eb39`B8S#NH-evTK3-VwPW$WDL16}lq-N}F}n`MGx&f`T9rn5QdYYn|_6e63q= zz$4%9TS?FPc`A>U;KzMhZn6Kkw%?HsH{ z=0pPIMA*z>*U;xUKkp`4hx&PU)z|%O{TYd^H&_ii@eJc(EcejX z3*ET6AEWOW+i0&uTNbc1s$Y4yH1OH(n`QX;9OpO>I8XOYByAI5_vSR%J>WcMf}z-a zp0&aGxK}d6=A!rF=Coafty#XCYv*^D>*Y7mUbsIfgX~<)?fLX+C1b0K&GW&SS}R=3 z+n@w09XOsci-b%4f!d)=4k{r z@2+#O`R$Mbc_DIqWb23a(byi~+X>Ez?p@RMvCUex9(zmJJ#F2+jvkBto9jR|1|n3t-o#G(P5dLgYoT@pK+}(f_8+_wEZ% zgsnyX{LAz1Ezk!u6HZAE_x2eFbAx?j-zZ|=76mSnXtRR9NXAM@1aq&6#%ZU zd1ijD$FC{u&qQ1c=V_lr;0m&L;Rt-pW9zIx7t-Db`E%IX_mt-)KyHD}`&>r&2==Vl zUGETd>#^(OJJkU8cYnqO*63yE)+6`A5ZGGfw`Xb44Bd72{?r3Lfsb{nCTuR5rys!P zyfrHbAJ4r`*fYYxuyapHdmwF_kzF&#iUXg=Co{6=dTUSwoAcZR)?s)5d6w@3=5>4Q z3BY06{JyA)d>QUd+aIv!nRVUu{t(%E;vTAlJ_&ecZv_7UV_q0`-5b-_I_&y+UK9kU z(YFHYrZv?u7SnD{kAPhZb8QzeH@wfK!@ddlGa74UeC#9P=d?Lj)GYletUF=%>i>J1a^(D!uR3Wv_*ivz?^(SfA^?kx&I~s zzty*ZCCJu0e@^B&D}iU5cP{Ue&QsleF&O-g&F|L%uy=W99x!51j99VEvQLftkRZwGKMY0{Yhj ze%shj4u_j!D-CBw4?{LjjmvxRPV@=%kAR(LM__!i(bfXlb@Q$l3j53k$kzM%$o=4@ z^mXpWsVeA({wjCl#;2mlzdIju$JBFZ}^E;zpP4z4+1mA@H8G`fiJ(EP) z{C-=A-Mx_)>;V33!MbB?LxAg<0hBh2E z_uFoKtz{#yw?sdIYz=jNjFPRQ;D>%9B?6zwCCt=ZN}>#8~7y9DEVns(>vIrEbC zIp9z1@4?04;j}sLvgqc!@hc3k0z0q`px*>++Vypv--O+F>Yly6lk+z|?hWsa2f+Kl zIQYD;ktYJ@vK)AaOb&m7e=YbZ?D}K}=8WfDKji7)AhPxLH`x4H2Fz>o%=r|G@_mfm z`-5YIfe6}+rR(JWah*LEtgmB%d1-E0yNqQc+Q%dNK68*u!pVU9dky+x+HS$#8O%Gs zy$kZ}V07>C=DTzDolfKD8QKLO?=8+X5w`f~lhJdbo45I3>&-S~=a5=I;CxTuHw=CS z?}UrtWBxl&?-xCQx#Z6u!og&8@14sT=M&)fL|fP~B0zvP+Kp!q^h?Nnv5iG8gRLC0 zIcwfc#^(2=F}i{7+#=DfV}a=T;lt?0G&*N|ck{tHS;zlI_G~p~1>s`g46=7DzopIN zIq+WCHE{inWg2`IVJ}91&)0<5%y0Ld>+k$C!;{d>A#0ZFnFqV??EPUr?48OnT|dY5 z468|-^YFe{1G_cOb=n7ew{^^{;5>fjkF~!na8Hio9Pb5eJ}djoyXe+nzkU7A z9)#{*c>~!!uo5&xx27A@%E+aF@8mj6L(UHy`)~1a4gIFhhOcccKx*_9=(6?0oEnL* z>tc;{jonY)70eaa))@433}DQiZxv*pof7u>x!?fwUC8cz=eZ2H|D(S>U03U4AAGW4 z|BF5uVQY>tvhFp3&9O7c{v4w&?b~4YertFRyEi;4uH-7d)VA-#<~cy`=~$d>Lajcf`hQPg-gM+Ve6u`uRI(d+e`dP zBby7(&vRuH_6_i1I47{)l?K818UyEP`)Am^Q@Xwhu$7_RwR-?O@7;%$a}##bYpFQ$?pN<|>h7bp=1>|M%uTCc62qrg7or`Vst-uZo(LfYZAuxnzSF!ztb`mIJb zN3Vj($nGE4uMm0}*zd#bu=#M4XCDIA0rRv6>~rS;za7+&HJw@FR=1ofY z2mI^8-k-eRB*)$jxF?LO&shSDk#luiYqGf-40nZh!G6bE6ZRwfT=$aeXH2bc(RE`S ze&)$h*jT%kabVxOFY+ST_VTow&*radU=GKFHvsE|`#2{s<`KaC@BPm)2h-QH{3aNr zKXWv9ZlO1Y`-6PIIl5o`_I(Mv7e9urEADA)Sz6>qa476~*cjRGSof#zZv0QcE8qv% z7Q(JycTgL+KNi#XGvN5W(vJ}n+S|IO%{my7aKlhq-I2_qMbOPBu=Uv6;n!g`nn*zJ`Pm%q(RA1yA_}yom z5M+O*VO=tA4S_Yn{oy^w{k{pk0PI>l0luem>j%t-irB0H#^0LbT0dsYWUzZA6(|f6 zp}SVb)pafjlHun%y5`2t^_c{_M@qofj-1$R8w%Uknzj+{LSN5{8Nlbs`GG%svHrM^ zou~0L7b0QTGB^Fh@wtud3Gn?~fA^WWozED{f%A0DtcgwWcRcR^Ibq*%4YJR_37o5U z#g5p0$JEGGV4q>EjIlM&yzh$cT%D8kz z@x4>j1MVH;xCppkio?E}waMqWzjneq;rXy@=~)^?yY)riUzkf0*x#pipH~Ogzt`&x zj|Z+(QP}k}=WRC@erI;3-+tI{A>Y-#=)BZN!qygb_hvP?6Od;E&v5S!`kKo{fMa(A zK3Bh_JlkA!ZKk8QMK1=s*441hM>i(j^#jhk8HkVUyMF;3v}Hg(1l()h=bX2(2?Ew9 z&pE#{%OZRCuYqhX{eiE0AUegi2Dd{u?~GMVY@^@>z_FJA zbIvtz?T6rJ%&jHP(L0uY-sLNR)4)1n-X#F8!DDPE@b}(gZoGr6tswFS;68J2{eaCg z(EF4z@GM;pd&l$+WL~TRYk}w3LG12>!L&`r76e;=^8ojc=d$OM?=}V9dT$IT;b%PK z!sbLUvU_MJvbp0q>7I36`okmWzud8qeK+6XFMRUA)|K~Zvj%v#w{{wmcO#O`_f>*t)DoBPMT?HO4RpER(sx7JqyzFz~_xU>b@+ER<#&aI- z4aawm&S5?9d&jn^`T=um8F2lS&11)j#811qVIBm-L*UJz5hz684vf1AyK_-afh{%i zeHq>JJRIG%9tw7%8xPOmp7e7KGlJicJJFsP+3%`&$Pviqe>2z`)CBts*xF)#8Ef;q zJbVdzX=HU{V_O|;?h)@`#<~kw4LX8z^mm@-*cjLv?YK>}0nb=}j$`gxmwnfo$Z;6c zwKXQLr_Wo5>>RDJJAiq32R2T(@NEuTyZt$~a|@=;b>9fg8{f(Ivz{0O>z{4rsB7mQ zYL0BaSqohsb2}?M9{XYB5MXjE7V7>3hzTV&#d!bq3`CI z-j#fh#jt0B=cMo056nb9M4NZ*=znXp9$0s*&&JO8{0Z4Py0@~>RuA;0kN0rjVJvbP zWaGITALE@J-FwPb*!>X!yDtZ0OCRMo5Z$#jzWQ6o4*=J?9%DK`^Q;~1{`}N`A93F1 
z)=k>1KdXV?43n@MW8>-BH)G?sjP>;{em={%xW>(Zb@~{3FW7q32-&l(8GV)`2g1IO=SVoRb=y7S z8RFV`wyXrMOB>+b%Y70LZiw#ltr0#qG3?L#tY_YjD`4~a4;iZxy62#0rDvw~!0#CM zvF{iF{Q1ohY+s`qw~ff=l6R|7QT>V|w*j7;+wqx>-P+{7a?C&J>zU+pjhp+~xtIf< zYyRBOTuKVpK=)j8Uwc>ZU1uW)!-4orM|Qr$U~8`Pyor6MV=-nz^yhFGZGHpX$L{`e zk9ux7k4~`jGas!nZGm$b6*VVgJU*)3{cKG$7Yl*!@NwU`-~Ikx3e4Ra=(*s6zYl7e~IrIG&}m;cDnzZG-c}ws}ry?*z}LPa$NV>s&n}7oq!X^XFat zkjvm}oiJa0hnMJ!;XmL7zVHa5Bbm z&c@k$u=~wi@{Ak-%vWXW>k--y+J;;Zo&kIIdM|Wc$KdBTuX$^}Ck4yELu>{c@oDp7yE5SkARNHh;F{ z^NRs<#CH3*|MWAqj#Cs^mos78hU`A`o)ZS#kH$BG_VmbukhdX^hONu4spDF=uL0-p znPm-if4R5Jt?l?(E1ZY1_uOzT+&lhzk+t1AY;2uhR^-#jTao910>Je#cU=EWpb&7L zzQ+{>#yxW|hpYLvbT^s8}JWv+I0-gs=-~@01 zeDrf|Q=wM^K3l(4jA5+}LH9kjgXAD5ZLY^)e63+m@bO;mw?$prFVNFXXXhYvYnEq<=i&kE=59;m2--cf+!uZ;z5;%e zxSy=OC6KK#)(YdB54kpW&rSEY``}OHez5vD;9PUs4jU8S$^3MU!f9^}$AZsbcaOO? z=ACE$1MKOLUBk4%y|e_bCunmG&0X`zT<|Qd!Z>4*t$9BpJD&48fF2H8V~y8f;2mi- z>^;gmjxloHYk;-Sy3q@?MlVfUCeSv@?)}&C+@t15E#R71BdqB|U~5Y-aK6sTeD=O; z?Q$Lguz)^fE!lm$O53KpFyLHgGTC>dW<-l(} z-+wnY_frINWsnPWqRsWTo_cn9p836MzPR4bH9MFIl4CCc4+pjn12e!5`d*~1Cx{Ok zW3wh2;}yW#V%_Nh%n$4BO6K7mHwan(H|h5heG|ML-8q>f-c|K~AA42$n9JVlJ;Oc# z_kn$_>(P5<0DWBJh2RCUd&(MUEenOU5FSnPX^o z{mj8{kPE?ydG1ztFQ^K9=Sa{SdwtkC=lsl3Yg-58P}sOycf#QF*z9YbPXv#Ewde$# zAD?Wrd-pVdC&L?Pv)=bdx2}%^wvC7H;P3vi9$VvFqdmYHX%3iI-V2T*r^04^x(Kq- z?{&?f&Ds_KoA+gbW2C^x-yiUf@Emjm&e5~SpC4sIwjLC~&%2BFD(@@C%KhRud4njM z>u{Mi&roySv!WHelXhcneRX}@3(nITy$9WQbL;0F0@1)A)N1n0wX$ z?*-0d7qZWM6WR5h4y=dXY22&U!+LOg`WAy-lU2yph7G{lGznV@o{eA@wkNjBq+uhZLqnAjY}Bt+_hF%liW|{@h#Z(b&L_n&fC4{zj3)2 ztzkjHJeb1xevki3zs#`rL*Lo9aNwOyE&;pm{aKfFvI4g5^!59-4stL0E&fvwjOzg zc<*!n8`tlU=OMdi!;qb4E#$SJH1Pa&zP@i$WZQND$F-hKgxkT!D*)NrYut>B_qXV^ zbNy0*a^M`UN5KU8IF5cxK{Iq?wuLdgCwk_7jbDFk-ph)k8yE8@y*C;x!M+-vfh`g?4?L5so33?TY}R1+xpyF+JqN7EZp;QDyT+b1 z%GH4R=XZ~})g256MbS&bo}Zpo8G+}OIo1c+9JQXg--D2wBYTz^7tcV)Gv3y&G{E?I zmbn)8zfF5j+THiAm22#}F30BnGwz=tztSHz9-b%eclU(f2hMRWy7BjCcD|Qqq<6kG z@Cfwn$UfUU%u@OcL3WONfP2TBY^II260mi`ag1Sgzn)5-2N5&SIB8;GmlrIx5Mro#8_D?Je$IS&v6aS8EcZ~dJ$m$ z_;Ysa%uZzQsm+0UV-N>a0LIF5a}Kh7jGH-PJZE5k1NjTs^K%%oIq#ii0Br5@{5EGD z!!4jv7{@U^DA4bP{k zA~w(UqUh%R@7O$({MmCDdVK8Ww{cGbW@2}ao;iQv=bF22{%pdv^-LKBGNF4u`u+1a zK8s-cw?y{6w$o=JatGUye+F5QtxFZKyTAN-);x4$XI}Jz&4m!)*;og54O|!BV=L@D zlOj78Yo2FbBx4)9DIg>M+J6UQ(fv7>_0coavAxgb1=+EiZ;@~f`s_w_uF3H;kJ4as ze|q1khL87K&kOg0^VtE+Y2)F!ugy4HSFF>yVAsK1a?jKR=KN#qU6BU?^V(SWeUu6w z1$##`Ca#@zBRlMxdv>RX!)?b_4fgD}9<0&_Hm2rWKM)Aq6Rz>Mw3`pc)!cOdSsPq0 zbFC0C|9z(qY1cLaHpcc#fv?{U&h;*6LYwPQ9h^ZP4j-XCADj&RBt8egNN^e58er~O zSO0*=;_JQAnrhswf&P2E>zA8$-!+If>&j&8=0<*G_q+STv&lVk1|MtMb8H*%tD-OZ zQrNZ8&-`$lC7=PwO`jrg2tKLlXRY&{Pow9+-VWLKI0{=w&4c>ry^vkEw!l28gFb|| z?7*>0fV9}mfroG#;2LiNb3uAg36uq4_?QRog?7m9Kj&acg zw&BR$t<1ZFaBJ+w%KM7vZ5-I2dszSE8raN3pLrO4GJVd2mdG8k`A#Y6WA4Sn{s^?C z?G(HS+ico>=f~*&45t`y4esM(d>Vo}==adS#=jb}&s+dJd(8#!Z;NSv0e3++4^qMx z;8gHO*qeb*fw>rfeH!!=x^pN8&SUFK+Z50cbjG$F-hp3hcotX#T&s%M+$YBF4x9n@ z-OmBn;2L}ZpAC#*ti6krMRw0PU-z$jU;>zk-T7_=>(E!D-v@t!K(G?Kd*T|h=Wcm? 
zTETvQ8J`l^%OL-StrBv7hp(zBeXm2 zc6c6obKsfh9*hT8qI(~(K5oU&`CdgfH>_DR@T~|>1(~rmLN~Tek=3nt$M81}uIZof zV0`u>yN}l+`wq(PlXxH!x*#A3<2(;WUzVe-ZD48!?qZ<=I&?w z3h+Q|YvFpZxnvytPBU)`!;4_&>UVi4_#GeHax$hlSqgmt@8c z!$CIMp1>op6{oEjY;NQNZ=rulo4H}m3uEB3tmBRw7uM$r_SnelkSl}r=nH9YjvO1d z&KlD)=%bJ?(Ow<>E=YxL9-9O2&|VstueE{kb#L}}4D9cMx6tc?smLE;p9|bGgMl?t zf5++qB7paicfoL;u?1bYL=5qRDi)A?|2;F_7w&ee7NhcWV@8zcAS4s`vt z0@tkwHup>(*t@TH%S`CL-#Gf&zYVf^;+Wpu+@Dox8-uL~ay9s8xDv3oxv$#cn-u#o z^pxnv-n!wlTm#?HyJ&Ci=mBt6^qIhQ9fZxidlO7Tb}y6$-s{=|>)SP0-|Ch)& zGY@$lazEIZCIZgMJTjK9sr4}tvTK$L*|VhtvTN$|tlQ34-{iCnhY!GiWB(0*zcsG_ z_g!IZMQFEPb_bilLwrZW;j~B6?%H_w+YHL0TblxD_ipzUHqjZ`JluobJRJuw14rqb zh4ww5FZMLB@8vu__hQ4vvDXK_gJ;ZAun&DMdRly*qkGTWiSBvs+`d3}d~3ux+CATT zV+(>0gP-XCF?t$s65aYL%+X(9^LGs>j9eX`z;t0KzA)Y2ku}W56lDa&$K3cM$b^j#~d_=>}yWX0@FZ4;FwAA=?u@ma8XRIyt&#OL_L10n0PlZWVRK+4+z6ky;OM$B z8T)$B5u1DIDe_{*vOby*u9@fZD%jd!{+m~qKwNMZSa)9F@7fs??+`h#*94wlgBWil z@?7LDu=DJNELMW7AUjA)o9BadsyVv%vl{SdV6E*5-lVT*kh<~Q1?N1PBGqIcnfe`*Zev@#>qPAIyuHY{62+?fk1uXO~7@Ui0@9|Ir|mxjDG|h)5f&d zKsTSp0_T|$o9|T*yK`&_JY$W6-yzPeHSn(d65nme?rF!02h61;*iO>!J!d*-iX4l+ z#@PGXJ!J1n#>n?~f3CsDGxQa5e&AY_L-&qoj(HccPQHcR+UUGCp!Y+6grBu<0ygj7 z3t-p(1LS(JeP7`72Iz*LIp+TK-sjp{r{-Zd2IjIqI~azq^R=dV4vxj*m^8pNxU;{s(9Y%&|hWpT%yh zoO2uW?yzUUXnd@9?(h1@`Dk#x?3w+n)peV7 zy=pxx#xu?R1+?k!yj;7}=;rzo{H((lkv)&xd)6IeU>$YNpTnLd=isu~Ga);-qM!`$ z9R~n?Qo`OrlEDkX$MmlQTN|8nC~dC2F>eA-gPMuCIRq$+8e;FfNSJlUItuqzvJS-^YQVW{RS$A?plpOcMk3?bKHI4`)-0e zGqiE{d&1c72FsDnd+T;$`nul}!mB}5+Rf80$UlOm=;P5ni;khYk6OaF=wn~U^Es|} zW3WRR-!`=S-RoXA&#iy1MK|p3jni-~^yI)?ssUzUbMJYM%w%k9`)cF^AROJ;7`HO` zH3YG+8PjUCyRThW=hq*!#dZ@oH)G+x-a?<{=w*{N|Lib&-!ZXpWHG|R3L(hRv03;}*Gl*HBuIXUbd@ge**Y<-^wenfAEZfw)QuDRp& zfnB%Uu(fUo@V@Rm-^NxN-S^Ll?)$_=?*Us&oI_FU0m!c5Ku`)>eqh~m{akC$9p85$ zcH{7jF5B>p3vYnEmzbZvS7UG=%s^jEn|BcF$qRg{B3tJQqi2Q9rw_6FUVqYO5JvfH1HLct;wKPi*8xaC+GD*qHVQ@j)4M z^J6k>ol1+}5XN#Jm=o8Ltqs=gCg{Gq&zOe39NF*Vf%r7VRts2*GDg|gVRxQQf%)gY z^t^UHyO6Da&cpb+_sm&i=bp<9oL?p2ntSH(|6c#ziT{RG5xAG!bB^JDF#d~S#~K7q z^IXr#w!oj2x%Pg;`+jGU--WGN4S{Ec=b-QXK6Z1=p9_5kufk@odpGwT`+;S^x%zDP zpfPU=65{VYt_ZSwcPe-bB*8WTo8M)%@Q*|`rZ0itoSr}4HCn*s;K}eI_%`rc(>sCr zV@$0Np5=ZUSrc5pxxjTbrkk;M2JV@5_&&nsIbvVe*tt&!-UZ$J?h)f?UYVbc?|V3= z``@|hlLM><&eQj6h>y?ryV{HAtaUHEy&ZihQ?;q}w zlW-k$YxhXx46wQU1T?}vl0GefwXZ2{)^p?MyPGeCfM>LI+gMq1tSgn_%)q$bMfcxj z^r;FLhA+c)VCC(=y_lA9B4G2zJDcke4vcGYI0()Fe4oC+I%{nj!*fQ#<!bNCwBc#i_B@C}BoC)2S{ z!pEHP+V?TOV9&_qg}n?7%amDljjX0>4$wSNFVk+hh3qe(iv3aUa`3I3v1u zisrC6Zas9bB&N+6tfOrkx^Z$XKgRA}KME4i?wS}=-(d^w-f^vW*7aT3ovZgV=V)HH z2IlAlY(qggHM_#X0<}C8zKih`j_bXN-MS+KgcU?bhS$QNGrT zi)8W* zSL4?o-S_d1YfUKx8@B<#=ey^|fz!yI2XDd~(F@ae3~X$Iv3J1kd6)!w9kyk_yY?X1 z*wzOlX`c+7Ydqk*Jp(;Iw5y{Y&f*gX~^VOPngK>WYf1>-HY0SSto{jG`VBNb1I%2yO}l4yTVR~b4dbzoHsA3S?Z$j62nC{ zU-e;}j_A(EJ?*)=15O6UAv?}4?m6#zyI1mK?*puJpVIF7Kf(Sxe!s!iFUJlC&Lus%YnlQWyOs3W1J6S@ zR?go%cnMthlGvT=FR7 z*_>_$J7;5DKgz!fHglvBw!g9UfXxfnuMPH$$kwUBjPnZJXLd(^kM;-faCE;5@}WD| z*vJ8}dF5H+KC6XoCVEQbhVUfx(#YMBlOww?t29H zJhJcZ`Qvkz<6}&nXE3^Z%NTiA_L*xyV=xuJFl?Ut=I2NFI%i|z8f2x-T-lA?J(3xD zEjVmH+O3O@(iDT{SD*? 
z*eWB}!Dh@oPmG;+7;Dch+TEMJo9p!*vfn0aY4=Qf3ao9`KJTW+=n=X#$T`f$<~+h_ zcbo#)G68dA7k2A|@wOhiehq+gc}_p$7!0gw5wLq@4DkEL^T?XC7-R>j(S4>lY+apz zZ0rVt5D*{RAlN)hMSpY9v)lDePanV8PQ&Jh`(`ZdKcIgBr$rx*d=!7rc-P!@^1I;< zwp3B=Pq3MrzLV$fP~d&1DA)m7f}GeA(QaP&Y-_Z&^F81m?us4;TwCX9Zkp?^Q7F1~ zu@`zdWNWbTHs^lDRt9zs)+OJ=+&+(w?`^&mN4EA^vt5gy(cb~X=;Jtjfq51e+{bQx zHQ$#aKLg*QyRV(&H1vhQ{IzadUp@hEg4Ec&Ykq}X6P^mp`(yAdum{;1=pGu!827;d zY~CrX$DS8SVExW9#y{xS;S%W1vljifz}^X#AQwmWS+0@ywtwJtz;(C3_qQs*GsE@i zi7gmDNdI`S-(Rbc$064MmFU|By&JMM*gP_(PmvR1Hr(cslxB`t5>t=VH?^T4>A#Lt}cT<~6y6Sk%_L!N=p2H;vX z#I_k*Ms&}gm&nG-{O%2`|NXH)1ADN$Ki%8Dqjk&N^E|QE^~AOS-8nk0cYODacX91$ zkQX9*kA8%Y^E8i6Vc(A3yf$j;OIcW&AS0%PSF@4U3v1amS?iDKv*fpbd+%7S~ed3N}0<7o|aO)8=1xjnFUZp2m^Y(;j>e*yN*!g#x3*TA^sre96krXvpp-M~cjhCn~#Z;ZOZ z#>;if20ZV)kIh9kkIgOXLN2fjyXRPZ+N^aSqFbL50QXe}>_0dLZLWDO*g7{5ws!W% zrwRH)U@RTy5whoG9`tnRu6Y4)5&3=C*jdl5+rC>U@chXG+|Ps2eUB}``e`56{B3Xp zy*Rpi$U0jCc{KLnz;rECp0mD_x#!-UiO)m0JZ;6X zSrcXg^YI||TELjPhh2C7ePj^wN_?DWL1fPy=iU?K!)_jUZg?hoo~#7}fa_}>`p(sn z&oP#D;|OqWzryL@R=|5oa{Rmx`u^Tw=h3GoaIGi6)3CWGL|S~V0H0-E_5wL*&jMSQ z*1_(F^}sxe06x?8-h-SRU)SB(eu2&PvkqJL7lC(Z^GwVO3TwlsGVDIz4Yz`q|Bv?W z`~U0l{r|X-RMJKgkrbt&w9qEeE~%7KHcf?=mV~sFvYNECWMq#@_TD>^nH98gHI6ppKp{fG6%i+`CiU*O#dpHy_nx?SuYvqF&OQ{a`s)xve$C`oDn~7W#mBh0~H?e$= zRbv@Pm~F+1vdmfLBKPt(|7)?VfvnGGw4=jl&gs&0Hk&M#_0o~0X8wOQ2jLIoXV2U~ zt47&S@%8eC&}G^=J9F>ZPr2TM;>~ED!xomkl{G$IJZE-(L*<>C_ds`+@2->SwKQkR zb^JZ~nV%Lk+$pRf>&fP^ysPTV%^5J1WesFs^s6PrICU<~Yx@ z6wUKr#B#nSFXQb*bMJXBxxeg*_58eNRF3%#6{WSOURH2+NG zD)|}jyV_;=N9ildUeVr@ovttA&)IelzbQXw_c-k?H1CE zvS;NEr8(Qm&^+(Fn{(zb<>xowt=gMe&hXr4KbCLy%x|ujInI3kim{kwE$7{r`~Fh< zTzV24tUlj#nX{amnaiW~S7w>xoF5hR6=AtX&c5X2_gH=hJjXJ|{0;8~HkiMOW(*n6 zB6%BWo>P9~Zs+Iyp1(73ft`D-MenAW z(|-E$KG=+#d(OF#IohFJRe#Qb26FP9nDu-Z%i5nHmh&rTZpPT1=36-P`Y3B6mUmzF zXx^h)kKr#B%UG|WBmQI7&_0FbzRqJ=*L_*uC-c~Ka&zxFM>3v=)a5;tdCdEEj&={4 zGorcJuk=n@OS~b?IPc)~!SbDQy7;}~{b_QZ5zF^Q z_I%EsHe%U_z2p?-=h@|1-b8a29>w0#?!#V}(^Nd~&+07qdnC=ZcIhjvowF+3ytlJf zbM4*yoO^%MoEcd+53$VcWH~v*x3N6itcOqe*)#F8cJepW?EeO=yI3hU<-eRf!~E@I zfV`aZIeW4mv!)lS%Q`K@UXwFMJKs!MD;Z~fvCMhqCUtqg#G3K57V?}vlb>^^Gh5Bp zu?Z~iyga|05i`|%#uo5%uI@p9#?Q4!vJUJ4;e69>WgoM>^j{}lT70nf6gEP887)lP zvzBry=)a7gF+R`F`uIzHCGEt{W0{-cVi{W)Irq!auIyla`7KHJ9cZc^7BTz9%PpHRn`ImNRAv&Dio==1iJ%K6@!+&Hlmoc?Q|nITNxDO0xVtq!a7L&w9;yneWer{5;ou zpXK}OX_k4qn`WKnesiyx&)dcFKFvKW)DEjH%f05@c}o9U{rMe{dEU$)Ef#*(a?b30 z%gv*Av0OiMp8L;zoU89omizrCaemI8%x~s2`N_@qaNbG#@pCrhJ1*ZhxlW#6cYeMP z>e2=J`m#KiJcFB9_EXkQzAG|6ztPNJ&hw+Svlm;@tHtu1uHt8p|}n{ zdJldVn)N$_<&13`2LB*_&a|B@^V)+=;pbfV-?>QFi&drlXkpjb%Fi5U&5qDMf=>U> zUd7FK&I>6texB8rb2IIniP>}gX+ye_Wsb5otFo;5#;hD0#S6pMZwf#6n)l)b+Aq(Yh?pD7<`6xy_~}y6w9+Zfj^&RkCzauNpEGPswl<^J;ypGn8-%eb4eyq9vHm&whz zGtSIc7n(hqZ;-5;jB|wcKCCm#eP-l35-uFgAo5zE{*p!L+G?-PCbX2^Pf zgLM|q9KS^KJWgUg)J$Vp7n!f@nJ?KeId6z><7fSJ)1PbXp`E|uKE?9eJLgKiA)2$? 
zYvy>fw99GE%EU6ptt{`utecEK@4%ept;Did4yL)+d;?@3Y~yE+$FQ6oZ?No%)bydv z>CLng&DyCfuMo?g+!PLf0)JnY-)Q+}Zz(T(yQ%(sgO26r?0%acUdETRex2O&Y1a2{ zejS>*nx((0-29Cv?}%;mL^fP}n!K)JIY09IE%!ZM-!AdI=W@0b)6Uw;eygZIdoJ^p z{>&f&hvyv3{u#~k-piSN z2+Q2pV##a8X0ehi>tg(W*D5M6XGorTu9e?IHS}d&=iJOUQs%HOo6Tx#=Q|?LDPzpJ zm-)&Vm#}&)TH(zrVt3svUh=`$YcTEboR( z*zMwZ)>*@Qv+k^$yqvSyJ9(cT&(A$4H_vv#e|i~hpf=xz{rDOGGWl<5XPySL?A1Jn zjQw!_yVyC~GS)ojdUju8T}^>JKyPfFXed;Ww+_ev&j5DL9=%yCBdyj~ znf`B_VmVtD(I>@=(CoS2^$*t0xDKEn(bL4UCg#$e^aD97=?VHzW>>M<`uej;{LJqt ze!hP)--+itDEBp!<-BdFFY7sHcHWhF&bfZRg>v0H^ymDXsP0U9h`!wGf&32iG4ap& zm06xk&Z(RmS*N*n)4R&zbziV$0|vmO0H_mu3s`-eU){ zb=s-TZ|>}otn>EjS86|^o!t>K`8IrtW_{*+D)08Z zucnG+PIGp*5Z^(w-?APvPmAQ`o?n%hwVdzE0j!;P{H&>rsk!{i0PMCTQM`yV!94cK!GA&!zb$f0ln5zpcLfEhpEQ&(HaKkoenT zITN#H_GL%$^F5UFqLBWK!pa??14EnbM-LIdd)M*8a{&N?EZ?MoP5vK)xSYI z>uDCtyk_33iRbLiSZnZe{aicu^niB$&YSuV`47>2pa0As&vFiY!LDODck=htJpYk2 z-$%9i`KBy}H-YAPox!rk7Sp5UB!4^0c{t&}y1x7$_<0sv^yOLIB6kLVEY139E%pq} zUMfjviRZmFkKM<=Pb~W&_c#$JXU>Z>-=v-RdA9Z0YL+#9wwjIl^LLS@+T&ULb6Dmp zXG!+f?Q*8j@N#CB*Ir2n;r<}rk6(t}%g?t(&fbi5vi{;?nai9zSsyurvOcnZ-;!H{ z=1e@F4c6CFe&#IaP;as4_`kC3#az1=`%QZt%h}ype%`e?SF%2`x994+o1bsztihA` zuhTZ#`97Sk-BnKJ*{d z%FlPeWd74~GS-~)8RJ5hJpcLM-a4e% zEjf#_K67vVXe)WycbV7g`E_WXUDkEtTg0-LvlepJRABkGJ5D>6@1XH?s`!PhJWGBz zmTUc`KWEOBtR5Sw|7G=AZ~42~a5`PP0slspvnTg3N-VtWt*nu}6Y{N=Z=5pvSJ3eC zd`t7MrP-@3_;dCDNb|1B-vr*I)y47+nzc4h`%bx=v~ylh=Z|4oPg&37#IKeYPR@ed zTex{%^J&JsnLaGul0KC_d3lHb#b|{#0FAnssmq)->%)X};&L6z|C|sW0=KbzM6-{44e6J$3=hH$cv9SUZd-`BmiQ`L|@* zlleAnLvNwk>(yA+<>B%UqdAZ6)|Yd(hPosfn4wh&AkDM?rqWPxJnU?2ri`<+A8CMDR9zSa{&pP*X zJ3Uc6XLL_C;XmyqmiKw~-7J>9ly&j7+FIf{gYxazO{@+*j%Kar8FiAAHTf<(fj@`7 z!ZHW<={~f^}m^^l_ob}pyrkTGyzwolQbN#HdDr&Mu^G?b5vNnr~ zUq-X9hltH%|FFdJ&60h)j^#JqQEY&`oVlG?e)HU>y+l0RJjb45*+W-}=UmC($ug&T z_vL+&ajxZGP5;r}MGs+_@7!A$S(~TOH)%1oc?Nm!9IAZ-Ki{C4mz;xnudSfD?|kF* z(>Iu3MLgf6H?h1&%jjz>mVf4*?~FXJ?X(5`QeVD7?q|EizG9ukhp|~K-yV5>`P+Qn z16Rn;`{YL2ovvotd!Ol>&7aTD8qD*{xE|Ne{N-JeHI#4MlB_N}Ts+@cb>-x}Tb92; zyiof28^yAhZ=hXh&ZeAcnafl7KjMwo-kY7nuOZ%%|CYYYODmQ&nfF3_{e`u2rrx8S zF|J{`zvO0r{LH_WW^Q`(v%WjAtcN_;q4YZO+*|hcR{kjQqxsppud#`2x^`K86Xg}x z&c4ZaSjLdwF^|%mi`)1)Kl026(YwU+?VR6157S)xBl?)yd^7w^^A5~7vX`=kv&PS% zxqc(MFU$Fy^Jsv+oL!mYgZaz&6Y;)h)A<|e9PzB1?ffEiL;A#WF1KU(1_^fw&A1ZJ zp3GTYPrE$W`2_iy&v zU22|VnU}s|IlD^Gtl47nUf^dwGsYeC1Mzlp#?t&A$(fzAF85MNJZt-NdD)ZuYv&nu zcYGYT}c$vsMd<<=v3;EZ>CPSQ+hC=)Gd88_V*}%bv`hzJ|U|4^W#kA@iQU z^^Ijy`PqLT^XIcAY#3{eH%k0?{n;;P(+X+ptITR>H`dOc^q>E&t?ZHW^yU1?`=&43 zrZ4ZHA>yC2Bh^);JJ>J!>d>sqL-^xq?&VB5g|$cUbDn0b`E8N2Y6$z0pJ#py&HcVa zJJ2?AuVDT7InVQKd(cJtbAG0s{d6PCp6)0&YxqL#T<2n%cV_N=0zdDWaOSa9V#jLN zU_1E-i)EiTm!H~Pdko8cZ`8kx=2{1ejiWiE=kgb@BHFKMKgjYOkas|y=`hw%f8KRj z*O~X!W#6U$I98p%4@=EBeN$*>n(x6|`B{ghwet;}vwP*{ z`DLub`E6M4?{fB)Sl&TdKUq8Z=K|52)n}hC*LN80BDRP^g9r5t9&-2`)`>4r0W?wXulY5%3oqNx|&Ag14lXI!E zzMSbf>)w?6zxTFwZ+=C(jplb%&eXvy<6A*{vF!g7w42esbUdAbli#4t`B^jJ=lPw? 
z@@;nzoys2KXDvKIGpF_Cc%^c=$2_NXoTFQPu zm1T|AlbiX>H$=vfF*fEGW~H?sqT%NKnmG!)i+09&xB3qBb?v-=2C;nC=6Pn%rKY}| z?5C`O+H@eB$Imr0w?p{3*GcNa&pOOCv(~d-9xP@Ki&~p(PD$&pl)n`gcmC5;_Hz!F z_B`~x#($FEyM*5l=o@L;bM*an<}y2Ndz%7F(|W4s{=B*V29;`#j7-I6u-i z^xwcAjd>u>KA4AzE#V)FQy1?++DctBj62vvIA>#i&2AE}jPt0xRkRIO1FQ=AUgvM* z_rYq1I{^19{{!W`=NGi?GNzJ$K4ZaCw)`@H2xUegRqL= zRu;R1UkmFv%!g=eb-skPCv{YY@XN=7>~uP_b??T+d+IwUD%zsrGL4) zt=h+Ff2{8!%zd;AW9{O5O?H=6C#^>aWBbqh>Gl7h;YSTcdxax=*$1Yk#8e63l(Ii(u{MzbEHL{si1g zSo`5t6bo>!#rc)KuYWdwBIcnuB`{agqr~guHNty> zo~G_Dj3+UQ((^EXWVee~!`UqF9@+uxB&^E%-s3;ZKNqhn?!}lt@PEb_g?W{l!s;)^ z8!fg@|0;D2_}jz>;uXdCjeekS4u2BfVHo@4-9@X3ACJ`->q&aLy3g=dv5h$AJTtypTHNo0Q+o{=(wVG|hxB%-XR$c7^eMjSLk$WHQgw+h| z2>l=MpXZ;CcLwgI@^|uo!5D)(UQH48#c?lXW5w5~Yect;55_Bw@dy1#?Ogt3yu&d{ z;jN|(aZbS5MBA(R9A^#NjBz2>&+KTm3-uj?^R(RiX=kkFSXK0Y$bW%<0bY09f%1Ri z@5UI5J3-C9>i5GP$gUD!tFAHqTzm-Lei(n!kJZlOUxRl9Mrph?v=PpUIGbq)HEVI6 zQhyQ7E_RH%MdCFuo{_hKcEM?ZbEN){_%HImPWX5NkTXtf zow_FUEU_=xP|OlIf6=XKZ{|xnx=%`g1lamM3bt*$smNjVe5)~h>__7VGvU4~f_=O6m1+FSY8VGfaB z6{j5L-Sh;kW>`*VD<_i8T@T8nq>G zCTZWRt|{%S{S6y|SsEw*QtxN#Zs$+s55ud5aRA;u^dy`XIM33q>Yl?Gg!MbCt!}AU z9jq7SJVMXJIR)ov{oDAj@poX}%Ld~0#=T74Z~VWoCgDy|yFX58IaiC_r?wgGC$@uK zj#~z+5Y~3J^Z7UMFOy##=RnMRX;ZA0SkKXJ>Ym3KjP(brqi&g4U96YnJW6}uw81$> z|7ZNy`S)QC!s>%LT+Q$Nzj3a{yjERFjLF*ft7%Tp(f*cQfmaq|PmIsiE#Ob%AFci% ztovv)j8+)W(=$@%S}$M>!5Ys0lh#$cT;FkcFUx(5_QX0BtA_sV{5SZ6@kYq~gZ_gt z8S^^%rPN-dy+Q8%m@WAI#lB-#VwDjqhp`vN7wQ-Cr}K}&sbHM<)8<&Mv0k8C{h02a zdUY>i4aFV7|BD`{euchzxUa~6oc6+Ni&sJg?q4+j589?+Po-tlUW@yn+y^jQ@y`|efsMjC zKz(8LU#VTppUJO@QPEf*pe=Du!Fh@HNFBz@7?s!k@)I z7V{8eeUP@oZG-zV?WyKRoL6v$WB$uli8a7^UCu_@2csQEZGB(y-{D_|^AFoo&UHA` zXgPJ|v94styY}^%536}d`(*z4`bUcIq+@Uo!YYFGjo2Oh8~L>`D;eiQv^DOjxUbM{ z=B<~yomj78jlj*n;JaF^A=Vr6HqonjJo>2;lIZpfs_AI|K4(@(i!vsbq8XNVhvsU2CPTbJfhu(f1&=L z*;SYa;}pgDR^C$n9DW_l!;J9}dJ5)gn6J_8#`rVFXt}TBT!FbKU5nKi>n(Xt(SA4` zagNi!ga1DNa;!oah2`8pXVL@J9TXmGggFiCF*T2BpUS^T|1LHTvm#D0obTi<JG*`0dFSGlk!iemx}+&CSo0`zJ&T8<*el2 z%)d%aeY`5h_XIr+uLIs&^b619SN%`0acbVi8;!dUy$ACo%=hFzOV7pYiq}B@5B!h$ zqwosj6qPfR&Y>06Rm7Wxu~BY2I#BF4HVNl2wfn2x>3VnZZ{d%Ve>~Qa#(1Lmll;^1 zI^w-ezckL4a(-ju^}mBT2D=Eo7q=PBPkgH=r4EP4|? 
zMC~E^Zp7Inzdao!_B*>8tFro%>VI<0Rs37|4RER&=SgB4`R%YeVZB3t$C@DiF3woI zed&Ez&9FX@^E^Et=M0=i`di80sr`xeM*bMQA~?n6-bm-tO6te!n~n9AdDyJ31Anmi zA8ayaCH;rvl*0Mh^;h$6<2S@R%2=C~j4z{M-4B@TwVWGqKJ54wzjq-=lx3nI!%`);P>!bOT-sypQC(NH4@W6Q_y(pZVMP zo7G)~RTQIyygBq{dYIaY`fkGc#yC&rZ&BAtd#Jv@*c8kvIAw5taqYGI`TWLs)s3^c z*i-zDm|Zd7r+?vGE&c({Vf^u!#rY55wZ!{a-b?f%tR7e==-hc_dC*oZYU!j=zB41n+2L-9lT4cf#z3`2qbK zXR_FbI1}*pqYq-W!rCh5Wx8Jf#aKPDPL%Tte>;DR+VPmhaZ1XaOK+uztFNqY9#$Lr zjG8X|%f$a-*I`!0D2MSY_TBu2{1dRQmVb<~KFx2buQTQum><%A^j#zN5!OV^67(Uw z)_C8VlTYNm!oNq(C78W1Pm;Hr|2h9@^%HRS!zv|r9=(kofpNILoAI8N-<1v*`VLys&tJqp5ofZR8piqzzm>i&c-`?nqW|ifBK9%XB+UKk!+0m-eJbZwdawRV z@y^0)D(6@J7yM_`PQ)yMQ(ErL^mbZB{So?Z!8(;br=}Z!g!mqKQ&}~P12BHazK6e< ze-c(r<9wF37VnC8Cf>(%5AAE&R-CKxO43L0PQltH=QVnt{()G%v6{*GjsGS8S!0-l zxj#-Bxwp{y^hovB=(`o`JI|#p|9N$1XkV_c5atc6igQB`d*E)Rg|x3@pI}YKEJYv1Y=ifioY(37`Uhe4!D=q&cm7xW=Une<%#t`|<=#pc z(5mXE=(`Q;H2Q*??))pn_r#mVjubx{=OCOv@$ci`!Ec6F%UGYMr-+|{*%R{{G03FiX?NFi*wXF6RxpLH}T^v$0yp`Gfy8|9RJ&j9Ch&oZQ>!LVA??+i|`(ucym- zQO%j!SBme2H=R`#KL)1)&R_WV^Y7$eE8ZNlw(+(Re}Uf}w-@fGbT6@~Y#ZJb+%oiW z+_spX%XyPNpnnKnU%Zxb{^Wnde*ya%ywW)3<=##g(Q4}Fd@q1wP#`=u@XuR#!71lS6ZO6P0`+Bi*{3r2G$Np0O+w>th!*I{RZ7uI_ z{~oyzu6I4=R9aqpBW^p)ujIZ%AC`9+W`E3+<#mwvkM?)kFY~X%E{A!L z{Dt%mdJM)QtXJiqMaO6t!Mc&vQd>#w9{L{Qui&@FsAsIFyVfiGUN~ptd`^pq&0t^P z9PN5j)!jf3(B6dG9&@L;|62aL{72*t#~pxsirjzs-}7Hlb3Jx>%nEWB(K~4kjKx?T z^}i;+H-D_yzF4zaZS{w$FQo5b{!0GIIQ5OUool_yKMSib))#bNv6<{koMSL=P&bVp zsJ$7t1Lik!-=mMp8-aN)W*d2XVEw>GUA&r*J!B?vVRF zeN5ivnCD@hDz6aMkNl$Hs<}b?K+KBrm(XSOSd2Tc-jIJb9j9Fk>n2tgtFrpN^gYU7 z#czYrz*swAzRvH1a}Lf|^k?_n*>!HzH(kvPeg(`en4R#xmG=RCT>lk#=i{}Nw#u-SN&cO5 z1+9&7tp25VyUfR1>iY2~i0_9vkJZCG0;jOP$N6jcZSfizYbVS%`F-&Q;C(~8;>{7C zDSsBfBHlB2U9i5F^AUYg|46J0u}+t_H^$HW8Fare)g6p=sQjgLC9Q+84C`(A=g^7T zC9rO0wdB{wtAeqQ{wMfr`KRGEHqOqNZ}IzKor|@D{^EYSsk=$vEHyXs55avFw=3ok z@;;^;^^d~42(O*I!Z^G5GieEZ6|oMJyNuRVyBy;kdHv}m?fo(C(tis*9_L84Mf5+( zU&lWkr-?Ck!Frp24$gVz&*;oHY{@wg`m?s!#SKN2_ z{V~s%{~ettRtImk{6q1c$2$XOr~XgqX8mJuF2U&_r-=I9{2OUWeU&f{m%D=2Q#%K1 zCC+=b?F>+ zhvB|}+a2#Gd7sj!^pC~66tAP4eQ|!}&!(mH9g1~?+?BMx+MBTM!g*irdGs2w(l~46 z+)f)|Ra0L~|7QL@{0>+r8Rr>z@A1#Yx={WP^h~^4#BY*c8Sh2BGjV>_zm0CucNNY+ zoKA9zs{f5&T6_-wFpMg4@1n=6Jq~X!TZQ$3{PXD)@iJIz<;|y!@v37K*Z&m%UVcZc zrpDME_kI3(co)h4k^W{bdtlzGZ?2ldF<-*$fwfD{XY^@(H5Vn{?IO~Z!Z6EoT~Cy(}wDAma_)y zBl#E7>%_`o-7RM!t*`$C%wsT0$a$K7Kfg0pbK~rR`yu}Vyi3&lME@|(+r@8|Uj_3O z%wAZ#^?y#E)i(iW2u@cyy6^zt0_Q0CYiJ|&x5!zG^|Abm==EadvDV93 zM32{hB4!PY{pCEv-@xyJ)xtPGq8DOaD*tCX-*s-0cO+I%{;POr;rwarzsmW7|D57r4%?@~UF>;=hL18|N?M{7uf6{O83d;S9q$LvC-^*dOO_ z?F01P!aowLy8Lyt3C3-5?#BE?{tem(;@qo$32msaDc-T_OX+`({~*5`PAg;FN-xG4 zB>xwB3C=>-xlQg-{ImG4WA?%NUCvkZ1+lAfF2m_Aua9e##Q8`2Kz+CJt7083|Lu&8 ze*(ts{Pmchs+p#J5Y~Ni?x2nIHN&ihQCk1={D=5wV6`^RPv|8$gXQn0f19&Qu@<@B z?eeSfd-LDGJsa;2d0*2P#U^77$2wD9DV%@#2Z`UtKMLy@x$EhPYUdlrJy>VcZStp! 
zRlvDl&YiS5R&DiV^u54;nBN`eWMll4UWzkB?yqzp&SKYTj5S~WTeP}ZAF(%a`{Mm6 z_Z#|>*fn?~@OsECjg|j`Nd@uS`PJ}h$i0W2q;>(;y;%R4htK5C5IY!WgPf(b1y&ub zvie@+Kf*r~=M>CsbRf=9xxdlASc9;ZxKo0jb=*#-2U|o*YQ%)J2 zLU;#@&*xXiswwwg+EndAth4pqhxM;3fO)D8C2RJ8Iit zf5sn#H%$KTw4eB3ygP7D;4f5njP~2O=ivP_1%xP2mf<5v&0XoiFGROb~+gAGP!@yAvky9 zEt2~#UJdPc@cQHYBj-E%s{ZS6uEaS*>5cg%n=N)I&ckw6(vz|3 zV;!jPRsIwFvv5ws{DPi?`;Gdccq7#OMfb!UCbmr95_RuuFJZ@uy@!7;?jBe_(AVWm z<&VNW0Ix9ap<;{qHL>boK0sTlYl>0ue+4s8FarfMP%r}pGf*%C1v5}E0|hfsFarfM zP%r}pGf*%C1v5}E0|hfsFarfMP%r}pGf*%C1v5}E0|hfsFarfMP%r}pGf*%C1v5}E o0|hfsFarfMP%r}pGf*%C1v5}E0|hfsFarfMP%s1k|Ifhx12RbO@&Et; literal 0 HcmV?d00001 diff --git a/resources/Box2.med b/resources/Box2.med new file mode 100644 index 0000000000000000000000000000000000000000..a7c9e049fa2217e2b65a2c61e537900ea41d20a4 GIT binary patch literal 26612 zcmeHPeQXrR6`up=?3kk*uASHvoCWMSn2*}{62PVM9^4%m*>{Hx5fBqQ5e;r-lqdn@ zR+f@BG#{$c5DBSKLkg<+10<&QH;kIa}h{ij++rwc2F~_;l4VbRz zh(-mClCk9!Lk-=e23$hPj=EcArk}2Vdh`}aucGV51?7@HND6<@HRg)qym7AS zYeTLfjR}u%(S!YSL3{0o5%pw}Ysl`01XiBxhvqzIowCjyDuB*wW^#>6W8u!U{cz-G zV2^$jVHH}P6A0H`=fxxLhqD4&m-PHk4{AU3Nxhcq^~!r)zuq5{I!)&rZ8V=H%>6$g z8YC1xI(-KS#rE(rnGX_XbQGfC$SY&Y@L$H)8kg;|@$uuvZBZAHofuK0|KXQ;G*za8NXZR zf!A_bF0LihMGfnH8(Xma~m4wIdkVX%xj!4emwZu z1QB@BCnnR%4!(BNPh=ix?=Maa?WU);U^#DX@f6?=W^=QtQL-h(J)Z^V$uI3AU zEu;kJ_Nf_Ue$P9{JF?o_TtyE-7oxmXnWtlqJR2}Q>lYZwb5$ zdmuQg_?by%?%9EJKYZU`?e6~DyAA&3j{C&gU$uPIw`0h^-5>c+S)Dr+?|Jy|hfCe~ zK;0wvPpNVD?|5_VoD&yX23K}J(fL`uJNVnr|2q3ZxqJSbIr|ggpt~>j_Tzo8PH~$P ze>}V)QR%++z?z+#PfT;MW#OGT9(~x8=+Q3l&bpW)o@qBz#AgZkoQwB0BENOuZ&%`p zsN`)Ax9X>*616xX-71j*6*p4UTdwGLGQ&?Zv(C?qyc9|o zQ~t_$tE@8RZ!q}(SDxkD)FNOJun1TLECQnu0n`6-OnqiI3`J*De#Y$i9dlrwY7Q&Q z`d2>43;TOKMcC#OD6)MHn;iBz?Dy|bge^~x z2kF6v?T$IX>yXH+4)pUaDZ{kWvk6B3&?oc*`olQTKa2x;^bL9R6=Oie*f1vWfks=F z0U9#E3)yH78HnIPd&tB6AkzkmfJML}U=gqgSOl&=0=YWx@;THz@Ahfi67_iIU+p_p zzE8ftQ{{e|cnlU89;UD0jNu>R8SCXMSh`fT0w&f!L?F-sI3ND^Bb&XRt36?%o*d>H zh^SYD0`NTc!!vd?#)x2SSn)4>4 z_a4+2!o0cGcx)*!JPK?&+wKwq?8AZY27Vp(^T3D0b@1_E-$OmOK|A&vp$>i@_6tE4 z)-QZQNLe=8pbT>1M*=@m_=%9F*P{)}!HX34<96xy15f(>z{fne7y1s_oH|5ff5G=9 z-=jE<;uMOvP=pV(hT?4$r&63m5kGsxzk=b91g2BO+W`EtSvAEhKY^v1s}Gmn%c&nv zu84-D4p{QH^#d}WO)UZz0gHe|z#?D~$Ug)w^T%|L+($2e%*U%0kM+mo*S8zyHx{hf zT-lKgTA*oHAG=popgtFNHO|;7B$>!*n(mJYc6p12EpS+W%aW}n)`7SuTW#T%=W5=t zKFxWvP47LZFNAq>t?@WiV0aYn%pScxXMYC!G}s5ieiY8b*MfD)J{i#Pb8wz%$Uzz8 zB4wWl_D$3w&;Awgz|R6%+>ZS~r~?oDD2VK1K{+DIaF1Gw^%Q4QyqzMRIWs8EqWFD^ jcTk*3aSp{gidPz}|Km!LvwRi-i-1MIB480HJOus=g9y3a literal 0 HcmV?d00001 diff --git a/resources/Box2Moderate.med b/resources/Box2Moderate.med new file mode 100644 index 0000000000000000000000000000000000000000..c68570b46fab16c0371b7307dd75cade911ffa28 GIT binary patch literal 150100 zcmeFa2UHYIw=PT;K?y2Jlpsh@KynTbIWq$@!_1I_2&e=V5G5E;LBuSgfPewSgo28Q zA}yjQf~bffL6Qh26v3=;8^a0j_nmL8f35$nd+xb!V^4K;RdrSEy=&J_wcP4pV>ex3 zwgAWE#m~dR!y&@K{WJ6HXX5uocjEf_;!l1uBl5c<=dY?9F~1*te&-!*EvY{r$@g;P zL(K2@e-cMGI||L;-qy|5!_vi;#$oG5^LMqh((tt|&R%RjLElTJ=m zm-p9%`N;%3em$@MmHhm@|By0S^#AEfBWp{NH?^6pQAseizwy z8kxU;WwJikPq@gD?I$l$5+ajh`73SzEF^#a(D>xoCxOQLkBf)QPqv+W{|o0Ff8+eW zCC>T&6V63`t^y8@<$@&Q$cy;-`|m$AH;T<9olMf0+|QNr3(u1^Hu3$c=kq(AEav&W zrYC7(@(TM+Blrug{3`vQ^ZRFQ$dOZ;gq7N~e?GT=#Sd+M?My${(BJ&}hB)z{={!{WRwX>xC2Y&d^_rL(LUv(#W>fbp3KY{Zp|CArP{8_)hM8SVU>@R-E z`4g|Rey`U*ye|GXf296S|E`E@%!`mPG~_Y=xplG z>HSxD{WJGz9PvLN|G4;_>G=T-leK9o4wkh~_vslR38{}ZzDKerFcQaQ{OufAGlvo6i2w3s1r?Finq8Bn+=YI?Q|98V< zU)uj&@c0AdKYn;O%4B302kCQk{Jrx3)c8C7?*f7II|6s_yX^t-XvNlNUiP?DZP>N- zq%E=?!|cQ#sAH>SMA_r0AS`Vcu-I1X3u7*^8sFDG*mKG@*W>{MQ}UNAE_QRkr^e!u 
zWmb;J@Lw&Jq3wjE9hZwJO3sL^`ld5;whOo^_1-!Ht{6?4_Tu9YS3EoI7FRjqhBfAM z^V)kUxLAHxtZ69~>i4Lf=blmVS={g);p2{IjVI+t9=l`6c5Tuost5A27C%odbjQ|$ zP?Lm>R1A8w$aP(FL)GGr`Wx{s*c;aybCT>oJZ(^B_iKBUY^*SwabFX=dQ4ddFKMHa zyZ!i+EM4RT`qc!i(??QuU+3TwLmcm0r>0_Wj5P4rpVcr$cwSndzPLH|U7ueX_t65C z!Yu>i!@wzXiB&aymcadi?7oLq(3((IDY|71d5xtP!z*o2G%k}Zb>0U0+ogQn1g(&? z@mkn|CUfNFpC8`4&Inaki(`H)(na6MDc%TkKX^}dZE&mb!+a0j9zRWgDc|Y>aIe5xl$$3I>1zANMiB_Rif66Qk^*tc zr>*z(ey z^i@`R8k6G@xbZw_uCpgn+EZ00YCYkq5M?H8#>6dS5uTB9Cd@rb>mO>ez@KyAXRA9oPPieJP zm@^Ju<@xNhoPt>A6<19v+>vM4^WCA6hPwF_Gta9Gyqj??Dfk)_k{s&Sq6s!E-{g!Z z)p%p#$o*5R1sQO@WO2*OjfqqV%joV*HkucnORs73hAdEmVFO0vw zNDYM6RHI-!ZUZz|jV(?Pw19~bOQ&tCHMY;xzwmy$J)XXqsW*L(Gc<3HTvXmq0Um@( z%Qw0s{Muu0jV2miXqhApUUG!-G^ugUtFEwHm0%&%Lq$};%XQLy9>{WK6DrT>AdGKw z=s)+w3uOaK>me3I#fmmiUwYyBkDyb^K6a=OJa?@o&;b`Z1kUohI^j+F6Qw3y7vx+W z$a*rz4Nun85i){QwC`^oj^TGl7x&zQyBnxjUNz*ttjPsCx14#{_Q4(l&lFoag7ooG zb>P{3x*-I)-R6YC7~N~K?JU(y@j7F_eu;Rv9%KWWqU=az8TVw#)w z#0uK9tp(9{t(ONg^^)Lpp;Ilw`%$}8_L_}b%w#~Q0#SEo4PanYlW zr^b$W`qnzqfaFI9Ij7~l@3zDRLmtVv7Axd($eC>=$07A3iDzfE4YpgnY}#68iwU_y zu^~lv5J(Pwc`U~s&x^j^_s@1ft)=3OUUK}+=BrOi<=f%Si#TFMg)?^CtS$U;!3K}d zYpgqd-5TYNwwLmXZNa1F|9pP3Bj$Up-rZp0fD^)%1HHTL(HebF)N-=}Y^xVYrIC2j z3ZFG&jioOJD_<9Z+&6Mon>y{b_QR{na}OtO_#w0Gzc=j`d(E`cyNT>Q{1IS^?#$_5$7 z1ED@cO#be4lJ8j%ZOn)O_#D35@}|%ql3OF=Gd=u}U)?+-A(x58eN*g$x3ZA+T2!EE z6B`m4-cB##z3{4w<#0dR8&mHbl@K8L(SG&(1EGt3F|lT80PDFoI5+YR@4U>0%(SOw zB?nme7$Uq^Y0L}rAFmBxA;f~3XTdpVel`kHGz`1O*|;wK_1WnWZya(yaMyIu2Y20# zcAUKI1<{#(p{e&tesu4wNjjf5wkVcF<+k|XcN`b!ko_{5y>zOY`zoqBj4Dy3| zR&~O(r+x_Hatph|@`tGP)9LXo{@~`;y^*po08INsM?OadV11UsN41jy@aRfSJn|#} zybbU3O}+=iYcGM4EnKC(KUdf5 zwJ(OhYTSx$_QCpwu;~}weDMAB#+wI+*|^*=<#g6oADHmU7O@0;Av5D)?u{+JNV&ti zeuUExamRH7wk4DE>#VqE#NiL~RpYliGyLJ}Uu4296@ZZ7d1?o>0x*7DsFWQUfS58@ zo@`AYG<{HgB6iXj7Dulf%suaoHS;r=B`Ut4ea%)&ckza{VTj7a7JtMXVD7Cd_QKs} z<%mpUKb$6X8hK9pfHAuF&XRFI%vvh9OZ{`k@oU)+=1x#>)nALi1Vk`fb(Lovj^+&+FZ_V#$44XIgo=S0)3~zC0~jILbt1MPFm7 zju#xifBKl_=YtsuwSB4^{m_1ePia9&06JG(enB|7z=ZjNdM?Zr+YetitYEs~ey4&^ zs}lvzA5ODANmHSvU29pgmkI+P!%HI5+>t6M?iRP%9p{Ii`#W)XfWG!#_O&Drd{eye zQF_b+%FbRvISMqKtE=6>y@-zBwu3O@bVtRyu1EQ@3}mN1Hi&a!!+z(>h@?H7OaFSxz6ZfyW=mH0)R{ThIQikr-LiJn-{5Iis=#sg0x z5Nl}QgVSM8dy_}JkRc*-QisPE)KgYM*(CmKH>p?}83rK3=1o-oN`I`LpVxXk#|Hro zZmrckr2NzWdZcg24;QOm>g|oEBXSnMz=lE=eu%S;W3PDOq)3FCX^{`^s1^6!%VOd9 z&7)?SHdJg69rj1z+*ohoAxbUZqK$mp?-LG zD}C2TavfNm&>51<^@B_Kx$5i7y)k%1%1n=>zf)8Ay?VQ8Fxo4qLtJ1Yx0QL;*olq# zg$*?GCtkQ@ANR8OhX-g=rB`ZQW#h^W@nl648als}h@P#W!OZTFxS$ptWvkW|ttqC% zrtRZU@;nC8cCP42I?TYrX@{1dpYI8^GsYQ9OFWVBrlhK!!yET6YRo=E^09*qe(O&G zqCQJ&aA@|*aayCxw_@Yc^14JFs{@>b8pj$Ut4PWP>ME%!tBIlraH8W>m`e2+&; zzyqgSTp!4n_<&fddYsnEMv~a;j#3FGhO!);_kHt)fSBnUsd^R^7QOhqQI~<}yJs(S zE%Cy5g`6+0vT*3O`pXC9bcnBrX=6AuFtbB1M5mU4sgZUK*~Xr5dG7km@VqCyUuaDX zX)?$i(uXQ=cL&SZG-DWtPKhZ?McbR9-cDL0XD_S&Qn0gkfWu5t4strx>0r z4Q8W1dPQ{dCl-i9ahE<2EF23u;-YNJLej@=uLm1EG18kgYq2L2DLai!pOfW5T^r={ zt=X`pC${L`XQN5NLFsWL6H2#wzMeU0f(O;boa%~JSSiBx&3CcG8UJC`V5SrFTC@5W zuXID%()tiCraKmB6daL`qrp}7%HGgkI#L>1c8{wtaOu?>r@%r6maf+$yyQK>J@vDM ze4Zzsbk-DqmS!ScH{@t?78A?fyiT?jVWFw{!30Md3yYkp$J4&EU|)Ji>BBNMGzAA9 zSa*^8%3-5~6D}lNn{FQ}^zgzkD_@{0%n5s*=V!!hbjN(pc7*{_4({UHuX?bRiPpps zv^5!m>mg6QkFXi$D)VisO9Hwl6uHjA3gUYV11cnJpzi&KUH8rwM_jHE4@mi`cg^Ck z<>hXO^It!pRX~H*l3AvSq9*v};g{bjYk~b5-|tKKS|a(|qw94Gtr0l&)3&L+wkQk> zxl}P|2M6l1Wa6b2j@}8mQ!~dFFCP}Z%u{xR;)+%GMW?&Mee(eEv z?AVvf2P~1}Gk<^W4jK$%(@$&@pyMdxmDBfabjaDV=4#I%={#=h(D6MC(BjX}9uoA# zqP&cX;tihIwxopCJL-vU@i*_P{F%6aN4euvB@=6{JUI!V2ANMu3xpwnslb44OWkz*Sjg@0OH%h_AiyLIApOw>WV!L*Sup--F7DBW37O( 
z;sf^Zowp|Bo1Zgyba$RNGIGVl`DX(O*^W?sEBrpm$pH^@YU*|3ozQSu%WhXO1;^T- z%zRDmJEKSC1$?@wh(4OvZ<*;1S1-BSW>Y+H_vn5J<2VoaAB?QD93c5^=;CN?8V!YJ zr9D>&8eU4T-u4gEn>TZij}5DgTha#SRu44T9a@`ifNxEXNDUr zW%nNq*ye%=;pY;Aq@FZ7$IQjR)g6`94)Iw`67HAT^SZxSLeSQ-Jv+=C12Wk``#Btt z@w#WZxH=VnQu#Z1t?5wkx@2dY!p78RE6yxg$%5WX`9%jbn5Z9T4cw-9!tX{vbbU={*-{F6Z|* zJ?V%|HKEr=PI_Q<`KPEKx(=AQe$Q8CHx(A{P5i@lObCZeksRDeuG8BCKOB-7NE{4J zzJHjGh}oGp#ByEXHj!R+na>^PI-hZFccx)wqzPL%oeAF~WlM!p7~rV6#{V{ufx}y7 zN^BFOVR-6^!`_mvxSeX&5R^^BZKGbw0>f&7_Rq(dyu~RILV*Kj_y<~ zNcDz8;R8(6(eZxt{lWuWzUVEHY&8hxTzNTR`WI+&LPWuwLj3YEHAXH zpvM!b4})W-Y-AxjSeUab%L_ueZ$B%Mc8hmGPgF0rFhPBEdcXc&Z#W5@aOq8AVCPl; zH~b68{nsJ=Y@0h16xH+HT3zl?6vx)3Dpc5vc&5Zv>%>max8yF%K4_Z>?FOo5I*%R#?HNdG{HuJ2t5Myf*mG zDjSTAte}XJ>zH?ee2lxZ1B7B9tkYXc!6`~fq0=Tu=;uV{QSLY)YvxhaQa*baE~GFi z7cFs;kN=f8$zSa+9x8V*BKOZri3j7~IpgaThguzqDm9(4u)L$K+ua5HrAe8!SIZKmiEORms9RPWc)Ey z7~WOT=nac?)>~hacDfVsOSTtW_XacLQ$!3Yzy64~SUKcK%01pEn>m8LF`e5ubc2Ks zl0y}0F5DvXRh-WmcUhSBphYpI#t&1iVm3;8dqGTe>XA};FZ9jXHQy!D6Sdr#3n+Tt zxSjB<(v8DjCA ziFV6-l@Ev5DE(|Q=j}~@%;9@^QhFvEo;zZ!gc@1+)?yjsUrXv|qr2s9!F1NmlJ1&hRG@Rz9W9DLojAP_FW;Rx7O(E@q231kL5s!SJU>o7I zVJRE<@}j%A*As)3u1ow6*o%UP5r{sEE`YI-_I*GI+EjzZ5=<-0r=f+I+Dhl51PFgQS+NJ;f_uVj_7m8_gTwv_#VRw*vOxbM9#v^1u-SdS%lzM^(wr*x+PPkJy6lnh59U)=6 z-%Q&<+Al?{;(EG0AU9tq%YUgSW{7utrg5z9Id!jL| z=+RIR11)n~pB{M0hWtv|8pAdQ6xDif7p1dMZTaRqo!np6KNXmhNy;nc(o&@XF*Kx! zIu3UqcEN!I29LIzJL8B#P?NwGCwOws`t*LDJ8T0|Eyc-s+zUuO6h!K=d$N)DJ&g+e z`)BkPoFnD6BiA1UiPE8W^uANP9t}^!SY@lcsi@&nv*zfg<9qhD?ab?RT)cCD=gMgc zWD91hGr62W8{^d6@zw)-@4vpQL(cDI#IaB7NW04cp5va!Tf9-{_h@){6$8N@Tl35F z85oj$KeH^{ljLKD;&-l->*e~Tt;NTfSpNkt-M~QT9MQn3$4LA5sL>}&125>_ynXeZ zC<_msbEqyGC;8ynf>RSY42;K~?lUR&!FI*V57gE%F__|IeV4S4>u|-zWpH@nhIozm z?qe*{Z}Fw-RWch2JdZ0ojA__v=Q~@)#SL~T<70d$-61jm{>1kX8V-p*a}M#Np&wrs zsf)W~{i^d<17z)?=u%?vjI;+|6u!D(yEYXU`KxXg?RCUAch)AEuO9HK`1Yn{jR#KU zWyZ^{qT|LxUq8tS8k~JME^*vW#B!}_t!Z&>Iry{nT?>ZQ86b<{`vq+S!aM<#NJ9RIhT8=PGX z=xMHBE-uW(ffc8im1#6w+?+eKo6W$CDWkh&Oqs|ZGk=m`N{3_Zt#e<={eyS)*)1$m z-;oVViyYJ=?MGMij=N-#@Z|gA*QCQl$mk&_$+u2;F=(_Ucbf+m$OqLXFY!eG+4spy zB4`i{7UCA6(y(KO60gfG4@7pof5buVbKx(gN3*xMV)c}gzWcHsh)}%ujN6d(V=S66 zI&hta;DSf8b(^S=l0I<0n4qFpPJ3wRA`OPiqeV0toQMY-)Yjj1wnWB}JJ;21REYCd zVj`=#Nja_}`KD^V67ecOZ>NMI4Yemtea|VGqx|rp?&3^e@JrvJaGW(ol(tcR_$__# zna>VbGNy?qCujNRODMtd{EKh>8MDy&eix6!lYwf2cu3nr8KOLWR(A4et5s zu1z%PjqciQY)FL2q*$sbm=j@B-HFVvmPGlF1CqVNHU#JUo@ljxd&0YJG{&mMo=BRv z`ibXL8=@fTtJ$(aOJYmuH=BtbGvaP2mypV+0U;tYwUx@NO>B1<33lJ=jwNo5mlg_B zabTX{eTJSZoHcH&7O-@}xKV;^wTnIKt6#0F3bsMg-J{AYqbw0vaw*UCqbWXJKH%@9 zXaK(l{AbUbXkqJvk?%%DN(g@t`eO-?IGjY1Tur;5RQs=zNOgl0G00gsU%bW!KQ_$0 z8(`>w+?0j)eNH%|jQd#ZaajtUS?-wr#mOBv3)l9KRFVFjQ=SG5)>QC!&28GB=ZY&s zEpg5#oe(*;U52CF9>#0OlgAru@Md$l()l(^SSoboHr+NutHw3ohK~j?x6?SIC!`I> zE2dw}oK^7R*pCm9Zc->$?lG~x-fq#IzhQ^nM=|16w2}Y#Hc?``tk}UbE<7-CQ2x$% zJr7>T7x62wG?98-j#gn~fL+Je=%h&dAp8s?)%UO$DDSpk>QrMx^<%i;r7|YCzT5im zHuS`hdGbz?S~_ZMy!u}{(vah%Z*a589UHbshT4$)>Y`F+oLP-4!n9pw-Zwfyr=lwP z-80fZb0b!%@SP2^o7C^IzgyyCw^yI(C>@+~5_&UBX6)3agWqq3%X>ctzJHmzg|W>O?=p{6S6pJ^nU={H z>1QlFX>8VBC*p<1@a4<*4j~)+R(|ZOxj5NNeJgJ8z$!oDGp$r<{AJ z$Aob09q3K*(I6&9EnU_hc0=ARdxyeAXV{+Er(Y-AV)k8Ac2>F-blhZ( zoOCTvU=>vfk$0tpd6ez^9?mR_#mdV^IV7-AF=#O z!@cnhHb_4&x;a6B^q1Te6TfThgfHvDMlN4)f%&z^r%p{Z#*2>8Pb&=6&=xJaE;Ek{ z?&%`8W1Hs@V`GaC@Y(7SR~I{nJ>LW3*v-UEGsi%z7rwrTh}I*P-_NpH#E>Cs53Fcz zo??dKW# z&f5HKBE|$SO(Vtg&f=ZaWM%&ym~F+6XJPj94l-6&|Sw$=X8GEr&|N7wP< zmd}#Jz}Q$p0*4M^@@nGR!f`!fSS<5{h5S5X=CPpS(mGXQE1%l2nNLW4nvhTC8?wX~ zKE*wFZiYg>YE#2;15B^H{wZ&+HqKmkHVI2nfmX{4{dJq9!01dr{ceA!MQ_l<9rwP9 z6X$m|Zko$yjVCKsiaGkL+Fw(~7q3 
zjZ=spW3~OK-pwU0uaiuyIwg;DvHnGk^-~adh_AzX(?ViRdh>AY84oCKnh431a7T}r zioL)*3e;s;X9mPw;I{TGWv8SgHpEgKKFisG%c*?Fh?+I>0%|tWO@Sb%hy#x@jnNtA zf7$Y)E=-dzj#|7?hk34)f#{ccn8yEtwq&&^R`b8w5|YqdO-tWxHnv|JtcZgv^Ym1p zU{x16ouv(-Jz_U!1Q6&=3HYbI-cj*%f^xNbvsLQ>-qq4 zTvfu--~8<0G^pxNF95{Dl-qYdqT`hJI`%{z6ZQ9{;Y;dW`g&-tBtQ7Z+}i(<=n-G2_Vi3z=CC zVVf)wC%5Ndm693Ui&6w0IU8V}SGTFvD=qA7(Ge2%T>y$`ZFS+OJl^sX5z}v|gD-4s zG)PAeF&DZXuX|tw=jMF*J$G&J$#AO_NVy=OFl=VhDo>nSC0`@`!yUCN+hp`s)4(fn z@X?+QCa%@Y_E5JrhcG>D@!Byu20yJ|HrJB|4TpY;^?6I2-m#{65vK#rR`TR(h&e+( zt<~J1n$(~2cu!{CqoTX|0{_`eH*iNtv&VCsk(OjHGhE~V>Fs^)AI{mLd~|;My^B_; z-}gDrY>5R5`Ww!i_A!FN+ue;dje3|%ox{E6rY1NCi*AY3Q&2iKWw}O+9m3abRNxA9 zN43G0g2YZo$Q)&>`I*ykghS^2scVK%P`SdlHkghrpA^lb4qId1y&kdKq~5KkAsWcN z$%3?>GS7%E@xV1=iEYda25$Dum$4@8G;^D>PkkiqW!wiGwvT+ULxQ8Wk5P#;cr!w6 zqnxNvJS=3gdk+PIWx5e4IpAdfjkTWmbhM3={pER?YW z)h%!0cxgrCC$P=Bmf9eegZbbI8Nbly=cPQz;fCcEbh-3q7c7;Zr82;856f-+g>DPT z_y-$a$$f?T7>v)KqG+asrTeW7FYRist~0-rlJ?miD|<5HT`jDUYJGGdh_n}%zRRvU z66}I+;=J-R5=ncsvL(k-4@dX}r3yFJ1 znJRH}2N{?2Mo)>Nds7Evtoh%=ycR;ttxL*hzAP?h?kuEak6V;+y5?r(3lsWuJCTG# z3WRv;xeeLuMMQaaa_jChdc+TlR|;#=O^I&(w@rzQK}>rk8WOEwg4X;GMvYR7uxRf} zrXF1n8yEL~D*vuZ$fZgx7j7{^V|90|X}uLLZqZ#C`OXrT?cRKRx7iN;JXbl`PWpt9 zNm^y>6HQ`%OV;wV{U)$Hx2Akkumc_)PuSUeL>K;kEgSmC^&5OPL?v5h9&8f4PAim5 z#qg14_u?Ns#Fg78^s77Ni0R%NJBm2yS|&3=nLHC7FeTo-cklk&ybOw-#>%A|>-=~1Sc7B&#u z@;$b&-vSDnNPUAy}Zz&S{sZPN=nIOJO{WHrY^-{~?S5v(dud|d$t7>wl~sv=`#N`;Ohk#@@gFFrN9Mr7PNB4@OBFhd zuS++MeY9vbi@WjSupz1=chyN`nnOKpm9w3P71mS)Nv66fW9YkXd9|Q9#x}`%EJ!zi z=n-*k@x>BQy>myjbIE0k*(L9iXSs_L6T5<9IJauy-V>AfQ8o?SeLQ!Vd+4zIbYuUc zRtDUguJ17rBJGmLT^POQEPSwyzOXNZ4e{NJP6?bK{hTg)xGpMs;ZvGx8g~^7?~7Hr z4IP;f^Yc33^0|xC6{WmJ z!$DF`kP*}gxNKk#4%J0G?U6Q6SeJ7p=!zw-wTt`DhZ#JUMtZzZ?5ytCqU5=Fi2`B{ zFTpKd9jFYdbQ^dXpwoAm!1IIdC_TnzY$ z7}R<#lu9+nT-Qg--=%7k_9fxKGxgFiso!}fr^gLvrY(!~BlU2HmD@ZMwHTo9Do`pQ z?OVsxZMG+EXX2Hn2=!tr3%hH=>!fd!ev#|4%bWU1`-@$G?Uco&J@~?rn3NGxf8E2E z?z_PQJC5v8&6@5GF4Llu-!v%jUizSg%hm;(1BEg|0v#dLqpg^-(GFkghwbjm ziF7+o@{1Fy)gxcNl5u(I+ZCis7eaz~MSpsfT-S4V%qgSlqmjMq?$KN=D7AhQIla#q z0!zyWW^J{A^ob*xX;oI}k)Ai95uiuv0s551q(5zdjC1npW?Y{eQH3ZhCw6D2*+^6lzH#}X=>mYXOQu69GIk#+dn zUC!wIp;WT(tOL9rOnoi!#1?O>zNN%+S!3%Vk=erE%&}}jKI#UW}`96S9FW+no7p)?LE?PlL^e#44HE0i!P4Ie{4TN`i1r9 z_VxGFx*&Je{!h%rgP@$e&+O)_u?t`SSA^4?C?m}NS@Ein z_}6;8wJh`~1@_$6N1sX2(7fz^87qW=x|nryFAX!XY3rM9OZR#~@#$!OD=!0^r<+lq z-y;16ISWKSFv$3!+d_#7Z`jcOcKJry6dLA~_U{c^XODBj?2$5kC*0`Q9$G}|KOX$g zcKW_^gTIP&#K7WDqE3MQRRkU_NSi^x}i$(5O)3h?m!1l#@l%W~iEGpD-bw2+A$GY94&G|eBUnlFbI+z}5(6!~*zDcZGw02S%Ng~~w|sPo4JjH8p&u3!XKK=?NF^B(LW70T z=O2t(Fy?9bUOT6br{7EGA4{?%)YVl&vRaLaX)_}2t~DAH&iNFH&TegjON7nuzHK3q z;n%!%<-j~5@^Zu7jI$!d;>?LP(K*8wdRSeWoiht=)=$*CJLE9!YobC*9|*;e^wSY$ z{TB6g^yI5m8bsTx=li>kNE1ED>o08V)FT>I5{+kDs1R|=ZPg!2rHFLxqKAyxU#f2z ztY8+=8>;QAcD#ORrbc+3O>FX|>kyCp8&2uk%_SBEDu1V!=n%QvI#qZ#&Lxt*t6gOn zE+7u}rJQ*!X@dX*+9j?h4v^sMPm|rM1HJBn=MDMe7G2C6hx%R$ff+k}&zjts#HJ7_ z>+vi(Vupf`dCfAwOVM)4q38OgFgzh;9W zkw?4P*!kEPLWW~G8&+!(uE%R#caeN+o_hT>)`%;@#T8#?dctt2fU`4YlZSfWFlx+WUo6$VJUu zlR9on#B^(XJuj$9xXx2pwDQnYBC5{${La1hSk~5F&9<>bv--?OQ9PD-ykj}+rqWO4bAK_FE^?q1{33I?9W@{waog) zp4-NF93h||YHSXDLDxevg0fJ)ekn#K%K)45-uA9t=?+7stVN6ZNxiC#qvPvgU`oCE zsX)@-vRO|+wo|ftwTO)(N9|&=CaRnJY_elEs1RFeUmxFiA&9j4qAXQ; zdu;bVmSa~946NtImYHF5&PWsO!H$<2YfPasq_BNx zqZVxEgb&Y7)+XM4{Ezt6VQLUou{onV???4{ z9AZRQ%3||rksk%SfK`>Qz6lp~@cf;?0R0=u|F3LkT^1w#C6Z4^ytaE_XhnPNIss?6 zPcdF%r7*K`+`2drI_-0dY9xZsje8F>(>oj>9Bn3PKn zne^Rxt!0X};@sbSNIguRzAoQjg(Y-V;^ksUxrV1DD>Y$>A*MZ@<9+ypDfqT}R-AcB zhlS8^Wvm4SNo9TAW9`mZH)lbE(@YOkUAn54zmyKjY0;C*rQLCP(|pfs(_S z3z{~ZsL@3c+v?JZ8&8W0lEdBq0(>kq!!m*A?al?lhogIEw 
zI!KAhXt&*IjlqnlZ_8RW!R9;PsCGyKjc1i2)$ec;l@Ev`)oSx0)v|wh-5zZ?C9pb= zeJObuG9O=?8}}XX6eErdCrBI$nU0W~R@d{Sw9t0v z<=9+*19+ql)to2gm~M%(f@w~!@Nsn8U`E=5a_80PTEBF_yV5N0$D3WTZ!l0=(1rA; z+!qqEcXL6R&M-_#J7MZs|JjPK4B+}mI+?#+*o+6(5HxoEn9AFNZIF% zbG79XR4X@Zeh`{zECo1+e-ED+qT-~ymILz{6)Ay?&29S~v4^K1+MSGd^6V|!F8G#= z5BtpUB62j4Vz}}s%SsD_S6O9uW5m$9;Q56o)$;IJprO1lM-E>057MUICF6IGY*0JS z<%Ij^Kk$4Y&mj~qw_>zsS)=K$_>X6#e&gWX;gdl6!75ZOf?wtWEsfBMP8>7?E6t-mJmT>`UfjaF1929WV) zMS|Sj3lZFRF87gp2<&BUMXS0vVMX7(4o|Bfd^erbvUrXuu5e`Yt&j-CLub7V*HY47 zUN1lQ?qfZ8J4aubb_~H?&A*QOXR*eCC{sy^!FSIzj$NmPB)CvOX^ZB zNq^{v4eDNGeC+$e>3#X?42aqF+OJe*!tnWpGdA*U%*aAe`b=-=8_c@xGu;=g)F-4TOO{--i`DgRs9e|1KNB_@HKV#BwAU^x}u{sz(-MyI+Lq zJqtH1r3h@gEaeUxj{Sxcf@GXfL3rK+QqE0>8PBl^Pb?m}UGZXsh1A`-GFM-b@yJ@y zn|2KNU~}I$!$+-tpkEKAG>4J#?7Q7JUJVIEb9k0dw_gwjj5h4N?H!DdQ1Bu`XDjT=)f_7~?{3tJDW_|M=n(x0XrD=@u2UIxdUbqt;JPzm?Mpg87}Ci63lH z%`vugO1mR?EpzlwH@cz-@*9fDb4Ke&PU{Un^uW>G>vM<(I&NvvIroxsc|l+I#kL9- zZfra+eX5M~Bh5?QkVMACUvHkFeLIEzrfG>lg^DB&StxMS?L)a;0Ee zcQ6LZ4jz>&S&YzWd|ML}LSS@UU0%#-2|m2ye=mE=1Ckk?%=gF0cqr4bX@+Dxk64$< zN&!-yHcAq1>rD5;OpTn3c}YIdzZdy_EY1&V2J)wa4FixZ9J*zT7I}`Xn|e7)BM4Pz zjbF1C24g|m`dbN#i($%I`em_v2wYX@YHl)1P`g#_?mh=gO#M*5@Y6;ch}dapVG#UM;oGi%ypqjHGa9^ zfgXbIzF+($*Z}+OPIcFPG{l|j!Cw2f8e?EEZam^*0L}?s*d{+50HYiAhK|aC;2E59 z>$`s-ngncV=MDrSTVrES@Pj}s_Nb1K5C|guylLuz63$pT^la(v9%oEbvMgS7(glUV zhX(eqa)tTfFZ0Gt-O%=w5fH*h!HPL$hZ>qF;8jn3bCBepshfMZ9oZCu;eCeQ-^)Wl zKels$=Zg?rIsg32S?MK^zhQP~s@oDWj%?q7;Pp!|-1{oDs%#19BUT0y1504G?Yml7 zo;lLgF1B5av4@e6cJzB!@_g0a!Hw8OgT!fuzFRC4rgP`5ilKRAH+3t9etxa1)#wqrGTfrf+m<Rrr;rN;jksZpx z&{y6Rv2%d~I+XTa_jyd7W0?7^{Aso~DqH*GFHca=_&UtRC*2oYOCtGZUScDn?SlpF zL?B+CYvc+Z2!KkEYR4?|AlSJWt$sK_LmqWYZuE#d`n2-)?Vag@XxUQP9Cud?)C-3g zT%{tb+@|5v6%RbV)Ui41JRP<}%d1xxd&1n|%H`x-7S8S2dC_R67q;Hi8*kd+197p0 zEaiA|9qubr-)K&r-!R0?=N&TEHXK{-iO+c-GkE-25ZoXaRzSv^FFJG~!p4>Ke-}Bu zRt)q*z`FjrVdF~(_p+kp{&C0~L*L{(jX5j7ZN=1L?t~no1yWnB0pNzk`2QrL# zwoe!Ig4EjAL%x&%XnB5LdO^_>8`tTKZ4qbTEseX~i^PW`^VGUM8|=tTNDD04)?BoI+19Qmuq1QY)^0uCc{ieS~Qql`A+a@FuCwvie^X-Aw zu>g$KglBR*3qs$at8Ls>i{bc)Qn)$A96~KH&pF zINjlW^}+UFoJr88S~C{o$uniD@zFpOB?NuyOYuh486Fc)K{8I?C;Wu>9`ZbJpd#;a~^yi@U(U~~`KOU$@Oo)2kMn45K#w=;5T2Ht ztgy@iLO>&&cy!_TP)%?xEt#YrjF>xFB$C*u6G5jD}48|Ha;0KxdJ)ZMrxC zLU6YT9^9Qxh>!$`6C*CTy9Rd)u8q69ySux)yZc->bk6yvXHK89*7U#T`{pF8ufCt> z*}JN?SJg|^`(C#{(U|c%^}(ubyuZ|JS=BK?yocDV@QvalM|qpYKbt?>XD}na!=b$8YxId1l-GOLtZcF{vvIZoYJAxY@lo@W-4c5oS;( z-%O*fgc*m)SFK)_<2}F(HT)uZpL@-@BBjU73^#|*bUJs1_r%ZM-S9relW# zdcNIP#q8^nX57@>)y<|No%(f;t8K<**q^*RpF1|ITBl!7B@Z)okk^`QJ$=mJRfl#2 zNBW!H(*`8jR4>@viMv=m)SmZ3TTHm@%KIRdy_-zGlhW{2f6_^4k02Z83w{v-=`7w7%AeFLgx<$bZ&FP>lPoY&V>3UJPK z@Q1&-Tj%+T-02&cLYelZw>lkWGM^}2GWamhaTg0{KRa1NQ^ev@=(xRg&6Vq=t#=l7 zGIwm#cDet8_frpKDY}I9*8XH}i=Mlk&ASn|P8RCrY@XbyQuEU-H-Hn^D>8g?7!RbJkl8FFO%Qaw27U^RO-TiUOk@r!C`|W)h z)UUBQ(JjXE1n-SLZS?F@E8Y(uUcH6om8+~Pxm$dT@Tg}xkKE$E&dSYnyE^H}fpHDY zjWOv8X6L=W5Tox9onredED{5wFG1rv%7eZxFoRH*3uNB$;P)`E6ccLL45 z+)ctlEx6wH80KejrlARJGhkt{ogQYus!B&Zs(6}VW6$+SJ;BqYXkGI7m>-_Txy9w1 z<@?u%<-sDQ{L}xR;}`|@+qRs(7TmWj#}eqQf@c7(rxfEovu3MkI3Ooeed<`8{%y? 
zzMt#(nD0+`d};COOxt`+k0cFx9Npk!rjP7D`HWv#V`vpB8t+%<~-OW)xw4bbg%OsohQ1)l@|nT{;zMKIgw08?`;k z3_21ub@KFRQ~8hdt{s}hn3cu4Ux{82Y=SQAxEWPE+T?hWBYx@PK(q41^Y+b0c$u%S zi>2&yHp+CoWmnwC#lOkFc44>J>8PUM|C-p`Dwow4km z?*69Qf~b|%ItQAbBQ{trZqK~;Ei$osqY$$uY~(wunqj8bo9Dji7~l478ax=^FT(Vy zH74oZ=3K8`J@}$O>&Lk^C$eR!6~*%^g{JJXk2G_qT&wH&(9hWQxs_v>eS|sQ{d2ru zhfveF(47S(M+6yv&%hb!xgR*=bjEjlK#*zjVN2tZk2z25iEs)$6=n`)%>QWx_i?rn z8DCH1{qq-ni%;p!`(ArCU(GnmlKa;1iPgGY3pW)#&Eti;tC~-qN4%PUb1V@UYpoDWo>iMv(L7jZpPT2UzOt2 zXk!lUEqXEMf;#5-&z5`qJ{r^e+v18hx!ws|Sn%C;OJ~zyexBAlYt}XGA12GR!nUrN zaw`9$<5rF){>9hXYo|u=p23uNV}m2jf>EuPAIcMD=B)WLpa|E&-M4%he`sX1v0Gs= z^=1}62Uzd;^ZphsktXN-{^4yF@%c=d zY^#>%i8L!8&)I%GW3(x9>qJNU=`rSdrJ|L4tcWn{@5JpW*DKO2F12A~{EbKx(WTGM zuw79m=iVv{&vfPUkumSOoSznLijF+ryV=!fg(|d2Ky#AXxFPNR>@;&fi{YpDGdPjh99GCH}JMX8j%@n_3)!jfd)IM(U zJ-cwz?evtm`L7z8`8!g?eRB^qyBtnlT(LcffA5^0EBisdccXh~Mb~q@=iP1L)XYv1 zre52fM{5q@bFR#nre>PQye&3kMR4|Tb8h3U?OPr?nQ5m^&8x!aQgQAxJ9VGJ=QHV6 z4l3TLk?GZ8Qm)i(_#7h3mdcyL`F@`Mo34Gl6Kq1xKeM{Zb=vM-SE^lG6J_3754kfo zjlcQXb@1oTmH3{Eo7rZEZE`Rnhu$B5n3?Z6s($4~;Vy0_SzxxxnYpho=W=8G6#iYI z)4b;m>M!>)vlgd#U|rJ3tX#T%!iIr81>)ZGUR|N#PiBs9YYe;^8E%L(@&(E$a-e`zEnS3-e<0wG_c(Vchj(K_E+TtxbORVtHRu!VaDp)&mvaG z!%gglg}E1G<@>E3=kg!N_aDSJ-?!GyCfLkf>ha-^YyReP##ATnbHB0A&2g8v7x(jn z7ygkldyvUcr^5QZD?`oTq)9W+Z{f38T|kiKgm(U>?2xp% z%A5=^QIj0=9Hky#FPxdQHj?)(lZGCPUgT$zT^P4|FgwwWNGsSNioK9rk*g z@mF(|-^=yg&krt{FgUmudz-4I&(HJ^LALqsHdy^%$@$L()I7_Z_;1%in?+u$T(C^W^rX{ zWAk$7;tOSY|IGFJwcv~4k;c7ilw(wrXp?khpX*kLy zU#ax7l&|S_a7F4bd~Q=>`MB|WngGc|WLuX`RQX;_axV>^KW5FbX3B`F;pX|-g}0moB237sG+8~Xg_xH^nmqXUBg~YZxxM3|i`*w9 z|FX`_JKT8G=ytS%MW`9E;qkMM7u`&B$)w*ljdL_@Y;Jz8>dbSm@BM4oCv`Swmw3+p zJl@TeyWVQh;H8e{h*P;DM_ipv%z)XcJB)NU>2~xT;q6i1IB&U8v*nE_~uE(z_%`Pe?s4~u1y=I8iER;|WInVL1WPI&Vr!jwyy?$EgnQRYI& zYnRhBjx_gzduFNEGtzvqZI&iGpW8lamao|LnY`Du^YU$9-gA#{_Vhr(WS(Y1Sr^C0 zal9XL=Gdi&JU5oS;KMr8t8%~MIOlyu-q(s4I%~ZOeZtLy*URS>TODRjZylYcA@60KvI({7m?PXYw7FAej9rx35`Ayt z^6Y#sPI%2GNBO?Bb7kARG~Go@UN<@0k`n|Jx=(%`lq{)-_;zgTJ zKE`@)uDHO3p~ipk^zS~g7$#}HP1ilxd?VY)&>bsf_4U3kW#dT1# z>+u7E(=;|)_k7IP^N+^nLiSYOdnNTWCPUSdS(p2n6Ng{qxySp@@0SftKO&Ut$q#;| z*XsL2hCF)gl|Rz>O`VkCVKaYIY+^I(H0Q=NE_Cl|Ba{DI1>Ze<58eLmF11VY{Zd)Z*=%ea5MrLZ{?hVWe!dTG zhiBzg*}YAvB|FnDOUd^JZE&{8kUYd3yZ`=6OxXZq{j~a&tvpYZJ8aeI^Sp1FfAP4K z2iu33(w7&Hjy~dU^8PW&`P!;T<2cZ2PI?n*;ywmB-L2(g_BV2C`e2@qDf#ll%|d)0 zS>SEDwFd`jfg3?e1%0f9P4;7#y zRD#M-1*$?ds17xtCe(u3V4w~-KwWSICvXNAs0Xg#2JTQF8bCwv08h|w%jsKzeZUv| zpb_{(00cr11cQDXJOn}^48kD-A|VQ*AqHZh2{eUf&>UJoOK1hHp$)W!cF-O=Ku72V zouLbKg>KLtdO%O;1-&5-`aoak2mN6H41_^27>2-57zV>(1dN1HFdD`{JdA~LFdinr zM3@AVVG2xzX)qmTz#lLZX2EQj19M>>%!dWA5Ej8=SOQC787zktuo70mYFGnnVI8c8 z4X_b5!DiS3TVWe)haIpJcEN7g1AAc~?1uwz5Dvj%I08rE7#xQaa1u_zX*dIC;T)WY z3vdxG!DYAtSK%65hZ}GcZozH119#yb+=mD75FWu}cmhx189av<@Dg6ZYj^{1;T^n( z5AYE_!DsjaU*Q{khad10k|gCn(6?$QhZK+!QbB6a50R&Vw2%%gAw5_@2FM7RATwlv ztdI?|Lk`FZxga;>fxM6p@QDn}LM^Bb2I_zV)CEUy0%vf6df*Cf;12bn0W<^;@B}aL1|RSR zKWGI05CDM?1i{c4LLd~vARHnf5~3g)VjvcpKvQT2&7lRfgjUcR+CW=q2koH)bc9aO z8M;7M=my=P2lRwq&>P~Q5A=n8&>sfCKo|srVF(O`VK5v*d{_VrVG%5bC9o8h!E#suD`6F^hBdGj*1>w% z02^TwY=$kc6}G{4*a16X7wm>Tuow2hemDRJ;Sd~#BXAUs!ErbNC*c&FhBI&$&cS)O z02kpBT!t%f6|TW`xB)le7TktAa2M{ueRu#5;SoHBC-4-W!E<;4FX0uuhBxpQ-obnL z03YEKe1@#%AU_lUYp?-Zu!Dk72ns_HC zpfXf}s!$E8Lk*}2wV*Z_r~?jA7aYL}oWTX^fh)LyJJg2;&=5Sp6THA1e83m{pb_{( z00cr11Vdv8flvs8aEO3Nh=OQ{fmmn)O`#byhZfKhT0v`Q18t!lw1*DR5jsI<=mK4# z8+3;r&=Yz=Z-|3F&=>kae;5D*VGs<4Autq%!EhJ>BViPbhA|KiV__VOhY2tdCc$Kw z0#jicOotip2h4<7FdOE;T$l&*VF4_JMX(r_z*1NS%V7nqgjKK_*1%d=2kT)2Y=lj) z8MeSy*aq8S2keAhup9QkUf2iw;Q$Oa2xKxUAPDL;Q>5^NAMV)z*Bez&*25Ugjety-oRUU2k+qne1uQ% 
z8NR?*_y*tM2mAzG^e2U6kQ`D#N=OB%!2;4iT1W?$kRGfc17w6ukQuT-R>%g~AqV7y zT#y^`KwiiP`Jn(1+~FI9dLlU;0R9O3@%U)T)_?8p*}Q#hTs97;04~`1HRw~jldrQAP|Be z7#c$eghCjELj*)Z6huSfUt$X+wm@PFB(^|e3naEcVhbd;Kw=9dwm@PFB(}i+-WI6q z;qj{g3BM!G?~(uVEr0zhk3LFI!M~STBx&gJJD!EqvY?OI^w-t;zyIs@-@De`|Kk7N zO8@2-@XJitzh6VwU;bbB-|8>h|G)VO`%l!w7D#M?#1=?wfy5R_Y=Qr=1^)W`<74!2 z!N2_ZIF2<8yzfI-atvLDz5dmh->= z$=~1gxuYIt;V-ez`L~p#|2}o_V`A`*$?q+WoKF zURr0n8aT~o|dbA`;LF=Z|^_r|Nqo)iog1^+JF6x^@4i% zi$A5OfdAFsSnsNx{rdAvF#b_Z7&jUd74%Tr-~3sSR$=bnWFRRc6x0K5iGTkVEl`kB z_(M9dgwl`()KAsH5eh(humg?RS`Y?pArLZv{<*y=ScBGsOb`pYo39N{&;nXQIM{$Y zjc23DZ)SO(f3BJ$-w9g_?5As0}_(5IB2Bjb?ghCZ)1Np%Yv|shD`cU~) zXZ4Tj;{?iQ1!{NozmBE-s10=-jRp0C%Av8KK2u-BfZDSbs2-}L`dDMb8q}xiC-sBs zu5r*3)c5LhjU|mel~-+~{%8ssPwFEB>br2zcu-%b0kus+zoVVoK z9h8MapgxZPwP7nzA13ruIc$xy;*c8hLNbVi@}NFZee$BQ`@R9T7cR|<45Dh4%Eh~UqMj))W4k|VcghaYfPvg)IJ&~ z8cUXt97=&T7>ELAQ2S(s&fpEDLE}T?zys8lYVT+$1MNU#Ky6(Ox_}R8ylZ@F+|~#6 zaaSk;8dpwG2Q**QzcJ7r)W1bRV?<*k7HmM{AqS{`{lEdNpcteF%^i)K4v-TP|Nj5c z0$iT|s_0)ofB&zp{XccRt2v>+zh3X&Tl_n&^c3*#e!cr&-5&dv{%5tvUq35Yms>0J z|M>TJpB2E{rh}Y@c(qb{!h(;eLAu6_t!PSantX((*KDy;hA>w>rYoA z|NP&--UIhp|M&aXwN#Zgf1gM5pzXT9&-0&{`cI9Ug#P^Nxbc1ayH%9_pD=FztJ@<* z>fdYee@c5KjOqXT-xSLK3i&!!_;nAGke;ajJ}sbouxN&v-UaIlfuQ%XJ`ztMAH5EB zvGp!pZ#auz64gCVL1>rk_x+;Yx4VU(33O^0409nIsz5dJ>Hbyse|qOFAHLp^)4i4t zjDk>Ny6vDnMWmlV4fs{1t;?0V3Fw8`i$ctpH5zU~L@ z;056Rx;G5O(7mqill7ik9kdTLhhz93P`w+c^n0Lp*BU?xw$pv*8HmLn3AZ5>y2E$k zRnXQ@j=Z{OehRt=90a-#%nZ7Z)IIAo(0!@i(^EO;f!+tyyMwwX&dxqEVJFAdwz|iy ziN6fhdwKqF4O{J0lDw(V;-GiwT0v7{8Bop0e9#f!5gkm-30wEEBf%Xz4%Pi`a%|n( zW+spNOvj0VZg2~9kNXJ*U{65%qUX?z=o3`;&bqH30&^e&^f`g<$5ltY^LPSP{q>%t z?x({!raRgYY7l!4S)et3bJUflPc#=Slx&^wOD(uY+d!#!d{7{A+C2T+rU21_{oMk zpdqL}-*81W29lvZpm!;o;a4E74XQkoh^bA~<|jdAQNMHpy+7Iq)f{L8rJ(`aX&jw{ zgD?y{;4^8t(E8vG=kaaGI|5r{s{sBDXiF?3IuvF=Bxt?Rn6F8i#(FXAFl>De<_Qispt%*cuO|$*Vcwf}K#NdieTW zM`Ka#QkOhC&`A8-FcUQX`hnJhtfcGnB8`33RsEhCTVr4IO!NH!b}!=TLG4rm9ums{ znh$EL2vmKdW352Pqcu>qLp}DPzR`HzKwNV+gjh;=gx?U=*w=Au<8OnU#8%^L>}V}) zOw0n+_|J)*8M_hs37QiNLv27~sUm5b8;?=Vzn$2p(0J61ygL3S&^YZ2nh$NE1pFY+ zbl_*w(wu`hrdHoN=vo~+s@2ULVi#D;+*b`-i1G*9Y+=BDPz1khTg z^ldN&>f>u{E+?%czUGPQToTe^YrmRfngi!yDs~p~hhl5r8iQrC-@Mqc+eO-vFNYZKTj2EJ1Tb`BRa02h|+$Le*Y3LF2In+5xnF z4~IgKA7+sj2{j>@?e`Fyj=fO|&ct@%d!W8(axla-Pqn_QOj^Sp=4(7T%Da1TG*Nd%DgV2#fG5p~uiTD2AN}O2a<< zl%TPu@wbVX)@3iYpMt9FDyzo)ZBU(Dz=HS*e66{!@UucQ{4Y?ESOmHY20$k$2bRQ> zp*G|lgH}UVg2q#Je9gy|=rPck4mHit`gvziGR6d=1 zwN_q&me}dger)pu)p~y))tI)y4n=E$)<2CmwYSRk2WSm5un{|e{WL{i;HLqN|3;|B z{za%ldN}$F4nr5xHU0_{&kY)9E%B|0{~g;EG#At#Il&(;g03M7f!2s|_>18=wyvY!VwcDEMl~1Ih8ib%iLFJ` zLI+T}HP(FCMr()4QWn+t|22k)YffoCW`icgzkH5i5~~LHt3Ank*4x!J`}-k z2)6j1Fb+E%@sZdD?S!hIbgbGi3p77g!GJ$wPN>S-9Ja$#(zFItMKuRBNA6<3K{XyP zqZ(61;TC>(e2rJFVOH=0-v>rO79jNNdvvywM{`_t(m1?_-w`_iT>$Rbnh&`Y1I-Dg zU4l*cTEixR=15ii;Dv_@%MYwT_T&7b+G z=1B|C{27FnK{anRr>bD<{Q46A3dCY(K=*>qrAtt)+ZtPAu;ZZtzRqvDj;fEZ{?Kvr z!&cZwdL6VS_>w1K{AoUEUZ{UHcYc!I1k~Q?Av^Y4P@7yrZOO9>eT5$dC&350fXZSA z>S`XYoWz_t!N3(&}_;26|<=MZ+uGX`cpmX#obSe4TU>C#II8d9E z!hVWs980ZX8tjDB-~oQ5CCnMk6`dY*A( zI&O8)m{q@P9zDWN1_eRwq_I+rZ8Y~<;lG1G&^qak-X`vVeFqJNJlN_B^`ReVZWTZ^ z1~o?Wph2Lu_6lgusNFS>G-p1Lb`5lnIfKR$*BqLHUl!Ha8I7g~%@wta);C-1uAn)i zIi<2_-Yi7-qONFt&|K+^)&hTMivL~dpmkMiSqtKtFB;!k!%l;)nU)cMh@BeRLO1*o zXeZKiOpS%dkP)gA(|DZ>n)|BrN>E>VK_g&7Bz3 z4Rl`Yh6aK9=vR#N(b$%#1v(Jb7@CJ&6Yu@VoqV_)q8ZWs}wXepF)_s*zV^!m)I;c!)leVb# zJCZcbY4xAR(ID6d>aVk)bJS+?gkWo4tN%1d3gi32NHCx=wT^f=Bqxu`-2z-d*G8{M z*XI^mTQqhG5TA%11C6UnsK$=Yt!j@u;Eb=nlK%kgL1R^Y)ev+K^O^i=vpC`!GxzZ` z!ZOgDzE121S_o{&r}3uoq%qVRbX^q!YR~LY83usnU~=*$#jXlk56YlfP_@6#J9AKt 
zDb1n*7C#pGLAAVqKebr})vOTdihNgkWkrk@(lLB-t z=!9x4)y3ADvI_M1rH-vWQ~xX^JriimXuNcXaj*p%6Z;55uzR6uZ}oEq^cH#(w6>}~ zdmtRnL0KpQm&vE`tNuTQtyl!dbVJwUM}R(q$c(lk))&>>)m+i_%Xe&V(6xxhpvJA% z0uS^dXzghYB}vn9+_Bq3Mf{GCkJw=RZ0H_TW3Vo0{HktWh?PM5Ky%1~zaNT#+T;&n zYGch6waYM=3_2HfVgJ=o&GE8mJh))bfNG#NAA;ThjrG+KfvsyJU0-E^>%@wn9>i;6 zYc0u&ulfh$--0rVp>@#4Xiu~mT9rJh(UR~1n&OuyO`lV0t(-zkela)!MM2|P<_eibkfhG zj-*||RvEN@yQ3TMBSG^#C1_q9z20Kob11$USV$bFJAL&vl5W#m5`x{zUT-D~5hXcccEG^OgEq$4`S+BJWUat=$zMBQe!i<0ukc z0T-YpXkF0SqrP=zyJ-A9FcaMIBVie7sqv#wZDUI;3AW~q=CwX^IEOtQTWd;tY)9-i zsMh{R*sAX+e0@fc6OF^a2MwSy?1DGY3bbZTC6D^a9$Wj`2_E=fu#vQW_+uaow$`K9 za0WDI8lox4_Y76r2cf;8B4|Bzgv_9ETOQST(RkDxnF5+0(?IQ^`J{7LF;LlR6MqZp z`>D_gG-lL?@u2?9jA~50L_eXr4od}b*kPdZXlxEc)5A~f0l48apkjaig`R54OsbAD&>ZhBj~(KQC$rYIoILp9`p5>KEkK=UEyn8zC!!CTKG9 zq`}sF(RsN)b^}<5-x&2m)pst~mJp3S4Qj#`{Nbp^n+@v8_H(ecjX~dm#@!_Rdhi1K zIOx3c06z;f!v74q1`bCLgZe@3QWmt%-XxzjjK4?o7^9@IGK4fjF)aS&DeXikhkb^g*gJc{2QyA1jsH1D3FB|zI{#n<@$ z0xig|Hc+2M;H#}vH?@(j`@3UjK+B-M#B5Lp7>k{YxQ?r9b)ufJKCkdKwly!5ruN(f z`Qa3(FJs8Jhix=&G{&_S)hBMyoTxvjeogQ%!fyPVXdHBgYWPV}U3+Ryyv5gg8jI$J zelP=H>s~$d82)w8TAU8luWFmipnf<{o_VM*@xpKye;~dC>Wu2&>O!y;&kfqH2bvMp zdO8t%F{*Qg&h1^WGk`ySUHts;41Y9q2Cd~=1nCZ_A84HFd|CoJ;C~>kDCqj?2fpg| z9A9HE8>Gk9wVCE{d2}!Ol2{~Yp80{=Dm`&q7>AvOeCnr4*w2Y+{x*cW&=YDyTc`wU zK>fdpyvtz$s9c(p+U5v;L1>I$17G)6nycHen_&k+B>qwSD$w}r z55eTob^a_=`&9igfBoBl+DBtV+dqNRFcS`u;R1wXYYm@|ZU;Z|-^17Vc7Zk6jt~kq zq}4$+Zoh)Yq8+gu=wNsOT4PV5OW0QPsv-6S>{O`MmfKK(xGQP{8e2P{BY1+wtk$!V z@R4mA5z{f6pgPxTj%q!(f^WpSLvzrai6W+BsvVs`pK;g`AAzlVm}1zKv7=GVORX&$ z*P4q4od&9NWe6s%E4m2+VKsSHz(SafzY}V~R{U>Z51RAAPz_scVUMjgRl3$Kjf1A> z6g1&;te*I~zS3HxHM$h~lJq**nq#=X`td2MYk<0F6Vw{jI+GhaH@3>C>zvEPxy^$S6Yxj+l_^b*bUG?;wP{dU~fk2p=;4F z7=x|(rt#byU)M@H7wCMJ6KzR+Dr$jW5&IOj=Fn2mcuN6&U>yDgRDGw)t8O+)7Wo^T^5~8JR{lzly5TXOxhXjLfHARb${-FJq_xB*19E-2RjJ7 zu&+P}c6U^3j@Hw5kc@3Jp?2U1KgrV=?GMx81HL<|IjQ>Vvx(lQ(vpM5i^jkDbw8BF zK8MyI-(Kt@pfPHVuQ4!$vonJ`P@pax*-zBtzj-SvDSxNWD*IJ_M z1+|6xO?7__?O{CR1)X1qgT_p8@~LeS+NCVE#<}L6%6I@h4|lQE9%_p#*gE!VP&?#C zTM^eB+<>j~;x7De=m1bZRYX-b9bavsYi*THV@K_Df;^?6EB<42E-~HnJ;c^A)gG$9 zj-g|!9dd%Mp}(T4tLmpUSnF$MP#tHojmG6iv;dj{wEybpGF0^|hpjpmMRl%K{a)km zMpe(D*gF0i?1cL1{I2^VPqZI;jXc?~BS3ZhwT5D=esA!1limSMj;}SSJ!lItmtvdH4raEiAY6pWsb=UqA>aKe0 zI8VrT5H5fl>8h`ed6hKPS=Xq#PS&wi$0x*8zl~@V>?W_)Wz|h}ybGF3D%W+=2BKOM zRBzQ;=Nr{o*DzX3Pm-o`^+abAF9t{94gMbV7^=FT!A_{N7x^{MBefmzw{Ra+UzH&X zSV1u8SfAKl<8K)D4CsTO64h~Qpv&>q2IaA}wyG_(cBu`9Ve8s(Ep|ekRbQ>Ms+;Qg z4NU`DBULYzNp;&pn$}R2s|l#>v@ScN8jrKEo1zZnQ9BgJ)*4Y0)p>Ous&<%#s&CW= zI{#}uUk7T31?XGiD$^2FbuWkA2lXUPbyrzb|E<_ViQ9q7p!HwvkkAG?$Eyugj)QQB z{BzKY=w4K1PdTuH$Egr!W9! 
z6Z;Hm2YXcaD-*GGK5hZ(hZV%@qZL5yp|;R*G&a=+Y9EaiU7sH&Z9QncP+OcOEuk%R zeWY?F)VVme>imv8dr8xEgX(?*TXnCCpAJoEgAVwa@cWRS3Vnv!qbifyMAtn>U?%9C zHk_E&I*sdSe6@?pvJk&BB(y^g;@^p@9eSfbLH9c@=pay8)E4Roonvx>+C=Sf301w1 zVyoWQQRUNlO?A?7RZpE~Vu-7*OF?NW-y>|@Q_aCveQV>V1I-P6rl5NSjq6GHoj~Q# zb)x1|E>Ju4#aDS6V5|N2p(@i>RAo|ooCB3fbyuEN*glX@chy^E%7)$n)q4y6P51(; zx9)Fp!YJ&qJb~)(+Txu{CzJ?xu!>wou)5EwKyTKpw5-s{7PG z6$;)5S9Ye;4EJ~|2-V%vhsqx0!yP#>rbZlF6s_1D_2 z`o^N~L3Pvh*)7nqeh^Ejo579)T_3C6I%7YBHZU3jK=*2@XFs-AAD0A;DU~TNw#suJ z?t|(VLVm4bs-xk#QuT%upUB@?&Q11`e33X3R9@RgdZB&0s*bFc`ld z@fqkW)B{!7wI-=7Dn~a^S=1hC2WwE>7ZX25Ol4e-YTZ(uRsY+>R4%Q>?!-$#Lfus^ zm8Uy-^!b72h02tkSRkk^bS{~NTA-?%j!_A1k0!K5EZaOFrg_o?ROkJqtG)^4s6(s| zXq>CvyO16OU5HJAaoA(fTBJ?IR^5u?w}SroIzP|CR(q|&)*7#SGtJ#C=oL`E+{C{C zYLig%s6G79N}#fi;tvOCN!ZP)K^RJRkjt7&>m`s0-!cf*}9?{-zwL3 zGzjg?zNcX;tu?+AzRGzAKjHXl3#}oy&@j>pLla1-`zGS5x5l{Eu~g`MNazQxgQ{~T z(o`0;y~<&Mb|wA-hC)1O91Q@iOL1Z5>#t+2KbJv{ir{tV|&3nP+PPiu6|LQ^ukwL zd-r zZncNnLS=6ZFR^#9FO^ko(-T`|Dh)|Nwe%LI2t& zY74cg+M*kIVz9@e!Ju_#An`fa8YjcB)fOs;%9Nj&)@HTEHFO``g0rCYb~UPYQT;E0 z+T$e5A*OS3G(5sjXp42&P7n?+LG7h>NGNk%V*Nm6)|}0aJq>i8YKyAvyiv80`YkP* zoxItxZ=wF^4ODHkgS3P;X^3AMRF2QY)ebJ8c2iqqgdc3DHhGU~y-R2p-KXgsIT@`= zy6T<^{|@#cP#dW3nj<=&sQy}i@1mxpS>Zo&2Gf1eX=2=2r>k)qi6^ZR+ zyQHYr^U@j*7qt)keP~bo)u{Tf59vuDANy)U ztSihVUt{t-#D0st3HIQt+?7!m^6EM%1M!n=u09)#-5o+nb7H$`ps{g#s34p z7k(P@lz|V}Q@{%H<7=JN?|#|gKOpZT>}dA64cm>_X6(J_U|3A-7QTMx>>C`%)^Dik zTt5v`VXr3NK=!$pyc(BJuywDXc~ugfPM+eVMZzU)^{eLpe(W{S7DCvE#>hvs0A=h( zY#x4o@)acZ4qNN>7IYu}SL`9!Rj~E@YxUSh=T=|h8rNa?L$IAmyF;G7*vX(Qajk=C z(NN<0?K55DPsPpvx3Ts6aF0Rj>_fImjeUx3^c!zW@U;f*Lx+OK+f&l)v1?#Qp{39n z_VGqHs3a#0euHieemMCxcU|P8m(hOYS%)S^r;#Tk^oHxC>-XQZ20uY#(MRMt zP5ch_QtUj$(?D6+j~~JQJxSBOzOF0v+jN@WB_JB^vfpW>Y3?5+tv#e;oANN7wEB<< zhC&b07O)R%@`mFVhLiYDiR*eI2Xr9q1FCcTHuL~_z7rdUT@Blf{WijnMAwo>zmr#* zv_H^U#D}8;&=jy9Tj!W?^fTMe02{WuOa33ku0UQmh5wlRx`)^f!(kczTJonQUBB}; zg#8>K7RC1Z%uCm1T5omDpx^0>!M;cSL)bbeRv=#o?4GcYJQ1WdfRET6$(tEl=ape- zb+|{|oo#H06~WKPcBhFwL)WoCohNmzp>x^}^dNcs$=d@vn(cHirF-_721=8+iSmbgUpy*FhWEA~B)wA9!~wLf^m z_Kx@kNgIjXhu9MGC&P}$E=Zd0kzZoZCUzeC2@QS?O z@H1m~1zj_Z#`j>GrsTCEeJ{R#8`K&dMS3~Z0h(bSBR-4cWF-FrZ2hihA#8nr%u@1r zqd7p=KcmpTq(8#0jh!4ef(^0r#O$$KqOb9FpZFd7D6y*?XAHLP`(ZblzvG1~N48AkzFJO++AsPExjqgc5Thf2vXTjf( zjz;IRT^zP8)P?fInnOH!XOnguT?EzGUcYNPntd)KUkbDYY$i`P^7~@vgkx|GKaf0+ z!QgL#3)pYi{wJCNjYpd+KY8`JU{zw}l?QtP*kL=uR^nIT9?0$30>1^sK@xm@ z{#6Zqi}oSEKA${KS|RL~9H%$_Nz%7L7LM6U+oAW7CB$6G(*&(atTo%{KA=0~CSOYY z7ic`JCFV_@E!gk0Keo?CtP%c6xPd*E{PAc7RQJ9ounS{XM4i!bY&Vy9I{c#S>pFga zSV3AU_Tfgn6xs&=HFhxi6dZ{8kiRF}_QAeHtRUNM$IpbGBBswkTBChQI|%v=Y%Kez zPI^n?@9{67i_s#aS0b$ySi&jo3m%Y`mE$zUPKwSWzdPxr(OUS^*)BW2exElFu~hiu zU>z}E(zg> z$cw~Xh_@zI1$K}(J(`Si%_r?9{zLMuB2P2y+QesI=OryQx}LnxQQaFiz%ERl9O!sr ze&p3>O}pR_{u#Du2NOvDL~JQcAb(Z-(`YvGFCe`*`U)Kf1BqLptJ&TVe@kBdMshx4 z7Q{l}7BNTs4d^zsAzBu_LfQxNCW9=no8ygvn%M10zXbKjbDI2ph)+a!z-R0>=tT1A z9^4A5!A|_xn-P;;|1CKS$nLY_k{t4Dr5bM`D-3mGrj6CPN5(RUY#0g6!;XIhuiZ z4M>518-EbAfR50f?PtPb_7#S{1SjmZ=nnJ)+Z82WdF;Au{}DSEu>kA=6L1w9tWVmY{*;4MYnlPV5eOH-SFa?N4kz_BG;; zY@dz1lduD^_1Q*k{438a4ul2rmSJZn&t10J43mj1K%Llr z9Az-1dEs9p-vNAg(!=n(;-4dJ3VbKN2c1ql7u&7G&V+pzZ3UfSHrrhyUux1LuwO$x z_OTw_g?=Lc0JJ!HE25o=f5u*porg3V)RI_oV){&~4jRn9&Xd+3e>b`Uy-vQWZ}P=?mntKc$e_t6dPV<7pvV9&w+g61V} zdh9pgN?HnHC&<4W`!i{kP-phlmGqU^jj_Fn*CpRd@*RYW_(kC+=@*Dkg&x?Y$(t9h zkjDaTO#A`7Cv7rpWS{BL#J|KANNj<`7D#M?#1=?wfy5R_Y=OiUNNj<`7D#M?#1=?w zfy5R_Y=OiUNNjtW%DE~;d$M@o{2KnPgu zVX^5_o^?8=Xi7dLi8W}uD(5^boX}H$ZpS}WCb0qKD>L20B4N8Sr+;s!=T3^kA(fT= zV*X!$Klw{=cBSulX)%THT<>9V2(%v^ujok+Jz|)|<>Hfzxo@r-_j+N+%v)WNA+KSZ}OEM1o)dj4YEagX!URY 
zG$wB8!H@sL{{E?PlhB`k9XB1H{BAR)|0j%_|LXRL*2DSy{in1?!f;Q#mrQ7he}C0; znRgo<+1HUo3V>RL?N!dBdalsnN{nY4Rk&-j1e1pQ5=@ zJ;$eK#|o3IXS(z(tDfP~Gt*;0&*t_+^{l8L`IWBIM+E3uC>^H;v1RyrUR=*he*`^) zr}s}{VI;nuMbtBkdS+11T>-vjIG-47+hyRI_75V-K4L<*Zx!|^`D+6*0YeQ@bx^V zY(2*|iFgP+0dHdOQRUZjXZJzR0iH)y=9O%h9j<_$hb|B5(*ckVTlKsHYM(e%&*!N< z)#roQrZS8MTVfg)s;9~~0CY@^dtC!+&gh+!Zs-iqMWV{7XHGRXbzSI>e-dB&^nt0w z^n9L<9nQW(Q9Wl|f>;%NjlZk-+flvCatJCC*YkO^i0k<~%@MV&=B@gr4C#8tvH-T~ zY=`Rk%#&zo^5h|1$9@QU$Fw{#Jv-Y0ThATqSw8iX%C2#tG3Qf@EY_Ks_iO+1Gd_G7Tfed*TZq_)u8x9@@spIUDZkBK7{Rbk*{YEkK>2o ztKakvltc1b*hiHf%;$1>FOEz>)7AHhV6ohsf>CK`T=$rs=8^sk4H5R zLy14b*8FTnJQy^#yUIqjozfnoT6fBT$~;2~b>Jty#*E%q>xO-R*k#a~tTL$&+_Arq zrgiiJzUrp^s;_-W^FTGOB8aICeL!o8`bE!vtDYaROOdw%syb`?!Egz*cB(&b!7R{P z*PZRsp=y8ineuC`a%Nu-NK^f^9&2oDMAgPBuff;)u?SmpL(k9J;p=(Yq1d_{w*j3~ zRMr^ss82MvbY9Y$qvvd&lE(#A9W*C&e9enSe{QQb)|^q_=OnJVq_RmzHNNz|iq3Nz@Kp}2i>j;E zEzJY9t=5RF`05Y6JD~L-J!~dETX~499bMo&XuZ<>)m+rL(wtw5y@xc_U-MOC(i&T1 zOnLVa)A+xM>ikp*)$`*oQOy@GVvRu89Lg(fvDIIi6SH9{w$7;&K<%HG{8h;ljw=62 zumrWM6Pk@}n!$W*m7x$?3*6aeBC7Ig9CyUmeCz}o%UXM)(G1`ZJ=n)P^2B0m&H93B z&b-0anx;P180m+v_2m%$HweQHB&L2)y)|xAW2@YnQ{_mL-X^cczs8ZuwGv zYHyua^-j=s(sjPnTx*8?4z!&QdF{!geY?R4>^d+A)INL3r+sI{SDCf_Ez}u~fy)1c zG_4ov6V*fOhRUol5DMBx^F?ct>Zo(A%JK(kqwrN`T~lg}+li{J7g8=KxQ88$RzSOu zuJu>%!mho=m8&fmB|VL;KP^ zOGX~G@lA9$sQe#@Yg}rrYl|vfZK5_-nL1#nMn9oV;4UO5tvE5QLn^<@sWNFCYV6)e z-;%xq&4{nLS`gHSn$!P{hP#gcw5ZxR4$>eX-LN!Bhk%3vOP8RONVB4-s62p#2pe=u zcbAlO$FfUzNY_$JcPsIH-n^DSuGe+u%-nO&oH^&rxxc>!d->kJQ>K$o&g%IU!nMio z#r7eO%Rkyf(mS6A!x_QZisQ$6o=>ycfUs?{J?~zhb;utl#?yqmf|yi8*suD4|1O!# zm)!H}`g3CS&I0k~_3_#7?(F*5-TLpT|9{DwBs&vvxl~vd)C|sIh4j{RaBX!DJ5CU+na`>R*+CAyxi+8Yqt0@yWY4h8A+?>= z$*~+fSMtA-zigPh-1wK`VZoj!k8i8bnl4RtPn}o$+xWy-jJZG9V3y|N`|KpYvF(}3 z@(drir1oTC+4|g7V!^xR<7LvVAIpJa@$jII5Wg1(_Vg}mb}sgFc;Ign#NM-lF|qu5 zOt9aH<70z6!5%!jNT8dc{?Fq{;%S3>L|xCHv7Bg)8`QT>EWh!exzjsKeR7BY=0>f) zZ*6CIeqg&J!%JbUpw{Nk$24}F#x57^{qx%VSzX2_f0KMeymgQx=tk5hFB}>U3+{oZ zn(Mdeudls$ZFP-UR?~?s_cOodyJF+<@MiLC>7Gb_Bl*kN{D(AFTpbwBt35KdfBx)D zCW{Y_ecQ32IJbuD(#wHs$L?r3$r-y_R|*FN=f>ZB!%h;cR}8V8dg8f0l0V`1*KgP+MBFm~>VfH-2Eu&u*x1 z`Yc}jXd zc2@EXVeHy|YY;om{e@&UmecPK@7F#cmXo%L`Lld!4srW*I{xK+?eY5hUrV+YYsUP= z8Oi0|EC0%qUyfQ$Zhjbk6~+%I1bqX0Zk%L(BDWu# zd}eyzK)2N=cUj|m>Eu1}{6Wl)$JXy2b6)&!^6++jpT=ygrhPY=-}?+F`{oPnfM|lRCF<2}wo46rx5TNis;+mp@V4p8%0v)DN}J)coG@LzMQk;KL|L7Z5x zJ-^kM8Is+lpM|S>-Wc`q$k_98*X?18+P1Ht=@TRL2s{=%T1c&WL>yqvmj;J@-E8~Qzf9n>pN#8)=X{pdVC z3L|QZ@8uh3K4&B5){WfHTut$LZuY^Vruj@jq+J#nvkh`1|_F?C3kx z*ik{v^~?Al!Eb%`s(#@Ii#Ptu*u8XnI%^tU`@?Wg*rfK#jgh0?sVxUPKY3X`zaX3t zwyf{LAScPI&g|&)=AWZy-OmRki+%nub@B$WdzBBf@#T#(xBM|hI(c)g_`KMjzI)`HZIgd9hJT+<-g6&2JNZS9w68V7i2B9zy*Zl+M@%;``%m8uxJU|4y(*cjKaA+}g&;h36-W zSKkMR#$wt#CaA3z5w}MK`Qkt6l2@shDFj%9UtHO`2k;%w;Eq zzmiX1Y|Nv{?zUZP|2FuIieEet1_x)oMyzIDHRdOaH13^1_igPfg5Mt5_VeWB(tkK+ z?kD04!;9(XNWWh^SL1#c_@}#}y*#F0 z{IkcuYs)EX1~z%M_V?eP=AS0a)O_|Lf4><%2)8wM#-1^TAFLFsi-re#-8RS}?v15l zwsu~B3Wp{039;dh8x?+&>^BuT+S$$%)5b9wboJf zKN&pFXLd`zJ@#()R?nP~EWS2~*+GoS1?snN(_5FnC9pF;@>_xPXVWcWXT_Jr`dC4H z@`VY5wmGfWcjH0v>9KET@&0yg_mq0+>fnyy<6{4>wb|J^wo30@2811wp9-VvJEJk~ zXLkHqSg5wOiUoI{_p-+uL2ab|7HMXCZ(tH21@v`YIiWd)` z1@!`-VPA9kZO|R6o@Ad{WBYeL>XxhHeS`Z&EZ-ZxX$Tv*WVUnHi924$zs7=Thu-{$b}Eqwl^`vzT)Bjc=h=0#ylSQwHkfq*g7@}dnEf?%%|x$PZk@K z1?w3Y#JhKh5u^X@b2 z6Ju=b-Tc5>d~u z&ysA;?d!iO=4brEJ>nb&)b{=IQ2lCvW9yUmUP{MrKTYQh#k?368zbts{?*ezpUj`O zu6=2|Z#sG9(eU>m#@va%a}JE{Nu4Cl3#qEJW9Gt_?(upg+ zDhBzlJf^;WAn-4Dk^Jf|^t+C;V^euVe(_txx3L(VB4*bE;_c$6;%kHX#HrfKykde+ ztY3SqSYBQ)b_QQ3v%v)MGmZUSSSVS};0GTk`yOVS*^}9wpU6MX^7F>5n(l(yGX?kj 
z3t{(k=KNV~y|)H&AWqz)`-OP}zq9Y9lkJK9-U{wTxlw$a(K{xoZ=dx1(EFT)9INJ> zIp*tP!Z{c(Hq|v^eXIJ^9b3m@&-3z(?|?UJUlEQA*6qBk_vN0m*70jw!^ySzs@UV# zGX%0c#|PMD#aIn=LwY&+qM**7r!jZNyVh36Tknt3IXm}?JCE;P6Z3~1V>w~dbo}J4 z+Rm5n^9%8YrmAn{^!!P^@n*8MvH|;9 zpLphf^4}{#%v%pBZtdbr`C2x-pQtN!GQGqN8XS# z`72-L6DQU`tTsOu<5LDThZ>3!#ZJx+K+_o z$LQ`%maE#Y-+P_)?y)#>Pl^@!!G74+x5U@!UQE6; zcJ_V)zPh$JyCU40Y|Zky{2_MasgJ{l;ihnG*txz-!sz6!8?$7PW5lESL(cJ=h4*e5 z)FHk*`0kH-X8$ljc&N5`;REhPva>lN$n9e2rN)VgmwWz@^z3T?`^W6&UhwRiv6_p$ z#g`a1UOb-_cBoGteLUUUVYAw5ANIXC{JK85WBp|7PzUlCad1xK#Mg}VFAzIBbB>d& zKJgvON2aX*&e~5WtEoND#&^VQ<=gJ3wbk9r#;3&J$KQU9;Y(t8?8c6+Pfi}M_C@g; zfzR&}+&k9FZsPOy#$OrOZK*&nx5-Jr3G6ISSjUlL=A9w-KK}T8ylxPu2gk-Omi$>P zcKD^=hu(|%;luH(VWpm(CuUFi|Iqj!VXS29P)ltRypug%N;gS(z2|(F$uln`&z;U* z7YpKi_H=$D;*075Yuz*)6b=qk)Tbt~Rxz-BJXu&Knf=bMZ-v_KpT*LNw+&-=6Tf%1 zx77D>xHK#s_y!yOD3-6~IQtqHkB-IBJ;}b``5+tfLop)ecS$c6K8x-7l~}BO(KFU9 zC%S)phwqrYXV@cb9?ZQ&{8oKx4f$U#a^B+EJ?(c+u{3XN-SW*F`U79SE*=uBX`gs_ zx&e*#z8Av%>HZxq3=0J3uHKYqCr*~H`K`UM(fsjo>E#5rRx64%wT82HHr9SpZT4Y@ z7n9ZP{O#At*Y(^l-d#0Zx|eYSpO;U zVZk%bm!0Km`O>*Bn{L7&_W0l=jk_aES)V-NJB%MJQTvHxHQ!`o`os_am17r*kBq&Oe_dAl&Tw10qvO{C8;Kp~BNm;_sbQnw-7BWEFZsw=c~cxXSMTAU zZ023=@QKo!#~rFB+b}*j9+3XZWH#m}?k4XLZ%@Y7|7?70_*1%ZYm2*`ljY)j!fs(? z{qva%8dweJ>d-#APY)(UdtPW3w% zcZhuTNU)c4(@kIhz}Q{)Tr6ks+Xw3ZE)S$*<5i7GCl`O3Y(D<%_kla&0b&1SbMnWN z^o1{z<&6=sI>=al_Cny};^VIRoar;ki^da#$$}WRfBBj%1Iw|Dtw*H zj`E%N@LBQB&d!Lx{W)3Q;@fhE9Jo#6mq>QMY!TK<$KU2i=94qm7AKn~v-9rh#oNf> z?Dz!RY+3)F^@;lvV*4GI%vacyFTNWlPQE@?Z?dl(;5UPTwNFfcchAbPla6VtYusr^ zrJq0kpt16c*pNGanoj&ZpUki9&Ds4Z{vtft*x%Owt5^`al&#=8f3Z2rKmiwAe)A?cmXO34e<$JX+u z{omSJj|uJwHT$h$XtF%v-d3Z%(z7GedyjcPi23tbLA?0RV{i4|?6vub8d@$GJ6T

}h!-IXS82eV^&wR?ho!ijf@o=)-bY^X9HrBe&PCq>Pgz!iZqxQx3 z)GgM%WN;r%7k<@v>tfo)=DO?By~0vYni>Rp6(4)yKZpxMj>X{PLDqohW9- zm)c6c`Y3)Zo;t`qABIKh=ikTI$B%c4t(k2f3Vd2FIwZY4^Gi1MPJ3}5xJQNs_LK|M zQF7WdjqzKBy}VcdZbAHAlWthLM`G*s8=Sm*KrEj33C?_<#;g90#Rzq){Z*;!7$GMVp*Te(vlKON+PXTyH!u8-wb_lG)r z<9M9L%#_al{cUoWLHMVjbSCX1gd@>rKtM zLwK_GJ>h5R4vFPY_n0}wjrH=u_hNUvbNV(}Uiv6m?o?x6o^I**u-bf19Qw{O-WiRP z%;%gf-*wNrzc&odFJigJ93yM*AG6igjsJP_E{&fgel3V$Il{eQP4*}Dwy*tC zeG3Kay&@QQQV<*0ht&lnhJS4v+sh*4`oz}j^)D6kFY6WWe9qZeFTZ{*o$o9* zJ}8-=_#PKa3ncSPyjE@Z{nE+S`+Q)p<pFHP``Kow&Gl;KG)7!s#XR_pB z@rpqm#5cZ3_xI$t8Z%E&TYXY{rTToAxO?q=<(R$I1J28P&WOeLGU?6f&hWd;GR+}> z8IR>7cD2^SgB*HiW9Cf9@8p+%#(c|q)jJc!e0rk#ocAAlMlPN?d68IL8~dx;>d}Gm z8?~>AeS@$+A7L;1oFjg#_NMjA<<4QL^rxox4mM`5#cDeXwUqNcEnYUPk)A)El2!m_`5iEHs{x7GxqsI>>RdB zRyWL3-<7qkf6{dBkwIaa`bNhC!!hB*FgU0UkB=vb=WYC=jWhS~+Wcwdn%h~*V*~1EoBM*Z^6Xf#b=vD;fls+})s@yPo_uRwoNP_}-Z@;~qw?M9 z@mBGvL3_OLewaADSYlh>^#4s3D`&-@*8XXHYJCSKt4-|DTAiOhdCxQd2z%DYU(D}a zYc|hEF&kJf8{HekkDO~BHIzG@ZRJm6+zIS2p49v9DbI}^_~b*edt|#Xb#Nb>@59Ea zEvAou8K2l#d*N5V2ooe9RG(buu3R&Amgfd`cGrpXts2Xpr*7;W$=;)W{w^-kpBc`p zPfc-K@>a?A>YlqX*}cUVXHTa#kdMv@dj<6got(md#MT+HyGC90vv{-k2SHvDpPqTI zaYKXs+}RlSyLTL%ZsPdlz`vc3T;m+%`{V0BGxiKS^A%^~JdUhySbS_ap*}ix5Hq_Z zTkoQcIV|=*cj3tNN5xmB7t{09wthLw8C?>@>v4^7Htq*`;D|AO@{iv*{!shvphobU z#`UqW)+{ES)kNvtpWpkN`@;O&20p1Sb@zGiO7-2A&e>j?&YmU@@`H1;M)n$7pYwBG zLmF?L^6Rp-_mA02ta+ZT-3wPFi`8|)hT+n5Gc|0DnD5<@JTTckeqHQ)_nvrEW1Yo{ zu{fP1h&$_6_uK!Djhit2O7SGM<%c)xv%V|RIk(qq-;z9bc&qmPwdHR1=g;zu``mh+ z;h{kt_d0XG2Zt5~B9>p=soU1RDi-I{#O@b1*(+K8a;AR{=3Fhv zt=7rECJr0bwr_La9K7q6Se{fjdzaY$tY?hBCY~rhp>g)+Jil$s2QmM0*YdSRlD7-u zMsB_#{xF=|bBosgQ~X8jPPccz0lpf~(wK|mRpO`PsRJMN?uXNfeLgrSSobFBK8nq? zMPt}lep36p)7_UBru$)hMq?jKRHbjLd-)(=c3--W zRtbEIY%SX)kDL5O&)iYl`O4qyXMbzOa-?(Gud$1V&yu}o>mUYhPG|jcg?q_4`Ih0I z1Db=4f83aXwbu#GfBSUi9uV9`H`E>$`0c{!UWoam{BLh(hxL=?D*gQ5JA9wA&zFIZ zvc)IyE5Vtu@vULJ*0EoZm-Jh^eCzw#T_Sg{6tmL<;ef`vPrZ*%?UT-T&|TrjwJ&Qe za`2tCX9(t+qBfrwZ&$?pc!kCtRX_imzqUJ^J=p1^`mRo9J2qK6W=qey&*c=~w&wPJ zHJdrVPWN;eSX(`5ul$YPIqs0mo=4Zdytc7y^kCQ^Tol}K@-iQC{~aCA8~zwh?iqHl zcfL7u{qF@nd`0|xaAy8S=-umwH^b0$d`PWh?y-V%7jxb}ER}oZ?66L4{=i;piv_|D zlSee>s@Oi&{-4+Ww7&nuzFpaBsrb#>@|`=xnwJmkc2D5betY!oXs>ka>vyxqlP3!2 z*FGs08_rj~Bxg^M{wK-KNc;~?o-FygFk7-cAD@0iZS_8%c(e9DYp)v2@BSYv=AZ7O z+2gU(9T)o@PaP`19$Z@<9w*tjX=<~hJ&hCCXPIzbI_LXoU`zYCFg%w0V*HC<_gp+I z{3+dMu|2IG_}CfsKaq}o+ym|rdEw8scMZ;d>Dufo4%k-=+w+QHhoCM~&z~FkoI1pO z?n_Whi|3K)0Xp8R!Gu+}BgUC=Y){qk^E_+L8yWi5QyI@!gX?8ZO&5g##+I`y(x%-P5I;q>4h zbOwC(-LQR7Be}Qi)!ifSOdPYTyV|)qheZSX4iC9q$K$0_7uuhF^XErnd2G8_u4K>W8_Nf-uI-+ZQ>>LQ?Oxw>vFF6tOSSE5x7dE= z#pxUCoR(^w*nKkj<*;vkYOh0M=O<_UDEZ@b@)H05X?k{LWA;$Pt`xJO_;e&8Z7&pAz zc;8D0$9&cL->B{EUJ86gEi6wtBR-^-7WcQqU?z!aK8pj9r>sjYMAo>1e>o)eU$-W8ROTsGAYLB|`=mF|SoNvt)Q4>QP542wIn@!?@m64C=etn+OzixJr596vOL{$KW9#P+ zCpFH!vv4d|@VAA-eu3}qlTK|aKOUDXzWIT>mH+WKIePiV`$pYAow%MT=4<-~weg2R zt?V4Gjh74aB#Q+((=&55ueGZCFG}ak4+!FtP45cMZOZxwg`v&Kch679?&@N9lp1hQ zW1Zb?$sfm))@QAs#_rAe8z+bGCwIeH@j8t?KKS;1rM4KmKRza~<2yZjP%`_ng}f#1 zou#}WH{Tt5-+zNSrfV)|es(y&=hio8I(9Yp3W053jb96+g51Hc`Q5N&`TmO9=8?Pa z4s0$i{JSsv@|%skOMh{2-baUL(~Hj+V|H{$v#&bA`rIx11u;(V&M@ztwfXe%_1%-s z9BahlmH#gg#!okYJ~$iH8TdZNB)iAkH^!4*ODro?3gw z`q^}rz^)_G`+oa(>>dA$)lj}MHm$vT;G+`-{<(4RjpJ;uPu@44J3Jno`}VPKI_u;w zvxVO_POf4<~mouIS&hV*t zm!NKN|M3&Gix^{o)A5&77>_4&5oS7OI{ z*+u-iSFFqa)@zKlv+)&S#oB6Ie&OAGe`I=hEdD+pyxbUZB2O7(t%o#b>*T9qzU3}I zDR|eE>2_(%I?3|R#kH+feZr63Z6DURRX8IZTUfU>IqrkGhb2X8KEN z`+hrM%-qA1)mYx?-jxgFBsTZ11C!?p)@iRp>%SoIWB2%1^|_-Kub(~mr~8mEu=&}I 
zW!FRF|HK2r>B$4*E$eqz4hxIdUNgPA*Et&NKD6)Q^(hR)_Gr%PM+8;S#Dr!eST|NC9oTRXGiB_KkRi#vNM}1J)4RPae8)qU64~2u5G>U zoDt~;q?;g!Be_Vd$PeOK-W@O9N9o)ntJglbwzxPo`H1jFm_OZZ@vtyoW7n*o-`S@e z&&G$w-`#1Akt5}aW9sMoeBrl2e)OERZxUP2%Z)!GelTp-*dGMXUS4~N@RQm%Hpbbo zr`$eO@O?gOZS_AtP{-Nl8iC#9lSS$imsce_ukU;zofZY7DMiawT!KAsbqUyG>CKc+~LXRhC#JGJ9p#M6(i#rgS@6LpC*|Yi`@?R5J>>;!<%bUiwmvJCKQ0Jt^nLCgRQtZ5R#eAcn(Q|Pe>>q*e0G<> zh93oYl67nvob73~#VI@5ubg;ie8(6}F-FIC`QSdyeQEN%$?W@RSTVeq{)Kpj*s}|S zeQ{%#k53Qm_Cft}o7i+;U6hUu#h)`0pZmoB4(#B&{gia<=8m5@@Y`vEHTeDG#QOQf zd+GR)SQCH0iur@JU!5%0#h-6C@ZLqj&dJVUuCQHW_lVaGPtrBszRiC_ZRhXbCOp+R z-x7Z3+A8*KWY3?4NotFQU)ATCi))Wgwl;G+cfRy~SUvd<^)FrjCAGg>Q?i(Qr1my3 z-Jaq8;I8yLif=IQ+cbHs*t};XI-mb0@75gb`${tZl_zhh@4EQ)pdNd*KE5D!?g(OW z+}gvF<)+7Ks|&=l^YXnTp5ILGT-Ob=*Pf#OW7F~VjgwD^?d6>@^TCkMIB`^1+!PX7MRCzJWDv)LgSzfxoQlbo?l z;}?x*uRSEa9DPW#`oa5N3v!9tcfEAKt8MKEHx8QzvyoU)hq~)Ldv-dp=S=y9B*0MKHiOe_lS#K)4MaaPXFuh?_~a?J{(>@d&~dMT+FT* z&Q7Pz4(`HL8tcsY;biI5G7~lK%OIc66@H#FgK;LDfQW@l%_-yW~+j`4kUe6pM( zE*EbMzqVd6D;}+D%Jfqui<1{)zH@oHm6DyYT+Lp5i;Y+8nZdQyN6z0jlKORcvKUjF z$eqszd-lHl>lepzwmRaz;62uMYxAjVeB-kJ{I%t|>x22^fA&5(zM+0=aJJsZPvl>A z{CoYram9o?kAF-M4oJ6EI3!pn{~s7U&-Z>D{+TRaJQvvXCxMTu|9+4>v^m+*p59D% zXe__`&Bq?@kDcRB(mfQLOWneLXV!i^@FVx+JMlZUe;s}loaeI*e?J@;zNzj0-KRcl zo-Od_zlNK_YU#IZ9^bp;2D$Co^lyjX*Ee@_sAr$3eNFP2@iOTTj86>mrr&-Sr*ei_^yFURvV=JI&G`s5XRQLk^5{I^*A`+K?lJB!=W zoA;;5);d>uv3N!L&*Q5DJ6{|YuFrkoT&#JPi!yY@9f&ElR|AlC2P2iN{Otd`97 zTPE)t%zsh*NIXyccr1U&5f74^_xxn>^>KKh_Wy;4lik~+;^o7h;ljpS)4=p=$3O2q ztHmD#wzH-mhdt^a5sw$x_KGldxFD?5xRsm7dPj$Q8tc9=pXYB07lx)S@UOM$@FCPhId~VGAZd*ZQUHWKsv`RD1xIKQ|jydA`%Sp7j@-~Ho{!q#DA-#o&(I5sq`?qwbByW|>f3{4PUlxm<$K>_D7r&Bj$M}tSr|?ZO zz1U$BIoTNTa#%PfdESssaoN}?c*xe+5P8)plIdis?L&To@@V;1W zB%eB4{&8hG_45Jse-*zF<+n>SvL;JeL&_TzTM^RuY>O#dCa}D zTx>1wh(Wb~8SIB`y@P+a2kd8b?NyT9oqUvTw%WeKocj;c+Z#IxVEY&qoWESNQLJ8K zUp{Tk_HWKF8t<%*tbJDCBmO?~>zMC76Zq@$@t=Zwgdec)sPr4eew(;7op;R|#3Mi1 zFXj){Yi+*A*kx#7XL)7*m@lfk-Rq0iHm7IIb7N2!46MCLeSC<lr}uP+@t<1NBfwdD`7$fxa5e0>(wjA|0`Azw@tv&Eso zUFL4K#uaN@+eCpK?3Ldi&=`959+oV(JRBe2bN@)T?%yU`qxb%&KKYB^^XVCr<=9oi zfcnLbd%zn0mAqtpWqfAOshQmI;(d?w{}7jrFE*bu6?;DyFOtq))EoPR-GVysx%h$lUy9!i>RSFM-;7FT3wiH`+I--Q+LPCR zQ*5u|kH5&pY&U(-Sbb_rYj>|K7vI?P?k&2_!?|PRRU6Bf*>PAp_T`6S zn!U|uAM&)jbehKUK{j5o_WX@u_w6EU6O&`5zbN(_#;!HRnRQH8+j{wyc%La*-a0rq zQ~q>LeeS0bfp77N3F`Z+F}211{EZPGV&&f8Odkz%r4t9>2^|?pXSl;FP`+<0)baEEk?;Z1vd(+wP z=Rqw!QMz9w|0Y&f^T~^Y+&g1B_a3{t^Hxn3Km6qEFtYZ4V|iFy&lA|mS*;(-e`>$W zlI1|p%oa}-|2ABm%ooL(eDhY2BgYQTz}}|r`7dI>5AtO-mpbjdbk4|_Nz=Q7*m!pX-}kNjn{;;vd5>*9cUIUq zTwI$CtY_kIL~XJ3Q7kVVp3d4&uKiiEcfAn13%#4~$Wvl-t=j6-Y2qQZ&B6*l~uJ1^#k>;CIW%mxSXQFHYS@=CDS0g0r>Pp~=3J z`PSCSZ_tH1llh3V8ChGdaMs(W=VR6{hdLk6_>Dpy5qoC_=ja>w*5v(yy20~)vmF+m zOcu}XvGbF~u{%f3-7)Y1^`{*BljQ4z@1vXIUBX6zA2^S*8~2Y&T`^}7RRjvoo`e|OnJ_1zd=sE=RIRo`pz_2K8?!}M&* zHrDCBb*}8Xb+SD$_>`ATkYdwHlHdiSs%Y~*O^<~$g^V5-0~Wq^$eSC z9QZTeau<1*IQyb8a?YH|YVao;!)|Q4Q@UdUKl*oVXZ}HLXDJVvXVLJN;Js=SHlD8b z_08+Y1&Wr7VO)szg+uHX9dCq!0!&WCYW}9Gt>VaXg-x=jK=X+<*u-%q1 zn_ksia=f?|1BVA|pE})v$?lJj!{&|O6zb!b#>iu{Htrws?ZN)V_~nf|B=|P7W@mh8 z?04b4!@%~abO z*!kq}dgB(UPmF#O)F}If{{*opzRbBs_$ct}Q{!*Unar2OiF)wLa82@>@k#Ny;UCFs zG{$;w4C08NTdx|)_r)J;TgPW(`uNM#VV~e0Vw1VzmwU#Ud=iVx`-8dnO1E-mXz0S~jo%2n# z&kTHnFSvuBs83zuEXB#h$#RqZj}>+e-Y*x9-&nb8SzWdHvL#-cD!iA@Q~8t?OUm zFTt6ypE~cz`phTBE=WIB?0I&xU%A`)@||bH%*hwTAIE!gHv-@FH_tEQDbk-_zr17p^4O2kFHpbV zKRoYjE(?o=A@v^-zZ6anvjxAI%awO0t54Yf$oS{MGv>8kd2X(FxpZn3_8k#^*1LS0 zIB#=`wbg4|>nUNH#;Og~_BY1%b#$0AxM%$K$seaow$`(QI`-B0mipYE>YnMw=zo~J zXR>h-CGK z_GGcT{=D?^Eq`S%vAa;u`px0v+Go|iJ@$0q<9ztUFn@R@oZGz4LEZoN+VY^g)f&A+ZgtP_CF^4cds(S*^1;LD 
zUysERyPg~t4)T>Vw`O+Ss($zWuC>{Ev%p@nr&mi~o$O9_Mw`{%Gv?m|!ynVLhnTV_ zwT>E_Z5E8}n_cbCTK^p6;)!Z|H~aEK@p@Ks9U42ovBML!&#Yg3Im7Sv)z~9z%NuH# z!HqepzH8Ij-+1Bowb}IXU~L0xlfMp!g#~&}UBV{vto8EiCxbj_Eo}Pv82wvgbi>n& zgMGst^*tENfqcPttXg2r^!!wwJS|>7u;aJ!!p(DPdhe8j$4xh|_Oh`%bWpPQv!z;x zJ>^O<-!-7c~~y}GL6&c4j&TuHv5`u{N!WPv4Q+9@7urJ$Df9$ z<3pc^$HMoqbYHSHyBqdO{zWX$h`Z;~AJ%i$r!J8X_6ht{p4qQ{=YM68w~ncAxpaJw zUc7%8zhhjob+F|T!8$f>4ms(v`mF7!VEvy4Im(>YF}Ua56V8x-S?^`ZX9j*ey62Zn zr)Cg0{B4DJr}Un^G5H^{eVc#P`qhVQyGqy~$obB7vcT`~aXs(ZpU1v${B~pS>}&mO z;7k{fN7vpq$jRyvYdt*K9XwzCa^S(?p)ewyb>109HJ1NbD_>bSY>~{L#r3<%*Cc;; zrzZ1}ml|X4Uj#Yt!rEfmoieaz_^LDDyQ|bcQ}SZLw*#9xN4A)}wmRmnG4s8d&brl1 z?D}@>Zgt*FhgzcW5-w0i=RP_eJ1c9 z-yH1f?9U3~dith zTwA|aBX$& zRWaMJ>r##NJ;*1Fe<+<8^!pKCe>5GRm0M$y|AWc=-0w{6$2MYQ!mxa8`M|xx9@{rY9J0Aw#s?3H z#mj<0?6@2F&^JLm%~RXDXGvyPHR0Z|_IAl~HXl$wu+wAdwg?9#4-Wf=WokbVPHlcR zeIcFP!e37d-p{wK@i*y*#e*8RK;!k#5&VXzM&;M+yKa14`c1?6VY6^V_$H{)ZiwFs z-l0Af$9tq-y0)6gnw`NB$-AZJPreb~t$k#Aweq>OCroz6qk|kGcZkOs>bG8bb8zF- z+0OEn+G6YnwcYdfW^HnvwW~4I=W0E1U=3nJ?iYJ!$G69q*MC-Qt?F!dncRO#{AA!W zY%pi?e97mNrG&wi(tkB$xY!+-4UvD#wOI{2$Rv}f`O!QJst>>TXnfaW?Z-S2DP z67H()EZN+iZ?DbQK90up8ss zSL=5Max|M;>y7bk!FvyCj2y+j^7`>%zhuv=uck;AL+Vg>>jSlihhu{rVgEmlt(lEi z>v?{-ddwbj!HvoCnK-guF>1|E#bejEbNrVeFUy5PlGO=Y#FNy|<}=2R*M2dXjh(4` zTdi_s@=;;@WXy+##$v+r_Q}qC>gM>xz)u#gk3aCANt5}~`!T#ce2AkLOZuf|=p zHaqg!6B?slG4BSoPYiN`dF-8!@_}{2yglp8#oXtC?dDI%@8$R7YabW*G=F~ zUB-v)iO+o$+=GY4KMAXZTO0d8ST;T1{!Pp$*vbB^RZOv?yK3M1oy~jkdNH4Pp|K~| zHqKdH8+>d1EZ!!F$Irs9>G;ok$@Xon&gk==mt))$>I$|MKhupFw@GYnF~HY+_dXo+ zX|YVlw-%|*7w&JIbuS&uaeVGK$rIMkpRB=eDi5aXeql|M)nc(6(m#WaABEc6u~i(>U{3v$al8KYyD$-njN__48f#_EWWe^T}=M zM*BAZC+V#BoZx%;)7tclCI3Bkmi$INV7&vvHR*;oj-CIL%;xODA14a-#0UQt>`TsO zYc_f^Y!>*~;8-2LT4Tifp!k58zfB)|);G@?$?9Kqw|d~X|rklrW1Fwh1+h{+-p;$#U>TG257X`^G;K zh2pW&{k1X9#k#Lb=NnCKxnzv)o7mmKSDYLFmp{%; zw%+ICuhPwt%n$ZY_Pm&|Pxs;E;egsVhD#dfF5|n0r?-CR=KL2)_gQ`J$%kvNRNJ@I z+VSb>*Z-l#ooZ;U1b8ydWU2~hBd$2Zp9a7u7o%PybP#B(W-LPK$e+ulsZ+dIJZ4Bg< z#gh5OCXJB?hSau(r^8Q^)w*I*F5NMmyPN->So^RrZ?NBUdS0!uc;K7rWI1WRaBH&n z8$W6Ic^DnE9}MaiYvRlFWO-!5VC;aNcb2P#Et^NKdMd~nC#Sos=hXzN&xni~UtFbdzvs&|{ z@u*nMBNo-~;_j80z4)v%UMKLWJ>zlHjn~+Hk|&G#gS*u|AfD7b_PK13Lx;pWhD(Dw z<>F@7V+DTQ^LbtbY6^J-aw( zd0@rxT#%c*gKgy`aU$o+DJ%8t`LW!`zSig-z9c*p+|P2%tMS*3A76MbST%mT{u}Fa-#DXv(>pKou&H}$bbM&zE{v^Jt>8OEy>di)dGf0u z|9nteUHW9>u8#d3&%VXcY4H`o*;(`8z;^N%-%ty_7ubg{92>-s-;5`U*^+P1736ua zZkQ?E+3}R=tm(|!chqL1os;iQ{#UXXV!t8Dp4&GbS-&&hEZMh^ck}%jYx_p>?8f1} zAXl&td#@JcJN7#$OdggGzYkaRoZRG077U)@b8LKhY`yN(m+Jd(aL>r8{NtL~+-Jt0 z*T+VD!yT*cm6NSgts#Ez4}8))_K4-D&*E)@m~@6Cdh8ED%-Hwdfj_uE#jH4dHtgS= z_Bcs0d-<*7r^)I!YnMCum)dXhm|skvtY(n&SB{;xT1`&$o{Q4SYiwgLe!H?ScH1}b z>1hL>aQ6D14)+9e$N}^8ym(zM{zcD!8>@fDN;fLl7hAj@J_^=6sxf?Kt$5F{QMzY> zb8(OUFrEAN!*uH>TkG**%iyft#eCMTdd5HEZ0p{d-r7bjaf#L9(-1tTx-87JeC4Pq$-`gZTmb{-8PLO;#^o6!YEb;w^&t|J-xeh1=@CCbs8$ z;}gg9^C5ovdhA}%cVN6|1Z_8>SMOIaxj%obJt- z4@{Z-ad0kbrx}C&EgplMXnkv>bN1rfo%HkC@|m?RnC$+Lhw!B7`K|T2o4oV(=D9AG zcb1D^j@kLPVBag(7LU6HzHIK(g8NGC{$f0Ndhh#Z@*+VzIEQoV=fCtrlKIQ`>y|U* zaxr;UP($q$%LjYKtHsXYwV40&ze@ri=8y8;1~DHvKKSPG9)DL5r@s$_!-};xZ0sMJ z!@8aSCgIFvImvlim$~ivoW{Nq%LS{)4+gaiKbkOn5{865(zye+Pqr8F?S6Pa`K5GI z#}($>Q1f{-QF>KoZOiI#qM!_ zB@ers_^}#Qj&Og9zq`}%CwXGM`i6&f!Z(3^osSwvjPO}!F>cRIhwBqBucTil*>}vE z!5RG~yxJHsVecy?pAtV6%O-3>%rY% zoqtRJMK~r|o_IHwubtC(@le~^)kSpj9-m`Z=W#&Kt{e{ya?2#OkBh~>ymn(OR_;u1 zJ!>_OSaEl`A7%*7Oe{a%n8V_!YpYcTCp(LUV(0W_SgiKp@ga?IKi!=yruf2|vAp1{ z>{l!<-5B@gcD0@9|H8;*J~~a1Gu)wbHqM=C?VghpA84F)x|^NXS;=h6K73)qbO+Z* z_j%w8e8!s1&)>!hYotY1vblJ4PfQS&UB?(rZ7$xFoSWIf9#duDXwyi;y^A@Ct<7soRs-W 
z)7GA|e*0HvvESd5`Ky<+SBQ?i_}NwV)XzZosicsA#6`g}j1 zmF^GetoP*LZe~lrC%ql-70ymC&;Ka?D!A9rseOL(kmN;^-$>pkRy%JVd#5#DAH>VZ z#_?HmToQ%_F(mJ;R-ZL56;Ii77bkC$?9RTnets>#xEI`C;$ZynpYUONF(}@AD;yNF zk@cz_++FS%d0_4M`Sd?d9-J%}@iTeX-L-A9y)Kh{M>^*ucE#~CvFF6UbF&s}wqO1s ze~xHQd0DKR_lJ!;C3)X)UTtGWhi8M|L*^&P-YLh3Pw(^&Io`Rl@&AID{vga3eqCE$ zwgz?4{OJ~{ZM<6YiTe4?iOK4ehhsL83;ag0VdJ+*Z%(#cBE7Tl44dARp6{C5ohAOK zs?Ar$s&^e7^EW=rrsKqS&?k?Io%zSf_A+buKKJ}B`Iz802sY;1=Oo)B-*+$Zfd#|S zW9GXn-RWb-@Ex`}GPYKBmaD9By;$yZ{H3w)q2+?PFEei#Qs3FZ8Oxp4?)E~(u>&fE6JugOQ4$jcp z_Y31S&a>98=G#B4ob0&=8|&ZkdG~YS#NgXY49oEsCSRHCHx9X*uS}EPz3&dO-o5Jw zwzUrDE&fhOcHVa-yC1e|torfzbd$w=^Q19jkK%_(-VV*-w?c zaq{nD{=n9J&3+#U?8Vot@#ETjc-Q*3PF^AG8u+ljdjr3DC}s{Bv{s zB-!tEzJuj7ds4$#uXX&ozS)!6&TpFH;re9jwI=JGq&B~{X6Gz_jEFz0kAJeq=V8~L z9WUL5;oUG%zX}z z(quV!NU}ZiKYsjv^3WhpToDE)^C!MPeKNLgzdekTET_Iv`+vcinQPs~kE-9C18Q3r z+m99KtpBoPcOKv2*W)K&o$l`NSXjT|^2KSv8Ms%(ue1B6HvLhJc{%1=V#GRk9>dPn zd&aAMQ#hirVp#57Ry!;lz6n$&m*ShYhy=v^cj{oc#%OBQe z92>mV7_}NZKT~^9^5*H-#d`VBg2`jY?j831Ed6&idt(m|YF@GCH~fFq_RJNHn<@FL zbo@Zg`MdZ>>D7ehJUV#RTF(mBc4lKPjDHZ=VV3%hdA;Y|b3+23vR>=)y?ARpR?nG_ z|Lzvp{({(fEf&j}>J@oxkswd8(+ug?j;Bp$9qvvxUa7YATDNt$Tc!x&>XfiV{kzxC z4(_#agSDDtt00!HsgI52ubpd)x#i<$(`^#YtY0h-PF^-15j%hR*8AMIYa}n%IQ8h~ zbjbsoXYH_cef;Z_bnbfJgz^tt0w42x&D$~Cz8J))dH9X9Uov@)aB!^}PB_sCn4rOQaJ=>IpUDUa>p*-^qK%F9msO%^(L&)id^M-K*CgI~KpbrHr+9KKfGX{nCx9 z{eHNzHv7s;(+4?+&p1E(dAvS0`8e4b@}Vb_=S=qBaj3&*ZOo~)-%Ib#8XDHG{Ydz8 zvS+4Bek^&d@Ok48jXwx%q2KrJko4~UV`KO5B0=6$C#!$23X?XE_|<U`(?R{hSlNw4czo@nvTmBHsV(EzRRQ-J6 z2g&<{2ZB5!f8LXPX!HLmo~!mXF+W*7`O?_;mv0O;uXn7J%vQr||EGTOesj!6ox_Ou ziQuDiwk6``shdPB_ z|h-xn|7AA5Z5D z=SW^Qng6<{tbe`a5wUo6=EhsAys~hxhjVIAmi$zFS5U8*-&l4(KY7Yzc9j>@E8^Gt z`17+tO!<2SzgDO6Z9cMh;9IN3YsBp8o|6yGi6@HvX7gz5yUU){xbwy0e4#LI5ckf) zUfI+hd{6OpG5JnyJ|M@+)BX-AubozVvUsL3dTSN??u>UEBY)Y8waSz78@rC*n2iHF zOc&g>#yDHPeq&(60gV|DA647l`M*1xJ-wgrKOWW&*7?ik@eaQ;n%f=6rn3a!QhaDs ztj)e)-_ND{OFDMmEwH2fZjL!(u`n$Bp>gt+xbxi9@vrKiJDE>C7rtzaHSQ6!rF!$d z^h*Z!r8qnyzB<0PKK1pv$<}f~`lplK6Z~7eS<8O$UbW?4`9d8zWB91Hys6%I_xNVA zR=zM-ZL!SfUrlGeIg{Cj|EObDPcJ6Vi+yVk?wL*FJ8E0IHT^jj+Y6*WFy1iSll+_X zQwBB7$zi}4em>?0(+2*zbogQ8+!6f$?(nZ5Kk|M4_qXOdueR?MHg)H@yN^r0G~F)o zpxVy`@8fs;aM$`?N%ynb_GEqH${C6?vCh`+=WXJLg6~H6@*{zLyhH77-PXHYSf@TQ z{cZeA?Mnh1-%#6n$8G$T@pZMWmp{H7zc*$cwY9UfUiKCT|5tlZbKIVeZQrf!9h=40 z_KU{c7sRRc%DZ1d?LF%k1I|^>Q1d)d+g`0zoLrt>d><2hoAVhywq*Q$jkR?$pSmu+ zT7b>`KILwe+r$*V{;0N?Fh4uKR@*w&aaYzJk}S`7-*)L%YOFi${n}@xpFH`^bT8CC zCCJC_PS5c}IcTHC$)o$H`%ALAeU+Z$6Ko?F93JGIvBF7>nIpb43`o8;sK3YRncJg5 z$-Yzg*Dc|!z-F5U?{yCLz)p|GGxwZ)$9CeE@2CajuQQVUma1N6gHPiBrt@uP&0j8Q7R#Oc>+{u`Jfa?5Nt#{>bEgf;hAP6_e$6>y$s((6_u?O}9~S zj^1-}dh6j&;#Q84bGHlD`%!JV#BT%EqaNTpBjP{Sb{^KpC+0}rJot^o-D9tEr8@M( z#!g+^^A84go+Q2d>#W*egr77<4ikIMOMVh_YSpD0&o*-7pxV}=z7y}G()~7AKi{@q zdH2Sk9(y;41HW~h-FS7u{mJ_#9~WNlna$(XgEO$^!S%}veCw6udDBf>zkGC3I%Cd_ zPY5U0e^YQK;zX@3$IsT-Ta)>wGoB&cGVy%DZ}vYP^UM{oxVPr-?fXV!#I{_)mk&s0 zXZ~hS%QXJgux{;d0$*iQ>wPQmTkDk%_~+s=ds?%$d~{v%(T!O#c%N^t-^bR@#`fqN zN!}o{hi?hFdA8(x>$|zO_;S{2YdU8lpYk2|C12dH{(r;^$NcTS#{4ME*SJ-}jp_ax zJ_}1XP7a(a*}iY7?LHA(zULoFXAkV4hOsYa{?qjGkvok)PMGYzWK;V%Js2lH-Ji~# zAaDIGd>!n2tHz8IPg2{SN7i<(?!vQU=i$!rZfD94>_e7Y7fF9fW9iuUm&t5P_seuo z*S|?^`}#caYwtcMu-i<5jpc?x$#VD9vFGf;?+(5_)mw{&Gs2U>+MV^RLEbj^dtt-I ziuH$+F>d|*LmrlcZ*GirTaS9=<=8vr@0)9XcYh@>7TC?1p57Smc6Z2C z_W1koc(@{b8GLv7cKA!O^|P;>@=)@W$=>x@ZFSb=={(0q=JB2B+r<4VKZvgxYj2n= zo~`%duvV{ucOv*jhRs9r*UKVY|j&5cA!Ylj$yMoSeaicL(uv ze7doMb7B|kJ}f;OV>QJD!5HgSpS+R&s$_n_KGw?z`3k#PuUbq!WxX4xTQl%=e!zDZ z34F{sv)RYtp4u^No?mPYv#?$-^wipB5me(yUV 
zT+kTz&;`kIzq4YKcaxWhor|-7B*;~##bSdk`8)q(qwnK_UA>R*+!*$3+5>yT`A+u6+%_yKU^GX=B@FW7~Ec+qP}9v2EM7ZL_h?_5K&Wo)UC{x59{9>WkWcBfATyZJ0bI!uEHGb*&W@2 zS(*7!-_{SE?Z<7-4bT0c`!d6^BH5p;+b|Q#SDDQm#QaA`=H1Z+*)yz@G1tm;=W;WX zvJUGtx*|IlH?i46x&rrxY-AqyY_@yk=O*GE*Hw8#Jin`3+iS_K!u_Q0=trLQXSZNS zV1Ly8xZ#_f=GRGYXFi%J|Rua#K%025=%&N@K`mP?pY{`ts{G|IcBk-cwo2uJ!6U%p*>uy7s z4fOAB`(Z%p+2XcS|<29=l^o(?Ev#YVc=zDq~yA?YU`?DUvj>!Gp-Zb5wpG1DhJnm3#RMvg#e|1XETxnZB8AaUfloaA7@%xmn!A<7baCs1PZ+3O|SAAa(Vz*{T zW`EHG*^#(W?M>Gm_(|ob%xAXsu3_9iSP%T3ty8f!%D>L!=cVKv*Bf+IZa(HQslvM> zTUk#zMcwJN6y_ymoz^vZ$vqn(53SpBYjD5m2YN8K4L1t+s~*IS%$&#j!~P8Gj{Ia2 z^e^ZOxWjcL{U>jZq-JfB+2#xIQgKe`jk+2)Kl8Y>V^-zem2JF-jAHI}Mv5@cNG)Cp zzaQy#%$m&a`k@}eY|H$e`ArXIMq&MFZ>H|VOfJ80lCei}o_Xg8zZ?4W$BxjG}jI?fI9i2B%(y|t~#}+-;xkAh|tW$ckuE8$I zJt@_h_hkq3F{1?UoD^f8lSQ7@VW#rziSEFx&HSk!>*wAxjN6|32ls~_%8km5ZvS81 zg`HA<n*w_w-ED`)ZjgkoxG;3lC1MmoOfR8 z@>2W#H0Z9(j?6mDp#NvKPxK4VhFf>w{>lBRpKyk;|KP?jGh27%r;=dIl=jCkM_b3_ zE|B!xZ8G0{5nejZ8NF36HeZWfn0Z=i@*c`AUNd{8SQn%O?}F6hrSbcj?!>Ii{Dt*Y zkI)^N(Re}s1Bt`sPu3!T9Wm|CvF^rAEx}o-_+wdPtYh(>`n^zR;BJ=%&J<;)XP(vD z^b#|*`9;`gq!#m$>}J07p62#TGfOfr%HO=Sem~b2b!Top?ytOOdZg~ejn4dqH$tNE zV%eLkyK~b>2v%zLI7UbB8S8g!<}=S0>5SaPp6}2Loh`=Az&)q8>pJ|R?6Xpv`&jmH zUpdo)QHEQJc}bS~{SPypXD{?6-G%!%_cz{iJxX`x#^C%ScbL+)A6K{!k@6?N&DbCBtJFj=>y4+&S@!ow-*I_=9z0B9nwd9nwSDJfSmib+u zo8GgRx|5m9)?NAk@Plz*=+U|hKPLY-?kI`Djbm@V?#WLlp_plz<8@r#3+p91GjAz- zmtO2l30@}N1-(<(;}&O5@b2@vF7v7EW4>{&6{noNGTbY&-0ue544%ExS9CXaeRgo> zOFc$+WyfL%V~&=X%((Uz=w9sf5}KEeH$lf^y|iAcv+$OA_inw!nUcKByo-95F2S4V zy%%&n-ZRnO9|n-wm0srL+C3_PTQ$a6|B3>9M*SH@5o*XO59r%y{+| z>fY=O5{8$aH&Msuy|P}Wv-0-HQuC!)Sy-3!Ze5Z!$vZFVzgf@a0PCH*v~j+Iz2(;B znAc>bXN{O|q>Ja*?Dt?dWQSzF*5h<{b{zK&!5u5Hx$*5S(tY?DB`hxkZ<0>HTQ1pH zdu5r~(wwZE%X*J4#hL7Vm-IiJ7jlsEpL@LKRJ2!~bzN5Z-I(`Qu6y2-*@zj6^+u1^ zJ(z9H#dXh+%yANjxq_F#%wp@l{7e##nbB-ldy}mba#u)p?mk&=z6>)P^NQZ9OY^39 z_hnt5^->P8-ZS2?D%q>Rx*@CmZo+#fH$3mfY|ISJdaEbsp3HXU;<;}q=6H$AOlW_J z?#Iq7;dzEJy{n5*S4=0RCywmd5*D~tW>)(5QDT9;#9 zlOD`Qyu7S8`l$ZTepPp?%)KqmnbZCLpl|EG>}Kq6y!ZMOW3qK0c0zX!%bg?%cuDLp z*8{oPBoc4Bbo7p-yfu=GcSu&7-NCBB`ipf#AJps2mgilUp7tB_^0D6PW4f9=q@x&?QJ=O6VQ-H+d#AD;O^e>OkGx-UPGJBMSxcXqN)$W3Zyg&xGuE|Hltq?4Is z%(ar6d05t%t;ov7x~UK8_2w$DZb%bOe#Sd}TvvCWHQv8dsxq^gy{lU?XWILu@9O^S z7VHSjkNS)Gsn-4YiQPFo`-8JnbRu>#Gb{CA@5muhm@}obndHoMl81Rj)|#!v%FVi^ z59#EFr%$%&d*2k=?Fz-kUUSZZpeOlLcr<%Nnve|rF<{Zzy>4$nSw+%N6=c}Hn z2l2X@P43Q-nKL9AFO~hZdN?EpU8 z^RBey6=8kSXLKERs>S+?^GI*e?RaxN|E?eDA>6jy-&x=EEIpXn-E0bXj>4TO$(ifD zJGGg0)+6|NBpUap=W}%$=2z!7>w?^qvdR2D`_;JxxcBu5U5$B9TCs|9KI^l(t~=Fc z<>EZn?OF3Y`=KA}q0Dy7sGRTmUp<7^!+c8j{hc{WQt;B)U#~}UqqDY}>E|y0)ARgJ z%lu}&MHk|plFiQSH&cUKko!QN)YX~yr8Tb@?~6XC>$%eg^L2PnqyuZdXFv55J&f6& z`3L8Ro~?)SdU|gv_l?T@S5orQ+TWl@abvJ@oB2iNTc_h~mBPH!vc>EH@2$xz#Cxbu z>22m~@E%Bi`)zo|d0+K;{oVfG?p2rhR5~&j_#N~=tou|C*B#h@@_y<$dKkBtbE)0; z5AJM9#oNeFXJ(`IXm(6q9&^9S0_*hbZBm4JMz)$g=u9nUVdf)!T5mUBllf5EvP!VN z=?nUYy?@-R9`BiS;w|+17v3{HfH%UrBRd-Jf4sSRxbE$28h8GaJ4aG8)0^9*$FO7Z z@|yWg7FuUuZ9`ap6iji6E`~R7uGyILicelt@}n}&XqL04CXfLvE10Kd}ao^&m!xL+#OPkcTTpO zJ#4QIuPE<{KC5>+Q=9i#+OtZre&|d3f9}&jHaqtZ^M!O_F81s<<_kSacV@@n{mKi* zn{PeRI-UDQ=gyO~%#7x?=yB{gto&vd>rBj@Qk->Oc9=Wjopm|II8XICU5E2T2HESt zDb4w*FYCUnU)-mm)MvhwU7mI2F7YfF_oW`KyYOQ&e`5wWv%q?kb$a)W!JjYbxLdp@ zli97-kRH0liSG50-c_n+1xfgfqm5b;xU&>7Upg#$+{@J z%>B(N!FjGP=z5H2GQ_z~jIxYhlum19zcWt%bkXg|0*Lsic&R%9O1pBoftGjte zY<6(=B0WZDbmv&!(b&vFoxyvunA@%=dQW`bGRex@Bc(W(WVe}P-un-yB`sQ+d> zm!bALGs-c3m1`2xy@Gj9BVHlvH+rw`!CmfoNbWf4&W^(i!CS1y>P+q#o7=?PBAt<) z)!Ys}iJgG;#@=$Bjki}yv$na%WxdC2eO4*fOMOZI!+9ZH808ti$#ogvPQh8noo&o3 z?Dt!}Pxs`muosFsUV3oj@dD@bkoVT!3Z0#~ 
zPs;GF$X;^|IHftS^krS2@luAlQ&&a>MliV{1HCr{s|l-!^*g;^_hPQ}JT!BH^yJ3l zh2n*_ztnoXbryGx%Wh_NiO$T=Zg!WR!aw1iiJ0%~uhcoX`=u;%yS=M=pZSKoGQ8LN zif+JqCBw~hV^w4Ymzy%k`$O`Y@`_sjr+YJ3c@~B@QF<}svqH1Nm|12$!8)t^#^W|O zyHsak=P^I+tSC;oiU)2p+uVsYu-C31bA>@_} zW`$xkV->T0ulw*;dlr^ANqRFAu)?sG>xnv>yT)g>FtoZEM^EFP@~$MTgF$AH zrC0m?7yF=;=U$fs&NOD0W4_hbbR*sy>A|YZ3Msc`2qQGBIj6Yw2i=#o#a1l{lVNCzjLt-c&p|f*y zncJslaL>43GS=ZB(@Z$M*6-Zx!%~rZQw}-Plv#oKpT41+@ZL!;R#jGLxhumMVOcFX zC9OZ{{;YMLMPN;pe!Rq-@SIh8iq7e-iFmEet<*WVxy|j@Gr7q*M}j>2;q^Md^Kg$y zCFU(TZ2qjjre@5F%=h}HZp!;lM%nAltHui>_aq#z6{{5UvmU@(?^#6NH0jSw!im6H zt*7eYX8&^E#LPBkSLvMGJZ2B*S=Y~-EB7UW{qW4z%+l6h^g!+g&mwWB%K&y#UPRs+Jxz}=o6CKZaNC+)t^Z=@ zHG5G1%TCGqVsC@a$2=xgcz5KexfZO-tdIJ(KE`a$`5=86H5lRKfkd!s z#M|guWZoEmZ8P*hZZcjZ-da6f=XTGe%ywqi=v?f4<__uE?DNj1;(fKhQIF*3XCIfU z%)4^Td`n&x-Y0!WA2-*6_fh(>YO=!1Ly2TBBCjp)n+#@c@;eG^toO~-gP6%#ky-2X z44ubalkwI{Zf1TnhxHuh1?N(;%Cf()Hd&9dF2HW@_X%B%dryu#*NRz{`B~r9C(O3w zevwm5DyK72T6!RN&K4u}a$Mk&WW#`icx%Ri}vCbCeck=tRuE~BVr<`fatih~c=9~2c z>(kb4xL;)uvo15TJe7{T|1mPMhU;zmPu4{5oTG;^Q*-{{Y|^uJ0rySC`rYhCou66Q z+;P2tdBxfE+=lvpo^P`*!tQMTjIPCfB&Y4QW7cF=bjR=353SEwx8;75!OVKhDDq5x zw>M5YF@IrYVvf+;bu{K&8O}|^`jfR;&(Q_lIW;e;xlOtNw}`nDdLj3!yJiS7J=?B} zGP{^Pt7|hK%Ncv^d9`?z+~J4yBkQx)?U>(X5@!hW@1VQNbBStyymaRN%FE0hsdwn; z+<7vBot77kw?)s@h1@v}^AB^IbwO@Xb0_s8ZpI+b?;W}rudBIpx(@G&oVC}1Rhw1W z`+r(LwmxUwp7%qBvi=FWqr8wm>`jnH{yMHn7xr(=EbLKwr;fp%FC+Qsn9-SA^*mkJ zozrsvG`B?;;ukY_N-yST3iABksf+WvnLDrR@}9~$dmUMISV8}X&`_JXd*d?x4ZU3ab%vo7-&=2LyaT{<%V#~CgSg6=A>q#LWM{otHz zyfJ#Wj>TIjqnR0aF*(u9ZMR-vU6h@k8QttQU4&c0>>0h3n}r+P@7;Qmb0xVw%wE)g zbDzrv`<;39c)#+V>5FDN@qXcqkcO;i@>*{AYwOMn!N|@UtM};GtSR;u=`qZV%vh}G ztR2=1t&8z9aATO?uA7)I$}egDtX{^?%C6>22+#Idm*V#{e@Xwtejyj_bz%O^{Ehis zUvj21^HyZ)CE)9;}dz9ISDAua3i->JE$bSY{?>Y*q~OJFOR47w2c>#x%b} z7h{()drmKBXX92kAJVhE)}`6K%wE>@xi96Cy{^1}c)@rt^krwd@P6ZrlE$nU@>ZtU zYwBJ-d7(HtdE@my9hbL6#xXN9$#;nf_&U~q_IMbCGj5S)C@M6k4nQE^WD>UOT)&#v@$73y( z@w_a&xSUvKcUv#9F3HZ!jBR$8F2OBh_JUr?z2&|+xHX-bW*yqze!t7|`#5t|H{`#T zEB3l`8*oE#U+JsPb>jx-jgh9zSn{7V>)CY8Y)-svE%gT$#iOtz#z0`V| zbt!%pZXENwbxC$v^B46h_HA}8b2<43q#V1i*=xEH`;A;R--FqZ8It!}Uvs8AF9auy z{js_kFSfjwKCG~e+^k9ZpiaP=?tRPkL}#)wFlILUzr5q1l;`&|dtEnXmt()x*K|*ABW@_>8{LB!k`vb6INh8VM?OejRyf9W zb{^hj>q9yrYle5O(370Y&P>3HYi^(Qa_bq^WxOLBKb|vtb!q-(S;M~Td>u2nyyK8m zVD~q7L$|Qkgk7HfPG8r(xQ)4?nQwJZUMN;Ld*gKrW?cCw{dnOyH~4v(Q>+i`M68uE z*_j-?gq(P0_FJ#8o@rf{pPe1w{61ZVeMQ!C@3~i9Gr9SPr6PBLxtqEvw*vP+eM9$V zHerV0z0?2Z%Ine$s-Hca}_g>%B_r0SJuPHAq>p$I_6NVAN{zTo1 z6<qni literal 0 HcmV?d00001 diff --git a/resources/Box3.med b/resources/Box3.med new file mode 100644 index 0000000000000000000000000000000000000000..d9158ff61921943e73be734588ff4d345ebb9326 GIT binary patch literal 27908 zcmeHP3v^V)8J^9H5CRR43YDO1d6+=NB_tt6>fOX_NFdpSgdn6y2uO`J7}}r$4UckS zE3I;DrGhO7ZAshMLot;e3rEZ49H^xj5NgViK5T$`Kn-n*fTRNH|J|7%vN!DZR5v*+ znZf_g^Z)NZ-^`tz`RJ9pxRKo4`#`M zs-$$x2$^0@X&t&E;1FTf{Bx?Rf%H6J6zp@GHF=K5Q31wGLCw{cF3RPY3q9snkhy*m z4iV)u=5aLU2vz;0kGo}b;>In5rbDK)FZn{-0 zHEz6X=V!xuh7=~;)2!;H6Oj(I9%jp5ChOVK>mei8bFshm&_2)EBOY@n6=2NklDJ03 zIhiHaSOyyNd7qbU@f7MY_rg)EGQhb`;H|P3o@r#`8dK%?dVj!M2z9@qL=3lsK-AycD-Uxen355 zw%LFCnA+`S=M@CBch!$Js~!3sP5-j#41h40(BY((X!6 z6O~)jZW6J}!$dqi*>SJ`K;{u&*zkn8qv1iTKHj>CzGwtA0vZ90z^6u_^WGR|$NDc^ z+f<;vaV-=nU^}Jv#ugMkdhvH2T8|%L3RNnWIU(fUINTa9mpa$*@3dV9Lz|l>HMz~! 
z6q&D6G0sGlBYoR6-6r$(Dwes>y7IGu{$Xyvc$2UHzysxhcr^WN_+r-6IqXX@PEK`W z=F#@5bx!VbuX@$_+?cP{HRvNWs_}o)HVp87$sX_f344rj9Aab z{?3j5&TbA7V~w@E2oXuAAB?>tEX>*>htu0s%y+Tcq?8DIJPjcc^#R4y&~g z^udW0qLrR9BkB`nIqGq~8lC0 zr_{)6%upfrsyV*f%K4t7H>&fms_GKqDA6*eOR-M@?s90fPtgsL;dgib=Z*ZHrRz9I z+Mte>hW$WSo1{)IX{9u2;GfGf|{qHf=96N)%Lm{fr7W@uZleTH{(DP z!dc_++KiL0Ol~({KK9hvgGW!AkFDJF*C^K+WATP(^8UE0!X1-H3|Iyf2&zeK0CbUf1 zeAwtW?`cQ%kWY-CC46URzn9J%J1VE|ICjr2qxqSv*2Bw=m^Gt6y4Zf=ggJBW_HBQ! zJ#ADaJ^ZcNk$cSpmutfp9X(-uf71S_2Ws9no-5t*k2PP5+AaNVHRcTY>VHN#FPoDt zWu;C@yky)y;kAwrFNf{+yqxA*Kkb~c;?3t>A7^}Q#%x);Dx*)B+Z@&ynbvaDoKf8W z)WE;An~5j3$F+uq?;iH2qSyNFIBx8Te=DQ@Unk7Ddln>be)F8M!PWZ7{{1_RwejrZ$McrNgt_+} zNvODG;oHW#I|o$0GPtk%=Rf^%^PUgS7!`4AhClq?>t^%lF>gF}sKr?P!X42U+bG(M#_32If4tE?efAr8d5*w=jX}mvi!uRGq*KYKizcPN=cO%`u zii%uUoN>v>9nex;(-`ha*@ z_%;Ar0IbdU>JhdBZ=ozlKPHf&ygSo=H85809H{=7e~ zz~6a2SZl&c)iwJwQ16u$s1L@PF!lD%yFTfxgE3v9|XP!Hn0wIJ<7n5!3OS!cHov!j(Z~w+sHE)1x^WVkw<&jgip*h zp$^;;bkPRoun%8hlkGtdToT&BNBD&@?gRW{4ve`g#L$5)^baxag>vY_AC#jFY@(kK zv_U;=g4=^1;QinW-v{kb4qGULpbc!o7JNb9pwFBk^f`}y!XLKJ_K=4S2#*2u?e?IL zZF>sI9V7`PBS~&1xszlp3I4|vStQubrjaC(j3T*(1b=d9kff7LAQ?^KB*81Ai6pm^ ze32xRB$Z@5Ng~N`642x0NJfy{Lo$iPMRF?%sO4mmF(kKGy)m{jetg==Mk{;Xb}sq?L#0Qt!<9f!TwS6XxGNCJ4^lXX!dVo z*oR=9|Lc@NJ0H?oAuCXi{r+d=*0HCSp@(($hR4%BZ!Rv7*q}ZX=1q6wvHF3~!XtENcJK8ZJPUXna4^j2a2i|=_$Kf);AT+H zoDuYpM>%sm&<8&RozA*w$8Dj9I&RCH4|pH&T+l}v92InMZ}tzo6UtGBI@m!P<>0T- z4)(D|vkv=)a&8Oz&_@|_W2_7N@Ci1-H=zys3_HlfKX7)qH|ns~BgT3KJuZVj@~B4) z9rO=2a4)3M7FeO4`;RnuFWe7pSkFOe*g_uqXu}x52J-L?{3qI>3_igQ{6{(3K#<4% zkY@kT4((@<6p-YQ;HU6Dk|L5klKCX~=_?`0C-IQrXKp4*AxSPt1qnXMEhd>pl1*|y zNhyh!WD3c>B=bn{$?hzY=_FH07Lcf|B&Dv8pn0$BV;r;>8Uc-fMnEH=5$Fj7{tF+b Be+B>m literal 0 HcmV?d00001 diff --git a/resources/BoxEvenSmaller1.med b/resources/BoxEvenSmaller1.med new file mode 100644 index 0000000000000000000000000000000000000000..79a495abc1a8cc8c9a76eb2e68cb9fc3b3700327 GIT binary patch literal 29476 zcmeI33vg7`8OLvMu_?qFQhWi$8%0owBoHW%#Bf5gc|fuW+3;*6Y?6hnY_c&~AX00o zC@2=|11nf7wAv{Zp|rK27Rq)k3iScCbyP$Pigjvn>Wr3lI<@xu-FuHF3)!~QFeY#> z^UryG=lj0%Kj+?i&bOD_N(zh4={KmK!IkVWJVvT!e3kL^?9rE%e;QW>+{!^=jR)!Ijn_F!?e1}>osJQ znp>SR)Ukc-0Vhk6{*Ik*lw{&>?fh~{B9ZW-P}E!yX*HV`hohmEa3Cs$CbM`(sX08* z)RJQcT3W)P`am=kX*L^zOM>A@OH;5pIx07_B`>!nZ+g^hX^kuiH3ZwtwovoJaPX?O z$l})epxF{>jRxvM;ZXE?Gf>|eX=^jf{llB<+FEjEg_;{8OWQKc@<6>^Q{lAC&M?bD z&5PSJ%s_L483>2Xrogr0U45jfDcD*c3WUuW&7pR4NuV_pXpXjx%B^d~hftdcw?)jr zl0YaNs0#vxSnJQU={5$) z{lI#RU)xTzq1Sk-R4`|=JKj7}rZ4sOoEz?(_sBUvyVIPf%A9kB`&)?-vy)H$`h69J ztdk3+#$I9_dK&odM^?G{7*5mEFzL*_njgvmgi5k%BjMc z$I6*byOqeB$G%SJSL(AGy6UH2H}U{T`0*!_;mXZ}-$P zDPOkBN5|PIXQIYSJIg$mZkHc+QoGc<{_1;!@v*iyp5NJj_@F*f9L>Eq%+EM>>N)AT z@z>6C&fdj-IvQTzGu|QeNA+3+%(MY!~WVmH};%! 
zV^?RlB%_}luXQ%WedY8+HmOe^tL7~EMU~6|bFy_VG7M#m?Qe|z*a@B|Ilo~iIsF&d z(=W1iKSz!6@D+9g?VPXD9Bp)a{ic0IC+s1oR9PLmibY>ywHop}MwXS+ziCu??cIM@ z{hV&%H7nFGhB<3IW|cahqt`j-U#IDWgrmga;U`pgs=&&D%Q{t^7KJ3k{_W0QJce=N zy0O1!IdL9r-t~IX^BBKgexG5}L_5Dtb=P_^410Za_avNZK5Crbw|fg8-OaD7@6i(<|GC$HYLs9sf0n{dyb<8>!hzT0S2IZr(C<4q4$ zsp}g4oUuKxQSEY24~JXQVu2K9&Mr%c;_%S?4g zs9@fZT|VWXcx3Oa>;-E1*AL$K;Gh}mj#qQG9(ZV$x~rk!XTNwRq;{;VxU*(uol3iE z=-lj^f~w-XuNAF2XTJK%qiK>UhTU6FncM#r6ysxq8jqHz!}QDE;kscaDv% zpZds#f^j*q;S*x3Mi*Dd-uT)JgLh~9W6Ns)^xBngq{ZI)(XN7zZY@^tt$nm|-H~ar zm6MLWzv$KKSaGy&;Na!iF;B^syWYy1qWR2Qp%R z+yB_*hwjNy+us}WQEp-1*p@xBN=8;*8DsC#N5EEycNCxBoh|sBfIZIV1$?H$=Y4$A za*p5&f^!AD1qTSy1cdSj3JBX1hP*()-hy!EAi-b(KYo8nFjR1{z!anl^!rn?J?7Hi zUim7#mQUsRrOs~8cCnzs3M(2qPAKt4t0BdhZKe9V0$+t)4?m*?@Xgh3cD}CfTr`)( zmF-^4L1+Cle5F=Xy#A)R{{N}0^}DIdfXjf(fXjf(z?sNEeEjfGb!Qg*-Z551qWIx+ z6YW0ikKFiS&xs$#e;=Y}Hh#|Uyjr((P7}?!C)bvB=e%eAUo>NaJuj~~-wNp&?f9+H z1J3=b_vZ0^ILB<9XZO@Ho@?y#p&C17-uK#D@lW>n86|d`$4Tu{@A}(aF+uyO$(gcq zO#P^HuhHHeF;eIA8PQgGN{H6$hBfhFQDPIEpFftK-@vc6o@^*8Sw^U4RB2Sfmj0Z2VxD}f5a!?O<(d~46SVZSzNdO zFDX9aJN9+#1D~;t&pJMWANa}`@J+`~7_S}^?ZiqLzmBosH?o=+^dPUtr8zf*{iDCuG(`+gBcy4^C=fsD)IAT6uoWAs5Udn9aQhu9d;R^mUV02$iQjZfI5Ki}M-mooiL z0XFD|9N*#K12HD_k^PYf|-JBL5AQmL7AXXkSn-UFiub* zC>In7#tYzJx*$t1LQo=@EHDLr!4-mh!9>9f!AQY0L4hDgz}V09>(k%2VPEFRJJz}% zG%wv5Yu7s6)MdbBz-7Q?z-7Q?;Im;MKCU@z(aHTt6xTdlYxiL<>BcpCPFyqoTO~cS z-4%~XNwPJ@$EOZ8*!8q?UiyE3>tLU5Uz|3&|LPdmys$2@e$I#o<|UTlK1dEEmg$YH z-L-D?e8#Vvl#tB|?Y&{$oN7KUyta4o(HqzFneH>OAL2qCV@kwz(gnn3h%1pNrbB#= zHtL9(=)5U;WY9r9vgjl(L%fQ(6mciyiBBO*97)U2hx;8_bQ2RqhI(Y^hi&wbM;E%0 z!GGd_*hJnGAVZr@sYiws{m2rBqn$GPh!J87|4502(N1a#@E1PFqnnr^{!qubkwb<$ zd?lW#`M^i)U=v%qAO7HrDL@8$^g#|?z!abhpOC>0_DQuIx=jHx=%gJz@Q5w)r08RO z*rX4>Ba2?z(Mf6wbgY$>I^>BX!V`J;M;~SMGk5rpZsro*Ndo#~1OAcc-AG&(nX?3@ zfN@ZVF5-j8VN0j9Q;$5hs7Eh;kYWdW_|QjyEOrc5#4r5SyucZHOaX0SK~QkB;A?{W1a}Iq5iAiz z1dW1Kg5`qy1=kA#f_ntt6wDE{3Kj_(1gixr1UCr2E%=t8MzC1WELbR5Dfqfzy`Wt% zUvRhJc0sLRw%|Geugu#7HwnHfs2AKTxI^$2K~&HrSRlAXaHC+IV5#6P!8}2WASAd| r@D0H-L7iZ&V2xm|piSUjCoJa=GLu*=); z7FyhTRkFO^U)1)v7WTXCR#n$l)-`Tkm8?xRl-DF1y<~0U<~8Nz9?iUO2VI2qx`t%Z zQ!hhR0K@)<3kIqA0RnCD(ZMbB#! zZ|)414{G_t()L)M>UrWnT5kPtOl+CA*ju1=uyXLwju_`wiFjFijP2Oz*N;6OG{}l8 zMc)3sELNRZ`dWNxd+hKVJ7cY?ZGXP{)%MZO*vXFBCywQ|$4>uyXH22yP1`reJ9ub$ z_7c^H|2aNe4QFNP&r+RO@cho$QC-{YeM!sMTQGpVr_aIO5>GBr!QRu(-n?b)u`Dm! 
zkH5IT*YomaB%KY%w%hGljAJ96ZB89LTIXE02R;upG=H0(k&1>2*pZp=Ed{Z#ztISLaRl& z>HF5-{jehVW%9>RbUz$E_rq|Un`cvfPP})P*`A=TEN@J?AEy5eY`r2lmQfh_{6hGA zOxpWljg8;nF{9hD!Tm7H_v>UgUhQ7j;qH(1uI@hR5?runiSYeD*DLVV$kNkyu2fu8 zzS>{UlV&;oQrzwLEB*2WrO$cQwf>Gw=h@Fq_`HT>O~2>4pE7q<&s(v(qiS2nmL;|U zU*lfuU)bm2>s3^*@lPGt&NKdT=n~U^6`CVZE*I|#i!rucOeL(>x{dXI_cG~aeb4ne z{6*8Nsdl};j$Uhp>2>eEE$zE^Z7Ysf?%feDKd?PsSR7wcw5+&fSwT^}u%MtcURYLK zvZTl>IzB1e%T)+Yd*_A1<5hc~j-8!x_VtckPsfhFeLPm*{OnuL9QfDkN8k3Z68(u3 zh!ltvh!n`AK5uGhKJ+Ljs_=UoJsV$$XXEtwp(FgrsiMMR2Aa^IjrY?{qbDqPAJVw(etQx?@}&tc|R{Ha6%Vv3=~y-uBa1 zw)dAui^j%<&ZFg|oi}#5`&{r8+vC^Nd1H;cuUSmL>imBw6bC(LUwTWGIYLc7p zNH)~bdm9rr?+R-co=4O+^*`^r^c0PH94Qbf5GfET5GgQx6|iUHxJ=|XzpE_0t^Lg# zUz#19!}E1CZyY}J#`k6he(7|zc{a-%7vyVkxXNhFept-P<`W4|-{?&7*Yo4I`ST*r zOL)cOz2c8ul0VD6!{%EGN}H==?mX9w*EzpuKl7_et~Cz-Z`VI6`#eLgXz)+Sl#A{N zn$vrX0)NTgX*48*r~gdnxzl7d5$bs#xEMd>ul1fsA9eFzx2sRZRO0=)eKz;nz*SDq z>9y&iSjqB&cXvVXq>G#2dzQX_2z0l2F+Yam<%8ltCie4gYOvx(Uo{Ph{zI)tSINlI zV@O1f8XX1-R3`!>GSx$EVy5d3*7o_Xe@lb<)C#vgSlhkxEa%%#>xb2@pI)fH*6(m_ z$931%PPh)W=h^;|vfbkw|&*3Y;7aCqJ2;@9d=y1&ewU*+ls zS2w%b+O>M1Vb|AH9z30wR^7hK-VAY{R4I=bildx%Z@Yq!?pQ8Tw6SZ z>*hpY$hp?x^KCtRy{g3j{12N;`_#er&ZX|UcAmA z$4^%GwAWj&@OHb7Z7)e2_YIiq>c0DXx7YXhOiB#y_P)9X?Vtbluzz~%F*gz3-&+sU zt)~v}5BoQ0-*hZ-?VIj>>+tbG`#r~z%g7d^1anqC$LE{IF@l`OSTRnF7kuJ6L3~C` z6nP?FOcK1>pDcKd&uhu61i1!YZ(c2~5!1wUajlpk;$o(_PRtUd;Yd4>p15Aj6Qp1k zh=pR2xIruyH;Mwmhn9t+NHG1rNigj!6=i~dE3-^27dMMr#I52skr3r#g;*&nL{e0W zRia8%i`C+G!Iz-CCFDy|UNY7Tz8xg%(@5Y1wvxI^42TEr%Cm)I;?#TKzu z+%4`A+r+(MyJ!9?XcxQ0{bIM+BOVZY#XhlL91stRhXi?us9YKf*uNd9ts{aJ zr{&cyTN(6!WgQW#*mdu@^}gI+&-J5}+`5>+7l{`&|Ik}0Jz$!mqmjgnj-Ila(~3zi%G8nB=H1hl_kZe0+Y2 z$*9UvR|ji7@A#)^Lo<^=6?-;+lgXjV5m)2WLrg|h?3j<3q^fkenz7#E#=RYr>DvB@ zBjnFuz0+=W{SCk4?<6Up(kgPq`M%>XP#Zz1w}nBi?cZ1sHz!C|^izn56T$Q`<59#` z=g^p%@tU9J4*l;d{L6*s$)BbTpX6GM-OB_yIr41G+wsv@u~3j_;}~*p&+L$2r)fV%oW7LWHCX^7UTiR!Nmo6J@R}L1#OVu_Hb?bX*Qt^{Y3}b;aIfd z7_`Dp^ubna$4>fTwxWmGi@s=O+R`rP(7_8@Aepe{r=iS+>gYE3+cWk&n?+&#dAB-?P!nuMIcL)9q|LreV@J>^3G|wVY zu3-4&3c_=G^XXuXfes!{Rl@g#E?#J>uzxV_3A_&vO1&)%YMnXL~uRs;x`M% zd9@&~K`w&pm5VAto`XC^k+7T(b*s2tl!%SO@)s+$Ob(+_)QB=c-h;f!t%7_B`IcI- zRI~_eCnv+>3*UQH3TzlB$l271<$`<->*P>22z~?T-zq`grcluLwSw=qcm%vjG>a92 z_Q<^y3;NX{IG(oA3XSagw8gd1gmch}{EulxyJ%(Cr%iOZRFBEx=zdyYXrU}M$ymwP9N|AwwR6h-E3T_ zWo)b#=!<^`vKOt)&a1Rc-^@<5!p2nlXftdhT4U!Zfvt-KwsJ1|V((~yZ!^V;#RzeD zmmo&D4zWpWpd;tEig0W!&^GkW6d#PgsRFGnKDaknY{a!3jtlH1zLJ7|S!~p38GUGv zep-L=AMuBVaY2mX!%X87ZR!Mi;6viU#-;U}F~s=7hv;H4hi{w3Si#t!|HQ=@fmXyh z+wiOTv{cK)9x-BLW3iTrIpPhC?i93P{bt;t`7*Id%o6l_y+{b!$`@!ueAf%c4gMnr zX^TE#d%36*T!$E)EbwWoz@LnxjRHR~UhoyMiC-JVZo&Cq7M~MrYZdrnyGV#f!~5GFy@EcY4RL^9 zJ}Vv)PmBA-D)A-psK6ii5MMkj_6T(Piuk<17hA+qu~(qWlVXR!&j$tC-!0I5xhNF- z1wQ?g zWSM_oH>*b4dpxr{sv`v=1tJ9^1tJ9^1^OxwexGrc2go#~zxNqOc>q=6IXZfuF?`-< zgui3-ouog5m4|(w36x4WFMWiGj|%&9RG;rVPUcBLVHPy_CG~yAX(mhkl$iD%VnWkT zVH})dB6*RO!HOI6XE<&qGpSW!|44|Nq1GeE-HS(x9=`LUKO;&3a#iG!$O$C`IW2NM zmkaV*;%|2|qOqgxt*05`<2>XE?VkRGuyF-8a zUMAA{;Rt+)rk_^}VxmLo7Rn5&J)$b#&*7zD}?D?uVpmP5yY;=S4_*9>?+m1xX5H1ac1K? 
zoqfbw>iE7!>t-LeO&5CwwuQ%bsvj6rnS5Ylo4!vIHlEgMd5^#kjH!Ksc*llTfgKj_ z*n%BX1$HbHXmqErxF9A{$2az*j_)e1hsQVeJSebZk?3!16AKoX=#J+13me-Dw46G& z>E{?hOr(x+;sV`?G2%V7U5ocjZTC06X*YFz=V`rFq|=c&%cLP=im{zezd~)du{}!5 z=1cS=j?s`Y)grbF#<-2Em$m$^_>uUN_?~z}d{g{N{7k$fUJ$3mcg0EZjCfVNB;FRs z#9QL1cwHP8zZ5Tuzlk4;KZ@^&--=&|pNhYVABaDQZ;RK&&&5wfmv~;hDZV96h~uJD z{6#z`elL!Q--uVlH^kq?kHw$G_r>qTv*Op{QZ3Q@lS|d}QNJSvA_XD^A_Xo*1^y2X CA>Ud6 literal 0 HcmV?d00001 diff --git a/resources/BoxHexa2.med b/resources/BoxHexa2.med new file mode 100644 index 0000000000000000000000000000000000000000..f76852e744847420bc0ac2a7656cae831d60675d GIT binary patch literal 31124 zcmeI4UvL%G9mh8z$Q6PHrL9_R7Xd|7On^dy|85{RCK?h*3?fkeDA?G_3H^FU|pj1Ro{?t?S+fd}l+8Q<*pv*-Mh&2sk&_EK`g zIn4K-J-cUr=lA>me!F|lZ*Ku|066$ z(U9=BVYhuq+n=r-DKx2GCVZ~#-Ji}WtczAe%hd)}4jp;Az`0#Q+&WTVKYAwh%da|t zG<;bV9ej?>t`lwV#cd;nH~%tR*sa=*FOXeBXNC(WN0*#9+Bi};{h#52LM^)a`K8gJ zBlp&~sDAUKdG!)pSErvgo!I<`;lkS*+w^@$+vwZ;CHkJe0Dad)a)Ao^p7#1Sts5!S zMfFMi#mUhqS|BAcHXGaiaAYx#ju^W&eetNxx$Jj7cO06(jnClo%kA^4rue*xz*HHT zmsHWXt0sO2diy$vlN+=r_7RSsDTl9e^$uTixWQl3)@R7aNHG&D?#!9+Y-2FAzm#Z-N{Brnw zPTBimk6piRnla&6cR#F4{JPLzuXe8+_4mh3zMeSg3f!={ME3sQ7&Rws)M@tJC>A%i zKb*9i#92eK75n2!r6-Ro=bhKxn;ghH&mK3~^ZH9Y70>gJGCyh*t=~V|wQqFKO51_2 zaj)5z^?7!@j_!@gsb8+=*`y!56!fn{YXr*m!aoIzG4{=*lI82s$2TSBo}o-`;p&-+{PgW!&7lZguOrmbQ3hbMxA`ZB1)S zOKY_Ic#9%(g=Xm=wC_6Jx~HvhZvVgEAKKAYIJ*6>h4{VC{{FjveD?m)?a3(NCom8g z2n+-U@)$_Zjad(k-B`Y7rS;HDoT$R{Hq4DL$K1Gle&{V8vMM9KcC9FKbK_ZwzncBJ z#-793b8zj@{q9Wb^WnX&J?3lHL_>=+eHQsXzILtJ`n78V`-ttMU!ST!e6YV#9xYrO zmphNvP2+g|zpq)`zw7;fBohadXJ2_+o$VXDdwNQ{ww3z(u->i- zTX$tS%dR7O2P?0;t~^GA#espqKwuy+5Ez)g4A|T_mWn*{PpvAiYo&GLJ4@U-%&)_` zar&$qKV0PeGUjXZY+W?h!K(sMyuyVtH1C` z{yP5-n{O#7XI-Vf?mRCh8oR?C;5h56>%25h`}6Dn>O5}_xxPO+AuktgbNuXi{UvuK z@4Q5)e(c@*CqBl<{ekp4`fb1dwR5sHONozf%kG`pZc_8~(y9AqLaQ!v@9tdil#3hp zJlhyQyl$i@s`df~)*{srlIJ--6W&A62F|i2J`W&{lLq z<<#T-?s#4u<(&6b_velC74~l#RQaf*_@`qk%QqG;wq22zJo_#}Pt(V0(=)HOe%adk z+x}{|O|I1@KU>@JY@O|AZO88eF1{jPk3ZqsR@;87ZNJsFf1>Vn=O5bQs|tR2kH4!bFz9iFHwz4JT8#9f6oNbvde@%gy_#`A4oCr;9jz2jm{cygcHYjg9P z_qz7ECtZzCkGSdn#Qgn?o7P9Ce9g7K!(aC%CvR*gwKje)^*fz07P=~|U0Lt{ukW~6 zqq}lxR>SGIIl%(FLLnwj6f1PlA!2Kc#rzB_|jj#APf7^109gbe)K?3WT8(im_BgJImkp0)4|4POvXTGIKX~* zFq^Rfov;g?%olLOxX7-RqCYE;sG7~+)QbK=&%i)nATSUZs4fH5n(r?2YGuFaIv69>d8Tu@?Bq<_Z?>@Q%(@m2w3iY;v8M%Y|IQ^vM-u*YxJnYFz{R zgJ(&}-V;s@`u#dyjcdT)VkFcDniF5O-}D+d8mKImHOWbzJIU+Gb#Qib-qG6KU#++? 
ze`e$6lgAx59Iu7Axzv1|*;ZS8)NW*7xtf!+xl17b(I)VH4>^(+fgB9E6!I?QR>)bA zhaopo6z&no3*9Gd5LyNDID9A6E|7bv7f4l;*Wunt8{8p~d*NQmcz62kTUC?Sq5l%$ zZh`NM)(A@l-d2&8ULo-MW352GY?VOXrdc2tgp3Y>dFgtAoX-OS`jZb@E|BlRHgrZN z_V7qEx$HN&T&k`u!w{Dm*cnc-LTjRp3>2gey7PVp~*TYL+*MS-!f<3h*7 zb~v{&sbc}28JBorZ1^_&89!}zRu6n(e!w>TKs*sM@V!{T53#UbU?2R$JuvQxIrxWr z^nh<-7M?i^Q) zK7=#4Y7mGK&WB5MM^Ai*&P4&<%x~~!al#nrMy#MG+~G5Phrftt^n^R)6YF)tZUH|G z36BZA0=$%jCj{g@E^HDQ`yl~c_X_X|Z`*~Zg?<73yM?C&VtGLLuJEYvuz+6e0{(tP zfDbss2U~=V0-V6>9sygo3Vi}T?G&CA;C7p^S->A%!akuWtQB?$-xrAa?+I|cQy3I> z3Gmh=fux41PR@|6BvvG6qfFq3KwGcO#nvVv5FRm3nW>~mi z5#}tDmE?ZN$8g_b^*>i2uR|`2{0w=V+XZq%+$NAmBBw(xW~o5Vh5SpsK)&Z{ z;TD0rHhCX%N#t?JMUmejH$;AioX-M*T$ITp7lyor0y2?zm4K{f!DJzaJeA2o26-d1 z3474tdckZW-(_}@E5j~wP-Yu>K5VmGQCj9~?I$;fTx1r7v^;WKX;~d=BggEDwT&KW zyU62_TT9zUE|0uf+CK7p^8}Lx=dr+J*f=sd#<$5ZuE~92kNF*2@Dui!-;3IwFBnJo z9=jR^;|Tv_o5@Z4&}0)+OtBPY!_Ho+BfxI(a)x>4I^S3%p@m5zaQOd0kdzUV(wYKwuy+5E!UF1OEl*tL>iv literal 0 HcmV?d00001 diff --git a/resources/BoxModSmall1.med b/resources/BoxModSmall1.med new file mode 100644 index 0000000000000000000000000000000000000000..7b0ebc77b0d5d31e58d769f2a282942881262454 GIT binary patch literal 51924 zcmeI*b$r!Vy6^D-p-6FuLfN>x`wAokh!7_sKs*qLK?HYqDDD(@cPT%#0g6+sg+fam zsL@iRy`OKAeNIbfdjGgR_e^J!dA)gly4JIvwf0W--brV2C+Cj&^Oni$ps(DH4vr3m z9CD=ZWc>4M=GXOezu(thq09@jWt?(I&fMscx!=jf$t!)6<2E*uGmqOtXU~pZeM4Mb zJY9U8++BPfTs(b4Je-^y)XaVRMgNA;&&S2Zfl=CPYmXXg<{uw3$d(#5ASNt6IjL^z@BzvpCW(ua5^Q0qVKK2`;jvNa zV~UCkkBW?pid5n;5m8+IUrFl*o8N$h*x2N#h&~!aWJ*MGU7K%Il&y7ga@#)1$%!rN z)f+f)VBKW5ERkHL7rFY=qk$VFRFPlU5^z~=UD7Qml z9uHcNFRty((+>7EUWfylv%udzc-YsyI-W7-4q0>lFFEHg{cg?+G3WYa2BAiX%2nE?=5weT{o5BlgIQI_;SGn6_WjCv%OM7uHI~o&P@n+8-M>hNN26 z&i9YU_GizB&KcjC(jP-v&xijy&xbFrbNkHxcFwilzpQ3IlMy>Q`Tpkl@E?B<3@x5< ztsaG0bN;_E=ehpj^P#)_@tghkbDHsdm@R!?i)B7u?ay_CGM^v4GUM;>{Lk96|19z1 z^M5{v`sszp#!uh*sG^5cm-PKY)XXt`%a(aQJ@B0; z%xeUF6~$TD*O-p|JAJ)aFTWtK^x^%pdfCUG&tXvdroCSNo&o9ms+TwH^@>gki%p1& zYHai1)qzt=Z(GCqwgwGbHm={YQ46-~H?uWu-mr1w#txmfB);sBZ?S_z*)%8DE%(PQ z$o+Ktv;BoeEy(@lPpfh}e0S#9?3~Z`fB92-FIoR&RUoSZSry2tz(1?NZ+|w{>!CHJ z|M8W7_IhZTZsbefZ?k?j{@?i7_#eMNG*{nb`HIbq8G8;689y86NT09znXhZ?--lnk zM^n2Y<43sv`d*zc<3}ZjhZ#SjrN1-Lr&31i_IBQ?kg>leGur$9J4O3rL$CR?wpW(; zefAowj{n*FXjzX9+yC-=r;h3C)|NSS_V>|FWj@bk{1lsclJMKtjqaJxYxXt0BXj;I zy|51c`|kg|t&UC}o!#ACLb|*7bk*qXj|uzht`}?e;_C=c|381-^`BSKtPy8bAgcmd z709YURt5fbD`5ZG*hY%_p8wGKALsgKuNya1&bUWEUuV5;{MUKi_|IOuWpl`z@ftAm z2jZN(`q5^#?Ag09>CG<;k#e)|7s z_hsHj)>@%_?!j-!=*vp~*Zx`|<8}1H%-6q}r~VN1QsREqKOFwu0rhgZ%I^;STkK?W z$oT9oWAQjR{Bhl6{65Pc=OLr+_Vr?)#~0U2v9=jy(s_M0^;fMI2ZxOH@mEj6|8zZu zIsLurVXv?Kb9m3L84olMr_Py=eESty@n59^gKW7sP4Vqy((@VB(VU$c@XGBka$0LU zKf3EQx3m@Sd#KHZei6pYt@)hO--R2e_viJ@_e+q;d3APf*J44Y#*mKPie2nzyq`|n zR=upN84`ajXPdsRCTdETEw}f(n(2Gw&ZOrPkd9B^c3VqeH zs4;UQ9?s}$8kzTpmN->sX=!uk(>xoykEedl7Bab4BEX}2b!W^sXi-HJBoX-XGt(%8FIkg4l_Da_UWdpiS6%)Sc;Mvm!j94lvAU*5M@#__^oX6sM!6V5&DYWjcPw?WSH zzUJki<>l8d>1NiRxsu-{HrSZPIe&iGI>bEoDw}d*LYQ&rw06wBLlsO+ciWFo$~QML zxd#R{+u6Y^|KM(ufj(}gZ%D<-?+o@fju-p~4(;M^?w>tfGf$oxru&lmo{n#}G7qYc z%yD9ziz(sw(Rans`3$$;TO#mvepOFubvsg#NM#x4G2-?CjQze_rML zVK4LElcg)-a`rISZcp62Vn`1&?U(vN3m1l(aVNU<^*kJEVshT{Etf6K%!s~X3+)+V z9Q}7a3tt;*dj0&CTY=HN%r`$Q@49PEFOxRyy~Q_Q_cDn;wQha;<3QuP^NZI<4h%I1 zi@kke%eXKT-7Im&E1qGdLh$j;57v2`CPC*D^4|0}+lTBrl500|NB&iR?NW~F=Q zc-X^?sPJpK@4AGUSJqd3?EHF|nfrCgS98`1F%LcWz4A$~5c9)|T2rS^@i8r;=1i_v zv6q?e72bD;lb@-u`JF43YXzDy73++swIJApw(gw!8y6ol{@T_Ld$bKUmp-4;WliyJ zX36{>%gQ|SF*o;Z^mh27yZN+ex0A!41ekz2?Tc*uHqZ>|b8l}4XMZ!~n-gQ3`T3g* zd2Xh)I$p<|JiV@3K-qT2Z`t>OUMpSAygQ%oT6nOl@ryiqeaUyeX2tT$i}#lcFdx5K 
z|MU%Jt%U4R)>^=I!6Wdlt8khqV3=-JR*JytdL*H7Hc+EMrB z?Yv&r4DR7`uwq~(legEniKpLpGQ&5A1h*Vt!CV;m?8b~DmCUM#BU?4)@o;eBte%y3 zyP5N2zp1>ry^lHkd}P(l-c8NX!{7hV;zu77|5X#mVL5!w=I{M(9F6rcL;F2CJZ5qW z^SJeClogSCDa zgG`C}M`p+03^wOZO?CIk)!NiO?>?^e5oa^?wNtLeM){bO;|b<1JV7 z)6Z3>e^JxJ+`jhCsrPsLn6I}^+i_|}fXVM(=$Gti-sVx&)Fq*_oz19YDaVWUGv@QQ zlM-`RZ(!QEFL8YQyqyVZ5p}oh4p)=%vt!b!=&q(?l`ii#p5|*d1H;^1!P$(A8s z*=F`Mgz<46PfI3V-g;^l_r3dWx9i?+rt67*pB7l}X;QncS=s+* zFXLMArFMmK1sZGe)rV7_^e{^Ybv>Q^^H8(FZ_}mc-6KrYq*)aYUyL$$%XPitSS-d^ zS8oTd{5Zzcp4R61lOui2*msUSIk2f0&lerfl|CD0TKd;6ce8b*>DXXmwX0jAO^&Fz zAv0|;=G7Y2PmfIVGv6)wB7eE00OOMPUW;611I?7K%cf7w)5R29(ejm&McvKvCpR~& zD(jiK?`3Xx%=7-cCA~9``r;JJ8J{pZ%{bsW`TeiGBIU5#w{ln64pV;h#*5F%5R7xwz%+Yl`KnUt`;} zzUI5`3-T2%*w4JXzg34b<^0X7>nbff`Jk8S5qf6#l|A96j>n`H=VBsF+T@dyJMWD$ zv!_hSG2JE5)F|Hc8Gkmxuh!qok&+oxfDd`!~x{Rv-R@-v&l zm%X!YMv!Up)U9%p={?N7&H=sp^L#(P$D0B32X!;GoSXl+d}}Xr_(agC!1CQq?^b!9 z{m`L@IU04M@rNBljeqg?4|z`yH}8*kOsi!@n&CeTKJd!(Xw$WO=e)R2 z(gKa+@X(Z}VLeUTGi67bT-2#?ubZRK`k0wJAEz!a=x=HS-D)w-qn8=pFxvr#OA#j9 zuhDG>riGg;4%0e~;^)iYvMpZs91~!s2A}*g&&wgE?eNpTRDKj@jyaqjaehv?IhpOj zgF8(k%#rd9EndzTx8s3h%mhnfTDixi$dHQYEh_~@bACy}OTr}c}rH0^En zcj>;Nm1m4;UuEo$BdHUyR$vR&AUT}Z+cj?w^_Sk zaK$}uhnPwY-u?PUe55&0X#1|6KlL>2YQ471yG@9>_ElP(D|zqPz53!}o`1I%+g)ua z;~we1vqzs9QD#_pkuhb?^fpxwRXZHqHPQ?lkgz*o6hG&-OKJ5=rzrDMYW)g}PeqwC z4?fyAJ6og~7ZNx!w@a9b-2U|a+h2v5H#X(kH)DOMaqhMFWWKk<&HQo=FDxGyVd{Up z;^prLcQti$ZfW|_b{{h(aa)ZmpZb}{w{ML)_fdce|KRhkS10oLZIb`Bv`PV{MDxRb zOGD+9sYB|u@gS# zbi<2}Zs+tdtCHSYv8Q*m*%SQT^*lVkTq?2Le0`y}30dIrtW=#orr(wOOYd*#V_xf2 zRgWbXOiX26VdYrK5>m_3c!4=;JA zkMX>cd%)JP7_%?5aKoP-N1LYa^vL&QLLbwkZpgRaC-pXiSLMm&`V;N<^_aJ8NwDeh z-m?$(hcI8Qa(%Y^X}C!#nCo7NsP4x4alo}6HzQ5Jr$014X~N8rtv{cAklQcw`Rmsa8t-A;JoX_3R!`hVhX1$MI?{0-H zPdl6=r?WZHdiKCvc{`a(?*uh^xuCl#uzmx*As9h5N>Jty0u#^Iqq2FRjjFe7{Og zy*#X{d9)!{e3dixO{L1syqleW)^5$&vn@P_)UwW}Jsnx}e0%F{zrac5r?s()mU^>L zsWDBgcCq(wp2=OyO8Ic;m4r`=TNRdmKda%i8rH1SgOYlD*~%(6qG{8Qm%3O(4u9<( zxgyZ&99tmy{iOj`>2fvahc5QBmPS5}{bsR`)vtJ=nf(`fS^Hi)IL{sk##S( zuXX-W$teGu0amxKKB(jRQDf8V^vwNb7gaJ#jgs=4{$ z=-kxEBDKu$y}wMG?ci)(Ig;D3L_j~m@Q?rHDZD5;a?JBmnHji+7 zsqA)F>*#xVtG@TGF*kGef4b~&M>B3wuHa_xbvAh)?47syJ0~;b`1T8r8Z|dpe|^5M z=F-AeSlae!Lyx>)-yUvj8h-X@O-3kdGDP`oZkZb3Lr( z^|ya=Yl*8hG}Y(H?r|Ng;>(8|sI{=IRliEx$tN0hv|QUv?$;-Wo8`5n)saH4xLY?O zJY1^h^s?fspLn)=crz2YX4UTE)vp{2b-voX&HZMUfA%e(zCFL0Rqk2r4~0gQvATU< zzT&In-Ay|W_f18&bTq54HEGp~bzWr3cdd?WZfc5sdtz;?-Brx+UjmNKKjmcd4fh@$ z_qMZ1h_g!84CrK*JR16}U0pZh+u`V)Mjfh|pex^djUQUg^65RaM9PjjR;L98S9w1F z{Mfyz#m_cBP|^%?x{3v*tn<)4uTI1CbRC z&#N~_lz-gR)ErrvKe}`>-iK`?PIr9LZsxQY&rA2JnXpbj>8;J(FCL%mKcc<0u)y16+xT^|nlJmM zghK~U>qP(J{x@^@SVhkC^eSJ_-#Sv}e9u>3^0#h{=~8S=Enllq;<)Z-oxQEt=bid2 zPxZ8#zP#|p;CWrFI&W7TS#M%KYy1tjr@wA%Yvst*;p7)THn;ZvoZUQZUe%iN>L^|m z)VB(UmcAamwV^e;W%%8VtMge21q(g-;yqXEPQjHkye4~CMZWsr@GINBtp~2-ydUIs zvLe>`+$#5^tM&fUZ8?^F=w^*A>EY*?$JJW7uGs3Y$272hZoK?az7@@_`vG6v*#E4J zwI^-DjTO~htdt>RE)CD$#j@TX+iW%0PxTaUY~J2d#lOLI)~4@7%Tgh zU9-A$cd^c<70q+-+d|eSpWhkx^h!JHg8^@JZ1=63HKlj2w#!p}E${F*hb~HSwLzVc z-gmV;KRv&G<*xRYU(w>vl-)c#S7+L(cPm(RV?TO(lS^Z(f4*nG7W%xMwP|6X`%J5v^}W}R62<2~)3* zPsicsoGg9zqmPU9QIg){&k6kvNG|9zqCC)_`RLCh^(R63Q2+&@PZ0{E2=uFv#h_n% z)UR=sgg%?muY;ArD=3R{copSgLj_cXKIYNKmQ|pSbM&WV)uE59YN8fuqYmn#9_mAX zl+qB5&=^h76wS~aEzlCJ&>C%^k2TuCKyT@{M+Z2=1s&mvPUsB%g^nBaYnl43D*f7~ zekn_zV(4!z^eLGy{NRrObVDG5&>g|h-^%wyFN7czVF*VAA`yjX^hO`VpfCC%7IBD2 z0us?51CRv$&3y_|F%W|=7(*}=!!R5pFcPCM8e=dP<1ii*FcFh58B;J7(=Z(~FcY&d z8?Rvw=3*Y^V*wUo5f)y%(gf7k@|iVdi5^=N<9?`a8nT z>LmRub5iZQe^Ior8?A|_dNb}%zH)Q!srR2;z$vYphk9f0-}HuR2`K zS#2~%RkTARv_uY+fel)#S_3sv1#O_$|ILvXB~S^kqBZKFDYBy#%Hd^HKpj+rUgPJ4 
zf%13-S|24*7`5;c+Co2t=MHGbkQ7(1S1l5rbrJ-@^ zd9Ez%`KboV(LjBxU$v*})t_piJe8yJQjYe%HBR-V{M4^%rS{dYa#VfPm-<(J%1QZX z44MbEr~0e*%1yOZ{WVYaIa2Lyu-9L;)V!!4&4KEtxw6lLYM`2Fj#OjKhw7;Ms%_O@ zwbZ`mOZC%yXsntq<*vRvKsjm*YDc-LEw!y2R1b~Y396fY9Vs7;Rc#umR?1!V(3(?S zm4^+ghib2Uw1%9a8mit}YpRoKta@n7?V|6cX@JFe;f^qlp2ek)c(udN3_ufO%WdlK|IT~CQ= z(9>`QEI@zgwf;!xb@)8!HU2T^b^31T^}JsD>vgpr+Ins82ECu4*Zq3G;RJL(6w!Dc z(-DAem=C=daRz#gt~@Kk38~QQdA$#z{zIVgbb#KE&};q)(Dj?3_c=5^wWo6zV1wR! z(0dy@p}h3IfZh)ogqb*rj%W_O$1oZCu{R2%p}HkPb<=w%%c1u#`l1swM%AJ|%0us6 ztcS+wkL=KU9m-4ZWmJYc^xnxr=pMCw5a%%zx<+*#h-Ns25?G0)*av+Z9s`Z9edhiG zq753?c4$nRmn%>^i!lPTpd4&aO;rQEZ&MJO_dryFa@08WeNpcbRfOj4Dx49E2GDyl z>PKVHI_ZN|Sc&oI4~<`arl2LJU^c2?7xF>tP-|g0YC<(V4dvAfJ1_)VV;Z~eReL?5 zIoCW_M+GRSWzae0rMCM)ZK^J=$d5v}jt!WPZctl;F%oTY5x!`LRA?@=28Ka%rTo=K zZJdMFsOEYjw4QX$7Q{nqHW`hec?-c*s3vPL3&o-JTnhD|Tzqf@TGySC9dT#~-K+VS zgHzC$G#_iB8tC3O&{~@a1I=$$EWup##eS$JN1=YSHuTsy1dS&r)h4(1@)=Ao`vSP7&M=1uNoHN zwajhRIuG_BH!hIF1}pyWLSG zb6;c7{AvAYt~57E=z?(vi6kpRt| z=3n_~EqX!orm<|t3be*Z=$c%Zi){#p)|AGqbv+O2vkZp7UWb?1R_*~f0?nn?z9W*M z`B%<4F$u~?bER>GK)J3&1*q0K-v+A7Wf&|+CAgz6vO#s$Tt%Qa@4ZG%0+Y1 zAM>Gl9z%Jwh1U5&sD7`aDMnxhPC)BM=W0N?YmAZ5yjjrL7C`qdLtYGmD=t9e*4)`p z5U*niLZLS6Kx5TBYJ6H3(I}2?=!nfogW9bE<*vD%jFV6ftDro#AO$0#dCdpSbx{OD zIc`G>XfCxzn`04%V|Jlpfy$ix^5FxH{~=DmC*;?kqXVnc2q=5 zXbx2GeYlP!DCa3q%{w3mu0pk)gD_~G)ZPSW-mXFGZ8mgHdGyB^^ukdn_gXj&tpPu* z#!{%>`=K?bHKldj5bEC-$|pCLVL6JSE;d8;9SY5l`niHm$b(p@Mw;hBXp2iw|L#~1 z&0{c5;2hMy#-ZB1gvHRB8VI$M4+o(!?}Qt+pj@W4#1*K5QHVo(G=?JO3FSq4k&-N1%0} zn$(8wNkC5Ig6g{wYSRVky9w04a?=_pg)$g`;n)X_S#7)F7*uVLelTZz` z7Sx93&4!-Py4H27*=tY@HTR?7hi2G?Y|xn1t_L)at)P0V#=0gRnoF&R-?sfa`_rHt z@Zx(7zU@#FPS9ARQ5K z=5;wVUOj#~A`nZl9ih-;NqsACn{cK z;fI;f<5$O(t8!F+%IzAIm)7+e41)5ygf39Kuj3F#VIodIeX5Un$OX;!0qlozRl8fT z7HVJTRb$QHPN@ApaE5Z!`c$48_drxfCur@fUFGJ9VTi?4xMLI4&R}fBDvX2XMR|Ha z=eJ@JRzPFb+zh~Vyb9f;yw2k?-hgt_v1Ir|*UWQquN%UitErFs+Vf;+kCy* z&j%N1{Puj+PbxIOs_#iCuLvmLzR)^RzRE%EAHh}Zg7S5Sa$EzAQ+XbS=2LBsMm$tQ zwYwPAp?uXxXQ)nl@g_zg7-~b~(0wH^9C4TijiVjZ|1mgW1C~I2XdIe1<*e(JcU`DY zt$F39nk7N?x(JO&byB}t2LVu>=3q8{JC3*5FAR-E`Ikl%RI9mAu0x@GL!dU5w`%JJ z<-7n_pnP>+eXWG%_!Km5^`rS!AIdci+pr!h(F0od8o$>f@?HY<+YvEPU#i_^jDYg%iVLuy+C)O_xdMu2A@;VLmq5gtl&s}-w zF+3ij*aGEU57V&_-q0KuM=7YEBhbCdS@lpJx@SAowvOxhqdk=SSXfX!RBv714J)7= zm6O(9KPXo{uDjwmhCq*ja7@NpyoONdv8i>Wy6-^~Snz=6Ky5C`#&aI3;79hAgYGx% z4}@|)LF`1VO?1N}wuj;+wqHRLXl~vis#Z%CaS#dkiDPG={I!;gaBL7RqC5LLpc+Q7 zUkCa=Fq+sDJMb+#fqxB?TQ^?9GzAHn#A{r1Fo;%VX#T*Wo^-Jtcl60dShYw8_n-T7i3 zR&x9S64?F_M^T*pUc|Y?>%=b*&9)cv;v4pN6SeN&#U!>{!5Q<}f1P-i_&)I{er0l$hshk9LL5Ns0X@g9B7T66*nR^g*iIydz#aA3 z?+tIPV*hP?g?#MqK{ae(--UROIE8qQ=s;XZSWSG0VYrS^j*UVewtZm3e0%5k{rzftj(@kmUfTXezzTU<&k|4G!?uw-`P zY_9&DZB@hK5?k5A5))%%BEphm65?%?PIN>-nmT2g+ySg}nT?m3cL(`aUAPxnf-1QmBPH3#kTla0lKBylZ*S;qr5DE2Z zgW6jQ<)l88kH)Ld9h6g3Xk7Z%G8nZG1=T{=kHu*yw=Ou9xvhPTS-C1d)u0X3b}D8; zb2}W$QDa_+4tNVX=MR0)stxt0&rZByACs=pctknGL;V!Rcxap!!l83RP!bi<9U8l6 zuGGJBu|eZh8z-Q4UP3o$JUTZ6s<(1gy*!{ApUl*eI20PY4f^aw*Q=g8pgC2$%2f)Z z6WXI4G;TL&je9=Lj^2{`c~Wy9oPEPeUlIYwWWTv z-js{#t9ewdwSIL?ai|}S>m)QTo!f&2(0E#5F;sKaNd2#d{k(G9f+^6w2ca>lPvxb@ zhOVs()mQl{FRcaj7YyZ~do&lSx7NOniROPP^t_=lZiB8-o0`W&s1CZf2rk0DMl>$f zsVUS>9cUhILhDZXC}-ua>y%G8oS?^st`pU%Dh@&OrMhTNMdOJ^UuayqUhOLSApq)M zW7r4Hd3~sE>Q8Mc&Vuey-L$4OUmCyGgK|;+%2C&--!51L)k3u?2(_tov>$U20Oima z>O+rHtw*had{7(eOSMotSFsuKnbBT@VeEIq1}uQu*Lvs$<)tiZ0|hOVI&nnUZLD7r%TYOK0ObEr0Mp*vJhMU6-EJq@ad z*1gtCKWJPVp*~eRwXpyivmP@#KNu~bxebEqt9ojzZ{Z-6&q}DqJ)rYCJ_MKH2^*Bp z0qDGFEojW5G1P(PS-ENax@H)Z)9p-ZUv*4|=1;lqg4S;Y)Q;++>vjJLEQaQ63RF|I zt!s7NB4|!@t=iI@oQBr0YN_LQuo9X})npY0X2wvW@~Z(o|Ez@CIR!uLgK`-S-Fp+0 
zPy&9UZ2?S%_h`$+zJ^V~K8Jl7`wToxc{6qeM1mi5g~m`FzM{Pp_D8q`XW=b)l=@rn z7&KzNYETmH(6$h!!27giVGm(bvCm^)!S03yl(%44LKO6ZZqNiCg0E>WjlB(*;T*gT ztEm4Gwn1aos}7~$E^Slc1ob1Zhq3*!FJNEAJ_}pn5r_tV=nk!*Db#>V^euxui@k#V z7~X-^9N&iR(1f)fg3@r0wrTJI^&_#bV+UYg#2&=%fydxcSV&zAHUN4+YiI^F;WBN` z*mKx#u;<}jSi|v8umhU1UJWP%_i39BA5uRGdjy+?-HUw=yA4*sB98lF1Hld2Ky#=C zS7 mP@q780tNn#3Kaak;NLN81wI!jP@q780tE^bDDb~i;Qs;6OmejV literal 0 HcmV?d00001 diff --git a/resources/BoxTetra2.med b/resources/BoxTetra2.med new file mode 100644 index 0000000000000000000000000000000000000000..3d21669bffcc008180a47881c5764ffdfbb9886c GIT binary patch literal 28196 zcmeI3e{5Vw5y$sjH$I$e>y!Ycm~y2~NZT}=?ZdHMn%<>8`<&Qk$3KD_+}H_C>);EG z(jb}25)rCYQ}KtTg6JQq6seJjP*n0qD;ZQN7BUhDR0XL>{24@q@<*s5p-Kpl%6#7L zIKF4!1=>sEi+8Q>&F;?5?Ck8m_jcz!|1jIpc~|ZIwT2X}Hr1xi+z}M)&qppbe5bHX zsm?WwSe7~NGNZ1X&7}K-tklOPbKKP{>CScb4UTnXa+!g2PiD|$a)V>N>9j%3MQwB& z(xHJ&#>gnuw@Mjm@=iP8sF0&mRY3tK&JWxCDarT$$mY+9qh^0@^H(K*b@O#y<3g3cLB=(4WimQ9)$sPISU!5;!^x;DHBVowiuZLN!WYA*<071k48*Y zu>Rr!*O(1rB!dmFZO1mc#<7vXjx4=+q>L=}cFwI2b-vY~e$H?8oUbdG^Ewu$r2P7T zn0ZSUe+P4Y9juf4q$ciGIP*+;d7Y)Bj>4{X*R-~oaBCzLS}Rua_W9LoBQ8V2S6gE% zkFC`Gu){u?f@{d%4_D9qFucxnW|z)6>wRIXaxCpk4=%YMuKWxf|BP+RRp{sZR?KD>qd9I>b-8t-5>iMT@+e_8Qn{S@BcBA2)q%I-FHm9*qh!J zlyR@7>C=MW33QE7cRQu9zlNUf4~AFFUpn@fnF_M% zui@O@pp3uv2-IKCy?7x1!jVJCc;~Uh@$`3}k2fXbjm_=J*7igq-jqmekGE`ZYHMmX z&1c7}OiZS^UatJQcQ+-2GIV6*wexeWmR}!q;(xpcya&7oya$SUVCmkN^U&$fuiUiM zd1x9zNjz`;-uPDRjaQBj{g{WWq`afHWQ^S#-x17L!kugMIUJT@YG+!kq_(+|wB-dy zITKAhRM_SZ58Co+M{TLHe)Za5e5~zLpDnZ>e^6g4j^?k8TOCKMUh&+x;O=vF7kh19 zm!2DY+ ze5ag#$VRI8M8e27I`;+T4e=*~bCEGAlU#3-AGsiZ#J$7jTME){OQfxpb85`#hb;p> z=c{|2HpGtp=Dp5kd&r#wfk3fXw1zaoaswH=`!6OJD3u?5_g?*&t(kX>rRV6^-TBwC zmJyB;7dJ1fE~wx-xUi|9>b59EjD2@!7mqQ^*Ny$2W%)ed5m{euz34oK*UNt%wQhpX zyQy-m7x}8mDlYTyjrPa5yBAlAKg#T0ukNOKpe<#)DnjQ6EWa2vMdXUr7t^ys{Vz73 z-}T=5U8%SB?XB(kYc|z5^wX|q_IEjZyHo3e58kq_yrYoLAGBqqrZ|80*B@>fy(gE- z{^g~OZ@seD(whCLa@D__}$8UR`{ZBZ$aoz;!8iLnM1sl73s1>&pcF^`^Vs zZpL>1R9_G_Ga3hzcv`@SnUR3%d-GhMt!G?~0{2KXQ}M#|y~1MrrBFFgDnDWU!@2H& z71ni5)pz6(2j>YGv5h`1siBiI0W9 zf6$o?&w2jq$|Z5zZ@|@=E|mi&6Jhux}l%E{LeX`vp;bH&i0)7i4_nB;Ji2{n0Nn*g{)$qX&6xq;KTu13QtyHlMr)ya&7oya&7o zya)b&50q=atNYOKemCa!yYP8tf6{l}+Rti#=dE^z{qgb>c8)5=AK_TR$$^#ESgBaS zW&6q|*a!Of(CUc~6+5q&>z?qcTPope;8M;q;Fsf`ux)Wk_AlnjnOS-Uj`x(-%vt5n z+0tz|57q51-KG+`X z-y%*!e1?0O#&Y;(h1d-B+MagAb%^H>!y)EHe1|--9O6F8Bi2J%V@$+{(9N}uUhL3# zP;d{GGWs?CL>qKtCwc5d7X8$r8##FFzbIn^bsBS0UGT7xKGlBY(5H66#~+N9abb(b z&7jod3uH95g)HrKtmx3V7B--VI3ewk#ix|9NByrpr*GA6`&oXlPAKy$em`@#7x#PvE*HiBS?*Z=t?*Z=t?}3}|fpA=| z;klJ57G@r;#WKk4Igt0yiO{?bZ|mM+^r(aUm!?;y3V!8xj8@u0uQrKCvk%@g8`@XwZQSW%P4> z)22oMA3fY#(MdbXs*~6jWtBk}F)QR~ul6HH9eQblz1RdF9hA`tr7!IVUD!b#?XeLG zpRv#$zWPCZ!uV)MpHSj)=%*b%!e;XH4Uas1!6Q$9_!xVsC$DYMM;q$Y9&~6wZ3w78`9rLX`DD~Kauh9h`3aF=l^pV$b;~U06J$Y153vz;8f=Wm}4p#}YHq)$ literal 0 HcmV?d00001 diff --git a/resources/CMakeLists.txt b/resources/CMakeLists.txt index 6d185dc17..e891af65c 100644 --- a/resources/CMakeLists.txt +++ b/resources/CMakeLists.txt @@ -134,51 +134,51 @@ SET(MED_RESOURCES_FILES # TimeStamps.med # zzzz121b.med # zzzz121b_without_tr6.med - # UnitTetra.med - # GeneralTetra.med - # NudgedSimpler.med - # NudgedTetra.med - # CornerTetra.med - # SimpleIncludedTetra.med - # SimpleIncludingTetra.med + UnitTetra.med + GeneralTetra.med + NudgedSimpler.med + NudgedTetra.med + CornerTetra.med + SimpleIncludedTetra.med + SimpleIncludingTetra.med Test2D.med Test2Dpoly.med Test3D.med Test3Dpoly.med - #UnitTetraDegenT.med - # DegenEdgeXY.med - # DegenFaceXYZ.med - # DegenTranslatedInPlane.med - # 
-  # ComplexIncludedTetra.med
-  # ComplexIncludingTetra.med
-  # HalfstripOnly.med
-  # HalfstripOnly2.med
-  #SimpleHalfstripOnly.med
-  #GenTetra1.med
-  #GenTetra2.med
-  #TrickyTetra1.med
+  UnitTetraDegenT.med
+  DegenEdgeXY.med
+  DegenFaceXYZ.med
+  DegenTranslatedInPlane.med
+  ComplexIncludedTetra.med
+  ComplexIncludingTetra.med
+  HalfstripOnly.med
+  HalfstripOnly2.med
+  SimpleHalfstripOnly.med
+  GenTetra1.med
+  GenTetra2.med
+  TrickyTetra1.med
   LargeUnitTetra.med
   # LargeInconsistentTetra.med
-  # DividedUnitTetraSimpler.med
-  # DividedUnitTetra.med
-  # NudgedDividedUnitTetra.med
-  # NudgedDividedUnitTetraSimpler.med
-  # DividedGenTetra1.med
-  # DividedGenTetra2.med
-  # Box1.med
-  # Box2.med
-  # Box3.med
-  # Box1Moderate.med
-  # Box2Moderate.med
-  # BoxModSmall1.med
-  # BoxModSmall2.med
-  # BoxEvenSmaller1.med
-  # TinyBox.med
-  # BoxHexa1.med
-  # BoxHexa2.med
-  # MovedHexaBox1.med
-  # MovedHexaBox2.med
-  # BoxTetra2.med
+  DividedUnitTetraSimpler.med
+  DividedUnitTetra.med
+  NudgedDividedUnitTetra.med
+  NudgedDividedUnitTetraSimpler.med
+  DividedGenTetra1.med
+  DividedGenTetra2.med
+  Box1.med
+  Box2.med
+  Box3.med
+  Box1Moderate.med
+  Box2Moderate.med
+  BoxModSmall1.med
+  BoxModSmall2.med
+  BoxEvenSmaller1.med
+  TinyBox.med
+  BoxHexa1.med
+  BoxHexa2.med
+  MovedHexaBox1.med
+  MovedHexaBox2.med
+  BoxTetra2.med
   square1.med
   # square1_split
   # square1_split1.med
@@ -203,9 +203,9 @@ SET(MED_RESOURCES_FILES
   # blow5_ascii_pd_displacement
   # blow5_ascii_pd_thickness
   #test_2D.sauve
-  #allPillesTest.sauv
-  #BDC-714.sauv
-  #portico_3subs.sauv
+  allPillesTest.sauv
+  BDC-714.sauv
+  portico_3subs.sauv
   agitateur.med
   )
 
@@ -225,6 +225,10 @@ SET(MED_RESOURCES_FILES
 #   )
 # ENDIF(MED_ENABLE_GUI)
 
+FOREACH(resfile ${MED_RESOURCES_FILES})
+  CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/${resfile}" "${CMAKE_CURRENT_BINARY_DIR}/${resfile}" COPYONLY)
+ENDFOREACH(resfile)
+
 INSTALL(FILES ${MED_RESOURCES_FILES} DESTINATION ${MEDTOOL_INSTALL_RES_DATA})
 
 #MESSAGE(STATUS "Creation of ${CMAKE_CURRENT_BINARY_DIR}/MEDCatalog.xml")
diff --git a/resources/ComplexIncludedTetra.med b/resources/ComplexIncludedTetra.med
new file mode 100644
index 0000000000000000000000000000000000000000..09a59279c8d46a660367fa2bd316265ccf5a5ae6
GIT binary patch
literal 40140
[binary patch data omitted]
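Note on the resources/CMakeLists.txt hunk above: CONFIGURE_FILE with COPYONLY copies each listed file verbatim from the source tree into the build tree at CMake configure time (no @VAR@ substitution), so tests can read the .med/.sauv resources without an install step, while INSTALL(FILES ...) still places the same list under ${MEDTOOL_INSTALL_RES_DATA}. A minimal sketch of the same pattern follows; the list name, test name and environment variable are hypothetical and not part of this patch:

    # Hypothetical illustration of the copy-to-build-tree pattern used above.
    SET(MY_TEST_DATA UnitTetra.med TinyBox.med)   # hypothetical list name
    FOREACH(resfile ${MY_TEST_DATA})
      # COPYONLY = copy the file verbatim at configure time, no variable substitution.
      CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/${resfile}"
                     "${CMAKE_CURRENT_BINARY_DIR}/${resfile}" COPYONLY)
    ENDFOREACH(resfile)
    # A test could then locate the copies through an environment variable, e.g.:
    # SET_TESTS_PROPERTIES(SomeTest PROPERTIES ENVIRONMENT
    #   "MED_RESOURCES_DIR=${CMAKE_CURRENT_BINARY_DIR}")   # hypothetical variable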
zIeBl}2FDHGVNQTKVCBf^MUz6!1E03(5%WZ($zjmZ7}KoVbw$bpxkl^yQxdb115Mli zU~?|^n}(U`4?aF5wrPIz)O@vU5@q`A&x$b@V8|P5KK6aSDO=)lj9Z^taN7rWert5x z9PT|b;#K3?yI%Szc<(Pp<1s~s_x|J)tHQz0H$3p&P9y2_{gu|O-(lVVT!E0;D>oVY z4=?GvX6g?{zmgButlD9}G39iht8bk9p|S7&>Tl<4Ic$XeH0_6zy-pcj$8Sh`yI!EV zXZE0P<{BYp#|q^Od$)v}i=Thui%l`#8JoJVJ{nLw*I4q#nUY;guCUf@Ti>bkw4KJn z&)(@OVXcphGBc#Bb7;}D^ zCm)&nmh$o1jE}$Vli!@WdHb5@8wHq8PVX|e+BT0_uf?>56{3R7Z}(j{e^ZOK#>&se z&bw;+Pe%COgsm&%ju@ruz8ltk<_=@_yfIC39z0}(dP9bcERbtV?salfwYEX#b9eM8 z5pj38`DN0Sal^MqnPI(OoKgGzGsa`{-x!zld5}5kxuF|Acs;)vbfi-Hy%DRd#sNhZ z_Wta+@%;QjagU^LH!}JxSza~qfKld5c;DLf!pzYtJB@f|c%V6QLW@>=2cI-vnLP3C zRaK4~1vW=NdU9ldxoF94C)?KxF^fb;hlT%q!st6<^s4erel|W`I;2i|Vt|>@c+>nJ zryMqhr!M$v>dnWDG2RhzL+TuM#(SXYo0lGEUV4f;$APAAdXx(=ebcms$E?VS&AEm~ z>GvDUoHq8h)d(MW(y-_8amQYm(>}^6W1X_F`NgO`6>5fgOnd$XnX`)5__ka~n5kK^ zyku~sSx?g-GBCz`@%>iyrv${BS93Wa&YY^@Kgm<%tY04A7WltDKFECQc-+uzyCLe8 z69&^QGVqk)yDexujmE8)a}B21Bz++DU4F@9(*4Bt0cJbjL@r{s;6Cs|oY^=+Elc)b>VRt9&>l6N2h;XD%U7~Z>LW(N_xz|e!1B(mCqQ9e;Ah( zupz&>bVR>_0kdPwfN4=vS`I5@&b*_+k*QNd%*S^JjPCJvh&d)FKEBioA?Bl3eO&WM z%^0&ujq6IimZ1CJ(%jjvOr>RG zV$IO-mfK7B(zqYCZb{pKKr`;!<*gSE2{bcHHu^E6U_Nv7JY(y~{rSw+6?;BbX};!T z&G_nXWyhLXoA&I8A01^DxFKuCsw1K1*2#w!tX$?XI~GdKSkNQR%zh~)5N~F*ChQctNx2=_9kp7LPJ>6DBXZ?Rc=CCa{{d!-Cf@bmu8|yB$#I^aCoKCp z>9NOc?Ogl0nSIiJp7uFz_{MkhT&oll@|k1K^Eb!xJ!hZXWfk{L)DNxFjPFY8jII=2 zb{b#tAncIi%zx+k={aE7zxUd1>^%LsX8scEooAWwyR5BxN)L%WWbw2f5_`(>g{64@ zuchsGj-jWeJlSb1_C2uI8ngL)@Z^Wq3a01sU6#-N-~-ko-}PgbJ^ynpBfqh%TkT^; zf#GHD7&KvrvF*KAR#$uFQ>$h7S05X{D8~rg)41F-`+v3`c{Z?6?!>Q+hbOImG@|TL z>)=DTC)~Y!nX&dr;y8$8uzkF{XPoHGrV9SxwiE+da0{N$Z)C-@e=<=(x4lGtIkk$S>BW=h}Ud`%;cI z>9)+8>DPQ?J+p5?)p_X$t&UA=#lJJ|s8MI%+P4$GI&92({GrM3%sXg&G5e*3Va1LZ z+tv&_HFnH8-R!=Y6}XPkY;n?YB0n=fW?oospvouKwUh zqtg>*61J2+W|gkga?IApel+$x|KjS^n|E42ZQGbKXU<-uMuj2|q<#L2@!tKfyk!pG zYRp=9xJUV^$E+td)bEhcVwZLM%+dp_X4yt+r#C>+s@(ipt~EJ&aKfCSxmMjxeNwFUxz>m#zZye#Y%n%@_Js`Hw#z#8UFMu2OO9FP zLxz4|s9COcEc?kv=PcW4?0w_#TMvGnW6ix|RKJwzA6WNq&mLK@!~v^J^^hYKhi$N` zSUc(e{MNub(v8lvul&%R%-74b@NXiv%-r$w0m6JRr(xvcy)~jj#=5a z-u>{d_YYXfr5{_={-Ydg=f2>No3+n2?u=^}6uT+MVy%HOo=@;j$MJ&reVbh$s;)cx&a~qQnZmdXE!)&Cb_`R|(bv`1-N9xLWWdwQEF4QA&74X;DV7 ztWZv{EWq+XMNvtxL{ddm71hMGqPnOd;zdnSOVk#;9@iDT7SA>AqLH{xTrV06 zCb}sSMHA6fB#C6vOf(lQgnoGE;TNH;L~Fsfc&UOf0eNd~D_E7u5SgOAxIuIf9mS2J zljtn?w^=uduA-aB65T})(Npviy+t3|{l`;H@hqmVU8`}e!4Y4&;MD|08nSIgrW zneGXU^55*YPi4l~_k(lo56;v6Umg35`2YA7@GIa~z^{N`f&Wqk z@_zsPko~wlt42mLpMKrnW?uN;^hNT)RvVw!{og@D5hxvZu8ZyXpCby$qJ#1^J&b?n#2i`HV?l>1{`y=b3*<^}!lBYfTEeNX-~efW3! 
z@~SfHUj;=4!8#l{Vl9n#c-GqDM5rhyN{VYmpa^!#(kjV4YiJck8Noe)wK&%1SYuuee5rTfPjlbBUF7~k(E9j3H z=nMPwgKpZ@8Sx@bVm#6;Za;uXY)-)Mq{t`7L=`i*bsf(B?9 zF7O?_@Rw_xBSw734*d}WZC&51Yv1)9pYff(i3vaPo&H_li3vZ^0AKK(7|_Kl@ST{s zhoB*G5eL4b4LYEyn?HOmtcDcsQc1M@I z<7Q1S`vTXmgmLpn>ygy&%F@I3R%?-Gzf-8~JC&JS$M@dqUiX*(ISLGxEBuy;wGh@X z_`MO!fUGO9-jF6(hhU8$Ua&?{SMafq-yjVWtYffVfsLM`jbP1!?6(plM1o-5qnu!U z0-Mz5cAF$vr-&AOj%4kWj}(Ih>m}6ZzRO1w>aotjlc1(x{Q`d*3D#7G3f4F93H$v8 zeKrx{0^cZ!BTKNBg1!1ek1OB02Rr|_}EJn5IiCp zh~A=-L!wHa4fstv^rK&|z;|L{EvT}<2FLV~Du}14ps&87svrh*$KTF^bFANxvnWB_ zT?Fm9&PP*p=NfB4r3Lnyi6DV*Ji2*wT`!24TTnGYZ1EyRR1}OE@`?7>iaz2dL3?z> ze{{w!x_JefVux6h1^%KJKIRkni!S{Ix|5gIg4pRRK%h;cpilf5B(TAHRw+UJl*CDF z#6i8DVwhlza~++reWPe6@CkpL3vxS9Fb=O1=)%}UFUA&ms3Oo2e_A-l=srY{_v*r} zKS2BR;T4SKT7o{@F`lG-;wC4=iS88yF^~(cV>?t(r-q6-@+Vn6|`%ePSdQG{iodVguVH1i7VeG(=0Z$Di5)Kd74^h>cjuGvk1B=uuW+ zhujk<{nQlQM1}|wijVL7O1KZ3|;=ne3(m%0s%r$J2PsTBJ(E^R}XRt6uJyAw5K6;9d0-YHv*a{G| z9WH2(4-EzWplKh$_@W(oPZ#Kq2G}MK^fgE@9=L{9*rN?LsK;2~TyueK^l)t_XrD1b z|7cW4&<4A01?{lI7>IX{Y0G$|59-n$-?2}kIlWBCyq4v===D=dp*@BEVjFF# z=i0>%{ZO}qpgq12FSfak{b2(A8Q;`tAj%7Tqc42p9Dbv>+m?3d+C-2abio&n>5tq) zut*cMB}Z*#Xzy|uG7qKyJUEA%nPv68#U2;QQ)WK)QcCz69KpSkL zn>!y0Yrm(!AMDf5a6$k0NY*q2RSS858L*g|tNxUzjRfqBXo#-k8bo{Z(+l47+3C4P+I4YKjC&gW& zp6DxH5O;_~fv)XEH}SJrD<+HjVxzc8>=eTUxjrTy7O#uDMWFhBQRO}{PBatD)hzL= zV6Lqby98tN6Y+~+{68Xwi#J4bagX?3-tHBji62FfY`>@SX3;~O5MPPGB1OC@o)S|;f>2=p{CY*5U#2iug%{sqQ~iZWgzR=fr98mbgx|5f6%mB3yM3sT?oXh;Kx;xKWG} zGexTSQcM+l#Uc@*b`wtS!oT?y@GJ1wD{!U#uIR5<%7zrXNFarVz&`TL8!|E9uq>T-Q= zTrJ49_`C1X$J*z)PL==fZ?agUvA1-0=dzaP{{F)El6nd2VY;@Q7xhbQ%!3?Wu>WS; z0x{8|9d4V&{oc}?wlzG3fA{n`|K<7i&mnn zU>){q5g}ODqt3OWv0y!~lNctjhpi~VT4c0fopgw(DOe+JDp)s86xgJX?xK&VEU;Hk zu-=L-{Ocv+1h%M8|I~|jpiURjPS6K&us)2xjRfnkv}qu)K@8NzFJhw($0G&yv5iml z1#OZ9?ODGh4(inv{RJAZeoCFL0$c2}wp>CK73k4k@cdw1_9{VpbOz$UekG@*9_Nb* z`a&!Ga^oqaee|X5D9VZE0?qJ`zFf^Y?k1>1zhwk==yQO`6vRtC@}L@xUY^dNVxy|&Ii zu?`feg1);5v_Kc?V+Y&lhhNmAf6n1Q`2sZO8h+6}zra`GMC z$JnBuXo0WT8ZH=@TzAJO`-25>uutx>>H3V8jBDcKI`LEDPi?_iYav1eZK)S2@HI-{ zBXa=%N(+3zuXF8S?*=hM&=)23+X+*UTjD9`l+4>6f_991Y|$U%in@$@{6JUYrw(}_ zr{snCT~~0PTrubK33AL>Kr3wEds#t$_=UdcM195?*V6>H(TNy_3gSiIeu6k^I^{r> z%>;JPjQ)wMguwqn!W8s@|Ky$X*oYR?bz`Fr$Ml6i*rv}AK^rurU(VqZ{S^|}MFZ@( zdXLaP@i9(P1-U0Su44g0F!164KzGydGpe1dIjdp-G9i16N z#7sZ#3sU#oRDJe<`^r|L^nP(HW(3AXNtB9y0 zOhLbc1?RH_V;VvQ{TCM3I3?|eiiU!`&}NXx5a`fO6c8x_-4jJ~K^?E44Rs3&`V1HN zO1#v=&!Qqm6cg78#tVKV2<%i8_<)@n0vp`~eKOAIgM6V^72y^11+-6g_NhaC`XNU4 zu|*861$OWW%>jEo#0?@|Fi!CcdyH%RGR1Jgye3B4P`|ri46u)8#EU=FixgR+tRSbv zLLS2eaYDL?6RCo>%t351e()QxhbGwPnE3DwpRr30=?hz?Acs&|5Hr4_H#rCv*kLR& zSFnxNjA88H6X$CS`a!FBfjvVI7q(pgsEbeJqqm?w=P2FrNe-Bc83KRl?|MNF&=mXF zXerPeAGroGB1qsLb}I<-Q%}$bb+M0T9N#F=nAov}rsRmY=mUGiLN3~fx&l9&3eIOb zCD+Lb+A%NbpE1TU*VxB?W#Rfx9QcPnTqoA90>7D)!vygr3t}S==s+E8&_6ygR=MVC zgWl932J|O>{2*51M>p&Y5%_F6kb5*QCGa6u(3UAQoo&p1M7>M_r;!!_y>YoZ`-d`S@3h`xdt>7O!PaE?CFg7f%FY;B$WVk%n* zeD5f*|a)3_whQ|0#J=SbrL4EdvMH#{TC8xyd-ZMDn8s}(> zR>ZOl1R6MNAYWMU05nwE%HQ``?SN#0TOP zu~IxM9u_x?-lBtOE*gre;!aUY6cjPC6DSUA{|E84*dP{)x5X^+h!`pQh>oI#ND$S; zB;ggYqM+>L6GycFqu4C6#Uk;Jm@TG@TSQ-Rqeu~r#I@osQCbudv9c2+j%t6m_(psv z77I%}Cmt2IihiP#Xeq7})x~5{M#PCivJ)(hX@8H{B0dsJ#40gIJSJ`v{Y7WdN?b2$ zh`U8uQCP&uPKfwb`#*_q#mC}R@vfLFmWqFfQDT7TB3g^aB3|4hMvHQyh$t+3q2jpq z_lm7zqj*ou6R(NK#S}46+$7ovL(~-aiZS9UQABpa#0l;HEItvd#q(mBctYGK%8Nmw zt4I~5s3pdVq9R;2PO97|wh8~{SHQ1;Ux9x|0srqH{~fjbb^QwX74R$ISHQ2pm94=4 E0GCp&`Tzg` literal 0 HcmV?d00001 diff --git a/resources/ComplexIncludingTetra.med b/resources/ComplexIncludingTetra.med new file mode 100644 index 
0000000000000000000000000000000000000000..e6aedf5aacd26b9f6ba406b9ea379b16ed6ee708 GIT binary patch literal 41636 zcmeI52Y3|K-iJ2@5<(I}2)%6pse+Wy%S?dKNeBtO1qdZ{Lg+0hA|j$9C@LxfmWu^M zR8VvyAgHK-iU`Q1S&-g4;rqS2GYSE@-}8Ow72Ubdn=>=#oPYV9GqW>i7w?138aD|E zDiq{k3-ovRJHi}(?gP)CtJb!7;q}KHg;@=KJXMY~Yp1Jq+$<)#jeA$;o!UvW&f7)v z*d}oaJ(|YE#N9(Jm;5bze}9we1Gka&*Rxo*FIa42ouLQq+rgyeX1s#KS&9z_0ezV zJT;pgW!o8Vdn{OuRS=tfL_PZ}T`7UNt&`Wz74sUKO=T#;?s!d!eH;7qfr-Z3#QC4L zYh$4;cIbOEKUrdH^ zJSm`Kr*&l!7u(eAp0cY|QtfYD({`Kw)*Laj%oR`1zt3O$+9<1(oZ{}_o9A|``(b0x zCzJac^4<@x*YD4{Hg0}D%sS8Q$m}t9>ICzBac#TCQ#OfCxaNL%^UuJfyq>n)Q@vw; zy?%ermr#HYfZYn=N#ZCUk7?9|4c-tXyU=N{}B zOdQ$A89lO}(^=D5rAD1ia#% zl%Gs_*tK$AoV&h=+llv&R{^gAUIn}g+@Qd<&&IqvE-rXO6ZeT*y$?;Gqc%Qgz0byO zDSrL)=R6zV{C#K+AFkR4S*7R5;qW{g`?<%fvh`kLfBt6e!_f9`Y$9(Zd1nSYx^w{G65RZNf0F>T{ez`iEz_pPj&Wqp5$ zZFlSUt$(Jf-mJX}copy};8nn@z+Y1V``Os3@c7JMm*qPZJL+4#H@;HJ(~9S7?|Y-I zn5X=6o{ew*-uQW>Cx*pVX-DSc2=c7cGT_>>>8*%eA75Yh89#md%ua5n;eKCoI7~-W zPDhmcM-H^JzQgvh`lnlEpw-VZ<5*lyjkQtFdsnz+jpkGDUD@P@XUK+aY3}&FHRyDA ztyYV(e5cVi#`E<5d;P3#ikcbfaQtA+@eKFT_3zR1toL85>V|?>i9MxnIDOp-yllSf zx>J9P10RRyyF2&fP)0Y-8_(}qZX5@X?!V3#ON|@mOLb30x(dFVx?S@{zryhtzRiFC za6M{z0!~+PC+KmT<@K8T(-b&vSgtZW<$3G4fay8@!8+b-Wb%P_)#dV>-{bP!`VU*j zr>*0QuGMLe7w~U?!WjEY=_-M*oiwU5iDYjL{CU?8_acfFBZ^}k&HY^!VLjoS8?Baa z+i^1bit*qht6MC7Dbv_6V$PuX-v^kng+?yxk~g zmG0YOT#a8l$nZa63>~~V@A@A5+xY=Vmu27Oy&D z@2U_pwf4KCCJYTX=X}2<(k$*{z8}%-$f_xU`uXa}x>NdvnL#B}k9<=0fa{@V3FCs2 zFB(hD17BzQzV2!S-oP?jAO3{n=KkV{nVz} z4s*cF*)3n%f7;mDa%_j-EeDMs#@20_+U~4TCt~fa!re7~bLOPi626f15dg=?9PrI+<3x=okF+JCFb~DO- z{pYme2(!+In_svu=m@Jmztz8BmU!*B|M%rQU4|K-oiILjE=%qI*eT=WSFL-6bv$F} zW}*xDoZ-3N&s+Dci^fab22w8@U(>91rm!Fy~Br^+JzFgUm7WTTOZ5)FtD5gI97@-+Rs|JM{71MNeKa z76pvXeCdZPhHFw}(9>UBF=l^Jz;WogD@I_MX^jg`$TVIrob&nT7MwA@tohX1_N^}% zp@S|g8nxuI(K^SJN$)nzG~!-8HF?R!E5^^EkLM}2>6G!!@+$TH4_z?sd%gMWUfa$X z13&um`5nbB8P%Tbcrj(#ezD zynl3A;R3TR8GZZYT{dLTC8JyF;F3Fy05kny)eZxm4>fZKZ%TjjNVwU)-A`}D_snOS z%hN~th6b3=uJ75f>cQ-0sph?(-j^@JJd}TZiyGhOHXm+zIx%5LsJVY*i5}Hw2AKD> zY&oD<-#~NTsU}l;PYW|2`mATeXN?f^{bChrj-KIbR-65Bj>C5co6`@xZZm@r9S4ouKj&FFZMNG&$a!PO*8$>BXOrq z|FU__XLe>hSY&vZStIK0&O0v#n1#L?u{!_YOU6qT*0n9uJHPo{qr|Q&djy!dUtbeF zDJ{&Lb!^+L9y#-xYo@e%V$Ubx=Hi#SHf)m;Y%XdRxNlbWFtbq3FNfWGJj{%1_F;wd zlLF1iRmG<+TIFvhEm`+og%jD#?wP}Lv<(a}R}^0TNw!D*%rfD*hGb6;F`tfUlk?T@ z!p&z>dpx&qNKSKZ`OwQp{e!IQCeSW39G)%9v9EP}-tc_hAMjk~3udggj$Q7L>8t0g z&wt+x&*y1&^MQQZ^@*Cz-1tJwJEyMrS)a4M)_EVxu78HZvt`(JJmoRV?rxX6I%9b~ z&-vX}9LEjM@j0XRicdy;_`I*#r|#~!ho8J?yiufca^2m>j7azOyVc0!{yg96`r7?D zecZLr{lR_FbA2DS?i*W;&7KDkm;3f`=76i4`@!RktAV>i zrt482Qy%flD4p=a_v<(Lm{khJxEyQF8E1RuuRg2KUZd$713tRgWuI$gX7L(k&r8PE zYyhblJQH90Y>6|j6`N=0UpDb`!_{lT%=H-ujWLI!Yql$W-YAwbV$%z{-Zr?_ zpL}!jyRIf1zusq*KWH=#UACgxl3$F7_m&QLzW&dy8I?0CZZCbs^85%S0z zFJ#2+b#33$b9BLhXIzUOU--k&X_s6D8t2`+d#_K%+<`Is(--(;H2!qdLu;FBoF}(0 zxqP#4hV$_GFH#~d7$Zy1JhJArPe#A;%Q{Tkv(R{cNug~%pPq9yS~BhV)(`t+d~wEX zp8T=O0NXJ+wQXI%+3|6OID z?@FUUg-Wf)?QxhdmgzD)cj#5)-hb~udiBZ6#)Ch_-cxaAls0&m4A(SYySejjNb9{r=-mG%P>;zS#x2QU&fM&{T^+0{yU@Fl4njO ztv=w22yOY~)cTpO7cMt^{kuzk8IPH}UcS`gs8KWf<9!e9IOpndHF;WW>~7bNUC%Zj zHgT7+XVa7qYj6KVdUP(G-tmlUPQ$!Yx0XBL8rHc?wU=Jo(y5mm!IGrlYdUYZ!!Y}Yc@GVkl>#y@Dtq-5h`*C-s<_cI^lw! 
zCh%`%_!$CUtL7K1mGJd!AyHWHBfg@dm?$osB2ttPEW4BvESs>rQbv>&EFqN_6~x`5 zqNpV95tT(1p&t)As);DU5<(5Zt8*>EQcoRGSJV^rMFYVN%oNe0p=cx;ix|;FG!@N6 zbJ0Sy6#SBvZ#nrTEMJncs=#j^S+zx`?i#o9Hfjh$PWd^b);A zvgjlFihiQMND%|XKru)R7DL2PF-!~>sbYjk6C=eaF)Ky#}%M?9^Kqpt#uzd8pSc=HTSLW`Sp7N_i?a$ma``8`TcZkoaa3E1FlUU zcPq_B=k?pz>kpmdvK(98bkE@``ptg3SguIV{pMQxgR}JiPseT}-alRiyb5>~@G9U{ z;2)|$*6;trcv!t=ONim=+Wt4@H~%-4Gd+(C?)L%j_y5<3r(M=3u06x^(4Y0+70CMg zZaa>wV}6(yottd8`pw5WUXZl$4(J_ow-mqr`EQPS1~0PO=2@lZ==$F{bhWNm``J8e zp91Psix)?2cANRUP-;uFO2&OUFT2|8Hivl$*0#vE+h=F|3fLw@y+{lI8$puZ&jQqLpkPCW|2f2|8S~vx{pb_KY-Mk8T74RzHRp76u z!0me8<@10o>+^2CmY(y}^I7v(f8PD0^$2WrXX%l3tsrfvC;qH|Z?#tN315h6<9Wrq zR^XQ6*FXQwwSwP!U%y?~z-b;LwPn30tn1=APyOw@O&`J?riI`GcY!=Bl{eW&1g zYDkQUa=)+I_1^NYqX3U9V?;B-qg*%Zn6-pXg5MmmM!_0GYr*}G^@$Wg8`cJzik{Yf zAC=tx`wG?;QUz-cBL!;+$)cg44<8@gD_MIOEm*S{A!uJin1b~VY*rRc1Zxm?iLs)q zV10r$iQa;*w%D&P@Qp3n-R|RWRFGYsoDthy;;cbQJU>X8N%26trh8r-@*_hflZCVu(Q3ae}z18I>rIr@x?WOKKNoTngZP$3C7VWMhe<84w0gbKy%`qESRg^L=(|l5MO(NU*ab} z<|Jd=P*f9)1)5F}CB+y)9*lhn!Pua0Q^7nNC@P7u0v}v=*u-9{Aa?X27RG|P1QkT0 zxJNKP9OG}aXe<~9#soc?Lu~~*p(V%63u0rOl0;`g?93IkV1CsTH3j*hCAJa-<12y28#apZ1yij*K>I_{zpyAwoDK^<*5Mpx!%1A+Zv0^9f_ z2IdP|j26)X`}G7q=tEn^YN)7U9n%IsDFQ8$MR&pZYki^@P>-)p0>Aj9{}>Bw(+|Dz z$#I-uOwbWOj0<_D3+fw+mLfqgHc5iH${eUJXv3Iv7357{az#t}(FcFD!53qKzcvD2 zV+CW0pLBuN=$IsIKjpN~+(K*Ype;5y#*R~?>!`R@LG37jc@rQldj}*)?v_qHr0w35<71&2x>^~;fh-buoVuvW<>Aw$7@vvsC-uCRPnta7UC_X z^IRt(Dv6c+XA5!;)VUKXpA+whAH`idw^`*S5ibhpc%91EL|1XY$f0wiR6Z|EQCi2$ z)lik(GaeUh#Sp<9nI?i%cT(lMg892ZY!N?+GOD|*@&hqP6x8v@Dqk1OC&vBXB3i5z zF9_z`S0b0%mK9gDUr6Opm5k3R@qid37K)!mnCd@NSx#l9O6~y@#9UEW=hmxyLonZy z#A(q`tP+caOO#i;t12goBHACOvZt6Xc8jrMu{a=dt3E>|*Ixzg^TGACiVb3^XeWk? z2SqQ@NSqPl1oP`f@sYS&{k~E8gqS2g5$#1$o!_H!hUhICi}%DY;w8b{^--HTDnAv= zM5;I|lEp)!xu~c%%-L|2FROe~Y!$_H+(G4DaZbE1W{Mb5Np%NRz9P7OeRVufMC1_BjRasugIqJ+f{xc z(nJ@rUo;gL#3Q1$$g6hV&8vV{fq$L?ciQjJ{&^$h&Dg7eR{^gAUIn}g+&%@e{$6J> zFAX=@ZuNVeG2=WP!c=&FuXDY4|IOd)Wc@cdc4W8fcLvk_Jr=Y6?jntsXKmEetF#35r4DocFh}mJhRT5Igfb+qyA2q zH-EGqMOY})cBkm!iPrn~PEvq%Th@S`B1o|A%(`eb!MbO%$R(mhyl5*}?;R=n3D#X3 z2-@}#*y8>-N^q`_h!d<~W)pP;_9(H}T~Hq&hFW01fWU5JF;G+%tc_L>=_1L3eyo#X zuf4!0YsfhTxnY~N=5#UM+AnP#lN&ZU-$>jgh@V_L3*w+2yNQCeTKe`EXjD?*8!cFm zt|^FuPb$fEHA+XIlH#xDEO#FQA0b`slXv>iLyA#F?n_UGy$r(-QOYUgiTQ~({R$l}Q+7d5q@r!;*!dDa)bp&mwD=UZEj`1lfvI%T4Hnb-<>hVt= z#9K~KN1SMrE~ul=U4lHY%Xroo`0HR%O(k)n7uw*L_{fzy@<)5>=!;$E7kT2dgg_f~ zEFesQJsmxdij6pFl%m#a>;j z#5d#0oMjFXC$VFnIn4b4KlpG8;z3*L@k2en(V>rZjBe=2J(XCoN53|rnILbpMi=to z8sT`9AWrNS7POWu7%uQn zznX%!*hDAtFCnOBF42Zu=o=`;2*w{@^cyQeM6jSO)Die#pBT{*|4E{NAU|wki}6ho zv}arxivbq2VNRjVID!7`+c6R|G13NG%u&XaGDc8OAM6qDV1XWu1$Ag!PB;a*qdDi$ zElFStztqtlO_*0<0y`rF=gE(GVCzJC;*1v)1$Ah_n3WN|1$pKb_=FmQ`7=pik9?AZ zQ!s}NL9W=K4!sx$Y>`h}6BX9H|7V?S|%pvN~jWO&gh?BhVgI!ZJ6kSAL z!S&Hjgo?@5enD%WF|I0ThbGy@XhBmi7T zIMDz<*dqoYt`=5_J;oxBKoepI+S87)O%me;F*1L03i{U;*qbcqON`t@$QiAZtbJmH zNrLv|g%7R;@<4my#x`0I4`WJ+{={Kg`-}zG2QbczZ7fo8PLEwI&6)DyJ?V}dS> z9sP(ehoCPpBnk4Mjy~-KV@!-mf_Z@sXh<8{q9gm|t$ofh{-p%IT8KV^^XNE6pbhs> z#*+BZi?rNqAgmP z0^8#Sx?qp_!gWDgVkcKqpe6B?7o6wXr_CgR&m>V;5Fhri&%7c>d;qc1p1E615HB&3 zFXyn=K-?{`fnHI9*vJ<@*kzyV2e<~X$y^~XascA4C8$T+L_ypW1!KybAtv@2(-grR z8Z2T(pkOTU-%ensq(BE^q&;!qoB2ZC*k@eHgSkXqB|$FKr3>mJ1wP4zexZVR(1CHs zZ+F3U#GJqux~B``z#jRO64=LHx*#7kXTC8XhY4&Et10lw_;(N;1-7t{uCzfvVi+hG z59&CFt$2|vhz~!+fEKovv@a;Q*I=vz6w@Kwk;w|x_ zcwF2shKXJxQ8ZJ#I^u3oOazHIoxd!i#ZggB>=Ci5+pKbpSRq~#bHp?;Mhq9dMLW@4 zZR?7PqPWN{a*8&pyCNEjV+NDqH?WxTf8jhiU-74kt&i!d(lE|>xoLjDZ)jt z$W&b;aa>dv`$QX^|4QXL@s4;!JRzowabkq%BRYtdYFl62BO*l}5hAXtuCX{FYKZ+J 
zUgy77`LTFc{7XC~9u(t6n&>M!iUw+1S(FeFB2+j;jOtE`n&JoXjaV;MiY4M{F+)rc zBSk;aNf_!|MU)hIMK0kZnyBuSs3m?BTg3*kO1vtb5f6!pVwC7FItx>MtBO)0p9mAa zqN(ami`wER@vZnoyeD20^TkXtNsJaLqKk-D-)iD6kze?UW;%aH)Db_6ZQ@h$zIa_c zD;^e;#Q@P&G*rJRQCbub{-U|gpA~h*0kK1TCq5Ia#T()|F-xS2fufseq`uWf8BtJV z6D@T9oTw*$5j(|pu~B>=mWs;adGUytA_j@>qOtnd5M@OnkzKUZ`SYT_I4Hgs^Tg-k zL$ORO5RZy`#bD7x#ETg9uPMrj!XiMl()kObfjA^~i7$jpyeSrm7sO-YJ~2cj3Ge1r jz^i~)fxn6Z-rvjqRYt-ahgSiw0$v5Y3V0Rx;}!TH&?c!6 literal 0 HcmV?d00001 diff --git a/resources/CornerTetra.med b/resources/CornerTetra.med new file mode 100644 index 0000000000000000000000000000000000000000..f006c1756c707fd6cfe584ffc5727a7a7a75addb GIT binary patch literal 25772 zcmeHPO>9(E6u#3AFqBaTMT5r0BaS8-7SnbRN}|(&c|&Wa)6y0gObxc7+C>q9B_eS_ zA~9fM3`rMAh|$;}i!LA`0TWGYbRmXBRxXT-CK4q|q6=Ktd+s@BI`4MgBc?M$XL@e( z?s@0_z5Bj%?$0^*&7A4VWFKmIszpJFgjHC@RI_0)nx`;*(8~iCqkD=i*_*6m$e|Lk+Fh;{o*1)@^v&8jfXoV|gGcjF7t&GYT6R{@St4@);Q@w_TJ4c}Ov@a?_&V&=ERte8knhcao@~@9 z4hUBVkTZl+tC0>7h3qhmX)ZHHI?`$B3f63#n8${rUkR|<94-HDLGxh-S0*!tWIim9 zUo}(x=0jW0xeui>2Yrvfz&S{>>4B2@u>2bM`r}v@Mxn^ui(f6ckXloAKFsm>#Vtp% zD>xs9j9fR+c;&h7IL(iJM2nSH;0^C3?D;>c5(bTs-hD?kiaqI93?I{&;SSKDd_(lg zgJDs0@s?PMeeH*xuTQ>E0OZp{Xa! z7Ywq8-h+xdz$d4vt!;4w1IoNlCoE&;E`_aCq zc@B563%x3}8*?k`hnX}QhsY}gz8ZeJh)YO_2=TonJnfu`PE5FUeM?T$sw{T;o-GuK_H%+*my8F@ep&z8BNMRobdAnsU@}#~a@Dvd#&MH;wAZZhud{+hp18AF}G-kLUJ<3Ic+FARq_` z0xJ;#+y5|$pNkct7;y1$GRXfBeG!X;l_CCzB6Kyo@$)~}=gZt7Tjm!!ILAwaWS-fE zWku#*%xcB?$-Fa(J=jQ;`%~&;`E?7X^4{F813s3*UT>MN2a%s73UZHRy=BHT$Y+Qm zS6^|Zdmx+)*H}?mvDh=L8gExRb$lK|u3NS7z6A(%?( z4a?J>J10nQ*j8QYTEfp+?rRD6)5haeo#D|CO1{Eg0(?Q>`(ghUQ%GZ<5RXU5Yta|X zfot`(Mqhqm;amxS6gTK5J>Q!=Xup_Wza?D|5CjAPK|l}?1pYq)wm)H-|4`IwD#)L3 zZXXsFe}V;CjaWZ_g8hvEcc$LeMhU;YwaIel%!x<;h{(qOQ?jH zCK6L$44OiCK!QmVi4;?U22F@1CTniv(w&f?>l0=+Pii) z`M#Z{O;$_7^Dc}b`l)yYy;M=%PLmla5DwSwKA6G$-CU)v_ zjT>!HB=Q|3dNg0M+GM=GsePN9bQ_}yw+Y;2Q+r)B3e?S*2+hL0xzTk2M=97!PD2ep zzyl6o_Q4xPsz4yV*vj;2q>rCqdKiIv@*>l>klsATw1e4Sepb#f%KvzQ5CzzaS5Yt1 zeMncl@G#y1`W^&0`Wn-KZ7YtIDt>htz}nB4e-!D8drXfKzvU4n0}la|A7h$+x9oeS zyODqRF4NV>cTRDi-Pr2-d8UVvfBG`hy~w{2U!wZlg^zyns+K{z|0kx$knXw3wBrZp z`kU!~Y}Elc2uw$jzjGneSCDQlWf~CI#B!#qHO8ueaq}Sngpi~8OK?6=*|cq44PxUZ z$RM4yG3-*U+7h324p!%Rsq^`1Ixj(;Q|elzG8pA-;@*_3kCB}$MUF$t#l*7;;K+O~ z@6)01q~?}g4*`bJX7XV@H0@k8 zo2qlN_wpyChVyu|N$E11&UfLvgA&a`tMi%A`GSn=VM69FuHmVP*F&GuYnh&}vexzL z^|3+csqN+(Eqe)b{SQJ&LE*#QcMwsmi$0^uC5RbrVK6!BE|?1ZQ`*%gm7G^QdEJ=p z8r?+JcKTJOWat*_+|&88+p(v!ZHMB~bCG&we5TIL-;34Osdo2&&Re>FKlUGh82Sj5 zXczZ2UV;blW>YfpYTnVHhUbNsj6DdnizZwcua@L?RVKWiQ+!o~mtNW3wSQk{*lF$T zaH0qHI8K$bF0?5W+Ef{GDnp@-PUZRyRiO}UAG)y^g3AF`;LI<%^f8wyzkcVPkxwqS zsQMM$tG!zUECLn*i+~pbse5DchXxi{idt(P5k>2?+<4i@K zJe%Kmrktf{e{KE7{P7#Td^aByD_!#q@biU$B7H?gYKuG%PgIFm;zX2R1W*m(MGzj> zL4Kb;!}cj@z;gvo0o_h_1A}XrPxa(qmFnK8Sy}I=@hN-AEsd&yS1g*(E9mz^N>0u+ z8eQJ~d$m)-c$2gD4sGM3x+2w&KB4_zX=CQ(N{l^ja?{H+53a0E=go$a5BTiP+iv)K zmZ^FmL~wo4_9A*T?ZvP4LiKdz%3c82KIj>8nWh_$vI4^+m)P3A{vQPDx(@_?So3qF z$}V7x+A1ffmkaxR&yXFi2W*Wsm?9O!k@)2r!4 z=a5lWo#M^xza?3#s?7fP8SU>^mi}#S5wHkY1S|p;fw_o)8Gjhy&&4L7Xma^*GF$v1 zxPisV%dqi>{E0u9=gXp0Q|Bk2mYVcDo6ciZENgXMs({FsZ?~(%te2%a7mcRISgt+A zT-lqOZ79alwmn_uQZttabWVB?#M5QEVqA{toJ)mu&s9vpD<)UIiSeNNT*O!=zu%pn z7>n2q*+h72c3o18F}yLm4B1Wjmh3WxSWCL3KLsLRo8}ey+mpJ%f=9wXH&QjGU+p2D zb2dFIJv%)w(W$Wl8A~b;MQpMNSOhEr76FTZMWDbD$kl#V_B!U7Vwd(UslE{QyVH%w zkpjcRj1_!j#D~QEv&9N(wy~6r735c}z{_9H)g18hfn(Z+a^(Zly25Gtec(Pmev}`a zl9LZy6`#URW6(V4u1RCfibAgJM)YaEOM0~3P<<}mCGtCk`hCig0>fig82KC<1jU3T zo+WW0$|W{Lb4b1|A$lQR3-Bt&i`GJ~GFsh$v+e|y2=($IMK?&HlC1Q@dWc50a596`-KDAhG_pu*MI%H1={a&n*Mmg 
zl^yb1nRD7h_v>#lGE4CP(i`Q^u)RbIlAkNP5q+BRgh6dLR9^^oGu?QcDKI<=(x~V7 lLrbiH<{ZTk^uOVqK*jp~6Hr*fB481)2v`Ix0<#{0e*kw7%trtK literal 0 HcmV?d00001 diff --git a/resources/DegenFaceXYZ.med b/resources/DegenFaceXYZ.med new file mode 100644 index 0000000000000000000000000000000000000000..f0ecdd61e8b75723e79c44d380e102b94ab95455 GIT binary patch literal 25772 zcmeHPU2IfE6rP0@ma>{s3t}Ph5{g07hizGCB{AEDy-Q2BTipIoum*^&^r0w;B>_VW z#y->+MN^aqL>`(*Of@AcYC>!i|B(35st;&1HmFGjB7{hI5L3^YGpF=U_g-Sy+THC8 z*_ktEe(ub7&di;2=9ZUgV)45RN(uyqfM57Up_nT(6pgFU-{#C?u25C<4;Dm0uQ|fOzu(q6Yw|3vUp873gb2L|b!&c;|PbcLH5fLH!Is+t+(Z zXdOVGZXx;*(1)L20x<-rzxqg#B&&;sDEpN7hk;)G579%&Z@N#`5sQT=Jwh}-xA+I5 z_X7XmO`@xTZyl#T_d=_&Q$!B{|HN6MagLYci)5cWVOYn`t2#jU{7UpF&|QBLZNc<9 zZxG!Bt=h!Bvh88uw=N)h80f|#q6I*ZSVnZU!dN!YA|C*N`e!NrB3KWY*L7PJ3)b;M zV4%ztq+Bf9BA;X~jLfrA=JOmfF9exm=v*ihF+|Bk+>oq~p`I)Sj)lU7fI5Pqdmg2& zD#c>I(l@TtLCa25tCU*gU(9R6f~g@^Wx@1w^x6+&^k$N4%j}0j3}nfEsOvdPcCyS- z-%B6n5=!II1}RG~neT*m2L@b)M&>gi^Z9A_!vwEi*uy6W+7EqFuElD-@?N)J?T=ej zI=S6!qxmeM@Bcv&l2G{I^c@5g>!O=wz7Q~@Q=FyBWdd&=3RBwECS`$JJAU2h?NWBa zvz>aCsiz7NYv0}ebbQy7?Je83BNJtf;^q12GS{CMtF6Ok;`ZabD(fLG0r(dK8vz5J z;=YC->lpI#Dd>1LZrdWK=Y|*0Jt*)LjkpkAP041NN4y@Be3=zbJ-e&(nLX`cYioO( z72UVnvZ}1|(E3p2`ZeWNMJQBhRjsWkUl$h5{iFAY;1fbDhm~J==0nP3I`;Q#gCC!5 zlI6>(R}&iq3<3rLgMb?Xlh4Lz5B1%jzNXjN_#ifffbUy#HlB&IaeDtyFS(XPr%I)e zAfmJJTq&=RvTOLdFNJ8YkzuHUlpm|u-Rt^K8a5`LvBdP25!D+y05_Jg=9Ux z(@5EL`gdz5D?`M0?`=xQhg892JNl@yf4PmGqA4-@pw4yXaUBe=a`N&a=M!{y=dL&O zJiXhWa=~)^WlCR!ApO8GWHL-Q9>qC^M<%f~ef>WO{CKYW@sVXC zk)~H)9I;mX6$!lgz|F7xUq<@Ay|gm&%NLQK-#;~W^u3FNXG*S>1V8#?@X)(&x3Ajz zawK}?(bNm)t`FL;d~Y4Oej~EFf8XV@^VcKe#}8iYja`ZKpI-dd@TK1(xa9Fp8DAZ| z?$gbTzuidI+p@SWTC3g`RC{B+bV~^KGl0KB76e6$O!;0iSx7ULv4771cE|FOooH%eULrVUo*Im9j)9`&fQj zOI$vi>uu1-($eg#bEc8b%*Fn80-QZm(7fVqNXk+7=!aoT@+48sCK)MF_b0b-%+tscO zGF*UT#&P1fam<*quYk+2oXdbT!60A|FbEg~3<3s$97iBi=UqOB>UWBr%C^LEJVV^+ zogzOc+wT;4oH`!eIfjSsEBHY353%}t`3h<_Q7z*u$Sz-jo4uZ?J;BWejwv0=lnspg z3We#nfe+F3BR_CTiZ*and<#=**qOs4dP<*DzTT}p4TJQME{eonT(OL&|* z9^E;HM_wrTESm)SgV;aB{vh@dA)kFkye8)Z5BIe@8&~C-n1wD5`bkOK*ri!*;8Zll)BS4a-xvpZk>FU^(he zw(HGw<8d;_@W@G}p5=pPe*vyJ^dG4IhIaxLWA{%$VF-hOLBJqj5HJYjJp%s%8bRkD literal 0 HcmV?d00001 diff --git a/resources/DegenTranslatedInPlane.med b/resources/DegenTranslatedInPlane.med new file mode 100644 index 0000000000000000000000000000000000000000..d83f96c6d8eb65e52b2e0cc98d23f7d9a3a8e15a GIT binary patch literal 25772 zcmeHPU1(fI6rOF@HchvX#AvG)x!PFTSTIS~{yn(6?cQxxHk);KQ&TCVfy74K#$r=R zrPMxz62XT&q)?MSNVQbzIdkSTxs%*0(v8{O z%!E7V%-_8;-#IgP&Y8P;voRW5QT}kbz)y@aT6W zv@q~zT8Tdf{J_x%A%+0;*PW=6Y`9v84d*F;0Qhx(6F-jmEh}Xm@qiFDCy2M8ZS`g1 z4?zBr+r(oZtas_22cXrpkBA?I{Gl(1$G-gPy{ZFz|4+nE0^jo+@fHj_ zca!*jXw@O^mu&|if6p@FCxFjZ5ibCO#3RIq6-=^$7Wn`G)IUq*SHgV2w65E#Sg?*) zKnBW8LCVFlE#{NVg^_td%6y4K<`p1w47uf!h;br!<4m$Sih8mNax4@s1k@1>eKcjo z9!0U(uk?-UbkVdEVMS%b5{!6mtb?&xaBT?8K1i?iFiLMGIk(JuD8y)itcSXuvt(K3 zC~wsFnp!R)8jED4EWKpD7v3Eha26Vw&xg#HO4(T`1PHtyW_kX?8a_SHdgzmKtyc4u z*ScP{KBg3%-fp4syqD0||A45IQ21c?9RL)YBG1Zn1z<+IJ4=;I0&gA)Gup+IvcRn! 
zzi#w)X*=QBPQA+1Q-z3j?(6K%zS`b>FxTGG*%9wf<=VSDX}+VFd7QfJ_2)+8O`gx4 z>hE<~5Ai93zbM!W7;rat5iZs>%qyj!^;O1@h4OT;4-DYc3{nonrjUqF0c!dbO>=UbD z1*kapKBbZNE}a}Zdp0f0SJPchY!EOA7z7LgIs()8#%MbZKcv^7wAbGF2sVU(?^&}q zo{zop?EaxaaxICRqEbi@(cXBmlvkayYk0q+w?WZf+ej_BPNIR*HHxB5)V0Q0=c~sk zJ*+6DiYw;1f#XBn9#ZZ|9tGlo`CfiBGdF~oU*D;JR=e?Ywa(G*l_kn^gY3qHTGv?b zN0t2hH9eSWz3{kLq$!?A*ljP^>1G@~&k44-bj)=7A=%<>Z!J7T&4>*G1_6VBLBJqT zxCrpx*n%$NJ3p$Osc_G|?8ehIR15dl#%?SeyV1>d^NDgPYl;MXzECDv-=alwi@X?4 zpOLAwLeef0A}oT-MevStn)}olwold;aIQe1Otn+D5yR^#AIs6cs#15uW~0|Xi_O?W zZcobwZa!!_ufXTk$$EUIk+x~~@77LMhKSFaI+Tu&tAgov^hss^avPnZDKWWL&vmA8 z9!xyp%qxYIPte(&yWY_EEHmYRh`{=y^o8Z9>x*B>1q$|X+fCG+SOs%vCkC4nfE zZ@0rilFKSZS)$W@EI&R?xx6>m+n|r7b(gcwxh$mz6-Bw1#hi6=QA$S@rBrdn`SK~a z`Q&Q05gu5c4WagPXnUpx)GM_pVcnr6^0BPbd$xwbABQ zKMexTJ;2Y6WT9?XySK=13HBTNiWK|J{sN?U@mP>z9TN-!1_6VBLBJqj5GZ~G^0nXP zeW-q>m{YbTmgD~6Ue6TyIfZ_v$ozJkK?6}Z{! z`C1d)Y~ZNUp?ulEQ+CPR+rY=@_>t9q21OfqOKe8ABLnBbfksDGNhI>6H!M$m?d(x{ zgXQ^nm+*56{awQS)bZ#mGCWE`$rsol*#Cn*BK8SQ_Y0wJvTq3aWpFKps~j#|3*Exh zxf1;-E^7xJ_2=6!=GJeMZxApD7z7Lg1_6V>e@8&~Cyekv6xD9!IJXJRSgu?PC z=-&viNaxxv98@}l`%k>?)yIhYTMBdUPng)v&zha$9=czBi!pl*`d@nE^cl8Wm7>hg zm)@{Eb^Cc(=?#__gWk+F9;b^8kD^rS1>R|#XXr1$Ifp(4^>28AqhRcQM#4~ofI+|@ MU=T0}ls*Fg0G^f3M*si- literal 0 HcmV?d00001 diff --git a/resources/DividedGenTetra1.med b/resources/DividedGenTetra1.med new file mode 100644 index 0000000000000000000000000000000000000000..71274ea709a1151a37a2f411e948b48ffb3e4ead GIT binary patch literal 57532 zcmeI*1z6SD+W&v-R_s>n9J_0YgWcWTirtNcId&&@cjp(oy9)sk!6XDlQGf3bu>a4D z&olG;U)S%PbDen(oa@E?&9&}zueCROvt`b-8cx-cB+QV|PG9k3+r_rCw~H0A6Z!2& z)Yoy(KkjSKKI+04k*DkiM{Tr;+OJX7u|dQp#~s)h9Ch3lYSyb(zj6EORqIu4_O=xC>AJZ%^KFN{qZRjq2rC~dX1MGdt?oX7#kC+vF?H)5wh-zOf5+}80I z>mx6kL!owgMsD9^yZVo)n`7G9y~-JR{cv4>CU!(SmHD19bL6DCL>!6!_HzYd*u{)k ze-2S6lJj`bdi;59+wQZiaeEGEZd&~Q!Nb1p)$zzVw~L;0YVq&(6~iv>ALiViIoFqS zqR33$Bg6mqy>Y$zPFg4FIO3qmV zM4gYUn63Q(?Y^4zA};u|`)ogMe!j0!)jEH9U(}DX$j#Yu9a1N9TThx|VQgGNh}6^KeZ_QFCs>Djw1_yL zDYDh*@6i>Au0V7JqAT!o1%Ch9SnnOzr~mni$o;>1KQvl5@}-}%(XWjoO7SoK_Gj;h z7U+j7U(Qi6a?j2#^0jfSi1{iK^}fdTdgbSPG_`(>Bk#0bQ>Rhne$J?<_e4E%{PCKr z*&_GXN5#m!zt?x$V?*Q9+ICCx$NW_f9sjG}qeVY9{#dhr^`2gDYMvaTI<~!UJR9{q z7y0V-zq0!M`^Gv^&uhA0U%R8`f9g-`AoA>g=J#rjb!*nCQ?-4os*UQa0=CD5?furz zHT(JfL%pVd{eJ5|v#Qafj;=s-1)?huU4iHd{Cifw_S)Ei^z<{|>8HV zBd^ly>*)85wu=4h;-9^5{ImCNG3*jX%yr~9JG(f%`_bITh#52DC2}l2ebz>D2NUtW z$j+{^U5P|?C4NNxl0Qb&XV@_!uC}vF9CbW?)O}IS*saeOSp%Kd`>PaDt?9NFqYqd6 zZ?7S%M7|OIv)@6-w)OYZXBv&FM!x$0v-={m;J3?Bd3T9ge*ZptN!0sa+dV%= zy_N9H{qxa3?(1=IGuI!F{cqT_eRlWf>n8Hwv;6mYP(@f@QS)b;$Dh|r>?)B}(s_M0 z^^aOFc6O2LBeL~>-1mQWJyKQsd)4C~SJ~*6{#6z5&s{YB_Dt_h_?E!ibvu2uQcQHe zyr!zRDIW3l!xT7vrRwNSex`53ZWHSidtusLi!q_>$S20kx{X5Tc+rtS9fUwF_o zxjM4wf|LiWIaBk^bt-(*O4#mW>h-Mp_&oRD7z9WCf@zUy#N zKa=jQYo8g#UK#tVw_bcI($Nd zPr}CDx2m)m_%vVU>((sYA9&Jgn>OjBE-?mKgL@qxJU`uWb6&%Jch$7BZ&7yt#d{{_ zKF8eY%RV!^f0QfT$2Gvb?cKK5r0;)p&zld!qG#CnZ2{K&vWte@ zi}l(nw8Qmm#YNs$n@aQMWpTZ4IW4-n=lHgh*3m~<^B#J4%=C?zzV_%DRPr_} z*3Fu=ZLYtGk?w}unsEUp#`8WNW%7MA#W#c;itYKy6n9<_vV7WCGqOqe^W+Qtj90EQ zA>)1CoAqDI#tB;*WLo$VdSf1tZ z%Liumkx8dN+08YnYMe~dCgVNp`=uG1gGRr!Dn4@>adES+dAd8S;N!`@#&yi>z45|d znpiJKuUh`d$E->kGwkr07iMhQ@BU-a$kEGha5ZPVQ+HalT(D{>Imq{i$1S zgMyx!=En?*?T5$Av(m?w1vGkWHOTos^`tr{&6VE$PbKf=X%6Q8;FEX! 
z9nG%hx zX`;JJ9G~5{j@AvIn7{07Yxl!Tek})Fx8@J(>l|Qz-3-iOpJMA)56gePeWgUXuUiR6 zIebaF*WH@jcXiMGmu{J&+25pePki5esBYJ4>%lvwb#0%$XTP7bzO5?s@n*my)2P$< z6h2$7nrx{%4?U4+jrml5bJyuH?^z2PzWh8S_8F6^$Dv+Bb1X6wSEu|OujCHP@x;nY zR<1K_!D+Hwz)+*!3+Ae8zd#lqstIC)jM}xfr z%*TxN^E&7JXx?6mTcLZ#PiFG&PU+gDxN0RooqArDCezIDL=C+Izg{;j5++y}l+VMQ z$h7{)(Q&?}QaiKg-n)0ET-;U*N^cJ`b5~kpDz*hRJWW~C)x#~sWdHWg zzSr^~6R*)Z@2j2Oo3y@p@-18V+4T8zW$v~_&#e9Tnm7LBcE{Q}dV-z(Vn5^mt$N%e znY>J!mPadOD*V7Cygc(|(4&hc{lJ5l9e2+$3tpT!*{)xJ*;^#jl90piOxo-X+>&+; zHkm)pKX%FMquD=YPs3Q9PMeN33uTPG?t%F+KV9EyEnk@6qV2+OJ$r3tTc5`r%^zsC zOddAU^~3>d>h_$+{BxhRE(GmziT^s#OgTE%x$h`%6T9Eg8@pdTHr~Bb?_9XxhDp0| zM&1WK-z)AQUr<7f`O$+GH=d6VYM>qLwF zP0WzR`C`-xGEG0Uyf^Q9kSX-8pVe~pM^kJ=uTBYKg__weWfHX;_{pqX*`eXlwV%z3 z%x&^iuk^*(J2-@KbaY=3iZ+>Y)#&Plvpf&eVEuK9pMMY`^EN>38+b z!bfvnn|(8HKHvH7gUP*VZ2Vpc15KR4o$iFx2{z9%J$h2SageDna$~KL2LnyQyQNc^ zn;*@gEkjP*cMUKu?n&2oTM%e!*bh#3tmj8Fr^tkPLp^;=?1ei6@AC8HK&-@PFUYT_B zKW|9TDAj>Eq~aR;z{S zw>o<4G%ph-oZz_jg4wXS^WZJ%@0hbGj~>2z`mvdC;?b=tueX}ab2q>IzTe-fcG{tR zNM29lP{VgX=dvHn9iOWAKj-x_-ggECkFWd5?0(d_SkKO{%+M7{&Xw8n&P=bf?AC*V zLB_GAgU_r)i%j_X94mYh`kK$}Z%qqr`OL(+y1M(anHNk3*A^{)Jh^P_;`uwbdG^rM zOqFb(-ANB)U*d>aI^u=tKd02W++E(8yx$s5zxB+=Y>9VzqH_;_GsGpuyptqEhdI`^aNXpdcJ6g*Z=YGM(ho~NoqHV* zw=y-)aqM=nFIJFm{~e*bKUo)tD$@|_bkM6O6E*}{OD2@OzA;CD^)zSP@e6-_W8KKq zGOTM6Uu#r_kI!}#^s?NFeIIjvvX5yq`*cX&Hr~c*UHPdi^Ld)cw~73*pC zmwE10`ps-`dr~{Yx|gPrlWWuCbA60C|MbA5(J#!~o>Q#cy}iw&H_x(uXzFFA%+?Dt zPt&8DzC28oUMcf@D*x21*wdq2`Sg#?&;zNzUU~n(+zRiN)c)!{v(-*7819%jHTrn_ zjJ;)6+sByFV8d!f@7yG(5uJ^Yul3Xmp^}6YAq>q|6HFEyUmW?xt}`^ zI%B#N-&1|ow{O-^|N58PH4U{I9lelxUe$M2p_{b_%&GRwTH9~yxsjiGo0$bWj+rqs z-0GO6!@aqOzgUk)C74@z?q{oQrRwFrBnY)uR-Ll5=ICH+^!xPl2fqrioEipi`8MUP zwL84!w8QydS+nzgJdp3Zm-TX>Z;}0L9vX+!!yH?>UNC9P49f19ev_F#HtV^;yAN6= zhP=6wXy0`!`17v^SDkol)hm#2~6z;LDr%rk21D-_tJ{F za*11!z^hh4pY!o=pSo;v|Kc?~`<18GfE#u4yeWRt1a7|cTb}yY&8&|5Vl~aN*c#Zi z)64C<&sqf=H*etLcGr4c_ip?}BWGE49?VRhpq#IDW`+MO$FfhXaJv}KkK5n0;#PQ> z-7fKaYiq&l{zu$CS(SFDDq){C*hk3cI;u?}y} zy1ut6Ca!<$U_O8A%hvw4dyacyJ?+!(z;E?WSxZ{kC0V@co^|tb#Z_H~dRX&y`H>T$ac%KRGLL@39edcEXQZQ6>eFro{x-uIGwLG_g#`aw|WIwdxKw$-Pq=h z<sjkSe5fQeyKPw*lM?}_Zjdz?sRO6(FRe$=15=S<_vSJiD`ZT6{xb^Hxf{?>EKU;~9zYIK`I>d4> zzAjeRu7Otnm>D)7>hZ?v(4)wu#8rK)9}|C^Jz4azRetWW!aGKux2_~|A3X8u9y2n$ ze741I_e|3!J03S_=V2<3^2_9vC&a3Is>l>wDbTcHAW2uqI`r z>ibRq=A%PXtlDH;*I#jR?dfAxn2~=^lF5PA`W6?fbb0jII#R>ymuKdJwV-_?r@B?| znMxD0#Z4Hn&s1q#tiM~&kJgs>7t)S<8eoO@KfP{1>DSiB)n#KJIOT2i-hSk3nStl6|a@hWS~ ziR1Pgr|qy}4xV)Q)~f@SU$5DFci%W+efm0o;jxE%jalM&$z|hN>)W}Y6enA~uv#?B zI^jr3f6MuR)8(S`ysWZ)S2|~^blZ9wx_I`O+TPZ$*Pai*@cy*buUNwlnZj>dTkJ~3 zdRFDGHNZLUk^af=Tkj{#>FUtO-}?3DtgS6#1X!EQqQL4)ysRF5Qho{T`PDkpF;mKr zIic3tmdBUZsuE}|Ii0F_#i4#ySe0Bm?(ctUjR|zG`!(5T>urOUL1EuNS}Ts8bzJHf zVBN3pvv2hB*H&22+?JISy|BC!6`p-3-UF*^-aH9*9X@I`=wIp9gk!g?JXurT*;PgF zw_+ce7c2n?*5+^Qjt_ljIi_u!`J3|#D@n8A@4IAqVA;=p)jj2gm)4y2 z`A?tP^VAwv%x{siU4S*wK2bouiosUC7V&F(@AR@JCf;HH>V&7Yb8+qiQ+S;}u1VjT z*Sh#w&qgFa_H6Z4E2zZsB)fk-WqmGN_r;GUo2>P3Kj$c(XQQcAap>!mn52e&jcSBxO{iMo94zC`*eFnsafVSnpjgTAM>d4usUdXMN$P@!l=d|M~m50lL0@V4ef?JCbr z-oPvVZM(|vYF(*uV$)l*chZ5PL#z0hfYGL8yQ2@xk;~Mev{ELbGYqLWm>xG*95ib~mOoQ%uCa&{&V^%KJ*9-Ftzx7zx;=akltAUCM z51FwoX4H>SQy<^_Yn>oNdm|p6_k-Be8&4%6IXY1%|j;39bdir25)9m=%5kAQtnW6K1XZP%L z-3)EfxMixL2aMg|+i}cBU!|Pg_gNwoQ|A`8}(C<16>StPQtTZ(Ot^q-vNI z5Y};0fM1AZ>i*LD&5R%`t7Bm2t8L#~5$UXZWu?nG$+ukjK(oBP>6nN0lXgYUnLBcY zm}*51TndZ**?7Jlwea4^FXnld5x({X-k3_m_b%RVUKr;OGcG2)bI+tK-gsj5%ZE(7 zE=j*ONp{XUmt@$*uzpXh+pl7Ox$=vzwf{}Mn118mTbDD9OtPe2ur+D@yZ9xpg_()P z9VY)4FUWMSQLFm7P;cX2BSx1To%WiCMHA0;AO6&;(e~rluv70XkCBU~#$Eo+EZ$Yc 
z(Q8Sd@hEjCedqhZCf}9J&koNGH6I-6{dTf$n3-9!o!`WC;ihoL!x<7+{$!OJo;J^! zMsLihsTnSBcou4^x9>SUPxpJKvd7!KJHKDBf?~89Ufspl@;iGg%bq78X2JF$fzG1> z&DsOwW4qM5$nOmjKV0GW(PT}#Cq=E)yszmLJJ84fwRtkE(9J7*-kF+{v)*0t)XU6Y zwW0m9jQ36LE*GCB&3wcxENmBkWxzSBYLDe3ngu_x61oR|DB$dCrM}d*-q(4rd3?So z@FmR-6YSb##gGQUrs9{3_nY>9YK?8>{bkIcK=UE_tEMftc$;OLRu&sn?XDSnzhCY} z`_EfVJ%=vq+T)?gxGKl6C7E{n^{R%;@%g&W~>Q*yQ?_CF74~fo5Cz zi+!6h|H)?TiIXbcN0Yr_+TpWDg_wu6j=#@xI@Da5eE-?lWS>pXoW4DObO3Pr+ZReEwh(FLOTMnfGngeCH&H+c(&B^C;QI zbh6wKKDxT5p>4B8!i?)Y2|@=Cb}K+tuv-N{_v3%7=bi(!DzMtPp6v=4s)TpmmT5O?azer9Ll>Nqv8YrXgkHz%)ZrU(A4a&&4(@hXU81&-nj03(bWFV8#B4e`m~!L zdztBlT5dh$z14JXZY?U+{F&vPV#EBmH$GUq?uR6<+T*j8u*$XWRqQ=XO4okngJaw> zSq{&smTls3Q)X(%w07aPHejtJUzn z_KmOkS(C!szABjVjWx4P<$_HrT(%y#tqhJizK=-}bIQfuu3pwR?_z~ZT;%tw6~47; zSjE%Y-fL6Ua+~g0>%KL)v2es^E41;>y*tx~SjF!6G{460tGjIQY~eBQtrbwt@m}G_ zzE;V{uJ_Hx5Ob%sTZf)~-x*WH^KSY!UZ&dYl*P<~GiGp+PC==P+_d)O@R>AvZ=h+P zw~e{g^0j$4{(bK*CHehNPWKw2sqUCVE{|hxS+UNXp6)VyX!>i`h2b8l?>+Xg7H(R1 zb4lncE9tf)>67IQu%-s@DHOQ$l_{QhV#U^BpUtJ79`?as@0qhbE6;h>XQS0_>hOl$ z>wL75w2RZP(S@g$!_7v^etGI^93?-l8$yR_(eYTAm2-Fbxl`4L!~8 zx7(L!dcIQoUB;*L#i0XV-7wBi+Z}YCx5Ns2y`b`@4mYfE>#{Gt(%Q?)wmXUa{R#e7 zoR!^b47+{TTG*?)DOd7=H8@dN`u<{R5wSkg z)1NErZ@%I|p9v>`{;W@b4y``{OoF6H27PLn94VlG`kV^-XU_U(n`xoXV)f5IGaw`M zM?#sA1zF*MY{(9MtgC;fppRws@ogUDg+8Lrj{+!&Ur-2zQ3Uz}v|=cZ5-5pMD2*~G zi*hKB3aE%mFsKYi=wr=a;e@Kt$9mOK12v(KqiaKdDW`u$slT+-Kf=_fmin72eJZYh zy4?g#(G1Pe0xi)Bt)c%mMq9K)dvriYbV6rzL05D`cl1C{^g?eqqYwI`ANpee24WEO z-`E&}p%{kY7=e)(h0z#;u^0yzjK>5_#3W3HE2dy7reQi}U?zUUEX>9n%*8y+#{w+G zA}q!dEX6V`#|o^(Dy)VZ)?h8xVLdirBQ{|(wqPr^VLNtUCw5^s_FymeVLuMwAP(U$ zj^HSc;W$p3@o!B>Ri8@}TQ z^zQ`pnZN#3Ni5ht_t&ZDujmRyS0K6qwhGw(+YR;VSB*GS*Rf{QzZQ$U;@A51BQ|V* zXTaYpMC>Py_$jETn#jLLu2(U&9^#_%-5>yAC6N+K;-8*CZ_>Hf`-U ztyeQ*|L6Am{B-~SZQs^@bd0V*bOoX-5M6=j3j8Nl;OD{TqwOcSCBkfxi92zijJ9YobKw zzivLhu4(?)*J$13?DW^y{GHG8pAU^l|L6C~|E^>Dg~SZvp$hUr>#`*D*e{N>D1fZU z1_$IvMU+PhZ_V-K5XkqeJKZxRsH;a9qC&2qwzUFeJKyyI?~uRR*g+{Ro!Yr>rb^*?UaYc zsd1}5s)2IN3Dx!Y>qzsZF>4)GhOO4hKRayeNHx(MYK~(>nIHDR!NJE4l*F6^O3Dzf%SNQLlIPJgu*vU++4%i9D|J z`q}XB{(ASHU5`0!|6cX@`F90Q3nKIX`TJk}uAoNu$gB19JNkD8KUeObzyD{yD{$@- z+4s-y*Z)zEfruAhbn)|Zf|GO9n0aybk9baqc+nQoWP_-N|FPls?}1%<{&m}W%~__~ zUtgogLH{0qea+wT>_2SX=tVeR|83n&iaf6Q{yVIje|kM;{=MoEm95=>eWsvyp!~bK zswE=cTieb@$A6y+=soX!=smdJlk5Gj-q-3!$3E17-aqSQfsUQTTIlxyZ%`01Vnn|G zjmP$L=zX#iF5v+59=!>CFadfmy#leJ_w1kX8}8r{^gg{dwxAz2peytm^El=rH}pRK zI~GFu?L=SPg8JN##0bVnJcWLjpx;LXp*ikC@831%*4U0&7>`4E2M?$p{ch$m79jzu zLE~$T<KX;nq8T5OT zmAD4|j-(_e!2w^99S@NTy5}u+;{XQY7}TC8mY^uA!WkNy+EuQqdp;QGcPy&QX=K4! 
zWP}rRUSq9~R_KmAU|B@`yHd?}XC%R7REEat2eqqO>iTX7gVtM4=$vv>&I|D#T3?&c z3o)Vft2*^WG9-lNUa>kf)~wJR{D!g6deFHu@W)M5LNlmejbRV8J{qDB0-!oCg|6QP z<<}MEp}NJw94L=*2tp31P9-n^snGzT&{`Xe5DbOtkQtuP@4$*dYpge>Lvywb>)?R7 zxDOWuLigl`=I1){p*CLQ94g}%l*Voh!di5|JNydewi%vraazSGnf@$ajZxlr>41>m!7(Jl*+X&5fUZ|cOum&2Z z=I}NQMxiovkLIlk_F@x0q7hUBT~`L`cNtWt5txCCP%a;!T6KfYRe%=tPC^oBp4Y=3 zH=z2Bhpx*8t)(<*fu4AY!qD0*54BMos!JWH4&Binp->wdhXt)Y&F3k!gB$MQ4V>Tz z)l)GBG)AqjV$k`9*n*+ZylEXPH!o;yU4rIWZEL)}pd2-qU7#`U!&e+eay&paOhIa7 zM{evv4XADP?+4AL4^$hiow9I1J(NOQ%*J_W4Sd59yhdfz#A&Er>T5DQpn9tA{jnL1 z;ezGR_|jn}t{^E|p(3)OBQ&lQNQv*5k2uhr<-%EZona$to!KaOj*C++4ym*ycoI zR9&_s9yBlNQ@Lxbnjht-eqTUybPZb5710}qq4l9T8HIK@2CWy>vLN)@qZqLW_Q4sN zvpCRtpN03({rR9iRlApPfcmQo&0Q1hfbLT}2cdpK5R-l`5HCUb$A{{90)wyz8sjG1 zfj6`+HD4K^dDK06d}xjyLVeVLa@YD*{v&Y`3sDq#kOm2${GOsLRzSIH4l`pcvO)d% z;R#YAFTP5wG5T@ff7DN4epeNShIGGuVlyXo+%I59O;dCdDuq_(Qdc z4fUN0^$~)F7=-ue1wF>q$4AsgQD}}vU>f4#B<3J%R8%hGQ4xuu^`dhcdna^&a`+X> zry$hFF$}~?^uTviL4Rz*SZIvr@e9gAHPD)>j-k-n(Oh(ca@vbfsE%uJ7s@RNZJ}}Z zLr$nqtqEOM6CZF1S_{3SViIC=cwqzVp_(htI+%|u_zbNhJtihY_0xUIJ3sc|AUx3! zx=;CQOq#o;Q0<0bIxawS^8#g{x#^7Z_=Ka-9Jj%2WWs4UVFCi7vAsrWbj2_zuLYP1 zwXHE|ed`*v*B7m!Yk$BYisoEn*H~2lT2MRJp+2smGR8r369=cD`6v#}{QzvnBxoGF zpjv3IHsB&O7YUFH!?6U-pt*{P_4tBAP#-s;95m(z$N}AZ9GY*{?lLsDiJ*0$b*lMS z-F;CPCD8}7puBq`94GJB`=wQ(9@P^~oHBT#)cS85|Yf}l0u2@h}! zHQ9-UV(ftGH3yk61scy6Xe?^KJ~Y1?!!fAM(Qt?I{{;1U2^x#W zydL*34HnejOEkq6XsrZ5V^Z7Kumbtf54|uP%3Jf}2IV^h%2j!O#6EaK?bL$CrhD~z zPIbDSMq4EBJaz2Pj(72yM`78H%m>B z|I6@%+S-8Wu%JGCu?`x8){YNqqdU6bD)OQ?zTys6;uMsh#y1+uLDwjUr_gv0LHTH` zH?RhpE9Iv9H5S{rmCsP9O|_-RV>6=WK)I>j%B==$V}HZ8YC94dS6}!+6&iau)bA=Ne?7L6Lu1#t*Wwv8Zso0U+s538ZOy61d>>O$3#~938n4Et zv1WqCs{W2a^Qn8P!W9~4SG(#z(d|%i;4)v$8 zwS#J`@tlVG41tbmd@j)V24Ot*;0zjL6I3hJxI31i1)9OuxBAs{&o-j!U>nOLwr`;T zRIfHT3619}^mF71LF>{Fx_2}Vpdd6pjoTgHps^@#wXfXOz6;jjEY!XSG>#+CIBa9M z&bI2Ju?L|hUO;2hn3Vf4*v3+v?RQYVF1QBetbUaDY{F37WqUhnvmHRx{LDd5wsoz> z-Ii@VE+!D2kPro9@-u;0omiL{%K0>G&u9BNZelQ01C7stb3Vjm9BYJ?Xvcn8j+bTo zH0~iCc5%EH@f@)oQDaih?kz&hfg#Y?j}ld%9z=~Z8`r#KGXZfG`+g|L zwrX{sZPhoBsON#+#P-m`&t2t+XONxiR};$< zcM#K~4(GN0=HdbSONpA_L~NfYb|5|`P9|1EPJG2S=s9u&64IVM`%Q2bTC+Jg=F7R^ zsK9m*#~z{y`-f17W6tb%gx0Dn@iQ?U$7{2_5MH>+v0}ty2xfa2F%j4J6Fab9o_G#9 zIkuBHf@5yP40r^s!Nb^#aQ3Gl7m{Nm$867OiMhY2&JlMJ>vFCK=Aoj_5ra8C6N#b6 zNp)gj&ZS2uwx4n=CfncGUIaZxixb0$dJgOdZ}x}dH9E3ifvCr567E~W_Ic!Hdp9v7 zMsjWf_Ms2^M~Ewto9zr-7YlXRK8_LC#JS^0!FDrrV!I+S7W~;x%Jm=F{tZcRkz;z! 
zS&mAatHCin=M`aF&tYGXk?rps@6YyP;tSkny980s8zYI$@dlM}0k1ipjQi>lCt?)V za$G-0<|7a1_OPD`vyc>hafx#^iF&Ro$~BMK*7H+mqMn255>FrxGI2gOrn2p$aRIS1@e=ZK?_OeNyybXu_Qw!sv;PGB&;`XguAl39*?vxp!~PGp>tQr$gPw0waLrg^DElpl$8ZHDIG&GV z;cUlcdmwQs`_nLk?R(e?cQ|tX9ml({J(d_3u57O-F4TGA73AZ({lqMI&+(M(*CNhg zzds7%Dcm^LmFP*d+ojmHkRO9MHWT-en*D9WQ)qxz>{sFXal|R?UnL&E z2ew;c96qr>0A6gz;M!$umm+>+KMOG)eq-C0oI$1KIyhTuv;_J-3Oo+5bRHfHQ2jA#TEAwySc@c(w}=vk_Bk zpX2k`zJY6O7a|^pGus}-wZ!hkhG@%n8R98q<=z+U$0rUUzT(&&q{Xk;$+6AEYMjqb z48tH?=lBug3d}$m&i~+8^jCBRqAL(xf#?cES0K6q|G5?TyZv43f9_00k36~p(G`fU zKy(G7EAanbfuH~WZ_e8Pxy`@)`@hwZ_t+Dn|NifPuHQfN=%4=mpMKfOms3=X+|x%Q z@xWKaH~m|lpa1`<|Ns5}45vyM+2ViuJ1%`7!k5m+CjP(vP4HxXR~7lC-$MT5zy9#| ze{24_{rH?Nv+A#}(c_@M+h1SvcRc%#S~s@&{CV9h&@XZM()qu`y7_0s@;Cc$B-!%*PmKD!K%a@|vrm2Qsn50b*=a0(Hs~`feIBRn6vQOZXA%0WxfxF2 zCG`2Eq8}Wg&+g_ypHJvBhEb>vFQ~tw(B~)mJa97RK>g={+BlBo7zTZ2Gab+I3zAHkX_2`}nNQ@Zt ztMfUbc9o0fY&1F`Ci{BYP+irh&P~HYsLfN*XL_BXe=p#Hy?6_qKMU1fF+cRVm*&9> z%E1l4;5?k6Iv0R#4#%-Q1gg&gVgjgd-Cq@&!_3eaRC7mYo|N-f9D-_j3Rj>px@mx>tE#z*gw`15h2C z;U=`6H$piqfO1cbCQvj5D&I=bz3N+!O`TJ1Rzhn~d9;OU*aeND zoOHixdK|XAUD(#+M)RRM^v4s_ht}&aMCIQP)1bCgcjc-%p8?fd<5KRKp}f=K1~l%~ z=!+$|0v|kp>U)wn7@9w=N3F|pQ2jKny+o~LZR`BpsO?_FgqQ-2=_)iQ=b?VPL+6*n z8{6;}sqh_otmz&{xI?)pZ>QL7u!qJz2R#rD-KVvq`e}?upnNalIdq-Y zT@t8<8jtF%^`dK3J00Hy)jl}_pmo#=)1l{)jhKg<*b3#ZHBu9*>G~*Y*Oq%8w!NTn zD(8<-y)<{qNp*P)U8A`>2t5WgXXg-%Q}Dn-bc@cbsscMz1zE~s|OPqo_vol_3Opmk9Zv!L;)p1+|Zv~IL6)Sn+zOC8g?)V%1N z+Ereo&;e?*7+T{LG_C|t-BfecB@I-=?zoS%(6x7=`_#VbbRL?E16Yh4(3;SBtsAYQ z>QFlwp>f_rwW#ei#JcDVtt$tpR%cNN%1ya?L~)W6259HwC>G#<67Ia&$TUu#9r z8Ll{wZs-8rcM)2vn&-#(1ofdY215M~iBguRKA#gcE}dHd)i*12en} zeQEAArhBM~{#zvJ@f~xZy6Sl&09xbfvn*6YjWIW-VkC5( za-Rt0t(vG_qtO=XGda|*=F%H#S2fW*YECO7J9NM5n+dv3wNxKEZd)g-+17ZKx9Xwe zS|1u?HjIVFro0-X9jf9HEH2Q3J)G@yy3bs1N0E2O6_z z?P;xQUeh5X)W=b1uEwE1PQeYT-zX^O9#9=rE5-ZJTxsriJ?1dSfI`L3PsHYE6xW z?p1$kJ0!|mqQ;d28l&d2CsfC0Q2W}S4)w2g)VAt!14pm|?{ODe=cVtcKRjO{fpmL$x^$*D0dbfySe})t2h6oLWNp zDEB~UP0Yh0=sLBbaTkRPv>w!^tv~gn{`J_?agAXkRObT-hmNZ*y`bYAu?O9tajLCd zL_J=#M%|!(HE%kvYkjd5-;o-s%O~g_&8OyC`|F{8=RoUb0F;l~FN2bBf@-=B>RUBb zZrXMs>K^6mhH6lou27pAYZz4L%}`FQu?EiQhZ<1Zy`eQ656ht1H-qw5zN$-HqUKrK z%4r|eZcb>8s%_<69Ll3Il&9wDAj(4b>YDV>So@$0eudVk*0K|Ho#vw+R7d5mYj5Bs z+@W*2PHk(6x^UO zDu;4VzoL89ju#xEcFsZL)Ed>A)3)YL^PLTkqN3(E9durKs^3;vh>@_}w~TG&t97d} zi0Yv+>G66IpP?G--Y4jUP0%$>&>X7cIB34-;xW9i6B>is)!NnlYD+n^#l)z6wQ1{L zkE^OE1b4V0CscRc=LU`49lA%g(7M+cD?^-NQ4g{e%4QHs&Kqyxq?1QfT0$nSc zaT}^}UMQzMQBm!*!3Jm?I;R}9HZS8iR8OrT&G{QBSJgygP}F^`q4R5@w$ec7hhr5E zVm2nD1ypC9PX^_^6pf*LJ3y}iHBahKIjT;oz4}u-pYRw%Q5>F7o9f>l^J$m6v836q98OEA21Pm91Or}%z)|@0NWgvVq0T;j}mx+ zfly!SL%FC;-M0kFw#0Ep@%xQ{C0Ja?^3Ot+`UWdVFiW z={a~MIzTm0UG#WbgszAIt@Cln4E28oszFVtMw6giw9ZuL6HuG)pf=Y*YoaHSmXy|3HQ)8q-vafcnsQv@Ud==ER^dR2$Vy zeQ4e^_qtF0^?~Z7ycHYZ7LG#g>t0>2I2l@l`-$_RxzqJ}{HrGMp|ukaYU4JPt8$+S z)kkyG6wRQ#RU@r6)q5cGV?QxNr-IDlzL3Dt4} zQEN`usGV`p+S!SuL00l)OMw)ZS|`$s#aPDnos4g zemY|?zC&&9fab?mcRkl>E>!o+P!7s{EwqMJkNt3lYNs0JiQ2wJT!VXP3eAV=pmnA> z8vu>-HFTf))i_oA*k}#avOhv`9h&>8P!0W{d^85tqA?0Xd8%L4RQ1=m)Q0B00`@^| zX$^*<7j*v&6u}E97u_p)Q46|8{i&8EaTMpEn%ZiAm~Ho{{SHLsqBcGw9@O^{Xf5dp zR&~@kHP7>L92cN9TpL}XF=;KHg!)nLJ)!IMIL(TkP#qRS*Qj0^|6JllsJ-UUoHRx_ zcHthj;uKawee1rY#823WZBTov*9o+OYN5GNTe^P=0-$9?zPS zN0*7GQ&>9~CwW-HXf1;ivRF9fO^;r-d5Ep7EGj!abDKy>|QBiYl54EQ})Q2as zL~UOp>i5)YXDBpg^{+9ijU(6#<<$i7Q4P1Du}_E2^^S^rh^n>L$5PaS`dkLB4MnxD zdnV%~bp0VH_j6HEIjQDy3>u>a-K$z0CaO=Z-4pl#t%35;d3nRO=2dNJ{K{YTxQxCy z16zA)XA?SLH#GjKc!f4l-W6~Q&X^6&pZd*(7YKvqtQT=TRPTY%7}Wk%w1xUto9E#I 
z^`U(A_*4D9p%l`?2g*AZ)XsirOd7u)do~BzR-KjeI%u34+fE#S)<-QUkJ3@EHcGkH({@yp*@@*O+v_@|z47sNQ;vXdG%^{kh>1biM8`NSpx|Xf57F2%18< zE1$v8{C9%Z>0+qQ`q1-++8YnmV=cmP6Uy5K-?0)cpfTyX2lxi9L#>xd(ArE29}I=Y zsoXWD;`jvhsodW|b=TVI49$ne(;zD9=a|M{BI?|A;v%SDtyirZ)ubJcLC@2f;DPl~ zG-lvJh|z9W>s`nU^S zy8*V`HGitbYiO?XLTzb`v#|=Ad)3n&8k1_Wj@Sf!p>?D2YYgY0=bWn0T&d0@}X- zH$1=+-;zH-gLTfAt zny2n~4y~CfC3orp{PiyWomO$fF-dl;fPuFjP=2+J% zmv&HI4WVoFyz~VDFwo;hZR)Y7dTShepm|vh&4v1CAGNJI*mCT_w(_2Z(olce*EPCF z^P{m8K#W+tZivn6B<#joEPyM9qZhWva5fj(w&%z$8!$M5K2=vAd z?rlq4O{`CxOLQa_M@r;E9DJfJe>_o}IKy=o@c?lTaUB+73W7QI5)aT6gE%&h*p)bd z*qqpzSd&;DzaT4;BNndHpF8$qJr-dqM#338xwjqBjo5%Vk649R0;!N2aq*e9UgN3S z#96L$Cmtm3C9cOJOvOiz`Qjm(p%w;n+=bYUIFQ(a*hS}v6;KEcNP*ZmMW6ey0gEvW zqtFMtxUW5N4Y46{KJiy#Nu)*|#6uYEyumZvz&WlvMLa~@N8EtLn1&FJy}~0j#}JN< zCw3(NQ1nHk1w?I z79O~X^IUhDc$m1KxDiV*9itJ-aX&mp3k>Dh1Y!^3U}7s`H)0)PB@{t+q(WSrq0a-@ zgr%5)G3bYmSjWAMi3^EUiM5HPiD{7!3GkJ6-r+fJ;R4s3As!(fAa24^%)l6Y;kO?xmc9>SGnQc{#-cwuVLjJ0Aub|TBi137 zA*MrqBt$rEy@w}mqwho5g5~%P<1hf7v4Lxv5*HJz6U!n!3Lp`_(ar~W;SMgTP2w@)A>tM+ z$8Q*iHynS48)%KX7{T$$#NNbV#J0qq#QH=>6h|(kMFM=~+*6##GC#bvciJWf1J+=>;L1sA;KxCd^c4eDVe$6ble#Nott z#9qV(#40F(+(?In2?C zs9oX-;t}FDti)`L$2*Qc$1Su)eT?Gx6k;FZ2x5C;Z(>8@uPBK;NRLGL#<`31eH7cV z3Ue?4gV7C}xuyki8L<{|Cb0rB6AB?I9AJka+VsJFTvgk|lf*B0g};3wO{E)6kFOqlg`eeTa>T?TJ;1rI8OA zkpw?DewjXxV<+4&50fwyJ+PH)S`k+e>kwxVD-nyp9@!BSA8F4Q4{;gpTz8zfm$;MY zhIyET0FHU%E*fDv`g43Vu@kW`u?evQu^O=q@*@+H!VXvH^8|Kb4d!DqhM_06ac^tl zN@88&Y@#6+MRMdoEQHY3D?C!0IK_1*i2I1Uh-)w(lM%?V7r2MUn1KNtA4BX+>_==$ n>`1ImEQ!S|AX4LP8_NnuHhx(>zvpHwh%0G=U13@|_SUluivFQN}Xb z*7Aon9np!EKb$txK*840vEVe*7g}0Fn<{bW3?fn}t&kK!oTY+d)AM`x9^8H8tuyT= z!;*V5=iPVDJ?DP>?#H|5ytmm_;tPx#nKn`&3{Mn^B1H_b6Y|olLTF878mSZ|=#g2X zLEY4-bcx?vY2OmxpqmE8=cqIosHk32><{{@yk-7s;SW|XDffB>iaEhTH()%k%I_D{ zOU9OCbTxmd?C@}kUU_Gjo%l9gAKxqQ!9Ud^g_;PcU!9Qmhv>ezS1Bh5aXLfFZ-)Hu z6YX+l({*IJ9JH-AM_-%>ctnCd{|uEen$`p6qjPSv9?$UmN*6mmjcEa;d^q~i5e9nJ5eo5mZ?{B zWVU0j*ah4`7hW4#)F*=|JGt-9McWU3^2ubcA-x~=jbDFq-S$K0Jm)s>m>a~vd{4~g z92p0^)nWT#-_O9MUzD;~g?h{*@f(PgMgby;oY#xFlHFJ1+#JrX64;&%XP9(qU5ff+! zNNJld(}>4*K4$H!Uts4?kfrLYMnEH=5zq)Y5ePpU!**;L=ai7?X!g(+C{n<4R-cV+ zC8|20DfFCdSE76|>!VK<#t#^!iz=!tKT~ zwXZ=R;UzWxKXl9knL9YI0dIL}S($&yB7ao{5a2bz_LdVfr~MF|AKl&>oT%zvYXmd` z8Uc-fM&SBHfX~JTH4o4H497baZgn)fac72e68XK#$?s9`usyc45HnR*hbui*jA*$-W*{H- z)vc;Ds4d&FFL2En^1dot!OaKF=C4z)$}dm9M=^Hy-=q43F3$t4kUv1k$_AGbeY4%ujQ7B)`7yv~_svvZ+TaLuS*TyLP2!9x=Cn zJY~ldubwg&{$|(XpX`6pDjG3y!^2O8%n|z`(e+Pr%#jWCwd3ElRyWtJZ~XNitsUcMA6R+pHM6vQ z#hy2-dd%*$|NMLTv{UBJ#YSK37T_YsWzBl<_===Yu2?m{r8mcQ?PySc~8-dvMDB=x+NbTscL zCl6k*LRa1hi09w4ys2VIQ|~!zcY4-0>*ijx?)h-bw8oll%eVc3lHeaMTdz0&^yBHN z-R8uew8q=^pECo26?Kn}x?uU&edB?;)fcVyA1z!~oYZC2cU>%PYC2@5)x2NOko$qP z|Ec6>>o*>?3V(aog$B`Kp569b(Z1Y3CW-UIvHEYgEYvx}sPukS_Hw#-U-reI7jlC0}h2h`>}@6B`@)4<>61S|Y@x#iwcbsCV>++Sh4odo-l z$RA~AB#Q-h%Gy)m{jlf_;!0BJ=beLk*e))-CH2>1s84@|1Q?!3+;AfGWf64j%0JqFJ=Kr{t zb0XttW1cZf%IYzX#B88aZewl~$QE2u-;!{9!hL9MYQ6H=+^GW}TGJw_$uc2%%om+1 z#<-s;j+7}2$aIE^Wh%a|vGN(Q3)1OBODvRlAU_^_XgqH)CsL(p=sKXVV^i7L>>`DE zljw8FjAJ(Jbo3wgJ?#8pL|8kp>0!^qeuvEp`yMtw?0?w!u=`=>0~gr$m=paG=x1wo zu}=FP?M55WUbG4AN4wb%f%c&7;GtjWBifCAal4^|J|PW0_aCxcCUoqvrx1h3leQ9W(+O0gZr0KqH_L=qChXb>8Lo63+8(nX)aBk7xa0e`nkrXLvY$1!pV! 
zo`}(W1zGZyjeY*p{-M6{>rbwmufWY-kJX+~r#j`_6EcD_1NpJo6DC~+&t-C6&HvwS z!foKCbEB65n`dlEbQ#z|m2;xY#A9x(<_(XhbKbPcSI`3a@i1@t8xNnF?(u?0JkIIC z-e&l3;IBy_!kUAB2m2TNLGa(;9=;>^Z6E_b5B5Ot;17Zf?jZ+0d_C~4)<+A7WF{}I%pHhh78I?o!mC`8GS_= zkVU=FNGhJe~8VQ0+a)kB52F-*_~~ z86I&sr3ZbJ;d_CP1U?kR?59E+K9>Z=haC1b_)%B~`H*8-_H|Rs^*{&Z;GX%=fh>6F2V&@R8{s2^Oc{}%sEB9=Q3;Wk2%iPx6V$mxKBBJ@ z6%)-S!l!|Ai0&sUBnl9jME4Mt65U6nUXRjU{|4;`>R)_y=^6ozfJQ(gpb@y95cn?$ Cd1L?p literal 0 HcmV?d00001 diff --git a/resources/DividedUnitTetra.med b/resources/DividedUnitTetra.med new file mode 100644 index 0000000000000000000000000000000000000000..320bdfa3d8cfdce11628d27454c424a7280055e7 GIT binary patch literal 26804 zcmeHPO>9(E6uwgm)KYXn6oH}-`H?@d)S>*vl#zL*#m-Faj~2wVm_bUyPa?%sg9#)x zfrNz%4eVsmMHgKVLyRUSuyCP?3!-6dHyFc;7-L9W;Q8*kXFBiBydj}8W2e2ZIq&|Q zd+yJ7?!E7x^WOAwd!lV|{mOcUtFBhns;HW$6MTDQuA4)_G*VGhFv2V~Y$8WYy4{a= z=_uuU6d5-85_F{6(mewQ{FL7v@AP|=pXwP%#^VarOiH0y81{DizM@eIwi05fvH3jU zItniSTcZ=#5WmRr8x(*4HwGFi`RppkFHn5j*;=|2)>L%c*BnRr`~9pal&@dS@e34p zS3Uv}Rio#hXIM*UJpf^QZi}SM@hD|LX9j9vYgy#8&Xv=77MYn7iKqnuokvOMxK1wO zKHlaad+$l56PPD2P>u)X(#5M#snG?TK4LxEyCfjE#9MbSxo1H&v99-LZ|Z} zew9c?)IzO0&on0cVXv%TK8j?!!uw%F>vfq~ud>&TnEkQKgju0E7?HDtz5h3;CXGgf zPTviLVlsYEr=x_KInfzDgC_9j$#tZP%E{x#mh1LAi^`ct88=S%#EFkjWR7L}PM;X= zJvRK&QDH#mP_vBF*1g?sq9dtA!^Sz)-v_!J@TsG}1lbC>o+bL3Nah;yX47Tk)qAK* z4=)Ta8GC~o(NV#xFZGU21FyG<1g{Uz9zA*H)UlYCJagKMpF8e(E#CI#JaH~c&(hd;);VVufe0cTqTi=Zz*ZfA^h`Tui90Cpjhd>?z zxo2b8j$f|yl0 zh4Uu#Q%1Y*lG4P$m6bzgMM0b6}`u3(lL3b(e3l; zKTOVuQLiyW$v0x=_-&mE+R@jH{cBix0ZfU>4SCst4774*eIRRAB;?&)_`KooS@QM3 zxXXMIJ=*i-Z!?j=$GfRg%@?J3ew11X<;G*HnWxo)N2&33^;H^y2d#x$oXqAo9?tCj zJ$qy2s%+r>u+@IEN9VLDm^7m*)gX-6zyw6RgIUla(?L_7g zuSUG!^+_=4eQKq3rF66F?@M)PmRvSH(tWv2konJ8D zX!M&<`jh*uN26J|SW?Pdd6&}}6GHFp2ZCiL*mO{%CS-1P-Am<@dC?5p z_EBZbS_8inKB{N3@Rr9%CG!SzVmOg)tY8H8>F}KIcp*qJ8X8?~`oJI>Wkx?JnQTQ*>Gv{p?H2+xGe6Dn;MW7xXW84DdIgujn)Sh%uqR z7!UH12RY;;E&d4^Q|_373kc65O4@M1RMhY9|EO1@5(vUKJQNK;jw{_ zbHa4r$x1sj{hh4z)5hb^D#OF}6^y0}&(&hSf=lfz<9r2$u`73On6DtrUN6<2VAM`_ z1%D4D-}Q|0D?$Z+DfR@|rhJv(2EKhz+A18vh8i(`k-{bDSE|Ig!LKpHid1EOspgI7 z)1EhDDTW37YM3|W#-rBk#npmGRZi+T-f8%2;LCx}2EHBf3n342@f*QUgfj5+AOsI( z;3vYKnCnA=&j`LCtYP_%1@g#;EaW7O5OtzH@i$4Gkk9QG%Avm~hkVox9~|mMo#LBA zzVsV>j7!QPjy91-d9;f-@-Pm>QJ2)Wf#Ue*0?Ucu(_Ko0^@yL9EhAb>xe3KO}*#iA9MElF+E;s|4IFeBJV59cCHl-zkh;xpTw(JN7pl(vMQvrMpapww<0CU$wM3@Z6gjJ5X#{*$XAIr5;YNRB8m}hC3=kr-i|Fq j+lcU`Gk&wQnP|F~^OsB)K_~AJa0oaA90Cr3>OtT?)en*$ literal 0 HcmV?d00001 diff --git a/resources/DividedUnitTetraSimpler.med b/resources/DividedUnitTetraSimpler.med new file mode 100644 index 0000000000000000000000000000000000000000..a826cfbebcacba717c745ca5f508b8af4b954f2f GIT binary patch literal 26060 zcmeHPO>9(E6uwgmlv1=H{szr6{zQ!-L)*wtOkiMM>5z6>JFWZ#0wh%1WDo+y6r%~4 zE|j=2X<~P>m<5Y2*x|xN7MN(#1;)g!T^kpTny_$z=ey^g*Lip5J!$f0!t31I+;`8p zKkwc9opbNI=iE2_av75ixD@B`Fm9hfAjq`$c~=H|Up?Uo4#-%H*h-3__O0gu zcTjTu-!{$6gTBu6ZK6;8%0N#N{nL)pw@jn{AIDfwsNeY_)7OZ0 zp%#IRYSa4XnWC*UAAqo}+tN~Wyqhv0GXu30>$b>enJXvr0GY>!$fyk=nRk=S5z3pn zk9Ro0h4lzwRe@d@dgdeqxg1L z{V~R^hk1T8>AB_B!}|C&GB;{HwDny2ATswT1NuJumXvVXpDSru#>xE5(_9y`(8)ZE zUo*Ij+NfpcnTN@GI41L#w<1ATcs8VKlSm{xruY*A5Be< zO-x@pTbeq5u{?1}>kPy!A1d)DoZH55&~MA<`gc)3z2 zo0K`FcyGK;%PVW_8hQWPZ6Ml#K5i*>7J8VUH7RVO$ydU47GGw*Vp8VPb(Oh+@j$H1?a2lV=_+cH=#>&hhSbrC-%{rEok^dK#9&l-E zUi))Hg~37p^a+1-7zoIm5PQqU%(fqj*(|u#8z*= zwu_WHp!znezK6zXo-t?G8C_c`V|r3uZf9|55EsGwL-oRH=JL$M0$`Y)8)<`_~lJBrqkGUa!dxW#B3I zUkhce2}aKDqVr zSl(|xk-s1OeZQo3d6-{m&##81;C?nAZYNxAdwDL}<7^tQAgm4e>egE@T`~TzR{w{S0I0PI54grTiD$s(>R$|_HbP%^Dt)3;-X}J(|p4)V^Zl)^|ui3n)xbw 
zbGr?EIN77zQtHf&Gha3-zc7El+w3l0H?fJ49`;1F;Kqz8ed_Pesb zvG==+dl?Iqr(wU_XgrovhKKDd_{#DRiKfT-3T_?dTFzG>Xf-#D@)bnc>q*Ulm&`Ej zHKD>kc~vM+Vom4_;B34Nyi^DtR+oUiH*fqRbt_ON=E41esM7T2C#5$cPg`$xn6(Gx zY0#TSkiVGJClWuB_>I6<=flT@h&)8l z_;(lh7eHGmV!l5~@d=9f?gQ&5{HI$ft~RWkXZ7l)V}4)gJC|np-B#OgSKlGv5O4@M z1RMemfz=~m`!NZ*r9o&=4E8}l2fJ49` M;1F;Kv0T)ErnP~ZIHAD8zn{` zs-{@cM~n5T3IQtw>4RtmG5gSmCVAUd3Vky`k=gc{0vv;!h3TAiBCTGgd z%$YMYcjh~1=FT~D)7M*@npcEZhZThCN>!;MYKb8*x<%n~zn2CoLKQ>EQbUy4OLVK7 z=rCDb?&!=Am2=XTZf@`DYjM+VXCmo#DL38Kmr5iQbaR0#Ex@$9({+`eC1cAeOf^=8 z6Ry_DeUnHG>-^taG2f{3SHqP$R_crfYJC#(S9Jc7Bbaw8bp908(e-}qML`YdeE1&B zAJusYvk?fX3Zs7xu~zE!0QzX_Hn(IQkLUuB89~i#(-z86=1OFqMP~kFLTaf;<`FG( z2!qRUj2TR_aaX#%3G`%@E^$D(I)IKq7+s3#UP^&oNo$(x4A8#WNGY~hcrmXHM^6o~ zdblS49E0}5CVVrQwI%yue*B7A>bD=-dd_3Wl{x5p|9a+N+MMVzvJ8^>lXqfWScM{U zFMg$VvSD!C(1MY_*BiA}wue{d{)Be~&X>Pm4mglpCz5my!sDUD+Pv12f z#Z=;PLq{}bbfP0ZBF6B~YS)$yY{#z~yIrT73~WcQGJLp>TlOF7-aqv0z6L4*S%Y|a zezwf*=QXvZOiRD{&5ZMksRy`J>wit!Mu2dK{(Yf;tYc8Nkb;d@_wEieJwLp7?llT5 z1;7RI>Phc1G~l(C-K|?$JyjGG;Dh;x-Gid*$~xHtRWWN zvQZ6;|6HeP)>No^z49ZUoj@AWYi}Q$ID7hl;n!oY5(@%?fFK|U_#u#cHU@iW?C$(E zgU-e;K|>wjeJf{U14T(AKYQpcc**K8NGVcE;n{eJkyn)L8a}t!Z9udS#;_&Vd2<8O z{gi@DG;qJS&iOS+k5P(LdBwaoV1A(6@80DdKi~o7L4Gt@8(z#xO~JVnbwGj2|#+LB4PZ;#vA4hZm?V7tr4%(f%aJ;8R@ z;zU(uEeHq#f`A|(2+U6e_-yRx;lVq9)VfpQQ3u(Lr`KXFIA4q1$cU9L^|KrOY`2gK z8(CwdRF$@VK=z@E3UfqWqHk0ov(kwhyGW@<6V6xw0@DkupDiDiIH4@4|h|A(if#r zKVS@n48_JHS!Q??5?kr3Gzk2%uAzFo{$~982Y-B%da5#0`)cOP_uN0@8%?;q@#fU9 zu5vAYKKjo3RVU6*e12;6qu;Ns&78gW*l#<}UX6E{%%zEpp?`=^J`XjjGCiMOe{K4^ z_v6zypo72SKaJFV*z#kz5ocAGc@fAFm8p99#Ev%xZ%jNBJNilED>o-#i-0?5c-6oj z0G~JUexb9zfxrDox4WiJD$z!78*H+p-MBRr`zf@)!!T-8uSxk{Gu@8m;A(|CXxg&W zu;UFccu8l(qD`kdvit8zw;7h*|3Rz$ztP;kq=JATAP5Kog1}-#!1iBE;pb*qD7sv{ z+zj$xBsXDouu{Z-k%g{g89)DpeLu|uvSog8D|5VQkjzK7U|o^97qe1PelkBw-zfA_ z%HzrP(bUs=<+Hin27ELpcVSDebG;ksDM~@^tc4}*N^N3cm@a+>= zS^NnWXeA>3{0a6q0z8~z+d)&LLwpXMtB(=Rx)_S@Pl)W{XXQ`9pD;<^V&ty@f0v`f zZa;4MmKREISe~{&;Ueh`@C)Hj;OESD-!zZY#^X|%;ZeSty?93sx|jPsNu33S*!>nD SLJ9(cfFK|U2m%WjfqwvU!i&)W literal 0 HcmV?d00001 diff --git a/resources/GenTetra2.med b/resources/GenTetra2.med new file mode 100644 index 0000000000000000000000000000000000000000..32e3bb51e35130b526754f28848d213c9b118e95 GIT binary patch literal 25836 zcmeHPeQXp(6rZD|w52A;hgGqEtPu?;Hf=o%G)jBWJ$mLoN?Wi+Oa)u%AABTQVlfyo z)&!yf z^V^}qq;UuYB+h`4MVuLOK0sNwZDI?y@t~{#opokT9$ALAP>(tnM&}ZXn=J)Ii9_c> zsdGphrejajNXE|XiDWa_$?dYn0_DmCdWJMuf^-d0U>DPzW|=;+ktWSnu;s)#^W3oH zkN~R%71_t;wH`L(lS$1XvmR#0FQ2t;>!EJv+=on^gT43M#~h@QaC^pjnEe^}dKlyRi(8AauHbqYP(r$S+UAl#8-rNf-TGp##PH}_w$0116)ewU$gWPAkC7;*YaQ+gSv^7bi6vcTGjB} z@Zzyohyj&lyjCVwsWRZTLLQ9Q)6cHmuz6!|$ck^?WQDh^x2$@rwx*%BzM-zxs;jA4 zXw@yOU0nZ==o>yeOH`Z;h$=brg9mqG8R*5gUP*nrZ@uDIshgOSLBJqj5HJY1B9OT^ z27hSij_jgd{?ImPC=)zy&E8l+QP7i{KlCO%WM$ew6iY(j-ndBVtA_jBK7iCLjbuj)ddUSj_HTZMP)>0iF}=cj=hv zeneuWx8F56Q8lAB2p9wm0tNwtz|DyO?~N_lJ$U91YVTCI*Is_(zPTs``)lJjGGc|J zZhoVi?-mf{O4oQ2Vyg6hK=*+uQ`8o@NWM{p%nYkh`9(rBiO_TrxZ0<*KEn{% zc$yfQe)Jylf4PlL!Ie05uU_jc!#w!@9%tP|DDk^Hce}ywS;p%D5Rvvp_QHD9?IlEd z0Y1E&%9Xtcf%XA?$Yshm9xyCX16$`jKKgg#hbtO(pE@@?J^FfJebsxHQxCs*X5Q)T zCsJR0TldkHlCt!>?;rW<{L3ZjIh)tbt=x4k_0(coe;sW+``nHt_djqkb!^tABQ=*VrC^DGPXORi18V?$-o)#L%(^E2_9KzB zl}tPwrKb%xS(a2`4Z(2=8ShYx3el-3zt>D8u^vLLwep}POAJ`fwAER(p4ZCiqVl9) ze`O-7So-xhYVEI8=Ia}35HJWB1PlTOfysz~9={mD&&|f5Xt(j@rdRwTR*&M~rI`4| z7<2{8xWzB@_tV@VUFV~Vnd78hIv-qwvPS1l%nDU<)A@G#Mxm1^_a`$(Q$_QY_vU&V zh|%m>g)Ld;pAIaK5Cy%rM4U3i%~&2JiY0%Ya}|>Sd?4vDnw|!%2YxPMG!PfW)XJHh zQnnkgiT-8YmxjLdFZ7myarZ3smhmGO<_U}+{BH2SLAT&{ga18E9@m@X&zF>_?q^?b 
z`~NEk{f0in0eywOLmkHuc`U%otmkpcYLg5C1_6VBLBJqj5bzlSx!UjYK2+cD4wG*Q zd_U}W^Nq(*pW&g$3jWmMLu~F|v4S1TQ403{CO*W76^^>a3f%nlT+IRb!$g(zHDQo! z2>7{J6W|({^6vwu@%1C$a2$noZh+z<Sgi-#7q}rvu;t9teLtzt7&_F9t(Jh{!e(v&>Q{T1MG7#)-lZ*$wMck0<;_b_4ue z#1r^FH~a5pxSu*6zxxai|62Cs?K#+9X53)528HqaH9$QgV?k^D-+(n{DctxSiP;^6u!F7pw%y|`~i>gGQB37$> zgY9pYB$Qh~^r>=ALXluow6orh*e(9B9Tj#Y+S%y$3+!f$nes4gZLw_uqhxG3#i9D9 z(SY5MJXCOx%xs5v{kFN1Zijf=+Pft^3PAmR+AHa<*+QH;O65a9KYLm60R47wj^yz| zQRi-=n}J?%gy@|>e|(Z?Q6R+CbJS-o(7*pm^k(Sc*ma_Zp!~U6rLs?aw&8uVB;5u2 zuwg!x4*?xrMD!TYHA{)MiiD_soanvKYDqoOZ$kMa%|wp^J->r!0eIZImgriAjl5Zg zA{PLF`e&*BOJP1>Ue|3^Em+498I)POniE`)ZLyqWE{x0pi|mtmae~afAae}8Gi4%1 zC>f8Vk){CZ$vh~rP`D6K&KUL-Q@UEESY4#_mU()Vj?`+pLM`(r=DA_PkPxfIp5$Xo zwH^lO%_QfLSq}lfELjhAJ!i?r%N+H+Y!TN`8uUlw*2CoQz%F{#$ysP*4!GTRnNLr; z9)@}T!uvly(0b^Sa-E~*E3b9i)%w`1()e~$jpn_CzW#eexrD+6yKfJm*yw*k=DmO! zY=t0LGGgZ5y|J&j$b!=yB0f~vYmRBNgj;qyPxmgvZlASyKl=H%;R&RczJxf z%=P;PLXFr=-26HJkbH=X8~y@dBVd5-s`wTz#skapDd>2$b~MZ3IpM`)_XymLBQAtj zTV$2YBVJF+a+wwDUtH7s!lrJY)!MVw@^9T>S(R3WZ$*`FMMafWSzcakEw5TyQSK8P z2lsh|=cEu1oY>VfSbP3}`_Gzd!(Bfea9^)`+1<7KVr>6Ae+^%+qi#)X5HJWB1PlTW z2*mG=(Hd*~Hx2m#->W^W82-1fb3a{o|2zGNZvs+4lz1CzdjLMg9uW!Lb& zMQ?+m{jG{xGEcaY@?|PT&qU9HM4s;+r2K$NDVJL@&kYh}BO`Wznw;(_I<{Agxw z2yth9r$us4ZYim;AMLnW=V%wJDj|ut8^dZ{W4#|z^53iJ!9?q+M+NWvleA;el z!qM}bV7p7lOt&K_9sfVZ;u|dEfU=T0}7zDBx0p1&1@QC=%4`^p9{N$f&#fkE`$?RBucFsX%`7mD||DB?}l>nyVM!B zOL7Z1SD@fl?Nm2n;87~adbF=*tKP7ged@E|q&;L^i)`TJi>C7mykEKG<1>vGn|A+B z?IbfqY%{D=IzFN*;_c{z%KqgxdWxpR*u#2hVjkzg=)%OZe8{;3o!vR>4SmltSq_K@ ztS?GmSdO~B6e+o2JznY37a>SL@EOt>GL6Ta9K$1>*qXj>4+5pfo+)1P>F2Rm_n-M^ z{YO_~AAA)W@ou~j+i~RVzdb*kuQ_{o@wDnUuExB@_K%lO{~X(N;*(2}tDD{$sY-K9&hYs0Vge?4{~c5dc3b1%O8Va@azo4-H5@Kg;>c|23bR|n7gWFzBm zHzG~8tZwv&)Z2o%S2jtvgkV1d_$y?EN3_e7&m|*GRF1CF3b@pgBeq+KVMk)tdP&R6 zP9f6!Z;OOvmEQj*t^LjN+`mZ;0tNwtfI+|@FclHd{SW>0x!5=qQJW4YQ~4h}j}bds z8OHwrAY_UEK|fz+q3SX}yqs$i(^N7KEF)ec^X&R9ORbOOGEb!}(Rd%rFH5PE_vU&V z^s%(BO62*nmGb>6MY+3!i9Edl$_G?Rx!j7=j&m$7X^-z#w1{FbEg~3<9~2K)Uw3yw}ms6nm9ziS@XzxYaX7zE8HFDe`mb zcnQXPBw5r=}@|CU|d%yWZnkeNym?T z!$~RHz{`S@iW3TO9&Bz%D9VdWy7Y$Ssjr=VN^jUcz14RK-zVGOCH$N^9(!^OkGxRw zDK-e*Kg52Zc;67uM@*-~RRY%xxNt3W3KQo_^rN^|+n2z9r~P71&uz*L0tNwtfI+|@ zU=aBK2uLT34z32?w7dX5&xDE`NgljR1=@({^FM(jnf5-l|=Qds_;b_a}^Y z@V%0!xQE`WzQst+fxoK}VuU`!c4EoN@^tA9%Tu?X2bA7mJ?c)j>rJNdIGSU4in-9HY6Aq)Zr0fT@+z#x$K2>b`dJ#jSv literal 0 HcmV?d00001 diff --git a/resources/HalfstripOnly.med b/resources/HalfstripOnly.med new file mode 100644 index 0000000000000000000000000000000000000000..11dee64348bb76a0134c2edda676f2702b93b831 GIT binary patch literal 25772 zcmeHP&2L*p5ML*08&V^=P*6cVJcI#(IJk-1Cb=YT{oEKic4{YSiXw%gQi9|{MN=t7 z95_Ux6$dUk1jK1`0 ztR?#_GY9dv^91%3fV1^8-7iD>-?w{gnQw@5y&0hiEQSshbZQuYn)DL}hUls?3A5nd_E$ew<=jtF#YqWG~^)|8bMDXvARm9S0OAGw)eG0hrMa(BphdVCA8(qh7vf zGs5c0a^u!3`-RAQyp-`fl&^G~joKCRw_RFpM}SNiA5+fYEw(bg!; z9$Ljk7DAONamLWzxYx=n#deMCTiiM*+Q&mwQ_2(uiBB>`n`rU%V3|K(BYvGJah(;* z*uegwZvXglu>FV!rbqeFbZi7MYd44cPLz|!FBU)AA3V>|E_S$-{=QXU4;TM+w zEk_S1cYnsY%w)b$@E6|q%OylW#)Q~iE@rMBQJjgkyY?rl+G|BX5l{pa0YzYMA|QKX z51NN-e%3itkyc09jdzk%3isD)Hwt3yqhWSqnC%ub16I})8FK)vAC!Hpr^jxQd*MVC zZ;p6l)-Ez;+@uei^cFk$V|<1kv!xB3D^Td?di)r({x+p!KH66=@YAr`*f+oCcI+X~ zlx>ADUo@Rp;QLaxoSbQtecJtp)w6{m5}V;N>-Z*T)Z5W(Z2w9fm!c{0@J%;0$m2M8 z@J2AL8#p<;3)dU^o@J*T5E0NX))$eZt1o>l7tF`2S^8oO=?Au`e`w;=x|FBA*i`Ag0_;ffK<$s8uqT*;}sQ;k`UE6NL z{15K=vNXt*`41zK6Qofx&kj>rm3a`ec5z`cA5T*Yjxm+?)caWeI!URrH+Soxk7aH) zSmxd|@vBTx?)|x7nQE5!EK}k-D{hugAX3_+br!yKLK9YcI=1r z0rMpdZZ#+Zihv@Z2q*%IK!+pHto^R+L)|mQD%+N_Kich?qP%CXpD9W^T|8Dg3=h{= z@SEcw67`Sr6-=C?QtB%ZwAxF 
zi1>xj4Mj^blOn!8HIE&V^xN`&@~B6gTas1LdE!Uo5QMnyv^a0*Zhlpa>`e z{~rO@pRghSP}F%;ls{qZ0u@$&f&*Hc*f4*B`;CA!r`2}hD(jH!Lw9Qz;{KLG>-`B2 zF3P(aQ`|$};%_k;m%!iE7_&*AVTTpz$n<9EjmXorpVwJ$Fdubiuj@^#@wn4rcyy#v k@AF3EIII7*g#Qh14HdQfYf$KoBA^H;0*Zhl(DexX2kGu7djJ3c literal 0 HcmV?d00001 diff --git a/resources/HalfstripOnly2.med b/resources/HalfstripOnly2.med new file mode 100644 index 0000000000000000000000000000000000000000..67bca23777470e7cfd3f512624498be9d078e1cc GIT binary patch literal 25772 zcmeHPPi$0W5dXFmsAbiqRzlGR zVlXkrm>$shheJ(>_JD~8Oo(zYBqYZ4!b7xr!EYxd2| z_ix|#oB96CytjRSD4n^#eSN#Z)YfWRO~S0O0$px%nmbe}qm?Ch=aYF&(FgKL@Sa-zEMd$Zz?8_<7*}o*~`<9)EpK{2*gvzpPG|7yy9w z=W+jQVLqVj+BO#(vGD|Cpw1?EPDnYn#eCAaQJn)8ji>Wf6*^CV&M_6&SYoacDP6~M z`83+eI>_-*xsXuLn9i;ux}PZ)x3b+zox^M+gO06G%jUVt-0)yXh}ARk>SGIA57V?W z**T=^A>h{_>!EAsqSiG0quN})DEuQM3y${tZ<8yWHeqVZc z#P)UP`~1V!LtNV6FAY8dCOEE|>+lggFt3@Ci`V$RQ9HaKykzWggQs!Ch49*)+hb+K zYnRQpqM15=u=vK2SCZcN;S*l!#H*gy<8A8g-`v;V)93YccW?2w^(B+N+sxj%c*4ZL ziJ1*B^AlITA{pt;muG&we0h&8-$1?URS{4G6ahsb2!ZmmG5SNZk5#W3<`2D%4I$xv zt7l^X;jW*JtH+0CaFc~pWJ;1TbT(dL_0`ROjhtKDHmKU0z0^|bWZ`^;sf#K4M2DZS zS*7#O&ySNl%ao*OU1e_I_|Ud5Q0TKX2*d;P!{TT$7NC@!`ZAq`J!LQPV$=e*{kMT3?7`K;4H*MUGyD_s*K_`%~9Q~^f z?hTv0e(l@Lk~8G?0+c{B7%EqLHbHsVU$?C%&ol}?o&JN`*~-{2-}fQ5@vB@=?nj?z z|5w_$6kUmhjc#tGjPqdr$;!NDkYa|O-I4L2nY(sF@3Sn`10n+U#r7h4bnT^;^@8R2 zsg=DLL-v99P|H+rJUSzWM=i0{zU~eJpMHYr&jFzqto(gR?zQO#%eMxSEK7b6rT%UW z;FQN_%D8pNystJgd3Ph1_igb=YM6Hms=Xs`!xBc~3=prd1#vTBsXUj=tZD+ z#(a7>85Vzt_fd89GBo}GAT&t)!F|3gN_BPq*)}Puq+vQwZ>74b^Tzru%V3i9vW}@} zv^>W0+h)p@v$@*_V=NPUD(hSsCwYb`>b)&fS*MsLIn9)$XkBX+QwWO5b@dY-SYC@5 z%i{aH^@*{F-Jne*cZBN_V~oiy;dRh%y0(YciDD`G*r-of`mL{xb8AsIAOs}%xsfe# z{px_~HP6QT#QT;Qfs6y=1?6SPW1c2OKoL*`6ahs*5r{qlwL0(0S;u{*SY+Q4%cF4K ztv4QLBZh|?EBMZd56KJ&ixmv*q*@v)XslR4kiTB5IS}Lnr`d*T=XA@;y;~Kjm!1@MsDnUuJ{A zm=DH;F#aP{cr3#?Boz7k;IkS&xDK|%CnzmGSE3)qjr+-p_f76~TrB9mHD3`>1QY>9 zKoL*`{yPG0JYi1$p{UbUSUlnUUaG9|gvN>|xbFyvO6%w(9zdja!4<$VGKWF@zUYg+kz2dJWbaY#Ok9hyul#XC+PW(BF=S$#`Ex+gg z3GAG0Tqu6F*3Q;sb4P0mowYN?&&k}JZs}`!|E%=iFmxqbThrmA{1cad!lgsiUiRyN zS#S^)$MADvDH0L&*LWcwCm#4UehEj^q4U4S$MN`h?eF<`os(ymVv&w61iKafYdi^$ z59vj)aN|%Oo5AD$L(5qV=~U?y@$`~ELsDue%hI+4OhP|`WoiD>e*O%1?*8?KOB25b zExIk6mqrQI23I8vB3KSx4`=bm;h*Iz@o|NWhO^Yh$y67zN3cAZ4rjqxjc#?cEL~En z5rYN}54~Vxz+X&Hk3S6XhU%xnS@Q7T{cA7K)t2F0c%9wbFVEY-+p*0T$ z>)~JX`Ja^i$24Ec@8fcRmi!8B%qjnHJ^bhQK!3hp>n@#zf2a9>Mw&1Gr`JQrKj-hS zV#dED?62z~{m*n=_4|DNbFGW}eSIYU4*zAN|A8Za$`XIQ{56mvFK!O z|8trf#~IVlF^S*He+Kx@^K1Aw+u1q)T=4I0_vd!=*X_vGj{o6yzi(x~|0*4w)5e%} zL4@0v$Avg`Cb3#{T%Hf~s_E#bB< zNP@C1NeJ7 zYdigZE?@ogkNo}dcMSX;1AoWB-!bs-#=t+8jhE^}B|`sv)&HnIw0&tqJS^RB|1KN< zXOxZqdHYcI(oGf*5x>JwjL1{(Z!M$H3n)@OKRS9Rq*Iz<=Es_)|8Pz`M9~&o7Dj=ll8} z)f>;r{Mu%zeEoO5@xM;Ju}$(vm-8U#0(lZ@XW6fm@k>?|0MhJdS=Z_{Vy5 z-tYSFpKbmcE>$IZ*Zg(vzn)$?51z>W>v{hjrs(N@_1*o-JpU};{Cb|{-=@P-jNttB zJHPx%$G_&6ptZkZX=(XV-_-xD{DPlq`t`;Cmp}jijt`~{|8K>|pXmM55AW>q>p3VV zbGzSX{+~7e4*#pfK;QV}T~2>x^ssGdP=nFAX33lmb?6&=wslvGEfhb!bYZ@B6%?-6 zcbrya4*l&qGaH00K(7CA#>*t`2>H@R6B5~7;=E++_pZ_gvu@pU6+rM280Iz`;* z#F_cdYDSj9TxwO*04HqrRg}B!pwbsTP+gu*#`vW~~eeIOU}=Wl4aKMyF|7^BO4P6a2#ULm8&#%AeaG)c}FdtrZ!U zwZX7&B;`q~9(X@z3*;R%gq_Vw#xy!pc=}p4-7y2;)~0g-cZ;1tQCw;8gQO$mwN-?z zD6|9Sc=gYUVz#hFt}5E`q!nCd$Xy<%W(ke6rCW~OvV!)ht9>`@Y+!J#+4)U}Eqoh% zadUpH9kBGimlvO~2jN%KIk3YKl%6eP3}kZ#lUBZ{J$Wt=uyV(_>*qDW`&P~{`$iq0 zt*=n_*U|?PPjh6|H$!l^{pJ$$EfbK|j{mL<0HennEka(dLEo&h>l&OT5o34D7Tp5^ zNIR&b^Vu{DVqAM4?jGx?k8vm=1h_DQZ(}!qc_#th$R+%~hsFdkQ_;;CuX_0ltuJx9%XSGzR@1!KO&;@!7tdR%K_oq3+yj0=* zz=-Yb!UdQ6D~sO)d?Hl6Ib6;K z3>#i`w41Jmd&e(`k5goTedp-&t*MGororEo=&KHG(z^L5P76Ns(c&17=)y{t7_v=; 
z0ciioQQm*g7=n@IwX%Wn`r%7&ryKm}V7@SN?P%UAXwv@nel|%0ScUYC@4C7MT;^k( zmk%q$gD9_EHU~9eEHJ@qf0;ILQ%~r$HR{3n>^w;ZKq>>B4bYcTs< zw!C-w;0oAtYL7dYw-79^+U1|oF9ps=8&>fOD8S(a`j$KgH5j>AXv&2&!S`rJ+>Sg& zcy>FOe|NAtOkdnL-jt*T0!3#YEI*+OhS_&Z?5`St&%VJO8IO%2DUR9W$wxCF=^Qz> zVz)W0RV`*#;Ob3LhyFTnn)(2zT(+y4eM&N7EaANs&Q)tK; zH!S@K@GO(=+}#)pc$dP>H^gWK(@7G$S7cekH$G#<>l8f@X7Og4+h7PLKN7l4Q%ykq ztJ51G0hoXGu?y@r2g=ZfYkR^iA*e4=x^%`0wsvJ4y1Ul~Ql6ih|G+~4k(;`bF>c0S zT0xtw-DC!rTy74}TbsijLaUqja|?KG{<;bLt>C@kh(_R;HGI{Y&EFGm3(Hm5*h`n& zfzTR}ZFjTnLE@6&fnyZ_8fEg|Zx~vDMUj5+n|qevdV=tL-qjk`WxIoTzYWA@gjB#r z0;KMX6AqlSgX23>Q}*n4fbzJ!sS|9ZsJtds85U^aW0QaS2xk`2qMY`*;mBXdUHuKPUuf`<3BC^k2;yh7&BrMLCe> zohv%UYFZ#_7?s%OrVKAW#~&mUr9jw$&GK>dab8G0#EnP-&PPiGtb5 zljx+m6I9rVTi4awgUGkjEj=a#*k2;!On(cfGwJLzDML$WxO+%Ey2k<3w(yo@``E!L z$z1tsuWf+sv0%kZ4=dQV{e;H%m*!wKk-Wn$(H^ekFJ}y&wFL>sw3GwetReF|JA36f z3z$1|@X+I}0QW?;y%iU+1D%%H0P|xuP#m*kFi^k>R?5(aC+C>MLecUb2RT!)v)N$$ zwbmADzI;Xkde*S!^k!>=s}_*$Jl*1{1;9A-`Q%KX5wt}GoM@xjfT7rc_@{PDC~FNa z;i8y>%H%UrV37%|aO(6d&DV!y#{AuLO**i%@@kXIetp>H@sPRL$q2SEy(fHKX$m!N zJDO#}0kl=pf}J0l!`bQE-ex`eaJukC{^4RHIO#Cf`+mJCI3(=OTAd4UpO~p>Gh+@m zYmPtP=xGTLKD^c4{oM$f4o}CuxNQnO`TKlUH30Z*IlN#ZZ2?_&`339uT7sMSdC2-; z1*5O#xtn>-Ao@<40{bYy#L?UbI?fglxuGcB?~)~qSX8p_=dp%R!5bAAOKcmw=b;uQZsI}s=CLC%S*y>%S18E!QYA(j=!(QQ4 zKNyUSV6JHPyjh+MOw1^%4N54&ptEPyfig9SeAaYA=$IyiB))kY;iChA@>AkSQXe7` zW5=TJt%mYdBk}&D(oh&CyvV#&0dkSuo{D)@uui|lsA#DPpxbVpxLgPNs=pxZxPSe+JgS8#@2lEJ?k>&Xt1` z6s>erz6l5e>r9JqQMME;IDOA2h|0rMvAp!>QWXfP<;wH^K!>g-UhAJ89WXQh7PkMT z4;vhiTlG*M3Brxg>%(`3B;jKHP-N&PIcO|NslNVZ6}mh@a(|V}h`2Po8khODo6X#o zZ}F^TfdSSp5%D1c5c2xmjd#Y9aQ51s%>E(?w6@{gsyJY-crrsr7`wCT ztCI&=x)+v#^{1!ox48Ksz_&QvtU(?25F6L+eys&(EZ&(lPwT>cZAM`hZbKMdBRyrW zXaXq*BtO_l0p#t~a1FU=4&^({8}=Jp!p&HcYI%zlc(1tm@bF$ONSy6D`=US>3derz z61-smcY1`qz1of8{Zm8T{7+`U7^spOm23`tUMAC7D=k6J)hVzl-wO1dd>*$e*#NEj z8P}klF2tRg{3st}0LSxqn!GHik5H%v0%G=R+{R^{#T#*k~N$Xst>25*FA$CFgeVT@PDr|6~y zEV5faXe3wxH#1f5{R?Z5UJNQ>UTX_lGh0^5j}yRwl_C4UGb4zYlZ<>kY6^sJ5;FT4 z&B0?_J?s1-3kd%R3$3D7u)UucK6c(3GG6<4a_ZYcVSALddNTnoJxsbnBHM$_RiTca zTvMoizS=#j6QH?Gx%tXE3wYUVtl2(k2}5;`1=BmN;YW=(y8wqRuw9PWrhl9OqUW~{ zddS&>YViR|LbU^I+e}hemLLc9w5+#j7gb>QsUZT(qz056WRZ4Y*Ma-HVLAL!$n=QUx-A}40XH)?XTJI&nlNzv{K$q@yO&jtJ zCD!FX(SrtMFzXvOgaI*sA(>^Su!jY0t2zjfP5)%DeYFLg`O!o9T4o8FT%CdnHX7hc zKQkHbuMII@1Rhq#>%sB&dS@rH4dK>Hw_Vy7O<;WBX2s1Q0Q&A$;kOg!!1MBT%EAsy zka;pPCC+XQ$~QiHZfDg7=L=jq^)h;}IbV&7!ORe{54n7_@-P9fhMK-c1Aq-T?m54| zYYscAUfyJOwS+?#KFG`WS;6VjZ#j06Hn3*h@>7o==>ow`E|%w`0Yuo@lBkR(aPL$V zUmGXDtK7!jV<*gE_~7%;oC=mOoBGaM{hAeU?Ecb1va|uQ1V+8+C$=ETE#4z_!~m=q z%&xDiz~##+-y`SlnSph5{k7>RbD%{$mKIpBfRJ@PhxF6&{nYpKyoaC-q`57IC6wC2 zNg`M0NliNlB-Y08e-MUP_LHBv&q+hw`0L`_Hwv(8m_s>}MIB1ts4zd3(t>-BoO*dp zbYU26xIuF>0FJ7R>|>jZL9MhR@=dxK$n8k4*O-%n@W9sS8g&J@V#6kHvQ7;Wa%3a> z+B6|Q#hM`VK?fSPt&>_$ZvgMZ_s*4x7{hYk(vDFbGvGTH*eM`y4kGP0EIX6s!N2y$ zH=paOKyeoBn26DY7%;h{ouvbLnm%c<<@#_-CNbznvk`RjpE8~AF@>aLUv`OkfWwKA z+sxA}fJbro-VP2`P+HDs${?u;F5k5Ut&MabYQ%Lig{lun-fn4ou+a!=9%rB7NHc{| ze@~Eq2C(1}R$=F70bG=L;qXyQU_|U^cv>|;=8AF>^}ROOl{k9lE$Ts7ZiM!0AtT60 zJ-{iZZ3^xtv@Pmq0YY_y`$*arkf0Xh9Nl0E2jutN%y!22(J+~zbqBRU!pG{)xiUSl zbO|~+(r5_5=E*Ut-6qf^L_Vj#9iW3Jkk5041$uGDkidL)vx{bkN@}|mAq(J#dGthu$k?6)_ z6m1ydICARE20dV@s@h>P93s#dV; zvcc}64Xj9?xrp~>ZA*PsRI_-x?sDi)3Y;M_2||s`CiO~PDVSrqJfIaW50+!&dDjb6 zp(!NqjChzPy!Wh43QX34bHNkMlFEJc8};`1ai%bVWsrH^;S2n*F4v##ahfEQ#}`(+ z*2}@TZ*NG~7Zo@ud5BYel_u2jUK-l0ssp{tp4_=g&NFs??JRLmr#95q-BY|fsRvz`J}|A|Famm| z#g}~Zyl{7$Ss}NU1aSE7bj@AA28b5*EUQl{gTs-X-NXIHdt|pZWX0AW?l`Fj 
zRiQl*LN$i)!fVmS`>6?h=+aimPmO3yGnoPs35vq?sJ4COd3_z?tJLs;0>Il9MhxgyLkOL6tym8k7}uYRU$u+^ryk0fuqshNU;FgpB9Q`eqEjhE9};{w z%{f6!afV_`3psXIdvGRvzS1CJ12c{%O=HK*!MgZxw#ODDIDTQYyoo~#y2CT}`8msi z_>W~TI+z$>GA4efr=b)Qvp5=lfmIS65e~UHcU2sXTsoanNE1aCwy*uHz6qn$t1G@| zY$L+WjVSSHy(6sKvSCHa0RnJu6V5Tl^{pF;_ar7|0a}=x|3P=#l7-^xRHH?$VD&UB{Q-G$$A5$ z5$#B|h9|h+R`%c$bcaeaM)9x(Dlp!8HPAFb0e7oQ=fzt|;FZDAHx=jt%MRa&IIrda zmvYz7_|4gX=ZRIxd@U9*Wz;$rsBH+o9k;$2v>C$vs7m^kK}Mj-R>(Fqi|bFNS$y?J zjA8BbaHdHo52&SDJaE)@1I|vtLuZAlu-5Z4m4TiDwQrh5d_E9Cq)xwa;|V8dzVqH$ zHpC91?j7#x(zXWSHnZSE^yWbRR5`M`g94k&6ib+esE`$O#(Uh02A9O8_`34l;7Ql1 zhmC#i@Ht0lUxkb(Oi+)-_gr^{l`HwQTIQ&r-gao@ikTa@q_CHq-0cnWoxW#i zGg&VN4abK1S)UKq-`>rzV_P#3WLdAIoO5%6$DDq=zA| zhUIkH?l*>@&NF*@Y^x@)>7CzvSxg4bI!&^g9Qa`t(*Ej69h)cIrd`Svnsfu6SNpuT zbW&lxmygurIttu=t2LxhN&*?Q%6o-?3p{FS_)z!G9=5eaMsLCQap^}QiwcPr@Nk0< zXUlsJkRI1+mu_=|xXrA`cU+;u!>L}sS9uhWimj2=-bsSblOiugTb<$9xBG$5kJ^LP z+}2O2K{oJ-PCDLH!veIH_c=VT_kh&>H7ixmxWQYt%$KPLsGyk{bn4w^3Z!xytu^%^ z!3|kyvgS@_2vU6V_Je~xaH%DRp2FqA>spG_e`P-T!EeY zZ19{Z70$*;D@+Da!TfE($cHp4ys=ze`SubOV$SD9J@24`a;ud1;4T{6)hOtDeU=6R zc_JlGAJSlU;b`!^FEq$qw@Tr*kQ+Gc%YXT}$qfcag^xE6yFn7=sP|27cM!S3D}Gtm z9j;0o>O14@4q@iPhpsR8fMw@$$rn{Tpn#K?uax2ehFcH5Jig5X+F!FBKUClWX>&P* zVtY@JSLG#^Oqt)v0=;#xZMn?_M*SuqUd_V(= z+p0*Lwrj$V(Yb?fq}ISA$=>2LvK$oA8f)}-%R@~1nbyI}3gCeD_hk1e!lImMo~VEz zoXuiF+hT;lW%j{`7cW-BOi&h=rj9reKa`8uK9B&NOT5Y4R-eso?U3$^6aQ{jc~}f> zai)jGb>&odpY(N3GOM;hXpK>VQ_G}l!JvUFc-~qY44?itk0GsV)toaB{=TD!sZS? z-q(C8v37ygl`qw9y(fZkC}pTro(x_xj|&fmQy{*_-eX&(D;z(5cjej%DwGTd#e4EN zz&n5M5tTeA$Qn_oq~jvOr8A|$!z2XWY}GoI!Tr+kk2U5v&TLPYKrH`}dc+tmbkH&=0%Da7@D*VtOm0 zBWi5nDy?+D%)}n1+D=k=8y!K*GW+fz!3At5UK)NKH-`SmwVBQf0Fv)L7bTM|!QYD< zHNCU z_!_YT_iv>-etJv7?a_V91)t~H%0uLMyhF5_28_N5RS`DQg|HTPt_B+;_|jE8qon|# zNjed_{GtUYhp8{W^^pVieJN|cl_d$g+EVtjmMKBqvny+qdo{tR{xr+;5k0tGerR~x zH)CMFXgbNPAqqRx=Z&9Ul!sf%8*Ep`m9yB zeFd%u4rj`wB-Uv|NCI!D%mV`$T+iUp`oa`6&d2dzA2)|6iKvtVF;*aW>~%)CkUr$> zeXx-Sm(NyQe{;@T&>RSA?LzALmXK1~n#L$&19wFlXFJLW(031v=x{%lgsJ|8=gk)2 zSFI*`*1;O`73DJHpW>=Uq1d968*Yad;E$4g>j-*{M=sjq_LvxhbP2KVHc)+Wch56xu-$H26)Z{DGSMCM_JG;6bd~yfN zm-JgU^pilX>QeNfEEjlHj25OSkq?}Z9%o6hW|2dN57ojV)YRBw=m~Od+ST- zz%}F9L7P|wK>5B(hOR3i;Bca{n=A#ct#X~b%tiu!Rce&bfD<51hPg+V>>xt#VoGwN zHSimI^BUUY_OD$$radZ#@G>oeYJX1yJmfQ8g*8aQE~oDU5aJ3iA3r{S!kG+8WsL*uD)@cy6O`RR@vMW+1Tz1A@@)9wcH>>z5SN6GN*~4a6G4Lfzt_| z^ZJ`f=CaSiuUM&05$UMI$sWEQ3d0T6Mhm; zad~>;o@_!51qP$@M_!yFLComKzyKB(keJB*#Q(w`TBj}J+fLiSmLKEGc1K!(>4K5; z#u#Hbygb*ii%T0k6*ma7u5yEm7c9Gjez*dYns7;dFBv>9kB181BZBYxOuu1ICt!&% z8Yz&ogQ}>Z7%G0gz-Ma?0~_u);haidy-UOZw!I&E@`dIOkJs1dI-AfyW;n;YOxhK; zMMOw3v6A8B+r3Y2_u}hyzovI|g#%0u_I!}X{e&k+4n%j`;C7wf=mk04pA=W}W5!*T z3=h{lt*cr^1Piju3em3)@QF5;{p^k{Q#4a{~{*EIUK6hmNO(SzdfLkksKM zy0YIKsMn@R89qi(Uhfh*^Hme*4pZpwNyx%A;mJ_tYFr*Vbzd#zI2ruK3#R*$h!AEu z60_}*Bk&RO7}QP>06AJ`4y?!NMVb^ku8rGWFPJ(MFzCZ^d|KRBA0K7w&8{aoXt~FvEej< z+0!Xjm(Oa$)S-`Eiw16B&x;B@C8%&JCpY*iGX=f~Y*6MNBf??rlz|s#oWR&XJ1RZe z4nCOl%>v;5B#X?Huk7X!ml?Chy~+SqIj%eMaXT(=Ju>pJT1$gEB9C~1oh$G@9~$_s zMut#o{dre@B5c2?)aU!%0pzMXBUatDg(tOpXIc(h!gTyPmc=?#;9-ls>Fi2|^%+jP z$_$ARD^AE{v2X-wnQ5UqP69lXFDi}3{a>Fw^8?95Gq4Jp*>?M*E@-|kks>jxK<(?x zMb4WALF4XZwEj_CE+w=W97!a>wH&j>*Y}(u;P_}-7VbZ0FF3|;8-UyUi*1VLR4f1y zKX?UA8v(zkdGzB?KlX~$zC1&OLhgatAYmu4 z=gUeF{y>0fAyeKbRaQ_SHj=+L3Ac0Gs8eH(>%)*sT120gI&3l?9$*v2<=Q%QKA6!J zM9hyyHGCvN-@exo`h_l#mYzB@7U2LohjfyQ4Q!!1yPwO0*%ElP#Pvkan7}&YZO*A? zI&kpYx>M7BZt#Aa@mit{zVFQOD3mKx;NXsrpO*2G;68m)Xv3rvY-U!Oj;XhU)ofi! 
z$_K3BE>{YBzdL@ONIpbe&A|{ZUg@Lu6uHAPomvsY{WS2X$?VJC=n9nv>&||r;&zc+ zMjPyniEw$lOt8JRBY5q4I0D=& zVGKK`G!xIP(S~!gLJHDJaxnT~_>%w~u3v;TRBs+6L(b^Y_=jyoXn*@Ai5TnzJNx&u zGi%{~&kwqJHy819JR|R76xw^r6Bf(p+A4~`U)DIouT-Q`(*63jh{`*gw78O}VHjSpnA2U6#=WrCg7 zF#Bd_*25BWaBTE2WPWD|_CmgbFYdcT!3phdwTm?PBFa{hmg5S9!oJY=31rBZs#U|C z1u$qQI--;12wBb#Vn%!jU_^;!IIU;}Ef)!^D{%kWX`w*L`;0c|*w4WjMRvSA{O!9h zVp7i-+9u|OZk$%`tO_Ng!mo{95v!fhg{uk`@6^|#*g&RE8Os2a&JyE4reucdjLMEk z^Z?R}7KpW%w?LQ9@AcfW$r8!;e=*6wVTBqvPep1n*rQ&?&YLIcoKe#D{mFY4iHQ5# zn)2EOGAh{A^mJ_A6{(E|%t+1CP<`;CnERYNa_Kv1oiXc)db|uH?)>mZ$ib?Mv(XDB zuAgl!Uhj}Iana}8jJO(?QD?Jg~Vz$ zf<1CLT}vNhfODxN1yND;mM#e@8ZacE;F54dTdsa?uoCw`LaLf( zx$B(K;dO;lsL}=PbzaUmoj^nuVRcjcB}iyt@76)FW)iYr{a|4z#1(bru9p=Hp`p-~ z%N!}|-O=~-qRpwno@hTa9J{^F8wu@tTR$1(i`o-SzVK@Lqt!+S_El;GprDy9Av=vg zL>=!TZp;ZlT03sGT>lV&c-W6xFc}7-@nf5BIv)r`kK~-Sk97y4-mN=b!!w8|YJ7D< z;Q=yQ47+jqMY=0G_}=Y4YdQ^y`%ix|OLIpTyAJbh-S3G^sp`h(Q@zon#}5O0QhbqG z=~b&VOMjFI27Oz@0I61&PK%{lpl&%H*88Pa=-`PRVw)OmkQvvArf?qtjqi4|ZCtQN zJ_ef}0z#}umyA-tYO?dFeW-2;OTai)H{cLnI`VlcpT=69)6Q@ zX9}YspUr58L8v<#8}%uT4Dm#?5OP!X8XFYjld)1<)*j`M@B8kTc1Ev@={vr2lTesg z!JH~L1^vh~mJZ^gqBPzvyJMVgNH{Y#>Jf(rDr4b0%E;k`9v7gq-5YJu=j_=jLZk!Y zOw+Zfif}uc|2UL_$pL)xdtY6`km{bkknDk`#3%Q8}}azhI zjc)GfaPxc049ycs!Jc>(nm5X%@fgTR0 zo|SL#LO$!(Jn}(4Xtbws>TtauGOf785!B<4_Ju7LE_4T=rvqLh_2Lv1;P>5pN{oso zUgd@;h`FI%uG$BEMLiJrVs^T8{4dS>W4;9gs%9IKRTJEYeT0=Ldxvz5jq+a zbZg(HBVp=P*~4_c#FRDZHS+yHT#OG|d3(r~L&O(7>MSrQs`o`{Q#2*L zAU||UXNziYh!t`w>$TXw&ju~5c5>4!A)t%+rK+zR?2#YSnxy0&N5mC(I9p}j8QoI6 zm|(x2fHvfd9Qu-Mk3@M=OHLL!qDOnDYORqos^5Mo_**0qwYwJSG*hHTf=2ZblNp;J4ZI=892qL}M@0-epM$ob3lQ7LB{TB9-^ z^Ud20Ed;NjZwq%vFAKfX3*$YI%8%iNN;Mi3$dAh}wt1o3TT*2QRNYZdW!=SE6A!duXv!eN(G%H7DK!Onc_A_H0B7AW zZ}c_iXeaA7ALR5|)=@~+6Ui!gcZ?Z(q4~hW{&yU_QOC*q!?~V5sIp^WOL(X++Q+T4 zXcp&(YI35!=C1HX2OpnF3K#T4)ya{|3VHoe;pzSMtS9`@fx41|TciR|-0S-G?y>+R zC-VGp0IN4zbY0=8E98SZGtCBA<$ckm`&%P>bp4POJ#Frlls{4z+hlHa-X9$;)G1SD z^F&cy;ac><_UQLo=4 z_BLS;RC|o+XrY29I`rI7a;v@<3gy^TZfEO_%x%x=h|zpdT=@^LJM1(>e6K#3E8>Ra z#n(rLE4m|kk5fO)3_Q@Q!_Sjf5j@f5wiR3-XhBlQ8RsVJ|c#ZFg= zhBkFCK4&#_Lw16?zC8qYBuS1Ks&ex{?$_!$-8sofV|+SKS(JiSC@M?SE4!k;wSg5c z45{e)sXV>&b~JSO<)elj?r!M5k<^eB7ZDY1b>I9=l!Ufj%$#dgCL_|vrhSD*6r>oUrqtqD{M3?0GHjh$O#mJ(_Idj6P~lyvS7{qJ~I0|8QdxI(E)y*vx^9 zHuoRdvdWW!_&4;gWW8dIv;#%`wp_A9G&lDP-IY$r=uFG@k`5wzAhEvq)eAE6Ji+vU z^MxxK<|`VvY^R}wLuLZop1UJ9wznpg&pgrHUgtGck8RQW7yF|{A2}fA)+UtF=7M;7 z7n!%cC!u1`Q!14M6r}mW(DmIrD!Sv+887(G4Ur#gJ?+%*f%>g@@9*yOLNTs44ZX+g zP>$W#l)EEN$g3bPOmLcrhJ|#qoTkVqA^UmF?k}#0m1|RX%_I$-*w49aY{DHWGF?!T zn(#z7cG!{K#=Vi;eL0t9Y>uedSL0C)6A@{@dNei4NJfp62oWhpS44TV!orP#hWZI- zLk`fpqs@(02OH=-5u<7U)vt?Q=y2`MH%f~>=%rEi+Ddt6l#sXH{QxfsvGXq5*}y|V zCo+mkzpkVrMHXmNTIq&v?0!4x&+UO6=HrEOxxCQpSV3sz^g&^hSA*#|{ZNw|-7O_) zB659ps^4FVj0PNyR^&?Ja>Tt(wN?pSj9jKSvcc7(A_oJYL>BH$W-w5inoGx$cJNwW-H)?%ytPq>r5k} zN3+*vIMT?-FUD2ea=$D3G(KS+mr6s4;RhosQrr>8pz@LaeV*u)|AXrS`@GS3x`Nm~ zU0mMU6%;3y8eYBbvionFk*RC9Gge(^)~j%7qNKDBpn*Eupu+9=IYbJi7c4vJKKKNRn}BK3C%CHGZGa++{`tMftRgGOB6B8TKG-tk5oA1=G4 zea9Cq7Lz8b{rwU1o4QUPD*}>z`d#+E1-|dEJ#KJJf{3c#>iItsCnF2@7yB5+UD2~= zn=ACiXeiK$yM2SGJDR*#@uO(9C)#b{AlWJ6jcQz!#+U=`QHNEq;U<4)G<)(%;|4Vn zN*9P6E>fi+k;7~&J5{KtoV`qsL&XglrP%mdD0`q*`hnzFB`@T;eOLJfMIW@j@hJVJ zosP)LJ5-LtjEJ6hFgshAlF?dH$(C3XSM;UT{KN%g8royi)6i$+j(8ik5Ahp$qGBUq zE=NOeq;;)EXP1F564Ybj_c-N@R_7L6?sFoc3*081{7w{PoZhwI;7CQS%i?5rIk+Lu zBtFM$_8w^D$Cb?^c3vo9EAg0woeyFiAFHPl{7?{8pKq5B5q)|5!swbe8SSx;4jl1v zMZC@2`z5?+sK~7HBGuC!Y2I+?Nb~SSceFpw)w_Em^2L49({8?KVik{Uj*>qzIT%?$ zjUb`673_D@!YRl*xoosPjEY7UY1sT*MRGdz6`t@kT+6GIrc=o+yS|4Zx=mL=7MT#Q?4?! 
zlaQ^J>>Iu36!fy<4#S3LRJ2~+U!mx!8~Ro>@AC4A2ihlkB8L5m7vj&;KV|;d2lf0| z_Wsp%? z{M??>^>urr&rc)e_`7}4p~%hOcMrOt&dapom`M@}QJ!ikpP-^<)e9&FC(#yl6en@=NXJ&o|61q5YK*fQcf*@GY zHHnUjp7h>}zq&|6eqQTN4==c*PcInmi!XSh9mK*v6pzc}LCBU=I|wDAL`w&I|q6cAbe~oewIjTVCxs<%kA6PA3I!BciSC zre}}FkrAWIn}=<&t|+@@+Z25)4aryui0H+*qw2b9^N6jU$ijdea(at5dRFy0vm@FU zd1!9A&&cC~ZXJ2as+USaPMljcB2p;mbt;eh>3vib!FXv$$6hxym*`Z=w$}q4m>F(` zWG^HVojSR7j}IyzlNBo8?T408q9Qwv5>d`WjvTh5WF$){zX`dn=;|Fp|JEEDvNjoH zDbIFC?KOMUdXIRbAWcac-w|&#b)jy%{b65phqv-+J;5KnJ#c2`{23D3u-r<#x0HhB zclBBGl~Pg0+>QwQ5;r6oyZXq^Vh>a?as7IAkry(KaO?ee+6T1_PBDw0_CuBGE&Pg4 z{L!_$Z+7pzN=DY^@nzN3uBhXtz>|+vG_+3l>Q}KUcQk#8Eb4m26YW(Rx7>f(8}XgV z2&=o~i^zv^?tgdlN5`E8yt3T_5Iz1O>C(^Q88Ieo8MgFJ-R0QQZ|+xMORr64!|;n% z={PV>j0@w&R$@HZ(yQfGVSE@rCV&ZIORv`w#ze5Cm$8asVwgB4fk|Rgm^3DXE&c3$ z>F4osm^`L{DPl^PGNyv5VrrN=rh#c=_OZ0jD#({b%27oVpNQVxnb^@2j+=+VcwVz z=8O4Z{#XDOh^@tfuyt54wjK+?La{I`9E-p<!W!QPF9J_#3V3pWK>=JevyMk3= z)!0?+8dihVV%M=7*iGyfb{nh1>M?{hV0W-atO;wz?qc__`&bM10BgnCu!q@oHP zdx|~7o@4FU3#4bz?nPFV=_kWACs5>^(M!eZYpWkJvCaf{kKh z*eC2WHjYhTlh_w*3Y*5hV&AYC>^t@Yo5kj^d29h&#FpN}u=GxarMGr4VocaFj2T;w z;s0=iZUx4Qv0>~O2gZqUVcghCZ0Wz>;l);Ad>B6_fC*wkm@p=St;R$#F-#njz$7s# zOd6BHWU)1v943z`V2YR$ri`gzs+bz4j%i?;m=>mu>0r8;9;S~OV1}3xW{jC&rkEK9 zm^o&FSz=b0HD-g^Vg$?%v&S4TN6ZOx##}HWM#9J#1#`uy7!7m7+%XT#6Z68nF(1qq z^TYhH04xw&iv?lpuwZOG7J`LhVOTg8fo;Gdv5i<1wh7yeMPpmAtyl~ei^XBvuy`y1 z+m0n-JFuNt61EH5jqSmbvAx(nECox&_G4*SI(7idz%sFe*dZ(nJB%H{vauX27dwjO zVaKrJ*a_?;b_&bK3a~=#G**NaV@s!*tHP?W ztJpQH2CK!cV>hsy*e&cfR)^JN2y4LZV2xN4){Nc7?qT<_7VH7minU=6u}9cr>~Ldaz!s59`O?VFTEEY!Lf^4PhU#VQd5&#m2Bt z*k^1Uo4_WqFW3||jeW(wVKdlw><2cB&0+J{0=9@P-T3J-28H#cr_+tjEspcY*sE z=Q-~kI$obK^!@HL#@}nMwdOhZ*=w&o=bn4ZKA0bMumI{}K`exNsE>uQ2o}X+SR6}W zNi2n>u?&{Qa#$WKU_~@QL#%{GXpAOkie_kzm9Yv|MGLHk)zK2Ium;vdYqY^ySR3nL zU9?3ztcUfn0XD=&Xpau)h)(E?F6fGG=#C!fiH)%dHbpP=#%9fjjn;5hUaxqga3Su_|Im8k!9`K@5(i~ zzO&5Y^ZWGbF{@qv`FoVTzq)qu`88TI12r>HGXpg2HhGkZTEZ zV?`{EWzZN4VP(vZUf38N&<6`(rgO$(>`Q8hoF1EtvSOUwU8?pj6Kx=G*)zJveQ9h5E!)!{OMAlB$P1bAv-Xho*J(2m&`p*8z{Ny`j&F1s7#`_`LzA>_2avWrRXH8~)v&OS6 z{gLmP`OVtNT;*8EJmq_4zO$b)U-?eWkTp^dS>p|m{gOG%n#wxb6q)brr*6nLWZh&A zvfnb7nfvVH%)!>kd}O|h^_?}AxywGxKF)s48p;|UfXrFeXx4Z3Vb)vLRMuSPEo&+J zGINR+^JaWox@8po0YXyIwYX#*seX&pfs$*bK zP9VI>W5NSx&hF3nUl9}f<^*ikD`#PU{j0PFp7lS6SMeuuK6mTMRX7;Raqz{dRr~xC zNB^pIQ|!<3b<_Fk*|U=If5N)?Z_mf%8~$1I@lRaS|G$6dPL8!UNbT2{en1zaFVVT_ zH}nDAgmaKO@EDB86-e#9Bevq(8Ec^lKGe53eVNWfzoif2W}J%#I2IF-G;$^O!w{qf z@8UgcqbWYpUV^?t8{j)Ugj zb+Ix&(HGy>Fu!(d`Xlna9zjmYCZaJ;KpaS((>k;bP3_+a zkK%UZn#BA#5v$=kjKD6~)_Z!O9daFG29{yJK^LHF(Vx&6kKqnX!bw;i*JC7h#kT0@ zJw34=R>e$x%hES#UAi{?8QB+)<4$D1UWlaHTw6I93!oo1_MY|80-tFwN8h3iu?~Jg z*WbxD=k&A{4#7p(4gIl+_bozKqqXSrv^QOlZb5&=a(DuFA@h~Ao@*)#VlsBe0Bq_# z8(?*;fa&@cqSNSa_#98-Zq&hPSQDdgG4{ZA=;b{dq9wM}&OUz|^|YU*xft~Yc?$O+ zb8e)V{j>kV-Pmu+W~7}Ywd>gJ-P^efzCz$MLVMd&P4Xb zkr<21ur~%{bG{w1CbrRDiS~+S47IesruX3noQ36a6vm+m_CfZ`7JL_`t!ZD{h`vu3 zr7zOjH1EA1H{xs@jTLYhF2}yu0bBAdJ&v}}_bGBLy-ss1x}z<978j;a}2#n8{!u{jytg& zvZk`0vd-4SXk?9bL+LT}nRbq$w`h)`Uujo#Lbg3?DeI_MW9zfC#4MMTLHeioI)>jXd9!E=RH^y}2IGRQ?2|13Q#ND_IgOPQXHMSwf zB5SNC>Y+H6mSX2vdYk50dNyr*iKlQ6vZm@FYwaj(gsio!vyD-DEG?~_W9c25V=2dG zaV$N}z86_n#ae66&RQ$h*(U7L<7pY~98d4k98Wnmi{q&{HaqCM0_Wfuj7MGUh|=R} zS?wH8??sVgvpAmSVRy#4I2IFdCHBJ*lpaf2hdGvVT;^EXoEFDY=ITD=*gOx%fy;m7 z@*GQ@kaLyBc;9%=Q*ule=P6Cu_an#U`8Xa6;c6U!ol$q0vNHQ2+=6WLiC7rdVFY$T`T0t3eIFy|EU(hyoTUr9BPL;6oPRIJSOul$6*IMqn3y>A3m(UvxC8@nI%?r?tcTIq6Wx%w z*pjYBKcnSwabtbm@j0eodz^v%twXUFGF}=Jx71Ed+?p0K@d@@_n1)L+2y5dAY=AKs zjvgqFiHS+mk(ihR?hEoH?#5*pj5Bd0HpE!$jh<+Ms^Veh_Dej4d(Z|;;w&76{Oxfl 
zkB4jMTN({~pLb|g@o)#@XX9wJ$6?qPo8ZsJ!z;Y!92|rBFdq3HJ7R8B785har7#KpceaWU(>3GTyrI1bC=D(sJ)P#P1Pc+X!I z7u);n4^Ul9T!&9RT!RBK6sw}Dn3&__J2c0GD2<8rwXelN7=|JqwqiH4eZA;Ms45<= zW&A{Ji-mC=Mqn5G(U>?l->mU&n1qw?M`Geu{BoRZgX&^pYd!~KDI_MYM%&U=usQn^ zl*Yui+K=EiT!j8O1r^1_;+%6$<6GcURCUgIs__9>9QixTV0Yx4voqF0)>nIdA0q4G zWm@FQVn1fBH$bu8v;K=UU%oD~ACo6vp~aebh*?(CJuc25A z9ofY`oXBpB6VL*s>%M$FWdCI?yiSuJ+tBQ%PIwfzBkR64vi7rM z4VoGx@BIm#@fhyFBy5YU{lu#4F%r9?ycU_Muey5vLOv&>B}&(RKkZdentMOfE^7Mq z*_rQO&=nh_JLX4@gB$~i4~Jkk^has#-BLTbH#svk$-MMe%!4Oz7qb4>z%rPO-7x?a z<=*M~lY6Jp-|#t}#NDWa(~#qC6fVXd*bb$+H@WX^B=0^;i`@GZ^BydK(~<8w8kb;C z3`EvSk$1CRQ-{1mQ-?f9+u=(*jeAiSXW(!giYeF&+oPyKvh974ock_K&V8O1HOMpU zsaOze;|MH=F}M`NF$kqOH}jnuK-(tMlaF*QiO z;|A!9TH0UJ`)~u!!tyu@1p9FVZ64-p{@fXJZ8%jl*y` z_Qeh;&AHh>$+;iU)FLm@x#>6b0o;Uha12(&cwB*{u_IE8oQ)%K0QN-o=Ky4_=G=B& ze8KlkiOgNrUe?Hd zxSH>|I2s2b`*9$$UUO`If)64?h?cSqs?T+8=-9E*|2J{*Ls)f`*vBe^qc;vL+Bop2Sh52hh&<3L=; z?*bf$gOPnW7+I$|CbBR$z^cf;d>8j(XIzbQk>fV&;~-qmZz7JzAxN&=0a;%;CUP8Y zh!*&ke~yW%7>a9f9-c$i$q3xQZxT+xC^W^6$QsLWk>hA1tcLISXD#1{YjHlF$FUfR zVYrdsg*XwTk#ngbNFL9zQ48&{I=<(h_51?v$91>>$Khb?f{XZ_ghSCBJ0ZC|$3~8& z4rqxV_2#j%bA+`F?~Pt2ba0PQZg0h23y5 zzf&+4Ly;2`?#;Qvyy%QI@iX5~ z@G^3oUxbtJFb>5Yn8NQg9EQEI3zk976>@&j1+DQ5-%s%hZpLJ6hm-LL#$Zoe%J+1P z$3EB<$-6mUm>*ry2EX#1fmd-0F2*T%6l0P2e;L0sFai5wH!O#oE7UX{c$L0La#NpTvyJLCu!vg4zwecC>*Kr%R#}u4~$8i|;#ufa|!V%aXdte3h zM_u&5I{2LL8@L^pVh~Qp6Bv(ua3#N*SIrF6%s|Zy)XcztcLs`oN4j~RHnaXv_b%(s zx{g--)qmQ*n<>6`jn>RS%?#AcK+O!)%s|Zy{LdLE|MvrvQ#@Ais_Nel%$JJ-UiDG) z?+5-q|9+sw)LFY_j@rNL>{c$1%?q#JclF%P^kU9A*IcuDYT1)9>d!a(p2Qq;G|-BB z_As60rFX$uP48Z6mjCZ>pQCmzRCr~)HvQlJo1$E2n*G|WF#b|4{r3ayb1|ukmnoU! z`&_K6;+0uym5W>d$6kNcx+(T&`MMdO3u#^%|0k@Q|Mq+=R(tls|EJ8y?7dg>-+$5! zd~S*59!l;FoK?{%6^FKKq9>*Xn=9ipFxS zdtUvEY3JTYo?qWZU+xp+x%2(mcd_$a`P_SW1bH6(8+2#-70vyiU$7u{avGdy!zp!(Esv&z}dKJd#8;twd9rf=?pF-|yy+gl4?g7>^_N~5M zkkh+5`Yu9#H(~{PEApEVLul@W<+mK}p@Zp6eItT)?tLB4&hK2*qPd^85_?JZ)flUt z`=s}?JL!L#=Jy`nrMb`gJ;tN9zMuK+ilx}~^-V^8!=f?04f%bFooMc-<<#~S_73_! 
zqh!{-_>uG*oS^SsIzK%djW{e%ufaI|{pkbftUved^7|9-X%EK_SWe#rI=BA0=#AP- zvzK6BjQp;~{Q7QZ=QlHUru%B=_by&#??}tH?Rw+YZQG@M%C~K#&pTg#b=!3+pVIBh zwv=w$(Z;iF)os^2{7Sd$GVRjs`rKz6j0^NvmuFAoGl1`dn9AOpK7%6H9;5F^?ZZ)3 zo?VyU9ek?GvlIEv$*&2mEYIfN>~?$}Lh|fBv^3WqYy2mDM_?Y~3(=ENo@d+g`!jj= za{iU&*)xm{^f#K)tIDy>jjxYClVh*qUtNy9h);6teY7;s zo~2z?em%kX2B<8*u57G4$6l?kx*R*1PjYPX>We7y?AiLO%C9FH-w>7M*TapK=h$oX zRhMHg=93(oy!sM~JbRA*s`6|8KbDP9S$;jjSb2`UR$p~F_7Xl7<=AueSCwB+Hr^hU z<<}#PCC7H4htVR>UZ-7Mo}I#{qC9(^{_65<{-2uCJe%D5G>Tk%l=0-OisE`=`Kw6I zI*}&6uYq%r7~d1Aqk3sKrq|M{;(B8F`$%j*i6*``#U)6L-;$<|TAiI5s-n1l2%pz5 z#CT%)7wlDWE+%UCrHSv|X%l)Kttzf3mVbc6_Qdj^(1SgNCdO|{6W?3vOAS>~Tpz{f zb)<$$EdP?-1n1#q?Ja2Hdq>)oUQerv>xt#5qY~Rsp+93AT#9+UzZ*>*)yjBksEXqH zXg+UXXXA^XV_x1c~qK(`NJrdW-Sx=_Fbn*AvS>LSp-=H1U0NdKu>P{th&C z)EfFzLsb;lA2#+TzoEtx%m2mR8W-SKbVcI(+O#>nkrwsSh5XCodgA%V=w|FRni#(^ zy&S2f)}yJT*3_T)ez$gc{dB0lw=j%f;`vk9OnV}JLwh8~x1y<`9x-0jPZh=X#Pm{S0T03Cfcc^65sFBS6(|MmcN6< z_r&tV_;qm+662fG2lcOt)KIr(oDQK=^d+v3U>7yi@$8w{0I8W0<5y!Z#IGMs4Rtk5 zT)&@o(%y?cjl}fv^j##br>^*(y(T83w(*VirPgRcQ$r;;+-9t(p)S>a2qX2qg49tb z(9f_A&PHmEX7paV6>Y@l8d_DIF~RtIINEzsGyTABfs2t^YFnB*YBgg;+#b)~PX9yb zVr(Be4Y~Gp1WnvtjL*rmh})$xdy4+V?0$Tzi`zT!`6F?A0>6PicLCbfXYb2?7C9d; z;`ZXkPSIDy?b4W?7@nBjpU++F^7HZ0`rbg|_KtkMVt1$K(_gR&DvR5@@vAIuFJbId z@2yJ{w@YL8W!j0^1B_J{w}OXGIp z_-Jg8s^a%T#)qP^_+1{m_u^Y!>^_iBTi>TNZYPc(s(%owir@8&4?|_~yF7Le=bQ8L zgM9wWs3>+H#3!-4G;Sx3kI_FERmJc6#&skDi}<}T zpIuQ|{7&q?&9)VBdmp~l#qE)NDvI0V^!N8!4Y*>{V1$LmbSfCrWFG`L(a*e;6G=AH-Dl-t-yd`q1I@eJrZ~B>E$oqO^v% zL_0OawtVhjug4xrZ_@6~K7{=mQb*L$_XT@ZoQp%W`_iJ0$XNW-KPR0=GqyN8{>fLH z>${F!p0lRu``_HF`ON1I^BE6gGk@oGx)c49j>0m==cPN)%B~?5IqO+I$yr6d`q2B5 zzkbGw#$L>48DC60`71TXX8NkjSw+4|u1fw&o>~yeU&&Ps*~wqc*gxvKfxRr@{2zbd+p^t%3{)+ln;3;c_FHPekJPpyRH zujHy;?9?5pF@9rLm$RnxeSzQCbPVS3z7^@t%+hN~uV~lRe-ny&qsUn=>dSScH~1XD zPM&In5dKY!{U4-PT73i%f z>W#gPCue;~2h*9xMg;B8k=i45MlJfOc500!*{L_iYA=cV*`4&4)*SEZ`yR<%sWE=$ zm)w;)qrSe$NWN-J%X8Ly{9e(YIwLj4$e`~Vr0)2L&QH%qBRg z=B$tTKBF)BD!FQJ{D|fCCx6YP^U;%#YhKII%5v7ye3P?^d{w?}t&LZ=ZI|&$K7Q5T z*^y@3vRxPGuWq|e<1>KogUEJeTS~X>7=78c>b7fLes}OIa&a5&((U@f*ddsxzq&ko zI-l+MK7=l|XCFEZMXo*8_)pqLAT{Aa^kn2(-b!=|QX97AcPG2LJi8XZdHG#VE6cNI z7#ql^$g}&#Mry+k>2bz?rq$)ycKq(*S6&;wp)Yk|d7iz3e`R@gZDZSe-y~XI8z$Et zuRpoAx;(ocpO^4Q^X!#;tIM+&@+q$kKlYv0(SIh6Vy7l-Ku<%i<^7=la(43U`na22 zUK_rpucADA760n;>_vQ18&0RCx%MpWs&ecJ#y7w{{&x8_z3KWY%CT4TuP(<<=2Ko5 zCeNOYs`BfJ#y7;h{&suYQ=VTBH&&iwuhCasj=h*qXm()m0wRXz7eMSJ{^3Y zJ?N9zn*EOcBT$}Wuhmywj=h9Wd0m)1d#?WDXl42JWaI5|AD@oC!=Cgh6#4Z?W92#a zI(^mU*eQG}%CYC^Ki=n8mS0aXJ|8N}uSXe6j!m8}&9m2QSC?lm}GV;fwG)KcAO>Zn%ya^0_@xIUUs5!(~X6W_O_=hI)X3CgeeRTbA0%Zu2ankn&p zbDHaX^Z5=PXzHjn^yj)?MREN#V{h^sYCN(0U+k@M0e(eSB)+dro72+z=|cYHaXqoT zi0z5xiSHZJ#PZVXerxJaY`h7np$HuW2vE%8*WEYLlt$#!TMfBYK;@==U4;hAT>uH zq>k#P-I!iWPt!MmK8W%f-j!e7@%26MrOb@5#UXoV+}4AHXMZ zyEJCMt#358M^&+VA>%{wXJYqW{L9bF%VYO}eA@a>rExoP{80UaP*wb{XM7kci{It3 zdpO_n^YZf8eGs44{(foPP8=Vje=w?w-}Q~}g397|dFDD~jFY^e66C6~7nZvlM>dS5f>< z>@JPl`||Cjud29xFrSLz_F?*~i`$FxDUI8uF*`B5yoOksf36K3V(c|kR6`uXr?0Wn z8e&_0*XbKix1$fC{QA%l`aZy7d`_l6VJmiN4KYPuYKVUN?__^QhtY?znf_7qb)=5i z!PuAVCO8kvXm3G_I^u3VIUjuiS<^XZn}O{2ZD=jQ^$Yf%>}A-=K@+eB`&`edxV3apWarZFi)fA^U$@Iyax>*x~Hu*vUahpf&q^+LPWy8{i4t#y97Z@8TJD z*7uM2ke&UuF#liKhp?No7pJFSBlhKVYdV!Cmb{Dy`F5h8Bm2G|orh0y?B49<*>&iV zXv4mMZcJ~c4e=y$%&kk`!!&l*_)qwVy)#{e|8MM3?3LL$S3ec)*;mkQ=zTQt7iJay%aqi9objW2hb9);SulYO25Ke?Ct3M{F2M|V>e(g zNRPof>jz_(x z8~qoivj@^T{F2M|XE$UoM32R~?2Bk`dOK}`X~=oj`t&2bz}}Mnh8gT#=@R^F(Xm*K zy$n4Qo!QsWhp-0Tz+>Lioqmnfj@#1(_$8Mez+Q=6j~<7%?8$U9dIxQaXOVNP4d};s zk-Zh2gU?L%ZtNx5wdpvl&R&+Dg)Z!C>BCqPZ{l(9=|R6iYPCVMF2Cfm1KEw(_380w 
z$G(_uPVc16@Eme}wITfkFR}a3Ir*eE+MT@=dv1CdTC&fkUFmgnIXr^acneQ>Pfz+S zQmYN73-bMueGt1bdtrJ4)?;5nx1e{?oA@-R&m-qo8_`ekGJ9(}7r)d-d$5;g&qK$f z75g08jb2Ze$D_EJZyP!tPqH_r-yt>I4s;>@$!8{ylQ5kW(I0z npk@YYX5jBM12xZI{5$Q9n(tCG12r>HGXpgKpelt{dTYnhMGjDq^=SZ(oGuZqMUs}wgKK)QWF0e;e|SR zgJl-scMIFQ26ftAoqw^tr~iL!Z(j*?0e)zEPuFd)oN=6tq(p<7@uyO=R6?Q^Hj)}E zKDO%?O9*3}wvig^|NYK`X?iWqukG{N7z+I@#IwH7|F`?R@gMqJjs_-v;$&+I30_O`=%! zZ1&@}{VE^Y)E=2?_M!TG_}9sY;^SQC+3)+D#=E+gu%x!MvlM-o4}W|N3~yVzF71W& zeg41E=h8pShc3eYTYH-EkB+U)hmtk@+NN%Q6>{CEy8P%-SN`Kg|A7_ZTtYnmH+;Lj)g8*} zeyfCp>(JzwXor|l5mrM->0vJQHFdAhKJh*m|9v)&t~KxVueb9>&3Y6U8T_-sH3I## zh9e-1F`D=9^dlZG?*NaQ=KU;QLfe~3jH;;$@$zx=t(m5Hd4dqH{z(yWLx;o|sQAT1 ztB7I}RCM)Jbc~F3bdCEMs_5zH7^)cc?W5CAPa?c%JVqog=Q?4}m)l!WJ58nL_LpTP zm&0n7cdGd%_5amJpgsci5vY&APb2XCxiMW26?XXXs=vA(8bdF{4;^po&yD{#&W(S3 ze<+&{viOOsD{H4DBx=u%8`kufPTh5la2ytY4Rvi*Q0*&)bzA~#r`799x+aS4^2c?K ztJY2z)|ItWf3N?AeS_LZW4oZ;AGe?4LGyogAFY1h_}{*dCiTPX#)EZvuJ$bUzxwt2 z>qeKlye5qM&2{~sE;bJS>(&2xH9JdJM;8~{us~ZccWS+`PYBmt;xQAyj&Sq&^>x>O zUPS9#Tpxk@2-HWQJ_7X-`0GYMI5$?oCektAM(l4Y!j}K)x^azq?OJqxU4Py9*ST)| z*|nRbMAO>)wRI=Njd1ltRmleQ5eW%=Mn|D$x|NDW&2^E41d}k3l`!~LcavYT?r+%i zSql0&UN>K+?sauzn0VWm3-a1(aY{-KCt_zb4aV&!S}O!(fGr7&5!u&yliXF z{(ttm+E?K&bKQ1F*Nt&rx8VEh=tXtczrt(8OS+V}*-d=zAE#+QINkM+^Zp&CBqeJ9 zc2_%iBqaWM+|+)a<)8b3B7*T%*MCAkijNm5huSF7a{AlUpEX|asixXD{*(XzpN>bH z+Dj^lze_y+C4o!-yatO9#a}?~mhMcuw z4_YpMT3K$#Or@W1*C}>jbH7@R%U$8bs@^WJlgf5s=1(8Bh@axdoD%aN4>tB-VG&~% zT+Vc4?T>AZ&7En-P8vKk4$iPRt8F;{KM*?3rgjB|mF6 z@7_v@G<#e2@Y=`2FAVLO#d+5`ftrpi``EI8g9^?}x*?O?w$)mnH_{?jI`W|T zg=wBFx9En(jz%8r>$9Ah6DGQ`9jhO?Y(lRcLoF*5m%0i}O@P z?7}VOJ%>AZvkAXD5wAmir)BO@$^qHb+>F4%64cUo@Q+P%XFIE`CwhUJju`#zkFAG)l zX5Jw?=Q_yyuwHjNEV=Gz%YMH)d+b&tdp7f&bl9}+j?CoPI2PE!nK?`v`(F99C0jcB zOj$a&W^WR@roY;3%N%1zdjv1DXZc|#6e{O9vbVmfw|Y-kJGa3SP}OtemUNOH7Hr18kFF~;@6)y zv|VV(I^>QytT4)iRivv2Jn^?+B_{XZ+*o4CTId_^bDU+(qI+acEgoyjcB$TWRf@M~ z*Tz|1J3LpHr4M-+wwC-cZDwZT<$SeNg+dMLk^D z^@zb~4LZ6p^`2%8D^qM)N|!mudJnc|rxfqa%L#I1UFD5}8@f2N)F!jkhnTvuGZMF+ zT_^Zo6x(_37-v zW;*-K%4qM&4DH7cD{JY+w)>wBwHoitWIfb1iiWzf2>Vgb+lITdpw9QNTCDM4jXTQm z;RT*-OFQGjwTrx1uzYm-_1Pk}d{Tkj^i!5>>*Wm<^K;FZYLVig4hsxf*7&p*d2@7` zXG;55!DZG==C12O<$a8eUo6vW{|FQIEPlipn_+#JL}KPSolUl^e3o&0j5th>XBP&ZnR!sl zg-yr}%Jo_1$%0QMPwuPY!G_AH|E6v3%DN>6u76?T#MUSmtR1}8gUPH;?waG_#>(V2 zybpA9X2~gcj&yQyU`(#Gu`cq%bLl=+Qd=EZ{>2>EP*wEU@D%(wlP zzBzum6MOUS+Z?O8F0Alba#FiVZtTg|n1xcvCyU=*8+94;ot}vTi!i@B@tzVh@?e(l zc7?^yb73!^>R4J$c4MbsiCWFT{5A0{8;rpIz3^#s&C8g-BC%ZRBIc()8~e5l^F@P< z?ytpupZrSag9`GJTl|x%rpUM59;}3e4zR)@R|?ie;nBEga!8FGxyHy z>yG_+O~kB?t+5|pdQx$2cz`vVcGq;f1NP&-*NPW(!hU@3T#r#Nke?>L9VRP_e5(7j z>dAF~Yj*Q)*vYL9wk+$~#ZA-t*)zW{Cey55nzO5Kv(-n$TC$9%vW*?Fp7Fg&4?1DJ zqUPCey{?0wEEDa zN6*Yz=DRDtj#$q}$L9PYH+@#zS#RM`Z9^vCQYEl`3llc%YjdTtr{>ISa=)~ytJ+LP zH>2?qSAEuBWz-StUWTlqW&fFvo0+ipqu*S9h4JY+%wbtD@=jswl7W>4_UuF03tb)L z9XCIlPdO8v*$THswk6Ib9KltSLSWkvhY#_#@kZe9V!?{z5{PB&c%3MAKo`w3hOuU&*&LH!I}BFUw?Bc z!If>a)7xi>^&50PP<{dX-3G;{tD0hbHjW4w8=vLKg8VnwUdDd9#a*(671nQT?{jfI z_EYVA?^79A|8nlQ;xg=~lNYR6_y+swraL{>Z^M2kb#+{lGV){4*|R3;OKsTZBi*E4 zW!kYu`%{hwAwLF|oXoBm>clp0QVZ#g<6+3vWi!(^Sh3=>Z@pjT*|4TZ9hHKye%SuJ zdllmx*!Gf_8+v1X+0C}eIbkj=Vs(yrL+l^T&nd(QWBtgZaeXUi*|A;w`Pbf9zs0UL zhjQYbSj^_wtcF;B*Xq4~L)_h1y1UHm$H+%}k7l{+V14=WJMD9kzhZY8-fW2VOE=G0 zGbGf7wOw-~;S%!Kwo^Gv3b20Iu>%t>Ab*|6E&uc+*@TUnK4=E_vtaV3O+)bei`D(dSq25Y|^{by}{%)tOb8x-(_0uV}DBCVhrAOVE0M`emvu zt5kE&3BvlF6!Rl0f*o06tJmFoWBtQ2_OBM1xw7E~m8!3B9&l7k@lg=gSLqVJwE}rP zS+P8&H`YJis-1NX^15o1L9z|8{z!?^b8)@gnQ?sY{Dj0l>_PCQ-Zmm5HrIJPZ*O46 zI_J$>Ga`er;n@!oZlqYTqccp59C5r+nVcnA9Aw9GLX;vEyo{KwOU0|ldS+}?-j4k@ 
zrZaXjyJ?0a*6%Vi)T?-q4NIB4RZ$7+pGs+bXPUDEd+#FKmFt?ZGkL#Vam4zX^Lwo* z9&N?O&YU<{3G1Jod_g}g(2jLa?f&J3lLH%{IQC#5@}$<%FRzPnelu~t$6h6@e`#jn zl(d01tk;weE-!Jso1E&>HVErqPAvHSAdYvv2fbOTjr_Yp>!ab!@m@^oV2ia!7JD+g zs$N5wv^qEMI`OV|S1mr?G06IbD2Uy-m9VW>LLhUV{PNv%S0i@vuKk{SrG1#X#ib9v zM?9I$?tF*4?>t#lpV&pi)V z_f_5w)fVh}>?Pe6WBagH4_hj)`Ph}MYo^j7wc8B~3m3Egk4n08%cPGF#=B^8tK97a z+urQPZT1g)ye_FL|NX>vmi*q8ZEfw=J7A{+OAENKWIEECm6%s6n8%x9{P%j-@R=^t z^B?W1u~UgTze-P?G*5^KOCJYiJq>$yCiP~swht_s z>zRR{6i4)BC-&j!pk>Hh)@>Ly^_C$!w&2vx-3$A%qf+~{I-mAq+ccW2kkj#D`A$BH z4Q6{W`Kwl49^7ze4^*zUI-2jq_V!Pb*brvR<`l_3&+5$Bkp({Me-9I}tbt25yzJt` zyiMbfVDMDS3RfFg(Y+tY$^ZgY1y?^A4_lIdSc6yIU5diabnZ-dMsIW$A;~AF{N#5 zdkYpmX8Wtll?JS}-0^&~S?Wx&+m7LDPaH3Ee0hD4_cVW|-$v`U?R_7nY&bH&&`!i; z0uP+lT;;*;e^j1r(9DJHvDi7T{c$@sd#d}8Pctl8_vCgR5BdAEHaU@VOG2Id1RHIxS?aontXOWW~AlM-YDf-Y0dCq z*V+ADogRC!BT;&~Eu1~rfacX6-+Y{zsc62%?Y4HTRZcJMiVKWA96vR+sM?R6Uw-CQ zUWgAXtxy`2wa<&Ktr~c1f)d7M`?xK4KRK~4!BPc>cH6Qu%a>%XOTp_}DSsX`OT?PY zy6^O1H^#BdrrfDjBIY=G_vIuadqm|@u5p?%KiupQ3ZHV#VDWj4wEPrqHG z!mQobxrMh?W0soFjvt-ag)vDz=RVKW*#U|1FE?1dvDkSy?~dfEZx+4|*B`B$(ujE! zJ-y@ISC(~;YrZC1r#;u&J#~GVUmLD1^L68+ajke{(UWK`m*#xiu4QVc$~@TUnN{j{ zCVMf9u-+Q)?Y)_lUUGNYX1>g9*H!(A7B1{?=)it!54p1*lTw#ft@dQky~_)}3>Go@ zZ%Jy=-|Sdkp`^ylG$*F%l%=(;r5n34vW3F4V;<~*O|w?^WtJ@QQBK>TxPGk8(okG@ z%7LZ#*{)pX=fXy{b^Ip!+K<`m=t+4=Gj{eunM_KSH48~~ZN9v{J+rlXU84V`C+qhl z``*CJKFrN<^Rrz`O_|>Qd!MIVvScy+Jx^3WYso5}XG{(2)Qx>8zj-C{ z+D1&Rm-~g)`pRry&+A{3jC--+5gF1M8~U(}qLp%2_L;J!7F`t7o9nS{8Rn|Pm5kWE zM?Kr#ZpT={?A;xj=Hq&=_?2T;o!ywnNeA|@(t%w~x@gnN-G%k<5bbDm$(@b9 z@nO~GO`a?@eY0m{X*breeP^wOhdkKfhcUg@&+uZIdy9IPxqCB_>dY;3R(LXnH)o4i z4iK^Hl^u5M((+*|a>7eaJojbi4F)9!KlNZ^H^rS>oa@3o-%6E_3$$k%CV8796s_0? z$-(B+w;3_(j)sOyW3`!fjLsd=X?d0-y=8d97B4oXNA=@~RvzqH?%c{%x1HG5n?1Hq zUvA6Nx*b>%7s*)3gyPWOy+}n(e+|)pJ`*}UqWrO+XMMl1?<>`zU({n`Z$+YO|)l;JChgZ>shf`pOo4(x@W{*Z_+D&sOZV|uB=Yn@z#aS zc|1QNXS+S?udD4bW`q?R`E2Ic(m}?|@=eNinG3yH&-NuZG}pFeC6h)UtiF!x{*@Ib z&&@qp$2nd4?>BR1uP*NKF097+P$zHeG@N%DR$Puc|EVu4UnW1v;H@Sb=ff*ot?*_8 zM>R8V*wBlu>bqso!wNSxc8p`d@u`l?tNfPzCR-ckD)%N%UDlkvU##x*C_|rFYf2yK zI>49pb{4VrheWJHeEv=u4NoSmb}8gdb60k4z)+VWK>MaxL&C~aG#Ym1zj?g^JSrT%uTaqD}xmnm4X?OQ%AZ?eUZ zm8JMftyR+E$YLi?Kws@@V%6G4wS;B$e=lxb} z$LyLUPuiPg$@(nqblZGDKUONr9oOWf7Atf1?BDaNH(MK@_q$T47t`o+ZmsedcU%Vz zULW$~7GZ|JIdvw&zFzb=zJ z*|ZiPmvleq%7VP-%xE^nf$8LSPkC-_&6@JbX9}fF*-`2K+ZL&bI4|$prf;Sj-xh1v z(RWr9AKzBhZee}|-?rpdYHUUzcY3$r%%hFIyt}^CH#5rt{LPK)-N%b!__gh`=jJx^ z;C@XTcT_yqRXhxZ;;Cch0*;@_l7NP5l}~^3MYooNN~p$@{fAGxy@5 zNS<6O>$jjs6wfvePf@D$2_!NEh)!?!`}1+9u3A3r6~MQwQQ7*uD2U&i7^C(| zE0oXd<>Q#*6VAKkj-2>8JA(7Zxx2=kkK}<#dtX+!j^=Nj2Q0EF_uv_Qi@U{}dGXGw z-?l9*5%KM^&er?&eR$ZbEmq}u-u%tA>eG)EeEIZ4j$v)H{dniC^Pifx4B$tb+BfeR z63N?03^~sgqj=ccxr==+NAb0fN+KW4kLKS(;%+~3isp`@xogME#PDo~l7u#uG5osO zb;Grj`}3Ar7aHWv@Z#37ib?rRefUVrush49`| z!*~~w(b)^-;e5s&rx~3*MZ9cp)@S9@KK#}E^Mh60{du>c9jFKg?6aifHE+HnCZ(BjkRN|!UN%PMN&r_@kb0^X5X?Pa zI)rq+9LlZkRoP)9MD*OJn%U<0e-Jj|<}Jt0p+dSBLPTvfCm3%)_}| z&qfP&jg8~GQhV;9e(ajye-uj={6Llc8} z#KkM?hP@2sM=xbHH!cb1(KcIc_q>nbUpJp0Yibz97b(5pzArhN4=cE)axl%4KY8Q1 z*4ovJ$2uO;Js>aQck@hlTU{3M#xpcGJ#8T3lWyc04mm91)%vaZlUd$;bb7i&oXCeS z9x>dk{Wx#l{^-Ms1=c=%u5y@LyC%N8Mbi0;^H2J6op64Pee~f)+xmF(QeUpwWc@XZ zDSmuKh)PVEqd$L;{kq$_57E4vr7?WjycJ0gRr>S1{1ahqO``b~ zM;Y}CdC^?qja5~)L=5*`I=09=Du#QlTfJz)*eE{R`*tw98O2Y2?W8JU9nEFe`rRvA z8O^1)PdxLuERx%X?VZ0~Es9TW@;+ej)F{5cSXWuADvEdSS|np{DdMFqKP*Vt>&-=< zth?qK`tqu~S;Yso`tgvjFGe|XjL#}l=_C_Bo^~#M{*oR3yi@7v$UeL|m>=An65v)E$O9HvJ{V~f%%4q_PhPeygilEb zl{(ftjDMJ3cErOhgwGl->z`5*%I9p-dVQt?#$Tc5?*5m;dCw>*W6|C)ezTiqbm`)7 
zuGPs+OB4{nN3Os3muCrAA!aAi0o-)sAiN)0jUXh*BE+G`Bgk=bMI23_mtSBeCjO1aF(-YBY9wBo7~&Bq47f z#n(mW99kt6&2<$T84s8l&QqIfNq%vR;FS$L56_m4aEnGSZk?DM#1pR@c%5z# z%FkUjjd;ky`4ES&oIw*JxKZ?w&MzkV@OZPWS+6Ag`LcHJzr7hB$S*uMNqGM?n9CfQ zaP#BXFs^(iQghtba9(0IHT;`dBtPl4YeR=>U*1sjt5T;i0o+j1I#c;`5Fd0U?W;;k zC_fd@f9ISr;e23a`g8fc5xk-PdX4PQkv!6k=#}D%rnhhQC#KZf(~}ay}9@5tJ58v{rI#ADxaK= z2JqbhF@szkF`o3+U2#7g%3Dnv*>&xTaK8RkPDqbC5qw95a^Ans|iuEre0PM$(>hd_suEyjXd{x4M;CD2WF(KjH|6`@q{Us6nWz55r0oNk=jI*-ERUM=G618T_E9Us{ z)1N)$56k)Uh8N~fI+_*84L4qSb-YCgADq@X>eQ?-?xC~wVQk|F{%Fa0)kn6GT(`Nu z&hut|Jm&tAf|oM_cuv_Zxi?LN`N>7yCcc{<$|ZMgA2nu5IA7fL^5*IT5xmdbPgUa@ zM)At`D@qm8{rRWURoWe;gShs(UinJXLiiTTUDC4~gmdSOS1npuMeuuyUq{ZJ7|F+{ zscde&KZ*z4e(qw`5c~TcgMBuWg86~$(Y@>?!}yoJH%_YaaK3Pbr1Yw95nM-3-9p16 ziXRC!19_V^emO~}gSq3=#Z8jk!?=0W3schv z;r!Am`4RiNMe<>$eKwf)kK&dt9qbpm2Jqyrp*wSr2k~`ZmuuuXhw|r-UmtUi3Fp)O z+ew`%jNob&{Y<^?M)JM7LBr2=iRKsj#$4EcIFQTAt+PF7AHvNaKHFYd8ODuob(nWo zErOqSH9Zy+8Oew24sG~gSrnJnm}nAvE1J_sKIoa^hCrX`ps%Z-XMUxDJ`>ag=&^44 zIt)ArD$yKTKue%cjkE%JphsWY06peHAKPya3P2zKSA>p04~r^+GIRzNPz5#U0`xI8 z`dAKqd`AN`fj*+t1GJzg^a5?@4LYC;dY}&m&#Fd9-I6~@3=7zg8F z0!)NSFd3%6RG0>7kPg!!17^TXm<5@T1+!re%!PT74fA0EEQCdn1G%smmOvgXg?uQ0 zWw0DpKq0JzRj?Y?z*<-b>!AoXz(&{vn_&xVg<{wS+hGUngc8^VyI~LPg?&&8Wx!!S zl*0iy2o+EXhu|8E!38&yRoPo1&4$i{`xCocvGW-Tt;3`~$-{CskfSYg& zZo?h83-{nYJb)^A2#??~Jb|b144%UacnPoIHN1hh@DAR?2lxn|;4^%IYWND@fIcTi zpZRJ44M7STL1T~xd?AuV6KD#u&bgB1nMJDAA$M^{4oN;cQ?7Y+tw^_wREgGKNQxoc6YC-3eOqhdBvLPW;JI) zbf~HQ9v3(F+WD=qR2PJlJ4)q`KcoLRsgfc#UGwJnPyGFT+#G8biNF64vDg2rY2p3r z%lZh^N1#3e^%1C#z<*){#GgN$o%qKnh-~$0X8h{;!)BqiuaQAf|NP-!=lMhN@0SsJ zR=m&SB7`M>EPvJKHsQ7F%Au(5^S@4?v*6l(QK&0xr~d1^iWD%TWKQi|;oQ8_?Aqx! zS+yngsZ9JI*NKa(owlhfX`g{EUXFXF`7gAh9)jrEBJd>*)SsMph zE(QO$&saMABdi|(sQsB0;n`doHwJ@${mwKd48nfB4vj7KLBC$-@4OUWA6~Qh;@8Q4 z)ixPm!)P3;K_k$D7C_h6I?x0kyCA5KFK=0WXI)E`Kg9ey`70~Z!3))}^ ziqI5HKocy$8hSu;Xa}^7y`dveyXbdOY`%{HZ7aoq-izWuZKv&_w$tyVcGI?~0Bs|^ zKed(KhuSI)^xo87s?+;XEQNMc+<(+=>MxHWsm=7h-?y9EOtJpH z-9nqGz0{A?W{NrWqtJ)cf5Nz+ex&}Pu|ea4wuijtzO)X-QfL#s2epf0 zK(V7fqxYb8(l$|hsNX22)E?>=ilYNiztgr+`zYQNV`?|`3H2duGi@iulj1{tM)9Zi zQoGfG0maJ}XdKgcraq%_LGhwC)A*o%vIIMzzN0>+I8uCQd{AG}SnLKAYkQzrQM{>d zDLxcWidFqjeFW+wP#=N6Y6SkQ^Igi(^do-08y#IcpO#ZD{;Qww{!hnaO3dFY9^&^3 z-lWxTzxe;Zx>w*Zw03Pej@I8R`0Ly&5Wl7u`t(oR2k1lzKjJyTW_ay<2Q2?Ha>Ci# zx5NJ)|ET?$mG7^CCk*@bJIi2kzxZFTQ?nl={Cb_govk)R5P zfbJ#G{Qy@8f%!o96P5yP&nSq6!$8}x3mSnN(C-h01wc7_1LVVW7!3n~?h(*^h27u| zbiX1D7Q#j-fDA~1LC^(`0NrEQ19UW^`w8K&2ztOKp!Z3I!JrODf$kaXg&E)p9f9sK zXu)P!24f%&y23FS3v^#(CU^n0JqLP%5-f)yK=&z*!#LOnvOvpof$o27ffYc9Ee&V^ zml~z$_>RYCoO+b%&F{!3S(16Sl!hpwv!>L^U`C zbRWeR>>vwh8&<(2XaIB=j)2p!AN;@`X2W(^4U-`eBB42ygFiSxL$H7yum&jpQy>YV z;0zo9M+ks9uoI|X`hp=0hiEtp2f-4Ypc%}C5?Bkg?sO3P&jj_Bup7L9wrwG7gaXKb z#xMj-;Rx&j>Nna>>K__Un}Eg=z5fi*hIpXyv=^xFngIPSGZ4lRjhEh_0^g4#>OVQ4 zF(iy1+72BU3g3?*HB1ZrNPR_PiNO|F0i8h?5`eZ>7*9f<%3~glsjW~5Dxe3$AOcRn z1fX%{2h_K%L40iKV_qUqAD;y38xE=<^tBY~nIMcc1Js3nK85;3pf*sOsIMKN4P=2Z z?uMft4X0re><8*+>hHEN8@2tGrvz$l1?^H2ey;0D=1V`e?1 z!Dtu=l28d@;0~?9yiO6yJV=KW7z9Re2s|Jh<^#3A6D);(kP3sr7!JcO@C5o@3xL4} z$cGH*0YktPgnfhhfX3M(*aX@zqpplc*$9rpUJ$`pXbD!Z8I}RXZYK1Go^TAhgE!a$ z#WDwIoGu6Y{W>rdj>9A~=FxVmf<7cdB%B250}lS+01d$sGGQC61OrHdC^!WZK@-#<0KVTR=>1oL z3=D^8I1Q6vKQsaw=T1QVG8?wT_xpxArYA!=e7|qlV0s6v0WIhcXJ85(0NOX&!1wzG z^+7TS`v#4pgAfcZK<%9iC9oD~|L6thKm$U+73KlOVI53^Q4kB~p#nm|4YI)u)Uu)IK4;Li=c2+W^Iw+DPqd4Ae#;zC!z`pD4CXfZ|NAr8ZJ5 zXzWm1g*a0?rGa9s02FU(>-X&xVtp9%n*zmyVofopwtnAEA=cDJS)g{(c2T@L0L7l# z+XIC5W}q&_{s`*LU=L926(JlJ!6wj#86d=79rdFi344M1pd(N}SOLXxGb{t454xg$ z48}rtm}$Q?LSJ83%y_qtblmX0Qx=Sp%g@*42yx{ 
zy%h?9ewQYkfC*3r-XH}_U=|cZZx{-+y(hs4IQW1qSVAUjgO!i~^m|SLy}vKmK^Ex4 zDwqVrAOcRqe((c(m<`)uHB5#+kO+}LF)s&yZ~zX-gBu)z?eGvTK^lwzde*G~|2?1pD>4OYT2mBrmgfT$R|1|<%)MH^Eyo4KY0@gqlj0Jj5ura(r-46yrDZGN4 za1!c&>LXAefj=(-f49$}{dp~~f2aBg)JLE`0`(E7kHCLk1jIkDV?Y-rKlu68=XE~J zt$l$Uiu%v%{B=IBBmSI@(5-*g=L}fm+IJNH+{K%PwexAY6srHXpW&nH>e?T=a{lvw zzW=v&H42@v>&Ks|9YLjoPvd5DZB%Ia-(lSRPsgLpw!c?A z{*Gh%KYg3&9&|?pAr$C-9ep24?y=5>3*p?EX6c^M^_V+w}I|&(mlyEP=auvd%*PFT$fSOJx{vN zO!pe;K5AE>`=j(7U$!Wx0^R$hdzJK^V#+8dKrzsFd(pjBx+hBaoaz1}-CyknbT5;> zBg_uv9T*36ACtb5tPe{1o-sNVqWhwB&z0_%eg?YtO7~sq`^eUzPTw(RkMb_i{Y<*Q zN#ALfhmyXhtRG7HezB`4=|1WgpnIE>QPTIBZA3}mQRaa19?(5bKcMeKTZWRp=d1wb zSD^1Gn~3rSR0I9(fbQ)!LwzgI_nbMRybp9g)*tA5)mEaU?@B8~$xvQH*%avS33R_V z5cM694o>g@=-%rzXaE7Ag7wy-r0-i>gVGY^?{?0)6gB4H@0z-mnXOva&7UXs4Jhe5+lo+Hp}daL73gmebf35b>cNl?^qp;EQ9gusAc1N6o4^cIw*YDC6*H&MC+{k`I6?M}vW zijUCtfv5}ZHb7ly_pjPK66<{fq1}T}|52NN)!tEf4aG!g^I%N=sJ*{x>u7olya%$5P`*e&NjYHx${*ze@m$aw>qWsGpnNb4C?EVP7f`Mc&j&h~7Y%oT zazY~fcXEO*UKa!Rfbv2Ti06f$jQ3w?7A%Y^=+ew=`5Ay4l^opSX-_yj`#QNE^J zE%ePPETjGr@;UX-OiW*aUGNMzP`(!Wkn%OHu z{-b98I~Kag7=|w7&Xkh1-l^y zG=cIL>(Q9;hd`k5;smYX0f>(o8V};*#s%xqcnN?Y z5RU0T8!t4*#K(*)UMGx~VAO@<`LD)|_;_){>x8iqg6e-~thm!#VEgDe7>c_1IH5lM z*;v7%e~c9m)P->phWgLOi7b{0W28Ci!Wi*HT^Ju}s5gOd_*otmVVRIaZBcIlQ{fzV z0UhTkj|#`RpXE|-tSjVEJJegk9T3l@!trhlp0v-={wBnVV!0I4w9he=6iYqS3)=&&rVVaId6i13-TcEg5?4(c% z=L^}WQw%A76uY@Vv7z{_L;YV}J6K>H;h6Ls^)Eojn#Cw7b{kO&`Gs=JE1;b5qjQ8D z%zputSETV;x^BoqeJcq0(AOe3hPl^gmw?ca z$_GPG{wNoSUpp|Y7Xg$DQlTMUI~3(Yh=)xeo(m{fexC=lu}&n+hcQ68A^|1kgPtgV zlnZ`z-9Yp%n7zPSnNofp{*^!8*}E`Sc|uf{;foQ2$Xb$isZ`e4vYY zF(BlGB-H;qIYAGv>kmR+7>>GlUSMeFk8(mR))jJsKI#MD0la|`pn})^C@&;q*#Z!9 z!$8!9++cuuFA(yB7V1Kt-iP{AI0&CW=tJR{C-luJEED>N`jK+A&@a2N>=|(Q3_|}2 z$2*~Kj$s+~5A`|q;rGWmOU!!?d*KTReMtHGXV(tYf5JIn4(ey&1=v6}2>q9Z`v1wb z!z`@d1t?bweMkL5d0Oa?GSsP0gj_Cu4oLl>4%8cI9q;k|b%Pa_r9yY0{3TpB2zhHi=0AbM@DYXrAPtnmD9=&; z5{~WisMCIY0A7P|4)CLE2O2jtX8a)#XuLQ00;u%*xnFz z+JD8z42=izapQ{hXuPz8U=WV!XHb6#;^T$JnE04+!#rWUgrF)M&wn*$#K(&}y#=<9 zj)S47|98fU2VN(PlQ7i9#|ic6&&En~ETdx_jTKK2#z{EpKN}|^EEC2^3)F=%;)S{} zKIj-{i|O|8vpnjJWkL?ML%k*3fm6^H=r~7tR5;FMW8M|m1>fgVA505*)E;#?xC`RB zR5;!V$2v=_^BnfV7w`okmpY){3g|d@2ws73eoV)^U*%LkteXXEu^#OM9B5y;1g;Pb zsgMRsVLQwxxw{XpaVGPpqujDd8>haJ!pPJlH8!Z65&^+5YTInX%& z4cwtWjD_h?06T&9wUb~2L68XZp$OFA0MIzU0v<2`#z6)wgA$;9?G)HTFeJeO*Z^JN zAka9!3Z4)P<6#CYhh0GX+G((Z5Eu>%VI!zR1(?7!@PdIb0cOGq*bO>x2J9geM!+K2 z1YMyLOkooI4k8!?6JZtkPJDn8M?tCFoWyh4TB*QR>EG;gLB{r;V=>= zLr2JkEuaC1!5nUY55z$htb$Ik5A@+YI6(xAf+?^Vwt^-c0SmYZzAyx4!)j21QZRrE z;0%#48m7V$D2DEE6d2qBKZu7pum+T&4En%DaDgaDfoYHj+n@&=153CK{xB5gLj6yD k1nML3uOm?Z`T2hxqxv=KBTyfK`UuoVpgscsE)n?u0D%yVHUIzs literal 0 HcmV?d00001 diff --git a/resources/NudgedDividedUnitTetra.med b/resources/NudgedDividedUnitTetra.med new file mode 100644 index 0000000000000000000000000000000000000000..67aabde0b4e7b84bcae89903b73d1ba90a54dd83 GIT binary patch literal 26804 zcmeHPTWpj?6rL@GVkuf`RYZ`#a*=CoVQaYF^1|N z%L5)y(cXXZG;1Qu6&``-6?{m6`(&fAI=}K5r(ee*Djq+F9SWzfnIE~Xi zly)Hz0iVj#^UpJ)Nwgk-usye>rp)nT%7D%c)FRe(kMHpm~@V? 
zV*>Z_3`enRbEGkZc`})DJSdj}uR^7I$1%Ur#1QA3H7#X2%)Yt8#9U+KU1e>|p`m$1 z3yPADG2MO`;wO_{TW&v0j$bx&Q|*WLJeNL*&OOS&e2>l-3CA_TX06L~I^TRBm&Gb{ zI*-S1C^(-g(7N-?&164pk@d?*kyuxFKlEw6mYVe{dtJBLAJ>{VR%sL_56V^kA}9t zvOU@!ZGB;TXG?qM%UgK8L(MWyTle;TLv;;l?|ZYq*L6AIGoJoJWGf)tOY|*~%r)eV zq+sLKvTm&&UaEN^V=q$OIw^RyMxNF@@Op|!@Op07mK`tdZ1;ODUF}}5Yn$g)dzDqo z{7aTqReFmmDyqGuix)3l}i%!Zo7c@ui_%#Fov{L<`me0sgvtHE|-z1i1LAK`@2|2x(^VCm?*)&v{s>g&Us z)`y!Kfq<+DvA1l@Z2KY7n%>?Tov6A|I|Lj84grUNLtuC!AZKHbx`$_euk}tvdY#U0 zJT{X{;e74vMnP=o;#78HD%;Jc3bn5JqSRQjexQ3_UYX^Ag>@Y}SHuUt#WKUVeq^2qsL0x#{&|LQ>Hg}^sE{XL(pxRg*%+Of-R zC|`By?|ByjRQc6~Wp5_L2a!GfdP2Tk-rIU@{mLf?0tF?dfvNfgeru#oi|qDyTJ>+mOZzT%2si{B0uBL(z-UCk_TNPLF&~4XIlM@x)A?`e ztGPI=H0QsGK{sTHRQ?AZI-mvuUi$84y$R66f5-)OX%So$;g zTaT$`y~^3#uA{&8SjRP`%)V!skD3^IuU#Eq<~W-Us@KFU&8=&ud@>K4VcR~cs99@} zXTnEyPYmAd_^4#wU`}*~VmC{gfPLDzB&jHtiFp%U6=O$C0`@rUa;z8F>#)I*`#uwae)grCvF-C)wT(WbFX#vQhdGJ9Vhl)QTo?<+g}$RK#)B~o z9y559gAC=+2I>nQXdCyCHYn+WL%<>65O4@M1RMhY69Sn!@5(vUKJOl1#sh;K=YczY zCoA_E?(bx!pEe#>atsgKSJ2y-e4?iF6}(f+b)2stId<93P30>{Wv^#yPcUjHyMn(5 zlJ9zY`4yppJQI5YY*P-|w}H>Bk$WYNVdr!kzew^D^ea^ooZ#1(sfy%eex~M)=+mAz zeG!HQ@?4lV*~TN^?8Ui)M@~-aQQm3zY~b6$dV_BVej)gPz{B4IUcN(tuLt{|_>v%p zj|e^=_?f_?Eol?&i0=t9w1YMvM?LVUi!|o5TKaeMKv;lvd^Z{+7 zEc%Ol@Q|Th#K@C6$dfWCk9y$IHhuzDLWDK=2+L@sBxM!hVg7-2Zm{i&Edk4grUNL%<>6 z5O4?#9|Uaw&dJTm`SG-%uD(M>FXJQH?0 z&e|NZ@83Bh{~%g!X~j7Ix0!EaEK!WbIykr?#*Uaorsj?4)1Eh>%?uUfxiD|CjmMrG z!-KWwuDOH&)*Ad7@T-Vl1O5-B5yLM6zYEF9i+uSggg-ZKgdJ+F(Q1yJfCPT5k8A7C&F)&<`K;y l@)Ip2svv^*V*$~f2InuiQv_Xohk!%CA>a^j2;>d|{{h<7??V6p literal 0 HcmV?d00001 diff --git a/resources/NudgedDividedUnitTetraSimpler.med b/resources/NudgedDividedUnitTetraSimpler.med new file mode 100644 index 0000000000000000000000000000000000000000..2eb145e5e17412eefd56c7c899d88c52794980d4 GIT binary patch literal 26060 zcmeHPUu+ab7@sTUkELkAs%Su$_z#VeE4K1S<8g3XdZxYByHPIH}My>9N%lv*sjGtZ5r9NI>C zJk?x%jA83xhMr7nZrSy)I)1e*4O$OPJLf*I&K+3*d!KlcB}6;Z1*OX{ogci1>S7jJ zo%`{t2bU1btFDKGJb!5`;&p}A!-UdntDdjC){X1+u}@R4(IU+7Ucy}en*~-#%$G*d zEK%%9zo_^YiJ9KfX}Kcu$wR54T{fo#LGAc)W46n?-J$LDqfFmbh}_uF;o?wcG@#^2(r-m1V7mvMJjH@i;HIRE*@xbdPc`#nD9Y0ih<4CdH=^raP>9NC( zv&Cua*xu2xy=|MbIhowzbaZUqnoNql=Qphp%|8jTPObzkmj;LjT|52$<%`36lzg4K zi9J~aECLn*i-3o~+`Tb;$MXH2$d?J*8^42wGU54Z_r?l}x}Jjkp-DVcWh!Y(oDj4( zUZV7s)P4;=cg;3XZC5+Bo<1mbq(#2E@=IKVAugk7w5S%-IML^ zb`R`x^SwZT=LGv%CT6DJkQ)g1vlb_+cGMODi-1MIB481?JrUr&u_L>OXMWjur^3As z^Bd1SLbb5JwtgccRzDi#HwO7`3DKlk=6_vdRLG}@~5^9adaLn+C8ee<8+9U9G>o>dq zI60b9q;)pMJk!Sq&wTgK#+ejTF8?4uNTzP!xzx6l7v|J7gDQ_O$e@$+-JUw*IP zzdzIV*3%o_OPTZ&6hAcUl{>Fq-?#IHnbbp?;+cVW(|817ZNO(wcz4WeiOhy3{x&4n z>#BM^>8ySlQL|mWD(oRRjv?bAO3*C!D9Z0SbG=lKP;8q#^dnG=JN|US&pIa!UR_k4 z%B{x zwj0>QV`Zn6ZM7jQ9{+dRVOB+-|C6V+D*>eQ8jvAjn^jY7RW8hiR?}W%|jh zfINydp~-`@_CD}T*1K8N4E(?;9T%xuK^9^jT9mZYqm~XoIKI<9PwBK#_edkiIxZ8aDZR=YEECLn*i-1MI zA~1gh%s9sM{_58Vi(?$6PhHg>#D3>e9lu(Z2E{SVZxgr=QTbokb0V4kXi3`;#4 Pi-1MIB480%cnJIlCtIFl literal 0 HcmV?d00001 diff --git a/resources/NudgedSimpler.med b/resources/NudgedSimpler.med new file mode 100644 index 0000000000000000000000000000000000000000..a0fb4ea200cbef5e923f8eaab0bb8a0421e558fa GIT binary patch literal 25772 zcmeHPO=w(I6uy(6Hcm&FYLqBNo;ZpX7fCZQO+b=q=S^m0GLz2auM&ttVk=BAXbQ2q z5RA~`qMIzT5D^U0MUXCBL?|?f8=;Ht+_lT0p^FV2s-Ik%9=x<%#Yiu2=%gu=%>!4<6vLE~b;rL3UPvERi{a@B~9ho$$y!3Npt~Iz^4zA+jGAbNM9dNe`qrC|n3A zXAEm6h|U>`*&Sv~bD66~M@B7Op_KDYMJUc zAKH4(Z77#H>U-=B&LNsg6wBtr%4^_z&rw+%g(7n=e)Zr&>SV?FFw5hYwjBFSq4_YR z<=Sn=E6;VSW`3M7biYyy-tb<+p8q4NSECVv-FE~~98bKX`6ysUJ3zT-xn7yKo`M!@haT))A^I)-V@6l}aEFHh+1`QgQVk0`VV5EsI0DmSfp#Osny*R1;B 
zllMy>EiJ~KnZ*SsaqWuZ^f`U;;enyy{z2zlZ||TpI5afaKcHr|Qk^RDwNhtbeu|dt(hnT_Znx zXdM?>2qi;_Q;PP+?OI;F#;)Ogi(Lmr`*%OpR&NgXohT>AyqNuHznXcDcCiPoDz_W6W?sYk815MP z->~$6a$7S_CB`$^tULd%TgW2DcoO+-N=a5kNVk-ezsdkg|)0HQmPZIAC!Hltxa!{+u=kN zZ%#WS+AdORRK-uJ_?~g{hs+svNS9V{u0Wy7)H9D!n`bE<^U=O~#ykzH{ru&f)S*4( zg@Uf&=ZmKE3VdI$F2`pY1($aJe)V)=h}dRWFgm_tGRp1f4P*au9h;&l@$fY})yv~J zc<`z>tr<8zyYtr@`kv*m91s!EFQYFkM_XSyj9f4uuWIRwQluZ)hH8ddd0gz-eitQJ)IVqUu?6#E&#U!gN1YF1M|m(1lU9bKh;aG5Db ztvcTDsh4%GTfBZKME3Kiav7avKYz)p|2UqXFH{f^1Ox#=KoDp}1Z@AqI(;s-4@J?X z!^t53L*y+gj#h^FANHZE+l`<9!9HK+2H7(IW{7jVG)U&j^Hf%3?!~NLoS)1`Rk zI@7kR{cBV=2iuD6#*6&~Ot~LOxqomgK@boG1OY)n5D)|!9D!=>cX=ObpDC7%ZHf7~ z$2jVlBERRjpDA)XZ9LW*3=i8^@RQ{qV)YO56{N?gl=uo5t@=_wUxA;!UadL7&jwB! z9jcZMjPnYG+S|Zi(D5U`@DN2C_ F{0GF>B;Wu5 literal 0 HcmV?d00001 diff --git a/resources/NudgedTetra.med b/resources/NudgedTetra.med new file mode 100644 index 0000000000000000000000000000000000000000..0672d1a7a9b540d91481fed0d51288263a39c757 GIT binary patch literal 25772 zcmeHPO>7%g5PnYFHn>J|A)o>kT11Lk#KB42Bt$}qTdxyWj-A?0Nl`?olt2%zs34I- z;lLqC<-{e29uPtzNIgV_1BVV8E=3t*n50(dVS)H*H#=({ zb-be=Rw$lHp_DII7pL5kTgevPs&Y%!#hGkYK{XFJQwP#o#dVeLC1cAJx|%tJ9q#ME zen+88G1X)A&!NU1)8hg3(bjG53+s4FM}W-c8wZc(p)ACs%$3MoV{y^gKu?bA7>5Qb$Qi==VWbO0AvsQCn)6&C9htOr1xt3$oyUfwy98Lh zk!;?#p!qO|E0Y;R%}YycD-rOU4{bf?HZ;l{^nKuUPC=T_RvYHS=4;@4&tYC1h2HYQ zwH|?(I?{ALEb{p6@wNfy!(*#~oF(cl*snQ0BnBILSHHtIY z^M+4p%;M(Bp>#v^%7bBFxkAY#_?6@3#x7TJi-F~6DWkV4Ra#y8V0o#wyzNmC0|GoI*zU41v+an|T(I5Mov2E$ z1pz@o5D)|efrE(v?~NVZJXrH{)|m>oI>>IkHGsKbe=VW?2>98JezsdoC5)``rqm&A z{ebLaJw0ZN+^bJi;o+DwVeBHMCROIB$~-0~e~iwsVCUs2o+JHcw*$q=S9+ z4Aln3N+Uny_w6CiR!jjuUo@Uq!28lBAD?MdT-^Qpl{1<3jctY{((${L&}c{BB>R`k z*c42O2d~+&ULMB5y_4R!PT=_L&R=ivdzSrjz?p&dh2?1LOPu5a>2QUmFG`_)ge`+s zT5j$pZ4FL`t$nw#OZ~ve*WjwqY37jZomA+-xs#O*5)UVs z$9x+*HZ|2PJe+{J9?q0u>ELnStXKT)MyaenU({hHTcFhfbDt|4w}fIpgZ5XLgru4` zl+PtgWsC<`>7c%7iBfBhH+T0)l`bAP5Ko z-H3qgf7rm!#de{nx_CGlCVu19~%XHaJfdQ{iVoU}u9& zVO)V+$X%mIUYaq3-?j5fyeUuqCF8X4z5jmqB?>JOK(`7_FA$+dIRZU ztR?)OsD7Wa9x*&RLdm^r}TA1UoaQ?g}rkn{83!Z z&3N+v-+nQ_c1yef>hY=6QQ|DmW=Rggd7=G&NA{0SClZDRfW3HCPv z+?-b1g&U+pVcCVSzs1mcf5N>>{I2E{_Ru%zTa4y8;CD4j-Nnza{jx+dJ}kXqdD{Mj zP0|}k55u3p?>XrH1a7B|$E}Fr5lN-)@%K9H!$!%UFyfQb)05SWP}Au(q0LDUeP@lW>RpTL`m&U5a$r|Wmu?<2-`wB5bQ zch9+h``zC;_s==^+urPmC+=%l(I7C?`-NWw#R8e2Xj+8}dz@)Z1yw~K6^T*B>{RIv zJJusv$hUwQRryRhlZjMjaFd<1)3Gi)BkW{musaqL*v%wYszKVDwrv5UWNew@P$T!y zfa`&LxaA?4I0Erc*B+F7Kg8)X4@iCjfcpCWM#=A5D#ZDpDgPqy&#qcVb$~zcD)B)m zZo5K!5Agp?5`P@{Hx^R=qE3h}AErLrfxl)EKLWkI6CwU0;0kMDNs`Vx8v(1#5> zR2|@-9wL4c_!Tb`Z~28-dW`s!(5m4z;@^e*y0?j+0RFEW@dEI;^#Sqi3LE)jWr}~3@T-vZP}g&oEX&-24Al3gC%J@ZB9Mv(U&KK$vrxFOsse zNMx`Y(t6m-^B4C2!a(bxPs(+fny(QB2dD13h4)n<3 zx#7iQ4+uP!BQAtjUvjHVBVJo%zD$atL%Xvt?cW=*dWR2NvBNJ~R@iC|wKcc4g(Frt z6k2DsM4BVv7P0NZFH1xq=MyVo1qfdLh|)->p3hyqbZM(BUrD{1*dSmKFbEg~bOZ{| z#%Mc^J)+m3w3j{f3O0m*`>Z(|0|;||HZJZT%Hie;A*(2*;H)Hl1q-CSLdve;^NQXE zMLX3(ExArtn9@y(;z@k?F=w4G9;Nh{qLiwxnCAwL4|V%2xjyrQKs+$t%a3N}h7fnx zcj_-{H-4_xIXb=OMXKfo*^OOlU1Pn!sN{cA(}S7T^N&l!x;wkN?7{7JI)$U>Il=ap zj+t&hB>TMWt@($j8L>gYAYc$M2p9w^7XdySTkwjw=f|}>6@GIsyYXBT)xz_&u^Rz| z3b7mAY&V~1kg}!-U$5&&V`dQE3+m(Jms3MPynz`F-ln#wTkFxL2T1 zuiB|@#Ml#*kL74zEmgf?vu{p)l9+Xd+>n+H+sdj3=!Wo z?NK^Dt_lk6=+nynsEUPj+N{OxMYiRXpLa?6!{1vhw zAo?Zc`^aR9^3he=43AoJ#1YFGjyjXpF)b}Sg-GweFWD)J^#1p2?eCQ4{tYz<7z7Lg z1_6V>d_+L^Kji3hu{;zRn=U84{11Uvs*YBM@jn0v72t|&{i(8qFfE#>mrTyKLumj12II+uGXol_L$UY~H*$;K%iSCmrK z6_?7V;O3KSY9l!98=ZSbyBjis>UOL^j) zLYbhw&EIkZ>l)w_B3Y)})vgUnT!8P0ddL*t8^?foNb%vZAT_}tU=T0}7z7Lg27&5F zpj78wK8Naeidkh_VtF;3cgu~(i7LZG_Z57q`G6(E2u1Aft$TvsyX0h 
z1ILvPmC6Rjb%jFtZQ$c{{m4H!i=qv@J~1oXk%9AIq{ER_6Nysk4a-yCOGcI6V0kI_ z68@cP{XXSHmElnnN;`rX= zZu`aD-rM9G1PlTO0fT@+z##D75zze!7x*8FYF&Bx6HafV%Eq5iS^fn58vzz+x$VN7 z(jh*F{@=e_z;i8y^7|7e2Kcv%Q#?nXRNrD0uYrG8Bg8m;hV53RD)URFH!M%xpD?EM zhV7V2*PC+Vajwens7j@t=P!+GbfG^%{TtpqD#q^5Ltz+$fI+|@U=T0})I0+J0%N^w Aq5uE@ literal 0 HcmV?d00001 diff --git a/resources/SimpleIncludedTetra.med b/resources/SimpleIncludedTetra.med new file mode 100644 index 0000000000000000000000000000000000000000..9ac90ee1bac67987e31ae2b227b06e1372b5f335 GIT binary patch literal 25772 zcmeHP&2L*p5MMiPYf>XpQc!>%9yFkWIHakYgkBQ2er}8sJH$>&p;B9sKqI+OK`NCm zkxE4>2W~m^z@ZX3^w2{OR4Sqa@E!eK ziQ|-YwQpwhCf8sx6G7XALps8@Ru|&COk}mJj8)Znlq%XS1*<6P7cbHGJ|De_c0R~e zw*`4o56aeik6jnaqOEuj!f?p*J^8607UIAJd_+Zcy&f2HyQl=-W6QQqq=(2j>O}>! z)qoI>vWVv~ue_UEz-@42P5^~C87E0@&6sZwjCqfi5~WHz`K@d6azhKbd>V4nN4#fd z7TN`OX*<&sh9Ws;=CtHFV`OB^k`*r5IZv4z`>0DE`BxM5`_^bZO!JpX&mp%S2JzdN zxlw;TwB=k{CS&eV1mwN{6-i;5NfoNr!}{OAQ%`YTtf_9ygZORCnD3~&9u{T(^7f`J`7oKkuc^zvp?w-u|IOgTsdg4|;${MoE? zynC&*s#=}L5%6f?2YT|jbXZ5 zM0K;B)@+Ekk@kaNBpTH*U3B`4z%Fl8>qSb9spL+T)R!EP&zxaLOnEuFiJNk!8nyB= zV}Nw%S5KPSP;BzztxU}x^6<1S5N3ns^9r&STW1>6KJWg+%IQk!A!qM%M#k4nLbV=! z#pu6M#->mu?!IKl26@bb^%sM2oxnvDpWU(WC{-&re4nLu9DoSPjgc1_M_XQEW?YaC z-z<5dTR3=q(_O3a*xhD$G#y}9;l?1a^yjF4(b6BQO#f#7YVQ~~AZn~q&xye-4d@VTRk-RobD6un;y+S7>)U2j* z9+}H?JWQqg$(todUGReL;~?vOV)43Cdf4^Pa^j2si|` zA_BJkVVSRsm7ys3d^p+2{*V~r?9eit{hmf2(e&1;epq>aWrJ;Hh2 zmTeA;tsg(8nIu#!|Vz+Zr2-$PHv7d4$^@_JoZwIUJVC@sOMT!;SN_V=BfH z^9g!3eiDRnk-OX!40UvcNT=C)b!dSmC!pP89{`nppfB(ehkn63=MZoRI0PI54grTi z`ym)Z;u+g9+MWgn6m(8yLWew1@LTY;cep_^$d z2-DY_H3!0U;IxsUX6e9KSJ<^)2VUaiM|oilg$_KDsfi9mU>=+s4@7kY(=54>@wC4s zi$-pc-i&XFyr*67r(9|?JUT+iw^%2{HYBzn*nlJqDsvGOY2D->Bad}(2l+5pIalHz z#TETz;J(Rb+r`4#?cyB*4grUNL%<>65cvNH*!G0$@`s{URgLTkSKj2z&Yoa_M(k$V z6YM(z(z;gbh08{UWH0)#Hb(4g*|pxDuzpBYMoEE;p^l<64{H(UwBJ#TzYl0?aws7R=x9mQitfe;EoFa0oaA90Cpj Jhd{?8@EL4$_FSE&Y#hXQMhT-t^GE?v{^*6tRIk^qqkO%^0t5>$+k za6qC5lbUewz@Y~YJ#dhKQA15kl*B{-01q5F_Ta$_>U;Cv+uiT%?w1(9+p^m?&G-A} z&F}floB7UrGutmlGTA44p6@Z}yl&HNdd)h^(CZ&|3n#)na=n}|K~ZLwJx7=y3DV=% z3*kO^W;t9)E}tzFj~)#2!9;pAD4HN&JUW(68!YC&BOJ?Wf&@C!~69d7v35W6MzjJrS+suKrvNBv#=0PZEt7NR9SiU?G{NS^pTiDJ= zx!Sg1S}X_iHmb)h3-e-Iu^#lJ!!+N+GycPjhnOIrFkQA>9~p3)AqUpu%C?P!m&iDl ziv>*80s=hd#c&$)YTeWVYJ-kB0Mx|EI7xJC#(YC)%zLer7*pOvwJu8KOpEzK2IXWM z@IKEhY!|xA8;BlZiqYLXrzOoPmXTpcRw!rZyk&0egf98;yq0X-x2W|nLtiF4hjcv* zm#@vShl=r?@C5C7=U94FT8-D|jJws`6P1P|EwnVpTQm}|`%VIiW9c_7-wT*^Tc6MI zmcW;XTwT6g-bOUbC!ZTP-$XDPnU6nZT&pp~nbT*>)44P6mCv1+Idif!J^SHtjKnd= zVU~WnWAE0R$&Fz)^XT?2+jNMJ2Y(sR70_*g=N>#FYZ%r^&c&;Ac-(fc8D7%&q&aWB zg4bmJkmV7tx8M=H-v8)$`GeEbDZg}X+E1T5>HCBJu7Q2K2lwq7@OSt35BdXpQvIov z8DF?yOcF$K`)`TD!u>DH-k(E%-|oFq_Li?sd5f36y8HDv4{tADwYBI=5l{pa0Y$(? 
zpmuMJy5szFZVKX~^q~uw5gdMJb#H8;sJvEYZ`@cPn!}GPxH41XV6S9t1?%j1^|M|h z-(NQm4()?IlvC1-?k2vEDe6S0c7)UX{sQsyOo?l+x6BRfAIkRiXT$ABJTN@Uj;3=X zj9D$WGIOK5q2BlluXD6}t+uLGZye=yP2~P2kN*`%4yw7=o-dmo%Z-i(N8br13fOy@ z6QXaqn7R5xell9$T6>6Uj}-w$KoL*`6oJPV0ofb-&_w*^XPh$?X?c|1c%zR};r?3n zMnR0|%Iu9B>y6EHw}j~-J1uXB*Ms(h-b7cI4Os`L&*<6g4_Up)m|>IJWK#B$1E%9M z>;&hRqZ^O&aWQ886~X}HQNMbcOT%oLOLww$d&vD0HbXNTG@Vy~wb(h+mc;^Q z0l8s$k#Th8rJKhEoERv)`-H6B~r43F3W)&f@uf#o5tXp!h&%x@d!aHf)O zyA$p)?=2c~6&|NNo+;z`u*`e#);YPmkuSjYMQ~&39Dgk+>4AbZOBk^;fW5*-B+Zni zavqs4P&k@OcfrF;jydm#-N#|p|HR>Kq409cpUmfMlw1C3C;y{(DPO4~pa>`eihv@p z77=jm4|8-~tO7+bpu@>1`$KYolB1TP_J;~|*0Y-S2lsqgTIP=Vw{J*Hm`06xW-q1H zF%M&g+-i>baEjWqjj6P**2eP3>l7+`b2ks#Sf&nz)BIQ>evT=QyO#~8DQAe!FeR?N z-f`I!n%U&~_7NT!ABT;ldVjYyHWrZ^l!?@V=&?i_V`?xu4a!a5{^&IA7>jcW$3paM z{8^CP<&NqPTp_a2u3jCLC(bx*Bg&z)8+q&tJ{Tsvq=6uHihv@Z2q*%IfFjW52*kDD zm3^ptrdVd(664!pzuRg&F1HyTuC3rZ$37(DA7v{TIY_D0Rv>6q>1x^vn(6Ct&4Fe* zaE4_lE*%)x6>_cDfiKeWqf}T&Q3u|etqTqX;5;}x5(?@FCN8;=@pQi>vn)3lAIG;u z>S@>eDVN&}kB$)XHP#8V|A-9;?Ln?xNbE*3CpSWx9(Zsad;*?ku6nLSKZ+{_snC6s z)wYW@Yqy3g0*Zhlpa>`eiokzIz_lkV$UhWyii)x)Tzi`mt3AO14cOJRC%AV6q;;*< z3+GsdWDoj(f46{pTym|qCoCP7x*Aj5M_=K)7>!fV|2`eihv@} H@d*42iYn*w literal 0 HcmV?d00001 diff --git a/resources/TinyBox.med b/resources/TinyBox.med new file mode 100644 index 0000000000000000000000000000000000000000..2ae80ef4f183d42efb4821ba6a6da29c8418de79 GIT binary patch literal 26612 zcmeHPYiwM_6`plWa1$qS(m*Lq=!6(kjPV0+n&7zZHQDte@~#~}LJSVhdiTcOa`(aR zb?j834ONgR;s-+c5mo#^B0PlnQIKenniiwF(4dHdny8?HngV^PsG@3`Ruz<<@6L>~ zdu?w;Sd(3Q#yWSNXYR~*&di;2?p=Ss!`HTI#nmeqLRl#*Wfg3hlM$CsWH>jPoky-h zHuQ)pX3NBo%y;-(dz>Vdb4u8+c7M>{+uG&tWBy>@aG-DIjJFST z@~TiQxt)iSNh=x-+0jItN6Z7Jl}N_SxLw;^muzWHw(PQbGL<+GjhJbkj>bnV^QLrS zJQX&1GLf=FBT*}AALOBMDv?g}KwnjSB%R!TTQnX?Or&dgAQToQ-Lhs+4eyG^$0uue zC?4S<%i^)nw`g?XL@Z{e!qJe$2jbC5ejt>JhT?X*ws|ClF+|hUIGy031EHuD8nH}g zPG)Sxj6}=`1|AKY)Ou#nVBr0!gk{-gcnouhjEC)7-e;P;*|uB8Y&+RhS2r;+QERJK zwKR&lyY0H>I%*(5OQcL5jRP_VA?F4xQ1%olo`;n<`o{&_Y%Q$^=%ZV=Wi?YeUO@$r z*`CbBLwWF`T*#bhnHQwYmuJbmf@F?ht#l$bC8C*YU$EN;J-LcXID|_9az;3^T*N~% zMs=xN)2hvw)Dgp_D}rX!+j0Rr?zc1ez#2B{rNfrIs3aXjt`x7n0Fcd`N#%9_#)9sBBetpTTH=? 
zSN~v-GrjrbrsiJGCY_|hYaqDW$pf!lL<+C{>5!F(nO+{C(|_yuD8IR$H+Y-sw>C9; zDP6ynH*9I#vaNx2JoAT@to(P3ZJ?E1apD&ukGSmo%P+q0X1i0r!MTaPXaqC@8Uc;K zMI(@THiqqZ{L6E9mCHYL2#gePoYH6GOK~=y+dp&^4`B+Hj75$yaW-D&$g5u3HR?I- z=7DJc>=m4-QMD>g{0vZ90fJQ(gP`C)FvoWV3;+gMry;D&m&t*40RViBGe68)q z!m%6Yv)w#wh4AOf1My{K{UGTnDRD}c(c5Pv*79#Ub`fI+^R8sxPo#6*Bi~_rB)=3t zwH17_8+QB#p%69+&-Frmp z_>`>3w4;wn`&aR}W0(?WtK6m8d8~uiugfl53^@-I@9u>1Fg9Cn#P=+- zIlA>_x>}%wdc2#;m%cD2^h5NXzw1KdajwYl$RD!0r_T!kPeWkcv#;zkocm4fGVbmf zdU<7apD~$UvpR65+W2cS_~Bje?l*q@-I5dI|J-Z%Ucd8u_YMymN8Y~wCoR0&xbpF; zmTOK88nur;`B=+yeq;RM^&g*PVWaWsRsZ@+^=-!czo}ZY`{*9gKih5uUi;74cOEm1 zA5FjZoS7ap{?>S=!&^FRT=BwC`o!=~qxYjXK0fu^G2;FFSs~%(Z6P44Y)8Cl->83*uzWRQL@!z+8_T%dxX`gByec-(f$95U3RzL8A zLqG2@-aTLXYW$H#WAx#dHb42t-@k}03-847=)<0b&#myz`f?&X)4o82&l2!Cm)h4T z`Kg2Yb|u*DcX$J>o$_fZy4un0xMi5~vyi{WsVHZAoLIfL4R(uixQZKyh8xt)e-sEbV_O9RG{^{kjsWTjn1$shaFKm&|>)h_+hhh4ot=V~dc>RWeo*&Ga$7 z{dG~Q&gO0&_?V7~WBtte{iEH2=d_F=_p&x9W>{0Kw+-)%4${o4e6l3ig8Z_n)`y6&S))wsY)kLu0 zF>l!Fu-jq3L-w%QVb`xB%2%MDZ#iAL?ezKjS7XLFFfNQ$m0`Rn$Cxozj2UxM^Fv!T z50rxsJgOcTfESpkzR@0K>VA~eK_j3M&&E+fT=n$=diS`c_j(=!Adp%!!LbFs3_Zskt zSA-1p`PdWI%lj#qvYw<8?W4Z zP+tssv(R`PDKb13gN`q?Ilz~r{5A0Lz~=*BkRteTl+Ot5kcJNkZBd7I*b_nU1F62y zFV-+f`G>#iV~v1DKqH_L&^X8!7(f*ji`gX(pjfIkI zzU;_)*`eF6K7FUCKt0aIpVdc;v$P03Dv{SX(;rhYl)G!#1HY8tvdq^K>ww)cUv2T0 z=Sy#tJl%S8RPH^fF9y9?XgsEi43FZ~?4>($_&MP7fDZ#c4dk)@K*+$l(NKv#UOI{iA5vvQ0UqgV!|%$zqGQut?h!vU?>3;V-bQif+6am z2{kd1LlcQH%ApAyz;Mv$fdmf5NaCR<@Q}s3Xho*raDZ^{l2egl!C3K7;1Vk z54e$%gNGi`8PJ^{Gd)Q3)Q1o2a_=Ffl0Pt=X;7+V>Ooz8oyvzh)@u4V#W#Q9@+-tM z-qfPYPf|QJwo21OB*(LBx%>*zr#CP?Mf8z2roE(6LtB_0qgI34nLbP9eY=>RB)YrI zw4!ljUSK*y^zqDU{aKUCmD;z9X_X@WJxpha{{Ahdhlq3X4AbYS{F}2(_mN_J@+4B_WwNlgtr@NmmiB>v%dEDD~!`Cu^w4gK#N8&IqGRINfJb zR40wT3C|IuBN~{TrA-0`) zm1!PC;fQ}^!YZOsQ<8tE@bHUa{6aX08+ z@IYBT1skvO-ab9ND7<9sDTQ4#Tb5wFAS)jvh{X#bd8}+1Fq4 zHn(}3I<~fVZf)-(x~re;Ua?xGzDuZeH1nIU{>XWxjep#__T}%Fb^SVi zs*4>04grUNLm&!);NBSa&;+|~X0XTEL#NS@0^YZ7Z#*A+!#bt85NI0PI54grTi^&%j9V~@IscYe+~ zQ<0v>*^L)lnG5@CXE#=l-56!NB~+7^HAhME%G8dQ3bKm+otUzx^1D- z%T@Z0aq=h38FoT*D>_%;&}iD3$Eb-XxE%GcuU46-q1omOH}W&~klXupgD77#pI6}X z+BBb>Y4rQN`;Tg;nJE&R;gHes>!u=TN1rqHue7mKm=aTu*`?t;=E3BKa9KU%5{l36 zqV{r@uYE?2A9|TzDE&9%ssUb;!Jzrv+2FyHV=(b#XD< zW8N0bwWC+NC6xFX$X}r=Qfg4AaxPiw<#M=6ae!==9CgwQhg0FK_okKC52eU{{_awb zuCkwh)M|gPygc8f4grUNL%<>65Lk!^*#3uczAknbiUFSwC*%ANsSaj`mErsk)#HD# z&zD80w#+};A~oSOPUg8T=5;c!uJ5vB(kz!XCKZVWeJnSh;!@e0+il=u8Qc@*xmxD@ zxJe=R#(bD(ILG;%NjXqeT(wyW=>X+>DjPxuDVchJExfQF8u9(ABya0oaA z90Cpjhk!$%_7SMmepmLP_L<_au`N+w3;W%<#$&X`@UVRapIQDPnE`RWg6^Hn<$MLz z8v zl>Vc_^tlp$DX#3NL-$SQ+b(8#&t!e&6Na_!H!Fs@NPGgOR{>hya^E146 YQE_(vT__yk5O4@M1RMemfx1WFZ=Ld#;{X5v literal 0 HcmV?d00001 diff --git a/resources/UnitTetra.med b/resources/UnitTetra.med new file mode 100644 index 0000000000000000000000000000000000000000..d4548f23c8e7ba563ba3695f0e40db4e7193a55e GIT binary patch literal 25772 zcmeHPO>7%Q6dosTYf>Y*P*6b}7GV&rKynf%ZF8yJ?z(a1*ui#6iVC4pgNo!*1gR7d z2M#%uBT^1MZ~!Stm56`?s23!1-~bYb+F zc4xl#=I6ax=Zk5#^yJ9%BML$)sgf$Ah7EzyHih-8ej2C@F($A`Ez!+oqNly$thv?s zj=ou)Yv0>Jb*shy3KuI9na_tklB3u;L$YHg?yB`5}9i(_FG7(gFcyOw9Fy2 z4r5Ca!ltqG*Cg#5LOOidYMuvPSTiWo@=Bd1xr`3X8*)IHXPk0!0NSh z=e~u_hc2#6W(;+%ExFx9AZR|c^_<(#E_2ZL>pdFAYSgD zEpz*QZh6Mkd~`dv3?JZ<(tobD5g%EV!v~FBq%sb7Si_V3cEoXex8J(CKot&B&bH;MHlg{|*(ebfK_0GDk zlGZBt!tdX_z5eQVuc!WA*uJ}U;zp`)ubJBX_WNJI{Q2SC!aegAi39;bKoAfFf)Hrm z8-qQx_FQMqFnj1TsHlT%rx1r;$3gbcDlD=(v?xVNDcl__{J<~iKO?zXDkZmiI}2K6EQNbe7wW?U-Hlq(f);VrLT0|a2X4?_f zxp2E{Fj19W3j%_GARq_`0{asI-Wxl*d9dcY)|m>oI?QgolSMArUrXpc0zr0TknNUG 
zBSzL3DRn?wKOp{Kj`1pJ<*Tb1IEFC=VJH3Oy-Kf^|=ZiYb6w9<) zAosbNaZ4!nGiZN>DM+h%lk&M_wTAiNDjn4qEm3OO@rO_Rr1Pmon}_Pie*RpwY>MpX zU$^Q%ist7_DhLPyf`A|(2n-?uw*O%jKNs79qT%7;WSIXUJ&EjSW$aXyn;sz$lGCw^Fkh5aEwxJPrHxh{siXo-rTMOK9>25ex6^N zNUu@~a!-}~JS`V#mr|tS6-VV$2=d8grw|V)kHW{Y`~7Zrd@QUtpcDCX;j#oDWBz0~ z59m$yY&cIGOF<4Wp4iTYp9S%^++qFEKQ}VPwp|@oC(1I=!FK-u+;cx5Wg7UPZxRFn zK|l}?1Ox#=ApQtMwcq7^sC}l`BHI$mP20EFaXS%?*-gTZH*K#EhXvkdoP`RoOeSnwz$h78E^(;A zJ7~Z@DBgRmNLCQUr&@^a2l}leL=OQ_4-FH21?We|iMF8l!%NB;0{NdmL_~$y1y@lw z(LF#{Jb4%7LiF4z#KD(|7SOliaH-_ilnJrs6XG8Py5eu5N0FbpTgr%~LX;mS8h^L! zd!qLOfBy}lYk+SZqkHa!Ugtj`dIRcoXF-pZm-V|?$pq(rOj)lsFM6*hW z1M{iAO_kW}Rrba*yOfR8XtqK<3n%BfVZo3Pt2O@V$L6*kM(E8X=ayLyg%|>x$%XY$ zw{zBPy3Wzw%OB(xDx={hsY^GV?|^p)CY*&v=d+>n1=FsFF`mD$hNmZ54?R+^Woo|i zTGy@C$3|7A_nT`p?hQ{eWV9_%T^80nA_v1Idy(W6JPP=~ow*a!&pD zb))x7*fH0B>QyFXFiY)urYqUe^;}z|V`s;nq_h=OFON^xx&C{Rx_Wu9TR-O&DTlcD z;9mrM1WdS#`vNY;19^p%bi9&V8|Cnv@Zz!i1@58|7s4wQZv74KRq^!mZJp2V z?g&~f9qm?l-%iWgU|>3Ep>RZEDc^`lRpL z$kpM-Gbeq4w(or7f4wMNl98s4|)eNeUM*HKTFiB(d)T$Sh(?OK^B z^VJHf52%uA`IYnB!11AN_p5Luj{@;PzFQp4%nc!CH+JfkdvZ%DjpJyas&$Tbv8oY@ zbiXmC)-|^KA*KIb%?>7e&pj#{uCI&5?Cnq3i3S`!&k6Rsbj)-=BA#;hyXGFEX2b>o zgMdN6AYc&4T?BYID7k>|mQDiTYqko1d$s1dfn-a2vQ!JX!EAaOMQjX6w5;pDro%%^($k=CSS2jMX8q)pfBg+5fK6;6+ z#P|a`H?xlOU~E+;uMlb;L1%Z)c0=E@Ow|J-0_%&i7uKV0FJ7e=Y{x5G_96t?2R=hK z(@f)0mS=cm6I*ky{|ABow=rD^r4GINjkV%h$oJ0uH$L}%78>~W;>yrZr$ejPVLCT* zYWeTW{lgbVdfq?Yv8v^bQ26pAiC4b7K4Qto91m3t?z^<+?Df#iWBY&Yi(C#3e!TSE zv5UWiaLVJEGQK)^-pejp#@}wl8*JH9AFfkx3liMiAj1-Z;|vh5kPUv(DoZ|>j5iP; zL#1osQcI5LwldR;nN{m0tuAj0nSTFNyiPXh_usAczg3;@Z%TuJLBJqj5HJYLMFjNt zLmz!EHUUMGO^1_i@dy9IB#vH&i9h5{{6RloW~J&nKU&Q#nWdY~BO6H8=)6<{kt^SB z*91u~%T&o4O^>nsxSqJYH`n`MjHR_XQ|45X>V2w2z5Ak>GMy2sM^s6*{K~TxQ*esO zm2V_Guss_wmdWpTXC}tNc7rw%-0ZGPj4=kQ+-1;i$~U>oL>)=AT zoYLgE68%wJ(hg>PZ!+6)F{kG?e1m{Nz#w1{FbEg~{yPGCJYkUkLs9KkZt;X8EhKE> z3Aq(d(7zF2mCm$Z*r#lW_n@~MW1KwaocVad*j7eq`V#liz3N+x=_Tm@(i^4Eu$@Hm zlAkTRVSVcU^MJA&Y)9M4b-S5qJWk{p9(ifhbNr!ko~6eS)PKV}fr|0_C!jEdLBJqj N5HJWB1PUI3e*m volumes; for(IntersectionMatrix::const_iterator iter = m.begin() ; iter != m.end() ; ++iter) { @@ -89,7 +89,7 @@ double Interpolation3DTest::sumVolume(const IntersectionMatrix& m) const // vol += std::abs(iter2->second); } } - + // sum in ascending order to avoid rounding errors sort(volumes.begin(), volumes.end()); @@ -163,7 +163,7 @@ bool Interpolation3DTest::areCompatitable(const IntersectionMatrix& m1, const In } return compatitable; } - + bool Interpolation3DTest::testSymmetric(const IntersectionMatrix& m1, const IntersectionMatrix& m2) const { @@ -242,9 +242,9 @@ void Interpolation3DTest::dumpIntersectionMatrix(const IntersectionMatrix& m) co { for(std::map::const_iterator iter2 = iter->begin() ; iter2 != iter->end() ; ++iter2) { - + std::cout << "V(" << i << ", " << iter2->first << ") = " << iter2->second << std::endl; - + } ++i; } @@ -259,18 +259,25 @@ void Interpolation3DTest::setUp() void Interpolation3DTest::tearDown() { delete interpolator; -} +} void Interpolation3DTest::calcIntersectionMatrix(const char* mesh1path, const char* mesh1, const char* mesh2path, const char* mesh2, IntersectionMatrix& m) const { - const string dataBaseDir = getenv("MED_ROOT_DIR"); - const string dataDir = dataBaseDir + "/share/salome/resources/med/"; + string dataDir = ""; + if ( getenv("MEDTOOL_ROOT_DIR") ) { + dataDir = getenv("MEDTOOL_ROOT_DIR"); + dataDir += "/share/resources/med/"; + } + else { + dataDir = get_current_dir_name(); + dataDir += "/../../resources/"; + } LOG(1, std::endl << "=== -> intersecting src = " << mesh1 << ", target 
= " << mesh2 ); LOG(5, "Loading " << mesh1 << " from " << mesh1path); MESH sMesh(MED_DRIVER, dataDir+mesh1path, mesh1); - + LOG(5, "Loading " << mesh2 << " from " << mesh2path); MESH tMesh(MED_DRIVER, dataDir+mesh2path, mesh2); @@ -284,7 +291,7 @@ void Interpolation3DTest::calcIntersectionMatrix(const char* mesh1path, const ch } LOG(1, "Intersection calculation done. " << std::endl ); - + } void Interpolation3DTest::intersectMeshes(const char* mesh1path, const char* mesh1, const char* mesh2path, const char* mesh2, const double correctVol, const double prec, bool doubleTest) const @@ -300,7 +307,7 @@ void Interpolation3DTest::intersectMeshes(const char* mesh1path, const char* mes IntersectionMatrix matrix1; calcIntersectionMatrix(mesh1path, mesh1, mesh2path, mesh2, matrix1); -#if LOG_LEVEL >= 2 +#if LOG_LEVEL >= 2 dumpIntersectionMatrix(matrix1); #endif @@ -320,14 +327,14 @@ void Interpolation3DTest::intersectMeshes(const char* mesh1path, const char* mes } else { - + IntersectionMatrix matrix2; - calcIntersectionMatrix(mesh2path, mesh2, mesh1path, mesh1, matrix2); + calcIntersectionMatrix(mesh2path, mesh2, mesh1path, mesh1, matrix2); #if LOG_LEVEL >= 2 dumpIntersectionMatrix(matrix2); #endif - + const double vol2 = sumVolume(matrix2); LOG(1, "vol1 = " << vol1 << ", vol2 = " << vol2 << ", correctVol = " << correctVol ); diff --git a/src/INTERP_KERNELTest/MeshTestToolkit.txx b/src/INTERP_KERNELTest/MeshTestToolkit.txx index 9096963ce..de2934321 100644 --- a/src/INTERP_KERNELTest/MeshTestToolkit.txx +++ b/src/INTERP_KERNELTest/MeshTestToolkit.txx @@ -38,7 +38,7 @@ #include -// levels : +// levels : // 1 - titles and volume results // 2 - symmetry / diagonal results and intersection matrix output // 3 - empty @@ -138,7 +138,7 @@ namespace INTERP_TEST } /** - * Verifies if for a given intersection matrix the sum of each row is equal to the volumes + * Verifies if for a given intersection matrix the sum of each row is equal to the volumes * of the corresponding source elements and the sum of each column is equal to the volumes * of the corresponding target elements. This will be true as long as the meshes correspond * to the same geometry. The equalities are in the "epsilon-sense", making sure the relative @@ -252,7 +252,7 @@ namespace INTERP_TEST //if(m2[j - 1].count(i+1) > 0) // { std::map theMap = m2.at(j); - const double v2 = fabs(theMap[i]); + const double v2 = fabs(theMap[i]); if(v1 != v2) { LOG(2, "V1( " << i << ", " << j << ") = " << v1 << " which is different from V2( " << j << ", " << i << ") = " << v2 << " | diff = " << v1 - v2 ); @@ -267,7 +267,7 @@ namespace INTERP_TEST } if(!isSymmetric) { - LOG(1, "*** matrices are not symmetric"); + LOG(1, "*** matrices are not symmetric"); } return isSymmetric; } @@ -335,9 +335,9 @@ namespace INTERP_TEST * Calculates the intersection matrix for two meshes. * If the source and target meshes are the same, a CppUnit assertion raised if testVolumes() returns false. 
* - * @param mesh1path the path to the file containing the source mesh, relative to {$MED_ROOT_DIR}/share/salome/resources/med/ + * @param mesh1path the path to the file containing the source mesh, relative to {$MEDTOOL_ROOT_DIR}/share/resources/med/ * @param mesh1 the name of the source mesh - * @param mesh2path the path to the file containing the target mesh, relative to {$MED_ROOT_DIR}/share/salome/resources/med/ + * @param mesh2path the path to the file containing the target mesh, relative to {$MEDTOOL_ROOT_DIR}/share/resources/med/ * @param mesh2 the name of the target mesh * @param m intersection matrix in which to store the result of the intersection */ @@ -397,9 +397,9 @@ namespace INTERP_TEST * it will be confirmed that the intersection matrix is diagonal, otherwise the intersection matrices will be * calculated once which each mesh as source mesh, and it will be verified that the they are each others' transpose. * - * @param mesh1path the path to the file containing the source mesh, relative to {$MED_ROOT_DIR}/share/salome/resources/med/ + * @param mesh1path the path to the file containing the source mesh, relative to {$MEDTOOL_ROOT_DIR}/share/resources/med/ * @param mesh1 the name of the source mesh - * @param mesh2path the path to the file containing the target mesh, relative to {$MED_ROOT_DIR}/share/salome/resources/med/ + * @param mesh2path the path to the file containing the target mesh, relative to {$MEDTOOL_ROOT_DIR}/share/resources/med/ * @param mesh2 the name of the target mesh * @param correctVol the total volume of the intersection of the two meshes * @param prec maximum relative error to be tolerated in volume comparisions @@ -420,7 +420,7 @@ namespace INTERP_TEST IntersectionMatrix matrix1; calcIntersectionMatrix(mesh1path, mesh1, mesh2path, mesh2, matrix1); -#if LOG_LEVEL >= 2 +#if LOG_LEVEL >= 2 dumpIntersectionMatrix(matrix1); #endif @@ -441,7 +441,7 @@ namespace INTERP_TEST else { IntersectionMatrix matrix2; - calcIntersectionMatrix(mesh2path, mesh2, mesh1path, mesh1, matrix2); + calcIntersectionMatrix(mesh2path, mesh2, mesh1path, mesh1, matrix2); #if LOG_LEVEL >= 2 dumpIntersectionMatrix(matrix2); @@ -460,7 +460,7 @@ namespace INTERP_TEST /** * Utility method used to facilitate the call to intersect meshes. - * It calls intersectMeshes, using "mesh1.med" as file name for the mesh with name "mesh1" and + * It calls intersectMeshes, using "mesh1.med" as file name for the mesh with name "mesh1" and * "mesh2.med" as file name for the mesh with name "mesh2". The rest of the arguments are passed * along as they are. * diff --git a/src/INTERP_KERNELTest/PerfTest.cxx b/src/INTERP_KERNELTest/PerfTest.cxx index 85d67be1a..8c5c8d641 100644 --- a/src/INTERP_KERNELTest/PerfTest.cxx +++ b/src/INTERP_KERNELTest/PerfTest.cxx @@ -31,11 +31,11 @@ /** * \file PerfTest.cxx - * Test program which takes two meshes and calculates their intersection matrix. - * - * USAGE : PerfTest mesh1 mesh2 + * Test program which takes two meshes and calculates their intersection matrix. + * + * USAGE : PerfTest mesh1 mesh2 * where mesh1 and mesh2 are the names of two meshes located in - * the files mesh1.med, mesh2.med in {$MED_ROOT_DIR}/share/salome/resources/med/ + * the files mesh1.med, mesh2.med in {$MEDTOOL_ROOT_DIR}/share/resources/med/ * */ @@ -47,48 +47,48 @@ namespace INTERP_TEST */ class PerfTestToolkit : public MeshTestToolkit<3,3> { - + public: /** * Calculates the intersection matrix for two meshes. 
- * Outputs the names of the meshes intersected, the number of elements in each mesh, + * Outputs the names of the meshes intersected, the number of elements in each mesh, * the number of matrix elements and the number of non-zero matrix elements, etc. * These values help to determine how well the filtering algorithm is working. * - * @param mesh1path the path to the file containing the source mesh, relative to {$MED_ROOT_DIR}/share/salome/resources/med/ + * @param mesh1path the path to the file containing the source mesh, relative to {$MEDTOOL_ROOT_DIR}/share/resources/med/ * @param mesh1 the name of the source mesh - * @param mesh2path the path to the file containing the target mesh, relative to {$MED_ROOT_DIR}/share/salome/resources/med/ + * @param mesh2path the path to the file containing the target mesh, relative to {$MEDTOOL_ROOT_DIR}/share/resources/med/ * @param mesh2 the name of the target mesh * @param m intersection matrix in which to store the result of the intersection */ - void calcIntersectionMatrix(const char* mesh1path, const char* mesh1, const char* mesh2path, const char* mesh2, IntersectionMatrix& m) + void calcIntersectionMatrix(const char* mesh1path, const char* mesh1, const char* mesh2path, const char* mesh2, IntersectionMatrix& m) { LOG(1, std::endl << "=== -> intersecting src = " << mesh1 << ", target = " << mesh2 ); - + LOG(5, "Loading " << mesh1 << " from " << mesh1path); MEDCouplingAutoRefCountObjectPtr sMeshML=MEDFileUMesh::New(INTERP_TEST::getResourceFile(mesh1path).c_str(),mesh1); MEDCouplingAutoRefCountObjectPtr sMesh=sMeshML->getMeshAtLevel(0); - - + + LOG(5, "Loading " << mesh2 << " from " << mesh2path); MEDCouplingAutoRefCountObjectPtr tMeshML=MEDFileUMesh::New(INTERP_TEST::getResourceFile(mesh2path).c_str(),mesh2); MEDCouplingAutoRefCountObjectPtr tMesh=tMeshML->getMeshAtLevel(0); - + MEDCouplingNormalizedUnstructuredMesh<3,3> sMesh_wrapper(sMesh); MEDCouplingNormalizedUnstructuredMesh<3,3> tMesh_wrapper(tMesh); - + Interpolation3D interpolator; interpolator.interpolateMeshes(sMesh_wrapper, tMesh_wrapper,m,"P0P0"); - + std::pair eff = countNumberOfMatrixEntries(m); - LOG(1, eff.first << " of " << numTargetElems * numSrcElems << " intersections calculated : ratio = " + LOG(1, eff.first << " of " << numTargetElems * numSrcElems << " intersections calculated : ratio = " << double(eff.first) / double(numTargetElems * numSrcElems)); - LOG(1, eff.second << " non-zero elements of " << eff.first << " total : filter efficiency = " + LOG(1, eff.second << " non-zero elements of " << eff.first << " total : filter efficiency = " << double(eff.second) / double(eff.first)); - + LOG(1, "Intersection calculation done. " << std::endl ); - + } /** @@ -100,7 +100,7 @@ namespace INTERP_TEST */ std::pair countNumberOfMatrixEntries(const IntersectionMatrix& m) { - + int numElems = 0; int numNonZero = 0; for(IntersectionMatrix::const_iterator iter = m.begin() ; iter != m.end() ; ++iter) @@ -116,12 +116,12 @@ namespace INTERP_TEST } return std::make_pair(numElems, numNonZero); } - + }; } /** - * Main method of the program. + * Main method of the program. * Intersects the meshes and outputs some information about the calculation as well as the * intersection matrix on std::cout. 
* @@ -133,7 +133,7 @@ int main(int argc, char** argv) using INTERP_TEST::PerfTestToolkit; assert(argc == 3); - + // load meshes const std::string mesh1 = argv[1]; const std::string mesh2 = argv[2]; @@ -148,7 +148,7 @@ int main(int argc, char** argv) testTools.calcIntersectionMatrix(mesh1path.c_str(), mesh1.c_str(), mesh2path.c_str(), mesh2.c_str(), m); testTools.dumpIntersectionMatrix(m); - + return 0; } diff --git a/src/INTERP_KERNELTest/TestInterpKernelUtils.cxx b/src/INTERP_KERNELTest/TestInterpKernelUtils.cxx index d82a4c273..6aa73854a 100644 --- a/src/INTERP_KERNELTest/TestInterpKernelUtils.cxx +++ b/src/INTERP_KERNELTest/TestInterpKernelUtils.cxx @@ -20,23 +20,24 @@ #include "TestInterpKernelUtils.hxx" #include +#include namespace INTERP_TEST { std::string getResourceFile( const std::string& filename ) { std::string resourceFile = ""; - - if ( getenv("top_srcdir") ) { - // we are in 'make test' step - resourceFile = getenv("top_srcdir"); - resourceFile += "/resources/"; + + if ( getenv("MEDTOOL_ROOT_DIR") ) { + // use MEDTOOL_ROOT_DIR env.var + resourceFile = getenv("MEDTOOL_ROOT_DIR"); + resourceFile += "/share/resources/med/"; } - else if ( getenv("MED_ROOT_DIR") ) { - // use MED_ROOT_DIR env.var - resourceFile = getenv("MED_ROOT_DIR"); - resourceFile += "/share/salome/resources/med/"; + else { + resourceFile = get_current_dir_name(); + resourceFile += "/../../resources/"; } + resourceFile += filename; return resourceFile; } diff --git a/src/MEDCoupling/Test/CMakeLists.txt b/src/MEDCoupling/Test/CMakeLists.txt index 2b8adefc7..5deb19f7f 100644 --- a/src/MEDCoupling/Test/CMakeLists.txt +++ b/src/MEDCoupling/Test/CMakeLists.txt @@ -54,22 +54,17 @@ SET(TestMEDCouplingExamples_SOURCES MEDCouplingBasicsTest0.cxx ) -#SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env) - ADD_EXECUTABLE(TestMEDCoupling ${TestMEDCoupling_SOURCES}) TARGET_LINK_LIBRARIES(TestMEDCoupling medcoupling ${CPPUNIT_LIBRARIES} ${PLATFORM_LIBS}) ADD_TEST(TestMEDCoupling TestMEDCoupling) -SET_TESTS_PROPERTIES(TestMEDCoupling PROPERTIES ENVIRONMENT "${tests_env}") ADD_EXECUTABLE(TestMEDCouplingRemapper ${TestMEDCouplingRemapper_SOURCES}) TARGET_LINK_LIBRARIES(TestMEDCouplingRemapper medcouplingremapper ${CPPUNIT_LIBRARIES} ${PLATFORM_LIBS}) ADD_TEST(TestMEDCouplingRemapper TestMEDCouplingRemapper) -SET_TESTS_PROPERTIES(TestMEDCouplingRemapper PROPERTIES ENVIRONMENT "${tests_env}") ADD_EXECUTABLE(TestMEDCouplingExamples ${TestMEDCouplingExamples_SOURCES}) TARGET_LINK_LIBRARIES(TestMEDCouplingExamples medcoupling ${CPPUNIT_LIBRARIES} ${PLATFORM_LIBS}) ADD_TEST(TestMEDCouplingExamples TestMEDCouplingExamples) -SET_TESTS_PROPERTIES(TestMEDCouplingExamples PROPERTIES ENVIRONMENT "${tests_env}") INSTALL(TARGETS TestMEDCoupling TestMEDCouplingRemapper TestMEDCouplingExamples DESTINATION ${MEDTOOL_INSTALL_BINS}) diff --git a/src/MEDCoupling_Swig/CMakeLists.txt b/src/MEDCoupling_Swig/CMakeLists.txt index e0186342a..59cef51d2 100644 --- a/src/MEDCoupling_Swig/CMakeLists.txt +++ b/src/MEDCoupling_Swig/CMakeLists.txt @@ -89,20 +89,13 @@ INSTALL(FILES MEDCoupling.i MEDCouplingCommon.i MEDCouplingRefCountObject.i MEDC INSTALL(FILES MEDCouplingBasicsTest.py MEDCouplingRemapperTest.py MEDCouplingDataForTest.py MEDCouplingNumPyTest.py MEDCouplingPickleTest.py DESTINATION ${MEDTOOL_INSTALL_SCRIPT_PYTHON}) INSTALL(FILES MEDCouplingExamplesTest.py DESTINATION ${MEDTOOL_INSTALL_SCRIPT_PYTHON}) -#SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env) - ADD_TEST(MEDCouplingBasicsTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDCouplingBasicsTest.py) 
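The hunks above (TestInterpKernelUtils.cxx, Interpolation3DTest.cxx and the test CMakeLists) all switch the resource lookup to the same two-level rule: use the MEDTOOL_ROOT_DIR install tree when the variable is set, otherwise fall back to a directory relative to where the test runs. A minimal, self-contained sketch of that rule follows; it is an illustration only, resolveResourceDir is a hypothetical name, and the portable getcwd() stands in for the GNU get_current_dir_name() call used in the patch.

// resolve_resources.cxx -- illustrative sketch, not part of the patch
#include <cstdlib>
#include <string>
#include <iostream>
#include <unistd.h>

static std::string resolveResourceDir()
{
  const char *root = std::getenv("MEDTOOL_ROOT_DIR");
  if (root)
    return std::string(root) + "/share/resources/med/";   // installed layout
  char cwd[4096];
  if (getcwd(cwd, sizeof(cwd)))
    return std::string(cwd) + "/../../resources/";        // build-tree layout
  return "./";                                            // last resort
}

int main()
{
  // UnitTetra.med is one of the resource files added by this patch
  std::cout << resolveResourceDir() << "UnitTetra.med" << std::endl;
  return 0;
}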
-SET_TESTS_PROPERTIES(MEDCouplingBasicsTest PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(MEDCouplingExamplesTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDCouplingExamplesTest.py) -SET_TESTS_PROPERTIES(MEDCouplingExamplesTest PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(MEDCouplingRemapperTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDCouplingRemapperTest.py) -SET_TESTS_PROPERTIES(MEDCouplingRemapperTest PROPERTIES ENVIRONMENT "${tests_env}") IF(NUMPY_FOUND) ADD_TEST(MEDCouplingNumPyTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDCouplingNumPyTest.py) - SET_TESTS_PROPERTIES(MEDCouplingNumPyTest PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(MEDCouplingPickleTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDCouplingPickleTest.py) - SET_TESTS_PROPERTIES(MEDCouplingPickleTest PROPERTIES ENVIRONMENT "${tests_env}") ENDIF(NUMPY_FOUND) # Application tests diff --git a/src/MEDLoader/Swig/CMakeLists.txt b/src/MEDLoader/Swig/CMakeLists.txt index 54a9a602c..99a282168 100644 --- a/src/MEDLoader/Swig/CMakeLists.txt +++ b/src/MEDLoader/Swig/CMakeLists.txt @@ -81,31 +81,27 @@ INSTALL(FILES sauv2med PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EX INSTALL(FILES case2med PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ DESTINATION ${MEDTOOL_INSTALL_BINS} ) INSTALL(FILES med2case PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ DESTINATION ${MEDTOOL_INSTALL_BINS} ) -#SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env) - ADD_TEST(MEDLoaderTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDLoaderTest.py) -SET_TESTS_PROPERTIES(MEDLoaderTest PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(MEDLoaderTest2 ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDLoaderTest2.py) -SET_TESTS_PROPERTIES(MEDLoaderTest2 PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(MEDLoaderTest3 ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDLoaderTest3.py) -SET_TESTS_PROPERTIES(MEDLoaderTest3 PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(MEDLoaderTest4 ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDLoaderTest4.py) -SET_TESTS_PROPERTIES(MEDLoaderTest4 PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(MEDLoaderExamplesTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDLoaderExamplesTest.py) -SET_TESTS_PROPERTIES(MEDLoaderExamplesTest PROPERTIES ENVIRONMENT "${tests_env}") ADD_TEST(SauvLoaderTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/SauvLoaderTest.py) -SET_TESTS_PROPERTIES(SauvLoaderTest PROPERTIES ENVIRONMENT "${tests_env}") IF(NUMPY_FOUND) ADD_TEST(MEDLoaderCouplingTrainingSession ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDLoaderCouplingTrainingSession.py) - SET_TESTS_PROPERTIES(MEDLoaderCouplingTrainingSession PROPERTIES ENVIRONMENT "${tests_env}") ENDIF(NUMPY_FOUND) # Application tests SET(TEST_INSTALL_DIRECTORY ${MEDTOOL_INSTALL_SCRIPT_SCRIPTS}/test/MEDCoupling/MEDLoader/Swig) -INSTALL(FILES MEDLoaderDataForTest.py MEDLoaderTest.py MEDLoaderTest2.py MEDLoaderTest3.py MEDLoaderTest4.py SauvLoaderTest.py MEDLoaderExamplesTest.py MEDLoaderCouplingTrainingSession.py CaseIO.py CaseReader.py CaseWriter.py VTKReader.py MEDLoaderSplitter.py medutilities.py DESTINATION ${TEST_INSTALL_DIRECTORY}) +SET(MEDLOADER_TEST_FILES MEDLoaderDataForTest.py MEDLoaderTest.py MEDLoaderTest2.py MEDLoaderTest3.py MEDLoaderTest4.py SauvLoaderTest.py MEDLoaderExamplesTest.py MEDLoaderCouplingTrainingSession.py CaseIO.py CaseReader.py CaseWriter.py VTKReader.py 
MEDLoaderSplitter.py medutilities.py) + +FOREACH(testfile ${MEDLOADER_TEST_FILES}) + CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/${testfile}" "${CMAKE_CURRENT_BINARY_DIR}/${testfile}" COPYONLY) +ENDFOREACH(testfile) +INSTALL(FILES ${MEDLOADER_TEST_FILES} DESTINATION ${TEST_INSTALL_DIRECTORY}) INSTALL(FILES CTestTestfileInstall.cmake DESTINATION ${TEST_INSTALL_DIRECTORY} diff --git a/src/MEDLoader/Swig/MEDLoaderCouplingTrainingSession.py b/src/MEDLoader/Swig/MEDLoaderCouplingTrainingSession.py index 8001e2798..668b56260 100644 --- a/src/MEDLoader/Swig/MEDLoaderCouplingTrainingSession.py +++ b/src/MEDLoader/Swig/MEDLoaderCouplingTrainingSession.py @@ -480,8 +480,13 @@ print "IntegralGlobConstraint %lf == %lf"%(srcField.getArray().accumulate()[0],t from numpy import * from math import acos -med_root_dir=os.getenv("MED_ROOT_DIR") -agitateur_file=os.path.join(os.getenv("MED_ROOT_DIR"),"share","salome","resources","med","agitateur.med") +med_root_dir=os.getenv("MEDTOOL_ROOT_DIR") +if med_root_dir: + agitateur_file=os.path.join(os.getenv("MEDTOOL_ROOT_DIR"),"share","resources","med","agitateur.med") +else: + current_dir = os.path.dirname(os.path.realpath(__file__)) + agitateur_file=os.path.join(current_dir, "..", "..", "..", "resources","agitateur.med") + pass data=MEDFileData(agitateur_file) ts=data.getFields()[0].getTimeSteps() print ts diff --git a/src/MEDLoader/Swig/SauvLoaderTest.py b/src/MEDLoader/Swig/SauvLoaderTest.py index 502e4e468..b60a4fd3d 100644 --- a/src/MEDLoader/Swig/SauvLoaderTest.py +++ b/src/MEDLoader/Swig/SauvLoaderTest.py @@ -25,11 +25,18 @@ from MEDLoaderDataForTest import MEDLoaderDataForTest class SauvLoaderTest(unittest.TestCase): + def __getResourcesDirectory(self): + med_root_dir=os.getenv("MEDTOOL_ROOT_DIR") + if med_root_dir: + return os.path.join( os.getenv("MEDTOOL_ROOT_DIR"), "share","resources","med") + else: + current_dir = os.path.dirname(os.path.realpath(__file__)) + return os.path.join(current_dir, "..", "..", "..", "resources") + pass + def testSauv2Med(self): # get a file containing all types of readable piles - self.assertTrue( os.getenv("MED_ROOT_DIR") ) - sauvFile = os.path.join( os.getenv("MED_ROOT_DIR"), "share","salome", - "resources","med","allPillesTest.sauv") + sauvFile = os.path.join( self.__getResourcesDirectory(),"allPillesTest.sauv") self.assertTrue( os.access( sauvFile, os.F_OK)) # read SAUV and write MED @@ -38,7 +45,7 @@ class SauvLoaderTest(unittest.TestCase): d2=sr.loadInMEDFileDS(); d2.write(medFile,0); - # check + # check self.assertEqual(1,d2.getNumberOfMeshes()) self.assertEqual(8+97,d2.getNumberOfFields()) mm = d2.getMeshes() @@ -50,9 +57,7 @@ class SauvLoaderTest(unittest.TestCase): def testMed2Sauv(self): # read pointe.med - self.assertTrue( os.getenv("MED_ROOT_DIR") ) - medFile = os.path.join( os.getenv("MED_ROOT_DIR"), "share","salome", - "resources","med","pointe.med") + medFile = os.path.join(self.__getResourcesDirectory(),"pointe.med") self.assertTrue( os.access( medFile, os.F_OK)) pointeMed = MEDFileData.New( medFile ) @@ -241,9 +246,7 @@ class SauvLoaderTest(unittest.TestCase): @unittest.skipUnless(MEDLoader.HasXDR(),"requires XDR") def testMissingGroups(self): """test for issue 0021749: [CEA 601] Some missing groups in mesh after reading a SAUV file with SauvReader.""" - self.assertTrue( os.getenv("MED_ROOT_DIR") ) - sauvFile = os.path.join( os.getenv("MED_ROOT_DIR"), "share","salome", - "resources","med","BDC-714.sauv") + sauvFile = os.path.join(self.__getResourcesDirectory(),"BDC-714.sauv") self.assertTrue( os.access( 
sauvFile, os.F_OK)) name_of_group_on_cells='Slice10:ABSORBER' name_of_group_on_cells2='Slice10:00LR' @@ -336,13 +339,13 @@ class SauvLoaderTest(unittest.TestCase): sw.setCpyGrpIfOnASingleFamilyStatus(True) self.assertTrue(sw.getCpyGrpIfOnASingleFamilyStatus()) sw.write(sauvFile) - + f = open(sauvFile) # String pattern for the header of the sub meshes record ("PILE" number, number of named objects, number of objects) pattern_pile= re.compile(r'\sPILE\sNUMERO\s+(?P[0-9]+)NBRE\sOBJETS\sNOMMES\s+(?P[0-9]+)NBRE\sOBJETS\s+(?P[0-9]+)') # String pattern for a sub mesh header (cell type, number of components and three numbers) pattern_header=re.compile(r'\s+(?P[0-9]+)\s+(?P[0-9]+)\s+[0-9]+\s+[0-9]+\s+[0-9]+') - + nbobjects=0 line = f.readline() while(line): @@ -356,13 +359,13 @@ class SauvLoaderTest(unittest.TestCase): pass line=f.readline() pass - + # Skipping the objects names f.readline() # Skipping the objects ids f.readline() - # Looking for each sub-mesh header + # Looking for each sub-mesh header line = f.readline() cur_object=0 while(line and cur_object < nbobjects): diff --git a/src/MEDLoader/Test/CMakeLists.txt b/src/MEDLoader/Test/CMakeLists.txt index 96d121401..e61fc9378 100644 --- a/src/MEDLoader/Test/CMakeLists.txt +++ b/src/MEDLoader/Test/CMakeLists.txt @@ -39,18 +39,14 @@ SET(TestSauvLoader_SOURCES SauvLoaderTest.cxx ) -#SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env) - ADD_EXECUTABLE(TestMEDLoader ${TestMEDLoader_SOURCES}) TARGET_LINK_LIBRARIES(TestMEDLoader medloader ${CPPUNIT_LIBRARIES} ${PLATFORM_LIBS}) ADD_TEST(TestMEDLoader TestMEDLoader) -SET_TESTS_PROPERTIES(TestMEDLoader PROPERTIES ENVIRONMENT "${tests_env}") ADD_EXECUTABLE(TestSauvLoader ${TestSauvLoader_SOURCES}) TARGET_LINK_LIBRARIES(TestSauvLoader medloader ${CPPUNIT_LIBRARIES} ${PLATFORM_LIBS}) ADD_TEST(TestSauvLoader TestSauvLoader) -SET_TESTS_PROPERTIES(TestSauvLoader PROPERTIES ENVIRONMENT "${tests_env}") INSTALL(TARGETS TestMEDLoader TestSauvLoader DESTINATION ${MEDTOOL_INSTALL_BINS}) diff --git a/src/MEDLoader/Test/SauvLoaderTest.cxx b/src/MEDLoader/Test/SauvLoaderTest.cxx index b6195f6df..df1570008 100644 --- a/src/MEDLoader/Test/SauvLoaderTest.cxx +++ b/src/MEDLoader/Test/SauvLoaderTest.cxx @@ -44,7 +44,7 @@ void SauvLoaderTest::testSauv2Med() MEDCouplingAutoRefCountObjectPtr d2=sr->loadInMEDFileDS(); // write MED d2->write("allPillesTest.med",0); - // check + // check CPPUNIT_ASSERT_EQUAL(1,d2->getNumberOfMeshes()); CPPUNIT_ASSERT_EQUAL(8+97,d2->getNumberOfFields()); MEDFileMesh * m = d2->getMeshes()->getMeshAtPos(0); @@ -332,16 +332,16 @@ std::string SauvLoaderTest::getResourceFile( const std::string& filename ) { std::string resourceFile = ""; - if ( getenv("top_srcdir") ) { - // we are in 'make test' step - resourceFile = getenv("top_srcdir"); - resourceFile += "/resources/"; + if ( getenv("MEDTOOL_ROOT_DIR") ) { + // use MEDTOOL_ROOT_DIR env.var + resourceFile = getenv("MEDTOOL_ROOT_DIR"); + resourceFile += "/share/resources/med/"; } - else if ( getenv("MED_ROOT_DIR") ) { - // use MED_ROOT_DIR env.var - resourceFile = getenv("MED_ROOT_DIR"); - resourceFile += "/share/salome/resources/med/"; + else { + resourceFile = get_current_dir_name(); + resourceFile += "/../../../resources/"; } + resourceFile += filename; #ifdef WIN32 std::string fixedpath = resourceFile; diff --git a/src/MEDPartitioner/CMakeLists.txt b/src/MEDPartitioner/CMakeLists.txt index d1642c5cf..a57506609 100644 --- a/src/MEDPartitioner/CMakeLists.txt +++ b/src/MEDPartitioner/CMakeLists.txt @@ -131,7 +131,7 @@ IF(${SALOME_USE_MPI}) 
SET(medpartitionercpp_LDFLAGS ${medpartitionercpp_LDFLAGS} ${MPI_LIBRARIES}) SET_TARGET_PROPERTIES(medpartitioner_para PROPERTIES COMPILE_FLAGS "${medpartitionercpp_DEFINITIONS}") TARGET_LINK_LIBRARIES(medpartitioner_para medpartitionercpp ${medpartitionercpp_LDFLAGS}) - INSTALL(TARGETS medpartitioner_para DESTINATION ${SALOME_INSTALL_BINS}) + INSTALL(TARGETS medpartitioner_para DESTINATION ${MEDTOOL_INSTALL_BINS}) ENDIF(${SALOME_USE_MPI}) ADD_DEFINITIONS(${medpartitionercpp_DEFINITIONS}) diff --git a/src/MEDPartitioner/MEDPARTITIONER_metis.c b/src/MEDPartitioner/MEDPARTITIONER_metis.c index f67c84932..7967cab1c 100644 --- a/src/MEDPartitioner/MEDPARTITIONER_metis.c +++ b/src/MEDPartitioner/MEDPARTITIONER_metis.c @@ -38,42 +38,42 @@ typedef int idxtype; #endif // defined(MED_ENABLE_METIS) & !defined(MED_ENABLE_PARMETIS) -void MEDPARTITIONER_METIS_PartGraphRecursive(int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt, - idxtype *adjwgt, int *wgtflag, int *numflag, int *nparts, +void MEDPARTITIONER_METIS_PartGraphRecursive(int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt, + idxtype *adjwgt, int *wgtflag, int *numflag, int *nparts, int *options, int *edgecut, idxtype *part) { #if defined(MED_ENABLE_METIS) #ifndef MED_ENABLE_METIS_V5 - METIS_PartGraphRecursive(nvtxs, xadj, adjncy, vwgt, - adjwgt, wgtflag, numflag, nparts, + METIS_PartGraphRecursive(nvtxs, xadj, adjncy, vwgt, + adjwgt, wgtflag, numflag, nparts, options, edgecut, part); #else int ncon=1; options[METIS_OPTION_NCUTS]=1; options[METIS_OPTION_NITER]=1; options[METIS_OPTION_UFACTOR]=1; - METIS_PartGraphRecursive(nvtxs, &ncon, xadj, adjncy, vwgt, 0 /* vsize*/, + METIS_PartGraphRecursive(nvtxs, &ncon, xadj, adjncy, vwgt, 0 /* vsize*/, adjwgt, nparts,/* tpwgts*/ 0,/* ubvec */ 0, options, edgecut, part); #endif #endif } -void MEDPARTITIONER_METIS_PartGraphKway(int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt, - idxtype *adjwgt, int *wgtflag, int *numflag, int *nparts, +void MEDPARTITIONER_METIS_PartGraphKway(int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt, + idxtype *adjwgt, int *wgtflag, int *numflag, int *nparts, int *options, int *edgecut, idxtype *part) { #if defined(MED_ENABLE_METIS) #ifndef MED_ENABLE_METIS_V5 - METIS_PartGraphKway(nvtxs, xadj, adjncy, vwgt, - adjwgt, wgtflag, numflag, nparts, + METIS_PartGraphKway(nvtxs, xadj, adjncy, vwgt, + adjwgt, wgtflag, numflag, nparts, options, edgecut, part); #else int ncon=1; options[METIS_OPTION_NCUTS]=1; options[METIS_OPTION_NITER]=1; options[METIS_OPTION_UFACTOR]=1; - METIS_PartGraphKway(nvtxs, &ncon, xadj, adjncy, vwgt, 0 /* vsize*/, + METIS_PartGraphKway(nvtxs, &ncon, xadj, adjncy, vwgt, 0 /* vsize*/, adjwgt, nparts, 0 , 0 /* ubvec */, options, edgecut, part); #endif diff --git a/src/MEDPartitioner/Test/CMakeLists.txt b/src/MEDPartitioner/Test/CMakeLists.txt index 25e10b994..c1dd6f3bf 100644 --- a/src/MEDPartitioner/Test/CMakeLists.txt +++ b/src/MEDPartitioner/Test/CMakeLists.txt @@ -51,9 +51,7 @@ INSTALL(TARGETS TestMEDPARTITIONER DESTINATION ${MEDTOOL_INSTALL_BINS}) INSTALL(FILES ${MEDPARTITIONERTest_HEADERS_HXX} DESTINATION ${MEDTOOL_INSTALL_HEADERS}) -#SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env) ADD_TEST(TestMEDPARTITIONER TestMEDPARTITIONER) -SET_TESTS_PROPERTIES(TestMEDPARTITIONER PROPERTIES ENVIRONMENT "${tests_env}") # Application tests diff --git a/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx b/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx index 5144d40d3..9b6743a1c 100644 --- a/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx +++ 
b/src/MEDPartitioner/Test/MEDPARTITIONERTest.cxx @@ -37,11 +37,13 @@ #include #include +#include #include #include #include #include #include +#include #ifdef HAVE_MPI #include @@ -83,19 +85,17 @@ void MEDPARTITIONERTest::setbigSize() std::string MEDPARTITIONERTest::getPartitionerExe() const { std::string execName; - if ( getenv("top_builddir")) // make distcheck + if ( getenv("MEDTOOL_ROOT_DIR") ) { - execName = getenv("top_builddir"); - execName += "/src/MEDPartitioner/medpartitioner"; - } - else if ( getenv("MED_ROOT_DIR") ) - { - execName=getenv("MED_ROOT_DIR"); //.../INSTALL/MED - execName+="/bin/salome/medpartitioner"; + execName=getenv("MEDTOOL_ROOT_DIR"); //.../INSTALL/MED + execName+="/bin/medpartitioner"; } else { - CPPUNIT_FAIL("Can't find medpartitioner, neither MED_ROOT_DIR nor top_builddir is set"); + execName = get_current_dir_name(); + execName += "/../../MEDPartitioner/medpartitioner"; + if (! std::ifstream(execName.c_str())) + CPPUNIT_FAIL("Can't find medpartitioner, please set MEDTOOL_ROOT_DIR"); } return execName; } @@ -179,7 +179,7 @@ ParaMEDMEM::MEDCouplingUMesh * MEDPARTITIONERTest::buildCUBE3DMesh() ii=ii + _ni + 2 ; conn.push_back(ii); conn.push_back(ii-1); - + ii=i + j*(_ni+1) + (k+1)*(_ni+1)*(_nj+1); conn.push_back(ii); conn.push_back(ii+1); @@ -197,7 +197,7 @@ ParaMEDMEM::MEDCouplingUMesh * MEDPARTITIONERTest::buildCUBE3DMesh() cout << endl; cout << "\nnb conn " << (_ni)*(_nj)*(_nk)*8 << " " << conn.size() << endl; for (int i=0; i<(int)conn.size(); i=i+8) - { + { for (int j=0; j<8; j++) cout << conn[i+j] << " "; cout << endl; @@ -205,7 +205,7 @@ ParaMEDMEM::MEDCouplingUMesh * MEDPARTITIONERTest::buildCUBE3DMesh() cout << endl; } */ - + MEDCouplingUMesh *mesh=MEDCouplingUMesh::New(); mesh->setMeshDimension(3); int nbc=conn.size()/8; //nb of cells @@ -267,13 +267,13 @@ ParaMEDMEM::MEDCouplingUMesh * MEDPARTITIONERTest::buildCARRE3DMesh() cout<setMeshDimension(2); int nbc=conn.size()/4; //nb of cells @@ -335,14 +335,14 @@ ParaMEDMEM::MEDCouplingUMesh * MEDPARTITIONERTest::buildFACE3DMesh() cout<setMeshDimension(2); int nbc=conn.size()/4; //nb of cells @@ -415,7 +415,7 @@ MEDCouplingFieldDouble * MEDPARTITIONERTest::buildVecFieldOnNodes() field.push_back(j+.2); field.push_back(k+.3); } - + MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(_file_name.c_str(),_mesh_name.c_str(),0); int nbOfNodes=mesh->getNumberOfNodes(); MEDCouplingFieldDouble *f1=MEDCouplingFieldDouble::New(ON_NODES,ONE_TIME); @@ -452,7 +452,7 @@ void MEDPARTITIONERTest::createTestMeshWithoutField() } mesh->decrRef(); } - + { vector meshes; MEDCouplingUMesh * mesh1 = buildCUBE3DMesh(); @@ -465,7 +465,7 @@ void MEDPARTITIONERTest::createTestMeshWithoutField() meshes.push_back(mesh1); meshes.push_back(mesh2); MEDLoader::WriteUMeshes(_file_name_with_faces.c_str(), meshes, true); - + ParaMEDMEM::MEDFileUMesh* mfm=ParaMEDMEM::MEDFileUMesh::New(_file_name_with_faces.c_str(), mesh1->getName().c_str()); DataArrayInt* FacesFam=DataArrayInt::New(); FacesFam->alloc(mfm->getSizeAtLevel(-1),1); @@ -487,7 +487,7 @@ void MEDPARTITIONERTest::createTestMeshWithoutField() mfm->write(_file_name_with_faces.c_str(),0); FacesFam->decrRef(); CellsFam->decrRef(); - + /*ce truc marche pas! 
ParaMEDMEM::MEDFileUMesh* mfm=ParaMEDMEM::MEDFileUMesh::New(_file_name_with_faces.c_str(), mesh1->getName()); vector ms; @@ -495,7 +495,7 @@ void MEDPARTITIONERTest::createTestMeshWithoutField() mfm->setGroupsFromScratch(-1, ms); mfm->write(_file_name_with_faces.c_str(),0); */ - + if (_verbose) cout<decrRef(); mfm->decrRef(); } - + { MEDCouplingUMesh * mesh = buildCARRE3DMesh(); MEDLoader::WriteUMesh(_file_name2.c_str(),mesh,true); @@ -553,7 +553,7 @@ void MEDPARTITIONERTest::createHugeTestMesh(int ni, int nj, int nk, int nbx, int \n$tagMesh \ \n \ \n"; - + string tagSubfiles, tagSubfile="\ \n \ $fileName\n \ @@ -565,7 +565,7 @@ void MEDPARTITIONERTest::createHugeTestMesh(int ni, int nj, int nk, int nbx, int testMesh\n \ \n \ \n"; - + int xyz=1; string sxyz; DataArrayDouble* coordsInit=mesh->getCoords()->deepCpy(); @@ -573,7 +573,7 @@ void MEDPARTITIONERTest::createHugeTestMesh(int ni, int nj, int nk, int nbx, int double deltax=cooFin[0]-cooDep[0]; double deltay=cooFin[1]-cooDep[1]; double deltaz=cooFin[2]-cooDep[2]; - + double dz=0.; for (int z=0; zgetCoords(); //int nbOfComp=coords->getNumberOfComponents(); //be 3D int nbOfTuple=coords->getNumberOfTuples(); @@ -600,11 +600,11 @@ void MEDPARTITIONERTest::createHugeTestMesh(int ni, int nj, int nk, int nbx, int } MEDLoader::WriteUMesh(fileName.c_str(),mesh,true); - + tagSubfiles+=tagSubfile; tagSubfiles.replace(tagSubfiles.find("$xyz"),4,sxyz); tagSubfiles.replace(tagSubfiles.find("$fileName"),9,fileName); - + tagMeshes+=tagMesh; tagMeshes.replace(tagMeshes.find("$xyz"),4,sxyz); xyz++; @@ -615,7 +615,7 @@ void MEDPARTITIONERTest::createHugeTestMesh(int ni, int nj, int nk, int nbx, int dz+=deltaz; } coordsInit->decrRef(); - + tagXml.replace(tagXml.find("$subdomainNumber"),16,sxyz); tagXml.replace(tagXml.find("$tagSubfile"),11,tagSubfiles); tagXml.replace(tagXml.find("$tagMesh"),8,tagMeshes); @@ -626,7 +626,7 @@ void MEDPARTITIONERTest::createHugeTestMesh(int ni, int nj, int nk, int nbx, int f<decrRef(); } @@ -666,17 +666,17 @@ void MEDPARTITIONERTest::createTestMeshWithVecFieldOnCells() f3->setDescription("MyDescriptionNE"); DataArrayDouble *array=DataArrayDouble::New(); //int nb=f1->getMesh()->getNumberOfNodes(); - + /*8 pt de gauss by cell int nb=f3->getMesh()->getNumberOfCells()*8; array->alloc(nb,2); double *ptr=array->getPointer(); for (int i=0; igetMesh()->getNumberOfCells(); int nb=nbcell*nbptgauss; int nbcomp=2; @@ -753,7 +753,7 @@ void MEDPARTITIONERTest::verifyTestMeshWithVecFieldOnNodes() { cout<<"\n types in "<::iterator t=types.begin(); t!=types.end(); ++t) cout<<" "<<*t; - for (std::set::const_iterator t=types.begin(); t!=types.end(); ++t) + for (std::set::const_iterator t=types.begin(); t!=types.end(); ++t) { //INTERP_KERNEL::CellModel essai=INTERP_KERNEL::CellModel::GetCellModel(*t); cout<<" "<<(INTERP_KERNEL::CellModel::GetCellModel(*t)).getRepr(); @@ -761,7 +761,7 @@ void MEDPARTITIONERTest::verifyTestMeshWithVecFieldOnNodes() cout<decrRef(); - + MEDFileUMesh * mf = MEDFileUMesh::New(_file_name.c_str(),_mesh_name.c_str(),-1,-1); vector lev; lev=mf->getNonEmptyLevels(); @@ -839,7 +839,7 @@ void MEDPARTITIONERTest::testMeshCollectionSinglePartitionMetis() bool empty_groups=false; MEDPARTITIONER::ParaDomainSelector parallelizer(false); MEDPARTITIONER::MeshCollection collection(fileName,parallelizer); - + MEDPARTITIONER::ParallelTopology* aPT = (MEDPARTITIONER::ParallelTopology*) collection.getTopology(); aPT->setGlobalNumerotationDefault(collection.getParaDomainSelector()); //Creating the graph and partitioning it @@ -847,13 
+847,13 @@ void MEDPARTITIONERTest::testMeshCollectionSinglePartitionMetis() new_topo.reset( collection.createPartition(ndomains,MEDPARTITIONER::Graph::METIS) ); //Creating a new mesh collection from the partitioning MEDPARTITIONER::MeshCollection new_collection(collection,new_topo.get(),split_family,empty_groups); - + //example to create files //MyGlobals::_General_Informations.clear(); //MyGlobals::_General_Informations.push_back(SerializeFromString("finalMeshName=Merge")); //if (MyGlobals::_Verbose>100) cout << "generalInformations : \n"<setGlobalNumerotationDefault(collection.getParaDomainSelector()); - + for (int ndomains=2 ; ndomains<=16 ; ndomains++) { //Creating the graph and partitioning it @@ -883,7 +883,7 @@ void MEDPARTITIONERTest::testMeshCollectionComplexPartitionMetis() new_topo.reset( collection.createPartition(ndomains,MEDPARTITIONER::Graph::METIS) ); //Creating a new mesh collection from the partitioning MEDPARTITIONER::MeshCollection new_collection(collection,new_topo.get(),split_family,empty_groups); - + CPPUNIT_ASSERT_EQUAL(ndomains,new_collection.getNbOfLocalMeshes()); CPPUNIT_ASSERT_EQUAL(ndomains,new_collection.getNbOfGlobalMeshes()); CPPUNIT_ASSERT_EQUAL(collection.getNbOfLocalCells(),new_collection.getNbOfLocalCells()); @@ -921,7 +921,7 @@ void MEDPARTITIONERTest::testMeshCollectionSinglePartitionScotch() bool empty_groups=false; MEDPARTITIONER::ParaDomainSelector parallelizer(false); MEDPARTITIONER::MeshCollection collection(fileName,parallelizer); - + MEDPARTITIONER::ParallelTopology* aPT = (MEDPARTITIONER::ParallelTopology*) collection.getTopology(); aPT->setGlobalNumerotationDefault(collection.getParaDomainSelector()); //Creating the graph and partitioning it @@ -929,13 +929,13 @@ void MEDPARTITIONERTest::testMeshCollectionSinglePartitionScotch() new_topo.reset( collection.createPartition(ndomains,MEDPARTITIONER::Graph::SCOTCH) ); //Creating a new mesh collection from the partitioning MEDPARTITIONER::MeshCollection new_collection(collection,new_topo.get(),split_family,empty_groups); - + //example to create files //MyGlobals::_General_Informations.clear(); //MyGlobals::_General_Informations.push_back(SerializeFromString("finalMeshName=Merge")); //if (MyGlobals::_Verbose>100) cout << "generalInformations : \n"<setGlobalNumerotationDefault(collection.getParaDomainSelector()); - + for (int ndomains=2 ; ndomains<=16 ; ndomains++) { //Creating the graph and partitioning it @@ -965,7 +965,7 @@ void MEDPARTITIONERTest::testMeshCollectionComplexPartitionScotch() new_topo.reset( collection.createPartition(ndomains,MEDPARTITIONER::Graph::SCOTCH) ); //Creating a new mesh collection from the partitioning MEDPARTITIONER::MeshCollection new_collection(collection,new_topo.get(),split_family,empty_groups); - + CPPUNIT_ASSERT_EQUAL(ndomains,new_collection.getNbOfLocalMeshes()); CPPUNIT_ASSERT_EQUAL(ndomains,new_collection.getNbOfGlobalMeshes()); CPPUNIT_ASSERT_EQUAL(collection.getNbOfLocalCells(),new_collection.getNbOfLocalCells()); @@ -991,13 +991,13 @@ void MEDPARTITIONERTest::launchMetisOrScotchMedpartitionerOnTestMeshes(std::stri { int res; string cmd,execName,sourceName,targetName; - + execName=getPartitionerExe(); - + cmd="which "+execName+" 2>/dev/null 1>/dev/null"; //no trace res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL_MESSAGE(execName + " - INVALID PATH TO medpartitioner", 0, res); - + cmd=execName+" --ndomains=2 --split-method="+MetisOrScotch; //on same proc sourceName=_file_name; targetName=_file_name; @@ -1006,7 +1006,7 @@ void 
MEDPARTITIONERTest::launchMetisOrScotchMedpartitionerOnTestMeshes(std::stri if (_verbose) cout<getLevel0Mesh(false); ParaMEDMEM::MEDCouplingUMesh* faceMesh=initialMesh->getLevelM1Mesh(false); - + cmd=execName+" --ndomains=5 --split-method="+MetisOrScotch; //on same proc sourceName=fileName; targetName=fileName; @@ -1055,7 +1055,7 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForMesh(std res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL(0, res); input=targetName+".xml"; - + MEDPARTITIONER::ParaDomainSelector parallelizer(false); MEDPARTITIONER::MeshCollection collection(input,parallelizer); CPPUNIT_ASSERT_EQUAL(3, collection.getMeshDimension()); @@ -1065,14 +1065,14 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForMesh(std for (std::size_t i = 0; i < cellMeshes.size(); i++) nbcells+=cellMeshes[i]->getNumberOfCells(); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), nbcells); - + std::vectorfaceMeshes=collection.getFaceMesh(); CPPUNIT_ASSERT_EQUAL(5, (int) faceMeshes.size()); int nbfaces=0; for (std::size_t i=0; i < faceMeshes.size(); i++) nbfaces+=faceMeshes[i]->getNumberOfCells(); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), nbfaces); - + //merge split meshes and test equality cmd=execName+" --ndomains=1 --split-method="+MetisOrScotch; //on same proc sourceName=targetName+".xml"; @@ -1082,25 +1082,25 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForMesh(std if (_verbose) cout<getLevel0Mesh(false); ParaMEDMEM::MEDCouplingUMesh* refusedFaceMesh=refusedMesh->getLevelM1Mesh(false); - + CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), refusedCellMesh->getNumberOfCells()); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), refusedFaceMesh->getNumberOfCells()); - + /*not the good job ParaMEDMEM::MEDCouplingMesh* mergeCell=cellMesh->mergeMyselfWith(refusedCellMesh); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), mergeCell->getNumberOfCells()); - + ParaMEDMEM::MEDCouplingMesh* mergeFace=faceMesh->mergeMyselfWith(refusedFaceMesh); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), mergeFace->getNumberOfCells()); - + CPPUNIT_ASSERT(faceMesh->isEqual(refusedFaceMesh,1e-12)); */ - + std::vector meshes; std::vector corr; meshes.push_back(cellMesh); @@ -1108,7 +1108,7 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForMesh(std meshes.push_back(refusedCellMesh); MEDCouplingUMesh* fusedCell=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), fusedCell->getNumberOfCells()); - + meshes.resize(0); for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); @@ -1118,7 +1118,7 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForMesh(std meshes.push_back(refusedFaceMesh); MEDCouplingUMesh* fusedFace=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), fusedFace->getNumberOfCells()); - + for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); fusedFace->decrRef(); @@ -1141,10 +1141,10 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnC execName=getPartitionerExe(); fileName=_file_name; fileName.replace(fileName.find(".med"),4,"_WithVecFieldOnCells.med"); - + ParaMEDMEM::MEDFileUMesh* initialMesh=ParaMEDMEM::MEDFileUMesh::New(fileName.c_str(),_mesh_name.c_str()); ParaMEDMEM::MEDCouplingUMesh* cellMesh=initialMesh->getLevel0Mesh(false); - + cmd=execName+" --ndomains=5 --split-method="+MetisOrScotch; //on same proc 
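// Illustration only (not part of the patch): the tests above drive the
// stand-alone medpartitioner executable through system() and assert on its
// exit status. A condensed, self-contained version of that pattern is
// sketched below; runPartitioner is a hypothetical helper, the paths are
// placeholders, and the --input-file/--output-file option names are an
// assumption (only --ndomains and --split-method are visible in this hunk).
#include <cstdlib>
#include <sstream>
#include <string>
#include <iostream>

static int runPartitioner(const std::string &exe, const std::string &input,
                          const std::string &output, int ndomains,
                          const std::string &method)
{
  std::ostringstream cmd;
  cmd << exe << " --ndomains=" << ndomains
      << " --split-method=" << method
      << " --input-file=" << input      // assumed flag name
      << " --output-file=" << output;   // assumed flag name
  return std::system(cmd.str().c_str()); // 0 means the partitioner reported success
}

int main()
{
  int res = runPartitioner("./medpartitioner", "tmp_testMesh.med",
                           "tmp_testMesh_partitionedTo5_", 5, "metis");
  std::cout << "exit status: " << res << std::endl;
  return res == 0 ? 0 : 1;
}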
sourceName=fileName; targetName=fileName; @@ -1154,7 +1154,7 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnC res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL(0, res); input=targetName+".xml"; - + //merge split meshes and test equality cmd=execName+" --ndomains=1 --split-method="+MetisOrScotch; //on same proc sourceName=targetName+".xml"; @@ -1164,13 +1164,13 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnC if (_verbose) cout<getLevel0Mesh(false); - + CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), refusedCellMesh->getNumberOfCells()); - + std::vector meshes; std::vector corr; meshes.push_back(cellMesh); @@ -1178,22 +1178,22 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnC meshes.push_back(refusedCellMesh); MEDCouplingUMesh* fusedCell=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), fusedCell->getNumberOfCells()); - + MEDCouplingFieldDouble* field1=MEDLoader::ReadFieldCell(fileName.c_str(),initialMesh->getName().c_str(),0,"VectorFieldOnCells",0,1); MEDCouplingFieldDouble* field2=MEDLoader::ReadFieldCell(refusedName.c_str(),refusedCellMesh->getName().c_str(),0,"VectorFieldOnCells",0,1); - + int nbcells=corr[1]->getNumberOfTuples(); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), nbcells); //use corr to test equality of field DataArrayDouble* f1=field1->getArray(); DataArrayDouble* f2=field2->getArray(); - if (_verbose>300) + if (_verbose>300) { cout<<"\nf1 : "<reprZip(); cout<<"\nf2 : "<reprZip(); //field2->advancedRepradvancedRepr(); for (std::size_t i = 0; i < corr.size(); i++) cout << "\ncorr " << i << " : " << corr[i]->reprZip(); - + } int nbequal=0; int nbcomp=field1->getNumberOfComponents(); @@ -1211,7 +1211,7 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnC } } CPPUNIT_ASSERT_EQUAL(nbcells*nbcomp, nbequal); - + for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); field1->decrRef(); @@ -1230,10 +1230,10 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnG execName=getPartitionerExe(); fileName=_file_name; fileName.replace(fileName.find(".med"),4,"_WithVecFieldOnGaussNe.med"); - + ParaMEDMEM::MEDFileUMesh* initialMesh=ParaMEDMEM::MEDFileUMesh::New(fileName.c_str(),_mesh_name.c_str()); ParaMEDMEM::MEDCouplingUMesh* cellMesh=initialMesh->getLevel0Mesh(false); - + cmd=execName+" --ndomains=5 --split-method="+MetisOrScotch; //on same proc sourceName=fileName; targetName=fileName; @@ -1243,7 +1243,7 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnG res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL(0, res); input=targetName+".xml"; - + //merge split meshes and test equality cmd=execName+" --ndomains=1 --split-method="+MetisOrScotch; //on same proc sourceName=targetName+".xml"; @@ -1253,13 +1253,13 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnG if (_verbose) cout<getLevel0Mesh(false); - + CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), refusedCellMesh->getNumberOfCells()); - + std::vector meshes; std::vector corr; meshes.push_back(cellMesh); @@ -1267,22 +1267,22 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnG meshes.push_back(refusedCellMesh); MEDCouplingUMesh* fusedCell=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), fusedCell->getNumberOfCells()); - + MEDCouplingFieldDouble* 
field1=MEDLoader::ReadField(ON_GAUSS_NE,fileName.c_str(),initialMesh->getName().c_str(),0,"MyFieldOnGaussNE",5,6); MEDCouplingFieldDouble* field2=MEDLoader::ReadField(ON_GAUSS_NE,refusedName.c_str(),refusedCellMesh->getName().c_str(),0,"MyFieldOnGaussNE",5,6); - + int nbcells=corr[1]->getNumberOfTuples(); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), nbcells); //use corr to test equality of field DataArrayDouble* f1=field1->getArray(); DataArrayDouble* f2=field2->getArray(); - if (_verbose>300) + if (_verbose>300) { cout << "\nf1 : " << f1->reprZip(); //123.4 for 12th cell,3rd component, 4th gausspoint cout << "\nf2 : " << f2->reprZip(); //field2->advancedRepradvancedRepr(); for (std::size_t i = 0; i < corr.size(); i++) cout << "\ncorr " << i << " : " << corr[i]->reprZip(); - + } int nbequal=0; int nbptgauss=8; @@ -1301,7 +1301,7 @@ void MEDPARTITIONERTest::verifyMetisOrScotchMedpartitionerOnSmallSizeForFieldOnG } } CPPUNIT_ASSERT_EQUAL(nbcells*nbcomp*nbptgauss, nbequal); - + for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); field1->decrRef(); diff --git a/src/MEDPartitioner/Test/MEDPARTITIONERTest.hxx b/src/MEDPartitioner/Test/MEDPARTITIONERTest.hxx index 5e437cb32..f6cd5a13b 100644 --- a/src/MEDPartitioner/Test/MEDPARTITIONERTest.hxx +++ b/src/MEDPartitioner/Test/MEDPARTITIONERTest.hxx @@ -56,7 +56,7 @@ class MEDPARTITIONERTEST_EXPORT MEDPARTITIONERTest : public CppUnit::TestFixture CPPUNIT_TEST( testMeshCollectionComplexPartitionScotch ); CPPUNIT_TEST( testScotchSmallSize ); #endif - + #if defined(HAVE_MPI) #if defined(MED_ENABLE_PARMETIS) //test with mpi on system @@ -85,13 +85,14 @@ public: int _nb_target_huge; std::string _mesh_name; //initial test mesh file med int _verbose; - + //for utils void setSize(int ni, int nj, int nk); void setSmallSize(); void setMedianSize(); void setbigSize(); std::string getPartitionerExe() const; + std::string getPartitionerParaExe() const; ParaMEDMEM::MEDCouplingUMesh * buildCUBE3DMesh(); ParaMEDMEM::MEDCouplingUMesh * buildFACE3DMesh(); ParaMEDMEM::MEDCouplingUMesh * buildCARRE3DMesh(); @@ -113,7 +114,7 @@ public: void launchMedpartitionerOnTestMeshes(); void launchMedpartitionerOnHugeTestMeshes(); void deleteTestMeshes(); - + //for CPPUNIT_TEST void setUp(); void tearDown(); @@ -129,7 +130,7 @@ public: void testMeshCollectionComplexPartitionScotch(); void testScotchSmallSize(); #endif - + #if defined(HAVE_MPI) void testMpirunSmallSize(); void testMpirunMedianSize(); diff --git a/src/MEDPartitioner/Test/MEDPARTITIONERTestPara.cxx b/src/MEDPartitioner/Test/MEDPARTITIONERTestPara.cxx index 5daaa525d..c38d51755 100644 --- a/src/MEDPartitioner/Test/MEDPARTITIONERTestPara.cxx +++ b/src/MEDPartitioner/Test/MEDPARTITIONERTestPara.cxx @@ -50,18 +50,35 @@ using namespace ParaMEDMEM; using namespace MEDPARTITIONER; #if defined(HAVE_MPI) +std::string MEDPARTITIONERTest::getPartitionerParaExe() const +{ + std::string execName; + if ( getenv("MEDTOOL_ROOT_DIR") ) + { + execName=getenv("MEDTOOL_ROOT_DIR"); //.../INSTALL/MED + execName+="/bin/medpartitioner_para"; + } + else + { + execName = get_current_dir_name(); + execName += "/../../MEDPartitioner/medpartitioner_para"; + if (! 
std::ifstream(execName.c_str())) + CPPUNIT_FAIL("Can't find medpartitioner_para, please set MEDTOOL_ROOT_DIR"); + } + return execName; +} + void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForMesh() { int res; string fileName,cmd,execName,sourceName,targetName,input; - execName=getenv("MED_ROOT_DIR"); //.../INSTALL/MED - execName+="/bin/salome/medpartitioner_para"; + execName=getPartitionerParaExe(); fileName=_file_name_with_faces; - + ParaMEDMEM::MEDFileUMesh* initialMesh=ParaMEDMEM::MEDFileUMesh::New(fileName.c_str(),_mesh_name.c_str()); ParaMEDMEM::MEDCouplingUMesh* cellMesh=initialMesh->getLevel0Mesh(false); ParaMEDMEM::MEDCouplingUMesh* faceMesh=initialMesh->getLevelM1Mesh(false); - + cmd="mpirun -np 5 "+execName+" --ndomains=5 --split-method=metis"; //on same proc sourceName=fileName; targetName=fileName; @@ -71,7 +88,7 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForMesh() res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL(0, res); input=targetName+".xml"; - + MEDPARTITIONER::ParaDomainSelector parallelizer(false); MEDPARTITIONER::MeshCollection collection(input,parallelizer); CPPUNIT_ASSERT_EQUAL(3, collection.getMeshDimension()); @@ -81,14 +98,14 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForMesh() for (std::size_t i = 0; i < cellMeshes.size(); i++) nbcells+=cellMeshes[i]->getNumberOfCells(); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), nbcells); - + std::vectorfaceMeshes=collection.getFaceMesh(); CPPUNIT_ASSERT_EQUAL(5, (int) faceMeshes.size()); int nbfaces=0; for (std::size_t i=0; i < faceMeshes.size(); i++) nbfaces+=faceMeshes[i]->getNumberOfCells(); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), nbfaces); - + //merge split meshes and test equality cmd="mpirun -np 1 "+execName+" --ndomains=1 --split-method=metis"; //on same proc sourceName=targetName+".xml"; @@ -98,25 +115,25 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForMesh() if (_verbose) cout<getLevel0Mesh(false); ParaMEDMEM::MEDCouplingUMesh* refusedFaceMesh=refusedMesh->getLevelM1Mesh(false); - + CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), refusedCellMesh->getNumberOfCells()); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), refusedFaceMesh->getNumberOfCells()); - + /*not the good job ParaMEDMEM::MEDCouplingMesh* mergeCell=cellMesh->mergeMyselfWith(refusedCellMesh); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), mergeCell->getNumberOfCells()); - + ParaMEDMEM::MEDCouplingMesh* mergeFace=faceMesh->mergeMyselfWith(refusedFaceMesh); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), mergeFace->getNumberOfCells()); - + CPPUNIT_ASSERT(faceMesh->isEqual(refusedFaceMesh,1e-12)); */ - + std::vector meshes; std::vector corr; meshes.push_back(cellMesh); @@ -124,7 +141,7 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForMesh() meshes.push_back(refusedCellMesh); MEDCouplingUMesh* fusedCell=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), fusedCell->getNumberOfCells()); - + meshes.resize(0); for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); @@ -134,7 +151,7 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForMesh() meshes.push_back(refusedFaceMesh); MEDCouplingUMesh* fusedFace=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(faceMesh->getNumberOfCells(), fusedFace->getNumberOfCells()); - + for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); fusedFace->decrRef(); @@ -152,14 +169,13 @@ void 
MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnCells() { int res; string fileName,cmd,execName,sourceName,targetName,input; - execName=getenv("MED_ROOT_DIR"); //.../INSTALL/MED - execName+="/bin/salome/medpartitioner_para"; + execName=getPartitionerParaExe(); fileName=_file_name; fileName.replace(fileName.find(".med"),4,"_WithVecFieldOnCells.med"); - + ParaMEDMEM::MEDFileUMesh* initialMesh=ParaMEDMEM::MEDFileUMesh::New(fileName.c_str(),_mesh_name.c_str()); ParaMEDMEM::MEDCouplingUMesh* cellMesh=initialMesh->getLevel0Mesh(false); - + cmd="mpirun -np 5 "+execName+" --ndomains=5 --split-method=metis"; //on same proc sourceName=fileName; targetName=fileName; @@ -169,7 +185,7 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnCells() res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL(0, res); input=targetName+".xml"; - + //merge split meshes and test equality cmd="mpirun -np 1 "+execName+" --ndomains=1 --split-method=metis"; //on same proc sourceName=targetName+".xml"; @@ -179,13 +195,13 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnCells() if (_verbose) cout<getLevel0Mesh(false); - + CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), refusedCellMesh->getNumberOfCells()); - + std::vector meshes; std::vector corr; meshes.push_back(cellMesh); @@ -193,22 +209,22 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnCells() meshes.push_back(refusedCellMesh); MEDCouplingUMesh* fusedCell=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), fusedCell->getNumberOfCells()); - + MEDCouplingFieldDouble* field1=MEDLoader::ReadFieldCell(fileName.c_str(),initialMesh->getName().c_str(),0,"VectorFieldOnCells",0,1); MEDCouplingFieldDouble* field2=MEDLoader::ReadFieldCell(refusedName.c_str(),refusedCellMesh->getName().c_str(),0,"VectorFieldOnCells",0,1); - + int nbcells=corr[1]->getNumberOfTuples(); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), nbcells); //use corr to test equality of field DataArrayDouble* f1=field1->getArray(); DataArrayDouble* f2=field2->getArray(); - if (_verbose>300) + if (_verbose>300) { cout<<"\nf1 : "<reprZip(); cout<<"\nf2 : "<reprZip(); //field2->advancedRepradvancedRepr(); for (std::size_t i = 0; i < corr.size(); i++) cout << "\ncorr " << i << " : " << corr[i]->reprZip(); - + } int nbequal=0; int nbcomp=field1->getNumberOfComponents(); @@ -226,7 +242,7 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnCells() } } CPPUNIT_ASSERT_EQUAL(nbcells*nbcomp, nbequal); - + for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); field1->decrRef(); @@ -240,14 +256,13 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnGaussNe() { int res; string fileName,cmd,execName,sourceName,targetName,input; - execName=getenv("MED_ROOT_DIR"); //.../INSTALL/MED - execName+="/bin/salome/medpartitioner_para"; + execName=getPartitionerParaExe(); fileName=_file_name; fileName.replace(fileName.find(".med"),4,"_WithVecFieldOnGaussNe.med"); - + ParaMEDMEM::MEDFileUMesh* initialMesh=ParaMEDMEM::MEDFileUMesh::New(fileName.c_str(),_mesh_name.c_str()); ParaMEDMEM::MEDCouplingUMesh* cellMesh=initialMesh->getLevel0Mesh(false); - + cmd="mpirun -np 5 "+execName+" --ndomains=5 --split-method=metis"; //on same proc sourceName=fileName; targetName=fileName; @@ -257,7 +272,7 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnGaussNe() res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL(0, res); input=targetName+".xml"; - + //merge split meshes and 
test equality cmd="mpirun -np 1 "+execName+" --ndomains=1 --split-method=metis"; //on same proc sourceName=targetName+".xml"; @@ -267,13 +282,13 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnGaussNe() if (_verbose) cout<getLevel0Mesh(false); - + CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), refusedCellMesh->getNumberOfCells()); - + std::vector meshes; std::vector corr; meshes.push_back(cellMesh); @@ -281,22 +296,22 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnGaussNe() meshes.push_back(refusedCellMesh); MEDCouplingUMesh* fusedCell=MEDCouplingUMesh::FuseUMeshesOnSameCoords(meshes,0,corr); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), fusedCell->getNumberOfCells()); - + MEDCouplingFieldDouble* field1=MEDLoader::ReadField(ON_GAUSS_NE,fileName.c_str(),initialMesh->getName().c_str(),0,"MyFieldOnGaussNE",5,6); MEDCouplingFieldDouble* field2=MEDLoader::ReadField(ON_GAUSS_NE,refusedName.c_str(),refusedCellMesh->getName().c_str(),0,"MyFieldOnGaussNE",5,6); - + int nbcells=corr[1]->getNumberOfTuples(); CPPUNIT_ASSERT_EQUAL(cellMesh->getNumberOfCells(), nbcells); //use corr to test equality of field DataArrayDouble* f1=field1->getArray(); DataArrayDouble* f2=field2->getArray(); - if (_verbose>300) + if (_verbose>300) { cout << "\nf1 : " << f1->reprZip(); //123.4 for 12th cell,3rd component, 4th gausspoint cout << "\nf2 : " << f2->reprZip(); //field2->advancedRepradvancedRepr(); for (std::size_t i = 0; i < corr.size(); i++) cout << "\ncorr " << i << " : " << corr[i]->reprZip(); - + } int nbequal=0; int nbptgauss=8; @@ -315,7 +330,7 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnGaussNe() } } CPPUNIT_ASSERT_EQUAL(nbcells*nbcomp*nbptgauss, nbequal); - + for (std::size_t i = 0; i < corr.size(); i++) corr[i]->decrRef(); field1->decrRef(); @@ -327,8 +342,8 @@ void MEDPARTITIONERTest::verifyMedpartitionerOnSmallSizeForFieldOnGaussNe() void MEDPARTITIONERTest::launchMedpartitionerOnTestMeshes() { - - /* examples + + /* examples export INFI=/home/vb144235/resources/blade.med //no need export MESH=Fuse_1 export INFI=tmp_testMeshxxx.med @@ -339,17 +354,16 @@ void MEDPARTITIONERTest::launchMedpartitionerOnTestMeshes() */ int res; string cmd,execName,sourceName,targetName; - + res=system("which mpirun 2>/dev/null 1>/dev/null"); //no trace CPPUNIT_ASSERT_EQUAL(0, res); - - execName=getenv("MED_ROOT_DIR"); //.../INSTALL/MED - execName+="/bin/salome/medpartitioner_para"; - + + execName=getPartitionerParaExe(); + cmd="which "+execName+" 2>/dev/null 1>/dev/null"; //no trace res=system(cmd.c_str()); CPPUNIT_ASSERT_EQUAL(0, res); - + cmd="mpirun -np 2 "+execName+" --ndomains=2 --split-method=metis"; //on same proc sourceName=_file_name; targetName=_file_name; @@ -358,7 +372,7 @@ void MEDPARTITIONERTest::launchMedpartitionerOnTestMeshes() if (_verbose) cout< ret; + int meshDim, spaceDim, numberOfNodes; + std::vector< std::vector< std::pair > > typesDistrib(MEDLoader::GetUMeshGlobalInfo(fileName,mName,meshDim,spaceDim,numberOfNodes)); + std::vector types; + std::vector distrib; + for(std::vector< std::vector< std::pair > >::const_iterator it0=typesDistrib.begin();it0!=typesDistrib.end();it0++) + for(std::vector< std::pair >::const_iterator it1=(*it0).begin();it1!=(*it0).end();it1++) + { + types.push_back((*it1).first); + int tmp[3]; + DataArray::GetSlice(0,(*it1).second,1,iPart,nbOfParts,tmp[0],tmp[1]); + tmp[2]=1; + distrib.insert(distrib.end(),tmp,tmp+3); + } + ret=MEDFileUMesh::LoadPartOf(fid,mName,types,distrib,dt,it,mrs); + return 
ret.retn(); +} + +MEDFileMeshes *ParaMEDFileMeshes::New(int iPart, int nbOfParts, const std::string& fileName) +{ + std::vector ms(MEDLoader::GetMeshNames(fileName)); + MEDCouplingAutoRefCountObjectPtr ret(MEDFileMeshes::New()); + for(std::vector::const_iterator it=ms.begin();it!=ms.end();it++) + { + MEDCouplingAutoRefCountObjectPtr mesh(ParaMEDFileMesh::New(iPart,nbOfParts,fileName,(*it))); + ret->pushMesh(mesh); + } + return ret.retn(); +} + +MEDFileMeshes *ParaMEDFileMeshes::ParaNew(int iPart, int nbOfParts, const MPI_Comm& com, const MPI_Info& nfo, const std::string& fileName) +{ + std::vector ms(MEDLoader::GetMeshNames(fileName)); + MEDCouplingAutoRefCountObjectPtr ret(MEDFileMeshes::New()); + for(std::vector::const_iterator it=ms.begin();it!=ms.end();it++) + { + MEDCouplingAutoRefCountObjectPtr mesh(ParaMEDFileMesh::ParaNew(iPart,nbOfParts,com,nfo,fileName,(*it))); + ret->pushMesh(mesh); + } + return ret.retn(); +} diff --git a/src/ParaMEDLoader/ParaMEDFileMesh.hxx b/src/ParaMEDLoader/ParaMEDFileMesh.hxx new file mode 100644 index 000000000..f034c471d --- /dev/null +++ b/src/ParaMEDLoader/ParaMEDFileMesh.hxx @@ -0,0 +1,61 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (EDF R&D) + +#ifndef __PARAMEDFILEMESH_HXX__ +#define __PARAMEDFILEMESH_HXX__ + +#include "med.h" + +#include "mpi.h" + +#include + +namespace ParaMEDMEM +{ + class MEDFileMesh; + class MEDFileUMesh; + class MEDFileMeshes; + class MEDFileMeshReadSelector; + + class ParaMEDFileMesh + { + public: + static MEDFileMesh *New(int iPart, int nbOfParts, const std::string& fileName, const std::string& mName, int dt=-1, int it=-1, MEDFileMeshReadSelector *mrs=0); + static MEDFileMesh *ParaNew(int iPart, int nbOfParts, const MPI_Comm& com, const MPI_Info& nfo, const std::string& fileName, const std::string& mName, int dt=-1, int it=-1, MEDFileMeshReadSelector *mrs=0); + }; + + class ParaMEDFileUMesh + { + public: + static MEDFileUMesh *New(int iPart, int nbOfParts, const std::string& fileName, const std::string& mName, int dt=-1, int it=-1, MEDFileMeshReadSelector *mrs=0); + static MEDFileUMesh *ParaNew(int iPart, int nbOfParts, const MPI_Comm& com, const MPI_Info& nfo, const std::string& fileName, const std::string& mName, int dt=-1, int it=-1, MEDFileMeshReadSelector *mrs=0); + private: + static MEDFileUMesh *NewPrivate(med_idt fid, int iPart, int nbOfParts, const std::string& fileName, const std::string& mName, int dt, int it, MEDFileMeshReadSelector *mrs); + }; + + class ParaMEDFileMeshes + { + public: + static MEDFileMeshes *New(int iPart, int nbOfParts, const std::string& fileName); + static MEDFileMeshes *ParaNew(int iPart, int nbOfParts, const MPI_Comm& com, const 
MPI_Info& nfo, const std::string& fileName); + }; +} + +#endif diff --git a/src/ParaMEDLoader/ParaMEDLoader.cxx b/src/ParaMEDLoader/ParaMEDLoader.cxx new file mode 100644 index 000000000..43a4eec9d --- /dev/null +++ b/src/ParaMEDLoader/ParaMEDLoader.cxx @@ -0,0 +1,65 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#include "ParaMEDLoader.hxx" +#include "MEDLoader.hxx" +#include "ParaMESH.hxx" +#include "BlockTopology.hxx" +#include "MEDCouplingUMesh.hxx" + +#include + +using namespace ParaMEDMEM; + +ParaMEDLoader::ParaMEDLoader() +{ +} + +void ParaMEDLoader::WriteParaMesh(const char *fileName, ParaMEDMEM::ParaMESH *mesh) +{ + if(!mesh->getBlockTopology()->getProcGroup()->containsMyRank()) + return ; + int myRank=mesh->getBlockTopology()->getProcGroup()->myRank(); + int nbDomains=mesh->getBlockTopology()->getProcGroup()->size(); + std::vector fileNames(nbDomains); + for(int i=0;igetCellMesh()->getName().c_str()); + MEDLoader::WriteUMesh(fileNames[myRank].c_str(),dynamic_cast(mesh->getCellMesh()),true); +} + +/*! + * This method builds the master file 'fileName' of a parallel MED file defined in 'fileNames'. + */ +void ParaMEDLoader::WriteMasterFile(const char *fileName, const std::vector& fileNames, const char *meshName) +{ + int nbOfDom=fileNames.size(); + std::ofstream fs(fileName); + fs << "#MED Fichier V 2.3" << " " << std::endl; + fs << "#"<<" " << std::endl; + fs << nbOfDom <<" " << std::endl; + for(int i=0;i +#include + +namespace ParaMEDMEM +{ + class ParaMESH; + class ParaFIELD; +} + +class ParaMEDLoader +{ +public: + static void WriteParaMesh(const char *fileName, ParaMEDMEM::ParaMESH *mesh); + static void WriteMasterFile(const char *fileName, const std::vector& fileNames, const char *meshName); +private: + ParaMEDLoader(); +}; + +#endif diff --git a/src/ParaMEDMEM/BASICS_JR b/src/ParaMEDMEM/BASICS_JR new file mode 100644 index 000000000..61a724d45 --- /dev/null +++ b/src/ParaMEDMEM/BASICS_JR @@ -0,0 +1,339 @@ + +Le document de specification : +============================== + +Globalement le document de specification correspond a +l'implementation qui a ete faite avec : + +. Transport-ParaMEDMEM qui a ete enrichi avec la classe MPI_Access + +. Presentation-ParaMEDMEM qui a ete enrichi avec la classe + MPI_AccessDEC + + +La conception correspondant a cette specification est restee +la meme : + +. MPI_Access gere pour un ProcessorGroup (IntraCommunicator) : + - Les structures MPI_Request et MPI_Status + - La valeur des "tags" MPI + - Les requetes d'ecritures et de lectures asynchrones + - Les communications en "Point a Point" [I]Send, [I]Recv ainsi + que [I]SendRecv. 
+   - Unlike the MPI API, [I]SendRecv only concerns one and the
+     same "target".
+ - The asynchronous communication controls Wait, Test,
+   WaitAll, TestAll, [I]Probe, Cancel and CancelAll.
+ - As requested, only the "useful" methods have been
+   implemented.
+ - Calls to [I]Send or [I]Recv with a NULL sendbuff/recvbuff
+   or with a zero sendcount/recvcount are ignored.
+ - The collective communication methods are not implemented
+   in MPI_Access.
+ - The two "Cancel" methods concern either an IRecv that has
+   already been submitted or a pending message (with no IRecv
+   submitted yet). They group together the various MPI API
+   calls that are needed (IProbe, IRecv, Wait, Test_Canceled ...).
+
+. MPI_AccessDEC uses the services of MPI_Access for a
+  ProcessorGroup (IntraCommunicator) and manages:
+  - Collective communications in "point-to-point" mode
+    (AllToAll[v], synchronous or asynchronous).
+  - Times and interpolation.
+  - The [I]Send calls with their buffers (delete []).
+  - The [I]Recv calls.
+  - The finalization of message sends and receptions in the
+    destructor, so that no message remains pending and the
+    buffers can be released.
+
+
+MPI_Access and "tags" (or "MPITags"):
+=====================================
+
+. The constructor optionally allows a range of tags to be
+  fixed for use: [BaseTag , MaxTag].
+  By default this is [ 0 , MPI_TAG_UB], MPI_TAG_UB being the
+  maximum value of an MPI implementation (minimum value 32767,
+  i.e. 2**15-1). On awa, with the lam implementation, MPI_TAG_UB
+  is 7353944. The MPI standard specifies that this value must
+  be the same in the processes started with mpirun.
+  When the same IntraCommunicator is used simultaneously in the
+  same process (or when several IntraCommunicators have a
+  non-empty intersection), this can avoid any ambiguity and
+  help debugging.
+
+. In MPI_Access the tags are made of two parts
+  (#define ModuloTag 10):
+  + The last decimal digit corresponds to the MPI_DataType
+    (1 for "time" messages, 2 for MPI_INT and 3 for MPI_DOUBLE).
+  + The value of the other digits corresponds to a circular
+    numbering of the messages.
+  + A "time" message and its associated data message have the
+    same message number (but different types and therefore
+    different tags).
+
+. For a message sent from a "source" process to a "target"
+  process, the source process holds _SendMPITag[target] (it
+  contains the last "tag" used to send messages to the target
+  process).
+  And the "target" process that receives this message holds
+  _RecvMPITag[source] (it contains the last "tag" used to
+  receive messages from the source process).
+  Naturally, according to the MPI standard, the values of these
+  tags are the same.
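As an illustration of the two-part tag layout just described, here is a minimal sketch; the helper names below are invented for illustration and are not the actual MPI_Access code:

    // Illustrative only: tag = <circular message number> * ModuloTag + <datatype digit>
    #define ModuloTag 10
    enum { TimeTagDigit = 1, IntTagDigit = 2, DoubleTagDigit = 3 };  // last decimal digit

    inline int buildMPITag(int messageNumber, int dataTypeDigit)
      { return messageNumber * ModuloTag + dataTypeDigit; }
    inline int messageNumberOf(int tag) { return tag / ModuloTag; }
    inline int dataTypeDigitOf(int tag) { return tag % ModuloTag; }

    // A "time" message and its data message share the message number:
    // buildMPITag(12, TimeTagDigit) == 121 and buildMPITag(12, DoubleTagDigit) == 123.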
+ Cet identifieur "RequestId" correspond a une structure RequestStruct + de MPI_Access a laquelle on accede avec la map + "_MapOfRequestStruct". + Cette structure RequestStruct permet de gerer MPI_Request et + MPI_Status * de MPI et permet d'obtenir des informations sur + la requete : target, send/recv, tag, [a]synchrone, type, outcount. + +. C'est cet identifieur qui peut etre utilise pour controler une + requete asynchrone via MPI_Access : Wait, Test, Probe, etc... + +. En pratique "RequestId" est simplement un entier de l'intervalle + [0 , 2**32-1]. Il y a uniquement un compteur cyclique global + aussi bien pour les [I]Send que pour les [I]Recv. + +. Ces "RequestIds" et leur structures associees facilitent les + communications asynchrones. + Par exemple on a mpi_access->Wait( int RequestId ) + au lieu de MPI_Wait(MPI_Request *request, MPI_Status *status) + avec gestion de status. + +. L'API de MPI_Access peut fournir les "SendRequestIds" d'un "target", + les "RecvRequestIds" d'un "source" ou bien les "SendRequestIds" de + tous les "targets" ou les "RecvRequestIds" de tous les "sources". + Cela permet d'eviter leur gestion au niveau de Presentation-ParaMEDMEM. + + +MPI_AccessDEC : +=============== + +. Comme la classe DEC, il est base sur local_group et distant_group + ce qui forme un MPI_union_group et donc un IntraCommunicator. + +. Il permet de choisir le mode synchrone ou asynchrone (par defaut). + Le meme programme peut fonctionner en synchrone ou en asynchrone + sans devoir etre modifie. + +. Il permet de choisir un mode d'interpolation (actuellement + uniquement une interpolation lineaire) ou bien un mode sans + interpolation (par defaut). Ceci pour les communications collectives. + Avec interpolation les communications collectives transmettent et + recoivent un message "temps" en plus des donnees. + +. Il implemente AllToAll[v] en "Point a Point" avec ou sans interpolation. + +. Il gere les buffers d'envoi de messages. Il les detruit donc + lorsqu'ils sont disponibles. + +. Il cree et utilise MPI_Access. + + +MPI_AccessDEC et la gestion des SendBuffers : +============================================= + +. Comme dans les communications collectives on n'envoie que des + parties du meme buffer à chaque process "target", il faut s'assurer + en asynchrone que toutes ces parties sont disponibles pour + pouvoir liberer le buffer. + +. On suppose que ces buffers ont ete alloues avec un new double[] + +. La structure SendBuffStruct permet de conserver l'adresse du buffer + et de gerer un compteur de references de ce buffer. Elle comporte + aussi MPI_Datatype pour pouvoir faire un delete [] (double *) ... + lorsque le compteur est null. + +. La map _MapOfSendBuffers etablit la correspondance entre chaque + RequestId obtenu de MPI_Access->ISend(...) et un SendBuffStruct + pour chaque "target" d'une partie du buffer. + +. Tout cela ne concerne que les envois asynchrones. En synchrone, + on detruit senbuf juste apres l'avoir transmis. + + +MPI_AccessDEC et la gestion des RecvBuffers : +============================================= + +S'il n'y a pas d'interpolation, rien de particulier n'est fait. + +Avec interpolation pour chaque target : +--------------------------------------- +. On a _TimeMessages[target] qui est un vecteur de TimesMessages. + On en a 2 dans notre cas avec une interpolation lineaire qui + contiennent le time(t0)/deltatime precedent et le dernier + time(t1)/deltatime. + +. 
+
+MPI_AccessDEC and the management of the RecvBuffers:
+====================================================
+
+If there is no interpolation, nothing special is done.
+
+With interpolation, for each target:
+------------------------------------
+. We have _TimeMessages[target], which is a vector of
+  TimesMessages. With a linear interpolation there are 2 of
+  them, holding the previous time(t0)/deltatime and the last
+  time(t1)/deltatime.
+
+. We have _DataMessages[target], which is a vector of
+  DatasMessages. With a linear interpolation there are 2 of
+  them, holding the data obtained by Recv at the previous
+  time(t0)/deltatime and at the last time(t1)/deltatime.
+
+. At the time _t(t*) of the current process, the interpolation
+  between the values of the 2 DatasMessages is performed, and
+  its result is returned in the part of recvbuf that corresponds
+  to the target, provided that t0 < t* <= t1.
+
+. Because the "deltatimes" differ between processes, we may have
+  t0 < t1 < t*, in which case an extrapolation is performed.
+
+. The vectors _OutOfTime, _DataMessagesRecvCount and
+  _DataMessagesType contain, for each target, true if t* > the
+  last t1, the recvcount and the MPI_Datatype used to finalize
+  the handling of the messages at the end.
+
+
+Steps of the collective communications of MPI_AccessDEC:
+========================================================
+
+AllToAll[v] : the arguments are the same as in MPI except
+------------- MPI_Comm, which is useless (already known to
+              MPI_AccessDEC and MPI_Access).
+
+              If there is a TimeInterpolator, AllToAll[v]Time is
+              called.
+
+              Otherwise, CheckSent is called for the
+              asynchronous exchanges (see below) and SendRecv is
+              called for each "target".
+
+AllToAll[v]Time :
+-----------------
+
+. CheckSent() :
+  + calls SendRequestIds of MPI_Access in order to obtain all
+    the RequestIds of the messages sent to all the "targets".
+  + For each RequestId, calls Test of MPI_Access to know whether
+    the buffer is free (flag = true). For the FinalCheckSent,
+    Wait is called instead of Test.
+  + If the buffer is free, the counter of the SendBuffStruct
+    structure obtained through _MapOfSendBuffers is decremented
+    (see "MPI_AccessDEC and the management of the SendBuffers"
+    above).
+  + If the counter is zero, the TimeMessage or the SendBuffer is
+    destroyed, depending on the DataType.
+  + Then the SendBuffStruct structure is destroyed before this
+    item is removed (erase) from _MapOfSendBuffers.
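A condensed sketch of this CheckSent bookkeeping follows. It reuses the SendBuffStruct and map sketched earlier; the real code calls Test or Wait on MPI_Access for every RequestId, which is abstracted here as a caller-supplied list of request ids whose buffers have been reported free:

    #include <vector>

    struct TimeMessage { double time, deltatime; int tag; };  // placeholder for the MPI_Access structure

    void checkSentSketch(const std::vector<int>& freedRequestIds,
                         std::map<int, SendBuffStruct*>& mapOfSendBuffers)
    {
      for (std::size_t i = 0; i < freedRequestIds.size(); i++)
        {
          std::map<int, SendBuffStruct*>::iterator it = mapOfSendBuffers.find(freedRequestIds[i]);
          if (it == mapOfSendBuffers.end())
            continue;
          SendBuffStruct *sb = it->second;
          if (--sb->Counter == 0)                            // last pending send using this buffer
            {
              if (sb->DataType == MPI_DOUBLE)
                delete [] static_cast<double *>(sb->SendBuffer);
              else
                delete static_cast<TimeMessage *>(sb->SendBuffer);  // "time" message
              delete sb;
            }
          mapOfSendBuffers.erase(it);
        }
    }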
+. CheckTime(recvcount , recvtype , target , UntilEnd)
+  + Initially, the first "time message" is read into
+    &(*_TimeMessages)[target][1] and the first data message into
+    the allocated buffer (*_DataMessages)[target][1].
+  + By convention, the deltatime of a time message is zero if it
+    is the last one.
+  + While loop: _t(t*) is the current time of the process.
+    "As long as _t(t*) is greater than the time of the "target",
+    (*_TimeMessages)[target][1].time, and
+    (*_TimeMessages)[target][1].deltatime is not zero",
+    so that at the end of the loop we have:
+    _t(t*) <= (*_TimeMessages)[target][1].time with
+    _t(t*) > (*_TimeMessages)[target][0].time
+    or else we have the last time message of the "target".
+
+  + If this is the finalization of the receptions of the time
+    and data messages (UntilEnd is true), the loop is run until
+    (*_TimeMessages)[target][1].deltatime is found to be zero.
+
+  + In the loop:
+    The last time message is copied into the previous time
+    message and the next time message is read.
+    The data buffer of the previous time is destroyed.
+    The pointer of the last data buffer is copied into the
+    previous one.
+    A new last data buffer (*_DataMessages)[target][1] is
+    allocated and the corresponding data are read into this
+    buffer.
+
+  + If the current time of the process is greater than the last
+    time (*_TimeMessages)[target][1].time of the target, true is
+    assigned to (*_OutOfTime)[target].
+    (*_TimeMessages)[target][1].deltatime is then zero.
+
+. CheckTime + DoRecv + DoInterp
+  + For each target, CheckTime is called.
+  + If there is a TimeInterpolator and if the time message of
+    the target is not the first one, the interpolator is called;
+    it stores its results in the part of the reception buffer
+    that corresponds to the "target".
+  + Otherwise, the data received for this first time step are
+    copied into the part of the reception buffer that
+    corresponds to the "target".
+
+
+Presentation-ParaMEDMEM:
+========================
+
+. Minor modifications have been made in Presentation-ParaMEDMEM
+  so that these new features can be used. In particular, nothing
+  has been disruptively reorganized: the old mode of operation
+  naturally remains available.
+
+. This relies on three new options created with registerOption
+  in the constructor of InterpKernelDEC:
+  + Asynchronous : true or false (default)
+  + TimeInterpolation : WithoutTimeInterp (default) or LinearTimeInterp
+    typedef enum{WithoutTimeInterp,LinearTimeInterp} TimeInterpolationMethod;
+    in MPI_AccessDEC.hxx
+  + AllToAllMethod : Native (default) or PointToPoint
+    typedef enum{Native,PointToPoint} AllToAllMethod;
+    in MxN_Mapping.hxx
+
+. The options are chosen through the Data Exchange Channel
+  (a fuller usage sketch is given just after this section):
+  + ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
+  + dec.setOption("Asynchronous",true);
+  + dec.setOption("TimeInterpolation",LinearTimeInterp);
+  + dec.setOption("AllToAllMethod",PointToPoint);
+
+. In dec.synchronize(),
+  + an InterpolationMatrix object is created,
+    which itself creates an MxN_Mapping object,
+    which itself now creates an MPI_AccessDEC object;
+  + the chosen AllToAllMethod option is passed to MxN_Mapping
+    through the InterpolationMatrix;
+  + the values of the Asynchronous and TimeInterpolation options
+    are passed to MPI_AccessDEC: Asynchronous and
+    SetTimeInterpolator methods of MPI_AccessDEC.
+
+. ParaMEDMEM::InterpKernelDEC now overloads the recvData() and
+  sendData() methods:
+  + void InterpKernelDEC::recvData( double time ), which calls
+    SetTime(time) of MPI_AccessDEC and then recvData();
+  + void InterpKernelDEC::sendData( double time , double deltatime ),
+    which calls SetTime(time,deltatime) of MPI_AccessDEC and
+    then sendData().
+
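Putting the three options and the time-stamped overloads together, a typical coupling loop could look like the following sketch. The setOption/sendData/recvData calls are the ones quoted above; the groups, the attached field, the number of time steps and the using-declaration for the ParaMEDMEM enums are assumed to be set up elsewhere:

    using namespace ParaMEDMEM;          // for LinearTimeInterp and PointToPoint

    InterpKernelDEC dec(*source_group, *target_group);
    dec.setOption("Asynchronous", true);
    dec.setOption("TimeInterpolation", LinearTimeInterp);
    dec.setOption("AllToAllMethod", PointToPoint);

    dec.attachLocalField(field);         // ParaFIELD built beforehand on the local mesh
    dec.synchronize();

    double time = 0., deltatime = 0.1;
    for (int step = 0; step < nb_steps; step++, time += deltatime)
      {
        if (source_group->containsMyRank())
          // deltatime must be zero on the last time step (see DoSend above)
          dec.sendData(time, (step == nb_steps - 1) ? 0. : deltatime);
        else
          dec.recvData(time);
      }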
recvData() et sendData() de ParaMEDMEM::InterpKernelDEC + appellent multiply et transposeMultiply de l'InterpolationMatrix + qui appellent sendRecv et reverseSendRecv de MxN_Mapping + qui appellent comm_interface.allToAllV en mode "Native" + ou bien MPI_AccessDEC::AllToAllv en mode "PointToPoint" + diff --git a/src/ParaMEDMEM/BlockTopology.cxx b/src/ParaMEDMEM/BlockTopology.cxx new file mode 100644 index 000000000..8f6b4cea8 --- /dev/null +++ b/src/ParaMEDMEM/BlockTopology.cxx @@ -0,0 +1,336 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "BlockTopology.hxx" +#include "MEDCouplingMemArray.hxx" +#include "MEDCouplingCMesh.hxx" +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "ComponentTopology.hxx" +#include "InterpKernelUtilities.hxx" + +#include +#include +#include +#include + +using namespace std; + +namespace ParaMEDMEM +{ + + //!converts a pair to a global number + std::pair BlockTopology::globalToLocal(const int global) const + { + int subdomain_id=0; + int position=global; + int size=_nb_elems; + int size_procs=_proc_group->size(); + int increment=size; + vectoraxis_position(_dimension); + vectoraxis_offset(_dimension); + for (int idim=0; idim<_dimension; idim++) + { + int axis_size=_local_array_indices[idim].size()-1; + int axis_nb_elem=_local_array_indices[idim][axis_size]; + increment=increment/axis_nb_elem; + int proc_increment = size_procs/(axis_size); + int axis_pos=position/increment; + position=position%increment; + int iaxis=1; + while (_local_array_indices[idim][iaxis]<=axis_pos) + { + subdomain_id+=proc_increment; + iaxis++; + } + axis_position[idim]=axis_pos-_local_array_indices[idim][iaxis-1]; + axis_offset[idim]=iaxis; + } + int local=0; + int local_increment=1; + for (int idim=_dimension-1; idim>=0; idim--) + { + local+=axis_position[idim]*local_increment; + local_increment*=_local_array_indices[idim][axis_offset[idim]]-_local_array_indices[idim][axis_offset[idim]-1]; + } + return make_pair(subdomain_id,local); + } + + //!converts local number to a global number + int BlockTopology::localToGlobal(const pair local) const + { + + int subdomain_id=local.first; + int global=0; + int loc=local.second; + int increment=_nb_elems; + int proc_increment=_proc_group->size(); + int local_increment=getNbLocalElements(); + for (int idim=0; idim < _dimension; idim++) + { + int axis_size=_local_array_indices[idim].size()-1; + int axis_nb_elem=_local_array_indices[idim][axis_size]; + increment=axis_nb_elem==0?0:increment/axis_nb_elem; + proc_increment = proc_increment/(axis_size); + int proc_axis=subdomain_id/proc_increment; + subdomain_id=subdomain_id%proc_increment; + int 
local_axis_nb_elem=_local_array_indices[idim][proc_axis+1]-_local_array_indices[idim][proc_axis]; + local_increment = (local_axis_nb_elem==0)?0:(local_increment/local_axis_nb_elem); + int iaxis=((local_increment==0)?0:(loc/local_increment))+_local_array_indices[idim][proc_axis]; + global+=increment*iaxis; + loc = (local_increment==0)?0:(loc%local_increment); + } + return global; + } + + //Retrieves the local number of elements + int BlockTopology::getNbLocalElements()const + { + int position=_proc_group->myRank(); + int nb_elem = 1; + int increment=1; + for (int i=_dimension-1; i>=0; i--) + { + increment *=_nb_procs_per_dim[i]; + int idim=position%increment; + position=position/increment; + int imin=_local_array_indices[i][idim]; + int imax=_local_array_indices[i][idim+1]; + nb_elem*=(imax-imin); + } + return nb_elem; + } + + /*! + * Constructor of a block topology from a grid. + * This preliminary version simply splits along the first axis + * instead of making the best choice with respect to the + * values of the different axes. + */ + BlockTopology::BlockTopology(const ProcessorGroup& group, MEDCouplingCMesh *grid): + _dimension(grid->getSpaceDimension()), _proc_group(&group), _owns_processor_group(false) + { + vector axis_length(_dimension); + _nb_elems=1; + for (int idim=0; idim <_dimension; idim++) + { + DataArrayDouble *arr=grid->getCoordsAt(idim); + axis_length[idim]=arr->getNbOfElems(); + _nb_elems*=axis_length[idim]; + } + //default splitting along 1st dimension + _local_array_indices.resize(_dimension); + _nb_procs_per_dim.resize(_dimension); + + _local_array_indices[0].resize(_proc_group->size()+1); + _local_array_indices[0][0]=0; + _nb_procs_per_dim[0]=_proc_group->size(); + + for (int i=1; i<=_proc_group->size(); i++) + { + _local_array_indices[0][i]=_local_array_indices[0][i-1]+ + axis_length[0]/_proc_group->size(); + if (i<= axis_length[0]%_proc_group->size()) + _local_array_indices[0][i]+=1; + } + for (int i=1; i<_dimension; i++) + { + _local_array_indices[i].resize(2); + _local_array_indices[i][0]=0; + _local_array_indices[i][1]=axis_length[i]; + _nb_procs_per_dim[i]=1; + } + _cycle_type.resize(_dimension); + for (int i=0; i<_dimension; i++) + _cycle_type[i]=ParaMEDMEM::Block; + } + + /*! + * Creation of a block topology by composing + * a geometrical topology and a component topology. + * This constructor is intended for creating fields + * for which the parallel distribution is made on the + * components of the field rather than on the geometrical + * partitioning of the underlying mesh. 
+ * + */ + BlockTopology::BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo):_owns_processor_group(false) + { + // so far, the block topology can only be created if the proc group + // is either on geom_topo or on comp_topo + if (geom_topo.getProcGroup()->size()>1 && comp_topo.nbBlocks()>1) + throw INTERP_KERNEL::Exception(LOCALIZED("BlockTopology cannot yet be constructed with both complex geo and components topology")); + + if (comp_topo.nbComponents()==1) + { + *this=geom_topo; + return; + } + else + { + _dimension = geom_topo.getDimension()+1; + if (comp_topo.nbBlocks()>1) + _proc_group=comp_topo.getProcGroup(); + else + _proc_group=geom_topo.getProcGroup(); + _local_array_indices=geom_topo._local_array_indices; + vector comp_indices = *(comp_topo.getBlockIndices()); + _local_array_indices.push_back(comp_indices); + _nb_procs_per_dim=geom_topo._nb_procs_per_dim; + _nb_procs_per_dim.push_back(comp_topo.nbBlocks()); + _cycle_type=geom_topo._cycle_type; + _cycle_type.push_back(Block); + _nb_elems=geom_topo.getNbElements()*comp_topo.nbComponents(); + } + } + + /*! Constructor for creating a one-dimensional + * topology from a processor group and a local + * number of elements on each processor + * + * The function must be called only by the processors belonging + * to group \a group. Calling it from a processor not belonging + * to \a group will cause an MPI error, while calling from a subset + * of \a group will result in a deadlock. + */ + BlockTopology::BlockTopology(const ProcessorGroup& group, int nb_elem):_dimension(1),_proc_group(&group),_owns_processor_group(false) + { + int* nbelems_per_proc = new int[group.size()]; + const MPIProcessorGroup* mpi_group=dynamic_cast(_proc_group); + const MPI_Comm* comm=mpi_group->getComm(); + int nbtemp=nb_elem; + mpi_group->getCommInterface().allGather(&nbtemp, 1, MPI_INT, + nbelems_per_proc, 1, MPI_INT, + *comm); + _nb_elems=0; + + //splitting along only dimension + _local_array_indices.resize(1); + _nb_procs_per_dim.resize(1); + + _local_array_indices[0].resize(_proc_group->size()+1); + _local_array_indices[0][0]=0; + _nb_procs_per_dim[0]=_proc_group->size(); + + for (int i=1; i<=_proc_group->size(); i++) + { + _local_array_indices[0][i]=_local_array_indices[0][i-1]+ + nbelems_per_proc[i-1]; + _nb_elems+=nbelems_per_proc[i-1]; + } + _cycle_type.resize(1); + _cycle_type[0]=ParaMEDMEM::Block; + delete[] nbelems_per_proc; + } + + BlockTopology::~BlockTopology() + { + if (_owns_processor_group) + delete _proc_group; + } + + /*! Retrieves the min and max indices of the domain stored locally + * for each dimension. The output vector has the topology dimension + * as a size and each pair contains min and max. Indices + * range from min to max-1. 
+ */ + std::vector > BlockTopology::getLocalArrayMinMax() const + { + vector > local_indices (_dimension); + int myrank=_proc_group->myRank(); + int increment=1; + for (int i=_dimension-1; i>=0; i--) + { + increment *=_nb_procs_per_dim[i]; + int idim=myrank%increment; + local_indices[i].first=_local_array_indices[i][idim]; + local_indices[i].second=_local_array_indices[i][idim+1]; + cout << local_indices[i].first << " "<< local_indices[i].second< buffer; + + buffer.push_back(_dimension); + buffer.push_back(_nb_elems); + for (int i=0; i<_dimension; i++) + { + buffer.push_back(_nb_procs_per_dim[i]); + buffer.push_back(_cycle_type[i]); + buffer.push_back(_local_array_indices[i].size()); + for (int j=0; j<(int)_local_array_indices[i].size(); j++) + buffer.push_back(_local_array_indices[i][j]); + } + + //serializing the comm group + int size_comm=_proc_group->size(); + buffer.push_back(size_comm); + MPIProcessorGroup world_group(_proc_group->getCommInterface()); + for (int i=0; i procs; + int size_comm=*(ptr_serializer++); + for (int i=0; i + +namespace ParaMEDMEM +{ + class ComponentTopology; + class MEDCouplingCMesh; + + typedef enum{Block,Cycle} CYCLE_TYPE; + + class BlockTopology : public Topology + { + public: + BlockTopology() { } + BlockTopology(const ProcessorGroup& group, MEDCouplingCMesh *grid); + BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo); + BlockTopology(const ProcessorGroup& group, int nb_elem); + virtual ~BlockTopology(); + //!Retrieves the number of elements for a given topology + int getNbElements()const { return _nb_elems; } + int getNbLocalElements() const; + const ProcessorGroup* getProcGroup()const { return _proc_group; } + std::pair globalToLocal (const int) const ; + int localToGlobal (const std::pair) const; + std::vector > getLocalArrayMinMax() const ; + int getDimension() const { return _dimension; } + void serialize(int* & serializer, int& size) const ; + void unserialize(const int* serializer, const CommInterface& comm_interface); + private: + //dimension : 2 or 3 + int _dimension; + //proc array + std::vector _nb_procs_per_dim; + //stores the offsets vector + std::vector > _local_array_indices; + //stores the cycle type (block or cyclic) + std::vector _cycle_type; + //Processor group + const ProcessorGroup* _proc_group; + //nb of elements + int _nb_elems; + bool _owns_processor_group; + }; +} + +#endif diff --git a/src/ParaMEDMEM/CMakeLists.txt b/src/ParaMEDMEM/CMakeLists.txt new file mode 100644 index 000000000..a94cb3ede --- /dev/null +++ b/src/ParaMEDMEM/CMakeLists.txt @@ -0,0 +1,71 @@ +# Copyright (C) 2012-2015 CEA/DEN, EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# Author : Anthony Geay (CEA/DEN) + +ADD_DEFINITIONS(${MPI_DEFINITIONS}) + +INCLUDE_DIRECTORIES( + ${MPI_INCLUDE_DIRS} + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/../MEDCoupling + ${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL + ${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL/Bases + ${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL/Geometric2D + ${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL/ExprEval + ${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL/GaussPoints + ) + +SET(paramedmem_SOURCES + ProcessorGroup.cxx + MPIProcessorGroup.cxx + ParaMESH.cxx + ComponentTopology.cxx + MPIAccess.cxx + InterpolationMatrix.cxx + OverlapInterpolationMatrix.cxx + StructuredCoincidentDEC.cxx + ExplicitCoincidentDEC.cxx + InterpKernelDEC.cxx + ElementLocator.cxx + OverlapElementLocator.cxx + MPIAccessDEC.cxx + TimeInterpolator.cxx + LinearTimeInterpolator.cxx + DEC.cxx + DisjointDEC.cxx + OverlapDEC.cxx + ExplicitTopology.cxx + MxN_Mapping.cxx + OverlapMapping.cxx + ICoCoMEDField.cxx + ICoCoField.cxx + ParaFIELD.cxx + ParaGRID.cxx + BlockTopology.cxx + ) + +ADD_LIBRARY(paramedmem SHARED ${paramedmem_SOURCES}) +TARGET_LINK_LIBRARIES(paramedmem medcoupling ${MPI_LIBRARIES}) +INSTALL(TARGETS paramedmem EXPORT ${PROJECT_NAME}TargetGroup DESTINATION ${MEDTOOL_INSTALL_LIBS}) + +FILE(GLOB paramedmem_HEADERS_HXX "${CMAKE_CURRENT_SOURCE_DIR}/*.hxx") +INSTALL(FILES ${paramedmem_HEADERS_HXX} DESTINATION ${MEDTOOL_INSTALL_HEADERS}) + +# To allow usage as SWIG dependencies: +SET(paramedmem_HEADERS_HXX PARENT_SCOPE) diff --git a/src/ParaMEDMEM/CommInterface.cxx b/src/ParaMEDMEM/CommInterface.cxx new file mode 100644 index 000000000..ae9e2d9f1 --- /dev/null +++ b/src/ParaMEDMEM/CommInterface.cxx @@ -0,0 +1,63 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "CommInterface.hxx" + +namespace ParaMEDMEM +{ + /*! \defgroup comm_interface CommInterface + Class \a CommInterface is the gateway to the MPI library. + It is a helper class that gathers the calls to the MPI + library that are made in the ParaMEDMEM library. This gathering + allows easier gathering of information about the communication + in the library. + + It is typically called after the MPI_Init() call in a program. It is afterwards passed as a parameter to the constructors of ParaMEDMEM objects so that they access the MPI library via the CommInterface. 
+ + As an example, the following code excerpt initializes a processor group made of the zero processor. + + \verbatim + #include "CommInterface.hxx" + #include "ProcessorGroup.hxx" + + int main(int argc, char** argv) + { + //initialization + MPI_Init(&argc, &argv); + ParaMEDMEM::CommInterface comm_interface; + + //setting up a processor group with proc 0 + set procs; + procs.insert(0); + ParaMEDMEM::ProcessorGroup group(procs, comm_interface); + + //cleanup + MPI_Finalize(); + } + \endverbatim + */ + + CommInterface::CommInterface() + { + } + + CommInterface::~CommInterface() + { + } +} diff --git a/src/ParaMEDMEM/CommInterface.hxx b/src/ParaMEDMEM/CommInterface.hxx new file mode 100644 index 000000000..ae430eddd --- /dev/null +++ b/src/ParaMEDMEM/CommInterface.hxx @@ -0,0 +1,92 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __COMMINTERFACE_HXX__ +#define __COMMINTERFACE_HXX__ + +#include +namespace ParaMEDMEM +{ + + class CommInterface + { + public: + CommInterface(){} + virtual ~CommInterface(){} + int worldSize() const { + int size; + MPI_Comm_size(MPI_COMM_WORLD, &size); + return size;} + int commSize(MPI_Comm comm, int* size) const { return MPI_Comm_size(comm,size); } + int commRank(MPI_Comm comm, int* rank) const { return MPI_Comm_rank(comm,rank); } + int commGroup(MPI_Comm comm, MPI_Group* group) const { return MPI_Comm_group(comm, group); } + int groupIncl(MPI_Group group, int size, int* ranks, MPI_Group* group_output) const { return MPI_Group_incl(group, size, ranks, group_output); } + int commCreate(MPI_Comm comm, MPI_Group group, MPI_Comm* comm_output) const { return MPI_Comm_create(comm,group,comm_output); } + int groupFree(MPI_Group* group) const { return MPI_Group_free(group); } + int commFree(MPI_Comm* comm) const { return MPI_Comm_free(comm); } + + int send(void* buffer, int count, MPI_Datatype datatype, int target, int tag, MPI_Comm comm) const { return MPI_Send(buffer,count, datatype, target, tag, comm); } + int recv(void* buffer, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status* status) const { return MPI_Recv(buffer,count, datatype, source, tag, comm, status); } + int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, + int dest, int sendtag, void* recvbuf, int recvcount, + MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, + MPI_Status* status) { return MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype, source, recvtag, comm,status); } + + int Isend(void* buffer, int count, MPI_Datatype datatype, int target, + int tag, MPI_Comm comm, MPI_Request *request) const { return MPI_Isend(buffer,count, datatype, target, tag, comm, request); } + int 
Irecv(void* buffer, int count, MPI_Datatype datatype, int source, + int tag, MPI_Comm comm, MPI_Request* request) const { return MPI_Irecv(buffer,count, datatype, source, tag, comm, request); } + + int wait(MPI_Request *request, MPI_Status *status) const { return MPI_Wait(request, status); } + int test(MPI_Request *request, int *flag, MPI_Status *status) const { return MPI_Test(request, flag, status); } + int requestFree(MPI_Request *request) const { return MPI_Request_free(request); } + int waitany(int count, MPI_Request *array_of_requests, int *index, MPI_Status *status) const { return MPI_Waitany(count, array_of_requests, index, status); } + int testany(int count, MPI_Request *array_of_requests, int *index, int *flag, MPI_Status *status) const { return MPI_Testany(count, array_of_requests, index, flag, status); } + int waitall(int count, MPI_Request *array_of_requests, MPI_Status *array_of_status) const { return MPI_Waitall(count, array_of_requests, array_of_status); } + int testall(int count, MPI_Request *array_of_requests, int *flag, MPI_Status *array_of_status) const { return MPI_Testall(count, array_of_requests, flag, array_of_status); } + int waitsome(int incount, MPI_Request *array_of_requests,int *outcount, int *array_of_indices, MPI_Status *array_of_status) const { return MPI_Waitsome(incount, array_of_requests, outcount, array_of_indices, array_of_status); } + int testsome(int incount, MPI_Request *array_of_requests, int *outcount, + int *array_of_indices, MPI_Status *array_of_status) const { return MPI_Testsome(incount, array_of_requests, outcount, array_of_indices, array_of_status); } + int probe(int source, int tag, MPI_Comm comm, MPI_Status *status) const { return MPI_Probe(source, tag, comm, status) ; } + int Iprobe(int source, int tag, MPI_Comm comm, int *flag, MPI_Status *status) const { return MPI_Iprobe(source, tag, comm, flag, status) ; } + int cancel(MPI_Request *request) const { return MPI_Cancel(request); } + int testCancelled(MPI_Status *status, int *flag) const { return MPI_Test_cancelled(status, flag); } + int barrier(MPI_Comm comm) const { return MPI_Barrier(comm); } + int errorString(int errorcode, char *string, int *resultlen) const { return MPI_Error_string(errorcode, string, resultlen); } + int getCount(MPI_Status *status, MPI_Datatype datatype, int *count) const { return MPI_Get_count(status, datatype, count); } + + int broadcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm) const { return MPI_Bcast(buffer, count, datatype, root, comm); } + int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype, + void* recvbuf, int recvcount, MPI_Datatype recvtype, + MPI_Comm comm) const { return MPI_Allgather(sendbuf,sendcount, sendtype, recvbuf, recvcount, recvtype, comm); } + int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype, + void* recvbuf, int recvcount, MPI_Datatype recvtype, + MPI_Comm comm) const { return MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); } + int allToAllV(void* sendbuf, int* sendcounts, int* senddispls, + MPI_Datatype sendtype, void* recvbuf, int* recvcounts, + int* recvdispls, MPI_Datatype recvtype, + MPI_Comm comm) const { return MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype, recvbuf, recvcounts, recvdispls, recvtype, comm); } + + int reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, + MPI_Op op, int root, MPI_Comm comm) const { return MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm); } + int allReduce(void* sendbuf, 
void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) const { return MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm); } + }; +} + +#endif /*COMMINTERFACE_HXX_*/ diff --git a/src/ParaMEDMEM/ComponentTopology.cxx b/src/ParaMEDMEM/ComponentTopology.cxx new file mode 100644 index 000000000..8af706e59 --- /dev/null +++ b/src/ParaMEDMEM/ComponentTopology.cxx @@ -0,0 +1,115 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ComponentTopology.hxx" +#include "ProcessorGroup.hxx" +#include "InterpolationUtils.hxx" + +namespace ParaMEDMEM +{ + /* Generic constructor for \a nb_comp components equally parted + * in \a nb_blocks blocks + */ + ComponentTopology::ComponentTopology(int nb_comp, ProcessorGroup* group):_proc_group(group) + { + int nb_blocks=group->size(); + + if (nb_blocks>nb_comp) + throw INTERP_KERNEL::Exception("ComponentTopology Number of components must be larger than number of blocks"); + + _component_array.resize(nb_blocks+1); + _component_array[0]=0; + for (int i=1; i<=nb_blocks; i++) + { + _component_array[i]=_component_array[i-1]+nb_comp/nb_blocks; + if (i<=nb_comp%nb_blocks) + _component_array[i]++; + } + } + + /* Generic constructor for \a nb_comp components equally parted + * in \a nb_blocks blocks + */ + ComponentTopology::ComponentTopology(int nb_comp, int nb_blocks):_proc_group(0) + { + if (nb_blocks>nb_comp) + throw INTERP_KERNEL::Exception("ComponentTopology Number of components must be larger than number of blocks"); + + _component_array.resize(nb_blocks+1); + _component_array[0]=0; + for (int i=1; i<=nb_blocks; i++) + { + _component_array[i]=_component_array[i-1]+nb_comp/nb_blocks; + if (i<=nb_comp%nb_blocks) + _component_array[i]++; + } + + } + + //!Constructor for one block of \a nb_comp components + ComponentTopology::ComponentTopology(int nb_comp):_proc_group(0) + { + + _component_array.resize(2); + _component_array[0]=0; + _component_array[1]=nb_comp; + + } + + //! 
Constructor for one component + ComponentTopology::ComponentTopology():_proc_group(0) + { + _component_array.resize(2); + _component_array[0]=0; + _component_array[1]=1; + + } + + ComponentTopology::~ComponentTopology() + { + } + + int ComponentTopology::nbLocalComponents() const + { + if (_proc_group==0) + return nbComponents(); + + int nbcomp; + int myrank = _proc_group->myRank(); + if (myrank!=-1) + nbcomp = _component_array[myrank+1]-_component_array[myrank]; + else + nbcomp=0; + return nbcomp; + } + + int ComponentTopology::firstLocalComponent() const + { + if (_proc_group==0) + return 0; + + int icomp; + int myrank = _proc_group->myRank(); + if (myrank!=-1) + icomp = _component_array[myrank]; + else + icomp=-1; + return icomp; + } +} diff --git a/src/ParaMEDMEM/ComponentTopology.hxx b/src/ParaMEDMEM/ComponentTopology.hxx new file mode 100644 index 000000000..de11e3efe --- /dev/null +++ b/src/ParaMEDMEM/ComponentTopology.hxx @@ -0,0 +1,56 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __COMPONENTTOPOLOGY_HXX__ +#define __COMPONENTTOPOLOGY_HXX__ + +#include "Topology.hxx" + +#include + +namespace ParaMEDMEM +{ + class ProcessorGroup; + + class ComponentTopology + { + public: + ComponentTopology(int nb_comp, ProcessorGroup* group); + ComponentTopology(int nb_comp, int nb_blocks); + ComponentTopology(int nb_comp); + ComponentTopology(); + virtual ~ComponentTopology(); + //!returns the number of MED components in the topology + int nbComponents() const { return _component_array.back(); } + //!returns the number of MED components on local processor + int nbLocalComponents() const ; + //!returns the number of the first MED component on local processor + int firstLocalComponent() const ; + //!returns the number of blocks in the topology + int nbBlocks()const {return _component_array.size()-1;} + //!returns the block structure + const std::vector* getBlockIndices() const { return &_component_array; } + const ProcessorGroup* getProcGroup()const { return _proc_group; } + private: + std::vector _component_array; + ProcessorGroup* _proc_group; + }; +} + +#endif /*COMPONENTTOPOLOGY_HXX_*/ diff --git a/src/ParaMEDMEM/DEC.cxx b/src/ParaMEDMEM/DEC.cxx new file mode 100644 index 000000000..cbd0ea45d --- /dev/null +++ b/src/ParaMEDMEM/DEC.cxx @@ -0,0 +1,47 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" +#include "DEC.hxx" +#include "ICoCoField.hxx" +#include "ICoCoMEDField.hxx" +#include "MPIProcessorGroup.hxx" + +#include + +namespace ParaMEDMEM +{ + DEC::DEC():_comm_interface(0) + { + } + + void DEC::copyFrom(const DEC& other) + { + _comm_interface=other._comm_interface; + } + + DEC::~DEC() + { + } +} diff --git a/src/ParaMEDMEM/DEC.hxx b/src/ParaMEDMEM/DEC.hxx new file mode 100644 index 000000000..1b0a8675f --- /dev/null +++ b/src/ParaMEDMEM/DEC.hxx @@ -0,0 +1,43 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __DEC_HXX__ +#define __DEC_HXX__ + +#include "MEDCouplingFieldDouble.hxx" +#include "NormalizedUnstructuredMesh.hxx" +#include "DECOptions.hxx" + +namespace ParaMEDMEM +{ + class CommInterface; + class DEC : public DECOptions + { + public: + DEC(); + void copyFrom(const DEC& other); + virtual void synchronize() = 0; + virtual void sendRecvData(bool way=true) = 0; + virtual ~DEC(); + protected: + const CommInterface* _comm_interface; + }; +} + +#endif diff --git a/src/ParaMEDMEM/DECOptions.hxx b/src/ParaMEDMEM/DECOptions.hxx new file mode 100644 index 000000000..5572ffdca --- /dev/null +++ b/src/ParaMEDMEM/DECOptions.hxx @@ -0,0 +1,74 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __DECOPTIONS_HXX__ +#define __DECOPTIONS_HXX__ + +#include + +namespace ParaMEDMEM +{ + //Enum describing the allToAll method used in the communication pattern + typedef enum { Native, PointToPoint } AllToAllMethod; + typedef enum { WithoutTimeInterp, LinearTimeInterp } TimeInterpolationMethod; + + class DECOptions + { + protected: + std::string _method; + bool _asynchronous; + TimeInterpolationMethod _timeInterpolationMethod; + AllToAllMethod _allToAllMethod; + bool _forcedRenormalization; + public: + DECOptions():_method("P0"), + _asynchronous(false), + _timeInterpolationMethod(WithoutTimeInterp), + _allToAllMethod(Native), + _forcedRenormalization(false) + { + } + + DECOptions(const DECOptions& deco) + { + _method=deco._method; + _timeInterpolationMethod=deco._timeInterpolationMethod; + _asynchronous=deco._asynchronous; + _forcedRenormalization=deco._forcedRenormalization; + _allToAllMethod=deco._allToAllMethod; + } + + const std::string& getMethod() const { return _method; } + void setMethod(const char *m) { _method=m; } + + TimeInterpolationMethod getTimeInterpolationMethod() const { return DECOptions::_timeInterpolationMethod; } + void setTimeInterpolationMethod(TimeInterpolationMethod it) { DECOptions::_timeInterpolationMethod=it; } + + bool getForcedRenormalization() const { return DECOptions::_forcedRenormalization; } + void setForcedRenormalization( bool dr) { DECOptions::_forcedRenormalization = dr; } + + bool getAsynchronous() const { return DECOptions::_asynchronous; } + void setAsynchronous( bool dr) { DECOptions::_asynchronous = dr; } + + AllToAllMethod getAllToAllMethod() const { return _allToAllMethod; } + void setAllToAllMethod(AllToAllMethod sp) { _allToAllMethod=sp; } + }; +} + +#endif diff --git a/src/ParaMEDMEM/DisjointDEC.cxx b/src/ParaMEDMEM/DisjointDEC.cxx new file mode 100644 index 000000000..f3f9bf666 --- /dev/null +++ b/src/ParaMEDMEM/DisjointDEC.cxx @@ -0,0 +1,386 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "DisjointDEC.hxx" +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" +#include "ICoCoField.hxx" +#include "ICoCoMEDField.hxx" +#include "MPIProcessorGroup.hxx" + +#include +#include + +/*! 
\defgroup dec DEC + * + * \section decintroduction Introduction + * + * Interface class for creation of a link between two + * processor groups for exhanging mesh or field data. + * The \c DEC is defined by attaching a field on the receiving or on the + * sending side. + * On top of attaching a \c ParaMEDMEM::FIELD, it is possible to + * attach a ICoCo::Field. This class is an abstract class that enables + * coupling of codes that respect the ICoCo interface \ref icoco. It has two implementations: + * one for codes that express their fields as \ref medoupling fields (ICoCo::MEDField). + * + * \section dec_options DEC Options + * Options supported by \c DEC objects are + * + * + * + * + *
<table border="1"> + * <tr><td>Option</td><td>Description</td><td>Default value</td></tr> + * <tr><td>ForcedRenormalization</td><td>After receiving data, the target field is renormalized so that the L2 norms of the source and target fields match.</td><td>false</td></tr> + * </table>
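+ *
+ * Each option can also be set through the typed setters declared in DECOptions.hxx above.
+ * A minimal sketch (\c InterpKernelDEC here stands for any concrete DEC, as in the excerpt below;
+ * \c source_group and \c target_group are the two processor groups):
+ *
+ * \code
+ * InterpKernelDEC dec(source_group,target_group);
+ * dec.setAsynchronous(false);                        // keep the default synchronous exchanges
+ * dec.setTimeInterpolationMethod(LinearTimeInterp);  // interpolate in time between two sends
+ * dec.setAllToAllMethod(PointToPoint);               // point-to-point messages instead of a native all-to-all
+ * dec.setForcedRenormalization(true);                // renormalize the target field after reception
+ * \endcode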
+ + + The following code excerpt shows how to set options for an object that inherits from \c DEC : + + \code + InterpKernelDEC dec(source_group,target_group); + dec.setOptions("ForcedRenormalization",true); + dec.attachLocalField(field); + dec.synchronize(); + if (source_group.containsMyRank()) + dec.sendData(); + else + dec.recvData(); + \endcode +*/ + +namespace ParaMEDMEM +{ + + + /*! \addtogroup dec + @{ + */ + DisjointDEC::DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group):_local_field(0), + _source_group(&source_group), + _target_group(&target_group), + _owns_field(false), + _owns_groups(false) + { + _union_group = source_group.fuse(target_group); + } + + DisjointDEC::DisjointDEC(const DisjointDEC& s):DEC(s),_local_field(0),_union_group(0),_source_group(0),_target_group(0),_owns_field(false),_owns_groups(false) + { + copyInstance(s); + } + + DisjointDEC & DisjointDEC::operator=(const DisjointDEC& s) + { + cleanInstance(); + copyInstance(s); + return *this; + + } + + void DisjointDEC::copyInstance(const DisjointDEC& other) + { + DEC::copyFrom(other); + if(other._target_group) + { + _target_group=other._target_group->deepCpy(); + _owns_groups=true; + } + if(other._source_group) + { + _source_group=other._source_group->deepCpy(); + _owns_groups=true; + } + if (_source_group && _target_group) + _union_group = _source_group->fuse(*_target_group); + } + + DisjointDEC::DisjointDEC(const std::set& source_ids, const std::set& target_ids, const MPI_Comm& world_comm):_local_field(0), + _owns_field(false), + _owns_groups(true) + { + ParaMEDMEM::CommInterface comm; + // Create the list of procs including source and target + std::set union_ids; // source and target ids in world_comm + union_ids.insert(source_ids.begin(),source_ids.end()); + union_ids.insert(target_ids.begin(),target_ids.end()); + if(union_ids.size()!=(source_ids.size()+target_ids.size())) + throw INTERP_KERNEL::Exception("DisjointDEC constructor : source_ids and target_ids overlap partially or fully. This type of DEC does not support it ! 
OverlapDEC class could be the solution !"); + int* union_ranks_world=new int[union_ids.size()]; // ranks of sources and targets in world_comm + std::copy(union_ids.begin(), union_ids.end(), union_ranks_world); + + // Create a communicator on these procs + MPI_Group union_group,world_group; + comm.commGroup(world_comm,&world_group); + comm.groupIncl(world_group,union_ids.size(),union_ranks_world,&union_group); + MPI_Comm union_comm; + comm.commCreate(world_comm,union_group,&union_comm); + delete[] union_ranks_world; + + if (union_comm==MPI_COMM_NULL) + { // This process is not in union + _source_group=0; + _target_group=0; + _union_group=0; + return; + } + + // Translate source_ids and target_ids from world_comm to union_comm + int* source_ranks_world=new int[source_ids.size()]; // ranks of sources in world_comm + std::copy(source_ids.begin(), source_ids.end(),source_ranks_world); + int* source_ranks_union=new int[source_ids.size()]; // ranks of sources in union_comm + int* target_ranks_world=new int[target_ids.size()]; // ranks of targets in world_comm + std::copy(target_ids.begin(), target_ids.end(),target_ranks_world); + int* target_ranks_union=new int[target_ids.size()]; // ranks of targets in union_comm + MPI_Group_translate_ranks(world_group,source_ids.size(),source_ranks_world,union_group,source_ranks_union); + MPI_Group_translate_ranks(world_group,target_ids.size(),target_ranks_world,union_group,target_ranks_union); + std::set source_ids_union; + for (int i=0;i<(int)source_ids.size();i++) + source_ids_union.insert(source_ranks_union[i]); + std::set target_ids_union; + for (int i=0;i<(int)target_ids.size();i++) + target_ids_union.insert(target_ranks_union[i]); + delete [] source_ranks_world; + delete [] source_ranks_union; + delete [] target_ranks_world; + delete [] target_ranks_union; + + // Create the MPIProcessorGroups + _source_group = new MPIProcessorGroup(comm,source_ids_union,union_comm); + _target_group = new MPIProcessorGroup(comm,target_ids_union,union_comm); + _union_group = _source_group->fuse(*_target_group); + + } + + DisjointDEC::~DisjointDEC() + { + cleanInstance(); + } + + void DisjointDEC::cleanInstance() + { + if(_owns_field) + { + delete _local_field; + } + _local_field=0; + _owns_field=false; + if(_owns_groups) + { + delete _source_group; + delete _target_group; + } + _owns_groups=false; + _source_group=0; + _target_group=0; + delete _union_group; + _union_group=0; + } + + void DisjointDEC::setNature(NatureOfField nature) + { + if(_local_field) + _local_field->getField()->setNature(nature); + } + + /*! Attaches a local field to a DEC. + If the processor is on the receiving end of the DEC, the field + will be updated by a recvData() call. + Reversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side. + */ + void DisjointDEC::attachLocalField(const ParaFIELD *field, bool ownPt) + { + if(!isInUnion()) + return ; + if(_owns_field) + delete _local_field; + _local_field=field; + _owns_field=ownPt; + _comm_interface=&(field->getTopology()->getProcGroup()->getCommInterface()); + compareFieldAndMethod(); + } + + /*! Attaches a local field to a DEC. The method will test whether the processor + is on the source or the target side and will associate the mesh underlying the + field to the local side. + + If the processor is on the receiving end of the DEC, the field + will be updated by a recvData() call. 
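+ * For example, a minimal receive-side sketch (the names are illustrative; \c dec is assumed to be
+ * a concrete DEC such as \c InterpKernelDEC and \c field a MEDCouplingFieldDouble built on the
+ * local mesh):
+ * \code
+ * dec.attachLocalField(field); // wrapped internally in a ParaMESH/ParaFIELD owned by the DEC
+ * dec.synchronize();
+ * dec.recvData();              // 'field' now holds the values received from the source side
+ * \endcode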
+ Reversely, if the processor is on the sending end, the field will be read, possibly transformed, + and sent appropriately to the other side. + */ + + void DisjointDEC::attachLocalField(MEDCouplingFieldDouble *field) + { + if(!isInUnion()) + return ; + ProcessorGroup* local_group; + if (_source_group->containsMyRank()) + local_group=_source_group; + else if (_target_group->containsMyRank()) + local_group=_target_group; + else + throw INTERP_KERNEL::Exception("Invalid procgroup for field attachment to DEC"); + ParaMESH *paramesh=new ParaMESH(static_cast(const_cast(field->getMesh())),*local_group,field->getMesh()->getName()); + ParaFIELD *tmp=new ParaFIELD(field, paramesh, *local_group); + tmp->setOwnSupport(true); + attachLocalField(tmp,true); + //_comm_interface=&(local_group->getCommInterface()); + } + + /*! + Attaches a local field to a DEC. + If the processor is on the receiving end of the DEC, the field + will be updated by a recvData() call. + Reversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side. + The field type is a generic ICoCo Field, so that the DEC can couple a number of different fields : + - a ICoCo::MEDField, that is created from a MEDCoupling structure + + */ + void DisjointDEC::attachLocalField(const ICoCo::MEDField *field) + { + if(!isInUnion()) + return ; + if(!field) + throw INTERP_KERNEL::Exception("DisjointDEC::attachLocalField : ICoCo::MEDField pointer is NULL !"); + attachLocalField(field->getField()); + } + + /*! + Computes the field norm over its support + on the source side and renormalizes the field on the target side + so that the norms match. + + \f[ + I_{source}=\sum_{i=1}^{n_{source}}V_{i}.|\Phi^{source}_{i}|^2, + \f] + + \f[ + I_{target}=\sum_{i=1}^{n_{target}}V_{i}.|\Phi^{target}_{i}|^2, + \f] + + \f[ + \Phi^{target}:=\Phi^{target}.\sqrt{I_{source}/I_{target}}. + \f] + + */ + void DisjointDEC::renormalizeTargetField(bool isWAbs) + { + if (_source_group->containsMyRank()) + for (int icomp=0; icomp<_local_field->getField()->getArray()->getNumberOfComponents(); icomp++) + { + double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs); + double source_norm = total_norm; + _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0,* dynamic_cast(_union_group)->getComm()); + + } + if (_target_group->containsMyRank()) + { + for (int icomp=0; icomp<_local_field->getField()->getArray()->getNumberOfComponents(); icomp++) + { + double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs); + double source_norm=total_norm; + _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0,* dynamic_cast(_union_group)->getComm()); + + if (fabs(total_norm)>1e-100) + _local_field->getField()->applyLin(source_norm/total_norm,0.0,icomp+1); + } + } + } + /*! @} */ + + bool DisjointDEC::isInSourceSide() const + { + if(!_source_group) + return false; + return _source_group->containsMyRank(); + } + + bool DisjointDEC::isInTargetSide() const + { + if(!_target_group) + return false; + return _target_group->containsMyRank(); + } + + bool DisjointDEC::isInUnion() const + { + if(!_union_group) + return false; + return _union_group->containsMyRank(); + } + + void DisjointDEC::compareFieldAndMethod() const throw(INTERP_KERNEL::Exception) + { + if (_local_field) + { + TypeOfField entity = _local_field->getField()->getTypeOfField(); + if ( getMethod() == "P0" ) + { + if ( entity != ON_CELLS ) + throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." 
+ " For P0 interpolation, field must be on MED_CELL's"); + } + else if ( getMethod() == "P1" ) + { + if ( entity != ON_NODES ) + throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." + " For P1 interpolation, field must be on MED_NODE's"); + } + else if ( getMethod() == "P1d" ) + { + if ( entity != ON_CELLS ) + throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." + " For P1d interpolation, field must be on MED_CELL's"); + if ( _target_group->containsMyRank() ) + throw INTERP_KERNEL::Exception("Projection to P1d field not supported"); + } + else + { + throw INTERP_KERNEL::Exception("Unknown interpolation method. Possible methods: P0, P1, P1d"); + } + } + } + + /*! + If way==true, source procs call sendData() and target procs call recvData(). + if way==false, it's the other way round. + */ + void DisjointDEC::sendRecvData(bool way) + { + if(!isInUnion()) + return; + if(isInSourceSide()) + { + if(way) + sendData(); + else + recvData(); + } + else if(isInTargetSide()) + { + if(way) + recvData(); + else + sendData(); + } + } +} diff --git a/src/ParaMEDMEM/DisjointDEC.hxx b/src/ParaMEDMEM/DisjointDEC.hxx new file mode 100644 index 000000000..521353f5e --- /dev/null +++ b/src/ParaMEDMEM/DisjointDEC.hxx @@ -0,0 +1,86 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __DISJOINTDEC_HXX__ +#define __DISJOINTDEC_HXX__ + +#include "MEDCouplingFieldDouble.hxx" +#include "NormalizedUnstructuredMesh.hxx" +#include "DEC.hxx" + +#include +#include + +namespace ICoCo +{ + class MEDField; +} + +namespace ParaMEDMEM +{ + class ProcessorGroup; + class ParaFIELD; + + class DisjointDEC : public DEC + { + public: + DisjointDEC():_local_field(0),_union_group(0),_source_group(0),_target_group(0),_owns_field(false),_owns_groups(false) { } + DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group); + DisjointDEC(const DisjointDEC&); + DisjointDEC &operator=(const DisjointDEC& s); + DisjointDEC(const std::set& src_ids, const std::set& trg_ids, + const MPI_Comm& world_comm=MPI_COMM_WORLD); + void setNature(NatureOfField nature); + void attachLocalField( MEDCouplingFieldDouble *field); + void attachLocalField(const ParaFIELD *field, bool ownPt=false); + void attachLocalField(const ICoCo::MEDField *field); + + virtual void prepareSourceDE() = 0; + virtual void prepareTargetDE() = 0; + virtual void recvData() = 0; + virtual void sendData() = 0; + void sendRecvData(bool way=true); + virtual void synchronize() = 0; + virtual ~DisjointDEC(); + virtual void computeProcGroup() { } + void renormalizeTargetField(bool isWAbs); + // + ProcessorGroup *getSourceGrp() const { return _source_group; } + ProcessorGroup *getTargetGrp() const { return _target_group; } + bool isInSourceSide() const; + bool isInTargetSide() const; + bool isInUnion() const; + protected: + void compareFieldAndMethod() const throw(INTERP_KERNEL::Exception); + void cleanInstance(); + void copyInstance(const DisjointDEC& other); + protected: + const ParaFIELD* _local_field; + //! Processor group representing the union of target and source processors + ProcessorGroup* _union_group; + ProcessorGroup* _source_group; + ProcessorGroup* _target_group; + + const CommInterface* _comm_interface; + bool _owns_field; + bool _owns_groups; + }; +} + +#endif diff --git a/src/ParaMEDMEM/ElementLocator.cxx b/src/ParaMEDMEM/ElementLocator.cxx new file mode 100644 index 000000000..a7fcfc28e --- /dev/null +++ b/src/ParaMEDMEM/ElementLocator.cxx @@ -0,0 +1,718 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include "CommInterface.hxx" +#include "ElementLocator.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "MEDCouplingAutoRefCountObjectPtr.hxx" +#include "DirectedBoundingBox.hxx" + +#include +#include +#include + +using namespace std; + +//#define USE_DIRECTED_BB + +namespace ParaMEDMEM +{ + ElementLocator::ElementLocator(const ParaFIELD& sourceField, + const ProcessorGroup& distant_group, + const ProcessorGroup& local_group) + : _local_para_field(sourceField), + _local_cell_mesh(sourceField.getSupport()->getCellMesh()), + _local_face_mesh(sourceField.getSupport()->getFaceMesh()), + _distant_group(distant_group), + _local_group(local_group) + { + _union_group = _local_group.fuse(distant_group); + _computeBoundingBoxes(); + _comm=getCommunicator(); + } + + ElementLocator::~ElementLocator() + { + delete _union_group; + delete [] _domain_bounding_boxes; + } + + const MPI_Comm *ElementLocator::getCommunicator() const + { + MPIProcessorGroup* group=static_cast (_union_group); + return group->getComm(); + } + + NatureOfField ElementLocator::getLocalNature() const + { + return _local_para_field.getField()->getNature(); + } + + // ========================================================================== + // Procedure for exchanging mesh between a distant proc and a local processor + // param idistantrank proc id on distant group + // param distant_mesh on return , points to a local reconstruction of + // the distant mesh + // param distant_ids on return, contains a vector defining a correspondence + // between the distant ids and the ids of the local reconstruction + // ========================================================================== + void ElementLocator::exchangeMesh(int idistantrank, + MEDCouplingPointSet*& distant_mesh, + int*& distant_ids) + { + int rank = _union_group->translateRank(&_distant_group,idistantrank); + + if (find(_distant_proc_ids.begin(), _distant_proc_ids.end(),rank)==_distant_proc_ids.end()) + return; + + MEDCouplingAutoRefCountObjectPtr elems; +#ifdef USE_DIRECTED_BB + INTERP_KERNEL::DirectedBoundingBox dbb; + double* distant_bb = _domain_bounding_boxes+rank*dbb.dataSize(_local_cell_mesh_space_dim); + dbb.setData(distant_bb); + elems=_local_cell_mesh->getCellsInBoundingBox(dbb,getBoundingBoxAdjustment()); +#else + double* distant_bb = _domain_bounding_boxes+rank*2*_local_cell_mesh_space_dim; + elems=_local_cell_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment()); +#endif + + DataArrayInt *distant_ids_send; + MEDCouplingPointSet *send_mesh = (MEDCouplingPointSet *)_local_para_field.getField()->buildSubMeshData(elems->begin(),elems->end(),distant_ids_send); + _exchangeMesh(send_mesh, distant_mesh, idistantrank, distant_ids_send, distant_ids); + distant_ids_send->decrRef(); + + if(send_mesh) + send_mesh->decrRef(); + } + + void ElementLocator::exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth) + { + CommInterface comm_interface=_union_group->getCommInterface(); + MPIProcessorGroup* group=static_cast (_union_group); + 
const MPI_Comm* comm=(group->getComm()); + MPI_Status status; + // it must be converted to union numbering before communication + int idistRankInUnion = group->translateRank(&_distant_group,idistantrank); + char *recv_buffer=new char[4]; + std::vector send_buffer(4); + std::copy(sourceMeth.begin(),sourceMeth.end(),send_buffer.begin()); + comm_interface.sendRecv(&send_buffer[0], 4, MPI_CHAR,idistRankInUnion, 1112, + recv_buffer, 4, MPI_CHAR,idistRankInUnion, 1112, + *comm, &status); + targetMeth=recv_buffer; + delete [] recv_buffer; + } + + + // ====================== + // Compute bounding boxes + // ====================== + + void ElementLocator::_computeBoundingBoxes() + { + CommInterface comm_interface =_union_group->getCommInterface(); + MPIProcessorGroup* group=static_cast (_union_group); + const MPI_Comm* comm = group->getComm(); + _local_cell_mesh_space_dim = -1; + if(_local_cell_mesh->getMeshDimension() != -1) + _local_cell_mesh_space_dim=_local_cell_mesh->getSpaceDimension(); + int *spaceDimForAll=new int[_union_group->size()]; + comm_interface.allGather(&_local_cell_mesh_space_dim, 1, MPI_INT, + spaceDimForAll,1, MPI_INT, + *comm); + _local_cell_mesh_space_dim=*std::max_element(spaceDimForAll,spaceDimForAll+_union_group->size()); + _is_m1d_corr=((*std::min_element(spaceDimForAll,spaceDimForAll+_union_group->size()))==-1); + for(int i=0;i<_union_group->size();i++) + if(spaceDimForAll[i]!=_local_cell_mesh_space_dim && spaceDimForAll[i]!=-1) + throw INTERP_KERNEL::Exception("Spacedim not matches !"); + delete [] spaceDimForAll; +#ifdef USE_DIRECTED_BB + INTERP_KERNEL::DirectedBoundingBox dbb; + int bbSize = dbb.dataSize(_local_cell_mesh_space_dim); + _domain_bounding_boxes = new double[bbSize*_union_group->size()]; + if(_local_cell_mesh->getMeshDimension() != -1) + dbb = INTERP_KERNEL::DirectedBoundingBox(_local_cell_mesh->getCoords()->getPointer(), + _local_cell_mesh->getNumberOfNodes(), + _local_cell_mesh_space_dim); + std::vector dbbData = dbb.getData(); + if ( dbbData.size() < bbSize ) dbbData.resize(bbSize,0); + double * minmax= &dbbData[0]; +#else + int bbSize = 2*_local_cell_mesh_space_dim; + _domain_bounding_boxes = new double[bbSize*_union_group->size()]; + double * minmax=new double [bbSize]; + if(_local_cell_mesh->getMeshDimension() != -1) + _local_cell_mesh->getBoundingBox(minmax); + else + for(int i=0;i<_local_cell_mesh_space_dim;i++) + { + minmax[i*2]=-std::numeric_limits::max(); + minmax[i*2+1]=std::numeric_limits::max(); + } +#endif + + comm_interface.allGather(minmax, bbSize, MPI_DOUBLE, + _domain_bounding_boxes,bbSize, MPI_DOUBLE, + *comm); + + for (int i=0; i< _distant_group.size(); i++) + { + int rank=_union_group->translateRank(&_distant_group,i); + + if (_intersectsBoundingBox(rank)) + { + _distant_proc_ids.push_back(rank); + } + } +#ifdef USE_DIRECTED_BB +#else + delete [] minmax; +#endif + } + + + // ============================================= + // Intersect Bounding Box (with a given "irank") + // ============================================= + bool ElementLocator::_intersectsBoundingBox(int irank) + { +#ifdef USE_DIRECTED_BB + INTERP_KERNEL::DirectedBoundingBox local_dbb, distant_dbb; + local_dbb.setData( _domain_bounding_boxes+_union_group->myRank()*local_dbb.dataSize( _local_cell_mesh_space_dim )); + distant_dbb.setData( _domain_bounding_boxes+irank*distant_dbb.dataSize( _local_cell_mesh_space_dim )); + return !local_dbb.isDisjointWith( distant_dbb ); +#else + double* local_bb = _domain_bounding_boxes+_union_group->myRank()*2*_local_cell_mesh_space_dim; 
+ double* distant_bb = _domain_bounding_boxes+irank*2*_local_cell_mesh_space_dim; + + for (int idim=0; idim < _local_cell_mesh_space_dim; idim++) + { + const double eps = 1e-12; + bool intersects = (distant_bb[idim*2]getCommInterface(); + + // First stage : exchanging sizes + // ------------------------------ + vector tinyInfoLocalD,tinyInfoDistantD(1);//not used for the moment + vector tinyInfoLocal,tinyInfoDistant; + vector tinyInfoLocalS; + //Getting tiny info of local mesh to allow the distant proc to initialize and allocate + //the transmitted mesh. + local_mesh->getTinySerializationInformation(tinyInfoLocalD,tinyInfoLocal,tinyInfoLocalS); + tinyInfoLocal.push_back(distant_ids_send->getNumberOfTuples()); + tinyInfoDistant.resize(tinyInfoLocal.size()); + std::fill(tinyInfoDistant.begin(),tinyInfoDistant.end(),0); + MPIProcessorGroup* group=static_cast (_union_group); + const MPI_Comm* comm=group->getComm(); + MPI_Status status; + + // iproc_distant is the number of proc in distant group + // it must be converted to union numbering before communication + int iprocdistant_in_union = group->translateRank(&_distant_group, + iproc_distant); + + comm_interface.sendRecv(&tinyInfoLocal[0], tinyInfoLocal.size(), MPI_INT, iprocdistant_in_union, 1112, + &tinyInfoDistant[0], tinyInfoDistant.size(), MPI_INT,iprocdistant_in_union,1112, + *comm, &status); + DataArrayInt *v1Local=0; + DataArrayDouble *v2Local=0; + DataArrayInt *v1Distant=DataArrayInt::New(); + DataArrayDouble *v2Distant=DataArrayDouble::New(); + //serialization of local mesh to send data to distant proc. + local_mesh->serialize(v1Local,v2Local); + //Building the right instance of copy of distant mesh. + MEDCouplingPointSet *distant_mesh_tmp=MEDCouplingPointSet::BuildInstanceFromMeshType((MEDCouplingMeshType)tinyInfoDistant[0]); + std::vector unusedTinyDistantSts; + distant_mesh_tmp->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts); + int nbLocalElems=0; + int nbDistElem=0; + int *ptLocal=0; + int *ptDist=0; + if(v1Local) + { + nbLocalElems=v1Local->getNbOfElems(); + ptLocal=v1Local->getPointer(); + } + if(v1Distant) + { + nbDistElem=v1Distant->getNbOfElems(); + ptDist=v1Distant->getPointer(); + } + comm_interface.sendRecv(ptLocal, nbLocalElems, MPI_INT, + iprocdistant_in_union, 1111, + ptDist, nbDistElem, MPI_INT, + iprocdistant_in_union,1111, + *comm, &status); + nbLocalElems=0; + double *ptLocal2=0; + double *ptDist2=0; + if(v2Local) + { + nbLocalElems=v2Local->getNbOfElems(); + ptLocal2=v2Local->getPointer(); + } + nbDistElem=0; + if(v2Distant) + { + nbDistElem=v2Distant->getNbOfElems(); + ptDist2=v2Distant->getPointer(); + } + comm_interface.sendRecv(ptLocal2, nbLocalElems, MPI_DOUBLE, + iprocdistant_in_union, 1112, + ptDist2, nbDistElem, MPI_DOUBLE, + iprocdistant_in_union, 1112, + *comm, &status); + // + distant_mesh=distant_mesh_tmp; + //finish unserialization + distant_mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts); + // + distant_ids_recv=new int[tinyInfoDistant.back()]; + comm_interface.sendRecv(const_cast(reinterpret_cast(distant_ids_send->getConstPointer())),tinyInfoLocal.back(), MPI_INT, + iprocdistant_in_union, 1113, + distant_ids_recv,tinyInfoDistant.back(), MPI_INT, + iprocdistant_in_union,1113, + *comm, &status); + if(v1Local) + v1Local->decrRef(); + if(v2Local) + v2Local->decrRef(); + if(v1Distant) + v1Distant->decrRef(); + if(v2Distant) + v2Distant->decrRef(); + } + + /*! 
+ * connected with ElementLocator::sendPolicyToWorkingSideL + */ + void ElementLocator::recvPolicyFromLazySideW(std::vector& policy) + { + policy.resize(_distant_proc_ids.size()); + int procId=0; + CommInterface comm; + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int toRecv; + comm.recv((void *)&toRecv,1,MPI_INT,*iter,1120,*_comm,&status); + policy[procId]=toRecv; + } + } + + /*! + * connected with ElementLocator::recvFromWorkingSideL + */ + void ElementLocator::sendSumToLazySideW(const std::vector< std::vector >& distantLocEltIds, const std::vector< std::vector >& partialSumRelToDistantIds) + { + int procId=0; + CommInterface comm; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const vector& eltIds=distantLocEltIds[procId]; + const vector& valued=partialSumRelToDistantIds[procId]; + int lgth=eltIds.size(); + comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm); + comm.send(const_cast(reinterpret_cast(&eltIds[0])),lgth,MPI_INT,*iter,1115,*_comm); + comm.send(const_cast(reinterpret_cast(&valued[0])),lgth,MPI_DOUBLE,*iter,1116,*_comm); + } + } + + /*! + * connected with ElementLocator::sendToWorkingSideL + */ + void ElementLocator::recvSumFromLazySideW(std::vector< std::vector >& globalSumRelToDistantIds) + { + int procId=0; + CommInterface comm; + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + std::vector& vec=globalSumRelToDistantIds[procId]; + comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status); + } + } + + /*! + * connected with ElementLocator::recvLocalIdsFromWorkingSideL + */ + void ElementLocator::sendLocalIdsToLazyProcsW(const std::vector< std::vector >& distantLocEltIds) + { + int procId=0; + CommInterface comm; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const vector& eltIds=distantLocEltIds[procId]; + int lgth=eltIds.size(); + comm.send(&lgth,1,MPI_INT,*iter,1121,*_comm); + comm.send(const_cast(reinterpret_cast(&eltIds[0])),lgth,MPI_INT,*iter,1122,*_comm); + } + } + + /*! + * connected with ElementLocator::sendGlobalIdsToWorkingSideL + */ + void ElementLocator::recvGlobalIdsFromLazyProcsW(const std::vector< std::vector >& distantLocEltIds, std::vector< std::vector >& globalIds) + { + int procId=0; + CommInterface comm; + MPI_Status status; + globalIds.resize(_distant_proc_ids.size()); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const std::vector& vec=distantLocEltIds[procId]; + std::vector& global=globalIds[procId]; + global.resize(vec.size()); + comm.recv(&global[0],vec.size(),MPI_INT,*iter,1123,*_comm,&status); + } + } + + /*! + * connected with ElementLocator::sendCandidatesGlobalIdsToWorkingSideL + */ + void ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector >& globalIds) + { + int procId=0; + CommInterface comm; + MPI_Status status; + globalIds.resize(_distant_proc_ids.size()); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + std::vector& global=globalIds[procId]; + int lgth; + comm.recv(&lgth,1,MPI_INT,*iter,1132,*_comm,&status); + global.resize(lgth); + comm.recv(&global[0],lgth,MPI_INT,*iter,1133,*_comm,&status); + } + } + + /*! 
+ * connected with ElementLocator::recvSumFromWorkingSideL + */ + void ElementLocator::sendPartialSumToLazyProcsW(const std::vector& distantGlobIds, const std::vector& sum) + { + int procId=0; + CommInterface comm; + int lgth=distantGlobIds.size(); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + comm.send(&lgth,1,MPI_INT,*iter,1124,*_comm); + comm.send(const_cast(reinterpret_cast(&distantGlobIds[0])),lgth,MPI_INT,*iter,1125,*_comm); + comm.send(const_cast(reinterpret_cast(&sum[0])),lgth,MPI_DOUBLE,*iter,1126,*_comm); + } + } + + /*! + * connected with ElementLocator::recvCandidatesForAddElementsL + */ + void ElementLocator::sendCandidatesForAddElementsW(const std::vector& distantGlobIds) + { + int procId=0; + CommInterface comm; + int lgth=distantGlobIds.size(); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + comm.send(const_cast(reinterpret_cast(&lgth)),1,MPI_INT,*iter,1128,*_comm); + comm.send(const_cast(reinterpret_cast(&distantGlobIds[0])),lgth,MPI_INT,*iter,1129,*_comm); + } + } + + /*! + * connected with ElementLocator::sendAddElementsToWorkingSideL + */ + void ElementLocator::recvAddElementsFromLazyProcsW(std::vector >& elementsToAdd) + { + int procId=0; + CommInterface comm; + MPI_Status status; + int lgth=_distant_proc_ids.size(); + elementsToAdd.resize(lgth); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int locLgth; + std::vector& eltToFeed=elementsToAdd[procId]; + comm.recv(&locLgth,1,MPI_INT,*iter,1130,*_comm,&status); + eltToFeed.resize(locLgth); + comm.recv(&eltToFeed[0],locLgth,MPI_INT,*iter,1131,*_comm,&status); + } + } + + /*! + * connected with ElementLocator::recvPolicyFromLazySideW + */ + int ElementLocator::sendPolicyToWorkingSideL() + { + CommInterface comm; + int toSend; + DataArrayInt *isCumulative=_local_para_field.returnCumulativeGlobalNumbering(); + if(isCumulative) + { + toSend=CUMULATIVE_POLICY; + isCumulative->decrRef(); + } + else + toSend=NO_POST_TREATMENT_POLICY; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++) + comm.send(&toSend,1,MPI_INT,*iter,1120,*_comm); + return toSend; + } + + /*! + * connected with ElementLocator::sendSumToLazySideW + */ + void ElementLocator::recvFromWorkingSideL() + { + _values_added.resize(_local_para_field.getField()->getNumberOfTuples()); + int procId=0; + CommInterface comm; + _ids_per_working_proc.resize(_distant_proc_ids.size()); + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int lgth; + comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status); + vector& ids=_ids_per_working_proc[procId]; + ids.resize(lgth); + vector values(lgth); + comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status); + comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status); + for(int i=0;i::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + vector& ids=_ids_per_working_proc[procId]; + vector valsToSend(ids.size()); + vector::iterator iter3=valsToSend.begin(); + for(vector::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++) + *iter3=_values_added[*iter2]; + comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm); + //ids.clear(); + } + //_ids_per_working_proc.clear(); + } + + /*! 
+ * connected with ElementLocator::sendLocalIdsToLazyProcsW + */ + void ElementLocator::recvLocalIdsFromWorkingSideL() + { + int procId=0; + CommInterface comm; + _ids_per_working_proc.resize(_distant_proc_ids.size()); + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int lgth; + vector& ids=_ids_per_working_proc[procId]; + comm.recv(&lgth,1,MPI_INT,*iter,1121,*_comm,&status); + ids.resize(lgth); + comm.recv(&ids[0],lgth,MPI_INT,*iter,1122,*_comm,&status); + } + } + + /*! + * connected with ElementLocator::recvGlobalIdsFromLazyProcsW + */ + void ElementLocator::sendGlobalIdsToWorkingSideL() + { + int procId=0; + CommInterface comm; + DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering(); + const int *globalIdsC=globalIds->getConstPointer(); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const vector& ids=_ids_per_working_proc[procId]; + vector valsToSend(ids.size()); + vector::iterator iter1=valsToSend.begin(); + for(vector::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++) + *iter1=globalIdsC[*iter2]; + comm.send(&valsToSend[0],ids.size(),MPI_INT,*iter,1123,*_comm); + } + if(globalIds) + globalIds->decrRef(); + } + + /*! + * connected with ElementLocator::sendPartialSumToLazyProcsW + */ + void ElementLocator::recvSumFromWorkingSideL() + { + int procId=0; + int wProcSize=_distant_proc_ids.size(); + CommInterface comm; + _ids_per_working_proc.resize(wProcSize); + _values_per_working_proc.resize(wProcSize); + MPI_Status status; + std::map sums; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int lgth; + comm.recv(&lgth,1,MPI_INT,*iter,1124,*_comm,&status); + vector& ids=_ids_per_working_proc[procId]; + vector& vals=_values_per_working_proc[procId]; + ids.resize(lgth); + vals.resize(lgth); + comm.recv(&ids[0],lgth,MPI_INT,*iter,1125,*_comm,&status); + comm.recv(&vals[0],lgth,MPI_DOUBLE,*iter,1126,*_comm,&status); + vector::const_iterator iter1=ids.begin(); + vector::const_iterator iter2=vals.begin(); + for(;iter1!=ids.end();iter1++,iter2++) + sums[*iter1]+=*iter2; + } + //assign sum to prepare sending to working side + for(procId=0;procId& ids=_ids_per_working_proc[procId]; + vector& vals=_values_per_working_proc[procId]; + vector::const_iterator iter1=ids.begin(); + vector::iterator iter2=vals.begin(); + for(;iter1!=ids.end();iter1++,iter2++) + *iter2=sums[*iter1]; + ids.clear(); + } + } + + /*! + * Foreach working procs Wi compute and push it in _ids_per_working_proc3, + * if it exist, local id of nodes that are in interaction with an another lazy proc than this + * and that exists in this \b but with no interaction with this. + * The computation is performed here. sendAddElementsToWorkingSideL is only in charge to send + * precomputed _ids_per_working_proc3 attribute. 
+ * connected with ElementLocator::sendCandidatesForAddElementsW + */ + void ElementLocator::recvCandidatesForAddElementsL() + { + int procId=0; + int wProcSize=_distant_proc_ids.size(); + CommInterface comm; + _ids_per_working_proc3.resize(wProcSize); + MPI_Status status; + std::map sums; + DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering(); + const int *globalIdsC=globalIds->getConstPointer(); + int nbElts=globalIds->getNumberOfTuples(); + std::set globalIdsS(globalIdsC,globalIdsC+nbElts); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const std::vector& ids0=_ids_per_working_proc[procId]; + int lgth0=ids0.size(); + std::set elts0; + for(int i=0;i ids(lgth); + comm.recv(&ids[0],lgth,MPI_INT,*iter,1129,*_comm,&status); + set ids1(ids.begin(),ids.end()); + ids.clear(); + set tmp5,tmp6; + set_intersection(globalIdsS.begin(),globalIdsS.end(),ids1.begin(),ids1.end(),inserter(tmp5,tmp5.begin())); + set_difference(tmp5.begin(),tmp5.end(),elts0.begin(),elts0.end(),inserter(tmp6,tmp6.begin())); + std::vector& ids2=_ids_per_working_proc3[procId]; + ids2.resize(tmp6.size()); + std::copy(tmp6.begin(),tmp6.end(),ids2.begin()); + //global->local + for(std::vector::iterator iter2=ids2.begin();iter2!=ids2.end();iter2++) + *iter2=std::find(globalIdsC,globalIdsC+nbElts,*iter2)-globalIdsC; + } + if(globalIds) + globalIds->decrRef(); + } + + /*! + * connected with ElementLocator::recvAddElementsFromLazyProcsW + */ + void ElementLocator::sendAddElementsToWorkingSideL() + { + int procId=0; + CommInterface comm; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const std::vector& vals=_ids_per_working_proc3[procId]; + int size=vals.size(); + comm.send(const_cast(reinterpret_cast(&size)),1,MPI_INT,*iter,1130,*_comm); + comm.send(const_cast(reinterpret_cast(&vals[0])),size,MPI_INT,*iter,1131,*_comm); + } + } + + /*! + * This method sends to working side Wi only nodes in interaction with Wi \b and located on boundary, to reduce number. 
+ * connected with ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW + */ + void ElementLocator::sendCandidatesGlobalIdsToWorkingSideL() + { + int procId=0; + CommInterface comm; + DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering(); + const int *globalIdsC=globalIds->getConstPointer(); + MEDCouplingAutoRefCountObjectPtr candidates=_local_para_field.getSupport()->getCellMesh()->findBoundaryNodes(); + for(int *iter1=candidates->getPointer();iter1!=candidates->getPointer()+candidates->getNumberOfTuples();iter1++) + (*iter1)=globalIdsC[*iter1]; + std::set candidatesS(candidates->begin(),candidates->end()); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const vector& ids=_ids_per_working_proc[procId]; + vector valsToSend(ids.size()); + vector::iterator iter1=valsToSend.begin(); + for(vector::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++) + *iter1=globalIdsC[*iter2]; + std::set tmp2(valsToSend.begin(),valsToSend.end()); + std::vector tmp3; + set_intersection(candidatesS.begin(),candidatesS.end(),tmp2.begin(),tmp2.end(),std::back_insert_iterator< std::vector >(tmp3)); + int lgth=tmp3.size(); + comm.send(&lgth,1,MPI_INT,*iter,1132,*_comm); + comm.send(&tmp3[0],lgth,MPI_INT,*iter,1133,*_comm); + } + if(globalIds) + globalIds->decrRef(); + } +} diff --git a/src/ParaMEDMEM/ElementLocator.hxx b/src/ParaMEDMEM/ElementLocator.hxx new file mode 100644 index 000000000..4853c9766 --- /dev/null +++ b/src/ParaMEDMEM/ElementLocator.hxx @@ -0,0 +1,109 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __ELEMENTLOCATOR_HXX__ +#define __ELEMENTLOCATOR_HXX__ + +#include "InterpolationOptions.hxx" +#include "MEDCouplingNatureOfField.hxx" + +#include +#include +#include + +namespace ParaMEDMEM +{ + class ParaFIELD; + class ProcessorGroup; + class ParaSUPPORT; + class InterpolationMatrix; + class MEDCouplingPointSet; + class DataArrayInt; + + class ElementLocator : public INTERP_KERNEL::InterpolationOptions + { + public: + ElementLocator(const ParaFIELD& sourceField, const ProcessorGroup& distant_group, const ProcessorGroup& local_group); + + virtual ~ElementLocator(); + void exchangeMesh(int idistantrank, + MEDCouplingPointSet*& target_mesh, + int*& distant_ids); + void exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth); + const std::vector& getDistantProcIds() const { return _distant_proc_ids; } + const MPI_Comm *getCommunicator() const; + NatureOfField getLocalNature() const; + //! This method is used to informed if there is -1D mesh on distant_group side or on local_group side. 
+ bool isM1DCorr() const { return _is_m1d_corr; } + //Working side methods + void recvPolicyFromLazySideW(std::vector& policy); + void sendSumToLazySideW(const std::vector< std::vector >& distantLocEltIds, const std::vector< std::vector >& partialSumRelToDistantIds); + void recvSumFromLazySideW(std::vector< std::vector >& globalSumRelToDistantIds); + void sendCandidatesForAddElementsW(const std::vector& distantGlobIds); + void recvAddElementsFromLazyProcsW(std::vector >& elementsToAdd); + // + void sendLocalIdsToLazyProcsW(const std::vector< std::vector >& distantLocEltIds); + void recvGlobalIdsFromLazyProcsW(const std::vector< std::vector >& distantLocEltIds, std::vector< std::vector >& globalIds); + void recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector >& globalIds); + void sendPartialSumToLazyProcsW(const std::vector& distantGlobIds, const std::vector& sum); + //Lazy side methods + int sendPolicyToWorkingSideL(); + void recvFromWorkingSideL(); + void sendToWorkingSideL(); + // + void recvLocalIdsFromWorkingSideL(); + void sendGlobalIdsToWorkingSideL(); + void sendCandidatesGlobalIdsToWorkingSideL(); + // + void recvSumFromWorkingSideL(); + void recvCandidatesForAddElementsL(); + void sendAddElementsToWorkingSideL(); + private: + void _computeBoundingBoxes(); + bool _intersectsBoundingBox(int irank); + void _exchangeMesh(MEDCouplingPointSet* local_mesh, MEDCouplingPointSet*& distant_mesh, + int iproc_distant, const DataArrayInt* distant_ids_send, + int*& distant_ids_recv); + private: + const ParaFIELD& _local_para_field ; + MEDCouplingPointSet* _local_cell_mesh; + int _local_cell_mesh_space_dim; + bool _is_m1d_corr; + MEDCouplingPointSet* _local_face_mesh; + std::vector _distant_cell_meshes; + std::vector _distant_face_meshes; + double* _domain_bounding_boxes; + const ProcessorGroup& _distant_group; + const ProcessorGroup& _local_group; + ProcessorGroup* _union_group; + std::vector _distant_proc_ids; + const MPI_Comm *_comm; + //Attributes only used by lazy side + std::vector _values_added; + std::vector< std::vector > _ids_per_working_proc; + std::vector< std::vector > _ids_per_working_proc3; + std::vector< std::vector > _values_per_working_proc; + public: + static const int CUMULATIVE_POLICY=3; + static const int NO_POST_TREATMENT_POLICY=7; + }; + +} + +#endif diff --git a/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx b/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx new file mode 100644 index 000000000..79852a1ca --- /dev/null +++ b/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx @@ -0,0 +1,395 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "MPIProcessorGroup.hxx" +#include "ExplicitCoincidentDEC.hxx" +#include "ExplicitMapping.hxx" +#include "InterpKernelUtilities.hxx" + +using namespace std; + +namespace ParaMEDMEM +{ + /*! \defgroup explicitcoincidentdec ExplicitCoincidentDEC + */ + ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0) + { + } + + ExplicitCoincidentDEC::~ExplicitCoincidentDEC() + { + } + + + /*! + \addtogroup explicitcoincidentdec + @{ + */ + + /*! Synchronization process for exchanging topologies + */ + void ExplicitCoincidentDEC::synchronize() + { + if (_source_group->containsMyRank()) + { + _toposource = dynamic_cast(_local_field->getTopology()); + _sourcegroup= _toposource->getProcGroup()->createProcGroup(); + _targetgroup=_toposource->getProcGroup()->createComplementProcGroup(); + } + if (_target_group->containsMyRank()) + { + _topotarget = dynamic_cast(_local_field->getTopology()); + _sourcegroup= _topotarget->getProcGroup()->createComplementProcGroup(); + _targetgroup=_topotarget->getProcGroup()->createProcGroup(); + } + + // Exchanging + + // Transmitting source topology to target code + broadcastTopology(_toposource,_topotarget,1000); + transferMappingToSource(); + } + + /*! Creates the arrays necessary for the data transfer + * and fills the send array with the values of the + * source field + * */ + void ExplicitCoincidentDEC::prepareSourceDE() + { + //////////////////////////////////// + //Step 1 : buffer array creation + + if (!_toposource->getProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_sourcegroup->getCommInterface()); + + // Warning : the size of the target side is implicitly deduced + //from the size of MPI_COMM_WORLD + int target_size = _toposource->getProcGroup()->getCommInterface().worldSize()- _toposource->getProcGroup()->size() ; + + vector* target_arrays=new vector[target_size]; + + int nb_local = _toposource-> getNbLocalElements(); + + int union_size=group->size(); + + _sendcounts=new int[union_size]; + _senddispls=new int[union_size]; + _recvcounts=new int[union_size]; + _recvdispls=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _sendcounts[i]=0; + _recvcounts[i]=0; + _recvdispls[i]=0; + } + _senddispls[0]=0; + + int* counts=_explicit_mapping.getCounts(); + for (int i=0; isize(); i++) + _sendcounts[i]=counts[i]; + + for (int iproc=1; iprocsize();iproc++) + _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1]; + + _sendbuffer = new double [nb_local * _toposource->getNbComponents()]; + + ///////////////////////////////////////////////////////////// + //Step 2 : filling the buffers with the source field values + + int* counter=new int [target_size]; + counter[0]=0; + for (int i=1; igetField()->getArray()->getPointer(); + + int* bufferindex= _explicit_mapping.getBufferIndex(); + + for (int ielem=0; ielemgetNbComponents(); + for (int icomp=0; icompgetProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface()); + + vector < vector > 
source_arrays(_sourcegroup->size()); + int nb_local = _topotarget-> getNbLocalElements(); + for (int ielem=0; ielem< nb_local ; ielem++) + { + //pair source_local =_distant_elems[ielem]; + pair source_local=_explicit_mapping.getDistantNumbering(ielem); + source_arrays[source_local.first].push_back(source_local.second); + } + int union_size=group->size(); + _recvcounts=new int[union_size]; + _recvdispls=new int[union_size]; + _sendcounts=new int[union_size]; + _senddispls=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _sendcounts[i]=0; + _recvcounts[i]=0; + _recvdispls[i]=0; + } + for (int iproc=0; iproc < _sourcegroup->size(); iproc++) + { + //converts the rank in target to the rank in union communicator + int unionrank=group->translateRank(_sourcegroup,iproc); + _recvcounts[unionrank]=source_arrays[iproc].size()*_topotarget->getNbComponents(); + } + for (int i=1; igetNbComponents()]; + + } + + + /*! + * Synchronizing a topology so that all the + * group possesses it. + * + * \param toposend Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others. + * \param toporecv Topology which is received. + * \param tag Communication tag associated with this operation. + */ + void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag) + { + MPI_Status status; + + int* serializer=0; + int size; + + MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface); + + // The send processors serialize the send topology + // and send the buffers to the recv procs + if (toposend !=0 && toposend->getProcGroup()->containsMyRank()) + { + toposend->serialize(serializer, size); + for (int iproc=0; iproc< group->size(); iproc++) + { + int itarget=iproc; + if (!toposend->getProcGroup()->contains(itarget)) + { + _comm_interface->send(&size,1,MPI_INT, itarget,tag+itarget,*(group->getComm())); + _comm_interface->send(serializer, size, MPI_INT, itarget, tag+itarget,*(group->getComm())); + } + } + } + else + { + vector size2(group->size()); + int myworldrank=group->myRank(); + for (int iproc=0; iprocsize();iproc++) + { + int isource = iproc; + if (!toporecv->getProcGroup()->contains(isource)) + { + int nbelem; + _comm_interface->recv(&nbelem, 1, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status); + int* buffer = new int[nbelem]; + _comm_interface->recv(buffer, nbelem, MPI_INT, isource,tag+myworldrank, *(group->getComm()), &status); + + ExplicitTopology* topotemp=new ExplicitTopology(); + topotemp->unserialize(buffer, *_comm_interface); + delete[] buffer; + + for (int ielem=0; ielemgetNbLocalElements(); ielem++) + { + int global = toporecv->localToGlobal(ielem); + int sendlocal=topotemp->globalToLocal(global); + if (sendlocal!=-1) + { + size2[iproc]++; + _explicit_mapping.pushBackElem(make_pair(iproc,sendlocal)); + } + } + delete topotemp; + } + } + } + MESSAGE (" rank "<myRank()<< " broadcastTopology is over"); + } + + void ExplicitCoincidentDEC::transferMappingToSource() + { + + MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface); + + // sending source->target mapping which is stored by target + //in _distant_elems from target to source + if (_topotarget!=0 && _topotarget->getProcGroup()->containsMyRank()) + { + int world_size = _topotarget->getProcGroup()->getCommInterface().worldSize() ; + int* nb_transfer_union=new int[world_size]; + int* dummy_recv=new int[world_size]; + for (int i=0; itranslateRank(_sourcegroup,_explicit_mapping.getDistantDomain(i)); 
+ nb_transfer_union[unionrank]=_explicit_mapping.getNbDistantElems(i); + } + _comm_interface->allToAll(nb_transfer_union, 1, MPI_INT, dummy_recv, 1, MPI_INT, MPI_COMM_WORLD); + + int* sendbuffer= _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank()); + + int* sendcounts= new int [world_size]; + int* senddispls = new int [world_size]; + for (int i=0; i< world_size; i++) + { + sendcounts[i]=2*nb_transfer_union[i]; + if (i==0) + senddispls[i]=0; + else + senddispls[i]=senddispls[i-1]+sendcounts[i-1]; + } + int* recvcounts=new int[world_size]; + int* recvdispls=new int[world_size]; + int *dummyrecv=0; + for (int i=0; i allToAllV(sendbuffer, sendcounts, senddispls, MPI_INT, dummyrecv, recvcounts, senddispls, MPI_INT, MPI_COMM_WORLD); + + } + //receiving in the source subdomains the mapping sent by targets + else + { + int world_size = _toposource->getProcGroup()->getCommInterface().worldSize() ; + int* nb_transfer_union=new int[world_size]; + int* dummy_send=new int[world_size]; + for (int i=0; iallToAll(dummy_send, 1, MPI_INT, nb_transfer_union, 1, MPI_INT, MPI_COMM_WORLD); + + int total_size=0; + for (int i=0; i< world_size; i++) + total_size+=nb_transfer_union[i]; + int nbtarget = _targetgroup->size(); + int* targetranks = new int[ nbtarget]; + for (int i=0; itranslateRank(_targetgroup,i); + int* mappingbuffer= new int [total_size*2]; + int* sendcounts= new int [world_size]; + int* senddispls = new int [world_size]; + int* recvcounts=new int[world_size]; + int* recvdispls=new int[world_size]; + for (int i=0; i< world_size; i++) + { + recvcounts[i]=2*nb_transfer_union[i]; + if (i==0) + recvdispls[i]=0; + else + recvdispls[i]=recvdispls[i-1]+recvcounts[i-1]; + } + + int *dummysend=0; + for (int i=0; i allToAllV(dummysend, sendcounts, senddispls, MPI_INT, mappingbuffer, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD); + _explicit_mapping.unserialize(world_size,nb_transfer_union,nbtarget, targetranks, mappingbuffer); + } + } + + void ExplicitCoincidentDEC::recvData() + { + //MPI_COMM_WORLD is used instead of group because there is no + //mechanism for creating the union group yet + MESSAGE("recvData"); + + cout<<"start AllToAll"<allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, + _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD); + cout<<"end AllToAll"<getNbLocalElements(); + double* value=new double[nb_local*_topotarget->getNbComponents()]; + + vector counters(_sourcegroup->size()); + counters[0]=0; + for (int i=0; i<_sourcegroup->size()-1; i++) + { + MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface); + int worldrank=group->translateRank(_sourcegroup,i); + counters[i+1]=counters[i]+_recvcounts[worldrank]; + } + + for (int ielem=0; ielem distant_numbering=_explicit_mapping.getDistantNumbering(ielem); + int iproc=distant_numbering.first; + int ncomp = _topotarget->getNbComponents(); + for (int icomp=0; icomp< ncomp; icomp++) + value[ielem*ncomp+icomp]=_recvbuffer[counters[iproc]*ncomp+icomp]; + counters[iproc]++; + } + _local_field->getField()->getArray()->useArray(value,true,CPP_DEALLOC,nb_local,_topotarget->getNbComponents()); + } + + void ExplicitCoincidentDEC::sendData() + { + MESSAGE ("sendData"); + for (int i=0; i< 4; i++) + cout << _sendcounts[i]<<" "; + cout <allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, + _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD); + } + /*! 
+ @} + */ +} + diff --git a/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx b/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx new file mode 100644 index 000000000..6205e11ef --- /dev/null +++ b/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx @@ -0,0 +1,62 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __EXPLICITCOINCIDENTDEC_HXX__ +#define __EXPLICITCOINCIDENTDEC_HXX__ + +#include "DisjointDEC.hxx" +#include "ExplicitMapping.hxx" +#include "ExplicitTopology.hxx" + +#include + +namespace ParaMEDMEM +{ + class BlockTopology; + + class ExplicitCoincidentDEC : public DisjointDEC + { + public: + ExplicitCoincidentDEC(); + virtual ~ExplicitCoincidentDEC(); + void synchronize(); + void broadcastTopology(BlockTopology*&, int tag); + void broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag); + void transferMappingToSource(); + void prepareSourceDE(); + void prepareTargetDE(); + void recvData(); + void sendData(); + private: + ExplicitTopology* _toposource; + ExplicitTopology* _topotarget; + ProcessorGroup* _targetgroup; + ProcessorGroup* _sourcegroup; + int* _sendcounts; + int* _recvcounts; + int* _senddispls; + int* _recvdispls; + double* _recvbuffer; + double* _sendbuffer; + std::map > _distant_elems; + ExplicitMapping _explicit_mapping; + }; +} + +#endif diff --git a/src/ParaMEDMEM/ExplicitMapping.hxx b/src/ParaMEDMEM/ExplicitMapping.hxx new file mode 100644 index 000000000..e83d0dc97 --- /dev/null +++ b/src/ParaMEDMEM/ExplicitMapping.hxx @@ -0,0 +1,176 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __EXPLICITMAPPING_HXX__ +#define __EXPLICITMAPPING_HXX__ + +#include +#include +#include + +namespace ParaMEDMEM +{ + class ExplicitMapping + { + public: + + ExplicitMapping():_numbers(0), _domains(0), _comm_buffer(0) { } + + ~ExplicitMapping() + { + if (_domains!=0) delete[] _domains; + if (_numbers!=0) delete[] _numbers; + if (_comm_buffer!=0) delete[] _comm_buffer; + } + + void pushBackElem(std::pair idistant) + { + _mapping.push_back(idistant); + } + + void setDistantElem(int ilocal, std::pair idistant) + { + _mapping[ilocal]=idistant; + } + + int nbDistantDomains() + { + if (_distant_domains.empty()) + { + for (std::vector >::const_iterator iter= _mapping.begin(); + iter!=_mapping.end(); + iter++) + _distant_domains.insert(iter->first); + } + return _distant_domains.size(); + } + + std::pair getDistantNumbering(int ielem)const + { + return _mapping[ielem]; + } + + int getDistantDomain(int i) + { + if (_domains==0) + computeNumbers(); + + return _domains[i]; + } + + int getNbDistantElems(int i) + { + if (_numbers==0) + computeNumbers(); + return _numbers[i]; + } + + int* serialize(int idproc) + { + _comm_buffer=new int[_mapping.size()*2]; + std::vector offsets(_distant_domains.size()); + offsets[0]=0; + for (int i=1; i<(int)_distant_domains.size();i++) + offsets[i]=offsets[i-1]+_numbers[i-1]; + + for (int i=0; i<(int)_mapping.size(); i++) + { + int offset= offsets[_mapping[i].first]; + _comm_buffer[offset*2]=idproc; + _comm_buffer[offset*2+1]=_mapping[i].second; + offsets[_mapping[i].first]++; + } + return _comm_buffer; + } + + void unserialize(int nbprocs, int* sizes,int nbtarget, int* targetrank, int* commbuffer) + { + int total_size=0; + for (int i=0; i< nbprocs; i++) + total_size+=sizes[i]; + + _mapping.resize(total_size); + _buffer_index=new int[total_size]; + int indmap=0; + for (int i=0; i0) + { + _numbers[index]=sizes[targetrank[i]]; + _domains[index]=i; + index++; + } + } + _send_counts=new int[nbprocs]; + for (int i=0; i > _mapping; + std::set _distant_domains; + int* _numbers; + int* _domains; + int* _comm_buffer; + int* _buffer_index; + int* _send_counts; + + void computeNumbers() + { + std::map counts; + if (_numbers==0) + { + _numbers=new int[nbDistantDomains()]; + _domains=new int[nbDistantDomains()]; + for (int i=0; i<(int)_mapping.size(); i++) + { + if ( counts.find(_mapping[i].first) == counts.end()) + counts.insert(std::make_pair(_mapping[i].first,1)); + else + (counts[_mapping[i].first])++; + } + int counter=0; + for (std::map::const_iterator iter=counts.begin(); + iter!=counts.end(); + iter++) + { + _numbers[counter]=iter->second; + _domains[counter]=iter->first; + counter++; + } + } + } + }; +} + +#endif diff --git a/src/ParaMEDMEM/ExplicitTopology.cxx b/src/ParaMEDMEM/ExplicitTopology.cxx new file mode 100644 index 000000000..a624623a9 --- /dev/null +++ b/src/ParaMEDMEM/ExplicitTopology.cxx @@ -0,0 +1,109 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "ParaMESH.hxx" +#include "Topology.hxx" +#include "ExplicitTopology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" + +#include +#include + +using namespace std; +namespace ParaMEDMEM +{ + +ExplicitTopology::ExplicitTopology(const ParaMESH& paramesh ): +_proc_group(paramesh.getBlockTopology()->getProcGroup()), +_nb_components(1) +{ + _nb_elems=paramesh.getCellMesh()->getNumberOfCells(); + const int* global=paramesh.getGlobalNumberingCell(); + _loc2glob=new int[_nb_elems]; + + for (int i=0; i<_nb_elems; i++) + { + _loc2glob[i]=global[i]; + _glob2loc[global[i]]=i; + } +} + +ExplicitTopology::ExplicitTopology(const ExplicitTopology& topo, int nb_components) +{ + _proc_group = topo._proc_group; + _nb_elems = topo._nb_elems; + _nb_components = nb_components; + _loc2glob=new int[_nb_elems]; + for (int i=0; i<_nb_elems; i++) + { + _loc2glob[i]=topo._loc2glob[i]; + } + _glob2loc=topo._glob2loc; +} + + +ExplicitTopology::~ExplicitTopology() +{ + if (_loc2glob != 0) delete[] _loc2glob; +} + + +/*! Serializes the data contained in the Explicit Topology + * for communication purposes*/ +void ExplicitTopology::serialize(int* & serializer, int& size) const +{ + vector buffer; + + buffer.push_back(_nb_elems); + for (int i=0; i<_nb_elems; i++) + { + buffer.push_back(_loc2glob[i]); + } + + serializer=new int[buffer.size()]; + size= buffer.size(); + copy(buffer.begin(), buffer.end(), serializer); + +} +/*! Unserializes the data contained in the Explicit Topology + * after communication. 
Uses the same structure as the one used for serialize() + * + * */ +void ExplicitTopology::unserialize(const int* serializer,const CommInterface& comm_interface) +{ + const int* ptr_serializer=serializer; + cout << "unserialize..."< +#include +#include + +namespace ParaMEDMEM +{ + class ParaMESH; + class Topology; + class ComponentTopology; + + class ExplicitTopology : public Topology + { + public: + ExplicitTopology() { } + ExplicitTopology( const ExplicitTopology& topo, int nbcomponents); + ExplicitTopology(const ParaMESH &mesh); + virtual ~ExplicitTopology(); + + inline int getNbElements()const; + inline int getNbLocalElements() const; + const ProcessorGroup* getProcGroup()const { return _proc_group; } + int localToGlobal (const std::pair local) const { return localToGlobal(local.second); } + inline int localToGlobal(int) const; + inline int globalToLocal(int) const; + void serialize(int* & serializer, int& size) const ; + void unserialize(const int* serializer, const CommInterface& comm_interface); + int getNbComponents() const { return _nb_components; } + private: + //Processor group + const ProcessorGroup* _proc_group; + //nb of elements + int _nb_elems; + //nb of components + int _nb_components; + //mapping local to global + int* _loc2glob; + //mapping global to local + INTERP_KERNEL::HashMap _glob2loc; + }; + + //!converts a pair to a global number + inline int ExplicitTopology::globalToLocal(const int global) const + { + return (_glob2loc.find(global))->second;; + } + + //!converts local number to a global number + int ExplicitTopology::localToGlobal(int local) const + { + return _loc2glob[local]; + } + + //!Retrieves the number of elements for a given topology + inline int ExplicitTopology::getNbElements() const + { + return _nb_elems; + } + + //Retrieves the local number of elements + inline int ExplicitTopology::getNbLocalElements()const + { + return _glob2loc.size(); + } +} + + +#endif diff --git a/src/ParaMEDMEM/ICoCoField.cxx b/src/ParaMEDMEM/ICoCoField.cxx new file mode 100644 index 000000000..3925945b7 --- /dev/null +++ b/src/ParaMEDMEM/ICoCoField.cxx @@ -0,0 +1,48 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +// ICoCo file common to several codes +// ICoCoField.cxx +// version 1.2 10/05/2010 + +#include +#include + +using namespace ICoCo; +using std::string; + +Field::Field() { + _name=new string; +} + +Field::~Field() { + delete _name; +} + +void Field::setName(const string& name) { + *_name=name; +} + +const string& Field::getName() const { + return *_name; +} + +const char* Field::getCharName() const { + return _name->c_str(); +} diff --git a/src/ParaMEDMEM/ICoCoField.hxx b/src/ParaMEDMEM/ICoCoField.hxx new file mode 100644 index 000000000..509dc68f5 --- /dev/null +++ b/src/ParaMEDMEM/ICoCoField.hxx @@ -0,0 +1,43 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +// ICoCo file common to several codes +// ICoCoField.h +// version 1.2 10/05/2010 + +#ifndef _ICoCoField_included_ +#define _ICoCoField_included_ +#include + + +namespace ICoCo { + + class Field { + public: + Field(); + virtual ~Field(); + void setName(const std::string& name); + const std::string& getName() const; + const char* getCharName() const; + + private: + std::string* _name; + }; +} +#endif diff --git a/src/ParaMEDMEM/ICoCoMEDField.cxx b/src/ParaMEDMEM/ICoCoMEDField.cxx new file mode 100644 index 000000000..1bf60fc75 --- /dev/null +++ b/src/ParaMEDMEM/ICoCoMEDField.cxx @@ -0,0 +1,62 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ICoCoMEDField.hxx" +#include "ProcessorGroup.hxx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "NormalizedUnstructuredMesh.hxx" + +namespace ICoCo +{ + + /*! 
Constructor directly attaching a MEDCouplingFieldDouble + the object does not take the control the objects pointed by + \a field. + */ + + MEDField::MEDField(ParaMEDMEM::MEDCouplingFieldDouble *field):_field(field) + { + if(_field) + _field->incrRef(); + } + MEDField::MEDField(const MEDField& field):_field(field.getField()) + { + if(_field) + _field->incrRef(); + } + + MEDField::~MEDField() + { + if(_field) + _field->decrRef(); + } + + + MEDField& MEDField::operator=(const MEDField& field) + { + if (_field) + _field->decrRef(); + + _field=field.getField(); + if(_field) + _field->incrRef(); + return *this; + } +} diff --git a/src/ParaMEDMEM/ICoCoMEDField.hxx b/src/ParaMEDMEM/ICoCoMEDField.hxx new file mode 100644 index 000000000..c5dbdbb14 --- /dev/null +++ b/src/ParaMEDMEM/ICoCoMEDField.hxx @@ -0,0 +1,46 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __ICOCOMEDFIELD_HXX__ +#define __ICOCOMEDFIELD_HXX__ + +#include "ICoCoField.hxx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingFieldDouble.hxx" + +#include + +namespace ICoCo +{ + class MEDField : public ICoCo::Field + { + public: + MEDField():_field(0) { } + MEDField(ParaMEDMEM::MEDCouplingFieldDouble* field); + MEDField(const MEDField& field); + MEDField& operator=(const MEDField& field); + virtual ~MEDField(); + ParaMEDMEM::MEDCouplingFieldDouble *getField() const { return _field; } + const ParaMEDMEM::MEDCouplingMesh *getMesh() const { return _field->getMesh(); } + private: + ParaMEDMEM::MEDCouplingFieldDouble *_field; + }; +} + +#endif diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx new file mode 100644 index 000000000..e8605ded2 --- /dev/null +++ b/src/ParaMEDMEM/InterpKernelDEC.cxx @@ -0,0 +1,280 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <mpi.h>
+#include "CommInterface.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "ParaMESH.hxx"
+#include "DEC.hxx"
+#include "InterpolationMatrix.hxx"
+#include "InterpKernelDEC.hxx"
+#include "ElementLocator.hxx"
+
+namespace ParaMEDMEM
+{
+
+  /*!
+    \defgroup interpkerneldec InterpKernelDEC
+
+    \section overview Overview
+
+    The InterpKernelDEC enables the \ref conservativeremapping of fields between two parallel codes. This remapping is based on the computation of intersection volumes between elements from code A and elements from code B. The computation is possible for 3D meshes, 2D meshes, and 3D-surface meshes. Dimensions must be the same for code A and code B (for instance, though it could be desirable, it is not yet possible to couple 3D surfaces with 2D surfaces).
+
+    In the present version, only fields lying on elements are considered.
+
+    \image html NonCoincident_small.png "Example showing the transfer from a field based on a quadrangular mesh to a triangular mesh. In a P0-P0 interpolation, to obtain the value on a triangle, the values on quadrangles are weighted by their intersection area and summed."
+
+    \image latex NonCoincident_small.eps "Example showing the transfer from a field based on a quadrangular mesh to a triangular mesh. In a P0-P0 interpolation, to obtain the value on a triangle, the values on quadrangles are weighted by their intersection area and summed."
+
+    A typical use of InterpKernelDEC encompasses two distinct phases :
+    - A setup phase during which the intersection volumes are computed and the communication structures are set up. This corresponds to calling the InterpKernelDEC::synchronize() method.
+    - A use phase during which the remappings are actually performed. This corresponds to the calls to sendData() and recvData() which actually trigger the data exchange. The data exchanges are synchronous in the current version of the library, so that recvData() and sendData() calls must be synchronized on code A and code B processor groups.
+
+    The following code excerpt illustrates a typical use of the InterpKernelDEC class.
+
+    \code
+    ...
+    InterpKernelDEC dec(groupA, groupB);
+    dec.attachLocalField(field);
+    dec.synchronize();
+    if (groupA.containsMyRank())
+      dec.recvData();
+    else if (groupB.containsMyRank())
+      dec.sendData();
+    ...
+    \endcode
+    A \ref conservativeremapping of the field from the source mesh to the target mesh is performed by the function synchronize(), which computes the \ref remappingmatrix.
+
+    Computing the field on the receiving side can be expressed in terms of a matrix-vector product : \f$ \phi_t=W.\phi_s\f$, with \f$ \phi_t \f$ the field on the target side and \f$ \phi_s \f$ the field on the source side.
+    When remapping a 3D surface to another 3D surface, a projection phase is necessary to match elements from both sides. Care must be taken when defining this projection to obtain a \ref conservativeremapping.
+
+    In the P0-P0 case, this matrix is a plain rectangular matrix with coefficients equal to the intersection areas between triangles and quadrangles.
+    For instance, in the above figure, the matrix is :
+
+    \f[
+    \begin{tabular}{|cccc|}
+    0.72 & 0 & 0.2 & 0 \\
+    0.46 & 0 & 0.51 & 0.03\\
+    0.42 & 0.53 & 0 & 0.05\\
+    0 & 0 & 0.92 & 0.05 \\
+    \end{tabular}
+    \f]
+
+
+
+    \section interpkerneldec_options Options
+    On top of \ref dec_options, options supported by %InterpKernelDEC objects are
+    related to the underlying Intersector class.
+    All the options available in the intersector objects are
+    available for the %InterpKernelDEC object. The various options available for intersectors can be reviewed in \ref InterpKerIntersectors.
+
+    For instance :
+    \verbatim
+    InterpKernelDEC dec(source_group, target_group);
+    dec.attachLocalField(field);
+    dec.setOptions("DoRotate",false);
+    dec.setOptions("Precision",1e-12);
+    dec.synchronize();
+    \endverbatim
+
+    \warning{ Options must be set before calling the synchronize method. }
+  */
+
+  /*!
+    \addtogroup interpkerneldec
+    @{
+  */
+
+  InterpKernelDEC::InterpKernelDEC():_interpolation_matrix(0)
+  {
+  }
+
+  /*!
+    This constructor creates an InterpKernelDEC which has \a source_group as a working side
+    and \a target_group as an idle side. All the processors will actually participate, but intersection computations will be performed on the working side during the \a synchronize() phase.
+    The constructor must be called synchronously on all processors of both processor groups.
+
+    \param source_group working side ProcessorGroup
+    \param target_group lazy side ProcessorGroup
+
+  */
+  InterpKernelDEC::InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group):
+    DisjointDEC(source_group, target_group),_interpolation_matrix(0)
+  {
+
+  }
+
+  InterpKernelDEC::InterpKernelDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
+                                   const MPI_Comm& world_comm):DisjointDEC(src_ids,trg_ids,world_comm),
+                                                               _interpolation_matrix(0)
+  {
+  }
+
+  InterpKernelDEC::~InterpKernelDEC()
+  {
+    if (_interpolation_matrix !=0)
+      delete _interpolation_matrix;
+  }
+
+  /*!
+    \brief Synchronization process for exchanging topologies.
+
+    This method prepares all the structures necessary for sending data from a processor group to the other. It uses the mesh underlying the fields that have been set with the attachLocalField() method.
+    It works in four steps :
+    -# Bounding boxes are computed for each subdomain,
+    -# The lazy side mesh parts that are likely to intersect the working side local processor are sent to the working side,
+    -# The working side calls the interpolation kernel to compute the intersection between local and imported meshes.
+    -# The lazy side is updated so that it knows the structure of the data that will be sent by
+    the working side during a \a sendData() call.
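+
+    A minimal sketch of the calling pattern around this method (the dec and
+    local_field names are illustrative only, not part of the API): attachLocalField()
+    must already have been called on every processor of both groups, and
+    synchronize() must be called before any sendData()/recvData().
+
+    \code
+    dec.attachLocalField(local_field); // on all processors of both groups
+    dec.synchronize();                 // collective; the working side builds the interpolation matrix
+    \endcode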
+ + */ + void InterpKernelDEC::synchronize() + { + if(!isInUnion()) + return ; + delete _interpolation_matrix; + _interpolation_matrix = new InterpolationMatrix (_local_field, *_source_group,*_target_group,*this,*this); + + //setting up the communication DEC on both sides + if (_source_group->containsMyRank()) + { + //locate the distant meshes + ElementLocator locator(*_local_field, *_target_group, *_source_group); + //transfering option from InterpKernelDEC to ElementLocator + locator.copyOptions(*this); + MEDCouplingPointSet* distant_mesh=0; + int* distant_ids=0; + std::string distantMeth; + for (int i=0; i<_target_group->size(); i++) + { + // int idistant_proc = (i+_source_group->myRank())%_target_group->size(); + int idistant_proc=i; + + //gathers pieces of the target meshes that can intersect the local mesh + locator.exchangeMesh(idistant_proc,distant_mesh,distant_ids); + if (distant_mesh !=0) + { + locator.exchangeMethod(_method,idistant_proc,distantMeth); + //adds the contribution of the distant mesh on the local one + int idistant_proc_in_union=_union_group->translateRank(_target_group,idistant_proc); + //std::cout <<"add contribution from proc "<myRank()<addContribution(*distant_mesh,idistant_proc_in_union,distant_ids,_method,distantMeth); + distant_mesh->decrRef(); + delete [] distant_ids; + distant_mesh=0; + distant_ids=0; + } + } + _interpolation_matrix->finishContributionW(locator); + } + + if (_target_group->containsMyRank()) + { + ElementLocator locator(*_local_field, *_source_group, *_target_group); + //transfering option from InterpKernelDEC to ElementLocator + locator.copyOptions(*this); + MEDCouplingPointSet* distant_mesh=0; + int* distant_ids=0; + for (int i=0; i<_source_group->size(); i++) + { + // int idistant_proc = (i+_target_group->myRank())%_source_group->size(); + int idistant_proc=i; + //gathers pieces of the target meshes that can intersect the local mesh + locator.exchangeMesh(idistant_proc,distant_mesh,distant_ids); + //std::cout << " Data sent from "<<_union_group->myRank()<<" to source proc "<< idistant_proc<decrRef(); + delete [] distant_ids; + distant_mesh=0; + distant_ids=0; + } + } + _interpolation_matrix->finishContributionL(locator); + } + _interpolation_matrix->prepare(); + } + + + /*! + Receives the data whether the processor is on the working side or on the lazy side. It must match a \a sendData() call on the other side. + */ + void InterpKernelDEC::recvData() + { + if (_source_group->containsMyRank()) + _interpolation_matrix->transposeMultiply(*_local_field->getField()); + else if (_target_group->containsMyRank()) + { + _interpolation_matrix->multiply(*_local_field->getField()); + if (getForcedRenormalization()) + renormalizeTargetField(getMeasureAbsStatus()); + } + } + + + /*! + Receives the data at time \a time in asynchronous mode. The value of the field + will be time-interpolated from the field values received. + \param time time at which the value is desired + */ + void InterpKernelDEC::recvData( double time ) + { + _interpolation_matrix->getAccessDEC()->setTime(time); + recvData() ; + } + + /*! + Sends the data whether the processor is on the working side or on the lazy side. + It must match a recvData() call on the other side. 
+ */ + void InterpKernelDEC::sendData() + { + if (_source_group->containsMyRank()) + { + + _interpolation_matrix->multiply(*_local_field->getField()); + if (getForcedRenormalization()) + renormalizeTargetField(getMeasureAbsStatus()); + + } + else if (_target_group->containsMyRank()) + _interpolation_matrix->transposeMultiply(*_local_field->getField()); + } + + /*! + Sends the data available at time \a time in asynchronous mode. + \param time time at which the value is available + \param deltatime time interval between the value presently sent and the next one. + */ + void InterpKernelDEC::sendData( double time , double deltatime ) + { + _interpolation_matrix->getAccessDEC()->setTime(time,deltatime); + sendData() ; + } + + /*! + @} + */ + +} diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx new file mode 100644 index 000000000..54b8819da --- /dev/null +++ b/src/ParaMEDMEM/InterpKernelDEC.hxx @@ -0,0 +1,57 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __INTERPKERNELDEC_HXX__ +#define __INTERPKERNELDEC_HXX__ + +#include "DisjointDEC.hxx" +#include "MxN_Mapping.hxx" +#include "InterpolationOptions.hxx" + +namespace ParaMEDMEM +{ + class InterpolationMatrix; + + class InterpKernelDEC : public DisjointDEC, public INTERP_KERNEL::InterpolationOptions + { + public: + InterpKernelDEC(); + InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group); + InterpKernelDEC(const std::set& src_ids, const std::set& trg_ids, + const MPI_Comm& world_comm=MPI_COMM_WORLD); + virtual ~InterpKernelDEC(); + void synchronize(); + void recvData(); + void recvData(double time); + void sendData(); + void sendData(double time , double deltatime); + void prepareSourceDE() { } + void prepareTargetDE() { } + private : + //Number of distant points to be located locally + int _nb_distant_points; + //coordinates of distant points + const double* _distant_coords; + //local element number containing the distant points + const int* _distant_locations; + InterpolationMatrix* _interpolation_matrix; + }; +} + +#endif diff --git a/src/ParaMEDMEM/InterpolationMatrix.cxx b/src/ParaMEDMEM/InterpolationMatrix.cxx new file mode 100644 index 000000000..99c12981a --- /dev/null +++ b/src/ParaMEDMEM/InterpolationMatrix.cxx @@ -0,0 +1,973 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ProcessorGroup.hxx" +#include "MxN_Mapping.hxx" +#include "InterpolationMatrix.hxx" +#include "TranslationRotationMatrix.hxx" +#include "Interpolation.hxx" +#include "Interpolation1D.txx" +#include "Interpolation2DCurve.hxx" +#include "Interpolation2D.txx" +#include "Interpolation3DSurf.hxx" +#include "Interpolation3D.txx" +#include "Interpolation3D2D.txx" +#include "Interpolation2D1D.txx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingNormalizedUnstructuredMesh.txx" +#include "InterpolationOptions.hxx" +#include "NormalizedUnstructuredMesh.hxx" +#include "ElementLocator.hxx" + +#include + +// class InterpolationMatrix +// This class enables the storage of an interpolation matrix Wij mapping +// source field Sj to target field Ti via Ti=Vi^(-1).Wij.Sj. +// The matrix is built and stored on the processors belonging to the source +// group. + +using namespace std; + +namespace ParaMEDMEM +{ + + // ==================================================================== + // Creates an empty matrix structure linking two distributed supports. + // The method must be called by all processors belonging to source + // and target groups. + // param source_support local support + // param source_group processor group containing the local processors + // param target_group processor group containing the distant processors + // param method interpolation method + // ==================================================================== + + InterpolationMatrix::InterpolationMatrix(const ParaMEDMEM::ParaFIELD *source_field, + const ProcessorGroup& source_group, + const ProcessorGroup& target_group, + const DECOptions& dec_options, + const INTERP_KERNEL::InterpolationOptions& interp_options): + INTERP_KERNEL::InterpolationOptions(interp_options), + DECOptions(dec_options), + _source_field(source_field), + _source_support(source_field->getSupport()->getCellMesh()), + _mapping(source_group, target_group, dec_options), + _source_group(source_group), + _target_group(target_group) + { + int nbelems = source_field->getField()->getNumberOfTuples(); + _row_offsets.resize(nbelems+1); + _coeffs.resize(nbelems); + _target_volume.resize(nbelems); + } + + InterpolationMatrix::~InterpolationMatrix() + { + } + + + // ====================================================================== + // \brief Adds the contribution of a distant subdomain to the* + // interpolation matrix. + // The method adds contribution to the interpolation matrix. + // For each row of the matrix, elements are addded as + // a (column, coeff) pair in the _coeffs array. This column number refers + // to an element on the target side via the _col_offsets array. + // It is made of a series of (iproc, ielem) pairs. + // The number of elements per row is stored in the row_offsets array. 
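+
+  // A small illustrative layout (hypothetical numbers, not taken from a real run):
+  // if source row 0 intersects target element 5 of distant proc 2 and target
+  // element 1 of distant proc 3, the pairs (2,5) and (3,1) are registered in
+  // _col_offsets as columns c0 and c1, _coeffs[0] stores {(c0,w00),(c1,w01)} with
+  // the intersection volumes w00 and w01, and _row_offsets records that row 0
+  // owns two entries.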
+ + // param distant_support local representation of the distant subdomain + // param iproc_distant id of the distant subdomain (in the distant group) + // param distant_elems mapping between the local representation of + // the subdomain and the actual elem ids on the distant subdomain + // ====================================================================== + + void InterpolationMatrix::addContribution ( MEDCouplingPointSet& distant_support, + int iproc_distant, + const int* distant_elems, + const std::string& srcMeth, + const std::string& targetMeth) + { + std::string interpMethod(targetMeth); + interpMethod+=srcMeth; + //creating the interpolator structure + vector > surfaces; + //computation of the intersection volumes between source and target elements + MEDCouplingUMesh *distant_supportC=dynamic_cast(&distant_support); + MEDCouplingUMesh *source_supportC=dynamic_cast(_source_support); + if ( distant_support.getMeshDimension() == -1 ) + { + if(source_supportC->getMeshDimension()==2 && source_supportC->getSpaceDimension()==2) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> source_mesh_wrapper(source_supportC); + INTERP_KERNEL::Interpolation2D interpolation(*this); + interpolation.fromIntegralUniform(source_mesh_wrapper,surfaces,srcMeth); + } + else if(source_supportC->getMeshDimension()==3 && source_supportC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> source_mesh_wrapper(source_supportC); + INTERP_KERNEL::Interpolation3D interpolation(*this); + interpolation.fromIntegralUniform(source_mesh_wrapper,surfaces,srcMeth); + } + else if(source_supportC->getMeshDimension()==2 && source_supportC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,2> source_mesh_wrapper(source_supportC); + INTERP_KERNEL::Interpolation3DSurf interpolation(*this); + interpolation.fromIntegralUniform(source_mesh_wrapper,surfaces,srcMeth); + } + else + throw INTERP_KERNEL::Exception("No para interpolation available for the given mesh and space dimension of source mesh to -1D targetMesh"); + } + else if ( source_supportC->getMeshDimension() == -1 ) + { + if(distant_supportC->getMeshDimension()==2 && distant_supportC->getSpaceDimension()==2) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> distant_mesh_wrapper(distant_supportC); + INTERP_KERNEL::Interpolation2D interpolation(*this); + interpolation.toIntegralUniform(distant_mesh_wrapper,surfaces,srcMeth); + } + else if(distant_supportC->getMeshDimension()==3 && distant_supportC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> distant_mesh_wrapper(distant_supportC); + INTERP_KERNEL::Interpolation3D interpolation(*this); + interpolation.toIntegralUniform(distant_mesh_wrapper,surfaces,srcMeth); + } + else if(distant_supportC->getMeshDimension()==2 && distant_supportC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,2> distant_mesh_wrapper(distant_supportC); + INTERP_KERNEL::Interpolation3DSurf interpolation(*this); + interpolation.toIntegralUniform(distant_mesh_wrapper,surfaces,srcMeth); + } + else + throw INTERP_KERNEL::Exception("No para interpolation available for the given mesh and space dimension of distant mesh to -1D sourceMesh"); + } + else if ( distant_support.getMeshDimension() == 2 + && _source_support->getMeshDimension() == 3 + && distant_support.getSpaceDimension() == 3 && _source_support->getSpaceDimension() == 3) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<3,3> 
source_wrapper(source_supportC); + INTERP_KERNEL::Interpolation3D2D interpolator (*this); + interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( distant_support.getMeshDimension() == 1 + && _source_support->getMeshDimension() == 2 + && distant_support.getSpaceDimension() == 2 && _source_support->getSpaceDimension() == 2) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(source_supportC); + INTERP_KERNEL::Interpolation2D1D interpolator (*this); + interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( distant_support.getMeshDimension() == 3 + && _source_support->getMeshDimension() == 1 + && distant_support.getSpaceDimension() == 3 && _source_support->getSpaceDimension() == 3) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<3,3> source_wrapper(source_supportC); + INTERP_KERNEL::Interpolation3D interpolator (*this); + interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if (distant_support.getMeshDimension() != _source_support->getMeshDimension()) + { + throw INTERP_KERNEL::Exception("local and distant meshes do not have the same space and mesh dimensions"); + } + else if( distant_support.getMeshDimension() == 1 + && distant_support.getSpaceDimension() == 1 ) + { + MEDCouplingNormalizedUnstructuredMesh<1,1> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<1,1> source_wrapper(source_supportC); + + INTERP_KERNEL::Interpolation1D interpolation(*this); + interpolation.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if( distant_support.getMeshDimension() == 1 + && distant_support.getSpaceDimension() == 2 ) + { + MEDCouplingNormalizedUnstructuredMesh<2,1> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<2,1> source_wrapper(source_supportC); + + INTERP_KERNEL::Interpolation2DCurve interpolation(*this); + interpolation.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( distant_support.getMeshDimension() == 2 + && distant_support.getSpaceDimension() == 3 ) + { + MEDCouplingNormalizedUnstructuredMesh<3,2> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<3,2> source_wrapper(source_supportC); + + INTERP_KERNEL::Interpolation3DSurf interpolator (*this); + interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( distant_support.getMeshDimension() == 2 + && distant_support.getSpaceDimension() == 2) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(source_supportC); + + INTERP_KERNEL::Interpolation2D interpolator (*this); + interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + 
} + else if ( distant_support.getMeshDimension() == 3 + && distant_support.getSpaceDimension() == 3 ) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> target_wrapper(distant_supportC); + MEDCouplingNormalizedUnstructuredMesh<3,3> source_wrapper(source_supportC); + + INTERP_KERNEL::Interpolation3D interpolator (*this); + interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else + { + throw INTERP_KERNEL::Exception("no interpolator exists for these mesh and space dimensions "); + } + bool needTargetSurf=isSurfaceComputationNeeded(targetMeth); + + MEDCouplingFieldDouble *target_triangle_surf=0; + if(needTargetSurf) + target_triangle_surf = distant_support.getMeasureField(getMeasureAbsStatus()); + fillDSFromVM(iproc_distant,distant_elems,surfaces,target_triangle_surf); + + if(needTargetSurf) + target_triangle_surf->decrRef(); + } + + void InterpolationMatrix::fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map >& values, MEDCouplingFieldDouble *surf) + { + //loop over the elements to build the interpolation + //matrix structures + int source_size=values.size(); + for (int ielem=0; ielem < source_size; ielem++) + { + _row_offsets[ielem+1] += values[ielem].size(); + for(map::const_iterator iter=values[ielem].begin();iter!=values[ielem].end();iter++) + { + int localId; + if(distant_elems) + localId=distant_elems[iter->first]; + else + localId=iter->first; + //locating the (iproc, itriangle) pair in the list of columns + map,int >::iterator iter2 = _col_offsets.find(make_pair(iproc_distant,localId)); + int col_id; + + if (iter2 == _col_offsets.end()) + { + //(iproc, itriangle) is not registered in the list + //of distant elements + col_id =_col_offsets.size(); + _col_offsets.insert(make_pair(make_pair(iproc_distant,localId),col_id)); + _mapping.addElementFromSource(iproc_distant,localId); + } + else + { + col_id = iter2->second; + } + //the non zero coefficient is stored + //ielem is the row, + //col_id is the number of the column + //iter->second is the value of the coefficient + if(surf) + { + double surface = surf->getIJ(iter->first,0); + _target_volume[ielem].push_back(surface); + } + _coeffs[ielem].push_back(make_pair(col_id,iter->second)); + } + } + } + + void InterpolationMatrix::serializeMe(std::vector< std::vector< std::map > >& data1, std::vector& data2) const + { + data1.clear(); + data2.clear(); + const std::vector >& sendingIds=_mapping.getSendingIds(); + std::set procsS; + for(std::vector >::const_iterator iter1=sendingIds.begin();iter1!=sendingIds.end();iter1++) + procsS.insert((*iter1).first); + data1.resize(procsS.size()); + data2.resize(procsS.size()); + std::copy(procsS.begin(),procsS.end(),data2.begin()); + std::map fastProcAcc; + int id=0; + for(std::set::const_iterator iter2=procsS.begin();iter2!=procsS.end();iter2++,id++) + fastProcAcc[*iter2]=id; + int nbOfSrcElt=_coeffs.size(); + for(std::vector< std::vector< std::map > >::iterator iter3=data1.begin();iter3!=data1.end();iter3++) + (*iter3).resize(nbOfSrcElt); + id=0; + for(std::vector< std::vector< std::pair > >::const_iterator iter4=_coeffs.begin();iter4!=_coeffs.end();iter4++,id++) + { + for(std::vector< std::pair >::const_iterator iter5=(*iter4).begin();iter5!=(*iter4).end();iter5++) + { + const std::pair& elt=sendingIds[(*iter5).first]; + data1[fastProcAcc[elt.first]][id][elt.second]=(*iter5).second; + } + } + } + + void InterpolationMatrix::initialize() + { + int 
lgth=_coeffs.size(); + _row_offsets.clear(); _row_offsets.resize(lgth+1); + _coeffs.clear(); _coeffs.resize(lgth); + _target_volume.clear(); _target_volume.resize(lgth); + _col_offsets.clear(); + _mapping.initialize(); + } + + void InterpolationMatrix::finishContributionW(ElementLocator& elementLocator) + { + NatureOfField nature=elementLocator.getLocalNature(); + switch(nature) + { + case ConservativeVolumic: + computeConservVolDenoW(elementLocator); + break; + case Integral: + { + if(!elementLocator.isM1DCorr()) + computeIntegralDenoW(elementLocator); + else + computeGlobConstraintDenoW(elementLocator); + break; + } + case IntegralGlobConstraint: + computeGlobConstraintDenoW(elementLocator); + break; + case RevIntegral: + { + if(!elementLocator.isM1DCorr()) + computeRevIntegralDenoW(elementLocator); + else + computeConservVolDenoW(elementLocator); + break; + } + default: + throw INTERP_KERNEL::Exception("Not recognized nature of field. Change nature of Field."); + break; + } + } + + void InterpolationMatrix::finishContributionL(ElementLocator& elementLocator) + { + NatureOfField nature=elementLocator.getLocalNature(); + switch(nature) + { + case ConservativeVolumic: + computeConservVolDenoL(elementLocator); + break; + case Integral: + { + if(!elementLocator.isM1DCorr()) + computeIntegralDenoL(elementLocator); + else + computeConservVolDenoL(elementLocator); + break; + } + case IntegralGlobConstraint: + //this is not a bug doing like ConservativeVolumic + computeConservVolDenoL(elementLocator); + break; + case RevIntegral: + { + if(!elementLocator.isM1DCorr()) + computeRevIntegralDenoL(elementLocator); + else + computeConservVolDenoL(elementLocator); + break; + } + default: + throw INTERP_KERNEL::Exception("Not recognized nature of field. Change nature of Field."); + break; + } + } + + void InterpolationMatrix::computeConservVolDenoW(ElementLocator& elementLocator) + { + computeGlobalColSum(_deno_reverse_multiply); + computeGlobalRowSum(elementLocator,_deno_multiply,_deno_reverse_multiply); + } + + void InterpolationMatrix::computeConservVolDenoL(ElementLocator& elementLocator) + { + int pol1=elementLocator.sendPolicyToWorkingSideL(); + if(pol1==ElementLocator::NO_POST_TREATMENT_POLICY) + { + elementLocator.recvFromWorkingSideL(); + elementLocator.sendToWorkingSideL(); + } + else if(ElementLocator::CUMULATIVE_POLICY) + { + //ask for lazy side to deduce ids eventually missing on working side and to send it back. 
+ elementLocator.recvLocalIdsFromWorkingSideL(); + elementLocator.sendCandidatesGlobalIdsToWorkingSideL(); + elementLocator.recvCandidatesForAddElementsL(); + elementLocator.sendAddElementsToWorkingSideL(); + //Working side has updated its eventually missing ids updates its global ids with lazy side procs contribution + elementLocator.recvLocalIdsFromWorkingSideL(); + elementLocator.sendGlobalIdsToWorkingSideL(); + //like no post treatment + elementLocator.recvFromWorkingSideL(); + elementLocator.sendToWorkingSideL(); + } + else + throw INTERP_KERNEL::Exception("Not managed policy detected on lazy side : not implemented !"); + } + + void InterpolationMatrix::computeIntegralDenoW(ElementLocator& elementLocator) + { + MEDCouplingFieldDouble *source_triangle_surf = _source_support->getMeasureField(getMeasureAbsStatus()); + _deno_multiply.resize(_coeffs.size()); + vector >::iterator iter6=_deno_multiply.begin(); + const double *values=source_triangle_surf->getArray()->getConstPointer(); + for(vector > >::const_iterator iter4=_coeffs.begin();iter4!=_coeffs.end();iter4++,iter6++,values++) + { + (*iter6).resize((*iter4).size()); + std::fill((*iter6).begin(),(*iter6).end(),*values); + } + source_triangle_surf->decrRef(); + _deno_reverse_multiply=_target_volume; + } + + void InterpolationMatrix::computeRevIntegralDenoW(ElementLocator& elementLocator) + { + _deno_multiply=_target_volume; + MEDCouplingFieldDouble *source_triangle_surf = _source_support->getMeasureField(getMeasureAbsStatus()); + _deno_reverse_multiply.resize(_coeffs.size()); + vector >::iterator iter6=_deno_reverse_multiply.begin(); + const double *values=source_triangle_surf->getArray()->getConstPointer(); + for(vector > >::const_iterator iter4=_coeffs.begin();iter4!=_coeffs.end();iter4++,iter6++,values++) + { + (*iter6).resize((*iter4).size()); + std::fill((*iter6).begin(),(*iter6).end(),*values); + } + source_triangle_surf->decrRef(); + } + + /*! + * Nothing to do because surface computation is on working side. + */ + void InterpolationMatrix::computeIntegralDenoL(ElementLocator& elementLocator) + { + } + + /*! + * Nothing to do because surface computation is on working side. + */ + void InterpolationMatrix::computeRevIntegralDenoL(ElementLocator& elementLocator) + { + } + + + void InterpolationMatrix::computeGlobConstraintDenoW(ElementLocator& elementLocator) + { + computeGlobalColSum(_deno_multiply); + computeGlobalRowSum(elementLocator,_deno_reverse_multiply,_deno_multiply); + } + + void InterpolationMatrix::computeGlobalRowSum(ElementLocator& elementLocator, std::vector >& denoStrorage, std::vector >& denoStrorageInv) + { + //stores id in distant procs sorted by lazy procs connected with + vector< vector > rowsPartialSumI; + //stores for each lazy procs connected with, if global info is available and if it's the case the policy + vector policyPartial; + //stores the corresponding values. + vector< vector > rowsPartialSumD; + elementLocator.recvPolicyFromLazySideW(policyPartial); + int pol1=mergePolicies(policyPartial); + if(pol1==ElementLocator::NO_POST_TREATMENT_POLICY) + { + computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD); + elementLocator.sendSumToLazySideW(rowsPartialSumI,rowsPartialSumD); + elementLocator.recvSumFromLazySideW(rowsPartialSumD); + } + else if(pol1==ElementLocator::CUMULATIVE_POLICY) + { + //updateWithNewAdditionnalElements(addingElements); + //stores for each lazy procs connected with, the ids in global mode if it exists (regarding policyPartial). 
This array has exactly the size of rowsPartialSumI, + //if policyPartial has CUMALATIVE_POLICY in each. + vector< vector > globalIdsPartial; + computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD); + elementLocator.sendLocalIdsToLazyProcsW(rowsPartialSumI); + elementLocator.recvCandidatesGlobalIdsFromLazyProcsW(globalIdsPartial); + std::vector< std::vector > addingElements; + findAdditionnalElements(elementLocator,addingElements,rowsPartialSumI,globalIdsPartial); + addGhostElements(elementLocator.getDistantProcIds(),addingElements); + rowsPartialSumI.clear(); + globalIdsPartial.clear(); + computeLocalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD); + elementLocator.sendLocalIdsToLazyProcsW(rowsPartialSumI); + elementLocator.recvGlobalIdsFromLazyProcsW(rowsPartialSumI,globalIdsPartial); + // + elementLocator.sendSumToLazySideW(rowsPartialSumI,rowsPartialSumD); + elementLocator.recvSumFromLazySideW(rowsPartialSumD); + mergeRowSum3(globalIdsPartial,rowsPartialSumD); + mergeCoeffs(elementLocator.getDistantProcIds(),rowsPartialSumI,globalIdsPartial,denoStrorageInv); + } + else + throw INTERP_KERNEL::Exception("Not managed policy detected : not implemented !"); + divideByGlobalRowSum(elementLocator.getDistantProcIds(),rowsPartialSumI,rowsPartialSumD,denoStrorage); + } + + /*! + * @param distantProcs in parameter that indicates which lazy procs are concerned. + * @param resPerProcI out parameter that must be cleared before calling this method. The size of 1st dimension is equal to the size of 'distantProcs'. + * It contains the element ids (2nd dimension) of the corresponding lazy proc. + * @param resPerProcD out parameter with the same format than 'resPerProcI'. It contains corresponding sum values. + */ + void InterpolationMatrix::computeLocalRowSum(const std::vector& distantProcs, std::vector >& resPerProcI, + std::vector >& resPerProcD) const + { + resPerProcI.resize(distantProcs.size()); + resPerProcD.resize(distantProcs.size()); + std::vector res(_col_offsets.size()); + for(vector > >::const_iterator iter=_coeffs.begin();iter!=_coeffs.end();iter++) + for(vector >::const_iterator iter3=(*iter).begin();iter3!=(*iter).end();iter3++) + res[(*iter3).first]+=(*iter3).second; + set procsSet; + int id=-1; + const vector >& mapping=_mapping.getSendingIds(); + for(vector >::const_iterator iter2=mapping.begin();iter2!=mapping.end();iter2++) + { + std::pair::iterator,bool> isIns=procsSet.insert((*iter2).first); + if(isIns.second) + id=std::find(distantProcs.begin(),distantProcs.end(),(*iter2).first)-distantProcs.begin(); + resPerProcI[id].push_back((*iter2).second); + resPerProcD[id].push_back(res[iter2-mapping.begin()]); + } + } + + /*! + * This method is only usable when CUMULATIVE_POLICY detected. This method finds elements ids (typically nodes) lazy side that + * are not present in columns of 'this' and that should regarding cumulative merge of elements regarding their global ids. 
+ */ + void InterpolationMatrix::findAdditionnalElements(ElementLocator& elementLocator, std::vector >& elementsToAdd, + const std::vector >& resPerProcI, const std::vector >& globalIdsPartial) + { + std::set globalIds; + int nbLazyProcs=globalIdsPartial.size(); + for(int i=0;i tmp(globalIds.size()); + std::copy(globalIds.begin(),globalIds.end(),tmp.begin()); + globalIds.clear(); + elementLocator.sendCandidatesForAddElementsW(tmp); + elementLocator.recvAddElementsFromLazyProcsW(elementsToAdd); + } + + void InterpolationMatrix::addGhostElements(const std::vector& distantProcs, const std::vector >& elementsToAdd) + { + std::vector< std::vector< std::map > > data1; + std::vector data2; + serializeMe(data1,data2); + initialize(); + int nbOfDistProcs=distantProcs.size(); + for(int i=0;i& eltsForThisProc=elementsToAdd[i]; + if(!eltsForThisProc.empty()) + { + std::vector::iterator iter1=std::find(data2.begin(),data2.end(),procId); + std::map *toFeed=0; + if(iter1!=data2.end()) + {//to test + int rank=iter1-data2.begin(); + toFeed=&(data1[rank].back()); + } + else + { + iter1=std::lower_bound(data2.begin(),data2.end(),procId); + int rank=iter1-data2.begin(); + data2.insert(iter1,procId); + std::vector< std::map > tmp(data1.front().size()); + data1.insert(data1.begin()+rank,tmp); + toFeed=&(data1[rank].back()); + } + for(std::vector::const_iterator iter2=eltsForThisProc.begin();iter2!=eltsForThisProc.end();iter2++) + (*toFeed)[*iter2]=0.; + } + } + // + nbOfDistProcs=data2.size(); + for(int j=0;j& policyPartial) + { + if(policyPartial.empty()) + return ElementLocator::NO_POST_TREATMENT_POLICY; + int ref=policyPartial[0]; + std::vector::const_iterator iter1=std::find_if(policyPartial.begin(),policyPartial.end(),std::bind2nd(std::not_equal_to(),ref)); + if(iter1!=policyPartial.end()) + { + std::ostringstream msg; msg << "Incompatible policies between lazy procs each other : proc # " << iter1-policyPartial.begin(); + throw INTERP_KERNEL::Exception(msg.str().c_str()); + } + return ref; + } + + /*! + * This method introduce global ids aspects in computed 'rowsPartialSumD'. + * As precondition rowsPartialSumD.size()==policyPartial.size()==globalIdsPartial.size(). Foreach i in [0;rowsPartialSumD.size() ) rowsPartialSumD[i].size()==globalIdsPartial[i].size() + * @param rowsPartialSumD : in parameter, Partial row sum computed for each lazy procs connected with. + * @param rowsPartialSumI : in parameter, Corresponding local ids for each lazy procs connected with. + * @param globalIdsPartial : in parameter, the global numbering, of elements connected with. + * @param globalIdsLazySideInteraction : out parameter, constituted from all global ids of lazy procs connected with. 
+ * @para sumCorresponding : out parameter, relative to 'globalIdsLazySideInteraction' + */ + void InterpolationMatrix::mergeRowSum(const std::vector< std::vector >& rowsPartialSumD, const std::vector< std::vector >& globalIdsPartial, + std::vector& globalIdsLazySideInteraction, std::vector& sumCorresponding) + { + std::map sumToReturn; + int nbLazyProcs=rowsPartialSumD.size(); + for(int i=0;i& rowSumOfP=rowsPartialSumD[i]; + const std::vector& globalIdsOfP=globalIdsPartial[i]; + std::vector::const_iterator iter1=rowSumOfP.begin(); + std::vector::const_iterator iter2=globalIdsOfP.begin(); + for(;iter1!=rowSumOfP.end();iter1++,iter2++) + sumToReturn[*iter2]+=*iter1; + } + // + int lgth=sumToReturn.size(); + globalIdsLazySideInteraction.resize(lgth); + sumCorresponding.resize(lgth); + std::vector::iterator iter3=globalIdsLazySideInteraction.begin(); + std::vector::iterator iter4=sumCorresponding.begin(); + for(std::map::const_iterator iter5=sumToReturn.begin();iter5!=sumToReturn.end();iter5++,iter3++,iter4++) + { + *iter3=(*iter5).first; + *iter4=(*iter5).second; + } + } + + /*! + * This method simply reorganize the result contained in 'sumCorresponding' computed by lazy side into 'rowsPartialSumD' with help of 'globalIdsPartial' and 'globalIdsLazySideInteraction' + * + * @param globalIdsPartial : in parameter, global ids sorted by lazy procs + * @param rowsPartialSumD : in/out parameter, with exactly the same size as 'globalIdsPartial' + * @param globalIdsLazySideInteraction : in parameter that represents ALL the global ids of every lazy procs in interaction + * @param sumCorresponding : in parameter with same size as 'globalIdsLazySideInteraction' that stores the corresponding sum of 'globalIdsLazySideInteraction' + */ + void InterpolationMatrix::mergeRowSum2(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD, + const std::vector& globalIdsLazySideInteraction, const std::vector& sumCorresponding) + { + std::map acc; + std::vector::const_iterator iter1=globalIdsLazySideInteraction.begin(); + std::vector::const_iterator iter2=sumCorresponding.begin(); + for(;iter1!=globalIdsLazySideInteraction.end();iter1++,iter2++) + acc[*iter1]=*iter2; + // + int nbLazyProcs=globalIdsPartial.size(); + for(int i=0;i& tmp1=globalIdsPartial[i]; + std::vector& tmp2=rowsPartialSumD[i]; + std::vector::const_iterator iter3=tmp1.begin(); + std::vector::iterator iter4=tmp2.begin(); + for(;iter3!=tmp1.end();iter3++,iter4++) + *iter4=acc[*iter3]; + } + } + + void InterpolationMatrix::mergeRowSum3(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD) + { + std::map sum; + std::vector< std::vector >::const_iterator iter1=globalIdsPartial.begin(); + std::vector< std::vector >::iterator iter2=rowsPartialSumD.begin(); + for(;iter1!=globalIdsPartial.end();iter1++,iter2++) + { + std::vector::const_iterator iter3=(*iter1).begin(); + std::vector::const_iterator iter4=(*iter2).begin(); + for(;iter3!=(*iter1).end();iter3++,iter4++) + sum[*iter3]+=*iter4; + } + iter2=rowsPartialSumD.begin(); + for(iter1=globalIdsPartial.begin();iter1!=globalIdsPartial.end();iter1++,iter2++) + { + std::vector::const_iterator iter3=(*iter1).begin(); + std::vector::iterator iter4=(*iter2).begin(); + for(;iter3!=(*iter1).end();iter3++,iter4++) + *iter4=sum[*iter3]; + } + } + + /*! + * This method updates this->_coeffs attribute in order to take into account hidden (because having the same global number) similar nodes in _coeffs array. 
+ * If in this->_coeffs two distant element id have the same global id their values will be replaced for each by the sum of the two. + * @param procsInInteraction input parameter : specifies the procId in absolute of distant lazy procs in interaction with + * @param rowsPartialSumI input parameter : local ids of distant lazy procs elements in interaction with + * @param globalIdsPartial input parameter : global ids of distant lazy procs elements in interaction with + */ + void InterpolationMatrix::mergeCoeffs(const std::vector& procsInInteraction, const std::vector< std::vector >& rowsPartialSumI, + const std::vector >& globalIdsPartial, std::vector >& denoStrorageInv) + { + //preparing fast access structures + std::map procT; + int localProcId=0; + for(std::vector::const_iterator iter1=procsInInteraction.begin();iter1!=procsInInteraction.end();iter1++,localProcId++) + procT[*iter1]=localProcId; + int size=procsInInteraction.size(); + std::vector > localToGlobal(size); + for(int i=0;i& myLocalToGlobal=localToGlobal[i]; + const std::vector& locals=rowsPartialSumI[i]; + const std::vector& globals=globalIdsPartial[i]; + std::vector::const_iterator iter3=locals.begin(); + std::vector::const_iterator iter4=globals.begin(); + for(;iter3!=locals.end();iter3++,iter4++) + myLocalToGlobal[*iter3]=*iter4; + } + // + const vector >& mapping=_mapping.getSendingIds(); + std::map globalIdVal; + //accumulate for same global id on lazy part. + for(vector > >::iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++) + for(vector >::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++) + { + const std::pair& distantLocalLazyId=mapping[(*iter2).first]; + int localLazyProcId=procT[distantLocalLazyId.first]; + int globalDistantLazyId=localToGlobal[localLazyProcId][distantLocalLazyId.second]; + globalIdVal[globalDistantLazyId]+=(*iter2).second; + } + //perform merge + std::vector >::iterator iter3=denoStrorageInv.begin(); + for(vector > >::iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++,iter3++) + { + double val=(*iter3).back(); + (*iter3).resize((*iter1).size()); + std::vector::iterator iter4=(*iter3).begin(); + for(vector >::iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++,iter4++) + { + const std::pair& distantLocalLazyId=mapping[(*iter2).first]; + int localLazyProcId=procT[distantLocalLazyId.first]; + int globalDistantLazyId=localToGlobal[localLazyProcId][distantLocalLazyId.second]; + double newVal=globalIdVal[globalDistantLazyId]; + if((*iter2).second!=0.) 
+ (*iter4)=val*newVal/(*iter2).second; + else + (*iter4)=std::numeric_limits::max(); + (*iter2).second=newVal; + } + } + } + + void InterpolationMatrix::divideByGlobalRowSum(const std::vector& distantProcs, const std::vector >& resPerProcI, + const std::vector >& resPerProcD, std::vector >& deno) + { + map fastSums; + int procId=0; + for(vector::const_iterator iter1=distantProcs.begin();iter1!=distantProcs.end();iter1++,procId++) + { + const std::vector& currentProcI=resPerProcI[procId]; + const std::vector& currentProcD=resPerProcD[procId]; + vector::const_iterator iter3=currentProcD.begin(); + for(vector::const_iterator iter2=currentProcI.begin();iter2!=currentProcI.end();iter2++,iter3++) + fastSums[_col_offsets[std::make_pair(*iter1,*iter2)]]=*iter3; + } + deno.resize(_coeffs.size()); + vector >::iterator iter6=deno.begin(); + for(vector > >::const_iterator iter4=_coeffs.begin();iter4!=_coeffs.end();iter4++,iter6++) + { + (*iter6).resize((*iter4).size()); + vector::iterator iter7=(*iter6).begin(); + for(vector >::const_iterator iter5=(*iter4).begin();iter5!=(*iter4).end();iter5++,iter7++) + *iter7=fastSums[(*iter5).first]; + } + } + + void InterpolationMatrix::computeGlobalColSum(std::vector >& denoStrorage) + { + denoStrorage.resize(_coeffs.size()); + vector >::iterator iter2=denoStrorage.begin(); + for(vector > >::const_iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++,iter2++) + { + (*iter2).resize((*iter1).size()); + double sumOfCurrentRow=0.; + for(vector >::const_iterator iter3=(*iter1).begin();iter3!=(*iter1).end();iter3++) + sumOfCurrentRow+=(*iter3).second; + std::fill((*iter2).begin(),(*iter2).end(),sumOfCurrentRow); + } + } + + void InterpolationMatrix::resizeGlobalColSum(std::vector >& denoStrorage) + { + vector >::iterator iter2=denoStrorage.begin(); + for(vector > >::const_iterator iter1=_coeffs.begin();iter1!=_coeffs.end();iter1++,iter2++) + { + double val=(*iter2).back(); + (*iter2).resize((*iter1).size()); + std::fill((*iter2).begin(),(*iter2).end(),val); + } + } + + // ================================================================== + // The call to this method updates the arrays on the target side + // so that they know which amount of data from which processor they + // should expect. + // That call makes actual interpolations via multiply method + // available. + // ================================================================== + + void InterpolationMatrix::prepare() + { + int nbelems = _source_field->getField()->getNumberOfTuples(); + for (int ielem=0; ielem < nbelems; ielem++) + { + _row_offsets[ielem+1]+=_row_offsets[ielem]; + } + _mapping.prepareSendRecv(); + } + + + // ======================================================================= + // brief performs t=Ws, where t is the target field, s is the source field + + // The call to this method must be called both on the working side + // and on the idle side. On the working side, the vector T=VT^(-1).(W.S) + // is computed and sent. On the idle side, no computation is done, but the + // result from the working side is received and the field is updated. 
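+
+  // A worked sketch with hypothetical numbers (one component): if source cell 0
+  // carries the value s_0 = 2.0, its row of W holds the coefficients
+  // {(c0, 0.72), (c1, 0.2)}, and the stored denominators are {0.92, 0.95}, then
+  // the working side adds 0.72*2.0/0.92 to the accumulator of target column c0
+  // and 0.2*2.0/0.95 to that of c1; the accumulated column values are then
+  // exchanged through the MxN mapping and written into the target field on the
+  // idle side.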
+ + // param field source field on processors involved on the source side, + // target field on processors on the target side + // ======================================================================= + + void InterpolationMatrix::multiply(MEDCouplingFieldDouble& field) const + { + int nbcomp = field.getArray()->getNumberOfComponents(); + vector target_value(_col_offsets.size()* nbcomp,0.0); + + //computing the matrix multiply on source side + if (_source_group.containsMyRank()) + { + int nbrows = _coeffs.size(); + + // performing W.S + // W is the intersection matrix + // S is the source vector + + for (int irow=0; irowgetNumberOfTuples() ; + double* value = const_cast (field.getArray()->getPointer()); + for (int i=0; igetNumberOfComponents(); + vector source_value(_col_offsets.size()* nbcomp,0.0); + _mapping.reverseSendRecv(&source_value[0],field); + + //treatment of the transpose matrix multiply on the source side + if (_source_group.containsMyRank()) + { + int nbrows = _coeffs.size(); + double *array = field.getArray()->getPointer() ; + + // Initialization + std::fill(array, array+nbrows*nbcomp, 0.0) ; + + //performing WT.T + //WT is W transpose + //T is the target vector + for (int irow = 0; irow < nbrows; irow++) + { + for (int icol = _row_offsets[irow]; icol < _row_offsets[irow+1]; icol++) + { + int colid = _coeffs[irow][icol-_row_offsets[irow]].first; + double value = _coeffs[irow][icol-_row_offsets[irow]].second; + double deno = _deno_reverse_multiply[irow][icol-_row_offsets[irow]]; + for (int icomp=0; icomp& res) const; + void computeLocalRowSum(const std::vector& distantProcs, std::vector >& resPerProcI, + std::vector >& resPerProcD) const; + void computeGlobalRowSum(ElementLocator& elementLocator, std::vector >& denoStrorage, std::vector >& denoStrorageInv); + void computeGlobalColSum(std::vector >& denoStrorage); + void resizeGlobalColSum(std::vector >& denoStrorage); + void fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map >& values, MEDCouplingFieldDouble *surf); + void serializeMe(std::vector< std::vector< std::map > >& data1, std::vector& data2) const; + void initialize(); + void findAdditionnalElements(ElementLocator& elementLocator, std::vector >& elementsToAdd, + const std::vector >& resPerProcI, const std::vector >& globalIdsPartial); + void addGhostElements(const std::vector& distantProcs, const std::vector >& elementsToAdd); + int mergePolicies(const std::vector& policyPartial); + void mergeRowSum(const std::vector< std::vector >& rowsPartialSumD, const std::vector< std::vector >& globalIdsPartial, + std::vector& globalIdsLazySideInteraction, std::vector& sumCorresponding); + void mergeRowSum2(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD, + const std::vector& globalIdsLazySideInteraction, const std::vector& sumCorresponding); + void mergeRowSum3(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD); + void mergeCoeffs(const std::vector& procsInInteraction, const std::vector< std::vector >& rowsPartialSumI, + const std::vector >& globalIdsPartial, std::vector >& denoStrorageInv); + void divideByGlobalRowSum(const std::vector& distantProcs, const std::vector >& resPerProcI, + const std::vector >& resPerProcD, std::vector >& deno); + private: + bool isSurfaceComputationNeeded(const std::string& method) const; + private: + const ParaMEDMEM::ParaFIELD *_source_field; + std::vector _row_offsets; + std::map, int > _col_offsets; + 
MEDCouplingPointSet *_source_support; + MxN_Mapping _mapping; + + const ProcessorGroup& _source_group; + const ProcessorGroup& _target_group; + std::vector< std::vector > _target_volume; + std::vector > > _coeffs; + std::vector > _deno_multiply; + std::vector > _deno_reverse_multiply; + }; +} + +#endif diff --git a/src/ParaMEDMEM/LinearTimeInterpolator.cxx b/src/ParaMEDMEM/LinearTimeInterpolator.cxx new file mode 100644 index 000000000..79524983e --- /dev/null +++ b/src/ParaMEDMEM/LinearTimeInterpolator.cxx @@ -0,0 +1,54 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "LinearTimeInterpolator.hxx" + +using namespace std; + +namespace ParaMEDMEM +{ + + LinearTimeInterpolator::LinearTimeInterpolator( double InterpPrecision, int nStepBefore, + int nStepAfter ): + TimeInterpolator( InterpPrecision, nStepBefore, nStepAfter ) + { + } + + LinearTimeInterpolator::~LinearTimeInterpolator() + { + } + + void LinearTimeInterpolator::doInterp( double time0, double time1, double time, + int recvcount , int nbuff0, int nbuff1, + int **recvbuff0, int **recvbuff1, int *result ) + { + for(int i = 0 ; i < recvcount ; i++ ) + result[i] = (int) ((recvbuff0[0][i]*(time1 - time) + recvbuff1[0][i]*(time - time0))/(time1 - time0) + _interp_precision); + } + + void LinearTimeInterpolator::doInterp( double time0, double time1, double time, + int recvcount , int nbuff0, int nbuff1, + double **recvbuff0, double **recvbuff1, + double *result ) + { + for(int i = 0 ; i < recvcount ; i++ ) + result[i] = (recvbuff0[0][i]*(time1 - time) + recvbuff1[0][i]*(time - time0))/(time1 - time0); + } + +} diff --git a/src/ParaMEDMEM/LinearTimeInterpolator.hxx b/src/ParaMEDMEM/LinearTimeInterpolator.hxx new file mode 100644 index 000000000..0128e42b2 --- /dev/null +++ b/src/ParaMEDMEM/LinearTimeInterpolator.hxx @@ -0,0 +1,47 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
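A small usage sketch of the linear interpolation formula implemented by doInterp() above; the buffers, times and resulting values are illustrative only:

    // result[i] = (buff0[i]*(t1-t) + buff1[i]*(t-t0)) / (t1-t0)
    ParaMEDMEM::LinearTimeInterpolator interp;      // defaults: precision 0, 1 step before/after
    double b0[2] = { 0., 10. };                     // field values at time0
    double b1[2] = { 4., 20. };                     // field values at time1
    double *buff0[1] = { b0 }, *buff1[1] = { b1 };
    double result[2];
    interp.doInterp(/*time0=*/0., /*time1=*/1., /*time=*/0.25,
                    /*recvcount=*/2, /*nbuff0=*/1, /*nbuff1=*/1,
                    buff0, buff1, result);          // result == { 1., 12.5 }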
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __LINEARTIMEINTERPOLATOR_HXX__ +#define __LINEARTIMEINTERPOLATOR_HXX__ + +#include "TimeInterpolator.hxx" + +#include +#include + +namespace ParaMEDMEM +{ + class DEC; + + class LinearTimeInterpolator : public TimeInterpolator + { + public: + LinearTimeInterpolator( double InterpPrecision=0, int nStepBefore=1, + int nStepAfter=1 ) ; + virtual ~LinearTimeInterpolator(); + void doInterp( double time0, double time1, double time, int recvcount, + int nbuff0, int nbuff1, + int **recvbuff0, int **recvbuff1, int *result ); + void doInterp( double time0, double time1, double time, int recvcount, + int nbuff0, int nbuff1, + double **recvbuff0, double **recvbuff1, double *result ); + }; +} + +#endif diff --git a/src/ParaMEDMEM/MPIAccess.cxx b/src/ParaMEDMEM/MPIAccess.cxx new file mode 100644 index 000000000..2ca867122 --- /dev/null +++ b/src/ParaMEDMEM/MPIAccess.cxx @@ -0,0 +1,1088 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "MPIAccess.hxx" +#include "InterpolationUtils.hxx" + +#include + +using namespace std; + +namespace ParaMEDMEM +{ + /*! \defgroup mpi_access MPIAccess + Class \a MPIAccess is the gateway to the MPI library. + It is a helper class that gathers the calls to the MPI + library that are made in the ParaMEDMEM library. This gathering + allows easier gathering of information about the communication + in the library. With MPIAccess, tags are managed automatically + and asynchronous operations are easier. + + It is typically called after the MPI_Init() call in a program. It is afterwards passed as a parameter to the constructors of ParaMEDMEM objects so that they access the MPI library via the MPIAccess. + + As an example, the following code initializes a processor group made of the zero processor. + + \verbatim + #include "MPIAccess.hxx" + #include "ProcessorGroup.hxx" + + int main(int argc, char** argv) + { + //initialization + MPI_Init(&argc, &argv); + ParaMEDMEM::CommInterface comm_interface; + + //setting up a processor group with proc 0 + set procs; + procs.insert(0); + ParaMEDMEM::ProcessorGroup group(procs, comm_interface); + + ParaMEDMEM::MPIAccess mpi_access(group); + + //cleanup + MPI_Finalize(); + } + \endverbatim + */ + + + /*! Creates a MPIAccess that is based on the processors included in \a ProcessorGroup. + This class may be called for easier use of MPI API. 
+ + \param ProcessorGroup MPIProcessorGroup object giving access to group management + \param BaseTag and MaxTag define the range of tags to be used. + Tags are managed by MPIAccess. They are cyclically incremented. + When there is a Send or a Receive operation there is a new RequestId tag returned + to the caller. That RequestId may be used to manage the operation Wait, Check of + status etc... The MPITag internally managed by MPIAccess is used as "tag" argument + in MPI call. + */ + + MPIAccess::MPIAccess(MPIProcessorGroup * ProcessorGroup, int BaseTag, int MaxTag) : + _comm_interface( ProcessorGroup->getCommInterface() ) , + _intra_communicator( ProcessorGroup->getComm() ) + { + void *v ; + int mpitagub ; + int flag ; + //MPI_Comm_get_attr does not run with _IntraCommunicator ??? + //MPI_Comm_get_attr(*_IntraCommunicator,MPID_TAG_UB,&mpitagub,&flag) ; + MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&v,&flag) ; + mpitagub=*(reinterpret_cast(v)); + if ( BaseTag != 0 ) + BaseTag = (BaseTag/MODULO_TAG)*MODULO_TAG ; + if ( MaxTag == 0 ) + MaxTag = (mpitagub/MODULO_TAG-1)*MODULO_TAG ; + MPI_Comm_rank( *_intra_communicator, &_my_rank ) ; + if ( !flag | (BaseTag < 0) | (BaseTag >= MaxTag) | (MaxTag > mpitagub) ) + throw INTERP_KERNEL::Exception("wrong call to MPIAccess constructor"); + + _processor_group = ProcessorGroup ; + _processor_group_size = _processor_group->size() ; + _trace = false ; + + _base_request = -1 ; + _max_request = std::numeric_limits::max() ; + _request = _base_request ; + + _base_MPI_tag = BaseTag ; + _max_MPI_tag = MaxTag ; + + _send_request = new int[ _processor_group_size ] ; + _recv_request = new int[ _processor_group_size ] ; + + _send_requests.resize( _processor_group_size ) ; + _recv_requests.resize( _processor_group_size ) ; + + _send_MPI_tag = new int[ _processor_group_size ] ; + _recv_MPI_Tag = new int[ _processor_group_size ] ; + int i ; + for (i = 0 ; i < _processor_group_size ; i++ ) + { + _send_request[ i ] = _max_request ; + _recv_request[ i ] = _max_request ; + _send_requests[ i ].resize(0) ; + _recv_requests[ i ].resize(0) ; + _send_MPI_tag[ i ] = _max_MPI_tag ; + _recv_MPI_Tag[ i ] = _max_MPI_tag ; + } + MPI_Datatype array_of_types[3] ; + array_of_types[0] = MPI_DOUBLE ; + array_of_types[1] = MPI_DOUBLE ; + array_of_types[2] = MPI_INT ; + int array_of_blocklengths[3] ; + array_of_blocklengths[0] = 1 ; + array_of_blocklengths[1] = 1 ; + array_of_blocklengths[2] = 1 ; + MPI_Aint array_of_displacements[3] ; + array_of_displacements[0] = 0 ; + array_of_displacements[1] = sizeof(double) ; + array_of_displacements[2] = 2*sizeof(double) ; + MPI_Type_struct(3, array_of_blocklengths, array_of_displacements, + array_of_types, &_MPI_TIME) ; + MPI_Type_commit(&_MPI_TIME) ; + } + + MPIAccess::~MPIAccess() + { + delete [] _send_request ; + delete [] _recv_request ; + delete [] _send_MPI_tag ; + delete [] _recv_MPI_Tag ; + MPI_Type_free(&_MPI_TIME) ; + } + + /* + MPIAccess and "RequestIds" : + ============================ + + . WARNING : In the specification document, the distinction + between "MPITags" and "RequestIds" is not clear. "MPITags" + are arguments of calls to MPI. "RequestIds" does not concern + calls to MPI. "RequestIds" are named "tag"as arguments in/out + in the MPIAccess API in the specification documentation. + But in the implementation we have the right name RequestId (or + RecvRequestId/SendRequestId). + + . When we have a MPI write/read request via MPIAccess, we get + an identifier "RequestId". 
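For reference, the TimeMessage datatype that this constructor registers (two doubles followed by an int) could equivalently be built with the non-deprecated MPI-2 call shown below; this is only an illustrative sketch, the patch itself keeps MPI_Type_struct:

    #include <mpi.h>

    MPI_Datatype makeTimeType()
    {
      MPI_Datatype types[3]   = { MPI_DOUBLE, MPI_DOUBLE, MPI_INT };  // time, deltatime, tag
      int          lengths[3] = { 1, 1, 1 };
      MPI_Aint     displs[3]  = { 0, sizeof(double), 2*sizeof(double) };
      MPI_Datatype timeType;
      MPI_Type_create_struct(3, lengths, displs, types, &timeType);
      MPI_Type_commit(&timeType);
      return timeType;
    }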
+    That identifier corresponds to a RequestStruct structure of
+    MPIAccess. Access to that structure goes through the map
+    "_MapOfRequestStruct".
+    The RequestStruct makes it possible to manage the MPI structures
+    MPI_Request and MPI_Status *, and to query information about the
+    request : target, send/recv, tag, [a]synchronous, type, outcount.
+
+    . That identifier is used to control an asynchronous request
+    via MPIAccess : Wait, Test, Probe, etc...
+
+    . In practice a "RequestId" is simply an integer in the interval
+    [0 , 2**32-1]. A single cyclic counter is used for both
+    [I]Sends and [I]Recvs.
+
+    . These "RequestIds" and their associated structures give an easy
+    way to manage asynchronous communications.
+    For example we have mpi_access->Wait( int RequestId ) instead of
+    MPI_Wait(MPI_Request *request, MPI_Status *status).
+
+    . The API of MPIAccess can return the "SendRequestIds" of a "target",
+    the "RecvRequestIds" from a "source", the "SendRequestIds" of
+    all "targets" or the "RecvRequestIds" of all "sources".
+    That avoids having to manage them in Presentation-ParaMEDMEM.
+  */
+
+  int MPIAccess::newRequest( MPI_Datatype datatype, int tag , int destsourcerank ,
+                             bool fromsourcerank , bool asynchronous )
+  {
+    RequestStruct *mpiaccessstruct = new RequestStruct;
+    mpiaccessstruct->MPITag = tag ;
+    mpiaccessstruct->MPIDatatype = datatype ;
+    mpiaccessstruct->MPITarget = destsourcerank ;
+    mpiaccessstruct->MPIIsRecv = fromsourcerank ;
+    MPI_Status *aStatus = new MPI_Status ;
+    mpiaccessstruct->MPIStatus = aStatus ;
+    mpiaccessstruct->MPIAsynchronous = asynchronous ;
+    mpiaccessstruct->MPICompleted = !asynchronous ;
+    mpiaccessstruct->MPIOutCount = -1 ;
+    if ( !asynchronous )
+      {
+        mpiaccessstruct->MPIRequest = MPI_REQUEST_NULL ;
+        mpiaccessstruct->MPIStatus->MPI_SOURCE = destsourcerank ;
+        mpiaccessstruct->MPIStatus->MPI_TAG = tag ;
+        mpiaccessstruct->MPIStatus->MPI_ERROR = MPI_SUCCESS ;
+      }
+    if ( _request == _max_request )
+      _request = _base_request ;
+    _request += 1 ;
+    _map_of_request_struct[_request] = mpiaccessstruct ;
+    if ( fromsourcerank )
+      _recv_request[ destsourcerank ] = _request;
+    else
+      _send_request[ destsourcerank ] = _request;
+    if ( _trace )
+      cout << "NewRequest" << _my_rank << "( " << _request << " ) "
+           << mpiaccessstruct << endl ;
+    return _request ;
+  }
+
+  /*
+    MPIAccess and "tags" (or "MPITags") :
+    =====================================
+
+    . The constructor lets the caller choose the interval of tags to
+    use : [BaseTag , MaxTag].
+    The default is [ 0 , MPI_TAG_UB], MPI_TAG_UB being the maximum tag
+    value of the MPI implementation (at least 32767 = 2**15-1; with the
+    LAM implementation, for instance, the observed MPI_TAG_UB value is
+    7353944). The MPI standard specifies that this value is the same in
+    all processes started by mpirun.
+    When the same IntraCommunicator is used in a process for several
+    distinct data flows (or when several IntraCommunicators share
+    processes), choosing disjoint tag intervals avoids ambiguity
+    and may help debugging.
+
+    . In MPIAccess a tag has two parts (#define MODULO_TAG 10) :
+    + the last decimal digit encodes the MPI datatype ( 1 for
+    TimeMessages, 2 for MPI_INT and 3 for MPI_DOUBLE ) ;
+    + the other digits hold a cyclic message number.
+    + A TimeMessage and its associated DataMessage share the same
+    message number (but their types, and therefore their tags, differ).
+
+    . 
For a Send of a message from a process "source" to a process + "target", we have _send_MPI_tag[target] in the process + source (it contains the last "tag" used for the Send of a pour l'envoi de + message to the process target). + And in the process "target" which receive that message, we have + _recv_MPI_Tag[source] (it contains the last "tag" used for the Recv + of messages from the process source). + Naturally in the MPI norma the values of that tags must be the same. + */ + int MPIAccess::newSendTag( MPI_Datatype datatype, int destrank , int method , + bool asynchronous, int &RequestId ) + { + int tag ; + tag = incrTag( _send_MPI_tag[destrank] ) ; + tag = valTag( tag, method ) ; + _send_MPI_tag[ destrank ] = tag ; + RequestId = newRequest( datatype, tag, destrank , false , asynchronous ) ; + _send_request[ destrank ] = RequestId ; + _send_requests[ destrank ].push_back( RequestId ) ; + return tag ; + } + + int MPIAccess::newRecvTag( MPI_Datatype datatype, int sourcerank , int method , + bool asynchronous, int &RequestId ) + { + int tag ; + tag = incrTag( _recv_MPI_Tag[sourcerank] ) ; + tag = valTag( tag, method ) ; + _recv_MPI_Tag[ sourcerank ] = tag ; + RequestId = newRequest( datatype, tag , sourcerank , true , asynchronous ) ; + _recv_request[ sourcerank ] = RequestId ; + _recv_requests[ sourcerank ].push_back( RequestId ) ; + return tag ; + } + + // Returns the number of all SendRequestIds that may be used to allocate + // ArrayOfSendRequests for the call to SendRequestIds + int MPIAccess::sendRequestIdsSize() + { + int size = 0; + for (int i = 0 ; i < _processor_group_size ; i++ ) + size += _send_requests[ i ].size() ; + return size ; + } + + // Returns in ArrayOfSendRequests with the dimension "size" all the + // SendRequestIds + int MPIAccess::sendRequestIds(int size, int *ArrayOfSendRequests) + { + int destrank ; + int i = 0 ; + for ( destrank = 0 ; destrank < _processor_group_size ; destrank++ ) + { + list< int >::const_iterator iter ; + for (iter = _send_requests[ destrank ].begin() ; iter != _send_requests[destrank].end() ; iter++ ) + ArrayOfSendRequests[i++] = *iter ; + } + return i ; + } + + // Returns the number of all RecvRequestIds that may be used to allocate + // ArrayOfRecvRequests for the call to RecvRequestIds + int MPIAccess::recvRequestIdsSize() + { + int size = 0 ; + for (int i = 0 ; i < _processor_group_size ; i++ ) + size += _recv_requests[ i ].size() ; + return size ; + } + + // Returns in ArrayOfRecvRequests with the dimension "size" all the + // RecvRequestIds + int MPIAccess::recvRequestIds(int size, int *ArrayOfRecvRequests) + { + int sourcerank ; + int i = 0 ; + for ( sourcerank = 0 ; sourcerank < _processor_group_size ; sourcerank++ ) + { + list< int >::const_iterator iter ; + for (iter = _recv_requests[ sourcerank ].begin() ; iter != _recv_requests[sourcerank].end() ; iter++ ) + ArrayOfRecvRequests[i++] = *iter ; + } + return i ; + } + + // Returns in ArrayOfSendRequests with the dimension "size" all the + // SendRequestIds to a destination rank + int MPIAccess::sendRequestIds(int destrank, int size, int *ArrayOfSendRequests) + { + if (size < (int)_send_requests[destrank].size() ) + throw INTERP_KERNEL::Exception("wrong call to MPIAccess::SendRequestIds"); + int i = 0 ; + list< int >::const_iterator iter ; + for (iter = _send_requests[ destrank ].begin() ; iter != _send_requests[destrank].end() ; iter++ ) + ArrayOfSendRequests[i++] = *iter ; + return _send_requests[destrank].size() ; + } + + // Returns in ArrayOfRecvRequests with the dimension "size" 
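The tag layout described in the comment above can be summarised by a couple of helpers (hypothetical names, shown only to make the arithmetic behind incrTag()/valTag() concrete):

    // MODULO_TAG == 10: last decimal digit = datatype, remaining digits = cyclic message number.
    int composeTag(int messageNumber, int methodId) { return messageNumber*10 + methodId; } // methodId: 1 time, 2 int, 3 double
    int methodOfTag(int tag)                        { return tag % 10; }
    int numberOfTag(int tag)                        { return tag / 10; }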
all the + // RecvRequestIds from a sourcerank + int MPIAccess::recvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests) + { + if (size < (int)_recv_requests[sourcerank].size() ) + throw INTERP_KERNEL::Exception("wrong call to MPIAccess::RecvRequestIds"); + int i = 0 ; + list< int >::const_iterator iter ; + _recv_requests[ sourcerank ] ; + for (iter = _recv_requests[ sourcerank ].begin() ; iter != _recv_requests[sourcerank].end() ; iter++ ) + ArrayOfRecvRequests[i++] = *iter ; + return _recv_requests[sourcerank].size() ; + } + + // Send in synchronous mode count values of type datatype from buffer to target + // (returns RequestId identifier even if the corresponding structure is deleted : + // it is only in order to have the same signature as the asynchronous mode) + int MPIAccess::send(void* buffer, int count, MPI_Datatype datatype, int target, int &RequestId) + { + int sts = MPI_SUCCESS ; + RequestId = -1 ; + if ( count ) + { + _MessageIdent aMethodIdent = methodId( datatype ) ; + int MPItag = newSendTag( datatype, target , aMethodIdent , false , RequestId ) ; + if ( aMethodIdent == _message_time ) + { + TimeMessage *aTimeMsg = (TimeMessage *) buffer ; + aTimeMsg->tag = MPItag ; + } + deleteRequest( RequestId ) ; + sts = _comm_interface.send(buffer, count, datatype, target, MPItag, + *_intra_communicator ) ; + if ( _trace ) + cout << "MPIAccess::Send" << _my_rank << " SendRequestId " + << RequestId << " count " << count << " target " << target + << " MPItag " << MPItag << endl ; + } + return sts ; + } + + // Receive (read) in synchronous mode count values of type datatype in buffer from source + // (returns RequestId identifier even if the corresponding structure is deleted : + // it is only in order to have the same signature as the asynchronous mode) + // The output argument OutCount is optionnal : *OutCount <= count + int MPIAccess::recv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId, int *OutCount) + { + int sts = MPI_SUCCESS ; + RequestId = -1 ; + if ( OutCount != NULL ) + *OutCount = -1 ; + if ( count ) + { + _MessageIdent aMethodIdent = methodId( datatype ) ; + int MPItag = newRecvTag( datatype, source , aMethodIdent , false , RequestId ) ; + sts = _comm_interface.recv(buffer, count, datatype, source, MPItag, + *_intra_communicator , MPIStatus( RequestId ) ) ; + int outcount = 0 ; + if ( sts == MPI_SUCCESS ) + { + MPI_Datatype datatype = MPIDatatype( RequestId ) ; + _comm_interface.getCount(MPIStatus( RequestId ), datatype, &outcount ) ; + setMPIOutCount( RequestId , outcount ) ; + setMPICompleted( RequestId , true ) ; + deleteStatus( RequestId ) ; + } + if ( OutCount != NULL ) + *OutCount = outcount ; + if ( _trace ) + cout << "MPIAccess::Recv" << _my_rank << " RecvRequestId " + << RequestId << " count " << count << " source " << source + << " MPItag " << MPItag << endl ; + deleteRequest( RequestId ) ; + } + return sts ; + } + + // Send in asynchronous mode count values of type datatype from buffer to target + // Returns RequestId identifier. 
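A minimal usage sketch of these blocking wrappers; mpi_access is assumed to be an MPIAccess instance and myRank the caller's rank:

    double buf[10];
    int reqId;                                             // RequestId handed back by MPIAccess
    if (myRank == 0)
      mpi_access.send(buf, 10, MPI_DOUBLE, /*target=*/1, reqId);
    else if (myRank == 1)
    {
      int outcount = 0;
      mpi_access.recv(buf, 10, MPI_DOUBLE, /*source=*/0, reqId, &outcount);
      // outcount (<= 10) is the number of values actually received
    }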
+ int MPIAccess::ISend(void* buffer, int count, MPI_Datatype datatype, int target, int &RequestId) + { + int sts = MPI_SUCCESS ; + RequestId = -1 ; + if ( count ) + { + _MessageIdent aMethodIdent = methodId( datatype ) ; + int MPItag = newSendTag( datatype, target , aMethodIdent , true , RequestId ) ; + if ( aMethodIdent == _message_time ) + { + TimeMessage *aTimeMsg = (TimeMessage *) buffer ; + aTimeMsg->tag = MPItag ; + } + MPI_Request *aSendRequest = MPIRequest( RequestId ) ; + if ( _trace ) + { + cout << "MPIAccess::ISend" << _my_rank << " ISendRequestId " + << RequestId << " count " << count << " target " << target + << " MPItag " << MPItag << endl ; + if ( MPItag == 1 ) + cout << "MPIAccess::ISend" << _my_rank << " time " + << ((TimeMessage *)buffer)->time << " " << ((TimeMessage *)buffer)->deltatime + << endl ; + } + sts = _comm_interface.Isend(buffer, count, datatype, target, MPItag, + *_intra_communicator , aSendRequest) ; + } + return sts ; + } + + // Receive (read) in asynchronous mode count values of type datatype in buffer from source + // returns RequestId identifier. + int MPIAccess::IRecv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId) + { + int sts = MPI_SUCCESS ; + RequestId = -1 ; + if ( count ) + { + _MessageIdent aMethodIdent = methodId( datatype ) ; + int MPItag = newRecvTag( datatype, source , aMethodIdent , true , RequestId ) ; + MPI_Request *aRecvRequest = MPIRequest( RequestId ) ; + if ( _trace ) + { + cout << "MPIAccess::IRecv" << _my_rank << " IRecvRequestId " + << RequestId << " count " << count << " source " << source + << " MPItag " << MPItag << endl ; + if ( MPItag == 1 ) + cout << "MPIAccess::ISend" << _my_rank << " time " + << ((TimeMessage *)buffer)->time << " " << ((TimeMessage *)buffer)->deltatime + << endl ; + } + sts = _comm_interface.Irecv(buffer, count, datatype, source, MPItag, + *_intra_communicator , aRecvRequest) ; + } + return sts ; + } + + // Perform a Send and a Recv in synchronous mode + int MPIAccess::sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, + int dest, int &SendRequestId, + void* recvbuf, int recvcount, MPI_Datatype recvtype, + int source, int &RecvRequestId, int *OutCount) + { + int sts = MPI_SUCCESS ; + SendRequestId = -1 ; + RecvRequestId = -1 ; + if ( recvcount ) + sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ; + int outcount = -1 ; + if ( _trace ) + cout << "MPIAccess::SendRecv" << _my_rank << " IRecv RecvRequestId " + << RecvRequestId << endl ; + if ( sts == MPI_SUCCESS ) + { + if ( sendcount ) + sts = send(sendbuf, sendcount, sendtype, dest, SendRequestId) ; + if ( _trace ) + cout << "MPIAccess::SendRecv" << _my_rank << " Send SendRequestId " + << SendRequestId << endl ; + if ( sts == MPI_SUCCESS && recvcount ) + { + sts = wait( RecvRequestId ) ; + outcount = MPIOutCount( RecvRequestId ) ; + if ( _trace ) + cout << "MPIAccess::SendRecv" << _my_rank << " IRecv RecvRequestId " + << RecvRequestId << " outcount " << outcount << endl ; + } + } + if ( OutCount != NULL ) + { + *OutCount = outcount ; + if ( _trace ) + cout << "MPIAccess::SendRecv" << _my_rank << " *OutCount = " << *OutCount + << endl ; + } + deleteRequest( RecvRequestId ) ; + return sts ; + } + + // Perform a Send and a Recv in asynchronous mode + int MPIAccess::ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, + int dest, int &SendRequestId, + void* recvbuf, int recvcount, MPI_Datatype recvtype, + int source, int &RecvRequestId) + { + int sts = MPI_SUCCESS ; + SendRequestId = -1 ; + 
RecvRequestId = -1 ; + if ( recvcount ) + sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ; + if ( sts == MPI_SUCCESS ) + if ( sendcount ) + sts = ISend(sendbuf, sendcount, sendtype, dest, SendRequestId) ; + return sts ; + } + + // Perform a wait of a Send or Recv asynchronous Request + // Do nothing for a synchronous Request + // Manage MPI_Request * and MPI_Status * structure + int MPIAccess::wait( int RequestId ) + { + int status = MPI_SUCCESS ; + if ( !MPICompleted( RequestId ) ) + { + if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL ) + { + if ( _trace ) + cout << "MPIAccess::Wait" << _my_rank << " -> wait( " << RequestId + << " ) MPIRequest " << MPIRequest( RequestId ) << " MPIStatus " + << MPIStatus( RequestId ) << " MPITag " << MPITag( RequestId ) + << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ; + status = _comm_interface.wait(MPIRequest( RequestId ), MPIStatus( RequestId )) ; + } + else + { + if ( _trace ) + cout << "MPIAccess::Wait" << _my_rank << " MPIRequest == MPI_REQUEST_NULL" + << endl ; + } + setMPICompleted( RequestId , true ) ; + if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) ) + { + MPI_Datatype datatype = MPIDatatype( RequestId ) ; + int outcount ; + status = _comm_interface.getCount(MPIStatus( RequestId ), datatype, + &outcount ) ; + if ( status == MPI_SUCCESS ) + { + setMPIOutCount( RequestId , outcount ) ; + deleteStatus( RequestId ) ; + if ( _trace ) + cout << "MPIAccess::Wait" << _my_rank << " RequestId " << RequestId + << "MPIIsRecv " << MPIIsRecv( RequestId ) << " outcount " << outcount + << endl ; + } + else + { + if ( _trace ) + cout << "MPIAccess::Wait" << _my_rank << " MPIIsRecv " + << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ; + } + } + else + { + if ( _trace ) + cout << "MPIAccess::Wait" << _my_rank << " MPIIsRecv " << MPIIsRecv( RequestId ) + << " MPIOutCount " << MPIOutCount( RequestId ) << endl ; + } + } + if ( _trace ) + cout << "MPIAccess::Wait" << _my_rank << " RequestId " << RequestId + << " Request " << MPIRequest( RequestId ) + << " Status " << MPIStatus( RequestId ) << " MPICompleted " + << MPICompleted( RequestId ) << " MPIOutCount " << MPIOutCount( RequestId ) + << endl ; + return status ; + } + + // Perform a "test" of a Send or Recv asynchronous Request + // If the request is done, returns true in the flag argument + // If the request is not finished, returns false in the flag argument + // Do nothing for a synchronous Request + // Manage MPI_request * and MPI_status * structure + int MPIAccess::test(int RequestId, int &flag) + { + int status = MPI_SUCCESS ; + flag = MPICompleted( RequestId ) ; + if ( _trace ) + cout << "MPIAccess::Test" << _my_rank << " flag " << flag ; + if ( MPIIsRecv( RequestId ) ) + { + if ( _trace ) + cout << " Recv" ; + } + else + { + if ( _trace ) + cout << " Send" ; + } + if( _trace ) + cout << "Request" << RequestId << " " << MPIRequest( RequestId ) + << " Status " << MPIStatus( RequestId ) << endl ; + if ( !flag ) + { + if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL ) + { + if ( _trace ) + cout << "MPIAccess::Test" << _my_rank << " -> test( " << RequestId + << " ) MPIRequest " << MPIRequest( RequestId ) + << " MPIStatus " << MPIStatus( RequestId ) + << " MPITag " << MPITag( RequestId ) + << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ; + status = _comm_interface.test(MPIRequest( RequestId ), &flag, + MPIStatus( RequestId )) ; + } + else + { + if ( _trace ) + cout << "MPIAccess::Test" << _my_rank << " MPIRequest == MPI_REQUEST_NULL" + << endl ; + } + if ( flag ) 
+ { + setMPICompleted( RequestId , true ) ; + if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) ) + { + int outcount ; + MPI_Datatype datatype = MPIDatatype( RequestId ) ; + status = _comm_interface.getCount( MPIStatus( RequestId ), datatype, + &outcount ) ; + if ( status == MPI_SUCCESS ) + { + setMPIOutCount( RequestId , outcount ) ; + deleteStatus( RequestId ) ; + if ( _trace ) + cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv " + << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ; + } + else + { + if ( _trace ) + cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv " + << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ; + } + } + else + { + if ( _trace ) + cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv " + << MPIIsRecv( RequestId ) << " MPIOutCount " + << MPIOutCount( RequestId ) << endl ; + } + } + } + if ( _trace ) + cout << "MPIAccess::Test" << _my_rank << " RequestId " << RequestId + << " flag " << flag << " MPICompleted " << MPICompleted( RequestId ) + << " MPIOutCount " << MPIOutCount( RequestId ) << endl ; + return status ; + } + + int MPIAccess::waitAny(int count, int *array_of_RequestIds, int &RequestId) + { + int status = MPI_ERR_OTHER ; + RequestId = -1 ; + cout << "MPIAccess::WaitAny not yet implemented" << endl ; + return status ; + } + + int MPIAccess::testAny(int count, int *array_of_RequestIds, int &RequestId, int &flag) + { + int status = MPI_ERR_OTHER ; + RequestId = -1 ; + flag = 0 ; + cout << "MPIAccess::TestAny not yet implemented" << endl ; + return status ; + } + + // Perform a wait of each Send or Recv asynchronous Request of the array + // array_of_RequestIds of size "count". + // That array may be filled with a call to SendRequestIdsSize or RecvRequestIdsSize + // Do nothing for a synchronous Request + // Manage MPI_Request * and MPI_Status * structure + int MPIAccess::waitAll(int count, int *array_of_RequestIds) + { + if ( _trace ) + cout << "WaitAll" << _my_rank << " : count " << count << endl ; + int status ; + int retstatus = MPI_SUCCESS ; + int i ; + for ( i = 0 ; i < count ; i++ ) + { + if ( _trace ) + cout << "WaitAll" << _my_rank << " " << i << " -> Wait( " + << array_of_RequestIds[i] << " )" << endl ; + status = wait( array_of_RequestIds[i] ) ; + if ( status != MPI_SUCCESS ) + retstatus = status ; + } + if ( _trace ) + cout << "EndWaitAll" << _my_rank << endl ; + return retstatus ; + } + + // Perform a "test" of each Send or Recv asynchronous Request of the array + // array_of_RequestIds of size "count". 
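The asynchronous counterpart combines ISend()/IRecv() with wait() or waitAll(); a sketch, with peerRank and the buffer sizes chosen arbitrarily:

    int sendId, recvId;
    double out[100], in[100];
    mpi_access.ISend(out, 100, MPI_DOUBLE, peerRank, sendId);  // non-blocking send
    mpi_access.IRecv(in, 100, MPI_DOUBLE, peerRank, recvId);   // non-blocking receive
    // ... overlap computation here ...
    int ids[2] = { sendId, recvId };
    mpi_access.waitAll(2, ids);                                // complete both requests
    mpi_access.deleteRequests(2, ids);                         // release the bookkeeping structures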
+ // That array may be filled with a call to SendRequestIdsSize or RecvRequestIdsSize + // If all requests are done, returns true in the flag argument + // If all requests are not finished, returns false in the flag argument + // Do nothing for a synchronous Request + // Manage MPI_Request * and MPI_Status * structure + int MPIAccess::testAll(int count, int *array_of_RequestIds, int &flag) + { + if ( _trace ) + cout << "TestAll" << _my_rank << " : count " << count << endl ; + int status ; + int retstatus = MPI_SUCCESS ; + bool retflag = true ; + int i ; + for ( i = 0 ; i < count ; i++ ) + { + status = test( array_of_RequestIds[i] , flag ) ; + retflag = retflag && (flag != 0) ; + if ( status != MPI_SUCCESS ) + retstatus = status ; + } + flag = retflag ; + if ( _trace ) + cout << "EndTestAll" << _my_rank << endl ; + return retstatus ; + } + + int MPIAccess::waitSome(int count, int *array_of_RequestIds, int outcount, + int *outarray_of_RequestIds) + { + int status = MPI_ERR_OTHER ; + cout << "MPIAccess::WaitSome not yet implemented" << endl ; + return status ; + } + + int MPIAccess::testSome(int count, int *array_of_RequestIds, int outcounts, + int *outarray_of_RequestIds) + { + int status = MPI_ERR_OTHER ; + cout << "MPIAccess::TestSome not yet implemented" << endl ; + return status ; + } + + // Probe checks if a message is available for read from FromSource rank. + // Returns the corresponding source, MPITag, datatype and outcount + // Probe is a blocking call which wait until a message is available + int MPIAccess::probe(int FromSource, int &source, int &MPITag, + MPI_Datatype &myDatatype, int &outcount) + { + MPI_Status aMPIStatus ; + int sts = _comm_interface.probe( FromSource, MPI_ANY_TAG, + *_intra_communicator , &aMPIStatus ) ; + if ( sts == MPI_SUCCESS ) + { + source = aMPIStatus.MPI_SOURCE ; + MPITag = aMPIStatus.MPI_TAG ; + int MethodId = (MPITag % MODULO_TAG) ; + myDatatype = datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ; + _comm_interface.getCount(&aMPIStatus, myDatatype, &outcount ) ; + if ( _trace ) + cout << "MPIAccess::Probe" << _my_rank << " FromSource " << FromSource + << " source " << source << " MPITag " << MPITag << " MethodId " + << MethodId << " datatype " << myDatatype << " outcount " << outcount + << endl ; + } + else + { + source = -1 ; + MPITag = -1 ; + myDatatype = 0 ; + outcount = -1 ; + } + return sts ; + } + + // IProbe checks if a message is available for read from FromSource rank. 
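probe() enables the usual probe-then-receive pattern when the incoming message size is not known in advance; a sketch assuming the pending message carries MPI_DOUBLE data:

    int source, tag, outcount, reqId;
    MPI_Datatype dtype;
    mpi_access.probe(MPI_ANY_SOURCE, source, tag, dtype, outcount); // blocks until a message is pending
    std::vector<double> buf(outcount);                              // sized from the probed outcount
    mpi_access.recv(&buf[0], outcount, dtype, source, reqId);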
+ // If there is a message available, returns the corresponding source, + // MPITag, datatype and outcount with flag = true + // If not, returns flag = false + int MPIAccess::IProbe(int FromSource, int &source, int &MPITag, + MPI_Datatype &myDataType, int &outcount, int &flag) + { + MPI_Status aMPIStatus ; + int sts = _comm_interface.Iprobe( FromSource, MPI_ANY_TAG, + *_intra_communicator , &flag, + &aMPIStatus ) ; + if ( sts == MPI_SUCCESS && flag ) + { + source = aMPIStatus.MPI_SOURCE ; + MPITag = aMPIStatus.MPI_TAG ; + int MethodId = (MPITag % MODULO_TAG) ; + myDataType = datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ; + _comm_interface.getCount(&aMPIStatus, myDataType, &outcount ) ; + if ( _trace ) + cout << "MPIAccess::IProbe" << _my_rank << " FromSource " << FromSource + << " source " << source << " MPITag " << MPITag << " MethodId " + << MethodId << " datatype " << myDataType << " outcount " << outcount + << " flag " << flag << endl ; + } + else + { + source = -1 ; + MPITag = -1 ; + myDataType = 0 ; + outcount = -1 ; + } + return sts ; + } + + // Cancel concerns a "posted" asynchronous IRecv + // Returns flag = true if the receiving request was successfully canceled + // Returns flag = false if the receiving request was finished but not canceled + // Use cancel, wait and test_cancelled of the MPI API + int MPIAccess::cancel( int RecvRequestId, int &flag ) + { + flag = 0 ; + int sts = _comm_interface.cancel( MPIRequest( RecvRequestId ) ) ; + if ( sts == MPI_SUCCESS ) + { + sts = _comm_interface.wait( MPIRequest( RecvRequestId ) , + MPIStatus( RecvRequestId ) ) ; + if ( sts == MPI_SUCCESS ) + sts = _comm_interface.testCancelled( MPIStatus( RecvRequestId ) , &flag ) ; + } + return sts ; + } + + // Cancel concerns a "pending" receiving message (without IRecv "posted") + // Returns flag = true if the message was successfully canceled + // Returns flag = false if the receiving request was finished but not canceled + // Use Irecv, cancel, wait and test_cancelled of the MPI API + int MPIAccess::cancel( int source, int theMPITag, MPI_Datatype datatype, int outcount, int &flag ) + { + int sts ; + MPI_Aint extent ; + flag = 0 ; + sts = MPI_Type_extent( datatype , &extent ) ; + if ( sts == MPI_SUCCESS ) + { + void * recvbuf = malloc( extent*outcount ) ; + MPI_Request aRecvRequest ; + if ( _trace ) + cout << "MPIAccess::Cancel" << _my_rank << " Irecv extent " << extent + << " datatype " << datatype << " source " << source << " theMPITag " + << theMPITag << endl ; + sts = _comm_interface.Irecv( recvbuf, outcount, datatype, source, theMPITag, + *_intra_communicator , &aRecvRequest ) ; + if ( sts == MPI_SUCCESS ) + { + sts = _comm_interface.cancel( &aRecvRequest ) ; + if ( _trace ) + cout << "MPIAccess::Cancel" << _my_rank << " theMPITag " << theMPITag + << " cancel done" << endl ; + if ( sts == MPI_SUCCESS ) + { + MPI_Status aStatus ; + if ( _trace ) + cout << "MPIAccess::Cancel" << _my_rank << " wait" << endl ; + sts = _comm_interface.wait( &aRecvRequest , &aStatus ) ; + if ( sts == MPI_SUCCESS ) + { + if ( _trace ) + cout << "MPIAccess::Cancel" << _my_rank << " test_cancelled" << endl ; + sts = _comm_interface.testCancelled( &aStatus , &flag ) ; + } + } + } + if ( _trace && datatype == timeType() ) + cout << "MPIAccess::Cancel" << _my_rank << " time " + << ((TimeMessage *) recvbuf)->time << " " + << ((TimeMessage *) recvbuf)->deltatime << endl ; + free( recvbuf ) ; + } + if ( _trace ) + cout << "MPIAccess::Cancel" << _my_rank << " flag " << flag << endl ; + return sts ; + } + + + // CancelAll 
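A sketch of the first cancel() overload, used to withdraw a posted IRecv that may never be matched (peerRank is an assumed variable):

    int recvId, flag = 0;
    double buf[10];
    mpi_access.IRecv(buf, 10, MPI_DOUBLE, peerRank, recvId);
    // ... later, if the matching send will never arrive ...
    mpi_access.cancel(recvId, flag);          // flag != 0 if the request was really cancelled
    mpi_access.deleteRequest(recvId);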
concerns all "pending" receiving message (without IRecv "posted") + // CancelAll use IProbe and Cancel (see obove) + int MPIAccess::cancelAll() + { + int sts = MPI_SUCCESS ; + int target ; + int source ; + int MPITag ; + MPI_Datatype datatype ; + int outcount ; + int flag ; + for ( target = 0 ; target < _processor_group_size ; target++ ) + { + sts = IProbe(target, source, MPITag, datatype, outcount, flag) ; + if ( sts == MPI_SUCCESS && flag ) + { + sts = cancel(source, MPITag, datatype, outcount, flag) ; + if ( _trace ) + cout << "MPIAccess::CancelAll" << _my_rank << " source " << source + << " MPITag " << MPITag << " datatype " << datatype + << " outcount " << outcount << " Cancel flag " << flag << endl ; + if ( sts != MPI_SUCCESS ) + break ; + } + else if ( sts != MPI_SUCCESS ) + break ; + } + return sts ; + } + + // Same as barrier of MPI API + int MPIAccess::barrier() + { + int status = _comm_interface.barrier( *_intra_communicator ) ; + return status ; + } + + // Same as Error_string of MPI API + int MPIAccess::errorString(int errorcode, char *string, int *resultlen) const + { + return _comm_interface.errorString( errorcode, string, resultlen) ; + } + + // Returns source, tag, error and outcount corresponding to receiving RequestId + // By default the corresponding structure of RequestId is deleted + int MPIAccess::status(int RequestId, int &source, int &tag, int &error, + int &outcount, bool keepRequestStruct) + { + MPI_Status *myStatus = MPIStatus( RequestId ) ; + if ( _trace ) + cout << "MPIAccess::status" << _my_rank << " RequestId " << RequestId + << " status " << myStatus << endl ; + if ( myStatus != NULL && MPIAsynchronous( RequestId ) && + MPICompleted( RequestId ) ) + { + if ( MPIIsRecv( RequestId ) ) + { + source = myStatus->MPI_SOURCE ; + tag = myStatus->MPI_TAG ; + error = myStatus->MPI_ERROR ; + MPI_Datatype datatype = MPIDatatype( RequestId ) ; + _comm_interface.getCount(myStatus, datatype, &outcount ) ; + if ( _trace ) + cout << "MPIAccess::status" << _my_rank << " RequestId " << RequestId + << " status " << myStatus << " outcount " << outcount << endl ; + setMPIOutCount( RequestId , outcount ) ; + } + else + { + source = MPITarget( RequestId ) ; + tag = MPITag( RequestId ) ; + error = 0 ; + outcount = MPIOutCount( RequestId ) ; + } + if ( !keepRequestStruct ) + deleteRequest( RequestId ) ; + return MPI_SUCCESS ; + } + else + { + source = MPITarget( RequestId ) ; + tag = MPITag( RequestId ) ; + error = 0 ; + outcount = MPIOutCount( RequestId ) ; + } + return MPI_SUCCESS ; + } + + int MPIAccess::requestFree( MPI_Request *request ) + { + return _comm_interface.requestFree( request ) ; + } + + // Print all informations of all known requests for debugging purpose + void MPIAccess::check() const + { + int i = 0 ; + map< int , RequestStruct * >::const_iterator MapOfRequestStructiterator ; + cout << "MPIAccess::Check" << _my_rank << "_map_of_request_struct_size " + << _map_of_request_struct.size() << endl ; + for ( MapOfRequestStructiterator = _map_of_request_struct.begin() ; + MapOfRequestStructiterator != _map_of_request_struct.end() ; + MapOfRequestStructiterator++ ) + { + if ( MapOfRequestStructiterator->second != NULL ) + { + cout << " Check" << _my_rank << " " << i << ". 
Request" + << MapOfRequestStructiterator->first << "-->" ; + if ( (MapOfRequestStructiterator->second)->MPIAsynchronous ) + cout << "I" ; + if ( (MapOfRequestStructiterator->second)->MPIIsRecv ) + cout << "Recv from " ; + else + cout << "Send to " ; + cout << (MapOfRequestStructiterator->second)->MPITarget + << " MPITag " << (MapOfRequestStructiterator->second)->MPITag + << " DataType " << (MapOfRequestStructiterator->second)->MPIDatatype + << " Request " << (MapOfRequestStructiterator->second)->MPIRequest + << " Status " << (MapOfRequestStructiterator->second)->MPIStatus + << " Completed " << (MapOfRequestStructiterator->second)->MPICompleted + << endl ; + } + i++ ; + } + } + + // Returns the MPI size of a TimeMessage + MPI_Aint MPIAccess::timeExtent() const + { + MPI_Aint aextent ; + MPI_Type_extent( _MPI_TIME , &aextent ) ; + return aextent ; + } + + // Returns the MPI size of a MPI_INT + MPI_Aint MPIAccess::intExtent() const + { + MPI_Aint aextent ; + MPI_Type_extent( MPI_INT , &aextent ) ; + return aextent ; + } + + // Returns the MPI size of a MPI_DOUBLE + MPI_Aint MPIAccess::doubleExtent() const + { + MPI_Aint aextent ; + MPI_Type_extent( MPI_DOUBLE , &aextent ) ; + return aextent ; + } + + // Outputs fields of a TimeMessage structure + ostream & operator<< (ostream & f ,const TimeMessage & aTimeMsg ) + { + f << " time " << aTimeMsg.time << " deltatime " << aTimeMsg.deltatime + << " tag " << aTimeMsg.tag ; + return f; + } + + // Outputs the DataType coded in a Tag + ostream & operator<< (ostream & f ,const _MessageIdent & methodtype ) + { + switch (methodtype) + { + case _message_time : + f << " MethodTime "; + break; + case _message_int : + f << " MPI_INT "; + break; + case _message_double : + f << " MPI_DOUBLE "; + break; + default : + f << " UnknownMethodType "; + break; + } + return f; + } +} diff --git a/src/ParaMEDMEM/MPIAccess.hxx b/src/ParaMEDMEM/MPIAccess.hxx new file mode 100644 index 000000000..d438c8cec --- /dev/null +++ b/src/ParaMEDMEM/MPIAccess.hxx @@ -0,0 +1,471 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __MPIACCESS_HXX__ +#define __MPIACCESS_HXX__ + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" + +#include +#include +#include +#include + +namespace ParaMEDMEM +{ + typedef struct + { + double time ; + double deltatime ; + int tag ; + } TimeMessage; + + static MPI_Request mpirequestnull = MPI_REQUEST_NULL ; + enum _MessageIdent { _message_unknown, _message_time, _message_int, _message_double } ; + + class MPIAccess + { + private: + struct RequestStruct + { + int MPITarget ; + bool MPIIsRecv ; + int MPITag ; + bool MPIAsynchronous ; + bool MPICompleted ; + MPI_Datatype MPIDatatype ; + MPI_Request MPIRequest ; + MPI_Status *MPIStatus ; + int MPIOutCount ; + }; + public: + MPIAccess(MPIProcessorGroup * ProcessorGroup, int BaseTag=0, int MaxTag=0) ; + virtual ~MPIAccess() ; + + void trace( bool trace = true ) ; + + void deleteRequest( int RequestId ) ; + void deleteRequests(int size , int *ArrayOfSendRequests ) ; + + int sendMPITag(int destrank) ; + int recvMPITag(int sourcerank) ; + + int sendRequestIdsSize() ; + int sendRequestIds(int size, int *ArrayOfSendRequests) ; + int recvRequestIdsSize() ; + int recvRequestIds(int size, int *ArrayOfRecvRequests) ; + + int sendRequestIdsSize(int destrank) ; + int sendRequestIds(int destrank, int size, int *ArrayOfSendRequests) ; + int recvRequestIdsSize(int sourcerank) ; + int recvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests) ; + + int send(void* buffer, int count, MPI_Datatype datatype, int target, + int &RequestId) ; + int ISend(void* buffer, int count, MPI_Datatype datatype, int target, + int &RequestId) ; + int recv(void* buffer, int count, MPI_Datatype datatype, int source, + int &RequestId, int *OutCount=NULL) ; + int IRecv(void* buffer, int count, MPI_Datatype datatype, int source, + int &RequestId) ; + int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest, + int &SendRequestId, void* recvbuf, int recvcount, + MPI_Datatype recvtype, int source, + int &RecvRequestId, int *OutCount=NULL) ; + int ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest, + int &SendRequestId, void* recvbuf, int recvcount, + MPI_Datatype recvtype, int source, int &RecvRequestId) ; + + int wait(int RequestId) ; + int test(int RequestId, int &flag) ; + int waitAny(int count, int *array_of_RequestIds, int &RequestId) ; + int testAny(int count, int *array_of_RequestIds, int &RequestId, int &flag) ; + int waitAll(int count, int *array_of_RequestIds) ; + int testAll(int count, int *array_of_RequestIds, int &flag) ; + int waitSome(int count, int *array_of_RequestIds, int outcount, + int *outarray_of_RequestIds) ; + int testSome(int count, int *array_of_RequestIds, int outcounts, + int *outarray_of_RequestIds) ; + int probe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype, + int &outcount) ; + int IProbe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype, + int &outcount, int &flag) ; + int cancel( int RecvRequestId, int &flag ) ; + int cancel( int source, int MPITag, MPI_Datatype datatype, int outcount, + int &flag ) ; + int cancelAll() ; + int barrier() ; + int errorString(int errorcode, char *string, int *resultlen) const ; + 
int status(int RequestId, int &source, int &tag, int &error, int &outcount, + bool keepRequestStruct=false) ; + int requestFree( MPI_Request *request ) ; + + void check() const ; + + MPI_Datatype timeType() const ; + bool isTimeMessage( int MPITag ) const ; + MPI_Aint timeExtent() const ; + MPI_Aint intExtent() const ; + MPI_Aint doubleExtent() const ; + MPI_Aint extent( MPI_Datatype datatype ) const ; + + int MPITag( int RequestId ) ; + int MPITarget( int RequestId ) ; + bool MPIIsRecv( int RequestId ) ; + bool MPIAsynchronous( int RequestId ) ; + bool MPICompleted( int RequestId ) ; + MPI_Datatype MPIDatatype( int RequestId ) ; + int MPIOutCount( int RequestId ) ; + + private: + int newRequest( MPI_Datatype datatype, int tag , int destsourcerank , + bool fromsourcerank , bool asynchronous ) ; + int newSendTag( MPI_Datatype datatype, int destrank , int method , + bool asynchronous, int &RequestId ) ; + int newRecvTag( MPI_Datatype datatype, int sourcerank , int method , + bool asynchronous, int &RequestId ) ; + int incrTag( int prevtag ) ; + int valTag( int tag, int method ) ; + + void deleteSendRecvRequest( int RequestId ) ; + + void deleteStatus( int RequestId ) ; + + MPI_Request *MPIRequest( int RequestId ) ; + MPI_Status *MPIStatus( int RequestId ) ; + void setMPICompleted( int RequestId , bool completed ) ; + void setMPIOutCount( int RequestId , int outcount ) ; + void clearMPIStatus( int RequestId ) ; + + _MessageIdent methodId( MPI_Datatype datatype ) const ; + MPI_Datatype datatype( _MessageIdent aMethodIdent ) const ; + private: + const CommInterface &_comm_interface ; + const MPI_Comm* _intra_communicator ; + MPIProcessorGroup * _processor_group ; + int _processor_group_size ; + int _my_rank ; + bool _trace ; + int _base_request ; + int _max_request ; + int _request ; + int * _send_request ; + int * _recv_request ; + std::vector< std::list< int > > _send_requests ; + std::vector< std::list< int > > _recv_requests ; + int _base_MPI_tag ; + int _max_MPI_tag ; + int * _send_MPI_tag ; + int * _recv_MPI_Tag ; + MPI_Datatype _MPI_TIME ; + static const int MODULO_TAG=10; + std::map< int , RequestStruct * > _map_of_request_struct ; + + }; + + inline void MPIAccess::trace( bool atrace ) + { + _trace = atrace ; + } + + // Delete the structure Request corresponding to RequestId identifier after + // the deletion of the structures MPI_Request * and MPI_Status * + // remove it from _MapOfRequestStruct (erase) + inline void MPIAccess::deleteRequest( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + { + if ( _trace ) + std::cout << "MPIAccess::DeleteRequest" << _my_rank << "( " << RequestId << " ) " + << aRequestStruct << " MPIRequest " << aRequestStruct->MPIRequest + << " MPIIsRecv " << aRequestStruct->MPIIsRecv << std::endl ; + if ( _map_of_request_struct[RequestId]->MPIRequest != MPI_REQUEST_NULL ) + requestFree( &_map_of_request_struct[RequestId]->MPIRequest ) ; + deleteSendRecvRequest( RequestId ) ; + deleteStatus( RequestId ) ; + _map_of_request_struct.erase( RequestId ) ; + delete aRequestStruct ; + } + else + { + if ( _trace ) + std::cout << "MPIAccess::DeleteRequest" << _my_rank << "( " << RequestId + << " ) Request not found" << std::endl ; + } + } + + // Delete all requests of the array ArrayOfSendRequests + inline void MPIAccess::deleteRequests(int size , int *ArrayOfSendRequests ) + { + for (int i = 0 ; i < size ; i++ ) + deleteRequest( ArrayOfSendRequests[i] ) ; + } + + // Returns the last MPITag of the 
destination rank destrank + inline int MPIAccess::sendMPITag(int destrank) + { + return _send_MPI_tag[destrank] ; + } + + // Returns the last MPITag of the source rank sourcerank + inline int MPIAccess::recvMPITag(int sourcerank) + { + return _recv_MPI_Tag[sourcerank] ; + } + + // Returns the number of all SendRequestIds matching a destination rank. It may be + // used to allocate ArrayOfSendRequests for the call to SendRequestIds + inline int MPIAccess::sendRequestIdsSize(int destrank) + { + return _send_requests[destrank].size() ; + } + + // Returns the number of all RecvRequestIds matching a source rank. It may be + // used to allocate ArrayOfRecvRequests for the call to RecvRequestIds + inline int MPIAccess::recvRequestIdsSize(int sourcerank) + { + return _recv_requests[sourcerank].size() ; + } + + // Returns the MPI_Datatype (registered in MPI in the constructor with + // MPI_Type_struct and MPI_Type_commit) for TimeMessages + inline MPI_Datatype MPIAccess::timeType() const + { + return _MPI_TIME ; + } + + // Returns true if the tag MPITag corresponds to a TimeMessage + inline bool MPIAccess::isTimeMessage( int aMPITag ) const + { + return ((aMPITag%MODULO_TAG) == _message_time) ; + } + + // Returns the MPI size of the MPI_Datatype datatype + inline MPI_Aint MPIAccess::extent( MPI_Datatype adatatype ) const + { + if ( adatatype == _MPI_TIME ) + return timeExtent() ; + if ( adatatype == MPI_INT ) + return intExtent() ; + if ( adatatype == MPI_DOUBLE ) + return doubleExtent() ; + return 0 ; + } + + // Returns the MPITag of the request corresponding to RequestId identifier + inline int MPIAccess::MPITag( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return aRequestStruct->MPITag ; + return -1 ; + } + + // Returns the MPITarget of the request corresponding to RequestId identifier + inline int MPIAccess::MPITarget( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return aRequestStruct->MPITarget ; + return -1 ; + } + + // Returns true if the request corresponding to RequestId identifier was [I]Recv + inline bool MPIAccess::MPIIsRecv( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return aRequestStruct->MPIIsRecv ; + return false ; + } + + // Returns true if the request corresponding to RequestId identifier was asynchronous + inline bool MPIAccess::MPIAsynchronous( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return aRequestStruct->MPIAsynchronous ; + return false ; + } + + // Returns true if the request corresponding to RequestId identifier was completed + inline bool MPIAccess::MPICompleted( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return aRequestStruct->MPICompleted; + return true ; + } + + // Returns the MPI_datatype of the request corresponding to RequestId identifier + inline MPI_Datatype MPIAccess::MPIDatatype( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return aRequestStruct->MPIDatatype; + return MPI_DATATYPE_NULL; + } + + // Returns the size of the receiving message of the request corresponding to + // RequestId identifier + inline int MPIAccess::MPIOutCount( int RequestId ) + { + struct RequestStruct 
*aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return aRequestStruct->MPIOutCount; + return 0 ; + } + + // Increments the previous tag value (cyclically) + // Look at MPIAccess::NewSendTag/NewRecvTag in MPIAccess.cxx + inline int MPIAccess::incrTag( int prevtag ) + { + int tag; + if ( (prevtag % MODULO_TAG) == _message_time ) + tag = ((prevtag/MODULO_TAG)*MODULO_TAG); + else + tag = ((prevtag/MODULO_TAG + 1)*MODULO_TAG); + if ( tag > _max_MPI_tag ) + tag = _base_MPI_tag ; + return tag ; + } + + // Returns the MPITag with the method-type field + // Look at MPIAccess::NewSendTag/NewRecvTag in MPIAccess.cxx + inline int MPIAccess::valTag( int tag, int method ) + { + return ((tag/MODULO_TAG)*MODULO_TAG) + method; + } + + // Remove a Request identifier from the list _RecvRequests/_SendRequests for + // the corresponding target. + inline void MPIAccess::deleteSendRecvRequest( int RequestId ) + { + if ( _trace ) + std::cout << "MPIAccess::DeleteSendRecvRequest" << _my_rank + << "( " << RequestId << " ) " << std::endl ; + if ( MPIIsRecv( RequestId ) ) + _recv_requests[ MPITarget( RequestId ) ].remove( RequestId ); + else + _send_requests[ MPITarget( RequestId ) ].remove( RequestId ); + } + + // Delete the MPI structure MPI_status * of a ReaquestId + inline void MPIAccess::deleteStatus( int RequestId ) + { + if ( _map_of_request_struct[RequestId]->MPIStatus != NULL ) + { + delete _map_of_request_struct[RequestId]->MPIStatus ; + clearMPIStatus( RequestId ) ; + } + } + + // Returns the MPI structure MPI_request * of a RequestId + inline MPI_Request * MPIAccess::MPIRequest( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + return &aRequestStruct->MPIRequest; + return &mpirequestnull ; + } + + // Returns the MPI structure MPI_status * of a RequestId + inline MPI_Status * MPIAccess::MPIStatus( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ]; + if ( aRequestStruct ) + return aRequestStruct->MPIStatus; + return NULL ; + } + + // Set the MPICompleted field of the structure Request corresponding to RequestId + // identifier with the value completed + inline void MPIAccess::setMPICompleted( int RequestId , bool completed ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + aRequestStruct->MPICompleted = completed; + } + + // Set the MPIOutCount field of the structure Request corresponding to RequestId + // identifier with the value outcount + inline void MPIAccess::setMPIOutCount( int RequestId , int outcount ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + aRequestStruct->MPIOutCount = outcount; + } + + // Nullify the MPIStatusfield of the structure Request corresponding to RequestId + // identifier + inline void MPIAccess::clearMPIStatus( int RequestId ) + { + struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ; + if ( aRequestStruct ) + aRequestStruct->MPIStatus = NULL ; + } + + // Returns the _MessageIdent enum value corresponding to the MPI_Datatype datatype + // Look at MPIAccess::NewSendTag/NewRecvTag in MPIAccess.cxx + inline _MessageIdent MPIAccess::methodId( MPI_Datatype adatatype ) const + { + _MessageIdent aMethodIdent ; + if ( adatatype == _MPI_TIME ) + aMethodIdent = _message_time; + else if ( adatatype == MPI_INT ) + aMethodIdent = _message_int ; + else if ( adatatype == MPI_DOUBLE ) + 
aMethodIdent = _message_double ; + else + aMethodIdent = _message_unknown ; + return aMethodIdent ; + } + + // Returns the MPI_Datatype corresponding to the _MessageIdent enum aMethodIdent + inline MPI_Datatype MPIAccess::datatype( _MessageIdent aMethodIdent ) const + { + MPI_Datatype aDataType ; + switch( aMethodIdent ) + { + case _message_time : + aDataType = _MPI_TIME ; + break ; + case _message_int : + aDataType = MPI_INT ; + break ; + case _message_double : + aDataType = MPI_DOUBLE ; + break ; + default : + aDataType = (MPI_Datatype) -1 ; + break ; + } + return aDataType ; + } + + std::ostream & operator<< (std::ostream &,const _MessageIdent &); + + std::ostream & operator<< (std::ostream &,const TimeMessage &); + +} + +#endif diff --git a/src/ParaMEDMEM/MPIAccessDEC.cxx b/src/ParaMEDMEM/MPIAccessDEC.cxx new file mode 100644 index 000000000..942dc79d8 --- /dev/null +++ b/src/ParaMEDMEM/MPIAccessDEC.cxx @@ -0,0 +1,1054 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "MPIAccessDEC.hxx" + +#include + +using namespace std; + +namespace ParaMEDMEM +{ + + /*! + This constructor creates an MPIAccessDEC which has \a source_group as a working side + and \a target_group as an idle side. + The constructor must be called synchronously on all processors of both processor groups. 
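+
+    A minimal usage sketch (the group layout below is purely illustrative; any two
+    MPIProcessorGroups built on the same world communicator can be used) :
+    \code
+    CommInterface interface;
+    MPIProcessorGroup source_group(interface, 0, 2);     // ranks 0..2 (working side)
+    MPIProcessorGroup target_group(interface, 3, 4);     // ranks 3..4 (idle side)
+    MPIAccessDEC dec(source_group, target_group, true);  // true = asynchronous mode
+    \endcode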
+ + \param source_group working side ProcessorGroup + \param target_group lazy side ProcessorGroup + \param Asynchronous Communication mode (default asynchronous) + \param nStepBefore Number of Time step needed for the interpolation before current time + \param nStepAfter Number of Time step needed for the interpolation after current time + + */ + + MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group, + const ProcessorGroup& target_group, + bool Asynchronous ) + { + + ProcessorGroup * union_group = source_group.fuse(target_group) ; + int i ; + std::set procs; + for ( i = 0 ; i < union_group->size() ; i++ ) + { + procs.insert(i) ; + } + MPIProcessorGroup *mpilg = static_cast(const_cast(&source_group)); + _MPI_union_group = new ParaMEDMEM::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm()); + delete union_group ; + _my_rank = _MPI_union_group->myRank() ; + _group_size = _MPI_union_group->size() ; + _MPI_access = new MPIAccess( _MPI_union_group ) ; + _asynchronous = Asynchronous ; + _time_messages = new vector< vector< TimeMessage > > ; + _time_messages->resize( _group_size ) ; + _out_of_time = new vector< bool > ; + _out_of_time->resize( _group_size ) ; + _data_messages_recv_count = new vector< int > ; + _data_messages_recv_count->resize( _group_size ) ; + for ( i = 0 ; i < _group_size ; i++ ) + { + (*_out_of_time)[i] = false ; + (*_data_messages_recv_count)[i] = 0 ; + } + _data_messages_type = new vector< MPI_Datatype > ; + _data_messages_type->resize( _group_size ) ; + _data_messages = new vector< vector< void * > > ; + _data_messages->resize( _group_size ) ; + _time_interpolator = NULL ; + _map_of_send_buffers = new map< int , SendBuffStruct * > ; + } + + MPIAccessDEC::~MPIAccessDEC() + { + checkFinalSent() ; + checkFinalRecv() ; + delete _MPI_union_group ; + delete _MPI_access ; + if ( _time_interpolator ) + delete _time_interpolator ; + if ( _time_messages ) + delete _time_messages ; + if ( _out_of_time ) + delete _out_of_time ; + if ( _data_messages_recv_count ) + delete _data_messages_recv_count ; + if ( _data_messages_type ) + delete _data_messages_type ; + if ( _data_messages ) + delete _data_messages ; + if ( _map_of_send_buffers ) + delete _map_of_send_buffers ; + } + + void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp , + double InterpPrecision, int nStepBefore, + int nStepAfter ) + { + if ( _time_interpolator ) + delete _time_interpolator ; + switch ( aTimeInterp ) + { + case WithoutTimeInterp : + _time_interpolator = NULL ; + _n_step_before = 0 ; + _n_step_after = 0 ; + break ; + case LinearTimeInterp : + _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore , + nStepAfter ) ; + _n_step_before = nStepBefore ; + _n_step_after = nStepAfter ; + int i ; + for ( i = 0 ; i < _group_size ; i++ ) + { + (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ; + (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ; + int j ; + for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ ) + { + (*_time_messages)[ i ][ j ].time = -1 ; + (*_time_messages)[ i ][ j ].deltatime = -1 ; + (*_data_messages)[ i ][ j ] = NULL ; + } + } + break ; + } + } + + /*! 
+ Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator + (Internal Protected method) + + Returns the request identifier SendRequestId + + */ + int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset , + MPI_Datatype sendtype , int target , int &SendRequestId ) + { + int sts ; + if ( _asynchronous ) + { + if ( sendtype == MPI_INT ) + { + sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype , + target , SendRequestId ) ; + } + else + { + sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype , + target , SendRequestId ) ; + } + } + else + { + if ( sendtype == MPI_INT ) + { + sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype , + target , SendRequestId ) ; + } + else + { + sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype , + target , SendRequestId ) ; + } + } + return sts ; + } + + /*! + Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator + (Internal Protected method) + + Returns the request identifier RecvRequestId + + */ + int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset , + MPI_Datatype recvtype , int target , int &RecvRequestId ) + { + int sts ; + if ( _asynchronous ) + { + if ( recvtype == MPI_INT ) + { + sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype , + target , RecvRequestId ) ; + } + else + { + sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype , + target , RecvRequestId ) ; + } + } + else + { + if ( recvtype == MPI_INT ) + { + sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype , + target , RecvRequestId ) ; + } + else + { + sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype , + target , RecvRequestId ) ; + } + } + return sts ; + } + + /*! 
+ Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator + Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator + (Internal Protected method) + + Returns the request identifier SendRequestId + Returns the request identifier RecvRequestId + + */ + int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset , + MPI_Datatype sendtype , + void* recvbuf, int recvcount , int recvoffset , + MPI_Datatype recvtype , int target , + int &SendRequestId , int &RecvRequestId ) + { + int sts ; + if ( _asynchronous ) + { + if ( sendtype == MPI_INT ) + { + if ( recvtype == MPI_INT ) + { + sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((int *) recvbuf)[recvoffset] , recvcount , + recvtype , target , RecvRequestId ) ; + } + else + { + sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((double *) recvbuf)[recvoffset] , + recvcount , recvtype , target , RecvRequestId ) ; + } + } + else + { + if ( recvtype == MPI_INT ) + { + sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((int *) recvbuf)[recvoffset] , + recvcount , recvtype , target , RecvRequestId ) ; + } + else + { + sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((double *) recvbuf)[recvoffset] , + recvcount , recvtype , target , RecvRequestId ) ; + } + } + } + else + { + if ( sendtype == MPI_INT ) + { + if ( recvtype == MPI_INT ) + { + sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((int *) recvbuf)[recvoffset] , recvcount , + recvtype , target , RecvRequestId ) ; + } + else + { + sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((double *) recvbuf)[recvoffset] , + recvcount , recvtype , target , RecvRequestId ) ; + } + } + else + { + if ( recvtype == MPI_INT ) + { + sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((int *) recvbuf)[recvoffset] , + recvcount , recvtype , target , RecvRequestId ) ; + } + else + { + sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount , + sendtype , target , SendRequestId , + &((double *) recvbuf)[recvoffset] , + recvcount , recvtype , target , RecvRequestId ) ; + } + } + } + return sts ; + } + + /*! 
+ Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator + Receive recvcount datas to recvbuf[offset] with type recvtype from all targets of IntraCommunicator + + */ + int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype , + void* recvbuf, int recvcount, MPI_Datatype recvtype ) + { + if ( _time_interpolator ) + { + return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ; + } + int sts ; + int target ; + int sendoffset = 0 ; + int recvoffset = 0 ; + int SendRequestId ; + int RecvRequestId ; + + //Free of SendBuffers + if ( _asynchronous ) + checkSent() ; + + //DoSend + DoRecv : SendRecv + SendBuffStruct * aSendDataStruct = NULL ; + if ( _asynchronous && sendbuf ) + { + aSendDataStruct = new SendBuffStruct ; + aSendDataStruct->SendBuffer = sendbuf ; + aSendDataStruct->Counter = 0 ; + aSendDataStruct->DataType = sendtype ; + } + for ( target = 0 ; target < _group_size ; target++ ) + { + sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype , + recvbuf , recvcount , recvoffset , recvtype , + target , SendRequestId , RecvRequestId ) ; + if ( _asynchronous && sendbuf && sendcount ) + { + aSendDataStruct->Counter += 1 ; + (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ; + } + sendoffset += sendcount ; + recvoffset += recvcount ; + } + if ( !_asynchronous && sendbuf ) + { + if ( sendtype == MPI_INT ) + { + delete [] (int *) sendbuf ; + } + else + { + delete [] (double *) sendbuf ; + } + } + return sts ; + } + + /*! + Send sendcounts[target] datas from sendbuf[sdispls[target]] with type sendtype to all targets of IntraCommunicator + Receive recvcounts[target] datas to recvbuf[rdispls[target]] with type recvtype from all targets of IntraCommunicator + + */ + int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls, + MPI_Datatype sendtype , + void* recvbuf, int* recvcounts, int* rdispls, + MPI_Datatype recvtype ) + { + if ( _time_interpolator ) + { + return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype , + recvbuf, recvcounts, rdispls, recvtype ) ; + } + int sts ; + int target ; + int SendRequestId ; + int RecvRequestId ; + + //Free of SendBuffers + if ( _asynchronous ) + { + checkSent() ; + } + + //DoSend + DoRecv : SendRecv + SendBuffStruct * aSendDataStruct = NULL ; + if ( _asynchronous && sendbuf ) + { + aSendDataStruct = new SendBuffStruct ; + aSendDataStruct->SendBuffer = sendbuf ; + aSendDataStruct->Counter = 0 ; + aSendDataStruct->DataType = sendtype ; + } + for ( target = 0 ; target < _group_size ; target++ ) + { + if ( sendcounts[target] || recvcounts[target] ) + { + sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype , + recvbuf , recvcounts[target] , rdispls[target] , recvtype , + target , SendRequestId , RecvRequestId ) ; + if ( _asynchronous && sendbuf && sendcounts[target]) + { + aSendDataStruct->Counter += 1 ; + (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ; + } + } + } + if ( !_asynchronous && sendbuf ) + { + if ( sendtype == MPI_INT ) + { + delete [] (int *) sendbuf ; + } + else + { + delete [] (double *) sendbuf ; + } + } + return sts ; + } + + /* + MPIAccessDEC and the management of SendBuffers : + ================================================= + + . In the collective communications collectives we send only parts of + the same buffer to each "target". So in asynchronous mode it is + necessary that all parts are free before to delete/free the + buffer. + + . 
+  We assume that buffers are allocated with a new double[], so a
+  delete [] is done.
+
+  . The structure SendBuffStruct keeps the address of the buffer
+  and manages a reference counter for that buffer. It also contains
+  the MPI_Datatype needed for the delete [] (double *) ... when the
+  counter reaches zero.
+
+  . The map _MapOfSendBuffers establishes the correspondence between each
+  RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
+  for each "target" of a part of the buffer.
+
+  . All of that concerns only asynchronous Send. In synchronous mode,
+  we delete sendbuf just after the Send.
+  */
+
+  /*
+  MPIAccessDEC and the management of RecvBuffers :
+  =================================================
+
+  If there is no interpolation, no special action is done.
+
+  With interpolation, for each target :
+  ------------------------------------
+  . We have _time_messages[target] which is a vector of TimeMessages.
+  We have 2 TimeMessages in our case of a linear interpolation.
+  They contain the previous time(t0)/deltatime and the last
+  time(t1)/deltatime.
+
+  . We have _data_messages[target] which is a vector of DataMessages.
+  We have 2 DataMessages in our case of a linear interpolation.
+  They contain the previous data at time(t0)/deltatime and the data
+  at the last time(t1)/deltatime.
+
+  . At the time _t(t*) of the current process we interpolate between
+  the values of the 2 DataMessages and return the result in the part of
+  recvbuf corresponding to the target, with t0 < t* <= t1.
+
+  . Because of the difference of "deltatimes" between processes, we
+  may have t0 < t1 < t* and in that case an extrapolation is done.
+
+  . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
+  contain, for each target, respectively true if t* > last t1, the recvcount
+  and the MPI_Datatype used to finalize the messages at the end.
+  */
+
+  /*!
+  Send a TimeMessage to all targets of IntraCommunicator
+  Receive the TimeMessages from targets of IntraCommunicator if necessary.
+
+  Send sendcount data values from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
+  Returns recvcount data values to recvbuf[offset] with type recvtype after an interpolation
+  with data received from all targets of IntraCommunicator.
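+
+  A hedged calling sketch (sizes, time values, nb_procs and the MPIAccessDEC
+  instance "dec" are illustrative only; sendbuf must have been allocated with
+  new because the DEC deletes it once the sends are completed) :
+  \code
+  dec.setTimeInterpolator(LinearTimeInterp);
+  dec.setTime(t, dt);
+  double *sendbuf = new double[sendcount * nb_procs];
+  double *recvbuf = new double[recvcount * nb_procs];
+  // ... fill sendbuf ...
+  dec.allToAllTime(sendbuf, sendcount, MPI_DOUBLE,
+                   recvbuf, recvcount, MPI_DOUBLE);
+  \endcode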
+ + */ + int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype , + void* recvbuf, int recvcount , MPI_Datatype recvtype ) + { + int sts ; + int target ; + int sendoffset = 0 ; + int SendTimeRequestId ; + int SendDataRequestId ; + + if ( _time_interpolator == NULL ) + { + return MPI_ERR_OTHER ; + } + + //Free of SendBuffers + if ( _asynchronous ) + { + checkSent() ; + } + + //DoSend : Time + SendBuff + SendBuffStruct * aSendTimeStruct = NULL ; + SendBuffStruct * aSendDataStruct = NULL ; + if ( sendbuf && sendcount ) + { + TimeMessage * aSendTimeMessage = new TimeMessage ; + if ( _asynchronous ) + { + aSendTimeStruct = new SendBuffStruct ; + aSendTimeStruct->SendBuffer = aSendTimeMessage ; + aSendTimeStruct->Counter = 0 ; + aSendTimeStruct->DataType = _MPI_access->timeType() ; + aSendDataStruct = new SendBuffStruct ; + aSendDataStruct->SendBuffer = sendbuf ; + aSendDataStruct->Counter = 0 ; + aSendDataStruct->DataType = sendtype ; + } + aSendTimeMessage->time = _t ; + aSendTimeMessage->deltatime = _dt ; + for ( target = 0 ; target < _group_size ; target++ ) + { + sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target , + SendTimeRequestId ) ; + sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ; + if ( _asynchronous ) + { + aSendTimeStruct->Counter += 1 ; + (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ; + aSendDataStruct->Counter += 1 ; + (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ; + } + sendoffset += sendcount ; + } + if ( !_asynchronous ) + { + delete aSendTimeMessage ; + if ( sendtype == MPI_INT ) + { + delete [] (int *) sendbuf ; + } + else + { + delete [] (double *) sendbuf ; + } + } + } + + //CheckTime + DoRecv + DoInterp + if ( recvbuf && recvcount ) + { + for ( target = 0 ; target < _group_size ; target++ ) + { + int recvsize = recvcount*_MPI_access->extent( recvtype ) ; + checkTime( recvcount , recvtype , target , false ) ; + //=========================================================================== + //TODO : it is assumed actually that we have only 1 timestep before nad after + //=========================================================================== + if ( _time_interpolator && (*_time_messages)[target][0].time != -1 ) + { + if ( (*_out_of_time)[target] ) + { + cout << " =====================================================" << endl + << "Recv" << _my_rank << " <-- target " << target << " t0 " + << (*_time_messages)[target][0].time << " < t1 " + << (*_time_messages)[target][1].time << " < t* " << _t << endl + << " =====================================================" << endl ; + } + if ( recvtype == MPI_INT ) + { + _time_interpolator->doInterp( (*_time_messages)[target][0].time, + (*_time_messages)[target][1].time, _t, recvcount , + _n_step_before, _n_step_after, + (int **) &(*_data_messages)[target][0], + (int **) &(*_data_messages)[target][1], + &((int *)recvbuf)[target*recvcount] ) ; + } + else + { + _time_interpolator->doInterp( (*_time_messages)[target][0].time, + (*_time_messages)[target][1].time, _t, recvcount , + _n_step_before, _n_step_after, + (double **) &(*_data_messages)[target][0], + (double **) &(*_data_messages)[target][1], + &((double *)recvbuf)[target*recvcount] ) ; + } + } + else + { + char * buffdest = (char *) recvbuf ; + char * buffsrc = (char *) (*_data_messages)[target][1] ; + memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ; + } + } + } + + return sts ; + } + + int MPIAccessDEC::allToAllvTime( void* sendbuf, int* 
sendcounts, int* sdispls, + MPI_Datatype sendtype , + void* recvbuf, int* recvcounts, int* rdispls, + MPI_Datatype recvtype ) + { + int sts ; + int target ; + int SendTimeRequestId ; + int SendDataRequestId ; + + if ( _time_interpolator == NULL ) + { + return MPI_ERR_OTHER ; + } + + //Free of SendBuffers + if ( _asynchronous ) + { + checkSent() ; + } + + /* + . DoSend : + + We create a TimeMessage (look at that structure in MPI_Access). + + If we are in asynchronous mode, we create two structures SendBuffStruct + aSendTimeStruct and aSendDataStruct that we fill. + + We fill the structure aSendTimeMessage with time/deltatime of + the current process. "deltatime" must be nul if it is the last step of + Time. + + After that for each "target", we Send the TimeMessage and the part + of sendbuf corresponding to that target. + + If we are in asynchronous mode, we increment the counter and we add + aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the + identifiers SendTimeRequestId and SendDataRequestId returned by + MPI_Access->Send(...). + + And if we are in synchronous mode we delete the SendMessages. + */ + //DoSend : Time + SendBuff + SendBuffStruct * aSendTimeStruct = NULL ; + SendBuffStruct * aSendDataStruct = NULL ; + if ( sendbuf ) + { + TimeMessage * aSendTimeMessage = new TimeMessage ; + if ( _asynchronous ) + { + aSendTimeStruct = new SendBuffStruct ; + aSendTimeStruct->SendBuffer = aSendTimeMessage ; + aSendTimeStruct->Counter = 0 ; + aSendTimeStruct->DataType = _MPI_access->timeType() ; + aSendDataStruct = new SendBuffStruct ; + aSendDataStruct->SendBuffer = sendbuf ; + aSendDataStruct->Counter = 0 ; + aSendDataStruct->DataType = sendtype ; + } + aSendTimeMessage->time = _t ; + aSendTimeMessage->deltatime = _dt ; + for ( target = 0 ; target < _group_size ; target++ ) + { + if ( sendcounts[target] ) + { + sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target , + SendTimeRequestId ) ; + sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target , + SendDataRequestId ) ; + if ( _asynchronous ) + { + aSendTimeStruct->Counter += 1 ; + (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ; + aSendDataStruct->Counter += 1 ; + (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ; + } + } + } + if ( !_asynchronous ) + { + delete aSendTimeMessage ; + if ( sendtype == MPI_INT ) + { + delete [] (int *) sendbuf ; + } + else + { + delete [] (double *) sendbuf ; + } + } + } + + /* + . CheckTime + DoRecv + DoInterp + + For each target we call CheckTime + + If there is a TimeInterpolator and if the TimeMessage of the target + is not the first, we call the interpolator which return its + results in the part of the recv buffer corresponding to the "target". + + If not, there is a copy of received datas for that first step of time + in the part of the recv buffer corresponding to the "target". 
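+
+      As an illustration only (assuming the LinearTimeInterpolator performs a
+      standard linear blend between the two stored steps), the value written
+      into recvbuf for one component i would be something like :
+
+        recvbuf[i] = data0[i] + (data1[i] - data0[i]) * (t* - t0) / (t1 - t0)
+
+      where data0/data1 are (*_data_messages)[target][0] and [1], and t0/t1
+      the times carried by the corresponding TimeMessages.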
+ */ + //CheckTime + DoRecv + DoInterp + if ( recvbuf ) + { + for ( target = 0 ; target < _group_size ; target++ ) + { + if ( recvcounts[target] ) + { + int recvsize = recvcounts[target]*_MPI_access->extent( recvtype ) ; + checkTime( recvcounts[target] , recvtype , target , false ) ; + //=========================================================================== + //TODO : it is assumed actually that we have only 1 timestep before nad after + //=========================================================================== + if ( _time_interpolator && (*_time_messages)[target][0].time != -1 ) + { + if ( (*_out_of_time)[target] ) + { + cout << " =====================================================" << endl + << "Recv" << _my_rank << " <-- target " << target << " t0 " + << (*_time_messages)[target][0].time << " < t1 " + << (*_time_messages)[target][1].time << " < t* " << _t << endl + << " =====================================================" << endl ; + } + if ( recvtype == MPI_INT ) + { + _time_interpolator->doInterp( (*_time_messages)[target][0].time, + (*_time_messages)[target][1].time, _t, + recvcounts[target] , _n_step_before, _n_step_after, + (int **) &(*_data_messages)[target][0], + (int **) &(*_data_messages)[target][1], + &((int *)recvbuf)[rdispls[target]] ) ; + } + else + { + _time_interpolator->doInterp( (*_time_messages)[target][0].time, + (*_time_messages)[target][1].time, _t, + recvcounts[target] , _n_step_before, _n_step_after, + (double **) &(*_data_messages)[target][0], + (double **) &(*_data_messages)[target][1], + &((double *)recvbuf)[rdispls[target]] ) ; + } + } + else + { + char * buffdest = (char *) recvbuf ; + char * buffsrc = (char *) (*_data_messages)[target][1] ; + memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc , + recvsize ) ; + } + } + } + } + + return sts ; + } + + /* + . CheckTime(recvcount , recvtype , target , UntilEnd) + + At the beginning, we read the first TimeMessage in + &(*_TimeMessages)[target][1] and the first DataMessage + in the allocated buffer (*_DataMessages)[target][1]. + + deltatime of TimesMessages must be nul if it is the last one. + + While : _t(t*) is the current time of the processus. + "while _t(t*) is greater than the time of the "target" + (*_TimeMessages)[target][1].time and + (*_TimeMessages)[target][1].deltatime is not nul", + So at the end of the while we have : + _t(t*) <= (*_TimeMessages)[target][1].time with + _t(t*) > (*_TimeMessages)[target][0].time + or we have the last TimeMessage of the "target". + + If it is the finalization of the recv of TimeMessages and + DataMessages (UntilEnd value is true), we execute the while + until (*_TimeMessages)[target][1].deltatime is nul. + + In the while : + We copy the last TimeMessage in the previoud TimeMessage and + we read a new TimeMessage + We delete the previous DataMessage. + We copy the last DataMessage pointer in the previous one. + We allocate a new last DataMessage buffer + (*_DataMessages)[target][1] and we read the corresponding + datas in that buffe. + + If the current time of the current process is greater than the + last time (*_TimeMessages)[target][1].time du target, we give + a true value to (*_OutOfTime)[target]. + (*_TimeMessages)[target][1].deltatime is nul. 
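+
+    A worked example with arbitrary values : if the target sent TimeMessages
+    with times 0.0, 0.5 and 1.0 and the local time is _t = 0.7, the while loop
+    consumes messages until (*_TimeMessages)[target][0].time == 0.5 and
+    (*_TimeMessages)[target][1].time == 1.0, so that t0 < t* <= t1 holds.
+    If instead _t = 1.2 and the last TimeMessage (deltatime == 0) carries
+    time 1.0, the loop stops there, (*_OutOfTime)[target] is set to true and
+    the caller will extrapolate beyond t1.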
+ */ + int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target , + bool UntilEnd ) + { + int sts = MPI_SUCCESS ; + int RecvTimeRequestId ; + int RecvDataRequestId ; + //Pour l'instant on cherche _time_messages[target][0] < _t <= _time_messages[target][1] + //=========================================================================== + //TODO : it is assumed actually that we have only 1 timestep before and after + // instead of _n_step_before and _n_step_after ... + //=========================================================================== + (*_data_messages_recv_count)[target] = recvcount ; + (*_data_messages_type)[target] = recvtype ; + if ( (*_time_messages)[target][1].time == -1 ) + { + (*_time_messages)[target][0] = (*_time_messages)[target][1] ; + sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() , + target , RecvTimeRequestId ) ; + (*_data_messages)[target][0] = (*_data_messages)[target][1] ; + if ( recvtype == MPI_INT ) + { + (*_data_messages)[target][1] = new int[recvcount] ; + } + else + { + (*_data_messages)[target][1] = new double[recvcount] ; + } + sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target , + RecvDataRequestId ) ; + } + else + { + while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) && + (*_time_messages)[target][1].deltatime != 0 ) + { + (*_time_messages)[target][0] = (*_time_messages)[target][1] ; + sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() , + target , RecvTimeRequestId ) ; + if ( UntilEnd ) + { + cout << "CheckTime" << _my_rank << " TimeMessage target " << target + << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag " + << _MPI_access->recvMPITag(target) << endl ; + } + if ( recvtype == MPI_INT ) + { + delete [] (int *) (*_data_messages)[target][0] ; + } + else + { + delete [] (double *) (*_data_messages)[target][0] ; + } + (*_data_messages)[target][0] = (*_data_messages)[target][1] ; + if ( recvtype == MPI_INT ) + { + (*_data_messages)[target][1] = new int[recvcount] ; + } + else + { + (*_data_messages)[target][1] = new double[recvcount] ; + } + sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target , + RecvDataRequestId ) ; + if ( UntilEnd ) + { + cout << "CheckTime" << _my_rank << " DataMessage target " << target + << " RecvDataRequestId " << RecvDataRequestId << " MPITag " + << _MPI_access->recvMPITag(target) << endl ; + } + } + + if ( _t > (*_time_messages)[target][0].time && + _t <= (*_time_messages)[target][1].time ) + { + } + else + { + (*_out_of_time)[target] = true ; + } + } + return sts ; + } + + /* + . CheckSent() : + + call SendRequestIds of MPI_Access in order to get all + RequestIds of SendMessages of all "targets". + + For each RequestId, CheckSent call "Test" of MPI_Access in order + to know if the buffer is "free" (flag = true). If it is the + FinalCheckSent (WithWait = true), we call Wait instead of Test. + + If the buffer is "free", the counter of the structure SendBuffStruct + (from _MapOfSendBuffers) is decremented. + + If that counter is nul we delete the TimeMessage or the + SendBuffer according to the DataType. 
+ + And we delete the structure SendBuffStruct before the suppression + (erase) of that item of _MapOfSendBuffers + */ + int MPIAccessDEC::checkSent(bool WithWait) + { + int sts = MPI_SUCCESS ; + int flag = WithWait ; + int size = _MPI_access->sendRequestIdsSize() ; + int * ArrayOfSendRequests = new int[ size ] ; + int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ; + bool SendTrace = false ; + int i ; + for ( i = 0 ; i < nSendRequest ; i++ ) + { + if ( WithWait ) + { + cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest + << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget " + << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag " + << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ; + sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ; + } + else + { + sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ; + } + if ( flag ) + { + _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ; + if ( SendTrace ) + { + cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest + << " SendRequestId " << ArrayOfSendRequests[i] + << " flag " << flag + << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter + << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType + << endl ; + } + (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ; + if ( SendTrace ) + { + if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == + _MPI_access->timeType() ) + { + cout << "CheckTimeSent" << _my_rank << " Request " ; + } + else + { + cout << "CheckDataSent" << _my_rank << " Request " ; + } + cout << ArrayOfSendRequests[i] + << " _map_of_send_buffers->SendBuffer " + << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer + << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter + << endl ; + } + if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter == 0 ) + { + if ( SendTrace ) + { + cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i] + << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter + << " flag " << flag << " SendBuffer " + << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer + << " deleted. 
Erase in _map_of_send_buffers :" << endl ; + } + if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == + _MPI_access->timeType() ) + { + delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ; + } + else + { + if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT ) + { + delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ; + } + else + { + delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ; + } + } + delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ; + } + if ( SendTrace ) + { + cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId " + << ArrayOfSendRequests[i] << endl ; + } + (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ; + } + else if ( SendTrace ) + { + cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest + << " SendRequestId " << ArrayOfSendRequests[i] + << " flag " << flag + << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter + << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType + << endl ; + } + } + if ( SendTrace ) + { + _MPI_access->check() ; + } + delete [] ArrayOfSendRequests ; + return sts ; + } + + int MPIAccessDEC::checkFinalRecv() + { + int sts = MPI_SUCCESS ; + if ( _time_interpolator ) + { + int target ; + for ( target = 0 ; target < _group_size ; target++ ) + { + if ( (*_data_messages)[target][0] != NULL ) + { + sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] , + target , true ) ; + if ( (*_data_messages_type)[target] == MPI_INT ) + { + delete [] (int *) (*_data_messages)[target][0] ; + } + else + { + delete [] (double *) (*_data_messages)[target][0] ; + } + (*_data_messages)[target][0] = NULL ; + if ( (*_data_messages)[target][1] != NULL ) + { + if ( (*_data_messages_type)[target] == MPI_INT ) + { + delete [] (int *) (*_data_messages)[target][1] ; + } + else + { + delete [] (double *) (*_data_messages)[target][1] ; + } + (*_data_messages)[target][1] = NULL ; + } + } + } + } + return sts ; + } + + ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod ) + { + switch (interpolationmethod) + { + case WithoutTimeInterp : + f << " WithoutTimeInterpolation "; + break; + case LinearTimeInterp : + f << " LinearTimeInterpolation "; + break; + default : + f << " UnknownTimeInterpolation "; + break; + } + + return f; + } +} diff --git a/src/ParaMEDMEM/MPIAccessDEC.hxx b/src/ParaMEDMEM/MPIAccessDEC.hxx new file mode 100644 index 000000000..e381ff61a --- /dev/null +++ b/src/ParaMEDMEM/MPIAccessDEC.hxx @@ -0,0 +1,179 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __MPIACCESSDEC_HXX__ +#define __MPIACCESSDEC_HXX__ + +#include "MPIAccess.hxx" +#include "DEC.hxx" +#include "LinearTimeInterpolator.hxx" + +#include +#include + +namespace ParaMEDMEM +{ + class MPIAccessDEC + { + public: + MPIAccessDEC( const ProcessorGroup& local_group, const ProcessorGroup& distant_group, + bool Asynchronous = true ); + virtual ~MPIAccessDEC(); + MPIAccess * getMPIAccess() { return _MPI_access; } + const MPI_Comm* getComm() { return _MPI_union_group->getComm(); } + void asynchronous( bool Asynchronous = true ) { _asynchronous = Asynchronous; } + void setTimeInterpolator( TimeInterpolationMethod anInterp , double InterpPrecision=0 , + int n_step_before=1, int nStepAfter=1 ); + + void setTime( double t ) { _t = t; _dt = -1; } + void setTime( double t , double dt ) { _t = t; _dt = dt; } + bool outOfTime( int target ) { return (*_out_of_time)[target]; } + + int send( void* sendbuf, int sendcount , MPI_Datatype sendtype , int target ); + int recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ); + int recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target , + int &RecvRequestId , bool Asynchronous=false ); + int sendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype , + void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ); + + int allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype , + void* recvbuf, int recvcount, MPI_Datatype recvtype ); + int allToAllv( void* sendbuf, int* sendcounts, int* sdispls, MPI_Datatype sendtype , + void* recvbuf, int* recvcounts, int* rdispls, MPI_Datatype recvtype ); + + int allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype , + void* recvbuf, int recvcount , MPI_Datatype recvtype ); + int allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls, + MPI_Datatype sendtype , + void* recvbuf, int* recvcounts, int* rdispls, + MPI_Datatype recvtype ); + int checkTime( int recvcount , MPI_Datatype recvtype , int target , bool UntilEnd ); + int checkSent(bool WithWait=false); + int checkFinalSent() { return checkSent( true ); } + int checkFinalRecv(); + protected: + int send( void* sendbuf, int sendcount , int sendoffset , MPI_Datatype sendtype , + int target, int &SendRequestId ); + int recv( void* recvbuf, int recvcount , int recvoffset , MPI_Datatype recvtype , + int target, int &RecvRequestId ); + int sendRecv( void* sendbuf, int sendcount , int sendoffset , + MPI_Datatype sendtype , + void* recvbuf, int recvcount , int recvoffset , + MPI_Datatype recvtype , int target , + int &SendRequestId ,int &RecvRequestId ); + private : + bool _asynchronous; + MPIProcessorGroup* _MPI_union_group; + + TimeInterpolator* _time_interpolator; + int _n_step_before; + int _n_step_after; + + int _my_rank; + int _group_size; + MPIAccess* _MPI_access; + + // Current time and deltatime of current process + double _t; + double _dt; + + // TimeMessages from each target _TimeMessages[target][Step] : TimeMessage + std::vector< std::vector< TimeMessage > > *_time_messages; + // Corresponding DataMessages from each target _DataMessages[target][~TimeStep] + std::vector< bool >* _out_of_time; + std::vector< int >* _data_messages_recv_count; + std::vector< MPI_Datatype >* 
_data_messages_type; + std::vector< std::vector< void * > >* _data_messages; + + typedef struct + { + void * SendBuffer; + int Counter; + MPI_Datatype DataType; } + SendBuffStruct; + std::map< int , SendBuffStruct * > *_map_of_send_buffers; + }; + + inline int MPIAccessDEC::send( void* sendbuf, int sendcount , MPI_Datatype sendtype , int target ) + { + int SendRequestId; + int sts; + if ( _asynchronous ) + { + sts = _MPI_access->ISend( sendbuf , sendcount , sendtype , target , + SendRequestId ); + } + else + { + sts = _MPI_access->send( sendbuf , sendcount , sendtype , target , + SendRequestId ); + if ( sts == MPI_SUCCESS ) + free( sendbuf ); + } + return sts; + } + + inline int MPIAccessDEC::recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ) + { + int RecvRequestId; + int sts; + if ( _asynchronous ) + sts = _MPI_access->IRecv( recvbuf , recvcount , recvtype , target , RecvRequestId ); + else + sts = _MPI_access->recv( recvbuf , recvcount , recvtype , target , RecvRequestId ); + return sts; + } + + inline int MPIAccessDEC::recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , + int target , int &RecvRequestId , bool Asynchronous ) + { + int sts; + if ( Asynchronous ) + sts = _MPI_access->IRecv( recvbuf , recvcount , recvtype , target , + RecvRequestId ); + else + sts = _MPI_access->recv( recvbuf , recvcount , recvtype , target , + RecvRequestId ); + return sts; + } + + inline int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype , + void* recvbuf, int recvcount , MPI_Datatype recvtype , + int target ) + { + int SendRequestId; + int RecvRequestId; + int sts; + if ( _asynchronous ) + sts = _MPI_access->ISendRecv( sendbuf , sendcount , sendtype , target , + SendRequestId , + recvbuf , recvcount , recvtype , target , + RecvRequestId ); + else + sts = _MPI_access->sendRecv( sendbuf , sendcount , sendtype , target , + SendRequestId , + recvbuf , recvcount , recvtype , target , + RecvRequestId ); + return sts; + } + + std::ostream & operator<< (std::ostream &,const TimeInterpolationMethod &); +} + +#endif diff --git a/src/ParaMEDMEM/MPIProcessorGroup.cxx b/src/ParaMEDMEM/MPIProcessorGroup.cxx new file mode 100644 index 000000000..055cc5122 --- /dev/null +++ b/src/ParaMEDMEM/MPIProcessorGroup.cxx @@ -0,0 +1,266 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "CommInterface.hxx" +#include "InterpolationUtils.hxx" + +#include +#include +#include +#include "mpi.h" + +using namespace std; + +/*! 
\defgroup processor_group Processor Groups + * + * \section processor_group_overview Overview + * The MPIProcessorGroup class is used to set up processor groups that help to define + * the MPI topology of the couplings. They can be set up in various ways, the most common being + * the use of the \c MPIProcessorGroup(Comminterface, int pfirst, int plast) + * constructor. + * + * The following code excerpt creates two processor groups on respectively 3 and 2 processors. + \verbatim + int main() + { + MPI_Init(&argc,&argv); + CommInterface comm_interface; + MPIProcessorGroup codeA_group(comm_interface, 0, 2); + MPIProcessorGroup codeB_group(comm_interface, 3, 4); + + ... + } + \endverbatim +*/ + + +namespace ParaMEDMEM +{ + /*! + \addtogroup processor_group + @{ + */ + + /*! + * Creates a processor group that is based on all the + MPI_COMM_WORLD processor.This routine must be called by all processors in MPI_COMM_WORLD. + \param interface CommInterface object giving access to the MPI + communication layer + */ + MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface): + ProcessorGroup(interface),_world_comm(MPI_COMM_WORLD) + { + _comm=_world_comm; + _comm_interface.commGroup(_world_comm, &_group); + int size; + _comm_interface.commSize(_world_comm,&size); + for (int i=0; i proc_ids, const MPI_Comm& world_comm): + ProcessorGroup(interface, proc_ids),_world_comm(world_comm) + { + updateMPISpecificAttributes(); + } + + + void MPIProcessorGroup::updateMPISpecificAttributes() + { + //Creation of a communicator + MPI_Group group_world; + + int size_world; + _comm_interface.commSize(_world_comm,&size_world); + int rank_world; + _comm_interface.commRank(_world_comm,&rank_world); + _comm_interface.commGroup(_world_comm, &group_world); + + int* ranks=new int[_proc_ids.size()]; + + // copying proc_ids in ranks + copy::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks); + for (int i=0; i< (int)_proc_ids.size();i++) + if (ranks[i]>size_world-1) + { + delete[] ranks; + _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically? + throw INTERP_KERNEL::Exception("invalid rank in set argument of MPIProcessorGroup constructor"); + } + + _comm_interface.groupIncl(group_world, _proc_ids.size(), ranks, &_group); + + _comm_interface.commCreate(_world_comm, _group, &_comm); + + // clean-up + delete[] ranks; + _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically? + } + + /*! Creates a processor group that is based on the processors between \a pstart and \a pend. + This routine must be called by all processors in MPI_COMM_WORLD. 
+ + \param comm_interface CommInterface object giving access to the MPI + communication layer + \param pstart id in MPI_COMM_WORLD of the first processor in the group + \param pend id in MPI_COMM_WORLD of the last processor in the group + */ + MPIProcessorGroup::MPIProcessorGroup (const CommInterface& comm_interface, int pstart, int pend, const MPI_Comm& world_comm): ProcessorGroup(comm_interface,pstart,pend),_world_comm(world_comm) + { + //Creation of a communicator + MPI_Group group_world; + + int size_world; + _comm_interface.commSize(_world_comm,&size_world); + int rank_world; + _comm_interface.commRank(_world_comm,&rank_world); + _comm_interface.commGroup(_world_comm, &group_world); + + if (pend>size_world-1 || pend proc_ids) : + ProcessorGroup(proc_group.getCommInterface()),_world_comm(MPI_COMM_WORLD) + { + cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set proc_ids)" <(group); + int local_rank; + MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank); + return local_rank; + } + + /*!Creates a processor group that is the complement of the current group + inside MPI_COMM_WORLD + \return pointer to the new ProcessorGroup structure. + */ + ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const + { + set procs; + int world_size=_comm_interface.worldSize(); + for (int i=0; i::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++) + procs.erase(*iter); + + return new MPIProcessorGroup(_comm_interface, procs, _world_comm); + + } + + ProcessorGroup *MPIProcessorGroup::deepCpy() const + { + return new MPIProcessorGroup(*this); + } + + /*!Adding processors of group \a group to local group. + \param group group that is to be fused with current group + \return new group formed by the fusion of local group and \a group. + */ + ProcessorGroup* MPIProcessorGroup::fuse (const ProcessorGroup& group) const + { + set procs = _proc_ids; + const set& distant_proc_ids = group.getProcIDs(); + for (set::const_iterator iter=distant_proc_ids.begin(); iter!=distant_proc_ids.end(); iter++) + { + procs.insert(*iter); + } + return new MPIProcessorGroup(_comm_interface, procs, _world_comm); + } + + int MPIProcessorGroup::myRank() const + { + int rank; + MPI_Comm_rank(_comm,&rank); + return rank; + } + + /*! + @} + */ + ProcessorGroup* MPIProcessorGroup::createProcGroup() const + { + set procs; + for (set::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++) + procs.insert(*iter); + + return new MPIProcessorGroup(_comm_interface, procs, _world_comm); + + } +} diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx new file mode 100644 index 000000000..d4f25eed4 --- /dev/null +++ b/src/ParaMEDMEM/MPIProcessorGroup.hxx @@ -0,0 +1,60 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __MPIPROCESSORGROUP_HXX__ +#define __MPIPROCESSORGROUP_HXX__ + +#include "ProcessorGroup.hxx" + +#include +#include + +namespace ParaMEDMEM +{ + class CommInterface; + + class MPIProcessorGroup : public ProcessorGroup + { + public: + MPIProcessorGroup(const CommInterface& interface); + MPIProcessorGroup(const CommInterface& interface, std::set proc_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD); + MPIProcessorGroup (const ProcessorGroup& proc_group, std::set proc_ids); + MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD); + MPIProcessorGroup(const MPIProcessorGroup& other); + virtual ~MPIProcessorGroup(); + virtual ProcessorGroup *deepCpy() const; + virtual ProcessorGroup* fuse (const ProcessorGroup&) const; + void intersect (ProcessorGroup&) { } + int myRank() const; + bool containsMyRank() const { int rank; MPI_Group_rank(_group, &rank); return (rank!=MPI_UNDEFINED); } + int translateRank(const ProcessorGroup* group, int rank) const; + const MPI_Comm* getComm() const { return &_comm; } + ProcessorGroup* createComplementProcGroup() const; + ProcessorGroup* createProcGroup() const; + MPI_Comm getWorldComm() { return _world_comm; } + private: + void updateMPISpecificAttributes(); + private: + const MPI_Comm _world_comm; + MPI_Group _group; + MPI_Comm _comm; + }; +} + +#endif diff --git a/src/ParaMEDMEM/MxN_Mapping.cxx b/src/ParaMEDMEM/MxN_Mapping.cxx new file mode 100644 index 000000000..05ca0990e --- /dev/null +++ b/src/ParaMEDMEM/MxN_Mapping.cxx @@ -0,0 +1,317 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "MPIAccessDEC.hxx" +#include "MxN_Mapping.hxx" + +using namespace std; + +namespace ParaMEDMEM +{ + MxN_Mapping::MxN_Mapping() + { + } + + + MxN_Mapping::MxN_Mapping(const ProcessorGroup& source_group, const ProcessorGroup& target_group,const DECOptions& dec_options) + : DECOptions(dec_options),_union_group(source_group.fuse(target_group)) + { + _access_DEC = new MPIAccessDEC(source_group,target_group,getAsynchronous()); + _access_DEC->setTimeInterpolator(getTimeInterpolationMethod()); + _send_proc_offsets.resize(_union_group->size()+1,0); + _recv_proc_offsets.resize(_union_group->size()+1,0); + + } + + MxN_Mapping::~MxN_Mapping() + { + delete _union_group; + delete _access_DEC; + } + + + /*! + Method registering a new element for correspondence with a distant element + \param distant_proc proc rank of the distant processor (in terms of the union group) + \param distant_element id of the element on the distant processor + */ + void MxN_Mapping::addElementFromSource(int distant_proc, int distant_element) + { + _sending_ids.push_back(make_pair(distant_proc,distant_element)); + for (int i=distant_proc; i<_union_group->size(); i++) + _send_proc_offsets[i+1]++; + } + + void MxN_Mapping::initialize() + { + _sending_ids.clear(); + std::fill(_send_proc_offsets.begin(),_send_proc_offsets.end(),0); + } + + void MxN_Mapping::prepareSendRecv() + { + CommInterface comm_interface=_union_group->getCommInterface(); + // sending count pattern + int* nbsend=new int[_union_group->size()]; + int* nbrecv=new int[_union_group->size()]; + for (int i=0; i<_union_group->size(); i++) + { + nbsend[i]=_send_proc_offsets[i+1]-_send_proc_offsets[i]; + } + + MPIProcessorGroup* group = static_cast(_union_group); + const MPI_Comm* comm=group->getComm(); + comm_interface.allToAll(nbsend, 1, MPI_INT, + nbrecv, 1, MPI_INT, + *comm); + + for (int i=0; i<_union_group->size(); i++) + { + for (int j=i+1;j<_union_group->size()+1; j++) + _recv_proc_offsets[j]+=nbrecv[i]; + + } + + delete[] nbsend; + delete[] nbrecv; + + _recv_ids.resize(_recv_proc_offsets[_union_group->size()]); + int* isendbuf=0; + int* irecvbuf=0; + if (_sending_ids.size()>0) + isendbuf = new int[_sending_ids.size()]; + if (_recv_ids.size()>0) + irecvbuf = new int[_recv_ids.size()]; + int* sendcounts = new int[_union_group->size()]; + int* senddispls=new int[_union_group->size()]; + int* recvcounts=new int[_union_group->size()]; + int* recvdispls=new int[_union_group->size()]; + for (int i=0; i< _union_group->size(); i++) + { + sendcounts[i]=_send_proc_offsets[i+1]-_send_proc_offsets[i]; + senddispls[i]=_send_proc_offsets[i]; + recvcounts[i]=_recv_proc_offsets[i+1]-_recv_proc_offsets[i]; + recvdispls[i]=_recv_proc_offsets[i]; + } + vector offsets = _send_proc_offsets; + for (int i=0; i<(int)_sending_ids.size();i++) + { + int iproc = _sending_ids[i].first; + isendbuf[offsets[iproc]]=_sending_ids[i].second; + offsets[iproc]++; + } + comm_interface.allToAllV(isendbuf, sendcounts, senddispls, MPI_INT, + irecvbuf, recvcounts, recvdispls, MPI_INT, + *comm); + + for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++) + _recv_ids[i]=irecvbuf[i]; + + 
if (_sending_ids.size()>0) + delete[] isendbuf; + if (_recv_ids.size()>0) + delete[] irecvbuf; + delete[] sendcounts; + delete[]recvcounts; + delete[]senddispls; + delete[] recvdispls; + } + + /*! Exchanging field data between two groups of processes + * + * \param field MEDCoupling field containing the values to be sent + * + * The ids that were defined by addElementFromSource method + * are sent. + */ + void MxN_Mapping::sendRecv(double* sendfield, MEDCouplingFieldDouble& field) const + { + CommInterface comm_interface=_union_group->getCommInterface(); + const MPIProcessorGroup* group = static_cast(_union_group); + + int nbcomp=field.getArray()->getNumberOfComponents(); + double* sendbuf=0; + double* recvbuf=0; + if (_sending_ids.size() >0) + sendbuf = new double[_sending_ids.size()*nbcomp]; + if (_recv_ids.size()>0) + recvbuf = new double[_recv_ids.size()*nbcomp]; + + int* sendcounts = new int[_union_group->size()]; + int* senddispls=new int[_union_group->size()]; + int* recvcounts=new int[_union_group->size()]; + int* recvdispls=new int[_union_group->size()]; + + for (int i=0; i< _union_group->size(); i++) + { + sendcounts[i]=nbcomp*(_send_proc_offsets[i+1]-_send_proc_offsets[i]); + senddispls[i]=nbcomp*(_send_proc_offsets[i]); + recvcounts[i]=nbcomp*(_recv_proc_offsets[i+1]-_recv_proc_offsets[i]); + recvdispls[i]=nbcomp*(_recv_proc_offsets[i]); + } + //building the buffer of the elements to be sent + vector offsets = _send_proc_offsets; + + for (int i=0; i<(int)_sending_ids.size();i++) + { + int iproc = _sending_ids[i].first; + for (int icomp=0; icompgetComm(); + comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE, + recvbuf, recvcounts, recvdispls, MPI_DOUBLE, + *comm); + } + break; + case PointToPoint: + _access_DEC->allToAllv(sendbuf, sendcounts, senddispls, MPI_DOUBLE, + recvbuf, recvcounts, recvdispls, MPI_DOUBLE); + break; + } + + //setting the received values in the field + DataArrayDouble *fieldArr=field.getArray(); + double* recvptr=recvbuf; + for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++) + { + for (int icomp=0; icompgetIJ(_recv_ids[i],icomp); + fieldArr->setIJ(_recv_ids[i],icomp,temp+*recvptr); + recvptr++; + } + } + if (sendbuf!=0 && getAllToAllMethod()== Native) + delete[] sendbuf; + if (recvbuf !=0) + delete[] recvbuf; + delete[] sendcounts; + delete[] recvcounts; + delete[] senddispls; + delete[] recvdispls; + + } + + /*! Exchanging field data between two groups of processes + * + * \param field MEDCoupling field containing the values to be sent + * + * The ids that were defined by addElementFromSource method + * are sent. 
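+   *
+   * A hedged end-to-end sketch (group names, ranks and element ids below are
+   * purely illustrative) :
+   * \code
+   * MxN_Mapping mapping(source_group, target_group, dec_options);
+   * mapping.addElementFromSource(1, 42);  // element 42 owned by union-group rank 1
+   * mapping.prepareSendRecv();
+   * mapping.sendRecv(sendvalues, field);          // forward exchange
+   * mapping.reverseSendRecv(recvvalues, field);   // reverse exchange
+   * \endcode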
+ */ + void MxN_Mapping::reverseSendRecv(double* recvfield, MEDCouplingFieldDouble& field) const + { + CommInterface comm_interface=_union_group->getCommInterface(); + const MPIProcessorGroup* group = static_cast(_union_group); + + int nbcomp=field.getArray()->getNumberOfComponents(); + double* sendbuf=0; + double* recvbuf=0; + if (_recv_ids.size() >0) + sendbuf = new double[_recv_ids.size()*nbcomp]; + if (_sending_ids.size()>0) + recvbuf = new double[_sending_ids.size()*nbcomp]; + + int* sendcounts = new int[_union_group->size()]; + int* senddispls=new int[_union_group->size()]; + int* recvcounts=new int[_union_group->size()]; + int* recvdispls=new int[_union_group->size()]; + + for (int i=0; i< _union_group->size(); i++) + { + sendcounts[i]=nbcomp*(_recv_proc_offsets[i+1]-_recv_proc_offsets[i]); + senddispls[i]=nbcomp*(_recv_proc_offsets[i]); + recvcounts[i]=nbcomp*(_send_proc_offsets[i+1]-_send_proc_offsets[i]); + recvdispls[i]=nbcomp*(_send_proc_offsets[i]); + } + //building the buffer of the elements to be sent + vector offsets = _recv_proc_offsets; + DataArrayDouble *fieldArr=field.getArray(); + for (int iproc=0; iproc<_union_group->size();iproc++) + for (int i=_recv_proc_offsets[iproc]; i<_recv_proc_offsets[iproc+1]; i++) + { + for (int icomp=0; icompgetIJ(_recv_ids[i],icomp); + } + + //communication phase + switch (getAllToAllMethod()) + { + case Native: + { + const MPI_Comm* comm = group->getComm(); + comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE, + recvbuf, recvcounts, recvdispls, MPI_DOUBLE, + *comm); + } + break; + case PointToPoint: + _access_DEC->allToAllv(sendbuf, sendcounts, senddispls, MPI_DOUBLE, + recvbuf, recvcounts, recvdispls, MPI_DOUBLE); + break; + } + + //setting the received values in the field + double* recvptr=recvbuf; + for (int i=0; i< _send_proc_offsets[_union_group->size()]; i++) + { + for (int icomp=0; icomp + +namespace ParaMEDMEM +{ + + class ProcessorGroup; + + class MxN_Mapping : public DECOptions + { + public: + MxN_Mapping(); + MxN_Mapping(const ProcessorGroup& source_group, const ProcessorGroup& target_group, const DECOptions& dec_options); + virtual ~MxN_Mapping(); + void addElementFromSource(int distant_proc, int distant_elem); + void prepareSendRecv(); + void sendRecv(MEDCouplingFieldDouble& field); + void sendRecv(double* sendfield, MEDCouplingFieldDouble& field) const ; + void reverseSendRecv(double* recvfield, MEDCouplingFieldDouble& field) const ; + + // + const std::vector >& getSendingIds() const { return _sending_ids; } + const std::vector& getSendProcsOffsets() const { return _send_proc_offsets; } + void initialize(); + + MPIAccessDEC* getAccessDEC(){ return _access_DEC; } + private : + ProcessorGroup* _union_group; + MPIAccessDEC * _access_DEC; + int _nb_comps; + std::vector > _sending_ids; + std::vector _recv_ids; + std::vector _send_proc_offsets; + std::vector _recv_proc_offsets; + }; + + std::ostream & operator<< (std::ostream &,const AllToAllMethod &); + +} + +#endif diff --git a/src/ParaMEDMEM/NonCoincidentDEC.cxx b/src/ParaMEDMEM/NonCoincidentDEC.cxx new file mode 100644 index 000000000..830d3d11c --- /dev/null +++ b/src/ParaMEDMEM/NonCoincidentDEC.cxx @@ -0,0 +1,398 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "MPIProcessorGroup.hxx" +#include "DEC.hxx" +#include "NonCoincidentDEC.hxx" + +extern "C" { +#include +#include +#include +#include +} + +namespace ParaMEDMEM +{ + + /*! + \defgroup noncoincidentdec NonCoincidentDEC + + \section overview Overview + + \c NonCoincidentDEC enables nonconservative remapping of fields + between two parallel codes. + The computation is possible for 3D meshes and 2D meshes. + It is not available for 3D surfaces. The computation enables fast parallel localization, and is based on a point in element search, followed + by a field evaluation at the point location. Thus, it is typically + faster than the \ref interpkerneldec which gives a \ref conservativeremapping. + It is particularly true for the initialisation phase (synchronize) + which is very computationnaly intensive in \ref interpkerneldec. + + In the present version, only fields lying on elements are considered. + The value is estimated by locating the barycenter of the target + side cell in a source cell and sending the value of this source cell + as the value of the target cell. + + \image html NonCoincident_small.png "Example showing the transfer from a field based on a quadrangular mesh to a triangular mesh. The triangle barycenters are computed and located in the quadrangles. In a P0-P0 interpolation, the value on the quadrangle is then applied to the triangles whose barycenter lies within." + + \image latex NonCoincident_small.eps "Example showing the transfer from a field based on a quadrangular mesh to a triangular mesh. The triangle barycenters are computed and located in the quadrangles. In a P0-P0 interpolation, the value on the quadrangle is then applied to the triangles whose barycenter lies within." + + A typical use of NonCoincidentDEC encompasses two distinct phases : + - A setup phase during which the intersection volumes are computed and the communication structures are setup. This corresponds to calling the NonCoincidentDEC::synchronize() method. + - A use phase during which the remappings are actually performed. This corresponds to the calls to sendData() and recvData() which actually trigger the data exchange. The data exchange are synchronous in the current version of the library so that recvData() and sendData() calls must be synchronized on code A and code B processor groups. + + The following code excerpt illutrates a typical use of the NonCoincidentDEC class. + + \code + ... + NonCoincidentDEC dec(groupA, groupB); + dec.attachLocalField(field); + dec.synchronize(); + if (groupA.containsMyRank()) + dec.recvData(); + else if (groupB.containsMyRank()) + dec.sendData(); + ... 
+ \endcode + + Computing the field on the receiving side can be expressed in terms + of a matrix-vector product : \f$ \phi_t=W.\phi_s\f$, with \f$ \phi_t + \f$ the field on the target side and \f$ \phi_s \f$ the field on + the source side. + In the P0-P0 case, this matrix is a plain rectangular matrix with one + non-zero element per row (with value 1). For instance, in the above figure, the matrix is : + \f[ + + \begin{tabular}{|cccc|} + 1 & 0 & 0 & 0\\ + 0 & 0 & 1 & 0\\ + 1 & 0 & 0 & 0\\ + 0 & 0 & 1 & 0\\ + \end{tabular} + \f] + */ + + fvm_nodal_t* medmemMeshToFVMMesh(const MEDMEM::MESH* mesh) + { + // create an FVM structure from the paramesh structure + std::string meshName(mesh->getName());//this line avoid that mesh->getName() object killed before fvm_nodal_create read the const char *. + fvm_nodal_t * fvm_nodal = fvm_nodal_create(meshName.c_str(),mesh->getMeshDimension()); + + //loop on cell types + int nbtypes = mesh->getNumberOfTypes(MED_EN::MED_CELL); + const MED_EN::medGeometryElement* types = mesh->getTypes(MED_EN::MED_CELL); + for (int itype=0; itypegetNumberOfElements(MED_EN::MED_CELL, types[itype]); + fvm_lnum_t* conn = new fvm_lnum_t[nbelems*(types[itype]%100)]; + const int* mesh_conn =mesh->getConnectivity(MED_EN::MED_FULL_INTERLACE,MED_EN::MED_NODAL, MED_EN::MED_CELL, types[itype]); + for (int i=0; igetNumberOfNodes(); + int spacedim=mesh->getSpaceDimension(); + fvm_coord_t* coords = new fvm_coord_t[nbnodes*spacedim]; + const double* mesh_coords=mesh->getCoordinates(MED_EN::MED_FULL_INTERLACE); + for (int i=0; igetName());//this line avoid that support->getName() object killed before fvm_nodal_create read the const char *. + fvm_nodal_t * fvm_nodal = fvm_nodal_create(supportName.c_str(),1); + + const MEDMEM::MESH* mesh= support->getMesh(); + + //loop on cell types + MED_EN::medEntityMesh entity = support->getEntity(); + + int nbtypes = support->getNumberOfTypes(); + const MED_EN::medGeometryElement* types = support->getTypes(); + int ioffset=0; + const int* type_offset = support->getNumberIndex(); + + //browsing through all types + for (int itype=0; itypegetNumberOfElements(types[itype]); + + //for a partial support, defining the element numbers that are taken into + //account in the support + fvm_lnum_t* elem_numbers=0; + if (!support->isOnAllElements()) + { + elem_numbers = const_cast (support->getNumber(types[itype])); + + //creating work arrays to store list of elems for partial suports + if (itype>0) + { + fvm_lnum_t* temp = new int[nbelems]; + for (int i=0; i< nbelems; i++) + temp[i] = elem_numbers [i]-ioffset; + ioffset+=type_offset[itype]; + elem_numbers = temp; + } + } + //retrieving original mesh connectivity + fvm_lnum_t* conn = const_cast (mesh->getConnectivity(MED_EN::MED_FULL_INTERLACE,MED_EN::MED_NODAL,entity, types[itype])); + + // adding the elements to the FVM structure + fvm_nodal_append_by_transfer(fvm_nodal, nbelems, fvm_type,0,0,0,conn,elem_numbers); + + //cleaning work arrays (for partial supports) + if (!support->isOnAllElements() && itype>0) + delete[] elem_numbers; + + } + return fvm_nodal; + } + + NonCoincidentDEC::NonCoincidentDEC() + { + } + + /*! + \addtogroup noncoincidentdec + @{ + */ + + /*! Constructor of a non coincident \ref dec with + * a source group on which lies a field lying on a mesh and a + * target group on which lies a mesh. 
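
The overview above reduces the P0-P0 transfer to the product \f$ \phi_t=W.\phi_s \f$ where each row of W carries a single 1 in the column of the source cell that contains the target barycenter. Below is a standalone sketch of applying such a selection matrix once the containing cell of every target barycenter is known; the locations array is a hypothetical stand-in for the locator output and is assumed 0-based here (the fvm locator itself may number cells from 1).

    #include <cstddef>
    #include <vector>

    // P0->P0 transfer: target cell i receives the value of source cell locations[i].
    // Values are interlaced by component, as in the fields above.
    void applySelectionMatrix(const std::vector<double>& sourceField,
                              const std::vector<int>& locations, // containing source cell per target cell
                              int nbcomp,
                              std::vector<double>& targetField)
    {
      const std::size_t nbTargetCells = locations.size();
      targetField.resize(nbTargetCells * nbcomp);
      for (std::size_t i = 0; i < nbTargetCells; i++)
        for (int c = 0; c < nbcomp; c++)
          targetField[i * nbcomp + c] = sourceField[locations[i] * nbcomp + c];
    }
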
+ * + * \param source_group ProcessorGroup on the source side + * \param target_group ProcessorGroup on the target side + */ + + NonCoincidentDEC::NonCoincidentDEC(ProcessorGroup& source_group, + ProcessorGroup& target_group) + :DEC(source_group, target_group) + {} + + NonCoincidentDEC::~NonCoincidentDEC() + { + } + + /*! Synchronization process. Calling this method + * synchronizes the topologies so that the target side + * gets the information which enable it to fetch the field value + * from the source side. + * A typical call is : + \verbatim + NonCoincidentDEC dec(source_group,target_group); + dec.attachLocalField(field); + dec.synchronize(); + \endverbatim + */ + void NonCoincidentDEC::synchronize() + { + + //initializing FVM parallel environment + const MPI_Comm* comm=dynamic_cast (_union_group)->getComm(); + fvm_parall_set_mpi_comm(*const_cast (comm)); + + + //setting up the communication DEC on both sides + + if (_source_group->containsMyRank()) + { + MEDMEM::MESH* mesh = _local_field->getField()->getSupport()->getMesh(); + fvm_nodal_t* source_nodal = ParaMEDMEM::medmemMeshToFVMMesh(mesh); + + int target_size = _target_group->size() ; + int start_rank= _source_group->size(); + const MPI_Comm* comm = (dynamic_cast (_union_group))->getComm(); + + _locator = fvm_locator_create(1e-6, + *comm, + target_size, + start_rank); + + fvm_locator_set_nodal(_locator, + source_nodal, + mesh->getSpaceDimension(), + 0, + NULL, + 0); + + + _nb_distant_points = fvm_locator_get_n_dist_points(_locator); + _distant_coords = fvm_locator_get_dist_coords(_locator); + _distant_locations = fvm_locator_get_dist_locations(_locator); + + } + if (_target_group->containsMyRank()) + { + MEDMEM::MESH* mesh = _local_field->getField()->getSupport()->getMesh(); + + fvm_nodal_t* target_nodal = ParaMEDMEM::medmemMeshToFVMMesh(mesh); + int source_size = _source_group->size(); + int start_rank= 0 ; + const MPI_Comm* comm = (dynamic_cast (_union_group))->getComm(); + + _locator = fvm_locator_create(1e-6, + *comm, + source_size, + start_rank); + int nbcells = mesh->getNumberOfElements(MED_EN::MED_CELL,MED_EN::MED_ALL_ELEMENTS); + const MEDMEM::SUPPORT* support=_local_field->getField()->getSupport(); + MEDMEM::FIELD* barycenter_coords = mesh->getBarycenter(support); + const double* coords = barycenter_coords->getValue(); + fvm_locator_set_nodal(_locator, + target_nodal, + mesh->getSpaceDimension(), + nbcells, + NULL, + coords); + delete barycenter_coords; + } + } + + + /*! This method is called on the target group in order to + * trigger the retrieveal of field data. It must + * be called synchronously with a sendData() call on + * the source group. + */ + void NonCoincidentDEC::recvData() + { + int nbelems = _local_field->getField()->getSupport()->getMesh()->getNumberOfElements(MED_EN::MED_CELL, MED_EN::MED_ALL_ELEMENTS); + int nbcomp = _local_field->getField()->getNumberOfComponents(); + double* values = new double [nbelems*nbcomp]; + fvm_locator_exchange_point_var(_locator, + 0, + values, + 0, + sizeof(double), + nbcomp, + 0); + _local_field->getField()->setValue(values); + if (_forced_renormalization_flag) + renormalizeTargetField(); + delete[]values; + } + + /*! This method is called on the source group in order to + * send field data. It must be called synchronously with + * a recvData() call on + * the target group. 
+ */ + void NonCoincidentDEC::sendData() + { + const double* values=_local_field->getField()->getValue(); + int nbcomp = _local_field->getField()->getNumberOfComponents(); + double* distant_values = new double [_nb_distant_points*nbcomp]; + + //cheap interpolation : the value of the cell is transfered to the point + for (int i=0; i<_nb_distant_points; i++) + for (int j=0; j \c >. + + For a proc#k, it is necessary to fetch info of all matrices built in \ref ParaMEDMEMOverlapDECAlgoStep4 "Step4" where the first element in pair (i,j) + is equal to k. + + After this step, the matrix repartition is the following after a call to ParaMEDMEM::OverlapMapping::prepare : + + - proc#0 : (0,0),(1,0),(2,0) + - proc#1 : (0,1),(2,1) + - proc#2 : (1,2),(2,2) + + Tuple (2,1) computed on proc 2 is stored in proc 1 after execution of the function "prepare". This is an example of item 0 in \ref ParaMEDMEMOverlapDECAlgoStep2 "Step2". + Tuple (0,1) computed on proc 1 is stored in proc 1 too. This is an example of item 1 in \ref ParaMEDMEMOverlapDECAlgoStep2 "Step2". + + In the end ParaMEDMEM::OverlapMapping::_proc_ids_to_send_vector_st will contain : + + - Proc#0 : 0,1 + - Proc#1 : 0,2 + - Proc#2 : 0,1,2 + + In the end ParaMEDMEM::OverlapMapping::_proc_ids_to_recv_vector_st will contain : + + - Proc#0 : 0,1,2 + - Proc#1 : 0,2 + - Proc#2 : 1,2 + + The method in charge to perform this is : ParaMEDMEM::OverlapMapping::prepare. +*/ +namespace ParaMEDMEM +{ + OverlapDEC::OverlapDEC(const std::set& procIds, const MPI_Comm& world_comm):_own_group(true),_interpolation_matrix(0), + _source_field(0),_own_source_field(false), + _target_field(0),_own_target_field(false) + { + ParaMEDMEM::CommInterface comm; + int *ranks_world=new int[procIds.size()]; // ranks of sources and targets in world_comm + std::copy(procIds.begin(),procIds.end(),ranks_world); + MPI_Group group,world_group; + comm.commGroup(world_comm,&world_group); + comm.groupIncl(world_group,procIds.size(),ranks_world,&group); + delete [] ranks_world; + MPI_Comm theComm; + comm.commCreate(world_comm,group,&theComm); + comm.groupFree(&group); + if(theComm==MPI_COMM_NULL) + { + _group=0; + return ; + } + std::set idsUnion; + for(std::size_t i=0;imultiply(); + } + + void OverlapDEC::recvData() + { + throw INTERP_KERNEL::Exception("Not implemented yet !!!!"); + //_interpolation_matrix->transposeMultiply(); + } + + void OverlapDEC::synchronize() + { + if(!isInGroup()) + return ; + delete _interpolation_matrix; + _interpolation_matrix=new OverlapInterpolationMatrix(_source_field,_target_field,*_group,*this,*this); + OverlapElementLocator locator(_source_field,_target_field,*_group); + locator.copyOptions(*this); + locator.exchangeMeshes(*_interpolation_matrix); + std::vector< std::pair > jobs=locator.getToDoList(); + std::string srcMeth=locator.getSourceMethod(); + std::string trgMeth=locator.getTargetMethod(); + for(std::vector< std::pair >::const_iterator it=jobs.begin();it!=jobs.end();it++) + { + const MEDCouplingPointSet *src=locator.getSourceMesh((*it).first); + const DataArrayInt *srcIds=locator.getSourceIds((*it).first); + const MEDCouplingPointSet *trg=locator.getTargetMesh((*it).second); + const DataArrayInt *trgIds=locator.getTargetIds((*it).second); + _interpolation_matrix->addContribution(src,srcIds,srcMeth,(*it).first,trg,trgIds,trgMeth,(*it).second); + } + _interpolation_matrix->prepare(locator.getProcsInInteraction()); + _interpolation_matrix->computeDeno(); + } + + void OverlapDEC::attachSourceLocalField(ParaFIELD *field, bool ownPt) + { + 
if(!isInGroup()) + return ; + if(_own_source_field) + delete _source_field; + _source_field=field; + _own_source_field=ownPt; + } + + void OverlapDEC::attachTargetLocalField(ParaFIELD *field, bool ownPt) + { + if(!isInGroup()) + return ; + if(_own_target_field) + delete _target_field; + _target_field=field; + _own_target_field=ownPt; + } + + bool OverlapDEC::isInGroup() const + { + if(!_group) + return false; + return _group->containsMyRank(); + } +} diff --git a/src/ParaMEDMEM/OverlapDEC.hxx b/src/ParaMEDMEM/OverlapDEC.hxx new file mode 100644 index 000000000..f89e6a7a4 --- /dev/null +++ b/src/ParaMEDMEM/OverlapDEC.hxx @@ -0,0 +1,60 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#ifndef __OVERLAPDEC_HXX__ +#define __OVERLAPDEC_HXX__ + +#include "DEC.hxx" +#include "InterpolationOptions.hxx" + +#include + +namespace ParaMEDMEM +{ + class OverlapInterpolationMatrix; + class ProcessorGroup; + class ParaFIELD; + + class OverlapDEC : public DEC, public INTERP_KERNEL::InterpolationOptions + { + public: + OverlapDEC(const std::set& procIds,const MPI_Comm& world_comm=MPI_COMM_WORLD); + virtual ~OverlapDEC(); + void sendRecvData(bool way=true); + void sendData(); + void recvData(); + void synchronize(); + void attachSourceLocalField(ParaFIELD *field, bool ownPt=false); + void attachTargetLocalField(ParaFIELD *field, bool ownPt=false); + ProcessorGroup *getGrp() { return _group; } + bool isInGroup() const; + private: + bool _own_group; + OverlapInterpolationMatrix* _interpolation_matrix; + ProcessorGroup *_group; + private: + ParaFIELD *_source_field; + bool _own_source_field; + ParaFIELD *_target_field; + bool _own_target_field; + }; +} + +#endif diff --git a/src/ParaMEDMEM/OverlapElementLocator.cxx b/src/ParaMEDMEM/OverlapElementLocator.cxx new file mode 100644 index 000000000..51560e145 --- /dev/null +++ b/src/ParaMEDMEM/OverlapElementLocator.cxx @@ -0,0 +1,369 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
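
Put together, the OverlapDEC API declared above follows the same two-phase pattern as the other DECs: attach the local fields, synchronize, then trigger the transfer. A hedged usage sketch, assuming MPI is already initialized and that srcField/trgField are ParaFIELD pointers built beforehand on the overlapping decomposition (their construction is not shown):

    #include "OverlapDEC.hxx"
    #include <set>

    void runOverlapTransfer(ParaMEDMEM::ParaFIELD *srcField, ParaMEDMEM::ParaFIELD *trgField)
    {
      std::set<int> procs;
      procs.insert(0); procs.insert(1); procs.insert(2);   // ranks taking part in the coupling

      ParaMEDMEM::OverlapDEC dec(procs);                   // world communicator by default
      if (!dec.isInGroup())
        return;
      dec.attachSourceLocalField(srcField);                // ownership stays with the caller
      dec.attachTargetLocalField(trgField);
      dec.synchronize();                                   // builds and prepares the interpolation matrix
      dec.sendData();                                      // source -> target matrix-vector product
    }

sendData() performs the source-to-target product; recvData(), the transpose direction, is not implemented in this version and throws.
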
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#include "OverlapElementLocator.hxx" + +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "OverlapInterpolationMatrix.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "MEDCouplingFieldDiscretization.hxx" +#include "DirectedBoundingBox.hxx" +#include "InterpKernelAutoPtr.hxx" + +#include + +using namespace std; + +namespace ParaMEDMEM +{ + OverlapElementLocator::OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group) + : _local_source_field(sourceField), + _local_target_field(targetField), + _local_source_mesh(0), + _local_target_mesh(0), + _domain_bounding_boxes(0), + _group(group) + { + if(_local_source_field) + _local_source_mesh=_local_source_field->getSupport()->getCellMesh(); + if(_local_target_field) + _local_target_mesh=_local_target_field->getSupport()->getCellMesh(); + _comm=getCommunicator(); + computeBoundingBoxes(); + } + + OverlapElementLocator::~OverlapElementLocator() + { + delete [] _domain_bounding_boxes; + } + + const MPI_Comm *OverlapElementLocator::getCommunicator() const + { + const MPIProcessorGroup* group=static_cast(&_group); + return group->getComm(); + } + + void OverlapElementLocator::computeBoundingBoxes() + { + CommInterface comm_interface=_group.getCommInterface(); + const MPIProcessorGroup* group=static_cast (&_group); + _local_space_dim=0; + if(_local_source_mesh) + _local_space_dim=_local_source_mesh->getSpaceDimension(); + else + _local_space_dim=_local_target_mesh->getSpaceDimension(); + // + const MPI_Comm* comm = group->getComm(); + int bbSize=2*2*_local_space_dim;//2 (for source/target) 2 (min/max) + _domain_bounding_boxes=new double[bbSize*_group.size()]; + INTERP_KERNEL::AutoPtr minmax=new double[bbSize]; + //Format minmax : Xmin_src,Xmax_src,Ymin_src,Ymax_src,Zmin_src,Zmax_src,Xmin_trg,Xmax_trg,Ymin_trg,Ymax_trg,Zmin_trg,Zmax_trg + if(_local_source_mesh) + _local_source_mesh->getBoundingBox(minmax); + else + { + for(int i=0;i<_local_space_dim;i++) + { + minmax[i*2]=std::numeric_limits::max(); + minmax[i*2+1]=-std::numeric_limits::max(); + } + } + if(_local_target_mesh) + _local_target_mesh->getBoundingBox(minmax+2*_local_space_dim); + else + { + for(int i=0;i<_local_space_dim;i++) + { + minmax[i*2+2*_local_space_dim]=std::numeric_limits::max(); + minmax[i*2+1+2*_local_space_dim]=-std::numeric_limits::max(); + } + } + comm_interface.allGather(minmax, bbSize, MPI_DOUBLE, + _domain_bounding_boxes,bbSize, MPI_DOUBLE, + *comm); + + // Computation of all pairs needing an interpolation pairs are duplicated now ! 
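
computeBoundingBoxes() gathers every processor's source and target boxes in one allGather and, in the loop that follows, tests each (source proc, target proc) couple for intersection. A standalone sketch of that gather-and-test step with plain MPI; the box layout mirrors the min/max format documented above, while the helper names and the tolerance parameter are illustrative.

    #include <mpi.h>
    #include <vector>

    // Each rank contributes 4*dim doubles: [src xmin,xmax,ymin,ymax,..., trg xmin,xmax,...].
    std::vector<double> gatherBoxes(const std::vector<double>& myBoxes, MPI_Comm comm)
    {
      int nbRanks = 0;
      MPI_Comm_size(comm, &nbRanks);
      std::vector<double> all(myBoxes.size() * nbRanks);
      MPI_Allgather(const_cast<double*>(&myBoxes[0]), (int)myBoxes.size(), MPI_DOUBLE,
                    &all[0], (int)myBoxes.size(), MPI_DOUBLE, comm);
      return all;
    }

    // True if the source box of rank i overlaps the target box of rank j, within tolerance eps.
    bool boxesIntersect(const std::vector<double>& all, int dim, int i, int j, double eps)
    {
      const double* src = &all[i * 4 * dim];            // source box of rank i
      const double* trg = &all[j * 4 * dim + 2 * dim];  // target box of rank j
      for (int d = 0; d < dim; d++)
        {
          bool overlap = (trg[2*d] < src[2*d+1] + eps) && (src[2*d] < trg[2*d+1] + eps);
          if (!overlap)
            return false;
        }
      return true;
    }
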
+ + _proc_pairs.clear();//first is source second is target + _proc_pairs.resize(_group.size()); + for(int i=0;i<_group.size();i++) + for(int j=0;j<_group.size();j++) + { + if(intersectsBoundingBox(i,j)) + _proc_pairs[i].push_back(j); + } + + // OK now let's assigning as balanced as possible, job to each proc of group + std::vector< std::vector< std::pair > > pairsToBeDonePerProc(_group.size()); + int i=0; + for(std::vector< std::vector< int > >::const_iterator it1=_proc_pairs.begin();it1!=_proc_pairs.end();it1++,i++) + for(std::vector< int >::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) + { + if(pairsToBeDonePerProc[i].size()<=pairsToBeDonePerProc[*it2].size())//it includes the fact that i==*it2 + pairsToBeDonePerProc[i].push_back(std::pair(i,*it2)); + else + pairsToBeDonePerProc[*it2].push_back(std::pair(i,*it2)); + } + //Keeping todo list of current proc. _to_do_list contains a set of pair where at least _group.myRank() appears once. + //This proc will be in charge to perform interpolation of any of element of '_to_do_list' + //If _group.myRank()==myPair.first, current proc should fetch target mesh of myPair.second (if different from _group.myRank()). + //If _group.myRank()==myPair.second, current proc should fetch source mesh of myPair.second. + + int myProcId=_group.myRank(); + _to_do_list=pairsToBeDonePerProc[myProcId]; + + //Feeding now '_procs_to_send'. A same id can appears twice. The second parameter in pair means what to send true=source, false=target + _procs_to_send.clear(); + for(int i=_group.size()-1;i>=0;i--) + if(i!=myProcId) + { + const std::vector< std::pair >& anRemoteProcToDoList=pairsToBeDonePerProc[i]; + for(std::vector< std::pair >::const_iterator it=anRemoteProcToDoList.begin();it!=anRemoteProcToDoList.end();it++) + { + if((*it).first==myProcId) + _procs_to_send.push_back(std::pair(i,true)); + if((*it).second==myProcId) + _procs_to_send.push_back(std::pair(i,false)); + } + } + } + + /*! + * The aim of this method is to perform the communication to get data corresponding to '_to_do_list' attribute. 
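
The assignment loop above hands each intersecting (source, target) couple to whichever of the two processors currently holds the shorter to-do list, which balances the interpolation work without any extra communication. A standalone sketch of that greedy rule (names are illustrative):

    #include <utility>
    #include <vector>

    // procPairs[i] lists the target procs whose box intersects the source box of proc i.
    // Returns, for every proc, the (source, target) couples it will have to interpolate.
    std::vector< std::vector< std::pair<int,int> > >
    balancePairs(const std::vector< std::vector<int> >& procPairs)
    {
      std::vector< std::vector< std::pair<int,int> > > todo(procPairs.size());
      for (std::size_t i = 0; i < procPairs.size(); i++)
        for (std::size_t k = 0; k < procPairs[i].size(); k++)
          {
            int j = procPairs[i][k];
            if (todo[i].size() <= todo[j].size())   // covers the case i == j as well
              todo[i].push_back(std::make_pair((int)i, j));
            else
              todo[j].push_back(std::make_pair((int)i, j));
          }
      return todo;
    }
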
+ * The principle is the following : if proc n1 and n2 need to perform a cross sending with n1 > toDoListForFetchRemaining; + for(std::vector< std::pair >::const_iterator it=_to_do_list.begin();it!=_to_do_list.end();it++) + { + if((*it).first!=(*it).second) + { + if((*it).first==myProcId) + { + if((*it).second((*it).first,(*it).second)); + } + else + {//(*it).second==myProcId + if((*it).first((*it).first,(*it).second)); + } + } + } + //sending source or target mesh to remote procs + for(std::vector< std::pair >::const_iterator it2=_procs_to_send.begin();it2!=_procs_to_send.end();it2++) + sendLocalMeshTo((*it2).first,(*it2).second,matrix); + //fetching remaining meshes + for(std::vector< std::pair >::const_iterator it=toDoListForFetchRemaining.begin();it!=toDoListForFetchRemaining.end();it++) + { + if((*it).first!=(*it).second) + { + if((*it).first==myProcId) + receiveRemoteMesh((*it).second,false); + else//(*it).second==myProcId + receiveRemoteMesh((*it).first,true); + } + } + } + + std::string OverlapElementLocator::getSourceMethod() const + { + return _local_source_field->getField()->getDiscretization()->getStringRepr(); + } + + std::string OverlapElementLocator::getTargetMethod() const + { + return _local_target_field->getField()->getDiscretization()->getStringRepr(); + } + + const MEDCouplingPointSet *OverlapElementLocator::getSourceMesh(int procId) const + { + int myProcId=_group.myRank(); + if(myProcId==procId) + return _local_source_mesh; + std::pair p(procId,true); + std::map, MEDCouplingAutoRefCountObjectPtr< MEDCouplingPointSet > >::const_iterator it=_remote_meshes.find(p); + return (*it).second; + } + + const DataArrayInt *OverlapElementLocator::getSourceIds(int procId) const + { + int myProcId=_group.myRank(); + if(myProcId==procId) + return 0; + std::pair p(procId,true); + std::map, MEDCouplingAutoRefCountObjectPtr< DataArrayInt > >::const_iterator it=_remote_elems.find(p); + return (*it).second; + } + + const MEDCouplingPointSet *OverlapElementLocator::getTargetMesh(int procId) const + { + int myProcId=_group.myRank(); + if(myProcId==procId) + return _local_target_mesh; + std::pair p(procId,false); + std::map, MEDCouplingAutoRefCountObjectPtr< MEDCouplingPointSet > >::const_iterator it=_remote_meshes.find(p); + return (*it).second; + } + + const DataArrayInt *OverlapElementLocator::getTargetIds(int procId) const + { + int myProcId=_group.myRank(); + if(myProcId==procId) + return 0; + std::pair p(procId,false); + std::map, MEDCouplingAutoRefCountObjectPtr< DataArrayInt > >::const_iterator it=_remote_elems.find(p); + return (*it).second; + } + + bool OverlapElementLocator::intersectsBoundingBox(int isource, int itarget) const + { + const double *source_bb=_domain_bounding_boxes+isource*2*2*_local_space_dim; + const double *target_bb=_domain_bounding_boxes+itarget*2*2*_local_space_dim+2*_local_space_dim; + + for (int idim=0; idim < _local_space_dim; idim++) + { + const double eps = -1e-12;//tony to change + bool intersects = (target_bb[idim*2] elems=local_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment()); + DataArrayInt *idsToSend; + MEDCouplingPointSet *send_mesh=static_cast(field->getField()->buildSubMeshData(elems->begin(),elems->end(),idsToSend)); + if(sourceOrTarget) + matrix.keepTracksOfSourceIds(procId,idsToSend);//Case#1 in Step2 of main algorithm. + else + matrix.keepTracksOfTargetIds(procId,idsToSend);//Case#0 in Step2 of main algorithm. + sendMesh(procId,send_mesh,idsToSend); + send_mesh->decrRef(); + idsToSend->decrRef(); + } + + /*! 
+ * This method recieves source remote mesh on proc 'procId' if sourceOrTarget==True + * This method recieves target remote mesh on proc 'procId' if sourceOrTarget==False + */ + void OverlapElementLocator::receiveRemoteMesh(int procId, bool sourceOrTarget) + { + DataArrayInt *da=0; + MEDCouplingPointSet *m=0; + receiveMesh(procId,m,da); + std::pair p(procId,sourceOrTarget); + _remote_meshes[p]=m; + _remote_elems[p]=da; + } + + void OverlapElementLocator::sendMesh(int procId, const MEDCouplingPointSet *mesh, const DataArrayInt *idsToSend) const + { + CommInterface comInterface=_group.getCommInterface(); + // First stage : exchanging sizes + vector tinyInfoLocalD;//tinyInfoLocalD not used for the moment + vector tinyInfoLocal; + vector tinyInfoLocalS; + mesh->getTinySerializationInformation(tinyInfoLocalD,tinyInfoLocal,tinyInfoLocalS); + const MPI_Comm *comm=getCommunicator(); + // + int lgth[2]; + lgth[0]=tinyInfoLocal.size(); + lgth[1]=idsToSend->getNbOfElems(); + comInterface.send(&lgth,2,MPI_INT,procId,1140,*_comm); + comInterface.send(&tinyInfoLocal[0],tinyInfoLocal.size(),MPI_INT,procId,1141,*comm); + // + DataArrayInt *v1Local=0; + DataArrayDouble *v2Local=0; + mesh->serialize(v1Local,v2Local); + comInterface.send(v1Local->getPointer(),v1Local->getNbOfElems(),MPI_INT,procId,1142,*comm); + comInterface.send(v2Local->getPointer(),v2Local->getNbOfElems(),MPI_DOUBLE,procId,1143,*comm); + //finished for mesh, ids now + comInterface.send(const_cast(idsToSend->getConstPointer()),lgth[1],MPI_INT,procId,1144,*comm); + // + v1Local->decrRef(); + v2Local->decrRef(); + } + + void OverlapElementLocator::receiveMesh(int procId, MEDCouplingPointSet* &mesh, DataArrayInt *&ids) const + { + int lgth[2]; + MPI_Status status; + const MPI_Comm *comm=getCommunicator(); + CommInterface comInterface=_group.getCommInterface(); + comInterface.recv(lgth,2,MPI_INT,procId,1140,*_comm,&status); + std::vector tinyInfoDistant(lgth[0]); + ids=DataArrayInt::New(); + ids->alloc(lgth[1],1); + comInterface.recv(&tinyInfoDistant[0],lgth[0],MPI_INT,procId,1141,*comm,&status); + mesh=MEDCouplingPointSet::BuildInstanceFromMeshType((MEDCouplingMeshType)tinyInfoDistant[0]); + std::vector unusedTinyDistantSts; + vector tinyInfoDistantD(1);//tinyInfoDistantD not used for the moment + DataArrayInt *v1Distant=DataArrayInt::New(); + DataArrayDouble *v2Distant=DataArrayDouble::New(); + mesh->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts); + comInterface.recv(v1Distant->getPointer(),v1Distant->getNbOfElems(),MPI_INT,procId,1142,*comm,&status); + comInterface.recv(v2Distant->getPointer(),v2Distant->getNbOfElems(),MPI_DOUBLE,procId,1143,*comm,&status); + mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts); + //finished for mesh, ids now + comInterface.recv(ids->getPointer(),lgth[1],MPI_INT,procId,1144,*comm,&status); + // + v1Distant->decrRef(); + v2Distant->decrRef(); + } +} diff --git a/src/ParaMEDMEM/OverlapElementLocator.hxx b/src/ParaMEDMEM/OverlapElementLocator.hxx new file mode 100644 index 000000000..13a94c821 --- /dev/null +++ b/src/ParaMEDMEM/OverlapElementLocator.hxx @@ -0,0 +1,92 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
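
sendMesh()/receiveMesh() above follow a classic two-phase protocol: a small fixed-size message first announces the payload lengths (tags 1140 to 1144 in the code), then the serialized arrays are shipped with matching tags. A minimal standalone sketch of that size-then-payload handshake with plain MPI; the tags and the int payload are purely illustrative.

    #include <mpi.h>
    #include <vector>

    // Sender side: announce the length, then ship the data.
    void sendArray(const std::vector<int>& data, int dest, MPI_Comm comm)
    {
      int lgth = (int)data.size();
      MPI_Send(&lgth, 1, MPI_INT, dest, /*tag*/1140, comm);
      MPI_Send(const_cast<int*>(&data[0]), lgth, MPI_INT, dest, /*tag*/1141, comm);
    }

    // Receiver side: learn the length, allocate, then receive the payload.
    std::vector<int> recvArray(int src, MPI_Comm comm)
    {
      MPI_Status status;
      int lgth = 0;
      MPI_Recv(&lgth, 1, MPI_INT, src, 1140, comm, &status);
      std::vector<int> data(lgth);
      MPI_Recv(&data[0], lgth, MPI_INT, src, 1141, comm, &status);
      return data;
    }
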
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#ifndef __OVERLAPELEMENTLOCATOR_HXX__ +#define __OVERLAPELEMENTLOCATOR_HXX__ + +#include "InterpolationOptions.hxx" +#include "MEDCouplingNatureOfField.hxx" +#include "MEDCouplingPointSet.hxx" +#include "MEDCouplingMemArray.hxx" +#include "MEDCouplingAutoRefCountObjectPtr.hxx" + +#include +#include +#include +#include + +namespace ParaMEDMEM +{ + class ParaFIELD; + class ProcessorGroup; + class ParaSUPPORT; + class OverlapInterpolationMatrix; + + class OverlapElementLocator : public INTERP_KERNEL::InterpolationOptions + { + public: + OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group); + virtual ~OverlapElementLocator(); + const MPI_Comm *getCommunicator() const; + void exchangeMeshes(OverlapInterpolationMatrix& matrix); + std::vector< std::pair > getToDoList() const { return _to_do_list; } + std::vector< std::vector< int > > getProcsInInteraction() const { return _proc_pairs; } + std::string getSourceMethod() const; + std::string getTargetMethod() const; + const MEDCouplingPointSet *getSourceMesh(int procId) const; + const DataArrayInt *getSourceIds(int procId) const; + const MEDCouplingPointSet *getTargetMesh(int procId) const; + const DataArrayInt *getTargetIds(int procId) const; + private: + void computeBoundingBoxes(); + bool intersectsBoundingBox(int i, int j) const; + void sendLocalMeshTo(int procId, bool sourceOrTarget, OverlapInterpolationMatrix& matrix) const; + void receiveRemoteMesh(int procId, bool sourceOrTarget); + void sendMesh(int procId, const MEDCouplingPointSet *mesh, const DataArrayInt *idsToSend) const; + void receiveMesh(int procId, MEDCouplingPointSet* &mesh, DataArrayInt *&ids) const; + private: + const ParaFIELD *_local_source_field; + const ParaFIELD *_local_target_field; + int _local_space_dim; + MEDCouplingPointSet *_local_source_mesh; + MEDCouplingPointSet *_local_target_mesh; + std::vector _distant_cell_meshes; + std::vector _distant_face_meshes; + //! of size _group.size(). Contains for each source proc i, the ids of proc j the targets interact with. This vector is common for all procs in _group. + std::vector< std::vector< int > > _proc_pairs; + //! 
list of interpolations couple to be done + std::vector< std::pair > _to_do_list; + std::vector< std::pair > _procs_to_send; + std::map, MEDCouplingAutoRefCountObjectPtr< MEDCouplingPointSet > > _remote_meshes; + std::map, MEDCouplingAutoRefCountObjectPtr< DataArrayInt > > _remote_elems; + double* _domain_bounding_boxes; + const ProcessorGroup& _group; + std::vector _distant_proc_ids; + const MPI_Comm *_comm; + //Attributes only used by lazy side + //std::vector _values_added; + //std::vector< std::vector > _ids_per_working_proc; + //std::vector< std::vector > _ids_per_working_proc3; + //std::vector< std::vector > _values_per_working_proc; + }; + +} + +#endif diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx new file mode 100644 index 000000000..b57541bb8 --- /dev/null +++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx @@ -0,0 +1,315 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#include "OverlapInterpolationMatrix.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ProcessorGroup.hxx" +#include "TranslationRotationMatrix.hxx" +#include "Interpolation.hxx" +#include "Interpolation1D.txx" +#include "Interpolation2DCurve.hxx" +#include "Interpolation2D.txx" +#include "Interpolation3DSurf.hxx" +#include "Interpolation3D.txx" +#include "Interpolation3D2D.txx" +#include "Interpolation2D1D.txx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingNormalizedUnstructuredMesh.txx" +#include "InterpolationOptions.hxx" +#include "NormalizedUnstructuredMesh.hxx" +#include "ElementLocator.hxx" +#include "InterpKernelAutoPtr.hxx" + +#include + +using namespace std; + +namespace ParaMEDMEM +{ + OverlapInterpolationMatrix::OverlapInterpolationMatrix(ParaFIELD *source_field, + ParaFIELD *target_field, + const ProcessorGroup& group, + const DECOptions& dec_options, + const INTERP_KERNEL::InterpolationOptions& i_opt): + INTERP_KERNEL::InterpolationOptions(i_opt), + DECOptions(dec_options), + _source_field(source_field), + _target_field(target_field), + _source_support(source_field->getSupport()->getCellMesh()), + _target_support(target_field->getSupport()->getCellMesh()), + _mapping(group), + _group(group) + { + int nbelems = source_field->getField()->getNumberOfTuples(); + _row_offsets.resize(nbelems+1); + _coeffs.resize(nbelems); + _target_volume.resize(nbelems); + } + + void OverlapInterpolationMatrix::keepTracksOfSourceIds(int procId, DataArrayInt *ids) + { + _mapping.keepTracksOfSourceIds(procId,ids); + } + + void OverlapInterpolationMatrix::keepTracksOfTargetIds(int procId, DataArrayInt *ids) + { + _mapping.keepTracksOfTargetIds(procId,ids); + } + + 
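
addContribution() below accumulates the intersection weights into the sparse format used throughout this file, a std::vector holding one std::map<column, weight> per row, and the TransposeMatrix() helper at the end of the file swaps rows and columns of that structure. A standalone sketch of the representation and of the transposition (the typedef and function name are illustrative):

    #include <cstddef>
    #include <map>
    #include <vector>

    typedef std::vector< std::map<int,double> > SparseMatrix; // one map per row: column -> weight

    // Swap rows and columns: out[col][row] = in[row][col].
    SparseMatrix transposeSparse(const SparseMatrix& in, int nbCols)
    {
      SparseMatrix out(nbCols);
      for (std::size_t row = 0; row < in.size(); row++)
        for (std::map<int,double>::const_iterator it = in[row].begin(); it != in[row].end(); ++it)
          out[(*it).first][(int)row] = (*it).second;
      return out;
    }
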
OverlapInterpolationMatrix::~OverlapInterpolationMatrix() + { + } + + void OverlapInterpolationMatrix::addContribution(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId, + const MEDCouplingPointSet *trg, const DataArrayInt *trgIds, const std::string& trgMeth, int trgProcId) + { + std::string interpMethod(srcMeth); + interpMethod+=trgMeth; + //creating the interpolator structure + vector > surfaces; + int colSize=0; + //computation of the intersection volumes between source and target elements + const MEDCouplingUMesh *trgC=dynamic_cast(trg); + const MEDCouplingUMesh *srcC=dynamic_cast(src); + if ( src->getMeshDimension() == -1 ) + { + if(trgC->getMeshDimension()==2 && trgC->getSpaceDimension()==2) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> target_mesh_wrapper(trgC); + INTERP_KERNEL::Interpolation2D interpolation(*this); + colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,surfaces,trgMeth); + } + else if(trgC->getMeshDimension()==3 && trgC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> target_mesh_wrapper(trgC); + INTERP_KERNEL::Interpolation3D interpolation(*this); + colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,surfaces,trgMeth); + } + else if(trgC->getMeshDimension()==2 && trgC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,2> target_mesh_wrapper(trgC); + INTERP_KERNEL::Interpolation3DSurf interpolation(*this); + colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,surfaces,trgMeth); + } + else + throw INTERP_KERNEL::Exception("No para interpolation available for the given mesh and space dimension of source mesh to -1D targetMesh"); + } + else if ( trg->getMeshDimension() == -1 ) + { + if(srcC->getMeshDimension()==2 && srcC->getSpaceDimension()==2) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> local_mesh_wrapper(srcC); + INTERP_KERNEL::Interpolation2D interpolation(*this); + colSize=interpolation.toIntegralUniform(local_mesh_wrapper,surfaces,srcMeth); + } + else if(srcC->getMeshDimension()==3 && srcC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> local_mesh_wrapper(srcC); + INTERP_KERNEL::Interpolation3D interpolation(*this); + colSize=interpolation.toIntegralUniform(local_mesh_wrapper,surfaces,srcMeth); + } + else if(srcC->getMeshDimension()==2 && srcC->getSpaceDimension()==3) + { + MEDCouplingNormalizedUnstructuredMesh<3,2> local_mesh_wrapper(srcC); + INTERP_KERNEL::Interpolation3DSurf interpolation(*this); + colSize=interpolation.toIntegralUniform(local_mesh_wrapper,surfaces,srcMeth); + } + else + throw INTERP_KERNEL::Exception("No para interpolation available for the given mesh and space dimension of distant mesh to -1D sourceMesh"); + } + else if ( src->getMeshDimension() == 2 && trg->getMeshDimension() == 3 + && trg->getSpaceDimension() == 3 && src->getSpaceDimension() == 3 ) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<3,3> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation3D2D interpolator (*this); + colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( src->getMeshDimension() == 3 && trg->getMeshDimension() == 2 + && trg->getSpaceDimension() == 3 && src->getSpaceDimension() == 3 ) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<3,3> 
source_wrapper(srcC); + + INTERP_KERNEL::Interpolation3D2D interpolator (*this); + vector > surfacesTranspose; + colSize=interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod);//not a bug target in source. + TransposeMatrix(surfacesTranspose,colSize,surfaces); + colSize=surfacesTranspose.size(); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( src->getMeshDimension() == 1 && trg->getMeshDimension() == 2 + && trg->getSpaceDimension() == 2 && src->getSpaceDimension() == 2 ) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation2D1D interpolator (*this); + colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( src->getMeshDimension() == 2 && trg->getMeshDimension() == 1 + && trg->getSpaceDimension() == 2 && src->getSpaceDimension() == 2 ) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation2D1D interpolator (*this); + vector > surfacesTranspose; + colSize=interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfacesTranspose,interpMethod);//not a bug target in source. + TransposeMatrix(surfacesTranspose,colSize,surfaces); + colSize=surfacesTranspose.size(); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if (trg->getMeshDimension() != _source_support->getMeshDimension()) + { + throw INTERP_KERNEL::Exception("local and distant meshes do not have the same space and mesh dimensions"); + } + else if( src->getMeshDimension() == 1 + && src->getSpaceDimension() == 1 ) + { + MEDCouplingNormalizedUnstructuredMesh<1,1> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<1,1> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation1D interpolation(*this); + colSize=interpolation.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if( trg->getMeshDimension() == 1 + && trg->getSpaceDimension() == 2 ) + { + MEDCouplingNormalizedUnstructuredMesh<2,1> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<2,1> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation2DCurve interpolation(*this); + colSize=interpolation.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( trg->getMeshDimension() == 2 + && trg->getSpaceDimension() == 3 ) + { + MEDCouplingNormalizedUnstructuredMesh<3,2> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<3,2> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation3DSurf interpolator (*this); + colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else if ( trg->getMeshDimension() == 2 + && trg->getSpaceDimension() == 2) + { + MEDCouplingNormalizedUnstructuredMesh<2,2> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation2D interpolator (*this); + colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + 
source_wrapper.releaseTempArrays(); + } + else if ( trg->getMeshDimension() == 3 + && trg->getSpaceDimension() == 3 ) + { + MEDCouplingNormalizedUnstructuredMesh<3,3> target_wrapper(trgC); + MEDCouplingNormalizedUnstructuredMesh<3,3> source_wrapper(srcC); + + INTERP_KERNEL::Interpolation3D interpolator (*this); + colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod); + target_wrapper.releaseTempArrays(); + source_wrapper.releaseTempArrays(); + } + else + { + throw INTERP_KERNEL::Exception("no interpolator exists for these mesh and space dimensions "); + } + bool needSourceSurf=isSurfaceComputationNeeded(srcMeth); + MEDCouplingFieldDouble *source_triangle_surf=0; + if(needSourceSurf) + source_triangle_surf=src->getMeasureField(getMeasureAbsStatus()); + // + fillDistributedMatrix(surfaces,srcIds,srcProcId,trgIds,trgProcId); + // + if(needSourceSurf) + source_triangle_surf->decrRef(); + } + + /*! + * \b res rows refers to target and column (first param of map) to source. + */ + void OverlapInterpolationMatrix::fillDistributedMatrix(const std::vector< std::map >& res, + const DataArrayInt *srcIds, int srcProc, + const DataArrayInt *trgIds, int trgProc) + { + _mapping.addContributionST(res,srcIds,srcProc,trgIds,trgProc); + } + + /*! + * 'procsInInteraction' gives the global view of interaction between procs. + * In 'procsInInteraction' for a proc with id i, is in interaction with procs listed in procsInInteraction[i] + */ + void OverlapInterpolationMatrix::prepare(const std::vector< std::vector >& procsInInteraction) + { + if(_source_support) + _mapping.prepare(procsInInteraction,_target_field->getField()->getNumberOfTuplesExpected()); + else + _mapping.prepare(procsInInteraction,0); + } + + void OverlapInterpolationMatrix::computeDeno() + { + if(_target_field->getField()->getNature()==ConservativeVolumic) + _mapping.computeDenoConservativeVolumic(_target_field->getField()->getNumberOfTuplesExpected()); + else + throw INTERP_KERNEL::Exception("Policy Not implemented yet : only ConservativeVolumic defined !"); + } + + void OverlapInterpolationMatrix::multiply() + { + _mapping.multiply(_source_field->getField(),_target_field->getField()); + } + + void OverlapInterpolationMatrix::transposeMultiply() + { + _mapping.transposeMultiply(_target_field->getField(),_source_field->getField()); + } + + bool OverlapInterpolationMatrix::isSurfaceComputationNeeded(const std::string& method) const + { + return method=="P0"; + } + + void OverlapInterpolationMatrix::TransposeMatrix(const std::vector >& matIn, int nbColsMatIn, std::vector >& matOut) + { + matOut.resize(nbColsMatIn); + int id=0; + for(std::vector >::const_iterator iter1=matIn.begin();iter1!=matIn.end();iter1++,id++) + for(std::map::const_iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++) + matOut[(*iter2).first][id]=(*iter2).second; + } +} diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx new file mode 100644 index 000000000..514deb8de --- /dev/null +++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx @@ -0,0 +1,126 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#ifndef __OVERLAPINTERPOLATIONMATRIX_HXX__ +#define __OVERLAPINTERPOLATIONMATRIX_HXX__ + +#include "MPIAccessDEC.hxx" +#include "OverlapMapping.hxx" +#include "InterpolationOptions.hxx" +#include "DECOptions.hxx" + +namespace ParaMEDMEM +{ + class ParaFIELD; + class MEDCouplingPointSet; + + class OverlapInterpolationMatrix : public INTERP_KERNEL::InterpolationOptions, + public DECOptions + { + public: + + OverlapInterpolationMatrix(ParaFIELD *source_field, + ParaFIELD *target_field, + const ProcessorGroup& group, + const DECOptions& dec_opt, + const InterpolationOptions& i_opt); + + void keepTracksOfSourceIds(int procId, DataArrayInt *ids); + + void keepTracksOfTargetIds(int procId, DataArrayInt *ids); + + void addContribution(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId, + const MEDCouplingPointSet *trg, const DataArrayInt *trgIds, const std::string& trgMeth, int trgProcId); + + void prepare(const std::vector< std::vector >& procsInInteraction); + + void computeDeno(); + + void multiply(); + + void transposeMultiply(); + + virtual ~OverlapInterpolationMatrix(); +#if 0 + void addContribution(MEDCouplingPointSet& distant_support, int iproc_distant, + const int* distant_elems, const std::string& srcMeth, const std::string& targetMeth); + void finishContributionW(ElementLocator& elementLocator); + void finishContributionL(ElementLocator& elementLocator); + void multiply(MEDCouplingFieldDouble& field) const; + void transposeMultiply(MEDCouplingFieldDouble& field)const; + void prepare(); + int getNbRows() const { return _row_offsets.size(); } + MPIAccessDEC* getAccessDEC() { return _mapping.getAccessDEC(); } + private: + void computeConservVolDenoW(ElementLocator& elementLocator); + void computeIntegralDenoW(ElementLocator& elementLocator); + void computeRevIntegralDenoW(ElementLocator& elementLocator); + void computeGlobConstraintDenoW(ElementLocator& elementLocator); + void computeConservVolDenoL(ElementLocator& elementLocator); + void computeIntegralDenoL(ElementLocator& elementLocator); + void computeRevIntegralDenoL(ElementLocator& elementLocator); + + void computeLocalColSum(std::vector& res) const; + void computeLocalRowSum(const std::vector& distantProcs, std::vector >& resPerProcI, + std::vector >& resPerProcD) const; + void computeGlobalRowSum(ElementLocator& elementLocator, std::vector >& denoStrorage, std::vector >& denoStrorageInv); + void computeGlobalColSum(std::vector >& denoStrorage); + void resizeGlobalColSum(std::vector >& denoStrorage); + void fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map >& values, MEDCouplingFieldDouble *surf); + void serializeMe(std::vector< std::vector< std::map > >& data1, std::vector& data2) const; + void initialize(); + void findAdditionnalElements(ElementLocator& elementLocator, std::vector >& elementsToAdd, + const std::vector >& resPerProcI, const 
std::vector >& globalIdsPartial); + void addGhostElements(const std::vector& distantProcs, const std::vector >& elementsToAdd); + int mergePolicies(const std::vector& policyPartial); + void mergeRowSum(const std::vector< std::vector >& rowsPartialSumD, const std::vector< std::vector >& globalIdsPartial, + std::vector& globalIdsLazySideInteraction, std::vector& sumCorresponding); + void mergeRowSum2(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD, + const std::vector& globalIdsLazySideInteraction, const std::vector& sumCorresponding); + void mergeRowSum3(const std::vector< std::vector >& globalIdsPartial, std::vector< std::vector >& rowsPartialSumD); + void mergeCoeffs(const std::vector& procsInInteraction, const std::vector< std::vector >& rowsPartialSumI, + const std::vector >& globalIdsPartial, std::vector >& denoStrorageInv); + void divideByGlobalRowSum(const std::vector& distantProcs, const std::vector >& resPerProcI, + const std::vector >& resPerProcD, std::vector >& deno); +#endif + private: + bool isSurfaceComputationNeeded(const std::string& method) const; + void fillDistributedMatrix(const std::vector< std::map >& res, + const DataArrayInt *srcIds, int srcProc, + const DataArrayInt *trgIds, int trgProc); + static void TransposeMatrix(const std::vector >& matIn, int nbColsMatIn, std::vector >& matOut); + private: + ParaMEDMEM::ParaFIELD *_source_field; + ParaMEDMEM::ParaFIELD *_target_field; + std::vector _row_offsets; + std::map, int > _col_offsets; + MEDCouplingPointSet *_source_support; + MEDCouplingPointSet *_target_support; + OverlapMapping _mapping; + + const ProcessorGroup& _group; + std::vector< std::vector > _target_volume; + std::vector > > _coeffs; + std::vector > _deno_multiply; + std::vector > _deno_reverse_multiply; + }; +} + +#endif diff --git a/src/ParaMEDMEM/OverlapMapping.cxx b/src/ParaMEDMEM/OverlapMapping.cxx new file mode 100644 index 000000000..abb7a1d1b --- /dev/null +++ b/src/ParaMEDMEM/OverlapMapping.cxx @@ -0,0 +1,673 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#include "OverlapMapping.hxx" +#include "MPIProcessorGroup.hxx" + +#include "MEDCouplingFieldDouble.hxx" +#include "MEDCouplingAutoRefCountObjectPtr.hxx" + +#include "InterpKernelAutoPtr.hxx" + +#include +#include + +using namespace ParaMEDMEM; + +OverlapMapping::OverlapMapping(const ProcessorGroup& group):_group(group) +{ +} + +/*! + * This method keeps tracks of source ids to know in step 6 of main algorithm, which tuple ids to send away. + * This method incarnates item#1 of step2 algorithm. 
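
The two keepTracksOf* methods below do not copy the id arrays they are handed: they take a reference on the ref-counted DataArrayInt and store the raw pointer, so the caller may release its own reference afterwards. A small sketch of that ownership rule; the store vector and the release helper are illustrative, and a matching decrRef() is expected whenever the stored arrays are dropped.

    #include "MEDCouplingMemArray.hxx"
    #include <cstddef>
    #include <vector>

    // Co-own 'ids' for as long as it sits in 'store'.
    void keepTrack(ParaMEDMEM::DataArrayInt *ids, std::vector<ParaMEDMEM::DataArrayInt*>& store)
    {
      ids->incrRef();
      store.push_back(ids);
    }

    // Drop all co-owned references (e.g. when the mapping is rebuilt).
    void releaseTracked(std::vector<ParaMEDMEM::DataArrayInt*>& store)
    {
      for (std::size_t i = 0; i < store.size(); i++)
        store[i]->decrRef();
      store.clear();
    }
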
+ */ +void OverlapMapping::keepTracksOfSourceIds(int procId, DataArrayInt *ids) +{ + ids->incrRef(); + _src_ids_st2.push_back(ids); + _src_proc_st2.push_back(procId); +} + +/*! + * This method keeps tracks of target ids to know in step 6 of main algorithm. + * This method incarnates item#0 of step2 algorithm. + */ +void OverlapMapping::keepTracksOfTargetIds(int procId, DataArrayInt *ids) +{ + ids->incrRef(); + _trg_ids_st2.push_back(ids); + _trg_proc_st2.push_back(procId); +} + +/*! + * This method stores from a matrix in format Target(rows)/Source(cols) for a source procId 'srcProcId' and for a target procId 'trgProcId'. + * All ids (source and target) are in format of local ids. + */ +void OverlapMapping::addContributionST(const std::vector< std::map >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId) +{ + _matrixes_st.push_back(matrixST); + _source_proc_id_st.push_back(srcProcId); + _target_proc_id_st.push_back(trgProcId); + if(srcIds) + {//item#1 of step2 algorithm in proc m. Only to know in advanced nb of recv ids [ (0,1) computed on proc1 and Matrix-Vector on proc1 ] + _nb_of_src_ids_proc_st2.push_back(srcIds->getNumberOfTuples()); + _src_ids_proc_st2.push_back(srcProcId); + } + else + {//item#0 of step2 algorithm in proc k + std::set s; + for(std::vector< std::map >::const_iterator it1=matrixST.begin();it1!=matrixST.end();it1++) + for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) + s.insert((*it2).first); + _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1); + _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end()); + _src_ids_zip_proc_st2.push_back(trgProcId); + } +} + +/*! + * 'procsInInteraction' gives the global view of interaction between procs. + * In 'procsInInteraction' for a proc with id i, is in interaction with procs listed in procsInInteraction[i]. + * + * This method is in charge to send matrixes in AlltoAll mode. 
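
prepare(), whose body follows, performs that exchange in two rounds: a first allToAll announces how many matrix rows every processor will receive, then the row sizes, column ids and weights are flattened and shipped with allToAllV. A standalone sketch of flattening one vector-of-row-maps matrix into the flat arrays such an exchange transports (names are illustrative):

    #include <cstddef>
    #include <map>
    #include <vector>

    // Flatten a sparse matrix (one std::map<col,weight> per row) into row offsets
    // (size nbRows+1), column ids and weights (size nnz), i.e. the arrays a
    // subsequent MPI_Alltoallv can transport.
    void flattenMatrix(const std::vector< std::map<int,double> >& mat,
                       std::vector<int>& rowOffsets,
                       std::vector<int>& colIds,
                       std::vector<double>& weights)
    {
      rowOffsets.assign(1, 0);
      colIds.clear();
      weights.clear();
      for (std::size_t row = 0; row < mat.size(); row++)
        {
          for (std::map<int,double>::const_iterator it = mat[row].begin(); it != mat[row].end(); ++it)
            {
              colIds.push_back((*it).first);
              weights.push_back((*it).second);
            }
          rowOffsets.push_back((int)colIds.size());
        }
    }
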
+ * After the call of this method 'this' contains the matrixST for all source elements of the current proc + */ +void OverlapMapping::prepare(const std::vector< std::vector >& procsInInteraction, int nbOfTrgElems) +{ + CommInterface commInterface=_group.getCommInterface(); + const MPIProcessorGroup *group=static_cast(&_group); + const MPI_Comm *comm=group->getComm(); + int grpSize=_group.size(); + INTERP_KERNEL::AutoPtr nbsend=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbsend2=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbsend3=new int[grpSize]; + std::fill(nbsend,nbsend+grpSize,0); + int myProcId=_group.myRank(); + _proc_ids_to_recv_vector_st.clear(); + int curProc=0; + for(std::vector< std::vector >::const_iterator it1=procsInInteraction.begin();it1!=procsInInteraction.end();it1++,curProc++) + if(std::find((*it1).begin(),(*it1).end(),myProcId)!=(*it1).end()) + _proc_ids_to_recv_vector_st.push_back(curProc); + _proc_ids_to_send_vector_st=procsInInteraction[myProcId]; + for(std::size_t i=0;i<_matrixes_st.size();i++) + if(_source_proc_id_st[i]==myProcId) + nbsend[_target_proc_id_st[i]]=_matrixes_st[i].size(); + INTERP_KERNEL::AutoPtr nbrecv=new int[grpSize]; + commInterface.allToAll(nbsend,1,MPI_INT,nbrecv,1,MPI_INT,*comm); + //exchanging matrix + //first exchanging offsets+ids_source + INTERP_KERNEL::AutoPtr nbrecv1=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbrecv2=new int[grpSize]; + // + int *tmp=0; + serializeMatrixStep0ST(nbrecv, + tmp,nbsend2,nbsend3, + nbrecv1,nbrecv2); + INTERP_KERNEL::AutoPtr bigArr=tmp; + INTERP_KERNEL::AutoPtr bigArrRecv=new int[nbrecv2[grpSize-1]+nbrecv1[grpSize-1]]; + commInterface.allToAllV(bigArr,nbsend2,nbsend3,MPI_INT, + bigArrRecv,nbrecv1,nbrecv2,MPI_INT, + *comm);// sending ids of sparse matrix (n+1 elems) + //second phase echange target ids + std::fill(nbsend2,nbsend2+grpSize,0); + INTERP_KERNEL::AutoPtr nbrecv3=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbrecv4=new int[grpSize]; + double *tmp2=0; + int lgthOfArr=serializeMatrixStep1ST(nbrecv,bigArrRecv,nbrecv1,nbrecv2, + tmp,tmp2, + nbsend2,nbsend3,nbrecv3,nbrecv4); + INTERP_KERNEL::AutoPtr bigArr2=tmp; + INTERP_KERNEL::AutoPtr bigArrD2=tmp2; + INTERP_KERNEL::AutoPtr bigArrRecv2=new int[lgthOfArr]; + INTERP_KERNEL::AutoPtr bigArrDRecv2=new double[lgthOfArr]; + commInterface.allToAllV(bigArr2,nbsend2,nbsend3,MPI_INT, + bigArrRecv2,nbrecv3,nbrecv4,MPI_INT, + *comm); + commInterface.allToAllV(bigArrD2,nbsend2,nbsend3,MPI_DOUBLE, + bigArrDRecv2,nbrecv3,nbrecv4,MPI_DOUBLE, + *comm); + //finishing + unserializationST(nbOfTrgElems,nbrecv,bigArrRecv,nbrecv1,nbrecv2, + bigArrRecv2,bigArrDRecv2,nbrecv3,nbrecv4); + //updating _src_ids_zip_st2 and _src_ids_zip_st2 with received matrix. + updateZipSourceIdsForFuture(); + //finish to fill _the_matrix_st with already in place matrix in _matrixes_st + finishToFillFinalMatrixST(); + //printTheMatrix(); +} + +/*! + * Compute denominators. + */ +void OverlapMapping::computeDenoGlobConstraint() +{ + _the_deno_st.clear(); + std::size_t sz1=_the_matrix_st.size(); + _the_deno_st.resize(sz1); + for(std::size_t i=0;i& mToFill=_the_deno_st[i][j]; + const std::map& m=_the_matrix_st[i][j]; + for(std::map::const_iterator it=m.begin();it!=m.end();it++) + sum+=(*it).second; + for(std::map::const_iterator it=m.begin();it!=m.end();it++) + mToFill[(*it).first]=sum; + } + } +} + +/*! + * Compute denominators. 
+ */ +void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg) +{ + CommInterface commInterface=_group.getCommInterface(); + int myProcId=_group.myRank(); + // + _the_deno_st.clear(); + std::size_t sz1=_the_matrix_st.size(); + _the_deno_st.resize(sz1); + std::vector deno(nbOfTuplesTrg); + for(std::size_t i=0;i >& mat=_the_matrix_st[i]; + int curSrcId=_the_matrix_st_source_proc_id[i]; + std::vector::iterator isItem1=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId); + int rowId=0; + if(isItem1==_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids. + { + for(std::vector< std::map >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) + for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) + deno[rowId]+=(*it2).second; + } + else + {//item0 of step2 main algo. More complicated. + std::vector::iterator fnd=isItem1;//std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId); + int locId=std::distance(_trg_proc_st2.begin(),fnd); + const DataArrayInt *trgIds=_trg_ids_st2[locId]; + const int *trgIds2=trgIds->getConstPointer(); + for(std::vector< std::map >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) + for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) + deno[trgIds2[rowId]]+=(*it2).second; + } + } + // + for(std::size_t i=0;i >& mat=_the_matrix_st[i]; + int curSrcId=_the_matrix_st_source_proc_id[i]; + std::vector::iterator isItem1=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId); + std::vector< std::map >& denoM=_the_deno_st[i]; + denoM.resize(mat.size()); + if(isItem1==_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids. + { + int rowId=0; + for(std::vector< std::map >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) + for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) + denoM[rowId][(*it2).first]=deno[rowId]; + } + else + { + std::vector::iterator fnd=isItem1; + int locId=std::distance(_trg_proc_st2.begin(),fnd); + const DataArrayInt *trgIds=_trg_ids_st2[locId]; + const int *trgIds2=trgIds->getConstPointer(); + for(std::vector< std::map >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++) + for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) + denoM[rowId][(*it2).first]=deno[trgIds2[rowId]]; + } + } +} + +/*! + * This method performs step #0/3 in serialization process. + * \param count tells specifies nb of elems to send to corresponding proc id. size equal to _group.size(). + * \param offsets tells for a proc i where to start serialize#0 matrix. size equal to _group.size(). + * \param nbOfElemsSrc of size _group.size(). Comes from previous all2all call. tells how many srcIds per proc contains matrix for current proc. 
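+ *
+ * For each destination proc the packed block is essentially the "indptr" array of a CSR
+ * matrix, i.e. the n+1 cumulated row sizes. Illustrative sketch (hypothetical 'rowSizes',
+ * not from the original sources) :
+ * \verbatim
+ *   std::vector<int> indptr(rowSizes.size()+1,0);
+ *   for(std::size_t r=0;r<rowSizes.size();r++)
+ *     indptr[r+1]=indptr[r]+rowSizes[r];
+ * \endverbatim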
+ */ +void OverlapMapping::serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigArr, int *count, int *offsets, + int *countForRecv, int *offsetsForRecv) const +{ + int grpSize=_group.size(); + std::fill(count,count+grpSize,0); + int szz=0; + int myProcId=_group.myRank(); + for(std::size_t i=0;i<_matrixes_st.size();i++) + { + if(_source_proc_id_st[i]==myProcId)// && _target_proc_id_st[i]!=myProcId + { + count[_target_proc_id_st[i]]=_matrixes_st[i].size()+1; + szz+=_matrixes_st[i].size()+1; + } + } + bigArr=new int[szz]; + offsets[0]=0; + for(int i=1;i >& mat=_matrixes_st[i]; + for(std::vector< std::map >::const_iterator it=mat.begin();it!=mat.end();it++,work++) + work[1]=work[0]+(*it).size(); + } + } + // + offsetsForRecv[0]=0; + for(int i=0;i0) + countForRecv[i]=nbOfElemsSrc[i]+1; + else + countForRecv[i]=0; + if(i>0) + offsetsForRecv[i]=offsetsForRecv[i-1]+countForRecv[i-1]; + } +} + +/*! + * This method performs step#1 and step#2/3. It returns the size of expected array to get allToAllV. + */ +int OverlapMapping::serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *recvStep0, const int *countStep0, const int *offsStep0, + int *&bigArrI, double *&bigArrD, int *count, int *offsets, + int *countForRecv, int *offsForRecv) const +{ + int grpSize=_group.size(); + int myProcId=_group.myRank(); + offsForRecv[0]=0; + int szz=0; + for(int i=0;i0) + offsForRecv[i]=offsForRecv[i-1]+countForRecv[i-1]; + } + // + std::fill(count,count+grpSize,0); + offsets[0]=0; + int fullLgth=0; + for(std::size_t i=0;i<_matrixes_st.size();i++) + { + if(_source_proc_id_st[i]==myProcId) + { + const std::vector< std::map >& mat=_matrixes_st[i]; + int lgthToSend=0; + for(std::vector< std::map >::const_iterator it=mat.begin();it!=mat.end();it++) + lgthToSend+=(*it).size(); + count[_target_proc_id_st[i]]=lgthToSend; + fullLgth+=lgthToSend; + } + } + for(int i=1;i >& mat=_matrixes_st[i]; + for(std::vector< std::map >::const_iterator it1=mat.begin();it1!=mat.end();it1++) + { + int j=0; + for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++,j++) + { + bigArrI[fullLgth+j]=(*it2).first; + bigArrD[fullLgth+j]=(*it2).second; + } + fullLgth+=(*it1).size(); + } + } + } + return szz; +} + +/*! + * This is the last step after all2Alls for matrix exchange. + * _the_matrix_st is the final matrix : + * - The first entry is srcId in current proc. + * - The second is the pseudo id of source proc (correspondance with true id is in attribute _the_matrix_st_source_proc_id and _the_matrix_st_source_ids) + * - the third is the srcId in the pseudo source proc + */ +void OverlapMapping::unserializationST(int nbOfTrgElems, + const int *nbOfElemsSrcPerProc,//first all2all + const int *bigArrRecv, const int *bigArrRecvCounts, const int *bigArrRecvOffs,//2nd all2all + const int *bigArrRecv2, const double *bigArrDRecv2, const int *bigArrRecv2Count, const int *bigArrRecv2Offs)//3rd and 4th all2alls +{ + _the_matrix_st.clear(); + _the_matrix_st_source_proc_id.clear(); + // + int grpSize=_group.size(); + for(int i=0;i_the_matrix_st' and 'this->_the_matrix_st_target_proc_id' + * and 'this->_the_matrix_st_target_ids'. + * This method finish the job of filling 'this->_the_matrix_st' and 'this->_the_matrix_st_target_proc_id' by putting candidates in 'this->_matrixes_st' into them. 
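+ *
+ * Illustrative sketch (not from the original sources) : the contributions that were not
+ * pushed through the all-to-all are simply appended after the matrices rebuilt in
+ * unserializationST, along the lines of :
+ * \verbatim
+ *   if(_source_proc_id_st[i]!=myProcId)
+ *     {
+ *       _the_matrix_st.push_back(_matrixes_st[i]);
+ *       _the_matrix_st_source_proc_id.push_back(_source_proc_id_st[i]);
+ *     }
+ * \endverbatim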
+ */ +void OverlapMapping::finishToFillFinalMatrixST() +{ + int myProcId=_group.myRank(); + int sz=_matrixes_st.size(); + int nbOfEntryToAdd=0; + for(int i=0;i >& mat=_matrixes_st[i]; + _the_matrix_st[j]=mat; + _the_matrix_st_source_proc_id.push_back(_source_proc_id_st[i]); + j++; + } + _matrixes_st.clear(); +} + +/*! + * This method performs the operation of target ids broadcasting. + */ +void OverlapMapping::prepareIdsToSendST() +{ + CommInterface commInterface=_group.getCommInterface(); + const MPIProcessorGroup *group=static_cast(&_group); + const MPI_Comm *comm=group->getComm(); + int grpSize=_group.size(); + _source_ids_to_send_st.clear(); + _source_ids_to_send_st.resize(grpSize); + INTERP_KERNEL::AutoPtr nbsend=new int[grpSize]; + std::fill(nbsend,nbsend+grpSize,0); + for(std::size_t i=0;i<_the_matrix_st_source_proc_id.size();i++) + nbsend[_the_matrix_st_source_proc_id[i]]=_the_matrix_st_source_ids[i].size(); + INTERP_KERNEL::AutoPtr nbrecv=new int[grpSize]; + commInterface.allToAll(nbsend,1,MPI_INT,nbrecv,1,MPI_INT,*comm); + // + INTERP_KERNEL::AutoPtr nbsend2=new int[grpSize]; + std::copy((int *)nbsend,((int *)nbsend)+grpSize,(int *)nbsend2); + INTERP_KERNEL::AutoPtr nbsend3=new int[grpSize]; + nbsend3[0]=0; + for(int i=1;i bigDataSend=new int[sendSz]; + for(std::size_t i=0;i<_the_matrix_st_source_proc_id.size();i++) + { + int offset=nbsend3[_the_matrix_st_source_proc_id[i]]; + std::copy(_the_matrix_st_source_ids[i].begin(),_the_matrix_st_source_ids[i].end(),((int *)nbsend3)+offset); + } + INTERP_KERNEL::AutoPtr nbrecv2=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbrecv3=new int[grpSize]; + std::copy((int *)nbrecv,((int *)nbrecv)+grpSize,(int *)nbrecv2); + nbrecv3[0]=0; + for(int i=1;i bigDataRecv=new int[recvSz]; + // + commInterface.allToAllV(bigDataSend,nbsend2,nbsend3,MPI_INT, + bigDataRecv,nbrecv2,nbrecv3,MPI_INT, + *comm); + for(int i=0;i0) + { + _source_ids_to_send_st[i].insert(_source_ids_to_send_st[i].end(),((int *)bigDataRecv)+nbrecv3[i],((int *)bigDataRecv)+nbrecv3[i]+nbrecv2[i]); + } + } +} + +/*! + * This method performs a transpose multiply of 'fieldInput' and put the result into 'fieldOutput'. + * 'fieldInput' is expected to be the sourcefield and 'fieldOutput' the targetfield. 
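+ * Once the needed source values have been gathered through the all-to-all exchanges, the
+ * local part of the product is a plain sparse accumulation. Illustrative sketch
+ * (hypothetical 'in', 'out', 'mat' and 'deno' local arrays, not from the original sources),
+ * for nbOfCompo components per tuple :
+ * \verbatim
+ *   for(std::size_t row=0;row<mat.size();row++)
+ *     for(std::map<int,double>::const_iterator it=mat[row].begin();it!=mat[row].end();it++)
+ *       for(int c=0;c<nbOfCompo;c++)
+ *         out[row*nbOfCompo+c]+=in[(*it).first*nbOfCompo+c]*((*it).second/deno[row][(*it).first]);
+ * \endverbatim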
+ */ +void OverlapMapping::multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) const +{ + int nbOfCompo=fieldInput->getNumberOfComponents();//to improve same number of components to test + CommInterface commInterface=_group.getCommInterface(); + const MPIProcessorGroup *group=static_cast(&_group); + const MPI_Comm *comm=group->getComm(); + int grpSize=_group.size(); + int myProcId=_group.myRank(); + // + INTERP_KERNEL::AutoPtr nbsend=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbsend2=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbrecv=new int[grpSize]; + INTERP_KERNEL::AutoPtr nbrecv2=new int[grpSize]; + std::fill(nbsend,nbsend+grpSize,0); + std::fill(nbrecv,nbrecv+grpSize,0); + nbsend2[0]=0; + nbrecv2[0]=0; + std::vector valsToSend; + for(int i=0;i::const_iterator isItem1=std::find(_src_proc_st2.begin(),_src_proc_st2.end(),i); + MEDCouplingAutoRefCountObjectPtr vals; + if(isItem1!=_src_proc_st2.end())//item1 of step2 main algo + { + int id=std::distance(_src_proc_st2.begin(),isItem1); + vals=fieldInput->getArray()->selectByTupleId(_src_ids_st2[id]->getConstPointer(),_src_ids_st2[id]->getConstPointer()+_src_ids_st2[id]->getNumberOfTuples()); + } + else + {//item0 of step2 main algo + int id=std::distance(_src_ids_zip_proc_st2.begin(),std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i)); + vals=fieldInput->getArray()->selectByTupleId(&(_src_ids_zip_st2[id])[0],&(_src_ids_zip_st2[id])[0]+_src_ids_zip_st2[id].size()); + } + nbsend[i]=vals->getNbOfElems(); + valsToSend.insert(valsToSend.end(),vals->getConstPointer(),vals->getConstPointer()+nbsend[i]); + } + if(std::find(_proc_ids_to_recv_vector_st.begin(),_proc_ids_to_recv_vector_st.end(),i)!=_proc_ids_to_recv_vector_st.end()) + { + std::vector::const_iterator isItem0=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i); + if(isItem0==_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ] + { + std::vector::const_iterator it1=std::find(_src_ids_proc_st2.begin(),_src_ids_proc_st2.end(),i); + if(it1!=_src_ids_proc_st2.end()) + { + int id=std::distance(_src_ids_proc_st2.begin(),it1); + nbrecv[i]=_nb_of_src_ids_proc_st2[id]*nbOfCompo; + } + else if(i==myProcId) + { + nbrecv[i]=fieldInput->getNumberOfTuplesExpected()*nbOfCompo; + } + else + throw INTERP_KERNEL::Exception("Plouff ! send email to anthony.geay@cea.fr ! 
"); + } + else + {//item0 of step2 main algo [ (2,1) computed on proc2 but Matrix-Vector on proc1 ] [(1,0) computed on proc1 but Matrix-Vector on proc0] + int id=std::distance(_src_ids_zip_proc_st2.begin(),std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i)); + nbrecv[i]=_src_ids_zip_st2[id].size()*nbOfCompo; + } + } + } + for(int i=1;i bigArr=new double[nbrecv2[grpSize-1]+nbrecv[grpSize-1]]; + commInterface.allToAllV(&valsToSend[0],nbsend,nbsend2,MPI_DOUBLE, + bigArr,nbrecv,nbrecv2,MPI_DOUBLE,*comm); + fieldOutput->getArray()->fillWithZero(); + INTERP_KERNEL::AutoPtr tmp=new double[nbOfCompo]; + for(int i=0;i0) + { + double *pt=fieldOutput->getArray()->getPointer(); + std::vector::const_iterator it=std::find(_the_matrix_st_source_proc_id.begin(),_the_matrix_st_source_proc_id.end(),i); + if(it==_the_matrix_st_source_proc_id.end()) + throw INTERP_KERNEL::Exception("Big problem !"); + int id=std::distance(_the_matrix_st_source_proc_id.begin(),it); + const std::vector< std::map >& mat=_the_matrix_st[id]; + const std::vector< std::map >& deno=_the_deno_st[id]; + std::vector::const_iterator isItem0=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i); + if(isItem0==_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ] + { + int nbOfTrgTuples=mat.size(); + for(int j=0;j& mat1=mat[j]; + const std::map& deno1=deno[j]; + std::map::const_iterator it4=deno1.begin(); + for(std::map::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it4++) + { + std::transform(bigArr+nbrecv2[i]+((*it3).first)*nbOfCompo,bigArr+nbrecv2[i]+((*it3).first+1)*(nbOfCompo),(double *)tmp,std::bind2nd(std::multiplies(),(*it3).second/(*it4).second)); + std::transform((double *)tmp,(double *)tmp+nbOfCompo,pt,pt,std::plus()); + } + } + } + else + {//item0 of step2 main algo [ (2,1) computed on proc2 but Matrix-Vector on proc1 ] + double *pt=fieldOutput->getArray()->getPointer(); + std::map zipCor; + int id=std::distance(_src_ids_zip_proc_st2.begin(),std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i)); + const std::vector zipIds=_src_ids_zip_st2[id]; + int newId=0; + for(std::vector::const_iterator it=zipIds.begin();it!=zipIds.end();it++,newId++) + zipCor[*it]=newId; + int id2=std::distance(_trg_proc_st2.begin(),std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i)); + const DataArrayInt *tgrIds=_trg_ids_st2[id2]; + const int *tgrIds2=tgrIds->getConstPointer(); + int nbOfTrgTuples=mat.size(); + for(int j=0;j& mat1=mat[j]; + const std::map& deno1=deno[j]; + std::map::const_iterator it5=deno1.begin(); + for(std::map::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it5++) + { + std::map::const_iterator it4=zipCor.find((*it3).first); + if(it4==zipCor.end()) + throw INTERP_KERNEL::Exception("Hmmmmm send e mail to anthony.geay@cea.fr !"); + std::transform(bigArr+nbrecv2[i]+((*it4).second)*nbOfCompo,bigArr+nbrecv2[i]+((*it4).second+1)*(nbOfCompo),(double *)tmp,std::bind2nd(std::multiplies(),(*it3).second/(*it5).second)); + std::transform((double *)tmp,(double *)tmp+nbOfCompo,pt+tgrIds2[j]*nbOfCompo,pt+tgrIds2[j]*nbOfCompo,std::plus()); + } + } + } + } + } +} + +/*! + * This method performs a transpose multiply of 'fieldInput' and put the result into 'fieldOutput'. + * 'fieldInput' is expected to be the targetfield and 'fieldOutput' the sourcefield. + */ +void OverlapMapping::transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) +{ +} + +/*! 
+ * This method should be called immediately after _the_matrix_st has been filled with remote computed matrix put in this proc for Matrix-Vector. + * This method computes for these matrix the minimal set of source ids corresponding to the source proc id. + */ +void OverlapMapping::updateZipSourceIdsForFuture() +{ + CommInterface commInterface=_group.getCommInterface(); + int myProcId=_group.myRank(); + int nbOfMatrixRecveived=_the_matrix_st_source_proc_id.size(); + for(int i=0;i >& mat=_the_matrix_st[i]; + _src_ids_zip_proc_st2.push_back(curSrcProcId); + _src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1); + std::set s; + for(std::vector< std::map >::const_iterator it1=mat.begin();it1!=mat.end();it1++) + for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) + s.insert((*it2).first); + _src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end()); + } + } +} + +// #include + +// void OverlapMapping::printTheMatrix() const +// { +// CommInterface commInterface=_group.getCommInterface(); +// const MPIProcessorGroup *group=static_cast(&_group); +// const MPI_Comm *comm=group->getComm(); +// int grpSize=_group.size(); +// int myProcId=_group.myRank(); +// std::cerr << "I am proc #" << myProcId << std::endl; +// int nbOfMat=_the_matrix_st.size(); +// std::cerr << "I do manage " << nbOfMat << "matrix : "<< std::endl; +// for(int i=0;i >& locMat=_the_matrix_st[i]; +// for(std::vector< std::map >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++) +// { +// for(std::map::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++) +// std::cerr << "(" << (*it2).first << "," << (*it2).second << "), "; +// std::cerr << std::endl; +// } +// } +// std::cerr << "*********" << std::endl; +// } diff --git a/src/ParaMEDMEM/OverlapMapping.hxx b/src/ParaMEDMEM/OverlapMapping.hxx new file mode 100644 index 000000000..952524715 --- /dev/null +++ b/src/ParaMEDMEM/OverlapMapping.hxx @@ -0,0 +1,90 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// +// Author : Anthony Geay (CEA/DEN) + +#ifndef __OVERLAPMAPPING_HXX__ +#define __OVERLAPMAPPING_HXX__ + +#include "MEDCouplingAutoRefCountObjectPtr.hxx" + +#include +#include + +namespace ParaMEDMEM +{ + class ProcessorGroup; + class DataArrayInt; + class MEDCouplingFieldDouble; + + class OverlapMapping + { + public: + OverlapMapping(const ProcessorGroup& group); + void keepTracksOfSourceIds(int procId, DataArrayInt *ids); + void keepTracksOfTargetIds(int procId, DataArrayInt *ids); + void addContributionST(const std::vector< std::map >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId); + void prepare(const std::vector< std::vector >& procsInInteraction, int nbOfTrgElems); + void computeDenoConservativeVolumic(int nbOfTuplesTrg); + void computeDenoGlobConstraint(); + // + void multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) const; + void transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput); + private: + void serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigArr, int *count, int *offsets, + int *countForRecv, int *offsetsForRecv) const; + int serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *recvStep0, const int *countStep0, const int *offsStep0, + int *&bigArrI, double *&bigArrD, int *count, int *offsets, + int *countForRecv, int *offsForRecv) const; + void unserializationST(int nbOfTrgElems, const int *nbOfElemsSrcPerProc, const int *bigArrRecv, const int *bigArrRecvCounts, const int *bigArrRecvOffs, + const int *bigArrRecv2, const double *bigArrDRecv2, const int *bigArrRecv2Count, const int *bigArrRecv2Offs); + void finishToFillFinalMatrixST(); + void prepareIdsToSendST(); + void updateZipSourceIdsForFuture(); + //void printTheMatrix() const; + private: + const ProcessorGroup &_group; + //! vector of ids + std::vector< MEDCouplingAutoRefCountObjectPtr > _src_ids_st2;//item #1 + std::vector< int > _src_proc_st2;//item #1 + std::vector< MEDCouplingAutoRefCountObjectPtr > _trg_ids_st2;//item #0 + std::vector< int > _trg_proc_st2;//item #0 + std::vector< int > _nb_of_src_ids_proc_st2;//item #1 + std::vector< int > _src_ids_proc_st2;//item #1 + std::vector< std::vector > _src_ids_zip_st2;//same size as _src_ids_zip_proc_st2. Sorted. specifies for each id the corresponding ids to send. This is for item0 of Step2 of main algorithm + std::vector< int > _src_ids_zip_proc_st2; + //! vector of matrixes the first entry correspond to source proc id in _source_ids_st + std::vector< std::vector< std::map > > _matrixes_st; + std::vector< std::vector > _source_ids_st; + std::vector< int > _source_proc_id_st; + std::vector< std::vector > _target_ids_st; + std::vector< int > _target_proc_id_st; + //! the matrix for matrix-vector product. The first dimension the set of target procs that interacts with local source mesh. The second dimension correspond to nb of local source ids. + std::vector< std::vector< std::map > > _the_matrix_st; + std::vector< int > _the_matrix_st_source_proc_id; + std::vector< std::vector > _the_matrix_st_source_ids; + std::vector< std::vector< std::map > > _the_deno_st; + //! 
this attribute stores the proc ids that wait for data from this proc ids for matrix-vector computation + std::vector< int > _proc_ids_to_send_vector_st; + std::vector< int > _proc_ids_to_recv_vector_st; + //! this attribute is of size _group.size(); for each procId in _group _source_ids_to_send_st[procId] contains tupleId to send abroad + std::vector< std::vector > _source_ids_to_send_st; + }; +} + +#endif diff --git a/src/ParaMEDMEM/ParaFIELD.cxx b/src/ParaMEDMEM/ParaFIELD.cxx new file mode 100644 index 000000000..f33acf919 --- /dev/null +++ b/src/ParaMEDMEM/ParaFIELD.cxx @@ -0,0 +1,223 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ExplicitCoincidentDEC.hxx" +#include "StructuredCoincidentDEC.hxx" +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" +#include "InterpKernelUtilities.hxx" +#include "InterpolationMatrix.hxx" + +#include + +namespace ParaMEDMEM +{ + /*! + \defgroup parafield ParaFIELD + This class encapsulates parallel fields. It basically encapsulates + a MEDCouplingField with extra information related to parallel + topology. + It is most conveniently created by giving a pointer to a MEDCouplingField + object and a \c ProcessorGroup. + By default, a ParaFIELD object will be constructed with all field components + located on the same processors. In some specific cases, it might be necessary to scatter components over several processors. In this case, the constructor + using a ComponentTopology is required. + + @{ */ + + /*! + + \brief Constructing a \c ParaFIELD from a \c ParaSUPPORT and a \c ComponentTopology. + + This constructor creates an empty field based on the ParaSUPPORT description + and the partitioning of components described in \a component_topology. + It takes ownership over the \c _field object that it creates. 
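+
+    A minimal usage sketch of this constructor ('para_mesh' is a purely illustrative name
+    for an already built ParaMESH*, not from the original sources) :
+    \verbatim
+    ComponentTopology comptopo(3); // three components, all on the same processor
+    ParaFIELD pfield(ON_CELLS, NO_TIME, para_mesh, comptopo);
+    pfield.getField()->setName("my_field");
+    \endverbatim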
+ + Here come the three ComponentTopology constructors : + \verbatim + ComponentTopology c; // one component in the field + ComponentTopology c(6); //six components, all of them on the same processor + ComponentTopology c(6, proc_group); // six components, evenly distributed over the processors of procgroup + \endverbatim + + */ + ParaFIELD::ParaFIELD(TypeOfField type, TypeOfTimeDiscretization td, ParaMESH* para_support, const ComponentTopology& component_topology) + :_field(0), + _component_topology(component_topology),_topology(0),_own_support(false), + _support(para_support) + { + if (para_support->isStructured() || (para_support->getTopology()->getProcGroup()->size()==1 && component_topology.nbBlocks()!=1)) + { + const BlockTopology* source_topo = dynamic_cast(para_support->getTopology()); + _topology=new BlockTopology(*source_topo,component_topology); + } + else + { + if (component_topology.nbBlocks()!=1 && para_support->getTopology()->getProcGroup()->size()!=1) + throw INTERP_KERNEL::Exception(LOCALIZED("ParaFIELD constructor : Unstructured Support not taken into account with component topology yet")); + else + { + const BlockTopology* source_topo=dynamic_cast (para_support->getTopology()); + int nb_local_comp=component_topology.nbLocalComponents(); + _topology=new BlockTopology(*source_topo,nb_local_comp); + } + } + int nb_components = component_topology.nbLocalComponents(); + if (nb_components!=0) + { + _field=MEDCouplingFieldDouble::New(type,td); + _field->setMesh(_support->getCellMesh()); + DataArrayDouble *array=DataArrayDouble::New(); + array->alloc(_field->getNumberOfTuples(),nb_components); + _field->setArray(array); + array->decrRef(); + } + else return; + + _field->setName("Default ParaFIELD name"); + _field->setDescription("Default ParaFIELD description"); + } + + /*! \brief Constructor creating the ParaFIELD + from a given FIELD and a processor group. + + This constructor supposes that support underlying \a subdomain_field has no ParaSUPPORT + attached and it therefore recreates one. It therefore takes ownership over _support. The component topology associated with the field is a basic one (all components on the same processor). 
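+
+    A minimal usage sketch (purely illustrative names, not from the original sources) :
+    \verbatim
+    MEDCouplingFieldDouble *f=MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME);
+    f->setMesh(para_mesh->getCellMesh());      // para_mesh : an already built ParaMESH*
+    DataArrayDouble *arr=DataArrayDouble::New();
+    arr->alloc(f->getNumberOfTuples(),1);      // one component
+    arr->fillWithZero();
+    f->setArray(arr); arr->decrRef();
+    ParaFIELD pfield(f,para_mesh,*proc_group); // the ParaFIELD takes its own reference on f
+    f->decrRef();
+    \endverbatim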
+ */ + ParaFIELD::ParaFIELD(MEDCouplingFieldDouble* subdomain_field, ParaMESH *sup, const ProcessorGroup& proc_group): + _field(subdomain_field), + _component_topology(ComponentTopology(_field->getNumberOfComponents())),_topology(0),_own_support(false), + _support(sup) + { + if(_field) + _field->incrRef(); + const BlockTopology* source_topo=dynamic_cast (_support->getTopology()); + _topology=new BlockTopology(*source_topo,_component_topology.nbLocalComponents()); + } + + ParaFIELD::~ParaFIELD() + { + if(_field) + _field->decrRef(); + if(_own_support) + delete _support; + delete _topology; + } + + void ParaFIELD::synchronizeTarget(ParaFIELD* source_field) + { + DisjointDEC* data_channel; + if (dynamic_cast(_topology)!=0) + { + data_channel=new StructuredCoincidentDEC; + } + else + { + data_channel=new ExplicitCoincidentDEC; + } + data_channel->attachLocalField(this); + data_channel->synchronize(); + data_channel->prepareTargetDE(); + data_channel->recvData(); + + delete data_channel; + } + + void ParaFIELD::synchronizeSource(ParaFIELD* target_field) + { + DisjointDEC* data_channel; + if (dynamic_cast(_topology)!=0) + { + data_channel=new StructuredCoincidentDEC; + } + else + { + data_channel=new ExplicitCoincidentDEC; + } + data_channel->attachLocalField(this); + data_channel->synchronize(); + data_channel->prepareSourceDE(); + data_channel->sendData(); + + delete data_channel; + } + + /*! + * This method returns, if it exists, an array with only one component and as many as tuples as _field has. + * This array gives for every element on which this->_field lies, its global number, if this->_field is nodal. + * For example if _field is a nodal field : returned array will be the nodal global numbers. + * The content of this method is used to inform Working side to accumulate data recieved by lazy side. + */ + DataArrayInt* ParaFIELD::returnCumulativeGlobalNumbering() const + { + if(!_field) + return 0; + TypeOfField type=_field->getTypeOfField(); + switch(type) + { + case ON_CELLS: + return 0; + case ON_NODES: + return _support->getGlobalNumberingNodeDA(); + default: + return 0; + } + } + + DataArrayInt* ParaFIELD::returnGlobalNumbering() const + { + if(!_field) + return 0; + TypeOfField type=_field->getTypeOfField(); + switch(type) + { + case ON_CELLS: + return _support->getGlobalNumberingCellDA(); + case ON_NODES: + return _support->getGlobalNumberingNodeDA(); + default: + return 0; + } + } + + int ParaFIELD::nbComponents() const + { + return _component_topology.nbComponents(); + } + + + /*! This method retrieves the integral of component \a icomp + over the all domain. */ + double ParaFIELD::getVolumeIntegral(int icomp, bool isWAbs) const + { + CommInterface comm_interface = _topology->getProcGroup()->getCommInterface(); + double integral=_field->integral(icomp,isWAbs); + double total=0.; + const MPI_Comm* comm = (dynamic_cast(_topology->getProcGroup()))->getComm(); + comm_interface.allReduce(&integral, &total, 1, MPI_DOUBLE, MPI_SUM, *comm); + + return total; + } +} diff --git a/src/ParaMEDMEM/ParaFIELD.hxx b/src/ParaMEDMEM/ParaFIELD.hxx new file mode 100644 index 000000000..2f5f89367 --- /dev/null +++ b/src/ParaMEDMEM/ParaFIELD.hxx @@ -0,0 +1,66 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __PARAFIELD_HXX__ +#define __PARAFIELD_HXX__ + +#include "MEDCouplingRefCountObject.hxx" +#include "ComponentTopology.hxx" + +namespace ParaMEDMEM +{ + class DataArrayInt; + class ParaMESH; + class ProcessorGroup; + class MEDCouplingFieldDouble; + class ComponentTopology; + class Topology; + + class ParaFIELD + { + public: + + ParaFIELD(TypeOfField type, TypeOfTimeDiscretization td, ParaMESH* mesh, const ComponentTopology& component_topology); + + + ParaFIELD(MEDCouplingFieldDouble* field, ParaMESH *sup, const ProcessorGroup& group); + + virtual ~ParaFIELD(); + void synchronizeTarget( ParaMEDMEM::ParaFIELD* source_field); + void synchronizeSource( ParaMEDMEM::ParaFIELD* target_field); + MEDCouplingFieldDouble* getField() const { return _field; } + void setOwnSupport(bool v) const { _own_support=v; } + DataArrayInt* returnCumulativeGlobalNumbering() const; + DataArrayInt* returnGlobalNumbering() const; + Topology* getTopology() const { return _topology; } + ParaMESH* getSupport() const { return _support; } + int nbComponents() const; + double getVolumeIntegral(int icomp, bool isWAbs) const; + double getL2Norm()const { return -1; } + private: + MEDCouplingFieldDouble* _field; + ParaMEDMEM::ComponentTopology _component_topology; + Topology* _topology; + mutable bool _own_support; + ParaMESH* _support; + }; + +} + +#endif diff --git a/src/ParaMEDMEM/ParaGRID.cxx b/src/ParaMEDMEM/ParaGRID.cxx new file mode 100644 index 000000000..f45c1e7ac --- /dev/null +++ b/src/ParaMEDMEM/ParaGRID.cxx @@ -0,0 +1,74 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaGRID.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "MEDCouplingMemArray.hxx" +#include "MEDCouplingCMesh.hxx" +#include "InterpKernelUtilities.hxx" + +#include + +using namespace std; + +namespace ParaMEDMEM +{ + + ParaGRID::ParaGRID(MEDCouplingCMesh* global_grid, Topology* topology) throw(INTERP_KERNEL::Exception) + { + + _block_topology = dynamic_cast(topology); + if(_block_topology==0) + throw INTERP_KERNEL::Exception(LOCALIZED("ParaGRID::ParaGRID topology must be block topology")); + + if (!_block_topology->getProcGroup()->containsMyRank()) + return; + + int dimension=_block_topology->getDimension() ; + if (dimension != global_grid->getSpaceDimension()) + throw INTERP_KERNEL::Exception(LOCALIZED("ParaGrid::ParaGrid incompatible topology")); + _grid=global_grid; + _grid->incrRef(); + /*vector > xyz_array(dimension); + vector > local_indices = _block_topology->getLocalArrayMinMax(); + vector coordinates_names; + vector coordinates_units; + for (int idim=0; idimgetCoordsAt(idim); + double *arrayC=array->getPointer(); + cout << " Indices "<< local_indices[idim].first <<" "<getName()); + coordinates_units.push_back(array->getInfoOnComponentAt(0)); + } + _grid=MEDCouplingCMesh::New(); + _grid->set(xyz_array, coordinates_names,coordinates_units); + _grid->setName(global_grid->getName()); + _grid->setDescription(global_grid->getDescription());*/ + } + + ParaGRID::~ParaGRID() + { + if(_grid) + _grid->decrRef(); + } +} diff --git a/src/ParaMEDMEM/ParaGRID.hxx b/src/ParaMEDMEM/ParaGRID.hxx new file mode 100644 index 000000000..2335b9d6c --- /dev/null +++ b/src/ParaMEDMEM/ParaGRID.hxx @@ -0,0 +1,51 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __PARAGRID_HXX__ +#define __PARAGRID_HXX__ + +#include "InterpolationUtils.hxx" + +#include + +namespace ParaMEDMEM +{ + class Topology; + class BlockTopology; + class MEDCouplingCMesh; + + class ParaGRID + { + public: + ParaGRID(MEDCouplingCMesh* global_grid, Topology* topology) throw(INTERP_KERNEL::Exception); + BlockTopology * getBlockTopology() const { return _block_topology; } + virtual ~ParaGRID(); + MEDCouplingCMesh* getGrid() const { return _grid; } + private: + MEDCouplingCMesh* _grid; + // structured grid topology + ParaMEDMEM::BlockTopology* _block_topology; + // stores the x,y,z axes on the global grid + std::vector > _global_axis; + //id of the local grid + int _my_domain_id; + }; +} + +#endif diff --git a/src/ParaMEDMEM/ParaMESH.cxx b/src/ParaMEDMEM/ParaMESH.cxx new file mode 100644 index 000000000..a6482a554 --- /dev/null +++ b/src/ParaMEDMEM/ParaMESH.cxx @@ -0,0 +1,122 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMESH.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "MEDCouplingMemArray.hxx" + +#include +#include + +//inclusion for the namespaces +using namespace std; + +namespace ParaMEDMEM +{ + ParaMESH::ParaMESH( MEDCouplingPointSet *subdomain_mesh, MEDCouplingPointSet *subdomain_face, + DataArrayInt *CorrespElt_local2global, DataArrayInt *CorrespFace_local2global, + DataArrayInt *CorrespNod_local2global, const ProcessorGroup& proc_group ): + _cell_mesh(subdomain_mesh), + _face_mesh(subdomain_face), + _my_domain_id(proc_group.myRank()), + _block_topology (new BlockTopology(proc_group, subdomain_mesh->getNumberOfCells())), + _explicit_topology(0), + _node_global(CorrespNod_local2global), + _face_global(CorrespFace_local2global), + _cell_global(CorrespElt_local2global) + { + if(_cell_mesh) + _cell_mesh->incrRef(); + if(_face_mesh) + _face_mesh->incrRef(); + if(CorrespElt_local2global) + CorrespElt_local2global->incrRef(); + if(CorrespFace_local2global) + CorrespFace_local2global->incrRef(); + if(CorrespNod_local2global) + CorrespNod_local2global->incrRef(); + } + + ParaMESH::ParaMESH( MEDCouplingPointSet *mesh, const ProcessorGroup& proc_group, const std::string& name): + _cell_mesh(mesh), + _face_mesh(0), + _my_domain_id(proc_group.myRank()), + _block_topology (new BlockTopology(proc_group, mesh->getNumberOfCells())), + _node_global(0), + _face_global(0) + { + if(_cell_mesh) + _cell_mesh->incrRef(); + int nb_elem=mesh->getNumberOfCells(); + _explicit_topology=new BlockTopology(proc_group,nb_elem); + int nbOfCells=mesh->getNumberOfCells(); + _cell_global = DataArrayInt::New(); + _cell_global->alloc(nbOfCells,1); + int *cellglobal=_cell_global->getPointer(); + int offset = _block_topology->localToGlobal(make_pair(_my_domain_id,0)); + for (int i=0; idecrRef(); + _node_global=nodeGlobal; + if(_node_global) + _node_global->incrRef(); + } + } + + void ParaMESH::setCellGlobal(DataArrayInt *cellGlobal) + { + if(cellGlobal!=_cell_global) + { + if(_cell_global) + _cell_global->decrRef(); + _cell_global=cellGlobal; + if(_cell_global) + _cell_global->incrRef(); + } + } + + ParaMESH::~ParaMESH() + { + if(_cell_mesh) + _cell_mesh->decrRef(); + if(_face_mesh) + _face_mesh->decrRef(); + delete _block_topology; + if(_node_global) + _node_global->decrRef(); + if(_cell_global) + _cell_global->decrRef(); + if(_face_global) + _face_global->decrRef(); + delete _explicit_topology; + } + +} diff --git a/src/ParaMEDMEM/ParaMESH.hxx b/src/ParaMEDMEM/ParaMESH.hxx new file mode 100644 index 000000000..391bff5dd --- /dev/null +++ b/src/ParaMEDMEM/ParaMESH.hxx @@ -0,0 +1,82 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __PARAMESH_HXX__ +#define __PARAMESH_HXX__ + +#include "MEDCouplingPointSet.hxx" +#include "ProcessorGroup.hxx" +#include "MEDCouplingMemArray.hxx" + +#include +#include + +namespace ParaMEDMEM +{ + class Topology; + class BlockTopology; + class DataArrayInt; + + class ParaMESH + { + public: + ParaMESH( MEDCouplingPointSet *subdomain_mesh, + MEDCouplingPointSet *subdomain_face, + DataArrayInt *CorrespElt_local2global, + DataArrayInt *CorrespFace_local2global, + DataArrayInt *CorrespNod_local2global, + const ProcessorGroup& proc_group ) ; + ParaMESH( MEDCouplingPointSet *mesh, + const ProcessorGroup& proc_group, const std::string& name); + + virtual ~ParaMESH(); + void setNodeGlobal(DataArrayInt *nodeGlobal); + void setCellGlobal(DataArrayInt *cellGlobal); + Topology* getTopology() const { return _explicit_topology; } + bool isStructured() const { return _cell_mesh->isStructured(); } + MEDCouplingPointSet *getCellMesh() const { return _cell_mesh; } + MEDCouplingPointSet *getFaceMesh() const { return _face_mesh; } + BlockTopology* getBlockTopology() const { return _block_topology; } + + DataArrayInt* getGlobalNumberingNodeDA() const { if(_node_global) _node_global->incrRef(); return _node_global; } + DataArrayInt* getGlobalNumberingFaceDA() const { if(_face_global) _face_global->incrRef(); return _face_global; } + DataArrayInt* getGlobalNumberingCellDA() const { if(_cell_global) _cell_global->incrRef(); return _cell_global; } + const int* getGlobalNumberingNode() const { if(_node_global) return _node_global->getConstPointer(); return 0; } + const int* getGlobalNumberingFace() const { if(_face_global) return _face_global->getConstPointer(); return 0; } + const int* getGlobalNumberingCell() const { if(_cell_global) return _cell_global->getConstPointer(); return 0; } + + private: + //mesh object underlying the ParaMESH object + MEDCouplingPointSet *_cell_mesh ; + MEDCouplingPointSet *_face_mesh ; + + //id of the local grid + int _my_domain_id; + + //global topology of the cells + ParaMEDMEM::BlockTopology* _block_topology; + Topology* _explicit_topology; + // pointers to global numberings + DataArrayInt* _node_global; + DataArrayInt* _face_global; + DataArrayInt* _cell_global; + }; +} + +#endif diff --git a/src/ParaMEDMEM/ProcessorGroup.cxx b/src/ParaMEDMEM/ProcessorGroup.cxx new file mode 100644 index 000000000..011695016 --- /dev/null +++ b/src/ParaMEDMEM/ProcessorGroup.cxx @@ -0,0 +1,32 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ProcessorGroup.hxx" +#include "InterpolationUtils.hxx" + +namespace ParaMEDMEM +{ + ProcessorGroup::ProcessorGroup (const CommInterface& interface, int start, int end):_comm_interface(interface) + { + if (start>end) + throw INTERP_KERNEL::Exception("wrong call to Processor group constructor"); + for (int i=start; i<=end;i++) + _proc_ids.insert(i); + } +} diff --git a/src/ParaMEDMEM/ProcessorGroup.hxx b/src/ParaMEDMEM/ProcessorGroup.hxx new file mode 100644 index 000000000..344704a9f --- /dev/null +++ b/src/ParaMEDMEM/ProcessorGroup.hxx @@ -0,0 +1,60 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __PROCESSORGROUP_HXX__ +#define __PROCESSORGROUP_HXX__ + +#include "CommInterface.hxx" + +#include + +namespace ParaMEDMEM +{ + class ProcessorGroup + { + public: + + ProcessorGroup(const CommInterface& interface):_comm_interface(interface) { } + ProcessorGroup(const CommInterface& interface, std::set proc_ids): + _comm_interface(interface),_proc_ids(proc_ids) { } + ProcessorGroup (const ProcessorGroup& proc_group, std::set proc_ids): + _comm_interface(proc_group.getCommInterface()),_proc_ids(proc_ids) { } + ProcessorGroup (const ProcessorGroup& other): + _comm_interface(other.getCommInterface()),_proc_ids(other._proc_ids) { } + ProcessorGroup (const CommInterface& interface, int start, int end); + virtual ~ProcessorGroup() { } + virtual ProcessorGroup *deepCpy() const = 0; + virtual ProcessorGroup* fuse (const ProcessorGroup&) const = 0; + virtual void intersect (ProcessorGroup&) = 0; + bool contains(int rank) const { return _proc_ids.find(rank)!=_proc_ids.end(); } + virtual bool containsMyRank() const = 0; + int size() const { return _proc_ids.size(); } + const CommInterface& getCommInterface()const { return _comm_interface; } + virtual int myRank() const = 0; + virtual int translateRank(const ProcessorGroup*, int) const = 0; + virtual ProcessorGroup* createComplementProcGroup() const = 0; + virtual ProcessorGroup* createProcGroup() const = 0; + virtual const std::set& getProcIDs()const { return _proc_ids; } + protected: + const CommInterface _comm_interface; + std::set _proc_ids; + }; +} + +#endif diff --git a/src/ParaMEDMEM/README_JR b/src/ParaMEDMEM/README_JR new file mode 100644 index 000000000..762dc9e47 --- /dev/null +++ b/src/ParaMEDMEM/README_JR @@ -0,0 +1,446 @@ + +CVS : +===== + +Branche : BR_MEDPARA : MED_SRC +setenv CVSROOT 
:pserver:rahuel@cvs.opencascade.com:/home/server/cvs/MED +cvs login +... + +Repertoires : +============= + +Sources : /home/rahuel/MEDPARAsynch +Construction sur awa : /data/tmpawa/rahuel/MEDPARAsynch/MED_Build +Intallation sur awa : /data/tmpawa/rahuel/MEDPARAsynch/MED_Install + + +Environnement : +=============== + +source /home/rahuel/MEDPARAsynch/env_products.csh + +On utilise : +/data/tmpawa/vb144235/valgrind-3.2.1/valgrind_install/bin +/data/tmpawa/adam/Salome3/V3_2_7_AWA_OCC/Python-2.4.1 +/data/tmpawa/vb144235/med_231_install +/data/tmpawa2/adam/omniORB/omniORB-4.0.7 +/data/tmpawa/vb144235/lam_install +/data/tmpawa/vb144235/cppunit_install +/data/tmpawa/vb144235/fvm_install_lam +/data/tmpawa/vb144235/bft_install +/home/rahuel/MEDPARAsynch/ICoCo +/data/tmpawa2/adam/Salome3/V3_2_0_maintainance/KERNEL/KERNEL_INSTALL + + +Build_Configure et Configure : +============================== + +MEDMEM est en "stand-alone" sans KERNEL ni IHM. + +cd $MED_BUILD_DIR +${MED_SRC_DIR}/build_configure --without-kernel --without-ihm +rm ${MED_SRC_DIR}/adm_local_without_kernel/adm_local_without_kernel +rm -fR $MED_BUILD_DIR/adm_local_without_kernel/adm_local_without_kernel + +cd $MED_BUILD_DIR +${MED_SRC_DIR}/configure --without-kernel --without-ihm --with-lam=/data/tmpawa/vb144235/lam_install --prefix=${MED_ROOT_DIR} --with-med2=/data/tmpawa/vb144235/med_231_install --with-python=/data/tmpawa/adam/Salome3/V3_2_7_AWA_OCC/Python-2.4.1 --with-cppunit=/data/tmpawa/vb144235/cppunit_install --with-cppunit_inc=/data/tmpawa/vb144235/cppunit_install/include --with-fvm=/data/tmpawa/vb144235/fvm_install_lam +rm ${MED_SRC_DIR}/adm_local_without_kernel/adm_local_without_kernel +rm -fR $MED_BUILD_DIR/adm_local_without_kernel/adm_local_without_kernel + + +Construction : +============== + +cd $MED_BUILD_DIR +make +make install + +Problemes de construction : +=========================== + +Liste des fichiers modifies et differents de la base CVS pour pouvoir +effectuer la construction et l'installation : + +M MED_SRC/configure.in.base : +----------------------------- +CHECK_MPICH +CHECK_LAM +CHECK_OPENMPI mis en commentaire (redefinit le resultat de CHECK_LAM) +CHECK_CPPUNIT a ete ajoute + +M MED_SRC/adm_local_without_kernel/unix/config_files/check_lam.m4 : +------------------------------------------------------------------- +Debugs pour trouver la bonne configuration de LAM + +M MED_SRC/src/INTERP_KERNEL/Makefile.in : +----------------------------------------- +Problemes de construction des tests + +M MED_SRC/src/ParaMEDMEM/Makefile.in : +-------------------------------------- +. Construction de libParaMEDMEM.a pour gcov (link statique) +. Ajout d'options de compilations : -fprofile-arcs -ftest-coverage -pg (gcov) ==> + instrumentation du code + +C MED_SRC/src/ParaMEDMEM/Test/Makefile.in : +------------------------------------------- +. Construction de libParaMEDMEMTest.a pour gcov (link statique) +. Ajout d'options de compilations : -fprofile-arcs -ftest-coverage -pg (gcov) ==> + instrumentation du code +. 
Prise en compte de $(MED_WITH_KERNEL) avec : + ifeq ($(MED_WITH_KERNEL),yes) + LDFLAGSFORBIN += $(LDFLAGS) -lm $(MED3_LIBS) $(HDF5_LIBS) $(MPI_LIBS) \ + -L$(CMAKE_BINARY_DIR)/lib@LIB_LOCATION_SUFFIX@/salome -lmed_V2_1 -lparamed -lmedmem \ + ${KERNEL_LDFLAGS} -lSALOMELocalTrace -lSALOMEBasics \ + $(CPPUNIT_LIBS) \ + -lParaMEDMEMTest + endif + ifeq ($(MED_WITH_KERNEL),no) + LDFLAGSFORBIN += $(LDFLAGS) -lm $(MED3_LIBS) $(HDF5_LIBS) $(MPI_LIBS) \ + -L$(CMAKE_BINARY_DIR)/lib@LIB_LOCATION_SUFFIX@/salome -lmed_V2_1 -lparamed -linterpkernel -lmedmem \ + ${KERNEL_LDFLAGS} ${FVM_LIBS} ${CPPUNIT_LIBS} -L/data/tmpawa/vb144235/bft_install/lib -lbft\ + -lParaMEDMEMTest + endif + +M MED_SRC/src/ParaMEDMEM/Test/ParaMEDMEMTest.hxx : +-------------------------------------------------- +Mise en commentaire du test manquant : +CPPUNIT_TEST(testNonCoincidentDEC_3D); + +U MED_SRC/src/ParaMEDMEM/Test/ParaMEDMEMTest_NonCoincidentDEC.cxx : +------------------------------------------------------------------- +Manque dans CVS + +Pour forcer la reconstruction des tests : +========================================= + +cd $MED_BUILD_DIR +rm src/ParaMEDMEM/*o +rm src/ParaMEDMEM/*.la +rm src/ParaMEDMEM/test_* +rm src/ParaMEDMEM/.libs/* +rm src/ParaMEDMEM/Test/*o +rm src/ParaMEDMEM/Test/*.la +rm src/ParaMEDMEM/Test/.libs/* +rm core.* +rm vgcore.* +cd $MED_BUILD_DIR/src/ParaMEDMEM/Test +make +make install +cd $MED_BUILD_DIR + + +Probleme avec lam : +=================== + +jr[1175]> mpirun -np 5 -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} TestParaMEDMEM +21508 TestParaMEDMEM running on n0 (o) +21509 TestParaMEDMEM running on n0 (o) +21510 TestParaMEDMEM running on n0 (o) +21511 TestParaMEDMEM running on n0 (o) +21512 TestParaMEDMEM running on n0 (o) +- Trace /home/rahuel/MEDPARAsynch/MED_SRC/src/MEDMEM/MEDMEM_Init.cxx [54] : Med Memory Initialization with $SALOME_trace = local +- Trace /home/rahuel/MEDPARAsynch/MED_SRC/src/MEDMEM/MEDMEM_Init.cxx [54] : Med Memory Initialization with $SALOME_trace = local +- Trace /home/rahuel/MEDPARAsynch/MED_SRC/src/MEDMEM/MEDMEM_Init.cxx [54] : Med Memory Initialization with $SALOME_trace = local +- Trace /home/rahuel/MEDPARAsynch/MED_SRC/src/MEDMEM/MEDMEM_Init.cxx [54] : Med Memory Initialization with $SALOME_trace = local +- Trace /home/rahuel/MEDPARAsynch/MED_SRC/src/MEDMEM/MEDMEM_Init.cxx [54] : Med Memory Initialization with $SALOME_trace = local +----------------------------------------------------------------------------- +The selected RPI failed to initialize during MPI_INIT. This is a +fatal error; I must abort. + +This occurred on host awa (n0). +The PID of failed process was 21508 (MPI_COMM_WORLD rank: 0) +----------------------------------------------------------------------------- +----------------------------------------------------------------------------- +One of the processes started by mpirun has exited with a nonzero exit +code. This typically indicates that the process finished in error. +If your process did not finish in error, be sure to include a "return +0" or "exit(0)" in your C code before exiting the application. + +PID 21510 failed on node n0 (127.0.0.1) with exit status 1. +----------------------------------------------------------------------------- +jr[1176]> + + +Contournement du probleme lam : +=============================== + +mpirun -np 5 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} TestParaMEDMEM + + +Valgrind : +========== +. 
Tests run under valgrind report errors in MPI_Init and
+  MPI_Finalize, as well as in routines reported as "below main".
+. In addition a "Segmentation Violation" is obtained, together with a
+  "vgcore.*" file (valgrind crash).
+. But we do get " All heap blocks were freed -- no leaks are possible."
+  and there is no malloc/free or new/delete error in ParaMEDMEM or
+  in the tests.
+
+. However, when the tests are run without valgrind, there is no
+  error and no "core.*" file.
+
+
+CPPUNIT tests of $MED_BUILD_DIR/src/ParaMEDMEM/Test :
+==========================================================
+
+MPI_Init is called only once.
+It is followed by the execution of the whole test suite gathered
+in the three executables TestParaMEDMEM, TestMPIAccessDEC and
+TestMPIAccess.
+Finally there is a single call to MPI_Finalize.
+
+If one test of a suite contains an anomaly, this
+may have consequences on the execution of the following tests.
+
+While debugging the TestMPIAccessDEC test suite this
+was the case: messages posted in lam remained
+unread. The next test then ran more and more slowly
+without reporting any error (a problem that is hard to identify).
+
+
+Running the TestParaMEDMEM tests with CPPUNIT and TotalView (option -tv) :
+==============================================================================
+
+mpirun -np 5 -ssi rpi tcp C -tv -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} TestParaMEDMEM
+
+It may happen that totalview cannot be used for lack of a
+license.
+
+
+
+Running the TestParaMEDMEM tests with CPPUNIT and Valgrind with "memory leaks" :
+====================================================================================
+
+mpirun -np 5 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full TestParaMEDMEM
+
+
+Running the MPI_AccessDEC functional tests with CPPUNIT :
+================================================================
+
+mpirun -np 11 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full TestMPIAccessDEC
+
+
+Running the MPI_Access unit tests with CPPUNIT :
+==========================================================
+
+mpirun -np 3 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full TestMPIAccess
+
+
+TestMPIAccess/TestMPIAccessDEC/TestParaMEDMEM and gcov :
+=======================================================
+
+The results are in the following directories of $MED_BUILD_DIR/src/ParaMEDMEM/Test :
+-------------
+
+  TestParaMEDMEM-gcov/
+  TestMPIAccessDEC-gcov/
+  TestMPIAccess-gcov/
+
+I did not find any anomalies in them.
+ +compilation : -fprofile-arcs -ftest-coverage +------------- + +$MED_BUILD_DIR/src/ParaMEDMEM/makefile.in : LIB=libparamedar.a \ +------------------------------------------- libparamed.la + +$MED_BUILD_DIR/src/ParaMEDMEM/Test/makefile.in : LIB = libParaMEDMEMTestar.a \ +------------------------------------------------ libParaMEDMEMTest.la + +links statiques manuels : +------------------------- + +g++ -g -D_DEBUG_ -Wno-deprecated -Wparentheses -Wreturn-type -Wunused -DPCLINUX -I/data/tmpawa/vb144235/cppunit_install/include -I/data/tmpawa/vb144235/lam_install/include -ftemplate-depth-42 -I/home/rahuel/MEDPARAsynch/MED_SRC/src/ParaMEDMEM -fprofile-arcs -ftest-coverage -o TestMPIAccess TestMPIAccess.lo -L../../../lib64/salome -lstdc++ -L../../../lib64/salome -lstdc++ -lm -L/data/tmpawa/vb144235/med_231_install/lib -lmed -lhdf5 -lhdf5 -L/data/tmpawa/vb144235/lam_install/lib -llam -lmpi -L../../../lib64/salome -lmed_V2_1 --whole-archive -linterpkernel -lmedmem -L/data/tmpawa/vb144235/fvm_install_lam/lib -lfvm -L/data/tmpawa/vb144235/cppunit_install/lib -lcppunit -L/data/tmpawa/vb144235/bft_install/lib -lbft -lutil -lm -lrt -ldl -Bstatic -L./ -lParaMEDMEMTestar -L../ -lparamedar -L./ -lParaMEDMEMTestar + +g++ -g -D_DEBUG_ -Wno-deprecated -Wparentheses -Wreturn-type -Wunused -DPCLINUX -I/data/tmpawa/vb144235/cppunit_install/include -I/data/tmpawa/vb144235/lam_install/include -ftemplate-depth-42 -I/home/rahuel/MEDPARAsynch/MED_SRC/src/ParaMEDMEM -fprofile-arcs -ftest-coverage -o TestMPIAccessDEC TestMPIAccessDEC.lo -L../../../lib64/salome -lstdc++ -L../../../lib64/salome -lstdc++ -lm -L/data/tmpawa/vb144235/med_231_install/lib -lmed -lhdf5 -lhdf5 -L/data/tmpawa/vb144235/lam_install/lib -llam -lmpi -L../../../lib64/salome -lmed_V2_1 --whole-archive -linterpkernel -lmedmem -L/data/tmpawa/vb144235/fvm_install_lam/lib -lfvm -L/data/tmpawa/vb144235/cppunit_install/lib -lcppunit -L/data/tmpawa/vb144235/bft_install/lib -lbft -lutil -lm -lrt -ldl -Bstatic -L./ -lParaMEDMEMTestar -L../ -lparamedar -L./ -lParaMEDMEMTestar + +g++ -g -D_DEBUG_ -Wno-deprecated -Wparentheses -Wreturn-type -Wunused -DPCLINUX -I/data/tmpawa/vb144235/cppunit_install/include -I/data/tmpawa/vb144235/lam_install/include -ftemplate-depth-42 -I/home/rahuel/MEDPARAsynch/MED_SRC/src/ParaMEDMEM -fprofile-arcs -ftest-coverage -o TestParaMEDMEM TestParaMEDMEM.lo -L../../../lib64/salome -lstdc++ -L../../../lib64/salome -lstdc++ -lm -L/data/tmpawa/vb144235/med_231_install/lib -lmed -lhdf5 -lhdf5 -L/data/tmpawa/vb144235/lam_install/lib -llam -lmpi -L../../../lib64/salome -lmed_V2_1 --whole-archive -linterpkernel -lmedmem -L/data/tmpawa/vb144235/fvm_install_lam/lib -lfvm -L/data/tmpawa/vb144235/cppunit_install/lib -lcppunit -L/data/tmpawa/vb144235/bft_install/lib -lbft -lutil -lm -lrt -ldl -Bstatic -L./ -lParaMEDMEMTestar -L../ -lparamedar -L./ -lParaMEDMEMTestar + +Ne pas oublier le make install apres ... + +execution et gcov : +------------------- + +Pour pouvoir traiter les .cxx de ${MED_BUILD_DIR}/src/ParaMEDMEM et de +${MED_BUILD_DIR}/src/ParaMEDMEM/Test, on execute deux fois gcov. 
+ +cd ${MED_BUILD_DIR}/src/ParaMEDMEM/Test + +mpirun -np 3 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} TestMPIAccess + +gcov TestMPIAccess.cxx test_MPI_Access_Send_Recv.cxx \ + test_MPI_Access_Cyclic_Send_Recv.cxx \ + test_MPI_Access_SendRecv.cxx \ + test_MPI_Access_ISend_IRecv.cxx \ + test_MPI_Access_Cyclic_ISend_IRecv.cxx \ + test_MPI_Access_ISendRecv.cxx \ + test_MPI_Access_Probe.cxx \ + test_MPI_Access_IProbe.cxx \ + test_MPI_Access_Cancel.cxx \ + test_MPI_Access_Send_Recv_Length.cxx \ + test_MPI_Access_ISend_IRecv_Length.cxx \ + test_MPI_Access_ISend_IRecv_Length_1.cxx \ + test_MPI_Access_Time.cxx \ + test_MPI_Access_Time_0.cxx \ + test_MPI_Access_ISend_IRecv_BottleNeck.cxx \ + ../MPI_Access.cxx +gcov -o ../ TestMPIAccess.cxx test_MPI_Access_Send_Recv.cxx \ + test_MPI_Access_Cyclic_Send_Recv.cxx \ + test_MPI_Access_SendRecv.cxx \ + test_MPI_Access_ISend_IRecv.cxx \ + test_MPI_Access_Cyclic_ISend_IRecv.cxx \ + test_MPI_Access_ISendRecv.cxx \ + test_MPI_Access_Probe.cxx \ + test_MPI_Access_IProbe.cxx \ + test_MPI_Access_Cancel.cxx \ + test_MPI_Access_Send_Recv_Length.cxx \ + test_MPI_Access_ISend_IRecv_Length.cxx \ + test_MPI_Access_ISend_IRecv_Length_1.cxx \ + test_MPI_Access_Time.cxx \ + test_MPI_Access_Time_0.cxx \ + test_MPI_Access_ISend_IRecv_BottleNeck.cxx \ + ../MPI_Access.cxx + + +cd ${MED_BUILD_DIR}/src/ParaMEDMEM/Test +mpirun -np 11 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} TestMPIAccessDEC + +gcov TestMPIAccessDEC.cxx test_AllToAllDEC.cxx \ + test_AllToAllvDEC.cxx \ + test_AllToAllTimeDEC.cxx \ + test_AllToAllvTimeDEC.cxx \ + test_AllToAllvTimeDoubleDEC.cxx \ + ../TimeInterpolator.cxx \ + ../LinearTimeInterpolator.cxx \ + ../MPI_Access.cxx \ + ../MPI_AccessDEC.cxx +gcov -o ../ TestMPIAccessDEC.cxx test_AllToAllDEC.cxx \ + test_AllToAllvDEC.cxx \ + test_AllToAllTimeDEC.cxx \ + test_AllToAllvTimeDEC.cxx \ + test_AllToAllvTimeDoubleDEC.cxx \ + ../TimeInterpolator.cxx \ + ../LinearTimeInterpolator.cxx \ + ../MPI_Access.cxx \ + ../MPI_AccessDEC.cxx + +cd ${MED_BUILD_DIR}/src/ParaMEDMEM/Test +mpirun -np 5 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} TestParaMEDMEM + +gcov TestParaMEDMEM.cxx ParaMEDMEMTest.cxx \ + ParaMEDMEMTest_MPIProcessorGroup.cxx \ + ParaMEDMEMTest_BlockTopology.cxx \ + ParaMEDMEMTest_InterpKernelDEC.cxx \ + ../BlockTopology.cxx \ + ../ComponentTopology.cxx \ + ../DEC.cxx \ + ../ElementLocator.cxx \ + ../InterpolationMatrix.cxx \ + ../InterpKernelDEC.cxx \ + ../MPIProcessorGroup.cxx \ + ../MxN_Mapping.cxx \ + ../ParaFIELD.cxx \ + ../ParaMESH.cxx \ + ../ParaSUPPORT.cxx \ + ../ProcessorGroup.cxx \ + ../TimeInterpolator.cxx \ + ../LinearTimeInterpolator.cxx \ + ../MPI_Access.cxx \ + ../MPI_AccessDEC.cxx + +gcov -o ../ TestParaMEDMEM.cxx ParaMEDMEMTest.cxx \ + ParaMEDMEMTest_MPIProcessorGroup.cxx \ + ParaMEDMEMTest_BlockTopology.cxx \ + ParaMEDMEMTest_InterpKernelDEC.cxx \ + ../BlockTopology.cxx \ + ../ComponentTopology.cxx \ + ../DEC.cxx \ + ../ElementLocator.cxx \ + ../InterpolationMatrix.cxx \ + ../InterpKernelDEC.cxx \ + ../MPIProcessorGroup.cxx \ + ../MxN_Mapping.cxx \ + ../ParaFIELD.cxx \ + ../ParaMESH.cxx \ + ../ParaSUPPORT.cxx \ + ../ProcessorGroup.cxx \ + ../TimeInterpolator.cxx \ + ../LinearTimeInterpolator.cxx \ + ../MPI_Access.cxx \ + ../MPI_AccessDEC.cxx + + + + + +Lancement des tests unitaires sans CPPUNIT : +============================================ + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full 
test_MPI_Access_Send_Recv + +mpirun -np 3 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_Cyclic_Send_Recv + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_SendRecv + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_ISend_IRecv + +mpirun -np 3 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_Cyclic_ISend_IRecv + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_ISendRecv + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_Probe + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_IProbe + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_Cancel + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_Send_Recv_Length + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_ISend_IRecv_Length + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_ISend_IRecv_Length_1 + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_Time + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_MPI_Access_Time_0 2 1 + + +#AllToAllDEC +mpirun -np 4 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllDEC 0 + +mpirun -np 4 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllDEC 1 + + +#AllToAllvDEC +mpirun -np 4 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllvDEC 0 + +mpirun -np 4 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllvDEC 1 + + +#AllToAllTimeDEC +mpirun -np 4 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllTimeDEC 0 + +mpirun -np 4 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllTimeDEC 1 + + +#AllToAllvTimeDEC +mpirun -np 11 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllvTimeDEC 0 1 + +mpirun -np 11 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllvTimeDEC 0 + +mpirun -np 11 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllvTimeDEC 1 + + + +#AllToAllvTimeDoubleDEC +mpirun -np 11 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllvTimeDoubleDEC 0 + +mpirun -np 11 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind --leak-check=full test_AllToAllvTimeDoubleDEC 1 + + + +mpirun -np 2 -ssi rpi tcp C -v -x PATH=${PATH},LD_LIBRARY_PATH=${LD_LIBRARY_PATH} valgrind 
--leak-check=full test_MPI_Access_ISend_IRecv_BottleNeck + diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.cxx b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx new file mode 100644 index 000000000..af0f9fe84 --- /dev/null +++ b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx @@ -0,0 +1,416 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "MPIProcessorGroup.hxx" +#include "StructuredCoincidentDEC.hxx" +#include "InterpKernelUtilities.hxx" + +#include + +using namespace std; + +namespace ParaMEDMEM +{ + + /*! \defgroup structuredcoincidentdec StructuredCoincidentDEC + + This class is meant for remapping fields that have identical + supports with different parallel topologies. It can be used to couple + together multiphysics codes that operate on the same domain + with different partitionings, which can be useful if one of + the computation is much faster than the other. It can also be used + to couple together codes that share an interface that was generated + in the same manner (with identical global ids). + Also, this \ref dec can be used for fields that have component topologies, + i.e., components that are scattered over several processors. + + The remapping between the two supports is based on identity of global + ids, instead of geometrical considerations as it is the case for + \ref NonCoincidentDEC and \ref InterpKernelDEC. Therefore, this \ref dec must not be used + for coincident meshes that do not have the same numbering. + + As all the other DECs, its use is made of two phases : + - a setup phase during which the topologies are exchanged so that + the target side knows from which processors it should expect + the data. + - a send/recv phase during which the field data is actually transferred. + + This example illustrates the sending of a field with + the \c StructuredCoincidentDEC : + \code + ... + StructuredCoincidentDEC dec(groupA, groupB); + dec.attachLocalField(field); + dec.synchronize(); + if (groupA.containsMyRank()) + dec.recvData(); + else if (groupB.containsMyRank()) + dec.sendData(); + ... + \endcode + + Creating a ParaFIELD to be attached to the DEC is exactly the same as for + other DECs in the case when the remapping concerns similar meshes + that only have different partitionings. In the case when the + fields have also different component topologies, creating the ParaFIELD + requires some more effort. See \ref parafield section for more details. 
+ */ + + + StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0), + _send_counts(0),_recv_counts(0), + _send_displs(0),_recv_displs(0), + _recv_buffer(0),_send_buffer(0) + { + } + + + StructuredCoincidentDEC::~StructuredCoincidentDEC() + { + delete [] _send_buffer; + delete [] _recv_buffer; + delete []_send_displs; + delete [] _recv_displs; + delete [] _send_counts; + delete [] _recv_counts; + if (! _source_group->containsMyRank()) + delete _topo_source; + if(!_target_group->containsMyRank()) + delete _topo_target; + } + + /*! + \addtogroup structuredcoincidentdec + @{ + */ + StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):DisjointDEC(local_group,distant_group), + _topo_source(0),_topo_target(0), + _send_counts(0),_recv_counts(0), + _send_displs(0),_recv_displs(0), + _recv_buffer(0),_send_buffer(0) + { + } + + /*! Synchronization process for exchanging topologies + */ + void StructuredCoincidentDEC::synchronizeTopology() + { + if (_source_group->containsMyRank()) + _topo_source = dynamic_cast(_local_field->getTopology()); + if (_target_group->containsMyRank()) + _topo_target = dynamic_cast(_local_field->getTopology()); + + // Transmitting source topology to target code + broadcastTopology(_topo_source,1000); + // Transmitting target topology to source code + broadcastTopology(_topo_target,2000); + if (_topo_source->getNbElements() != _topo_target->getNbElements()) + throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies"); + + } + + /*! Creates the arrays necessary for the data transfer + * and fills the send array with the values of the + * source field + * */ + void StructuredCoincidentDEC::prepareSourceDE() + { + //////////////////////////////////// + //Step 1 : _buffer array creation + + if (!_topo_source->getProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface()); + + int myranksource = _topo_source->getProcGroup()->myRank(); + + vector * target_arrays=new vector[_topo_target->getProcGroup()->size()]; + + //cout<<" topotarget size"<< _topo_target->getProcGroup()->size()< getNbLocalElements(); + for (int ielem=0; ielem< nb_local ; ielem++) + { + // cout <<"source local :"<localToGlobal(make_pair(myranksource, ielem)); + // cout << "global "< target_local =_topo_target->globalToLocal(global); + // cout << "target local : "<size(); + + _send_counts=new int[union_size]; + _send_displs=new int[union_size]; + _recv_counts=new int[union_size]; + _recv_displs=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _send_counts[i]=0; + _recv_counts[i]=0; + _recv_displs[i]=0; + } + _send_displs[0]=0; + + for (int iproc=0; iproc < _topo_target->getProcGroup()->size(); iproc++) + { + //converts the rank in target to the rank in union communicator + int unionrank=group->translateRank(_topo_target->getProcGroup(),iproc); + _send_counts[unionrank]=target_arrays[iproc].size(); + } + + for (int iproc=1; iprocsize();iproc++) + _send_displs[iproc]=_send_displs[iproc-1]+_send_counts[iproc-1]; + + _send_buffer = new double [nb_local ]; + + ///////////////////////////////////////////////////////////// + //Step 2 : filling the _buffers with the source field values + + int* counter=new int [_topo_target->getProcGroup()->size()]; + counter[0]=0; + for (int i=1; i<_topo_target->getProcGroup()->size(); i++) + counter[i]=counter[i-1]+target_arrays[i-1].size(); + + + const double* value = 
_local_field->getField()->getArray()->getPointer(); + //cout << "Nb local " << nb_local<localToGlobal(make_pair(myranksource, ielem)); + pair target_local =_topo_target->globalToLocal(global); + //cout <<"global : "<< global<<" local :"<getProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface()); + + int myranktarget = _topo_target->getProcGroup()->myRank(); + + vector < vector > source_arrays(_topo_source->getProcGroup()->size()); + int nb_local = _topo_target-> getNbLocalElements(); + for (int ielem=0; ielem< nb_local ; ielem++) + { + // cout <<"TS target local :"<localToGlobal(make_pair(myranktarget, ielem)); + //cout << "TS global "< source_local =_topo_source->globalToLocal(global); + // cout << "TS source local : "<size(); + _recv_counts=new int[union_size]; + _recv_displs=new int[union_size]; + _send_counts=new int[union_size]; + _send_displs=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _send_counts[i]=0; + _recv_counts[i]=0; + _recv_displs[i]=0; + } + for (int iproc=0; iproc < _topo_source->getProcGroup()->size(); iproc++) + { + //converts the rank in target to the rank in union communicator + int unionrank=group->translateRank(_topo_source->getProcGroup(),iproc); + _recv_counts[unionrank]=source_arrays[iproc].size(); + } + for (int i=1; igetProcGroup()->myRank()==0) + { + MESSAGE ("Master rank"); + topo->serialize(serializer, size); + rank_master = group->translateRank(topo->getProcGroup(),0); + MESSAGE("Master rank world number is "<size()); + for (int i=0; i< group->size(); i++) + { + if (i!= rank_master) + _comm_interface->send(&rank_master,1,MPI_INT, i,tag+i,*(group->getComm())); + } + } + else + { + MESSAGE(" rank "<myRank()<< " waiting ..."); + _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status); + MESSAGE(" rank "<myRank()<< "received master rank"<broadcast(&size, 1,MPI_INT,rank_master,*(group->getComm())); + + int* buffer=new int[size]; + if (topo!=0 && topo->getProcGroup()->myRank()==0) + copy(serializer, serializer+size, buffer); + _comm_interface->broadcast(buffer,size,MPI_INT,rank_master,*(group->getComm())); + + // Processors which did not possess the source topology + // unserialize it + + BlockTopology* topotemp=new BlockTopology(); + topotemp->unserialize(buffer, *_comm_interface); + + if (topo==0) + topo=topotemp; + else + delete topotemp; + + // Memory cleaning + delete[] buffer; + if (serializer!=0) + delete[] serializer; + MESSAGE (" rank "<myRank()<< " unserialize is over"); + delete group; + } + + + + void StructuredCoincidentDEC::recvData() + { + //MPI_COMM_WORLD is used instead of group because there is no + //mechanism for creating the union group yet + MESSAGE("recvData"); + for (int i=0; i< 4; i++) + cout << _recv_counts[i]<<" "; + cout <(_union_group)->getComm()); + _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE, + _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm); + cout<<"end AllToAll"<getNbLocalElements(); + //double* value=new double[nb_local]; + double* value=const_cast(_local_field->getField()->getArray()->getPointer()); + + int myranktarget=_topo_target->getProcGroup()->myRank(); + vector counters(_topo_source->getProcGroup()->size()); + counters[0]=0; + for (int i=0; i<_topo_source->getProcGroup()->size()-1; i++) + { + MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface); + int 
worldrank=group->translateRank(_topo_source->getProcGroup(),i); + counters[i+1]=counters[i]+_recv_counts[worldrank]; + delete group; + } + + for (int ielem=0; ielemlocalToGlobal(make_pair(myranktarget, ielem)); + pair source_local =_topo_source->globalToLocal(global); + value[ielem]=_recv_buffer[counters[source_local.first]++]; + } + + + //_local_field->getField()->setValue(value); + } + + void StructuredCoincidentDEC::sendData() + { + MESSAGE ("sendData"); + for (int i=0; i< 4; i++) + cout << _send_counts[i]<<" "; + cout <(_union_group)->getComm()); + _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE, + _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm); + cout<<"end AllToAll"<containsMyRank()) + { + synchronizeTopology(); + prepareSourceDE(); + } + else if (_target_group->containsMyRank()) + { + synchronizeTopology(); + prepareTargetDE(); + } + } + /*! + @} + */ +} + diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.hxx b/src/ParaMEDMEM/StructuredCoincidentDEC.hxx new file mode 100644 index 000000000..75f63b4f0 --- /dev/null +++ b/src/ParaMEDMEM/StructuredCoincidentDEC.hxx @@ -0,0 +1,58 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __STRUCTUREDCOINCIDENTDEC_HXX__ +#define __STRUCTUREDCOINCIDENTDEC_HXX__ + +#include "DisjointDEC.hxx" +#include "BlockTopology.hxx" + + +namespace ParaMEDMEM +{ + class DEC; + class BlockTopology; + class StructuredCoincidentDEC : public DisjointDEC + { + public: + StructuredCoincidentDEC(); + StructuredCoincidentDEC( ProcessorGroup& source, ProcessorGroup& target); + virtual ~StructuredCoincidentDEC(); + void synchronize(); + void recvData(); + void sendData(); + void prepareSourceDE(); + void prepareTargetDE(); + + private : + void synchronizeTopology(); + void broadcastTopology(BlockTopology*&, int tag); + + BlockTopology* _topo_source; + BlockTopology* _topo_target; + int* _send_counts; + int* _recv_counts; + int* _send_displs; + int* _recv_displs; + double* _recv_buffer; + double* _send_buffer; + }; +} + +#endif diff --git a/src/ParaMEDMEM/TODO_JR b/src/ParaMEDMEM/TODO_JR new file mode 100644 index 000000000..de2318d54 --- /dev/null +++ b/src/ParaMEDMEM/TODO_JR @@ -0,0 +1,50 @@ + +MPI_Access : +============ + +. Creer des methodes [I]SendRecv en point a point avec un "target" + pour le Send et un "target" pour le Recv comme le SendRecv MPI. + +. Ne pas creer de structure RequestStruct en mode synchrone. + + +MPI_AccessDEC : +=============== + +. AllToAll, AllToAllv, AllToAllTime et AllToAllvTime comportent + des sequences de code semblables qui pourraient etre regroupees + sans que cela nuise a la lisibilite du code. + +. 
In asynchronous mode, there is no congestion control of the
+  messages sent in CheckSent(). It is true that in practice a time
+  synchronization is done in AllToAllTime and AllToAllvTime. But this
+  problem could occur with AllToAll and AllToAllv. It would be possible
+  to set a maximum number of sent and "in progress" messages and to
+  compare it with the number of requests returned by MPI_Access. If,
+  say, ?n?*UnionGroupSize were exceeded, CheckSent could run in
+  "WithWait" mode, so that Wait would be called instead of Test.
+
+. Even though the interpolator prototype has parameters
+  int nStepBefore and int nStepAfter, the current code assumes that
+  only nStepBefore=1 and nStepAfter=1 are used.
+  Thus we have (*_TimeMessages)[target][0] and (*_TimeMessages)[target][1]
+  as well as &(*_DataMessages)[target][0] and &(*_DataMessages)[target][1].
+
+. The fields nStepBefore and nStepAfter correspond to a requested
+  maximum. There should be corresponding fields holding the numbers
+  actually available at a given time.
+
+. There is an OutOfTime field which is currently unused. Its usage
+  should be defined and it should probably be passed on to the
+  interpolator. Currently, the linear interpolator performs an
+  extrapolation if OutOfTime is true.
+
+. In CheckTime, the (*_DataMessages)[target][] buffers are allocated
+  and destroyed, whereas one could consider that for a given "target"
+  the recvcount values are constant over the whole time loop. The
+  buffers would then be allocated only once at the start and released
+  only at the end.
+
+
diff --git a/src/ParaMEDMEM/TimeInterpolator.cxx b/src/ParaMEDMEM/TimeInterpolator.cxx
new file mode 100644
index 000000000..86c3bfb74
--- /dev/null
+++ b/src/ParaMEDMEM/TimeInterpolator.cxx
@@ -0,0 +1,34 @@
+// Copyright (C) 2007-2015 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "TimeInterpolator.hxx" + +namespace ParaMEDMEM +{ + TimeInterpolator::TimeInterpolator( double InterpPrecision, int nStepBefore, int nStepAfter ) + { + _interp_precision=InterpPrecision; + _n_step_before=nStepBefore; + _n_step_after=nStepAfter; + } + + TimeInterpolator::~TimeInterpolator() + { + } +} diff --git a/src/ParaMEDMEM/TimeInterpolator.hxx b/src/ParaMEDMEM/TimeInterpolator.hxx new file mode 100644 index 000000000..30df1c5e6 --- /dev/null +++ b/src/ParaMEDMEM/TimeInterpolator.hxx @@ -0,0 +1,51 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __TIMEINTERPOLATOR_HXX__ +#define __TIMEINTERPOLATOR_HXX__ + +#include "ProcessorGroup.hxx" + +#include +#include + +namespace ParaMEDMEM +{ + class TimeInterpolator + { + public: + TimeInterpolator( double InterpPrecision, int nStepBefore=1, int nStepAfter=1 ); + virtual ~TimeInterpolator(); + + void setInterpParams( double InterpPrecision, int nStepBefore=1, int nStepAfter=1 ) { _interp_precision=InterpPrecision; _n_step_before=nStepBefore; _n_step_after=nStepAfter; } + void steps( int &nStepBefore, int &nStepAfter ) { nStepBefore=_n_step_before; nStepAfter=_n_step_after ; } + virtual void doInterp( double time0, double time1, double time, int recvcount , + int nbuff0, int nbuff1, + int **recvbuff0, int **recvbuff1, int *result ) = 0; + virtual void doInterp( double time0, double time1, double time, int recvcount , + int nbuff0, int nbuff1, + double **recvbuff0, double **recvbuff1, double *result ) = 0; + protected : + double _interp_precision; + int _n_step_before; + int _n_step_after; + }; +} + +#endif diff --git a/src/ParaMEDMEM/Topology.cxx b/src/ParaMEDMEM/Topology.cxx new file mode 100644 index 000000000..49a7fc20d --- /dev/null +++ b/src/ParaMEDMEM/Topology.cxx @@ -0,0 +1,31 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "Topology.hxx" + +namespace ParaMEDMEM +{ + Topology::Topology() + { + } + + Topology::~Topology() + { + } +} diff --git a/src/ParaMEDMEM/Topology.hxx b/src/ParaMEDMEM/Topology.hxx new file mode 100644 index 000000000..4b10f8b43 --- /dev/null +++ b/src/ParaMEDMEM/Topology.hxx @@ -0,0 +1,40 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __TOPOLOGY_HXX__ +#define __TOPOLOGY_HXX__ + +#include + +namespace ParaMEDMEM +{ + class ProcessorGroup; + + class Topology + { + public: + Topology() { } + virtual ~Topology() { } + virtual int getNbElements() const = 0; + virtual int getNbLocalElements() const = 0; + virtual const ProcessorGroup* getProcGroup()const = 0; + }; +} + +#endif diff --git a/src/ParaMEDMEMTest/CMakeLists.txt b/src/ParaMEDMEMTest/CMakeLists.txt new file mode 100644 index 000000000..c1ac42773 --- /dev/null +++ b/src/ParaMEDMEMTest/CMakeLists.txt @@ -0,0 +1,122 @@ +# Copyright (C) 2012-2015 CEA/DEN, EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# + +ADD_DEFINITIONS(${MPI_DEFINITIONS} ${CPPUNIT_DEFINITIONS}) + +INCLUDE_DIRECTORIES( + ${MPI_INCLUDE_DIRS} + ${CPPUNIT_INCLUDE_DIRS} + ${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDLoader + ${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM + ${CMAKE_CURRENT_SOURCE_DIR}/../MEDLoader + ${CMAKE_CURRENT_SOURCE_DIR}/../MEDCoupling + ${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL + ${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL/Bases + ) + +SET(ParaMEDMEMTest_SOURCES + ParaMEDMEMTest.cxx + ParaMEDMEMTest_MPIProcessorGroup.cxx + ParaMEDMEMTest_BlockTopology.cxx + ParaMEDMEMTest_InterpKernelDEC.cxx + ParaMEDMEMTest_StructuredCoincidentDEC.cxx + ParaMEDMEMTest_MEDLoader.cxx + ParaMEDMEMTest_ICoco.cxx + ParaMEDMEMTest_Gauthier1.cxx + ParaMEDMEMTest_FabienAPI.cxx + ParaMEDMEMTest_NonCoincidentDEC.cxx + ParaMEDMEMTest_OverlapDEC.cxx + MPIAccessDECTest.cxx + test_AllToAllDEC.cxx + test_AllToAllvDEC.cxx + test_AllToAllTimeDEC.cxx + test_AllToAllvTimeDEC.cxx + test_AllToAllvTimeDoubleDEC.cxx + MPIAccessTest.cxx + test_MPI_Access_Send_Recv.cxx + test_MPI_Access_Cyclic_Send_Recv.cxx + test_MPI_Access_SendRecv.cxx + test_MPI_Access_ISend_IRecv.cxx + test_MPI_Access_Cyclic_ISend_IRecv.cxx + test_MPI_Access_ISendRecv.cxx + test_MPI_Access_Probe.cxx + test_MPI_Access_IProbe.cxx + test_MPI_Access_Cancel.cxx + test_MPI_Access_Send_Recv_Length.cxx + test_MPI_Access_ISend_IRecv_Length.cxx + test_MPI_Access_ISend_IRecv_Length_1.cxx + test_MPI_Access_Time.cxx + test_MPI_Access_Time_0.cxx + test_MPI_Access_ISend_IRecv_BottleNeck.cxx + ) + +ADD_LIBRARY(ParaMEDMEMTest SHARED ${ParaMEDMEMTest_SOURCES}) +SET_TARGET_PROPERTIES(ParaMEDMEMTest PROPERTIES COMPILE_FLAGS "") +TARGET_LINK_LIBRARIES(ParaMEDMEMTest paramedmem paramedloader ${CPPUNIT_LIBRARIES}) +INSTALL(TARGETS ParaMEDMEMTest DESTINATION ${MEDTOOL_INSTALL_LIBS}) + +SET(TESTSParaMEDMEM) +SET(TestParaMEDMEM_SOURCES + TestParaMEDMEM.cxx + ) +SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} TestParaMEDMEM) + +SET(TestMPIAccessDEC_SOURCES + TestMPIAccessDEC.cxx + ) +SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} TestMPIAccessDEC) + +SET(TestMPIAccess_SOURCES + TestMPIAccess.cxx + ) +SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} TestMPIAccess) + +SET(test_perf_SOURCES + test_perf.cxx + ) +SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} test_perf) + +IF(MPI2_IS_OK) + SET(ParaMEDMEMTestMPI2_1_SOURCES + MPI2Connector.cxx + ParaMEDMEMTestMPI2_1.cxx + ) + SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} ParaMEDMEMTestMPI2_1) + + SET(ParaMEDMEMTestMPI2_2_SOURCES + MPI2Connector.cxx + ParaMEDMEMTestMPI2_2.cxx + ) + SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} ParaMEDMEMTestMPI2_2) +ENDIF(MPI2_IS_OK) + +FOREACH(bintestparamem ${TESTSParaMEDMEM}) + ADD_EXECUTABLE(${bintestparamem} ${${bintestparamem}_SOURCES}) + TARGET_LINK_LIBRARIES(${bintestparamem} ParaMEDMEMTest) +ENDFOREACH(bintestparamem ${TESTSParaMEDMEM}) + +INSTALL(TARGETS ${TESTSParaMEDMEM} DESTINATION ${MEDTOOL_INSTALL_BINS}) +SET(COMMON_HEADERS_HXX + MPIMainTest.hxx + MPIAccessDECTest.hxx + MPIAccessTest.hxx + ParaMEDMEMTest.hxx + MPI2Connector.hxx +) +INSTALL(FILES ${COMMON_HEADERS_HXX} DESTINATION ${MEDTOOL_INSTALL_HEADERS}) diff --git a/src/ParaMEDMEMTest/MPI2Connector.cxx b/src/ParaMEDMEMTest/MPI2Connector.cxx new file mode 100644 index 000000000..616ac138f --- /dev/null +++ 
b/src/ParaMEDMEMTest/MPI2Connector.cxx @@ -0,0 +1,153 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "MPI2Connector.hxx" + +#include +#include + +#ifndef WIN32 +#include +#endif + +MPI2Connector::MPI2Connector() +{ + MPI_Comm_size( MPI_COMM_WORLD, &_nb_proc ); + MPI_Comm_rank( MPI_COMM_WORLD, &_num_proc ); +} + +MPI2Connector::~MPI2Connector() +{ +} + +MPI_Comm MPI2Connector::remoteMPI2Connect(const std::string& service) +{ + int i; + char port_name[MPI_MAX_PORT_NAME]; + char port_name_clt[MPI_MAX_PORT_NAME]; + std::ostringstream msg; + MPI_Comm icom; + + if( service.size() == 0 ) + { + msg << "[" << _num_proc << "] You have to give a service name !"; + std::cerr << msg.str().c_str() << std::endl; + throw std::exception(); + } + + _srv = false; + + MPI_Barrier(MPI_COMM_WORLD); + + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); + if( _num_proc == 0 ) + { + /* rank 0 try to be a server. If service is already published, try to be a cient */ + MPI_Open_port(MPI_INFO_NULL, port_name); + if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) + { + std::cerr << "[" << _num_proc << "] I get the connection with " << service << " at " << port_name_clt << std::endl; + MPI_Close_port( port_name ); + } + else if ( MPI_Publish_name((char*)service.c_str(), MPI_INFO_NULL, port_name) == MPI_SUCCESS ) + { + _srv = true; + _port_name = port_name; + std::cerr << "[" << _num_proc << "] service " << service << " available at " << port_name << std::endl; + } + else if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) + { + std::cerr << "[" << _num_proc << "] I get the connection with " << service << " at " << port_name_clt << std::endl; + MPI_Close_port( port_name ); + } + else + { + msg << "[" << _num_proc << "] Error on connection with " << service << " at " << port_name_clt; + std::cerr << msg.str().c_str() << std::endl; + throw std::exception(); + } + } + else + { + i=0; + /* Waiting rank 0 publish name and try to be a client */ + while ( i != TIMEOUT ) + { + sleep(1); + if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) + { + std::cerr << "[" << _num_proc << "] I get the connection with " << service << " at " << port_name_clt << std::endl; + break; + } + i++; + } + if(i==TIMEOUT) + { + msg << "[" << _num_proc << "] Error on connection with " << service << " at " << port_name_clt; + std::cerr << msg.str().c_str() << std::endl; + throw std::exception(); + } + } + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL); + + /* If rank 0 is server, all processes call MPI_Comm_accept */ + /* If rank 0 is not server, all processes call 
MPI_Comm_connect */ + int srv = (int)_srv; + MPI_Bcast(&srv,1,MPI_INT,0,MPI_COMM_WORLD); + _srv = (bool)srv; + if ( _srv ) + MPI_Comm_accept( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom ); + else + MPI_Comm_connect(port_name_clt, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom ); + + /* create global communicator: servers have low index in global communicator*/ + MPI_Intercomm_merge(icom,!_srv,&_gcom); + + /* only rank 0 can be server for unpublish name */ + if(_num_proc != 0) _srv = false; + + return _gcom; + +} + +void MPI2Connector::remoteMPI2Disconnect(const std::string& service) +{ + std::ostringstream msg; + + if( service.size() == 0 ) + { + msg << "[" << _num_proc << "] You have to give a service name !"; + std::cerr << msg.str().c_str() << std::endl; + throw std::exception(); + } + + MPI_Comm_disconnect( &_gcom ); + if ( _srv ) + { + + char port_name[MPI_MAX_PORT_NAME]; + strcpy(port_name,_port_name.c_str()); + + MPI_Unpublish_name((char*)service.c_str(), MPI_INFO_NULL, port_name); + std::cerr << "[" << _num_proc << "] " << service << ": close port " << _port_name << std::endl; + MPI_Close_port( port_name ); + } + +} + diff --git a/src/ParaMEDMEMTest/MPI2Connector.hxx b/src/ParaMEDMEMTest/MPI2Connector.hxx new file mode 100644 index 000000000..57680b2c3 --- /dev/null +++ b/src/ParaMEDMEMTest/MPI2Connector.hxx @@ -0,0 +1,48 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __MPI2CONNECTOR_HXX__ +#define __MPI2CONNECTOR_HXX__ + +#include +#include +#include + +class MPI2Connector +{ +public: + MPI2Connector(); + ~MPI2Connector(); + // MPI2 connection + MPI_Comm remoteMPI2Connect(const std::string& service); + // MPI2 disconnection + void remoteMPI2Disconnect(const std::string& service); +private: + // Processus id + int _num_proc; + // Processus size + int _nb_proc; + MPI_Comm _gcom; + bool _srv; + std::string _port_name; +private: + static const int TIMEOUT=5; +}; + +#endif diff --git a/src/ParaMEDMEMTest/MPIAccessDECTest.cxx b/src/ParaMEDMEMTest/MPIAccessDECTest.cxx new file mode 100644 index 000000000..c757e6e4f --- /dev/null +++ b/src/ParaMEDMEMTest/MPIAccessDECTest.cxx @@ -0,0 +1,52 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "MPIAccessDECTest.hxx" +#include + +#include +#include + +#ifndef WIN32 +#include +#endif + +using namespace std; + + + +/*! + * Tool to remove temporary files. + * Allows automatique removal of temporary files in case of test failure. + */ +MPIAccessDECTest_TmpFilesRemover::~MPIAccessDECTest_TmpFilesRemover() +{ + set::iterator it = myTmpFiles.begin(); + for (; it != myTmpFiles.end(); it++) { + if (access((*it).data(), F_OK) == 0) + remove((*it).data()); + } + myTmpFiles.clear(); + //cout << "~MPIAccessTest_TmpFilesRemover()" << endl; +} + +bool MPIAccessDECTest_TmpFilesRemover::Register(const string theTmpFile) +{ + return (myTmpFiles.insert(theTmpFile)).second; +} diff --git a/src/ParaMEDMEMTest/MPIAccessDECTest.hxx b/src/ParaMEDMEMTest/MPIAccessDECTest.hxx new file mode 100644 index 000000000..80655b565 --- /dev/null +++ b/src/ParaMEDMEMTest/MPIAccessDECTest.hxx @@ -0,0 +1,102 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef _MPIACCESSDECTEST_HXX_ +#define _MPIACCESSDECTEST_HXX_ + +#include + +#include +#include +#include +#include "mpi.h" + + +class MPIAccessDECTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( MPIAccessDECTest ); + // CPPUNIT_TEST( test_AllToAllDECSynchronousPointToPoint ) ; + CPPUNIT_TEST( test_AllToAllDECAsynchronousPointToPoint ) ; + //CPPUNIT_TEST( test_AllToAllvDECSynchronousPointToPoint ) ; + CPPUNIT_TEST( test_AllToAllvDECAsynchronousPointToPoint ) ; + //CPPUNIT_TEST( test_AllToAllTimeDECSynchronousPointToPoint ) ; + CPPUNIT_TEST( test_AllToAllTimeDECAsynchronousPointToPoint ) ; + CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousNative ) ; + //CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousPointToPoint ) ; + CPPUNIT_TEST( test_AllToAllvTimeDECAsynchronousPointToPoint ) ; + //CPPUNIT_TEST( test_AllToAllvTimeDoubleDECSynchronousPointToPoint ) ; + CPPUNIT_TEST( test_AllToAllvTimeDoubleDECAsynchronousPointToPoint ) ; + CPPUNIT_TEST_SUITE_END(); + + +public: + + MPIAccessDECTest():CppUnit::TestFixture(){} + ~MPIAccessDECTest(){} + void setUp(){} + void tearDown(){} + void test_AllToAllDECSynchronousPointToPoint() ; + void test_AllToAllDECAsynchronousPointToPoint() ; + void test_AllToAllvDECSynchronousPointToPoint() ; + void test_AllToAllvDECAsynchronousPointToPoint() ; + void test_AllToAllTimeDECSynchronousPointToPoint() ; + void test_AllToAllTimeDECAsynchronousPointToPoint() ; + void test_AllToAllvTimeDECSynchronousNative() ; + void test_AllToAllvTimeDECSynchronousPointToPoint() ; + void test_AllToAllvTimeDECAsynchronousPointToPoint() ; + void test_AllToAllvTimeDoubleDECSynchronousPointToPoint() ; + void test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() ; + +private: + void test_AllToAllDEC( bool Asynchronous ) ; + void test_AllToAllvDEC( bool Asynchronous ) ; + void test_AllToAllTimeDEC( bool Asynchronous ) ; + void test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) ; + void test_AllToAllvTimeDoubleDEC( bool Asynchronous ) ; + }; + +// to automatically remove temporary files from disk +class MPIAccessDECTest_TmpFilesRemover +{ +public: + MPIAccessDECTest_TmpFilesRemover() {} + ~MPIAccessDECTest_TmpFilesRemover(); + bool Register(const std::string theTmpFile); + +private: + std::set myTmpFiles; +}; + +/*! + * Tool to print array to stream. + */ +template +void MPIAccessDECTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text) +{ + stream << text << ": {"; + if (length > 0) { + stream << array[0]; + for (int i = 1; i < length; i++) { + stream << ", " << array[i]; + } + } + stream << "}" << std::endl; +}; + +#endif diff --git a/src/ParaMEDMEMTest/MPIAccessTest.cxx b/src/ParaMEDMEMTest/MPIAccessTest.cxx new file mode 100644 index 000000000..a9714f841 --- /dev/null +++ b/src/ParaMEDMEMTest/MPIAccessTest.cxx @@ -0,0 +1,52 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "MPIAccessTest.hxx" +#include + +#include +#include + +#ifndef WIN32 +#include +#endif + +using namespace std; + + + +/*! + * Tool to remove temporary files. + * Allows automatique removal of temporary files in case of test failure. + */ +MPIAccessTest_TmpFilesRemover::~MPIAccessTest_TmpFilesRemover() +{ + set::iterator it = myTmpFiles.begin(); + for (; it != myTmpFiles.end(); it++) { + if (access((*it).data(), F_OK) == 0) + remove((*it).data()); + } + myTmpFiles.clear(); + //cout << "~MPIAccessTest_TmpFilesRemover()" << endl; +} + +bool MPIAccessTest_TmpFilesRemover::Register(const string theTmpFile) +{ + return (myTmpFiles.insert(theTmpFile)).second; +} diff --git a/src/ParaMEDMEMTest/MPIAccessTest.hxx b/src/ParaMEDMEMTest/MPIAccessTest.hxx new file mode 100644 index 000000000..07d704819 --- /dev/null +++ b/src/ParaMEDMEMTest/MPIAccessTest.hxx @@ -0,0 +1,105 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef _MPIACCESSTEST_HXX_ +#define _MPIACCESSTEST_HXX_ + +#include + +#include +#include +#include +#include "mpi.h" + + +class MPIAccessTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( MPIAccessTest ); + CPPUNIT_TEST( test_MPI_Access_Send_Recv ) ; + CPPUNIT_TEST( test_MPI_Access_Cyclic_Send_Recv ) ; + CPPUNIT_TEST( test_MPI_Access_SendRecv ) ; + CPPUNIT_TEST( test_MPI_Access_ISend_IRecv ) ; + CPPUNIT_TEST( test_MPI_Access_Cyclic_ISend_IRecv ) ; + CPPUNIT_TEST( test_MPI_Access_ISendRecv ) ; + CPPUNIT_TEST( test_MPI_Access_Probe ) ; + CPPUNIT_TEST( test_MPI_Access_IProbe ) ; + CPPUNIT_TEST( test_MPI_Access_Cancel ) ; + CPPUNIT_TEST( test_MPI_Access_Send_Recv_Length ) ; + CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length ) ; + CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length_1 ) ; + CPPUNIT_TEST( test_MPI_Access_Time ) ; + CPPUNIT_TEST( test_MPI_Access_Time_0 ) ; + CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_BottleNeck ) ; + CPPUNIT_TEST_SUITE_END(); + + +public: + + MPIAccessTest():CppUnit::TestFixture(){} + ~MPIAccessTest(){} + void setUp(){} + void tearDown(){} + void test_MPI_Access_Send_Recv() ; + void test_MPI_Access_Cyclic_Send_Recv() ; + void test_MPI_Access_SendRecv() ; + void test_MPI_Access_ISend_IRecv() ; + void test_MPI_Access_Cyclic_ISend_IRecv() ; + void test_MPI_Access_ISendRecv() ; + void test_MPI_Access_Probe() ; + void test_MPI_Access_IProbe() ; + void test_MPI_Access_Cancel() ; + void test_MPI_Access_Send_Recv_Length() ; + void test_MPI_Access_ISend_IRecv_Length() ; + void test_MPI_Access_ISend_IRecv_Length_1() ; + void test_MPI_Access_Time() ; + void test_MPI_Access_Time_0() ; + void test_MPI_Access_ISend_IRecv_BottleNeck() ; + +private: + }; + +// to automatically remove temporary files from disk +class MPIAccessTest_TmpFilesRemover +{ +public: + MPIAccessTest_TmpFilesRemover() {} + ~MPIAccessTest_TmpFilesRemover(); + bool Register(const std::string theTmpFile); + +private: + std::set myTmpFiles; +}; + +/*! + * Tool to print array to stream. + */ +template +void MPIAccessTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text) +{ + stream << text << ": {"; + if (length > 0) { + stream << array[0]; + for (int i = 1; i < length; i++) { + stream << ", " << array[i]; + } + } + stream << "}" << std::endl; +} + +#endif diff --git a/src/ParaMEDMEMTest/MPIMainTest.hxx b/src/ParaMEDMEMTest/MPIMainTest.hxx new file mode 100644 index 000000000..eec6e5a6f --- /dev/null +++ b/src/ParaMEDMEMTest/MPIMainTest.hxx @@ -0,0 +1,105 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef _MPIMAINTEST_HXX_ +#define _MPIMAINTEST_HXX_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#ifndef WIN32 +#include +#endif + +// ============================================================================ +/*! + * Main program source for Unit Tests with cppunit package does not depend + * on actual tests, so we use the same for all partial unit tests. + */ +// ============================================================================ + +int main(int argc, char* argv[]) +{ +#ifndef WIN32 + fpu_control_t cw = _FPU_DEFAULT & ~(_FPU_MASK_IM | _FPU_MASK_ZM | _FPU_MASK_OM); + _FPU_SETCW(cw); +#endif + MPI_Init(&argc,&argv); + int rank; + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + + // --- Create the event manager and test controller + CPPUNIT_NS::TestResult controller; + + // --- Add a listener that colllects test result + CPPUNIT_NS::TestResultCollector result; + controller.addListener( &result ); + + // --- Add a listener that print dots as test run. +#ifdef WIN32 + CPPUNIT_NS::TextTestProgressListener progress; +#else + CPPUNIT_NS::BriefTestProgressListener progress; +#endif + controller.addListener( &progress ); + + // --- Get the top level suite from the registry + + CPPUNIT_NS::Test *suite = + CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest(); + + // --- Adds the test to the list of test to run + + CPPUNIT_NS::TestRunner runner; + runner.addTest( suite ); + runner.run( controller); + + // --- Print test in a compiler compatible format. + + std::ostringstream testFileName; + testFileName<<"UnitTestsResult"< + +#include +#include +#include +#include +#include + +#ifndef WIN32 +#include +#endif + +//================================================================================ +/*! + * \brief Get path to the resources file. + * + * When running 'make test' source file is taken from MED_SRC/resources folder. + * Otherwise, file is searched in ${MED_ROOT_DIR}/share/salome/resources/med folder. + * + * \param filename name of the resource file (should not include a path) + * \return full path to the resource file + */ +//================================================================================ + +std::string ParaMEDMEMTest::getResourceFile( const std::string& filename ) +{ + std::string resourceFile = ""; + + if ( getenv("top_srcdir") ) { + // we are in 'make test' step + resourceFile = getenv("top_srcdir"); + resourceFile += "/resources/"; + } + else if ( getenv("MED_ROOT_DIR") ) { + // use MED_ROOT_DIR env.var + resourceFile = getenv("MED_ROOT_DIR"); + resourceFile += "/share/salome/resources/med/"; + } + resourceFile += filename; + return resourceFile; +} + + +//================================================================================ +/*! 
+ * \brief Returns writable temporary directory + * \return full path to the temporary directory + */ +//================================================================================ + +std::string ParaMEDMEMTest::getTmpDirectory() +{ + std::string path; + + std::list dirs; + if ( getenv("TMP") ) dirs.push_back( getenv("TMP" )); + if ( getenv("TMPDIR") ) dirs.push_back( getenv("TMPDIR" )); + dirs.push_back( "/tmp" ); + + std::string tmpd = ""; + for ( std::list::iterator dir = dirs.begin(); dir != dirs.end() && tmpd == "" ; ++dir ) { + if ( access( dir->data(), W_OK ) == 0 ) { + tmpd = dir->data(); + } + } + + if ( tmpd == "" ) + throw std::runtime_error("Can't find writable temporary directory. Set TMP environment variable"); + + return tmpd; +} + +//================================================================================ +/*! + * \brief Creates a copy of source file (if source file is specified) + * in the temporary directory and returns a path to the tmp file + * + * \param tmpfile name of the temporary file (without path) + * \param srcfile source file + * \return path to the temporary file + */ +//================================================================================ +std::string ParaMEDMEMTest::makeTmpFile( const std::string& tmpfile, const std::string& srcfile ) +{ + std::string tmpf = getTmpDirectory() + "/" + tmpfile; + if ( srcfile != "" ) { + std::string cmd = "cp " + srcfile + " " + tmpf + " ; chmod +w " + tmpf; + system( cmd.c_str() ); + } + return tmpf; +} + + +/*! + * Tool to remove temporary files. + * Allows automatique removal of temporary files in case of test failure. + */ +ParaMEDMEMTest_TmpFilesRemover::~ParaMEDMEMTest_TmpFilesRemover() +{ + std::set::iterator it = myTmpFiles.begin(); + for (; it != myTmpFiles.end(); it++) { + if (access((*it).data(), F_OK) == 0) + remove((*it).data()); + } + myTmpFiles.clear(); + //cout << "~ParaMEDMEMTest_TmpFilesRemover()" << endl; +} + +bool ParaMEDMEMTest_TmpFilesRemover::Register(const std::string theTmpFile) +{ + return (myTmpFiles.insert(theTmpFile)).second; +} diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx new file mode 100644 index 000000000..a8bf2b474 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx @@ -0,0 +1,188 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef _ParaMEDMEMTEST_HXX_ +#define _ParaMEDMEMTEST_HXX_ + +#include + +#include +#include +#include +#include "mpi.h" + + +class ParaMEDMEMTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( ParaMEDMEMTest ); + CPPUNIT_TEST(testMPIProcessorGroup_constructor); + CPPUNIT_TEST(testMPIProcessorGroup_boolean); + CPPUNIT_TEST(testMPIProcessorGroup_rank); + CPPUNIT_TEST(testBlockTopology_constructor); + CPPUNIT_TEST(testBlockTopology_serialize); + CPPUNIT_TEST(testInterpKernelDEC_1D); + CPPUNIT_TEST(testInterpKernelDEC_2DCurve); + CPPUNIT_TEST(testInterpKernelDEC_2D); + CPPUNIT_TEST(testInterpKernelDEC2_2D); + CPPUNIT_TEST(testInterpKernelDEC_2DP0P1); + CPPUNIT_TEST(testInterpKernelDEC_3D); + CPPUNIT_TEST(testInterpKernelDECNonOverlapp_2D_P0P0); + CPPUNIT_TEST(testInterpKernelDECNonOverlapp_2D_P0P1P1P0); + CPPUNIT_TEST(testInterpKernelDEC2DM1D_P0P0); + CPPUNIT_TEST(testInterpKernelDECPartialProcs); + CPPUNIT_TEST(testInterpKernelDEC3DSurfEmptyBBox); + CPPUNIT_TEST(testOverlapDEC1); + + CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D); + CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D); + CPPUNIT_TEST(testSynchronousEqualInterpKernelDEC_2D); + CPPUNIT_TEST(testSynchronousFasterSourceInterpKernelDEC_2D); + CPPUNIT_TEST(testSynchronousSlowerSourceInterpKernelDEC_2D); + CPPUNIT_TEST(testSynchronousSlowSourceInterpKernelDEC_2D); + CPPUNIT_TEST(testSynchronousFastSourceInterpKernelDEC_2D); + CPPUNIT_TEST(testAsynchronousEqualInterpKernelDEC_2D); + CPPUNIT_TEST(testAsynchronousFasterSourceInterpKernelDEC_2D); + CPPUNIT_TEST(testAsynchronousSlowerSourceInterpKernelDEC_2D); + CPPUNIT_TEST(testAsynchronousSlowSourceInterpKernelDEC_2D); + CPPUNIT_TEST(testAsynchronousFastSourceInterpKernelDEC_2D); +#ifdef MED_ENABLE_FVM + //can be added again after FVM correction for 2D + // CPPUNIT_TEST(testNonCoincidentDEC_2D); + CPPUNIT_TEST(testNonCoincidentDEC_3D); +#endif + CPPUNIT_TEST(testStructuredCoincidentDEC); + CPPUNIT_TEST(testStructuredCoincidentDEC); + CPPUNIT_TEST(testICoco1); + CPPUNIT_TEST(testGauthier1); + CPPUNIT_TEST(testGauthier2); + CPPUNIT_TEST(testGauthier3); + CPPUNIT_TEST(testGauthier4); + CPPUNIT_TEST(testFabienAPI1); + CPPUNIT_TEST(testFabienAPI2); + CPPUNIT_TEST(testMEDLoaderRead1); + CPPUNIT_TEST(testMEDLoaderPolygonRead); + CPPUNIT_TEST(testMEDLoaderPolyhedronRead); + CPPUNIT_TEST_SUITE_END(); + + +public: + + ParaMEDMEMTest():CppUnit::TestFixture(){} + ~ParaMEDMEMTest(){} + void setUp(){} + void tearDown(){} + void testMPIProcessorGroup_constructor(); + void testMPIProcessorGroup_boolean(); + void testMPIProcessorGroup_rank(); + void testBlockTopology_constructor(); + void testBlockTopology_serialize(); + void testInterpKernelDEC_1D(); + void testInterpKernelDEC_2DCurve(); + void testInterpKernelDEC_2D(); + void testInterpKernelDEC2_2D(); + void testInterpKernelDEC_2DP0P1(); + void testInterpKernelDEC_3D(); + void testInterpKernelDECNonOverlapp_2D_P0P0(); + void testInterpKernelDECNonOverlapp_2D_P0P1P1P0(); + void testInterpKernelDEC2DM1D_P0P0(); + void testInterpKernelDECPartialProcs(); + void testInterpKernelDEC3DSurfEmptyBBox(); + void testOverlapDEC1(); +#ifdef MED_ENABLE_FVM + void testNonCoincidentDEC_2D(); + void 
testNonCoincidentDEC_3D(); +#endif + void testStructuredCoincidentDEC(); + void testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D(); + void testSynchronousEqualInterpKernelWithoutInterpDEC_2D(); + void testSynchronousEqualInterpKernelDEC_2D(); + void testSynchronousFasterSourceInterpKernelDEC_2D(); + void testSynchronousSlowerSourceInterpKernelDEC_2D(); + void testSynchronousSlowSourceInterpKernelDEC_2D(); + void testSynchronousFastSourceInterpKernelDEC_2D(); + + void testAsynchronousEqualInterpKernelDEC_2D(); + void testAsynchronousFasterSourceInterpKernelDEC_2D(); + void testAsynchronousSlowerSourceInterpKernelDEC_2D(); + void testAsynchronousSlowSourceInterpKernelDEC_2D(); + void testAsynchronousFastSourceInterpKernelDEC_2D(); + // + void testICoco1(); + void testGauthier1(); + void testGauthier2(); + void testGauthier3(); + void testGauthier4(); + void testFabienAPI1(); + void testFabienAPI2(); + // + void testMEDLoaderRead1(); + void testMEDLoaderPolygonRead(); + void testMEDLoaderPolyhedronRead(); + void testMEDLoaderWrite1(); + void testMEDLoaderPolygonWrite(); + + std::string getResourceFile( const std::string& ); + std::string getTmpDirectory(); + std::string makeTmpFile( const std::string&, const std::string& = "" ); + +private: +#ifdef MED_ENABLE_FVM + void testNonCoincidentDEC(const std::string& filename1, + const std::string& meshname1, + const std::string& filename2, + const std::string& meshname2, + int nbprocsource, double epsilon); +#endif + void testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA, + double dtB, double tmaxB, + bool WithPointToPoint, bool Asynchronous, bool WithInterp, const char *srcMeth, const char *targetMeth); + void testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth); + void testInterpKernelDEC2_2D_(const char *srcMeth, const char *targetMeth); + void testInterpKernelDEC_3D_(const char *srcMeth, const char *targetMeth); +}; + +// to automatically remove temporary files from disk +class ParaMEDMEMTest_TmpFilesRemover +{ +public: + ParaMEDMEMTest_TmpFilesRemover() {} + ~ParaMEDMEMTest_TmpFilesRemover(); + bool Register(const std::string theTmpFile); + +private: + std::set myTmpFiles; +}; + +/*! + * Tool to print array to stream. + */ +template +void ParaMEDMEMTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text) +{ + stream << text << ": {"; + if (length > 0) { + stream << array[0]; + for (int i = 1; i < length; i++) { + stream << ", " << array[i]; + } + } + stream << "}" << std::endl; +} + +#endif diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx new file mode 100644 index 000000000..e56972116 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx @@ -0,0 +1,125 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
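// Each MPI test executable is assembled from the same three pieces: a fixture
// like the one declared above, a CPPUNIT_TEST_SUITE_REGISTRATION of that
// fixture, and the shared main() pulled in from MPIMainTest.hxx, which calls
// MPI_Init before running the CppUnit registry and writes a per-process
// "UnitTestsResult" report.  Sketch of such a translation unit:
//
//   CPPUNIT_TEST_SUITE_REGISTRATION( ParaMEDMEMTest );
//   #include "MPIMainTest.hxx"
//
// The ParaMEDMEMTest_DumpArray helper is handy while debugging, e.g.:
//
//   double bary[3] = {0., 0.5, 1.};
//   ParaMEDMEMTest_DumpArray(std::cout, bary, 3, "bary");   // prints "bary: {0, 0.5, 1}"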
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include + +#include "MPI2Connector.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "InterpKernelDEC.hxx" +#include "MPIProcessorGroup.hxx" +#include "CommInterface.hxx" + +#include +#include +#include + +class MPI2ParaMEDMEMTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( MPI2ParaMEDMEMTest ); + CPPUNIT_TEST( testBasicMPI2_1 ); + CPPUNIT_TEST_SUITE_END(); +public: + void testBasicMPI2_1(); +}; + +using namespace ParaMEDMEM; + +void MPI2ParaMEDMEMTest::testBasicMPI2_1() +{ + int lsize, lrank, gsize, grank; + MPI_Comm gcom; + std::string service = "SERVICE"; + std::ostringstream meshfilename, meshname; + ParaMEDMEM::ParaMESH *paramesh=0; + ParaMEDMEM::MEDCouplingUMesh *mesh; + ParaMEDMEM::ParaFIELD *parafield=0; + ParaMEDMEM::CommInterface *interface; + ParaMEDMEM::MPIProcessorGroup *source, *target; + + MPI_Comm_size( MPI_COMM_WORLD, &lsize ); + MPI_Comm_rank( MPI_COMM_WORLD, &lrank ); + if(lsize!=2) + { + CPPUNIT_ASSERT(false); + return; + } + + /* Connection to remote programm */ + MPI2Connector *mpio = new MPI2Connector; + gcom = mpio->remoteMPI2Connect(service); + MPI_Comm_size( gcom, &gsize ); + MPI_Comm_rank( gcom, &grank ); + if(gsize!=5) + { + CPPUNIT_ASSERT(false); + return; + } + interface = new ParaMEDMEM::CommInterface; + source = new ParaMEDMEM::MPIProcessorGroup(*interface,0,lsize-1,gcom); + target = new ParaMEDMEM::MPIProcessorGroup(*interface,lsize,gsize-1,gcom); + + const double sourceCoordsAll[2][8]={{0.4,0.5,0.4,1.5,1.6,1.5,1.6,0.5}, + {0.3,-0.5,1.6,-0.5,1.6,-1.5,0.3,-1.5}}; + + int conn4All[8]={0,1,2,3,4,5,6,7}; + + std::ostringstream stream; stream << "sourcemesh2D proc " << grank; + mesh=MEDCouplingUMesh::New(stream.str().c_str(),2); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + const double *sourceCoords=sourceCoordsAll[grank]; + std::copy(sourceCoords,sourceCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*source,"source mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + double *value=parafield->getField()->getArray()->getPointer(); + value[0]=34+13*((double)grank); + + ParaMEDMEM::InterpKernelDEC dec(*source,*target); + parafield->getField()->setNature(ConservativeVolumic); + + + dec.setMethod("P0"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + /* Deconnection of remote programm */ + mpio->remoteMPI2Disconnect(service); + /* clean-up */ + delete mpio; + delete parafield; + mesh->decrRef(); + delete paramesh; + delete source; + delete target; + delete interface; +} + +CPPUNIT_TEST_SUITE_REGISTRATION( MPI2ParaMEDMEMTest ); + +#include "MPIMainTest.hxx" diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx new file mode 100644 index 000000000..102443ef0 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx @@ -0,0 +1,130 @@ +// Copyright (C) 2007-2015 
CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include + +#include "MPI2Connector.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "InterpKernelDEC.hxx" +#include "MPIProcessorGroup.hxx" +#include "CommInterface.hxx" + +#include +#include +#include + +class MPI2ParaMEDMEMTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( MPI2ParaMEDMEMTest ); + CPPUNIT_TEST( testBasicMPI2_1 ); + CPPUNIT_TEST_SUITE_END(); +public: + void testBasicMPI2_1(); +}; + +using namespace ParaMEDMEM; + +void MPI2ParaMEDMEMTest::testBasicMPI2_1() +{ + int lsize, lrank, gsize, grank; + MPI_Comm gcom; + std::string service = "SERVICE"; + std::ostringstream meshfilename, meshname; + ParaMEDMEM::ParaMESH *paramesh=0; + ParaMEDMEM::MEDCouplingUMesh* mesh; + ParaMEDMEM::ParaFIELD *parafield=0; + ParaMEDMEM::CommInterface* interface; + ParaMEDMEM::MPIProcessorGroup* source, *target; + + MPI_Comm_size( MPI_COMM_WORLD, &lsize ); + MPI_Comm_rank( MPI_COMM_WORLD, &lrank ); + if(lsize!=3) + { + CPPUNIT_ASSERT(false); + return; + } + + /* Connection to remote programm */ + MPI2Connector *mpio = new MPI2Connector; + gcom = mpio->remoteMPI2Connect(service); + + MPI_Comm_size( gcom, &gsize ); + MPI_Comm_rank( gcom, &grank ); + if(gsize!=5) + { + CPPUNIT_ASSERT(false); + return; + } + + interface = new ParaMEDMEM::CommInterface; + source = new ParaMEDMEM::MPIProcessorGroup(*interface,0,gsize-lsize-1,gcom); + target = new ParaMEDMEM::MPIProcessorGroup(*interface,gsize-lsize,gsize-1,gcom); + + const double targetCoordsAll[3][16]={{0.7,1.45,0.7,1.65,0.9,1.65,0.9,1.45, 1.1,1.4,1.1,1.6,1.3,1.6,1.3,1.4}, + {0.7,-0.6,0.7,0.7,0.9,0.7,0.9,-0.6, 1.1,-0.7,1.1,0.6,1.3,0.6,1.3,-0.7}, + {0.7,-1.55,0.7,-1.35,0.9,-1.35,0.9,-1.55, 1.1,-1.65,1.1,-1.45,1.3,-1.45,1.3,-1.65}}; + int conn4All[8]={0,1,2,3,4,5,6,7}; + double targetResults[3][2]={{34.,34.},{38.333333333333336,42.666666666666664},{47.,47.}}; + + std::ostringstream stream; stream << "targetmesh2D proc " << grank-(gsize-lsize); + mesh=MEDCouplingUMesh::New(stream.str().c_str(),2); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All+4); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(8,2); + const double *targetCoords=targetCoordsAll[grank-(gsize-lsize)]; + std::copy(targetCoords,targetCoords+16,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH (mesh,*target,"target mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + + 
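  // The DEC built below couples the two halves of the merged 5-process
  // communicator returned by remoteMPI2Connect(): ranks 0-1 are the companion
  // ParaMEDMEMTestMPI2_1 executable (which sends), ranks 2-4 are this
  // executable (which receives).  The values in targetResults[] are the
  // intersection-weighted combinations of the two source cell values (34 and 47).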
ParaMEDMEM::InterpKernelDEC dec(*source,*target); + parafield->getField()->setNature(ConservativeVolumic); + + dec.setMethod("P0"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + const double *expected=targetResults[grank-(gsize-lsize)]; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13); + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13); + /* Deconnection of remote programm */ + mpio->remoteMPI2Disconnect(service); + /* clean-up */ + delete mpio; + delete parafield; + mesh->decrRef(); + delete paramesh; + delete source; + delete target; + delete interface; +} + +CPPUNIT_TEST_SUITE_REGISTRATION( MPI2ParaMEDMEMTest ); + +#include "MPIMainTest.hxx" diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx new file mode 100644 index 000000000..dc129ccf1 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx @@ -0,0 +1,123 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include + +#include "InterpolationUtils.hxx" +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" + +#include + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + + +using namespace std; +using namespace ParaMEDMEM; + +/* + * Check methods defined in BlockTopology.hxx + * + BlockTopology(){}; + BlockTopology(const ProcessorGroup& group, const MEDMEM::GRID& grid); + BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo); + (+) BlockTopology(const ProcessorGroup& group, int nb_elem); + virtual ~BlockTopology(); + (+) inline int getNbElements()const; + (+) inline int getNbLocalElements() const; + const ProcessorGroup* getProcGroup()const {return _proc_group;}; + (+) inline std::pair globalToLocal (const int) const ; + (+) inline int localToGlobal (const std::pair) const; + (+) std::vector > getLocalArrayMinMax() const ; + (+) int getDimension() const {return _dimension;}; + (+) void serialize(int* & serializer, int& size) const ; + (+) void unserialize(const int* serializer, const CommInterface& comm_interface); + + */ + +void ParaMEDMEMTest::testBlockTopology_constructor() +{ + //test constructor + int size; + MPI_Comm_size(MPI_COMM_WORLD,&size); + int rank; + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + CommInterface interface; + MPIProcessorGroup group(interface); 
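  // BlockTopology(group,n) gives every processor n consecutive elements, so the
  // global size is n*size and global index g maps to the (proc, local) pair
  // (g / n, g % n); the assertions below check this for n = 1 and n = 2
  // (for n = 2 the last global index 2*size-1 maps to (size-1, 1)).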
+ BlockTopology blocktopo(group,1); + CPPUNIT_ASSERT_EQUAL(1,blocktopo.getNbLocalElements()); + CPPUNIT_ASSERT_EQUAL(size,blocktopo.getNbElements()); + CPPUNIT_ASSERT_EQUAL(1,blocktopo.getDimension()); + + //checking access methods + BlockTopology blocktopo2(group,2); + std::pair local= blocktopo2.globalToLocal(0); + CPPUNIT_ASSERT_EQUAL(local.first,0); + CPPUNIT_ASSERT_EQUAL(local.second,0); + int global=blocktopo2.localToGlobal(local); + CPPUNIT_ASSERT_EQUAL(global,0); + + local = blocktopo2.globalToLocal(1); + CPPUNIT_ASSERT_EQUAL(local.first,0); + CPPUNIT_ASSERT_EQUAL(local.second,1); + global=blocktopo2.localToGlobal(local); + CPPUNIT_ASSERT_EQUAL(global,1); + + local = blocktopo2.globalToLocal(2*size-1); + CPPUNIT_ASSERT_EQUAL(local.first,size-1); + CPPUNIT_ASSERT_EQUAL(local.second,1); + global=blocktopo2.localToGlobal(local); + CPPUNIT_ASSERT_EQUAL(global,2*size-1); + + std::vector > bounds = blocktopo2.getLocalArrayMinMax(); + int vecsize = bounds.size(); + CPPUNIT_ASSERT_EQUAL(1,vecsize); + CPPUNIT_ASSERT_EQUAL(2*rank, (bounds[0]).first); + CPPUNIT_ASSERT_EQUAL(2*rank+2, (bounds[0]).second); + } + +void ParaMEDMEMTest::testBlockTopology_serialize() +{ + + int size; + MPI_Comm_size(MPI_COMM_WORLD,&size); + int rank; + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + CommInterface interface; + MPIProcessorGroup group(interface); + BlockTopology blocktopo(group,3); + +//testing the serialization process that is used to transfer a +//block topology via a MPI_Send/Recv comm + BlockTopology blocktopo_recv; + int* serializer; + int sersize; + blocktopo.serialize(serializer,sersize); + blocktopo_recv.unserialize(serializer,interface); + CPPUNIT_ASSERT_EQUAL(blocktopo.getNbElements(),blocktopo_recv.getNbElements()); + delete [] serializer; +} diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx new file mode 100644 index 000000000..341ed7c4e --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx @@ -0,0 +1,199 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
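// The serialize()/unserialize() round trip exercised just above is what allows
// a BlockTopology to be shipped between processes.  The test performs the round
// trip locally; the intended transport would look roughly like this (sketch,
// MPI calls and the 'dest' rank added for illustration only):
//
//   int *buf = 0, bufSize = 0;
//   blocktopo.serialize(buf, bufSize);                        // buffer allocated by serialize()
//   MPI_Send(&bufSize, 1, MPI_INT, dest, 0, MPI_COMM_WORLD);
//   MPI_Send(buf, bufSize, MPI_INT, dest, 0, MPI_COMM_WORLD);
//   // ... on the receiving side, after the matching MPI_Recv calls:
//   blocktopoRecv.unserialize(buf, interface);
//   delete [] buf;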
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "InterpKernelDEC.hxx" +#include "MEDCouplingUMesh.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ComponentTopology.hxx" + +#include + +using namespace ParaMEDMEM; + +void ParaMEDMEMTest::testFabienAPI1() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=3) + return ; + int procs_source_c[1]={0}; + std::set procs_source(procs_source_c,procs_source_c+1); + int procs_target_c[1]={1}; + std::set procs_target(procs_target_c,procs_target_c+1); + // + ParaMEDMEM::MEDCouplingUMesh *mesh=0; + ParaMEDMEM::ParaMESH *paramesh=0; + ParaMEDMEM::ParaFIELD *parafield=0; + // + ParaMEDMEM::CommInterface interface; + // + MPI_Barrier(MPI_COMM_WORLD); + double targetCoords[8]={ 0.,0., 1., 0., 0., 1., 1., 1. }; + CommInterface comm; + // + ParaMEDMEM::InterpKernelDEC *dec=new ParaMEDMEM::InterpKernelDEC(procs_source,procs_target); + if(dec->isInSourceSide()) + { + mesh=MEDCouplingUMesh::New(); + mesh->setMeshDimension(2); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(targetCoords,targetCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + int targetConn[4]={0,2,3,1}; + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn); + mesh->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*dec->getSourceGrp(),"source mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + double *vals=parafield->getField()->getArray()->getPointer(); + vals[0]=7.; + } + if(dec->isInTargetSide()) + { + mesh=MEDCouplingUMesh::New(); + mesh->setMeshDimension(2); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(targetCoords,targetCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + int targetConn[6]={0,2,1,2,3,1}; + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+3); + mesh->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*dec->getTargetGrp(),"target mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + } + dec->attachLocalField(parafield); + dec->synchronize(); + dec->sendRecvData(); + if(dec->isInTargetSide()) + { + const double *valsToTest=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_DOUBLES_EQUAL(valsToTest[0],7.,1e-14); + CPPUNIT_ASSERT_DOUBLES_EQUAL(valsToTest[1],7.,1e-14); + } + // + delete parafield; + delete paramesh; + if(mesh) + mesh->decrRef(); + delete dec; + MPI_Barrier(MPI_COMM_WORLD); +} + +/*! + * Idem testFabienAPI1 except that procs are shuffled. Test of the good management of group translation in newly created communicator. 
+ */ +void ParaMEDMEMTest::testFabienAPI2() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=3) + return ; + int procs_source_c[1]={2};//difference with testFabienAPI1 + std::set procs_source(procs_source_c,procs_source_c+1); + int procs_target_c[1]={1}; + std::set procs_target(procs_target_c,procs_target_c+1); + // + ParaMEDMEM::MEDCouplingUMesh *mesh=0; + ParaMEDMEM::ParaMESH *paramesh=0; + ParaMEDMEM::ParaFIELD *parafield=0; + // + ParaMEDMEM::CommInterface interface; + // + MPI_Barrier(MPI_COMM_WORLD); + double targetCoords[8]={ 0.,0., 1., 0., 0., 1., 1., 1. }; + CommInterface comm; + // + ParaMEDMEM::InterpKernelDEC *dec=new ParaMEDMEM::InterpKernelDEC(procs_source,procs_target); + if(dec->isInSourceSide()) + { + mesh=MEDCouplingUMesh::New(); + mesh->setMeshDimension(2); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(targetCoords,targetCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + int targetConn[4]={0,2,3,1}; + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn); + mesh->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*dec->getSourceGrp(),"source mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + double *vals=parafield->getField()->getArray()->getPointer(); + vals[0]=7.; + } + if(dec->isInTargetSide()) + { + mesh=MEDCouplingUMesh::New(); + mesh->setMeshDimension(2); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(targetCoords,targetCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + int targetConn[6]={0,2,1,2,3,1}; + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+3); + mesh->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*dec->getTargetGrp(),"target mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + } + dec->attachLocalField(parafield); + dec->synchronize(); + dec->sendRecvData(); + if(dec->isInTargetSide()) + { + const double *valsToTest=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_DOUBLES_EQUAL(valsToTest[0],7.,1e-14); + CPPUNIT_ASSERT_DOUBLES_EQUAL(valsToTest[1],7.,1e-14); + } + // + delete parafield; + delete paramesh; + if(mesh) + mesh->decrRef(); + delete dec; + MPI_Barrier(MPI_COMM_WORLD); +} diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx new file mode 100644 index 000000000..cc97ede18 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx @@ -0,0 +1,665 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
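// The two tests above exercise the InterpKernelDEC constructor that takes the
// source and target rank sets directly; the DEC then tells each process which
// side it belongs to, so a single code path builds either the source or the
// target field.  Shape of that pattern (ranks chosen only as an example):
//
//   std::set<int> src; src.insert(2);
//   std::set<int> tgt; tgt.insert(1);
//   ParaMEDMEM::InterpKernelDEC dec(src, tgt);
//   // build a ParaMESH/ParaFIELD on the side reported by
//   // dec.isInSourceSide() / dec.isInTargetSide(), then on every involved rank:
//   dec.attachLocalField(parafield);
//   dec.synchronize();
//   dec.sendRecvData();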
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "DEC.hxx" +#include "InterpKernelDEC.hxx" +#include "MEDCouplingUMesh.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ComponentTopology.hxx" +#include "BlockTopology.hxx" + +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace ParaMEDMEM; +using namespace ICoCo; + +void afficheGauthier1(const ParaFIELD& field, const double *vals, int lgth) +{ + const DataArrayDouble *valsOfField(field.getField()->getArray()); + CPPUNIT_ASSERT_EQUAL(lgth,valsOfField->getNumberOfTuples()); + for (int ele=0;elegetNumberOfTuples();ele++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(vals[ele],valsOfField->getIJ(ele,0),1e-12); +} + +MEDCouplingUMesh *init_quadGauthier1(int is_master) +{ + MEDCouplingAutoRefCountObjectPtr m(MEDCouplingUMesh::New("champ_quad",2)); + MEDCouplingAutoRefCountObjectPtr coo(DataArrayDouble::New()); + if(is_master) + { + const double dataCoo[24]={0,0,0,1,0,0,0,0,1,1,0,1,0,1,0,1,1,0,0,1,1,1,1,1}; + coo->alloc(8,3); + std::copy(dataCoo,dataCoo+24,coo->getPointer()); + const int conn[8]={0,1,3,2,4,5,7,6}; + m->allocateCells(2); + m->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn); + m->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn+4); + } + else + { + coo->alloc(0,3); + m->allocateCells(0); + } + m->setCoords(coo); + return m.retn(); +} + +MEDCouplingUMesh *init_triangleGauthier1(int is_master) +{ + MEDCouplingAutoRefCountObjectPtr m(MEDCouplingUMesh::New("champ_triangle",2)); + MEDCouplingAutoRefCountObjectPtr coo(DataArrayDouble::New()); + if(is_master) + { + const double dataCoo[24]={0,0,0,1,0,0,0,0,1,1,0,1,0,1,0,1,1,0,0,1,1,1,1,1}; + coo->alloc(8,3); + std::copy(dataCoo,dataCoo+24,coo->getPointer()); + const int conn[12]={0,1,2,1,2,3,4,5,7,4,6,7}; + m->allocateCells(2); + for(int i=0;i<4;i++) + m->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn+3*i); + } + else + { + coo->alloc(0,3); + m->allocateCells(0); + } + m->setCoords(coo); + return m.retn(); +} + + +void ParaMEDMEMTest::testGauthier1() +{ + int num_cas=0; + int rank, size; + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + MPI_Comm_size(MPI_COMM_WORLD,&size); + + int is_master=0; + + CommInterface comm; + set emetteur_ids; + set recepteur_ids; + emetteur_ids.insert(0); + if(size!=4) + return; + recepteur_ids.insert(1); + if (size >2) + recepteur_ids.insert(2); + if (size >2) + emetteur_ids.insert(3); + if ((rank==0)||(rank==1)) + is_master=1; + + MPIProcessorGroup recepteur_group(comm,recepteur_ids); + MPIProcessorGroup emetteur_group(comm,emetteur_ids); + + string cas; + if (recepteur_group.containsMyRank()) + { + cas="recepteur"; + //freopen("recpeteur.out","w",stdout); + //freopen("recepteur.err","w",stderr); + } + else + { + cas="emetteur"; + // freopen("emetteur.out","w",stdout); + //freopen("emetteur.err","w",stderr); + } + double expected[8][4]={ + {1.,1.,1.,1.}, + {40., 40., 1., 1.}, + {1.,1.,1e200,1e200}, + {40.,1.,1e200,1e200}, + {1.,1.,1.,1.}, + {40.,1.,1.,1.}, + {1.,1.,1e200,1e200}, + {20.5,1.,1e200,1e200} + }; + int expectedLgth[8]={4,4,2,2,4,4,2,2}; + + for (int 
send=0;send<2;send++) + for (int rec=0;rec<2;rec++) + { + InterpKernelDEC dec_emetteur(emetteur_group, recepteur_group); + ParaMEDMEM::ParaFIELD *champ_emetteur(0),*champ_recepteur(0); + ParaMEDMEM::ParaMESH *paramesh(0); + MEDCouplingAutoRefCountObjectPtr mesh; + dec_emetteur.setOrientation(2); + if (send==0) + { + mesh=init_quadGauthier1(is_master); + } + else + { + mesh=init_triangleGauthier1(is_master); + } + paramesh=new ParaMEDMEM::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"emetteur mesh"); + ParaMEDMEM::ComponentTopology comptopo; + champ_emetteur=new ParaMEDMEM::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo); + champ_emetteur->getField()->setNature(ConservativeVolumic); + champ_emetteur->setOwnSupport(true); + if (rec==0) + { + mesh=init_triangleGauthier1(is_master); + } + else + { + mesh=init_quadGauthier1(is_master); + } + paramesh=new ParaMEDMEM::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"recepteur mesh"); + champ_recepteur=new ParaMEDMEM::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo); + champ_recepteur->getField()->setNature(ConservativeVolumic); + champ_recepteur->setOwnSupport(true); + if (cas=="emetteur") + { + champ_emetteur->getField()->getArray()->fillWithValue(1.); + } + + + MPI_Barrier(MPI_COMM_WORLD); + + //clock_t clock0= clock (); + int compti=0; + + bool init=true; // first time step ?? + bool stop=false; + //boucle sur les pas de quads + while (!stop) { + + compti++; + //clock_t clocki= clock (); + //cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl; + for (int non_unif=0;non_unif<2;non_unif++) + { + if (cas=="emetteur") + { + if (non_unif) + if(rank!=3) + champ_emetteur->getField()->getArray()->setIJ(0,0,40); + } + //bool ok=false; // Is the time interval successfully solved ? 
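            // The field is re-attached at every pass of the inner loop, but
            // synchronize() -- the costly interpolation-matrix computation --
            // is only called on the first pass (while 'init' is true); later
            // passes just exchange data.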
+ + // Loop on the time interval tries + if(1) { + + + if (cas=="emetteur") + dec_emetteur.attachLocalField(champ_emetteur); + else + dec_emetteur.attachLocalField(champ_recepteur); + + + if(init) dec_emetteur.synchronize(); + init=false; + + if (cas=="emetteur") { + // affiche(champ_emetteur); + dec_emetteur.sendData(); + } + else if (cas=="recepteur") + { + dec_emetteur.recvData(); + if (is_master) + afficheGauthier1(*champ_recepteur,expected[num_cas],expectedLgth[num_cas]); + } + else + throw 0; + MPI_Barrier(MPI_COMM_WORLD); + } + stop=true; + num_cas++; + } + } + delete champ_emetteur; + delete champ_recepteur; + } +} + +void ParaMEDMEMTest::testGauthier2() +{ + double valuesExpected1[2]={0.,0.}; + double valuesExpected2[2]={0.95,0.970625}; + + double valuesExpected30[]={0., 0., 0.05, 0., 0., 0.15, 0., 0., 0.25, 0., 0., 0.35, 0., 0., 0.45, 0., 0., 0.55, 0., 0., 0.65, 0., 0., 0.75, 0., 0., 0.85, 0., 0., 0.95}; + double valuesExpected31[]={0., 0., 0.029375, 0., 0., 0.029375, 0., 0., 0.1, 0., 0., 0.1, 0., 0., 0.2, 0., 0., 0.2, 0., 0., 0.3, 0., 0., 0.3, 0., 0., 0.4, 0., 0., 0.4, 0., 0., 0.5, 0., 0., 0.5, 0., 0., 0.6, 0., 0., 0.6, 0., 0., 0.7, 0., 0., 0.7, 0., 0., 0.8, 0., 0., 0.8, 0., 0., 0.9, 0., 0., 0.9, 0., 0., 0.970625, 0., 0., 0.970625 }; + + double *valuesExpected3[2]={valuesExpected30,valuesExpected31}; + + int rank, size; + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + MPI_Comm_size(MPI_COMM_WORLD,&size); + if (size <2) + return ; + CommInterface comm; + set Genepi_ids; + set entree_chaude_ids; + Genepi_ids.insert(0); + for (int i=1;i mesh(MEDCouplingUMesh::New("mesh",2)); + MEDCouplingAutoRefCountObjectPtr arr(DataArrayDouble::New()); arr->alloc(63,3); + const double cooData[189]={0.,0.,0.,0.5,0.,0.,0.5,0.05,0.,0.,0.1,0.,0.5,0.1,0.,0.5,0.15,0.,0.,0.2,0.,0.5,0.2,0.,0.5,0.25,0.,0.,0.3,0.,0.5,0.3,0.,0.5,0.35,0.,0.,0.4,0.,0.5,0.4,0.,0.5,0.45,0.,0.,0.5,0.,0.5,0.5,0.,0.5,0.55,0.,0.,0.6,0.,0.5,0.6,0.,0.5,0.65,0.,0.,0.7,0.,0.5,0.7,0.,0.5,0.75,0.,0.,0.8,0.,0.5,0.8,0.,0.5,0.85,0.,0.,0.9,0.,0.5,0.9,0.,0.5,0.95,0.,1.,0.,0.,1.,0.1,0.,1.,0.2,0.,1.,0.3,0.,1.,0.4,0.,1.,0.5,0.,1.,0.6,0.,1.,0.7,0.,1.,0.8,0.,1.,0.9,0.,1.,0.05,0.,1.,0.15,0.,1.,0.25,0.,1.,0.35,0.,1.,0.45,0.,1.,0.55,0.,1.,0.65,0.,1.,0.75,0.,1.,0.85,0.,1.,0.95,0.,1.,1.,0.,0.,1.,0.,0.5,1.,0.,0.,0.05,0.,0.,0.15,0.,0.,0.25,0.,0.,0.35,0.,0.,0.45,0.,0.,0.55,0.,0.,0.65,0.,0.,0.75,0.,0.,0.85,0.,0.,0.95,0.}; + std::copy(cooData,cooData+189,arr->getPointer()); + mesh->setCoords(arr); + mesh->allocateCells(80); + const int conn[240]={0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,2,1,31,5,4,32,8,7,33,11,10,34,14,13,35,17,16,36,20,19,37,23,22,38,26,25,39,29,28,30,40,2,31,41,5,32,42,8,33,43,11,34,44,14,35,45,17,36,46,20,37,47,23,38,48,26,39,49,29,31,2,40,32,5,41,33,8,42,34,11,43,35,14,44,36,17,45,37,20,46,38,23,47,39,26,48,50,29,49,3,2,4,6,5,7,9,8,10,12,11,13,15,14,16,18,17,19,21,20,22,24,23,25,27,26,28,51,29,52,31,4,2,32,7,5,33,10,8,34,13,11,35,16,14,36,19,17,37,22,20,38,25,23,39,28,26,50,52,29,0,2,53,3,5,54,6,8,55,9,11,56,12,14,57,15,17,58,18,20,59,21,23,60,24,26,61,27,29,62,3,53,2,6,54,5,9,55,8,12,56,11,15,57,14,18,58,17,21,59,20,24,60,23,27,61,26,51,62,29}; + for(int i=0;i<80;i++) + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn+3*i); + MEDCouplingAutoRefCountObjectPtr f(MEDCouplingFieldDouble::New(ON_NODES,ONE_TIME)); + const double 
valsOfField[189]={0.,0.,0.,0.,0.,0.,0.,0.,0.05,0.,0.,0.1,0.,0.,0.1,0.,0.,0.15,0.,0.,0.2,0.,0.,0.2,0.,0.,0.25,0.,0.,0.3,0.,0.,0.3,0.,0.,0.35,0.,0.,0.4,0.,0.,0.4,0.,0.,0.45,0.,0.,0.5,0.,0.,0.5,0.,0.,0.55,0.,0.,0.6,0.,0.,0.6,0.,0.,0.65,0.,0.,0.7,0.,0.,0.7,0.,0.,0.75,0.,0.,0.8,0.,0.,0.8,0.,0.,0.85,0.,0.,0.9,0.,0.,0.9,0.,0.,0.95,0.,0.,0.,0.,0.,0.1,0.,0.,0.2,0.,0.,0.3,0.,0.,0.4,0.,0.,0.5,0.,0.,0.6,0.,0.,0.7,0.,0.,0.8,0.,0.,0.9,0.,0.,0.05,0.,0.,0.15,0.,0.,0.25,0.,0.,0.35,0.,0.,0.45,0.,0.,0.55,0.,0.,0.65,0.,0.,0.75,0.,0.,0.85,0.,0.,0.95,0.,0.,1.,0.,0.,1.,0.,0.,1.,0.,0.,0.05,0.,0.,0.15,0.,0.,0.25,0.,0.,0.35,0.,0.,0.45,0.,0.,0.55,0.,0.,0.65,0.,0.,0.75,0.,0.,0.85,0.,0.,0.95}; + f->setMesh(mesh); f->setName("VITESSE_P1_OUT"); + arr=DataArrayDouble::New(); arr->alloc(63,3); + std::copy(valsOfField,valsOfField+189,arr->getPointer()); + f->setArray(arr); f->setNature(ConservativeVolumic); + ParaMEDMEM::ParaMESH *paramesh(new ParaMEDMEM::ParaMESH(mesh,entree_chaude_group,"emetteur mesh")); + vitesse=new ParaMEDMEM::ParaFIELD(f,paramesh,entree_chaude_group); + vitesse->setOwnSupport(true); + dec_vit_in_chaude.setMethod("P1"); + } + else + { + MEDCouplingAutoRefCountObjectPtr mesh(MEDCouplingUMesh::New("mesh",2)); + MEDCouplingAutoRefCountObjectPtr arr(DataArrayDouble::New()); arr->alloc(22,3); + const double cooData[66]={0,0,0,1,0,0,0,0.1,0,1,0.1,0,0,0.2,0,1,0.2,0,0,0.3,0,1,0.3,0,0,0.4,0,1,0.4,0,0,0.5,0,1,0.5,0,0,0.6,0,1,0.6,0,0,0.7,0,1,0.7,0,0,0.8,0,1,0.8,0,0,0.9,0,1,0.9,0,0,1,0,1,1,0}; + std::copy(cooData,cooData+66,arr->getPointer()); + mesh->setCoords(arr); + mesh->allocateCells(10); + const int conn[40]={0,1,3,2,2,3,5,4,4,5,7,6,6,7,9,8,8,9,11,10,10,11,13,12,12,13,15,14,14,15,17,16,16,17,19,18,18,19,21,20}; + for(int i=0;i<10;i++) + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn+4*i); + MEDCouplingAutoRefCountObjectPtr f(MEDCouplingFieldDouble::New(type==0?ON_CELLS:ON_NODES,ONE_TIME)); + f->setMesh(mesh); f->setName("vitesse_in_chaude"); + arr=DataArrayDouble::New(); arr->alloc(f->getNumberOfTuplesExpected()*3); arr->fillWithZero(); arr->rearrange(3); + f->setArray(arr); f->setNature(ConservativeVolumic); + ParaMEDMEM::ParaMESH *paramesh(new ParaMEDMEM::ParaMESH(mesh,Genepi_group,"recepteur mesh")); + vitesse=new ParaMEDMEM::ParaFIELD(f,paramesh,Genepi_group); + vitesse->setOwnSupport(true); + dec_vit_in_chaude.setMethod(f->getDiscretization()->getRepr()); + } + + dec_vit_in_chaude.attachLocalField(vitesse); + + dec_vit_in_chaude.synchronize(); + + + // Envois - receptions + if (entree_chaude_group.containsMyRank()) + { + dec_vit_in_chaude.sendData(); + } + else + { + dec_vit_in_chaude.recvData(); + } + if ( !entree_chaude_group.containsMyRank() ) + { + double pmin=1e38, pmax=-1e38; + const double *p(vitesse->getField()->getArray()->begin()); + for(std::size_t i=0;igetField()->getArray()->getNbOfElems();i++,p++) + { + if (*ppmax) pmax=*p; + } + CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected1[type],pmin,1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected2[type],pmax,1e-12); + + int nbCompo(vitesse->getField()->getNumberOfComponents()); + p=vitesse->getField()->getArray()->begin(); + for(int i=0;igetField()->getNumberOfTuples();i++) + for(int c=0;c emetteur_ids; + set recepteur_ids; + emetteur_ids.insert(0); + if(size!=4) + return; + recepteur_ids.insert(1); + if (size >2) + recepteur_ids.insert(2); + if (size >2) + emetteur_ids.insert(3); + if ((rank==0)||(rank==1)) + is_master=1; + + MPIProcessorGroup recepteur_group(comm,recepteur_ids); + MPIProcessorGroup emetteur_group(comm,emetteur_ids); + + 
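  // Same scenario as testGauthier1; the only difference (below) is that the DEC
  // is assigned into a std::vector element, which additionally exercises the
  // copy/assignment of InterpKernelDEC.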
string cas; + if (recepteur_group.containsMyRank()) + { + cas="recepteur"; + //freopen("recpeteur.out","w",stdout); + //freopen("recepteur.err","w",stderr); + } + else + { + cas="emetteur"; + // freopen("emetteur.out","w",stdout); + //freopen("emetteur.err","w",stderr); + } + double expected[8][4]={ + {1.,1.,1.,1.}, + {40., 40., 1., 1.}, + {1.,1.,1e200,1e200}, + {40.,1.,1e200,1e200}, + {1.,1.,1.,1.}, + {40.,1.,1.,1.}, + {1.,1.,1e200,1e200}, + {20.5,1.,1e200,1e200} + }; + int expectedLgth[8]={4,4,2,2,4,4,2,2}; + + for (int send=0;send<2;send++) + for (int rec=0;rec<2;rec++) + { + std::vector decu(1); + decu[0]=InterpKernelDEC(emetteur_group,recepteur_group); + InterpKernelDEC& dec_emetteur=decu[0]; + ParaMEDMEM::ParaFIELD *champ_emetteur(0),*champ_recepteur(0); + ParaMEDMEM::ParaMESH *paramesh(0); + MEDCouplingAutoRefCountObjectPtr mesh; + dec_emetteur.setOrientation(2); + if (send==0) + { + mesh=init_quadGauthier1(is_master); + } + else + { + mesh=init_triangleGauthier1(is_master); + } + paramesh=new ParaMEDMEM::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"emetteur mesh"); + ParaMEDMEM::ComponentTopology comptopo; + champ_emetteur=new ParaMEDMEM::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo); + champ_emetteur->getField()->setNature(ConservativeVolumic); + champ_emetteur->setOwnSupport(true); + if (rec==0) + { + mesh=init_triangleGauthier1(is_master); + } + else + { + mesh=init_quadGauthier1(is_master); + } + paramesh=new ParaMEDMEM::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"recepteur mesh"); + champ_recepteur=new ParaMEDMEM::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo); + champ_recepteur->getField()->setNature(ConservativeVolumic); + champ_recepteur->setOwnSupport(true); + if (cas=="emetteur") + { + champ_emetteur->getField()->getArray()->fillWithValue(1.); + } + + + MPI_Barrier(MPI_COMM_WORLD); + + //clock_t clock0= clock (); + int compti=0; + + bool init=true; // first time step ?? + bool stop=false; + //boucle sur les pas de quads + while (!stop) { + + compti++; + //clock_t clocki= clock (); + //cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl; + for (int non_unif=0;non_unif<2;non_unif++) + { + if (cas=="emetteur") + { + if (non_unif) + if(rank!=3) + champ_emetteur->getField()->getArray()->setIJ(0,0,40); + } + //bool ok=false; // Is the time interval successfully solved ? + + // Loop on the time interval tries + if(1) { + + + if (cas=="emetteur") + dec_emetteur.attachLocalField(champ_emetteur); + else + dec_emetteur.attachLocalField(champ_recepteur); + + + if(init) dec_emetteur.synchronize(); + init=false; + + if (cas=="emetteur") { + // affiche(champ_emetteur); + dec_emetteur.sendData(); + } + else if (cas=="recepteur") + { + dec_emetteur.recvData(); + if (is_master) + afficheGauthier1(*champ_recepteur,expected[num_cas],expectedLgth[num_cas]); + } + else + throw 0; + MPI_Barrier(MPI_COMM_WORLD); + } + stop=true; + num_cas++; + } + } + delete champ_emetteur; + delete champ_recepteur; + } +} + +/*! + * This test is the parallel version of MEDCouplingBasicsTest.test3D1DOnP1P0_1 test. 
+ */ +void ParaMEDMEMTest::testGauthier4() +{ + // + const double sourceCoords[19*3]={0.5,0.5,0.1,0.5,0.5,1.2,0.5,0.5,1.6,0.5,0.5,1.8,0.5,0.5,2.43,0.5,0.5,2.55,0.5,0.5,4.1,0.5,0.5,4.4,0.5,0.5,4.9,0.5,0.5,5.1,0.5,0.5,7.6,0.5,0.5,7.7,0.5,0.5,8.2,0.5,0.5,8.4,0.5,0.5,8.6,0.5,0.5,8.8,0.5,0.5,9.2,0.5,0.5,9.6,0.5,0.5,11.5}; + const int sourceConn[18*2]={0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18}; + const double sourceVals[19]={0.49,2.8899999999999997,7.29,13.69,22.09,32.49,44.89,59.29,75.69,94.09, 114.49,136.89,161.29,187.69,216.09,246.49,278.89,313.29,349.69}; + const double targetCoords0[20*3]={0.,0.,0.,1.,0.,0.,0.,1.,0.,1.,1.,0.,0.,0.,1.,1.,0.,1.,0.,1.,1.,1.,1.,1.,0.,0.,2.,1.,0.,2.,0.,1.,2.,1.,1.,2.,0.,0.,3.,1.,0.,3.,0.,1.,3.,1.,1.,3.,0.,0.,4.,1.,0.,4.,0.,1.,4.,1.,1.,4.}; + const int targetConn0[8*4]={1,0,2,3,5,4,6,7,5,4,6,7,9,8,10,11,9,8,10,11,13,12,14,15,13,12,14,15,17,16,18,19}; + const double targetCoords1[28*3]={0.,0.,4.,1.,0.,4.,0.,1.,4.,1.,1.,4.,0.,0.,5.,1.,0.,5.,0.,1.,5.,1.,1.,5.,0.,0.,6.,1.,0.,6.,0.,1.,6.,1.,1.,6.,0.,0.,7.,1.,0.,7.,0.,1.,7.,1.,1.,7.,0.,0.,8.,1.,0.,8.,0.,1.,8.,1.,1.,8.,0.,0.,9.,1.,0.,9.,0.,1.,9.,1.,1.,9.,0.,0.,10.,1.,0.,10.,0.,1.,10.,1.,1.,10.}; + const int targetConn1[8*6]={1,0,2,3,5,4,6,7,5,4,6,7,9,8,10,11,9,8,10,11,13,12,14,15,13,12,14,15,17,16,18,19,17,16,18,19,21,20,22,23,21,20,22,23,25,24,26,27}; + // + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=3) + return ; + int nproc_source = 1; + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + std::ostringstream stream; stream << "sourcemesh2D proc " << rank; + mesh=MEDCouplingUMesh::New(stream.str().c_str(),1); + mesh->allocateCells(); + for(int i=0;i<18;i++) + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,sourceConn+2*i); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(19,3); + std::copy(sourceCoords,sourceCoords+19*3,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh,comptopo); + double *value=parafield->getField()->getArray()->getPointer(); + std::copy(sourceVals,sourceVals+19,value); + } + else + { + if(rank==1) + { + std::ostringstream stream; stream << "targetmesh2D proc " << rank-nproc_source; + mesh=MEDCouplingUMesh::New(stream.str().c_str(),3); + mesh->allocateCells(); + for(int i=0;i<4;i++) + mesh->insertNextCell(INTERP_KERNEL::NORM_HEXA8,8,targetConn0+8*i); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(20,3); + std::copy(targetCoords0,targetCoords0+20*3,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH (mesh,*target_group,"target mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + } + else if(rank==2) + { + std::ostringstream stream; stream << "targetmesh2D proc " << rank-nproc_source; + mesh=MEDCouplingUMesh::New(stream.str().c_str(),3); + mesh->allocateCells(); + for(int i=0;i<6;i++) + mesh->insertNextCell(INTERP_KERNEL::NORM_HEXA8,8,targetConn1+8*i); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(28,3); + std::copy(targetCoords1,targetCoords1+28*3,myCoords->getPointer()); + 
mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH (mesh,*target_group,"target mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + } + } + //test 1 - primaire -> secondaire + ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group); + dec.setIntersectionType(INTERP_KERNEL::PointLocator); + parafield->getField()->setNature(ConservativeVolumic);//very important + if (source_group->containsMyRank()) + { + dec.setMethod("P1"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + } + else + { + dec.setMethod("P0"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + const double *res(parafield->getField()->getArray()->getConstPointer()); + if(rank==1) + { + const double expected0[4]={0.49,7.956666666666667,27.29,0.}; + for(int i=0;i<4;i++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected0[i],res[i],1e-13); + } + else + { + const double expected1[6]={59.95666666666667,94.09,0.,125.69,202.89,296.09}; + for(int i=0;i<6;i++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected1[i],res[i],1e-13); + } + } + MPI_Barrier(MPI_COMM_WORLD); + if (source_group->containsMyRank()) + { + dec.recvData(); + const double expected2[19]={0.49,7.956666666666667,7.956666666666667,7.956666666666667,27.29,27.29,59.95666666666667,59.95666666666667,59.95666666666667,94.09,125.69,125.69,202.89,202.89,202.89,202.89,296.09,296.09,0.}; + const double *res(parafield->getField()->getArray()->getConstPointer()); + for(int i=0;i<19;i++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected2[i],res[i],1e-13); + } + else + { + dec.sendData(); + } + delete parafield; + mesh->decrRef(); + delete paramesh; + delete self_group; + delete target_group; + delete source_group; + // + MPI_Barrier(MPI_COMM_WORLD); +} diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx new file mode 100644 index 000000000..a4624005d --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx @@ -0,0 +1,194 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
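// With INTERP_KERNEL::PointLocator and a P1 source coupled to a P0 target
// (testGauthier4 above), each target cell receives the mean of the nodal values
// whose points fall inside it.  For instance the second hexahedron of proc 1
// spans z in [1,2] and contains the source nodes at z = 1.2, 1.6 and 1.8, so
// the expected value is (2.89 + 7.29 + 13.69) / 3 = 7.956666..., exactly the
// number asserted above; a cell containing no source node stays at 0.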
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "ComponentTopology.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "InterpKernelDEC.hxx" + +#include "MEDCouplingUMesh.hxx" + +#include +#include +#include +#include +#include + +using namespace std; +using namespace ParaMEDMEM; +using namespace ICoCo; + +typedef enum {sync_and,sync_or} synctype; +void synchronize_bool(bool& stop, synctype s) +{ + int my_stop; + int my_stop_temp = stop?1:0; + if (s==sync_and) + MPI_Allreduce(&my_stop_temp,&my_stop,1,MPI_INTEGER,MPI_MIN,MPI_COMM_WORLD); + else if (s==sync_or) + MPI_Allreduce(&my_stop_temp,&my_stop,1,MPI_INTEGER,MPI_MAX,MPI_COMM_WORLD); + stop =(my_stop==1); +} + +void synchronize_dt(double& dt) +{ + double dttemp=dt; + MPI_Allreduce(&dttemp,&dt,1,MPI_DOUBLE,MPI_MIN,MPI_COMM_WORLD); +} + + +void affiche(const ParaFIELD& field) +{ + cout <getName()<getArray()->begin()); + for(int ele=0;elegetNumberOfTuples();ele++) + cout << ele <<": "<< vals[ele] << endl; +} + +MEDCouplingUMesh *init_quad() +{ + MEDCouplingAutoRefCountObjectPtr m(MEDCouplingUMesh::New("champ_quad",2)); + MEDCouplingAutoRefCountObjectPtr coo(DataArrayDouble::New()); + const double dataCoo[24]={0.,0.,0.,1.,0.,0.,0.,0.,1.,1.,0.,1.,0.,1e-05,0.,1.,1e-05,0.,0.,1e-05,1.,1.,1e-05,1.}; + coo->alloc(8,3); + std::copy(dataCoo,dataCoo+24,coo->getPointer()); + const int conn[8]={0,1,3,2,4,5,7,6}; + m->allocateCells(2); + m->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn); + m->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn+4); + m->setCoords(coo); + return m.retn(); +} + +MEDCouplingUMesh *init_triangle() +{ + MEDCouplingAutoRefCountObjectPtr m(MEDCouplingUMesh::New("champ_triangle",2)); + MEDCouplingAutoRefCountObjectPtr coo(DataArrayDouble::New()); + const double dataCoo[24]={0.,0.,0.,1.,0.,0.,0.,0.,1.,1.,0.,1.,0.,1e-05,0.,1.,1e-05,0.,0.,1e-05,1.,1.,1e-05,1.}; + coo->alloc(8,3); + std::copy(dataCoo,dataCoo+24,coo->getPointer()); + const int conn[12]={0,1,2,1,2,3,4,5,7,4,6,7}; + m->allocateCells(4); + for(int i=0;i<4;i++) + m->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn+3*i); + m->setCoords(coo); + return m.retn(); +} + +void ParaMEDMEMTest::testICoco1() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + + //the test is meant to run on five processors + if (size !=2) return ; + + CommInterface comm; + set emetteur_ids; + set recepteur_ids; + emetteur_ids.insert(0); + recepteur_ids.insert(1); + + MPIProcessorGroup recepteur_group(comm,recepteur_ids); + MPIProcessorGroup emetteur_group(comm,emetteur_ids); + + string cas; + if (recepteur_group.containsMyRank()) + cas="recepteur"; + else + cas="emetteur"; + + InterpKernelDEC dec_emetteur(emetteur_group,recepteur_group); + dec_emetteur.setOrientation(2); + ParaMEDMEM::ParaFIELD *champ_emetteur(0),*champ_recepteur(0); + ParaMEDMEM::ParaMESH *paramesh(0); + if (cas=="emetteur") + { + MEDCouplingAutoRefCountObjectPtr mesh_emetteur(init_triangle()); + paramesh=new ParaMEDMEM::ParaMESH(mesh_emetteur,emetteur_group,"emetteur mesh"); + ParaMEDMEM::ComponentTopology comptopo; + champ_emetteur=new 
ParaMEDMEM::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo); + champ_emetteur->getField()->setNature(ConservativeVolumic); + champ_emetteur->setOwnSupport(true); + champ_emetteur->getField()->getArray()->fillWithValue(1.); + } + else + { + MEDCouplingAutoRefCountObjectPtr mesh_recepteur(init_quad()); + paramesh=new ParaMEDMEM::ParaMESH(mesh_recepteur,recepteur_group,"recepteur mesh"); + ParaMEDMEM::ComponentTopology comptopo; + champ_recepteur=new ParaMEDMEM::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo); + champ_recepteur->getField()->setNature(ConservativeVolumic); + champ_recepteur->setOwnSupport(true); + } + + + MPI_Barrier(MPI_COMM_WORLD); + + clock_t clock0(clock()); + int compti=0; + + bool init(true),stop(false); + //boucle sur les pas de quads + while(!stop) + { + compti++; + clock_t clocki= clock (); + cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl; + for (int non_unif=0;non_unif<2;non_unif++) + { + if (cas=="emetteur") + if (non_unif) + champ_emetteur->getField()->getArray()->setIJ(0,0,40.); + //bool ok=false; // Is the time interval successfully solved ? + + // Loop on the time interval tries + if (cas=="emetteur") + dec_emetteur.attachLocalField(champ_emetteur); + else + dec_emetteur.attachLocalField(champ_recepteur); + + if(init) + dec_emetteur.synchronize(); + init=false; + + if (cas=="emetteur") + { + dec_emetteur.sendData(); + affiche(*champ_emetteur); + } + else if (cas=="recepteur") + { + dec_emetteur.recvData(); + affiche(*champ_recepteur); + } + else + throw 0; + } + stop=true; + } + delete champ_recepteur; + delete champ_emetteur; +} diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx new file mode 100644 index 000000000..20343e9e7 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx @@ -0,0 +1,2283 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
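// testICoco1 above follows the same protocol as the other DEC tests: attach the
// local field, call synchronize() once to build the interpolation matrix, then
// sendData()/recvData() on the matching sides inside the time loop.  The helper
// reductions defined in that file are the usual way such a coupling loop agrees
// on a common stop flag and time step across processes (sketch; the local_*
// variables are placeholders):
//
//   bool stop = local_stop_condition;
//   synchronize_bool(stop, sync_or);   // stop as soon as any process asks to stop
//   double dt = local_dt;
//   synchronize_dt(dt);                // every process advances with the smallest dt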
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "DEC.hxx" +#include "MxN_Mapping.hxx" +#include "InterpKernelDEC.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ComponentTopology.hxx" +#include "ICoCoMEDField.hxx" +#include "ParaMEDLoader.hxx" +#include "MEDLoader.hxx" + + +#include +#include + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + + +using namespace std; +using namespace ParaMEDMEM; + +void ParaMEDMEMTest::testInterpKernelDEC_2D() +{ + testInterpKernelDEC_2D_("P0","P0"); +} + +void ParaMEDMEMTest::testInterpKernelDEC2_2D() +{ + testInterpKernelDEC2_2D_("P0","P0"); +} + +void ParaMEDMEMTest::testInterpKernelDEC_3D() +{ + testInterpKernelDEC_3D_("P0","P0"); +} + +void ParaMEDMEMTest::testInterpKernelDEC_2DP0P1() +{ + //testInterpKernelDEC_2D_("P0","P1"); +} + +void ParaMEDMEMTest::testInterpKernelDEC_1D() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=5) + return ; + int nproc_source = 3; + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + if(rank==0) + { + double coords[4]={0.3,0.7, 0.9,1.0}; + int conn[4]={0,1,2,3}; + mesh=MEDCouplingUMesh::New("Source mesh Proc0",1); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn+2); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,1); + std::copy(coords,coords+4,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + if(rank==1) + { + double coords[2]={0.7,0.9}; + int conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Source mesh Proc1",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,1); + std::copy(coords,coords+2,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + if(rank==2) + { + double coords[2]={1.,1.12}; + int conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Source mesh Proc2",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,1); + std::copy(coords,coords+2,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + parafieldP0->getField()->setNature(ConservativeVolumic); + if(rank==0) + { + valueP0[0]=7.; valueP0[1]=8.; + } + if(rank==1) + { + valueP0[0]=9.; + } + if(rank==2) + { + valueP0[0]=10.; + } + } + else + { + const char targetMeshName[]="target mesh"; + if(rank==3) + { 
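        // Target cell [0.5, 0.75] overlaps source cell [0.3, 0.7] (value 7) over
        // a length of 0.2 and source cell [0.7, 0.9] (value 9) over 0.05, so the
        // ConservativeVolumic result checked below is
        // (0.2*7 + 0.05*9) / 0.25 = 7.4.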
+ double coords[2]={0.5,0.75}; + int conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Target mesh Proc3",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,1); + std::copy(coords,coords+2,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + } + if(rank==4) + { + double coords[2]={0.75,1.2}; + int conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Target mesh Proc4",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,1); + std::copy(coords,coords+2,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + } + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafieldP0->getField()->setNature(ConservativeVolumic); + } + // test 1 + ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group); + if (source_group->containsMyRank()) + { + dec.setMethod("P0"); + dec.attachLocalField(parafieldP0); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + dec.recvData(); + const double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(7.4,valueP0[0],1e-7); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540544,valueP0[1],1e-7); + } + if(rank==1) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(8.64054054054054,valueP0[0],1e-7); + } + if(rank==2) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540544,valueP0[0],1e-7); + } + } + else + { + dec.setMethod("P0"); + dec.attachLocalField(parafieldP0); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + const double *res=parafieldP0->getField()->getArray()->getConstPointer(); + if(rank==3) + { + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfTuples()); + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfComponents()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(7.4,res[0],1e-12); + } + if(rank==4) + { + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfTuples()); + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfComponents()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540526,res[0],1e-12); + } + dec.sendData(); + } + // + delete parafieldP0; + mesh->decrRef(); + delete paramesh; + delete self_group; + delete target_group; + delete source_group; + // + MPI_Barrier(MPI_COMM_WORLD); +} + +void ParaMEDMEMTest::testInterpKernelDEC_2DCurve() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=5) + return ; + int nproc_source = 3; + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + if(rank==0) + { + double coords[8]={0.3,0.3,0.7,0.7, 0.9,0.9,1.0,1.0}; + int conn[4]={0,1,2,3}; + mesh=MEDCouplingUMesh::New("Source mesh Proc0",1); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn+2); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(coords,coords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + if(rank==1) + { + double coords[4]={0.7,0.7,0.9,0.9}; + int 
conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Source mesh Proc1",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,2); + std::copy(coords,coords+4,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + if(rank==2) + { + double coords[4]={1.,1.,1.12,1.12}; + int conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Source mesh Proc2",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,2); + std::copy(coords,coords+4,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + parafieldP0->getField()->setNature(ConservativeVolumic); + if(rank==0) + { + valueP0[0]=7.; valueP0[1]=8.; + } + if(rank==1) + { + valueP0[0]=9.; + } + if(rank==2) + { + valueP0[0]=10.; + } + } + else + { + const char targetMeshName[]="target mesh"; + if(rank==3) + { + double coords[4]={0.5,0.5,0.75,0.75}; + int conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Target mesh Proc3",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,2); + std::copy(coords,coords+4,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + } + if(rank==4) + { + double coords[4]={0.75,0.75,1.2,1.2}; + int conn[2]={0,1}; + mesh=MEDCouplingUMesh::New("Target mesh Proc4",1); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_SEG2,2,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(2,2); + std::copy(coords,coords+4,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + } + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafieldP0->getField()->setNature(ConservativeVolumic); + } + // test 1 + ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group); + if (source_group->containsMyRank()) + { + dec.setMethod("P0"); + dec.attachLocalField(parafieldP0); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + dec.recvData(); + const double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(7.4,valueP0[0],1e-7); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540544,valueP0[1],1e-7); + } + if(rank==1) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(8.64054054054054,valueP0[0],1e-7); + } + if(rank==2) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540544,valueP0[0],1e-7); + } + } + else + { + dec.setMethod("P0"); + dec.attachLocalField(parafieldP0); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + const double *res=parafieldP0->getField()->getArray()->getConstPointer(); + if(rank==3) + { + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfTuples()); + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfComponents()); + 
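+        // 7.4 = (0.2*7 + 0.05*9)/0.25, the length-weighted mean of the overlapping source cells;
+        // identical to the 1D case since this curve lies along y=x and the sqrt(2) factor cancels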
CPPUNIT_ASSERT_DOUBLES_EQUAL(7.4,res[0],1e-12); + } + if(rank==4) + { + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfTuples()); + CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfComponents()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540526,res[0],1e-12); + } + dec.sendData(); + } + // + delete parafieldP0; + mesh->decrRef(); + delete paramesh; + delete self_group; + delete target_group; + delete source_group; + // + MPI_Barrier(MPI_COMM_WORLD); +} + + +/* + * Check methods defined in InterpKernelDEC.hxx + * + InterpKernelDEC(); + InterpKernelDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group); + virtual ~InterpKernelDEC(); + void synchronize(); + void recvData(); + void sendData(); +*/ + +void ParaMEDMEMTest::testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth) +{ + std::string srcM(srcMeth); + std::string targetM(targetMeth); + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + + //the test is meant to run on five processors + if (size !=5) return ; + + int nproc_source = 3; + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + string master = filename_xml1; + + ostringstream strstream; + strstream <getField()->setNature(ConservativeVolumic); + } + else + parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + int nb_local; + if(srcM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + // double * value= new double[nb_local]; + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + dec.setMethod(srcMeth); + dec.attachLocalField(icocofield); + } + + //loading the geometry for the target group + if (target_group->containsMyRank()) + { + string master= filename_xml2; + ostringstream strstream; + strstream << master<<(rank-nproc_source+1)<<".med"; + ostringstream meshname ; + meshname<< "Mesh_3_"<getField()->setNature(ConservativeVolumic); + } + else + parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + int nb_local; + if(targetM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + // double * value= new double[nb_local]; + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + dec.setMethod(targetMeth); + dec.attachLocalField(icocofield); + } + + + //attaching a DEC to the source group + double field_before_int; + double field_after_int; + + if (source_group->containsMyRank()) + { + field_before_int = parafield->getVolumeIntegral(0,true); + dec.synchronize(); + cout<<"DEC usage"<myRank()==0) + aRemover.Register("./sourcesquareb"); + ostringstream filename; + filename<<"./sourcesquareb_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + //MEDLoader::WriteField("./sourcesquareb",parafield->getField()); + + dec.recvData(); + cout <<"writing"<myRank()==0) + aRemover.Register("./sourcesquare"); + //MEDLoader::WriteField("./sourcesquare",parafield->getField()); + + + filename<<"./sourcesquare_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + field_after_int = parafield->getVolumeIntegral(0,true); + + + // MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + // MPI_Bcast(&field_after_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + + CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6); + + } + + //attaching a DEC to the target group + if (target_group->containsMyRank()) + { + dec.synchronize(); + 
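+      // target side: receive the interpolated field, dump the target ParaMESH to ./targetsquareb, then send data back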
dec.setForcedRenormalization(false); + + dec.recvData(); + ParaMEDLoader::WriteParaMesh("./targetsquareb",paramesh); + //MEDLoader::WriteField("./targetsquareb",parafield->getField()); + if (target_group->myRank()==0) + aRemover.Register("./targetsquareb"); + ostringstream filename; + filename<<"./targetsquareb_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + dec.sendData(); + ParaMEDLoader::WriteParaMesh("./targetsquare",paramesh); + //MEDLoader::WriteField("./targetsquare",parafield->getField()); + + if (target_group->myRank()==0) + aRemover.Register("./targetsquareb"); + + filename<<"./targetsquareb_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + // double field_before_int, field_after_int; + // MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + // MPI_Bcast(&field_after_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + + // CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6); + + } + + delete source_group; + delete target_group; + delete self_group; + delete parafield; + delete paramesh; + mesh->decrRef(); + + delete icocofield; + + MPI_Barrier(MPI_COMM_WORLD); + cout << "end of InterpKernelDEC_2D test"< self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + string master = filename_xml1; + + ostringstream strstream; + strstream <setMesh(mesh); + DataArrayDouble *array=DataArrayDouble::New(); + array->alloc(mcfield->getNumberOfTuples(),1); + mcfield->setArray(array); + array->decrRef(); + mcfield->setNature(ConservativeVolumic); + } + else + { + mcfield = MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME); + mcfield->setMesh(mesh); + DataArrayDouble *array=DataArrayDouble::New(); + array->alloc(mcfield->getNumberOfTuples(),1); + mcfield->setArray(array); + array->decrRef(); + } + int nb_local; + if(srcM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + double *value=mcfield->getArray()->getPointer(); + for(int ielem=0; ielemcontainsMyRank()) + { + string master= filename_xml2; + ostringstream strstream; + strstream << master<<(rank-nproc_source+1)<<".med"; + ostringstream meshname ; + meshname<< "Mesh_3_"<setMesh(mesh); + DataArrayDouble *array=DataArrayDouble::New(); + array->alloc(mcfield->getNumberOfTuples(),1); + mcfield->setArray(array); + array->decrRef(); + mcfield->setNature(ConservativeVolumic); + } + else + { + mcfield = MEDCouplingFieldDouble::New(ON_NODES,NO_TIME); + mcfield->setMesh(mesh); + DataArrayDouble *array=DataArrayDouble::New(); + array->alloc(mcfield->getNumberOfTuples(),1); + mcfield->setArray(array); + array->decrRef(); + } + int nb_local; + if(targetM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + double *value=mcfield->getArray()->getPointer(); + for(int ielem=0; ielemcontainsMyRank()) + { + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + dec.recvData(); + } + + //attaching a DEC to the target group + if (target_group->containsMyRank()) + { + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + dec.sendData(); + } + delete source_group; + delete target_group; + delete self_group; + mcfield->decrRef(); + mesh->decrRef(); + + MPI_Barrier(MPI_COMM_WORLD); + cout << "end of InterpKernelDEC2_2D test"< self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + string master = filename_xml1; + + ostringstream strstream; + strstream <getField()->setNature(ConservativeVolumic); + } + else + parafield = new 
ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + int nb_local; + if(srcM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + // double * value= new double[nb_local]; + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + dec.setMethod(srcMeth); + dec.attachLocalField(icocofield); + } + + //loading the geometry for the target group + if (target_group->containsMyRank()) + { + string master= filename_xml2; + ostringstream strstream; + strstream << master << ".med"; + ostringstream meshname ; + meshname<< "Mesh_6"; + mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0); + + paramesh=new ParaMESH (mesh,*target_group,"target mesh"); + // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group); + ParaMEDMEM::ComponentTopology comptopo; + if(targetM=="P0") + { + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + } + else + parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + int nb_local; + if(targetM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + // double * value= new double[nb_local]; + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + dec.setMethod(targetMeth); + dec.attachLocalField(icocofield); + } + //attaching a DEC to the source group + double field_before_int; + double field_after_int; + + if (source_group->containsMyRank()) + { + field_before_int = parafield->getVolumeIntegral(0,true); + dec.synchronize(); + cout<<"DEC usage"<myRank()==0) + aRemover.Register("./sourcesquareb"); + ostringstream filename; + filename<<"./sourcesquareb_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + //MEDLoader::WriteField("./sourcesquareb",parafield->getField()); + + dec.recvData(); + cout <<"writing"<myRank()==0) + aRemover.Register("./sourcesquare"); + //MEDLoader::WriteField("./sourcesquare",parafield->getField()); + + + filename<<"./sourcesquare_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + field_after_int = parafield->getVolumeIntegral(0,true); + + CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6); + + } + + //attaching a DEC to the target group + if (target_group->containsMyRank()) + { + dec.synchronize(); + dec.setForcedRenormalization(false); + + dec.recvData(); + ParaMEDLoader::WriteParaMesh("./targetsquareb",paramesh); + //MEDLoader::WriteField("./targetsquareb",parafield->getField()); + if (target_group->myRank()==0) + aRemover.Register("./targetsquareb"); + ostringstream filename; + filename<<"./targetsquareb_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + dec.sendData(); + ParaMEDLoader::WriteParaMesh("./targetsquare",paramesh); + //MEDLoader::WriteField("./targetsquare",parafield->getField()); + + if (target_group->myRank()==0) + aRemover.Register("./targetsquareb"); + + filename<<"./targetsquareb_"<myRank()+1; + aRemover.Register(filename.str().c_str()); + } + delete source_group; + delete target_group; + delete self_group; + delete parafield; + delete paramesh; + mesh->decrRef(); + + delete icocofield; + + MPI_Barrier(MPI_COMM_WORLD); + cout << "end of InterpKernelDEC_3D test"< self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + std::ostringstream stream; stream << "sourcemesh2D proc " << rank; + mesh=MEDCouplingUMesh::New(stream.str().c_str(),2); + 
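+      // each source proc carries a single QUAD4 cell whose field value is set to 34+13*rank below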
mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + const double *sourceCoords=sourceCoordsAll[rank]; + std::copy(sourceCoords,sourceCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + double *value=parafield->getField()->getArray()->getPointer(); + value[0]=34+13*((double)rank); + } + else + { + std::ostringstream stream; stream << "targetmesh2D proc " << rank-nproc_source; + mesh=MEDCouplingUMesh::New(stream.str().c_str(),2); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All+4); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(8,2); + const double *targetCoords=targetCoordsAll[rank-nproc_source]; + std::copy(targetCoords,targetCoords+16,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH (mesh,*target_group,"target mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + } + //test 1 - Conservative volumic + ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group); + parafield->getField()->setNature(ConservativeVolumic); + if (source_group->containsMyRank()) + { + dec.setMethod("P0"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + } + else + { + dec.setMethod("P0"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + const double *expected=targetResults[rank-nproc_source]; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13); + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13); + } + //test 2 - Integral + ParaMEDMEM::InterpKernelDEC dec2(*source_group,*target_group); + parafield->getField()->setNature(Integral); + if (source_group->containsMyRank()) + { + dec2.setMethod("P0"); + dec2.attachLocalField(parafield); + dec2.synchronize(); + dec2.setForcedRenormalization(false); + dec2.sendData(); + } + else + { + dec2.setMethod("P0"); + dec2.attachLocalField(parafield); + dec2.synchronize(); + dec2.setForcedRenormalization(false); + dec2.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + const double *expected=targetResults2[rank-nproc_source]; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13); + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13); + } + //test 3 - Integral with global constraint + ParaMEDMEM::InterpKernelDEC dec3(*source_group,*target_group); + parafield->getField()->setNature(IntegralGlobConstraint); + if (source_group->containsMyRank()) + { + dec3.setMethod("P0"); + dec3.attachLocalField(parafield); + dec3.synchronize(); + dec3.setForcedRenormalization(false); + dec3.sendData(); + } + else + { + dec3.setMethod("P0"); + dec3.attachLocalField(parafield); + dec3.synchronize(); + dec3.setForcedRenormalization(false); + dec3.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + const double *expected=targetResults3[rank-nproc_source]; + 
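+      // targetResults3 holds the reference values for the IntegralGlobConstraint nature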
CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13); + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13); + } + //test 4 - RevIntegral + ParaMEDMEM::InterpKernelDEC dec4(*source_group,*target_group); + parafield->getField()->setNature(RevIntegral); + if (source_group->containsMyRank()) + { + dec4.setMethod("P0"); + dec4.attachLocalField(parafield); + dec4.synchronize(); + dec4.setForcedRenormalization(false); + dec4.sendData(); + } + else + { + dec4.setMethod("P0"); + dec4.attachLocalField(parafield); + dec4.synchronize(); + dec4.setForcedRenormalization(false); + dec4.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + const double *expected=targetResults4[rank-nproc_source]; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13); + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13); + } + //test 5 - Conservative volumic reversed + ParaMEDMEM::InterpKernelDEC dec5(*source_group,*target_group); + parafield->getField()->setNature(ConservativeVolumic); + if (source_group->containsMyRank()) + { + dec5.setMethod("P0"); + dec5.attachLocalField(parafield); + dec5.synchronize(); + dec5.setForcedRenormalization(false); + dec5.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples()); + const double expected[]={37.8518518518519,43.5333333333333}; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13); + } + else + { + dec5.setMethod("P0"); + dec5.attachLocalField(parafield); + dec5.synchronize(); + dec5.setForcedRenormalization(false); + double *res=parafield->getField()->getArray()->getPointer(); + const double *toSet=targetResults[rank-nproc_source]; + res[0]=toSet[0]; + res[1]=toSet[1]; + dec5.sendData(); + } + //test 6 - Integral reversed + ParaMEDMEM::InterpKernelDEC dec6(*source_group,*target_group); + parafield->getField()->setNature(Integral); + if (source_group->containsMyRank()) + { + dec6.setMethod("P0"); + dec6.attachLocalField(parafield); + dec6.synchronize(); + dec6.setForcedRenormalization(false); + dec6.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples()); + const double expected[]={0.794600591715977,1.35631163708087}; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13); + } + else + { + dec6.setMethod("P0"); + dec6.attachLocalField(parafield); + dec6.synchronize(); + dec6.setForcedRenormalization(false); + double *res=parafield->getField()->getArray()->getPointer(); + const double *toSet=targetResults2[rank-nproc_source]; + res[0]=toSet[0]; + res[1]=toSet[1]; + dec6.sendData(); + } + //test 7 - Integral with global constraint reversed + ParaMEDMEM::InterpKernelDEC dec7(*source_group,*target_group); + parafield->getField()->setNature(IntegralGlobConstraint); + if (source_group->containsMyRank()) + { + dec7.setMethod("P0"); + dec7.attachLocalField(parafield); + dec7.synchronize(); + dec7.setForcedRenormalization(false); + dec7.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples()); + const double expected[]={36.4592592592593,44.5407407407407}; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13); + } + else + { + dec7.setMethod("P0"); + dec7.attachLocalField(parafield); + dec7.synchronize(); + dec7.setForcedRenormalization(false); + double *res=parafield->getField()->getArray()->getPointer(); + const double 
*toSet=targetResults3[rank-nproc_source]; + res[0]=toSet[0]; + res[1]=toSet[1]; + dec7.sendData(); + } + //test 8 - Integral with RevIntegral reversed + ParaMEDMEM::InterpKernelDEC dec8(*source_group,*target_group); + parafield->getField()->setNature(RevIntegral); + if (source_group->containsMyRank()) + { + dec8.setMethod("P0"); + dec8.attachLocalField(parafield); + dec8.synchronize(); + dec8.setForcedRenormalization(false); + dec8.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples()); + const double expected[]={0.81314102564102553,1.3428994082840233}; + CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13); + } + else + { + dec8.setMethod("P0"); + dec8.attachLocalField(parafield); + dec8.synchronize(); + dec8.setForcedRenormalization(false); + double *res=parafield->getField()->getArray()->getPointer(); + const double *toSet=targetResults4[rank-nproc_source]; + res[0]=toSet[0]; + res[1]=toSet[1]; + dec8.sendData(); + } + // + delete parafield; + mesh->decrRef(); + delete paramesh; + delete self_group; + delete target_group; + delete source_group; + // + MPI_Barrier(MPI_COMM_WORLD); +} + +void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P1P1P0() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=5) + return ; + int nproc_source = 2; + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + if(rank==0) + { + double coords[6]={-0.3,-0.3, 0.7,0.7, 0.7,-0.3}; + int conn[3]={0,1,2}; + //int globalNode[3]={1,2,0}; + mesh=MEDCouplingUMesh::New("Source mesh Proc0",2); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(3,2); + std::copy(coords,coords+6,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + if(rank==1) + { + double coords[6]={-0.3,-0.3, -0.3,0.7, 0.7,0.7}; + int conn[3]={0,1,2}; + //int globalNode[3]={1,3,2}; + mesh=MEDCouplingUMesh::New("Source mesh Proc1",2); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(3,2); + std::copy(coords,coords+6,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + } + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + double *valueP1=parafieldP1->getField()->getArray()->getPointer(); + parafieldP0->getField()->setNature(ConservativeVolumic); + parafieldP1->getField()->setNature(ConservativeVolumic); + if(rank==0) + { + valueP0[0]=31.; + valueP1[0]=34.; valueP1[1]=77.; valueP1[2]=53.; + } + if(rank==1) + { + valueP0[0]=47.; + valueP1[0]=34.; valueP1[1]=57.; valueP1[2]=77.; + } + } + else + { + const char targetMeshName[]="target mesh"; + if(rank==2) + { + double coords[10]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2 }; + int conn[7]={0,3,4,1, 1,4,2}; + //int globalNode[5]={4,3,0,2,1}; + mesh=MEDCouplingUMesh::New("Target mesh Proc2",2); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn); + 
mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn+4); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(5,2); + std::copy(coords,coords+10,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + DataArrayInt *da=DataArrayInt::New(); + const int globalNumberingP2[5]={0,1,2,3,4}; + da->useArray(globalNumberingP2,false,CPP_DEALLOC,5,1); + paramesh->setNodeGlobal(da); + da->decrRef(); + } + if(rank==3) + { + double coords[6]={0.2,0.2, 0.7,-0.3, 0.7,0.2}; + int conn[3]={0,2,1}; + //int globalNode[3]={1,0,5}; + mesh=MEDCouplingUMesh::New("Target mesh Proc3",2); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(3,2); + std::copy(coords,coords+6,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + DataArrayInt *da=DataArrayInt::New(); + const int globalNumberingP3[3]={4,2,5}; + da->useArray(globalNumberingP3,false,CPP_DEALLOC,3,1); + paramesh->setNodeGlobal(da); + da->decrRef(); + } + if(rank==4) + { + double coords[12]={-0.3,0.2, -0.3,0.7, 0.2,0.7, 0.2,0.2, 0.7,0.7, 0.7,0.2}; + int conn[8]={0,1,2,3, 3,2,4,5}; + //int globalNode[6]={2,6,7,1,8,5}; + mesh=MEDCouplingUMesh::New("Target mesh Proc4",2); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn+4); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(6,2); + std::copy(coords,coords+12,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + DataArrayInt *da=DataArrayInt::New(); + const int globalNumberingP4[6]={3,6,7,4,8,5}; + da->useArray(globalNumberingP4,false,CPP_DEALLOC,6,1); + paramesh->setNodeGlobal(da); + da->decrRef(); + } + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + parafieldP0->getField()->setNature(ConservativeVolumic); + parafieldP1->getField()->setNature(ConservativeVolumic); + } + // test 1 - P0 P1 + ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group); + if (source_group->containsMyRank()) + { + dec.setMethod("P0"); + dec.attachLocalField(parafieldP0); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + dec.recvData(); + const double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(34.42857143,valueP0[0],1e-7); + } + if(rank==1) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(44.,valueP0[0],1e-7); + } + } + else + { + dec.setMethod("P1"); + dec.attachLocalField(parafieldP1); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + const double *res=parafieldP1->getField()->getArray()->getConstPointer(); + if(rank==2) + { + const double expectP2[5]={39.0, 31.0, 31.0, 47.0, 39.0}; + CPPUNIT_ASSERT_EQUAL(5,parafieldP1->getField()->getNumberOfTuples()); + CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents()); + for(int kk=0;kk<5;kk++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP2[kk],res[kk],1e-12); + } + if(rank==3) + { + const double expectP3[3]={39.0, 31.0, 31.0}; + 
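+          // rank 3 target mesh has 3 nodes, so 3 P1 tuples are expected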
CPPUNIT_ASSERT_EQUAL(3,parafieldP1->getField()->getNumberOfTuples()); + CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents()); + for(int kk=0;kk<3;kk++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP3[kk],res[kk],1e-12); + } + if(rank==4) + { + const double expectP4[6]={47.0, 47.0, 47.0, 39.0, 39.0, 31.0}; + CPPUNIT_ASSERT_EQUAL(6,parafieldP1->getField()->getNumberOfTuples()); + CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents()); + for(int kk=0;kk<6;kk++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP4[kk],res[kk],1e-12); + } + dec.sendData(); + } + // + delete parafieldP0; + delete parafieldP1; + mesh->decrRef(); + delete paramesh; + delete self_group; + delete target_group; + delete source_group; + // + MPI_Barrier(MPI_COMM_WORLD); +} + +void ParaMEDMEMTest::testInterpKernelDEC2DM1D_P0P0() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=3) + return ; + int nproc_source=2; + set procs_source; + set procs_target; + // + for (int i=0; icontainsMyRank()) + { + double targetCoords[18]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2, 0.7,0.2, -0.3,0.7, 0.2,0.7, 0.7,0.7 }; + mesh=MEDCouplingUMesh::New(); + mesh->setMeshDimension(2); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(9,2); + std::copy(targetCoords,targetCoords+18,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + if(rank==0) + { + int targetConn[7]={0,3,4,1, 1,4,2}; + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+4); + mesh->finishInsertingCells(); + } + else + { + int targetConn[11]={4,5,2, 6,7,4,3, 7,8,5,4}; + mesh->allocateCells(3); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn+3); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn+7); + mesh->finishInsertingCells(); + } + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + double *vals=parafield->getField()->getArray()->getPointer(); + if(rank==0) + { vals[0]=7.; vals[1]=8.; } + else + { vals[0]=9.; vals[1]=10.; vals[2]=11.; } + } + else + { + mesh=MEDCouplingUMesh::New("an example of -1 D mesh",-1); + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*target_group,"target mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + } + ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group); + if(source_group->containsMyRank()) + { + dec.setMethod("P0"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.sendData(); + dec.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[1],1e-12); + } + else + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[1],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[2],1e-12); + } + } + else + { + dec.setMethod("P0"); + dec.attachLocalField(parafield); + dec.synchronize(); + dec.setForcedRenormalization(false); + dec.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + 
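+      // the single -1D target cell receives the area-weighted mean of the source values:
+      // 0.25*(7+10+11) + 0.125*(8+9) = 9.125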
CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[0],1e-12); + dec.sendData(); + } + ParaMEDMEM::InterpKernelDEC dec2(*source_group,*target_group); + dec2.setMethod("P0"); + parafield->getField()->setNature(IntegralGlobConstraint); + if(source_group->containsMyRank()) + { + double *vals=parafield->getField()->getArray()->getPointer(); + if(rank==0) + { vals[0]=7.; vals[1]=8.; } + else + { vals[0]=9.; vals[1]=10.; vals[2]=11.; } + dec2.attachLocalField(parafield); + dec2.synchronize(); + dec2.sendData(); + dec2.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(11.25,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(5.625,res[1],1e-12); + } + else + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(5.625,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(11.25,res[1],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(11.25,res[2],1e-12); + } + } + else + { + dec2.attachLocalField(parafield); + dec2.synchronize(); + dec2.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_DOUBLES_EQUAL(45.,res[0],1e-12); + dec2.sendData(); + } + // + ParaMEDMEM::InterpKernelDEC dec3(*source_group,*target_group); + dec3.setMethod("P0"); + parafield->getField()->setNature(Integral); + if(source_group->containsMyRank()) + { + double *vals=parafield->getField()->getArray()->getPointer(); + if(rank==0) + { vals[0]=7.; vals[1]=8.; } + else + { vals[0]=9.; vals[1]=10.; vals[2]=11.; } + dec3.attachLocalField(parafield); + dec3.synchronize(); + dec3.sendData(); + dec3.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(11.25,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(5.625,res[1],1e-12); + } + else + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(5.625,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(11.25,res[1],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(11.25,res[2],1e-12); + } + } + else + { + dec3.attachLocalField(parafield); + dec3.synchronize(); + dec3.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_DOUBLES_EQUAL(45.,res[0],1e-12); + dec3.sendData(); + } + // + ParaMEDMEM::InterpKernelDEC dec4(*source_group,*target_group); + dec4.setMethod("P0"); + parafield->getField()->setNature(RevIntegral); + if(source_group->containsMyRank()) + { + double *vals=parafield->getField()->getArray()->getPointer(); + if(rank==0) + { vals[0]=7.; vals[1]=8.; } + else + { vals[0]=9.; vals[1]=10.; vals[2]=11.; } + dec4.attachLocalField(parafield); + dec4.synchronize(); + dec4.sendData(); + dec4.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[1],1e-12); + } + else + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[0],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[1],1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[2],1e-12); + } + } + else + { + dec4.attachLocalField(parafield); + dec4.synchronize(); + dec4.recvData(); + const double *res=parafield->getField()->getArray()->getConstPointer(); + CPPUNIT_ASSERT_DOUBLES_EQUAL(9.125,res[0],1e-12); + dec4.sendData(); + } + delete parafield; + delete paramesh; + mesh->decrRef(); + delete target_group; + delete source_group; + // + MPI_Barrier(MPI_COMM_WORLD); +} + +void ParaMEDMEMTest::testInterpKernelDECPartialProcs() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=3) + 
return ; + set procs_source; + set procs_target; + // + procs_source.insert(0); + procs_target.insert(1); + // + ParaMEDMEM::MEDCouplingUMesh *mesh=0; + ParaMEDMEM::ParaMESH *paramesh=0; + ParaMEDMEM::ParaFIELD *parafield=0; + // + ParaMEDMEM::CommInterface interface; + // + MPI_Barrier(MPI_COMM_WORLD); + double targetCoords[8]={ 0.,0., 1., 0., 0., 1., 1., 1. }; + CommInterface comm; + int grpIds[2]={0,1}; + MPI_Group grp,group_world; + comm.commGroup(MPI_COMM_WORLD,&group_world); + comm.groupIncl(group_world,2,grpIds,&grp); + MPI_Comm partialComm; + comm.commCreate(MPI_COMM_WORLD,grp,&partialComm); + // + ProcessorGroup* target_group=0; + ProcessorGroup* source_group=0; + // + ParaMEDMEM::InterpKernelDEC *dec=0; + if(rank==0 || rank==1) + { + target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target,partialComm); + source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source,partialComm); + if(source_group->containsMyRank()) + { + mesh=MEDCouplingUMesh::New(); + mesh->setMeshDimension(2); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(targetCoords,targetCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + int targetConn[4]={0,2,3,1}; + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn); + mesh->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + double *vals=parafield->getField()->getArray()->getPointer(); + vals[0]=7.; + dec=new ParaMEDMEM::InterpKernelDEC(*source_group,*target_group); + dec->attachLocalField(parafield); + dec->synchronize(); + dec->sendData(); + dec->recvData(); + } + else + { + mesh=MEDCouplingUMesh::New(); + mesh->setMeshDimension(2); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(targetCoords,targetCoords+8,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + int targetConn[6]={0,2,1,2,3,1}; + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+3); + mesh->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + paramesh=new ParaMESH(mesh,*target_group,"target mesh"); + parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafield->getField()->setNature(ConservativeVolumic); + dec=new ParaMEDMEM::InterpKernelDEC(*source_group,*target_group); + dec->attachLocalField(parafield); + dec->synchronize(); + dec->recvData(); + dec->sendData(); + } + } + delete parafield; + delete paramesh; + if(mesh) + mesh->decrRef(); + delete target_group; + delete source_group; + delete dec; + MPI_Barrier(MPI_COMM_WORLD); +} + +/*! + * This test reproduces bug of Gauthier on 13/9/2010 concerning 3DSurf meshes. + * It is possible to lead to dead lock in InterpKernelDEC when 3DSurfMeshes global bounding boxes intersects whereas cell bounding box intersecting only on one side. 
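+ * The regression check is simply that synchronize() returns on every rank instead of hanging;
+ * the data exchange and value checks remain commented out.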
+ */ +void ParaMEDMEMTest::testInterpKernelDEC3DSurfEmptyBBox() +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + // + if(size!=3) + return ; + int nproc_source = 1; + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + double coords[15]={1.,0.,0., 2.,0.,0., 2.,2.,0., 0.,2.,0., 0.5,0.5,1.}; + int conn[7]={0,1,2,3,0,3,4}; + mesh=MEDCouplingUMesh::New("Source mesh Proc0",2); + mesh->allocateCells(2); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn); + mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn+4); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(5,3); + std::copy(coords,coords+15,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + // + paramesh=new ParaMESH(mesh,*source_group,"source mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + parafieldP0->getField()->setNature(ConservativeVolumic); + valueP0[0]=7.; valueP0[1]=8.; + } + else + { + const char targetMeshName[]="target mesh"; + if(rank==1) + { + double coords[12]={0.25,0.25,0.5, 0.,0.25,0.5, 0.,0.,0.5, 0.25,0.,0.5}; + int conn[4]={0,1,2,3}; + mesh=MEDCouplingUMesh::New("Target mesh Proc1",2); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,3); + std::copy(coords,coords+12,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + } + if(rank==2) + { + double coords[12]={0.,0.25,0.5, 0.,0.,0.5, -1.,0.,0.5, -1.,0.25,0.5}; + int conn[4]={0,1,2,3}; + mesh=MEDCouplingUMesh::New("Target mesh Proc2",2); + mesh->allocateCells(1); + mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn); + mesh->finishInsertingCells(); + DataArrayDouble *myCoords=DataArrayDouble::New(); + myCoords->alloc(4,3); + std::copy(coords,coords+12,myCoords->getPointer()); + mesh->setCoords(myCoords); + myCoords->decrRef(); + paramesh=new ParaMESH(mesh,*target_group,targetMeshName); + } + ParaMEDMEM::ComponentTopology comptopo; + parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + parafieldP0->getField()->setNature(ConservativeVolumic); + } + // test 1 + ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group); + if (source_group->containsMyRank()) + { + dec.setMethod("P0"); + dec.attachLocalField(parafieldP0); + dec.synchronize(); + // dec.setForcedRenormalization(false); + // dec.sendData(); + // dec.recvData(); + // const double *valueP0=parafieldP0->getField()->getArray()->getPointer(); + // if(rank==0) + // { + // CPPUNIT_ASSERT_DOUBLES_EQUAL(7.4,valueP0[0],1e-7); + // CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540544,valueP0[1],1e-7); + // } + // if(rank==1) + // { + // CPPUNIT_ASSERT_DOUBLES_EQUAL(8.64054054054054,valueP0[0],1e-7); + // } + // if(rank==2) + // { + // CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540544,valueP0[0],1e-7); + // } + } + else + { + dec.setMethod("P0"); + dec.attachLocalField(parafieldP0); + dec.synchronize(); + // dec.setForcedRenormalization(false); + // dec.recvData(); + // const double *res=parafieldP0->getField()->getArray()->getConstPointer(); + // if(rank==3) + // { + // CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfTuples()); + // 
CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfComponents()); + // CPPUNIT_ASSERT_DOUBLES_EQUAL(7.4,res[0],1e-12); + // } + // if(rank==4) + // { + // CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfTuples()); + // CPPUNIT_ASSERT_EQUAL(1,parafieldP0->getField()->getNumberOfComponents()); + // CPPUNIT_ASSERT_DOUBLES_EQUAL(9.0540540540540526,res[0],1e-12); + // } + // dec.sendData(); + } + // + delete parafieldP0; + mesh->decrRef(); + delete paramesh; + delete self_group; + delete target_group; + delete source_group; + // + MPI_Barrier(MPI_COMM_WORLD); +} + +/*! + * Tests an asynchronous exchange between two codes + * one sends data with dtA as an interval, the max time being tmaxA + * the other one receives with dtB as an interval, the max time being tmaxB + */ +void ParaMEDMEMTest::testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA, + double dtB, double tmaxB, bool WithPointToPoint, bool Asynchronous, + bool WithInterp, const char *srcMeth, const char *targetMeth) +{ + std::string srcM(srcMeth); + std::string targetM(targetMeth); + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + + //the test is meant to run on five processors + if (size !=5) return ; + + int nproc_source = 3; + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()) + { + string master = filename_xml1; + + ostringstream strstream; + strstream <getField()->setNature(ConservativeVolumic);//InvertIntegral);//ConservativeVolumic); + } + else + parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + + int nb_local; + if(srcM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + // double * value= new double[nb_local]; + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + + dec.attachLocalField(icocofield); + + + } + + //loading the geometry for the target group + if (target_group->containsMyRank()) + { + string master= filename_xml2; + ostringstream strstream; + strstream << master<<(rank-nproc_source+1)<<".med"; + ostringstream meshname ; + meshname<< "Mesh_3_"<getField()->setNature(ConservativeVolumic);//InvertIntegral);//ConservativeVolumic); + } + else + parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo); + + int nb_local; + if(targetM=="P0") + nb_local=mesh->getNumberOfCells(); + else + nb_local=mesh->getNumberOfNodes(); + + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + + dec.attachLocalField(icocofield); + } + + + //attaching a DEC to the source group + + if (source_group->containsMyRank()) + { + cout<<"DEC usage"<getField()->getArray()->getPointer(); + int nb_local=parafield->getField()->getMesh()->getNumberOfCells(); + for (int i=0; icontainsMyRank()) + { + cout<<"DEC usage"< times; + for (double time=0; timegetVolumeIntegral(0,true); + cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time + << " VolumeIntegral " << vi + << " time*10000 " << time*10000 << endl ; + + CPPUNIT_ASSERT_DOUBLES_EQUAL(vi,time*10000,0.001); + } + + } + + delete source_group; + delete target_group; + delete self_group; + delete parafield ; + delete paramesh ; + mesh->decrRef() ; + delete icocofield ; + + cout << "testAsynchronousInterpKernelDEC_2D" << rank << " MPI_Barrier " << endl ; + + if (Asynchronous) MPI_Barrier(MPI_COMM_WORLD); + cout << "end of InterpKernelDEC_2D test"< + +#include +#include +#include +#include + +using namespace std; 
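+// The MEDLoader tests below read the pointe.med and polygones.med resource files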
+using namespace INTERP_KERNEL; +using namespace ParaMEDMEM; + +void ParaMEDMEMTest::testMEDLoaderRead1() +{ + string fileName=getResourceFile("pointe.med"); + vector meshNames=MEDLoader::GetMeshNames(fileName.c_str()); + CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size()); + MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(16,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(3,(int)mesh->getAllGeoTypes().size()); + for(int i=0;i<12;i++) + CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(12)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,mesh->getTypeOfCell(13)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,mesh->getTypeOfCell(14)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(15)); + CPPUNIT_ASSERT_EQUAL((std::size_t)90,mesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(701,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+90,0)); + CPPUNIT_ASSERT_EQUAL(705,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+17,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12); + mesh->decrRef(); + // + vector families=MEDLoader::GetMeshFamiliesNames(fileName.c_str(),meshNames[0].c_str()); + CPPUNIT_ASSERT_EQUAL(8,(int)families.size()); + CPPUNIT_ASSERT(families[2]=="FAMILLE_ELEMENT_3"); + // + vector families2; + families2.push_back(families[2]); + mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),0,families2); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(2,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllGeoTypes().size()); + CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(0)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(1)); + CPPUNIT_ASSERT_EQUAL((std::size_t)11,mesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(132,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+11,0)); + CPPUNIT_ASSERT_EQUAL(16,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+3,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12); + mesh->decrRef(); + // + vector groups=MEDLoader::GetMeshGroupsNames(fileName.c_str(),meshNames[0].c_str()); + CPPUNIT_ASSERT_EQUAL(5,(int)groups.size()); + CPPUNIT_ASSERT(groups[0]=="groupe1"); + CPPUNIT_ASSERT(groups[1]=="groupe2"); + CPPUNIT_ASSERT(groups[2]=="groupe3"); + CPPUNIT_ASSERT(groups[3]=="groupe4"); + CPPUNIT_ASSERT(groups[4]=="groupe5"); + vector groups2; + groups2.push_back(groups[0]); + mesh=MEDLoader::ReadUMeshFromGroups(fileName.c_str(),meshNames[0].c_str(),0,groups2); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(7,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllGeoTypes().size()); + for(int i=0;i<6;i++) + 
CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(6)); + CPPUNIT_ASSERT_EQUAL((std::size_t)36,mesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(254,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+36,0)); + CPPUNIT_ASSERT_EQUAL(141,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+8,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12); + mesh->decrRef(); + // + std::vector fieldsName=MEDLoader::GetCellFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str()); + CPPUNIT_ASSERT_EQUAL(2,(int)fieldsName.size()); + CPPUNIT_ASSERT(fieldsName[0]=="fieldcelldoublescalar"); + CPPUNIT_ASSERT(fieldsName[1]=="fieldcelldoublevector"); + std::vector > its0=MEDLoader::GetCellFieldIterations(fileName.c_str(),meshNames[0].c_str(),fieldsName[0].c_str()); + CPPUNIT_ASSERT_EQUAL(1,(int)its0.size()); + CPPUNIT_ASSERT_EQUAL(-1,its0[0].first); + CPPUNIT_ASSERT_EQUAL(-1,its0[0].second); + std::vector > its1=MEDLoader::GetCellFieldIterations(fileName.c_str(),meshNames[0].c_str(),fieldsName[1].c_str()); + CPPUNIT_ASSERT_EQUAL(1,(int)its1.size()); + CPPUNIT_ASSERT_EQUAL(-1,its1[0].first); + CPPUNIT_ASSERT_EQUAL(-1,its1[0].second); + // + MEDCouplingFieldDouble *field0=MEDLoader::ReadFieldCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[0].c_str(),its0[0].first,its0[0].second); + field0->checkCoherency(); + CPPUNIT_ASSERT(field0->getName()==fieldsName[0]); + CPPUNIT_ASSERT_EQUAL(1,field0->getNumberOfComponents()); + CPPUNIT_ASSERT_EQUAL(16,field0->getNumberOfTuples()); + const double expectedValues[16]={1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,2.,3.,3.,2.}; + double diffValue[16]; + std::transform(field0->getArray()->getPointer(),field0->getArray()->getPointer()+16,expectedValues,diffValue,std::minus()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue,diffValue+16),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue,diffValue+16),1e-12); + const MEDCouplingUMesh *constMesh=dynamic_cast(field0->getMesh()); + CPPUNIT_ASSERT(constMesh); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllGeoTypes().size()); + for(int i=0;i<12;i++) + CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(12)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(13)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(14)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(15)); + CPPUNIT_ASSERT_EQUAL((std::size_t)90,constMesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getConstPointer(),constMesh->getNodalConnectivity()->getConstPointer()+90,0)); + CPPUNIT_ASSERT_EQUAL(705,std::accumulate(constMesh->getNodalConnectivityIndex()->getConstPointer(),constMesh->getNodalConnectivityIndex()->getConstPointer()+17,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getConstPointer(),constMesh->getCoords()->getConstPointer()+57,0),1e-12); + field0->decrRef(); + // + MEDCouplingFieldDouble 
*field1=MEDLoader::ReadFieldCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[1].c_str(),its1[0].first,its1[0].second); + field1->checkCoherency(); + CPPUNIT_ASSERT(field1->getName()==fieldsName[1]); + CPPUNIT_ASSERT_EQUAL(3,field1->getNumberOfComponents()); + CPPUNIT_ASSERT_EQUAL(16,field1->getNumberOfTuples()); + const double expectedValues2[48]={1.,0.,1.,1.,0.,1.,1.,0.,1.,2.,1.,0.,2.,1.,0.,2.,1.,0.,3.,0.,1.,3.,0.,1.,3.,0.,1.,4.,1.,0.,4.,1.,0.,4.,1.,0.,5.,0.,0.,6.,1.,1.,6.,0.,0.,5.,1.,1.}; + double diffValue2[48]; + std::transform(field1->getArray()->getPointer(),field1->getArray()->getPointer()+48,expectedValues2,diffValue2,std::minus()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue2,diffValue2+48),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue2,diffValue2+48),1e-12); + constMesh=dynamic_cast(field1->getMesh()); + CPPUNIT_ASSERT(constMesh); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllGeoTypes().size()); + for(int i=0;i<12;i++) + CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(12)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(13)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(14)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(15)); + CPPUNIT_ASSERT_EQUAL((std::size_t)90,constMesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getConstPointer(),constMesh->getNodalConnectivity()->getConstPointer()+90,0)); + CPPUNIT_ASSERT_EQUAL(705,std::accumulate(constMesh->getNodalConnectivityIndex()->getConstPointer(),constMesh->getNodalConnectivityIndex()->getConstPointer()+17,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getConstPointer(),constMesh->getCoords()->getConstPointer()+57,0),1e-12); + field1->decrRef(); + //fields on nodes + std::vector fieldsNameNode=MEDLoader::GetNodeFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str()); + CPPUNIT_ASSERT_EQUAL(2,(int)fieldsNameNode.size()); + CPPUNIT_ASSERT(fieldsNameNode[0]=="fieldnodedouble"); + CPPUNIT_ASSERT(fieldsNameNode[1]=="fieldnodeint"); + std::vector > its0Node=MEDLoader::GetNodeFieldIterations(fileName.c_str(),meshNames[0].c_str(),fieldsNameNode[0].c_str()); + CPPUNIT_ASSERT_EQUAL(3,(int)its0Node.size()); + CPPUNIT_ASSERT_EQUAL(-1,its0Node[0].first); + CPPUNIT_ASSERT_EQUAL(-1,its0Node[0].second); + CPPUNIT_ASSERT_EQUAL(1,its0Node[1].first); + CPPUNIT_ASSERT_EQUAL(-1,its0Node[1].second); + CPPUNIT_ASSERT_EQUAL(2,its0Node[2].first); + CPPUNIT_ASSERT_EQUAL(-1,its0Node[2].second); + MEDCouplingFieldDouble *field0Nodes=MEDLoader::ReadFieldNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[0].first,its0Node[0].second); + field0Nodes->checkCoherency(); + CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]); + CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents()); + CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples()); + const double expectedValues3[19]={1.,1.,1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.}; + double diffValue3[19]; + std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues3,diffValue3,std::minus()); + 
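+  // all per-node differences against expectedValues3 must vanish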
CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12); + constMesh=dynamic_cast(field0Nodes->getMesh()); + CPPUNIT_ASSERT(constMesh); + field0Nodes->decrRef(); + // + field0Nodes=MEDLoader::ReadFieldNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[2].first,its0Node[2].second); + field0Nodes->checkCoherency(); + CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]); + CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents()); + CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples()); + const double expectedValues4[19]={1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.,7.,7.}; + std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues4,diffValue3,std::minus()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12); + constMesh=dynamic_cast(field0Nodes->getMesh()); + CPPUNIT_ASSERT(constMesh); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllGeoTypes().size()); + for(int i=0;i<12;i++) + CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(12)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(13)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(14)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(15)); + CPPUNIT_ASSERT_EQUAL((std::size_t)90,constMesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getConstPointer(),constMesh->getNodalConnectivity()->getConstPointer()+90,0)); + CPPUNIT_ASSERT_EQUAL(705,std::accumulate(constMesh->getNodalConnectivityIndex()->getConstPointer(),constMesh->getNodalConnectivityIndex()->getConstPointer()+17,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getConstPointer(),constMesh->getCoords()->getConstPointer()+57,0),1e-12); + field0Nodes->decrRef(); + // + field0Nodes=MEDLoader::ReadFieldNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[0].first,its0Node[0].second); + field0Nodes->checkCoherency(); + CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]); + CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents()); + CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples()); + const double expectedValues5[19]={1.,1.,1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.}; + std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues5,diffValue3,std::minus()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12); + constMesh=dynamic_cast(field0Nodes->getMesh()); + CPPUNIT_ASSERT(constMesh); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllGeoTypes().size()); + for(int i=0;i<12;i++) + 
CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(12)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(13)); + CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(14)); + CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(15)); + CPPUNIT_ASSERT_EQUAL((std::size_t)90,constMesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getConstPointer(),constMesh->getNodalConnectivity()->getConstPointer()+90,0)); + CPPUNIT_ASSERT_EQUAL(705,std::accumulate(constMesh->getNodalConnectivityIndex()->getConstPointer(),constMesh->getNodalConnectivityIndex()->getConstPointer()+17,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getConstPointer(),constMesh->getCoords()->getConstPointer()+57,0),1e-12); + field0Nodes->decrRef(); +} + +void ParaMEDMEMTest::testMEDLoaderPolygonRead() +{ + string fileName=getResourceFile("polygones.med"); + vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str()); + CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size()); + CPPUNIT_ASSERT(meshNames[0]=="Bord"); + MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0); + mesh->checkCoherency(); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(538,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(579,mesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllGeoTypes().size()); + for(int i=0;i<514;i++) + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(i)); + for(int i=514;i<538;i++) + CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+1737,0),1e-12); + const double expectedVals1[12]={1.4851585216522212,-0.5,0.,1.4851585216522212,-0.4,0.,1.4851585216522212,-0.3,0.,1.5741585216522211,-0.5,0.
}; + double diffValue1[12]; + std::transform(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+12,expectedVals1,diffValue1,std::minus<double>()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue1,diffValue1+12),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue1,diffValue1+12),1e-12); + CPPUNIT_ASSERT_EQUAL((std::size_t)2768,mesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(651050,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+2768,0)); + CPPUNIT_ASSERT_EQUAL(725943,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+539,0)); + mesh->decrRef(); + // + std::vector<std::string> fieldsName=MEDLoader::GetCellFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str()); + CPPUNIT_ASSERT_EQUAL(3,(int)fieldsName.size()); + CPPUNIT_ASSERT(fieldsName[0]=="bord_:_distorsion"); + CPPUNIT_ASSERT(fieldsName[1]=="bord_:_familles"); + CPPUNIT_ASSERT(fieldsName[2]=="bord_:_non-ortho"); + std::vector<std::pair<int,int> > its0=MEDLoader::GetCellFieldIterations(fileName.c_str(),meshNames[0].c_str(),fieldsName[0].c_str()); + CPPUNIT_ASSERT_EQUAL(1,(int)its0.size()); + MEDCouplingFieldDouble *field=MEDLoader::ReadFieldCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[0].c_str(),its0[0].first,its0[0].second); + field->checkCoherency(); + CPPUNIT_ASSERT(field->getName()==fieldsName[0]); + CPPUNIT_ASSERT_EQUAL(1,field->getNumberOfComponents()); + CPPUNIT_ASSERT_EQUAL(538,field->getNumberOfTuples()); + const MEDCouplingUMesh *constMesh=dynamic_cast<const MEDCouplingUMesh *>(field->getMesh()); + CPPUNIT_ASSERT(constMesh); + CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(2,constMesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(538,constMesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(579,constMesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(2,(int)constMesh->getAllGeoTypes().size()); + for(int i=0;i<514;i++) + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,constMesh->getTypeOfCell(i)); + for(int i=514;i<538;i++) + CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,constMesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,std::accumulate(constMesh->getCoords()->getConstPointer(),constMesh->getCoords()->getConstPointer()+1737,0),1e-12); + std::transform(constMesh->getCoords()->getConstPointer(),constMesh->getCoords()->getConstPointer()+12,expectedVals1,diffValue1,std::minus<double>()); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue1,diffValue1+12),1e-12); + CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue1,diffValue1+12),1e-12); + CPPUNIT_ASSERT_EQUAL((std::size_t)2768,constMesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(651050,std::accumulate(constMesh->getNodalConnectivity()->getConstPointer(),constMesh->getNodalConnectivity()->getConstPointer()+2768,0)); + CPPUNIT_ASSERT_EQUAL(725943,std::accumulate(constMesh->getNodalConnectivityIndex()->getConstPointer(),constMesh->getNodalConnectivityIndex()->getConstPointer()+539,0)); + const double *values=field->getArray()->getPointer(); + CPPUNIT_ASSERT_DOUBLES_EQUAL(2.87214203182918,std::accumulate(values,values+538,0.),1e-12); + field->decrRef(); +} + +void ParaMEDMEMTest::testMEDLoaderPolyhedronRead() +{ + string fileName=getResourceFile("poly3D.med"); + vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str()); + CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size()); + CPPUNIT_ASSERT(meshNames[0]=="poly3D"); + MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0); +
mesh->checkCoherency(); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(3,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllGeoTypes().size()); + CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(0)); + CPPUNIT_ASSERT_EQUAL(NORM_POLYHED,mesh->getTypeOfCell(1)); + CPPUNIT_ASSERT_EQUAL(NORM_POLYHED,mesh->getTypeOfCell(2)); + CPPUNIT_ASSERT_EQUAL((std::size_t)98,mesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(725,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+98,0)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(110.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12); + CPPUNIT_ASSERT_EQUAL(155,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+4,0)); + mesh->decrRef(); + // + mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),-1); + mesh->checkCoherency(); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(17,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(3,(int)mesh->getAllGeoTypes().size()); + CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(0)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(1)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(2)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(3)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(4)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(5)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(6)); + CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(7)); + CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(8)); + CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(9)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(10)); + CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(11)); + CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(12)); + CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(13)); + CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(14)); + CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(15)); + CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(16)); + CPPUNIT_ASSERT_DOUBLES_EQUAL(110.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12); + CPPUNIT_ASSERT_EQUAL((std::size_t)83,mesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(619,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+83,0)); + mesh->decrRef(); + // + vector families=MEDLoader::GetMeshFamiliesNames(fileName.c_str(),meshNames[0].c_str()); + CPPUNIT_ASSERT_EQUAL(4,(int)families.size()); + CPPUNIT_ASSERT(families[0]=="FAMILLE_FACE_POLYGONS3"); + CPPUNIT_ASSERT(families[1]=="FAMILLE_FACE_QUAD41"); + CPPUNIT_ASSERT(families[2]=="FAMILLE_FACE_TRIA32"); + CPPUNIT_ASSERT(families[3]=="FAMILLE_ZERO"); + vector families2; + families2.push_back(families[0]); + mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),-1,families2); + mesh->checkCoherency(); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(3,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes()); + 
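The third argument of ReadUMeshFromFile used twice in this test (0, then -1) is the mesh dimension relative to the maximal one stored in the file: 0 loads the 3D cells (tetrahedra and polyhedra here), -1 the 2D faces one level below, and ReadUMeshFromFamilies restricts the same kind of read to the cells of the listed families. A stand-alone sketch of these calls, not part of the patch and with all checks omitted:

#include <string>
#include <vector>
#include "MEDLoader.hxx"
#include "MEDCouplingUMesh.hxx"

void readPoly3DAtBothLevels(const std::string& fileName)   // e.g. the path returned by getResourceFile("poly3D.med")
{
  ParaMEDMEM::MEDCouplingUMesh *cells=MEDLoader::ReadUMeshFromFile(fileName.c_str(),"poly3D",0);   // 3D cells
  ParaMEDMEM::MEDCouplingUMesh *faces=MEDLoader::ReadUMeshFromFile(fileName.c_str(),"poly3D",-1);  // 2D faces
  std::vector<std::string> fams(1,"FAMILLE_FACE_POLYGONS3");                                       // family name taken from the test
  ParaMEDMEM::MEDCouplingUMesh *famMesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),"poly3D",-1,fams); // faces of one family only
  // ... inspect the meshes as the surrounding assertions do ...
  famMesh->decrRef();   // MEDCoupling objects are reference counted: release them explicitly
  faces->decrRef();
  cells->decrRef();
}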
CPPUNIT_ASSERT_EQUAL(1,(int)mesh->getAllGeoTypes().size()); + for(int i=0;i<3;i++) + CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i)); + CPPUNIT_ASSERT_EQUAL((std::size_t)19,mesh->getNodalConnectivity()->getNbOfElems()); + CPPUNIT_ASSERT_EQUAL(117,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+19,0)); + mesh->decrRef(); + // + mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),0,families2); + CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension()); + CPPUNIT_ASSERT_EQUAL(0,mesh->getNumberOfCells()); + CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes()); + CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension()); + CPPUNIT_ASSERT_EQUAL(0,(int)mesh->getAllGeoTypes().size()); + mesh->decrRef(); +} diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx new file mode 100644 index 000000000..91ef8bb8a --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx @@ -0,0 +1,149 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "InterpolationUtils.hxx" + +#include + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + + +using namespace std; +using namespace ParaMEDMEM; + +/* + * Check methods defined in MPPIProcessorGroup.hxx + * + (+) MPIProcessorGroup(const CommInterface& interface); + (+) MPIProcessorGroup(const CommInterface& interface, set proc_ids); + (u) MPIProcessorGroup (const ProcessorGroup& proc_group, set proc_ids); + (+) MPIProcessorGroup(const CommInterface& interface,int pstart, int pend); + (+) virtual ~MPIProcessorGroup(); + (+) virtual ProcessorGroup* fuse (const ProcessorGroup&) const; + (u) void intersect (ProcessorGroup&){}; + (+) int myRank() const {int rank; MPI_Comm_rank(_comm,&rank); return rank;} + (+) bool containsMyRank() const { int rank; MPI_Group_rank(_group, &rank); return (rank!=MPI_UNDEFINED);} + (+) int translateRank(const ProcessorGroup* group, int rank) const; + (+) const MPI_Comm* getComm() const {return &_comm;} + (+) ProcessorGroup* createComplementProcGroup() const; + (o) ProcessorGroup* createProcGroup() const; + +*/ + +void ParaMEDMEMTest::testMPIProcessorGroup_constructor() +{ + CommInterface comm_interface; + MPIProcessorGroup* group = new MPIProcessorGroup(comm_interface);; + int size; + MPI_Comm_size(MPI_COMM_WORLD, &size); + CPPUNIT_ASSERT_EQUAL(size,group->size()); + 
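The header comment above lists the MPIProcessorGroup interface that the three tests below exercise one by one; gathered into a single stand-alone sketch (MPI boilerplate and main() added here for completeness, they are not part of the patch):

#include <mpi.h>
#include <set>
#include <iostream>
#include "CommInterface.hxx"
#include "MPIProcessorGroup.hxx"

int main(int argc, char *argv[])
{
  MPI_Init(&argc,&argv);
  {
    ParaMEDMEM::CommInterface ci;
    ParaMEDMEM::MPIProcessorGroup world(ci);        // every rank of MPI_COMM_WORLD
    std::set<int> ids; ids.insert(0); ids.insert(1);
    ParaMEDMEM::MPIProcessorGroup pair(ci,ids);     // ranks 0 and 1 only; throws INTERP_KERNEL::Exception on a 1-proc run
    ParaMEDMEM::MPIProcessorGroup first(ci,0,0);    // contiguous range [pstart,pend], here just rank 0
    if(pair.containsMyRank())
      std::cout << "my rank inside 'pair': " << pair.myRank() << std::endl;
  }                                                 // groups are destroyed before MPI_Finalize
  MPI_Finalize();
  return 0;
}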
int size2; + const MPI_Comm* communicator=group->getComm(); + MPI_Comm_size(*communicator, &size2); + CPPUNIT_ASSERT_EQUAL(size,size2); + delete group; + + set procs; + + procs.insert(0); + procs.insert(1); + if (size==1) + CPPUNIT_ASSERT_THROW(group=new MPIProcessorGroup(comm_interface,procs),INTERP_KERNEL::Exception); + else + { + CPPUNIT_ASSERT_NO_THROW( group=new MPIProcessorGroup(comm_interface,procs)); + CPPUNIT_ASSERT_EQUAL (group->size(),2); + delete group; + } + + //throws because plast1) + { + group=new MPIProcessorGroup(comm_interface,0,size-2); + CPPUNIT_ASSERT_EQUAL(group->size(),size-1); + delete group; + } + +} + +void ParaMEDMEMTest::testMPIProcessorGroup_boolean() +{ + int size; + MPI_Comm_size(MPI_COMM_WORLD, &size); + + CommInterface comm_interface; + MPIProcessorGroup group(comm_interface,0,0); + MPIProcessorGroup group2(comm_interface,size-1,size-1); + ProcessorGroup* group_fuse=group.fuse(group2); + int group_fuse_size=(size==1)?1:2; + CPPUNIT_ASSERT_EQUAL(group_fuse_size,group_fuse->size()); + + ProcessorGroup* group_complement=((MPIProcessorGroup*)group_fuse)->createComplementProcGroup(); + CPPUNIT_ASSERT_EQUAL(group_complement->size(),size-group_fuse_size); + + delete group_fuse; + delete group_complement; + + //intersect not implemented yet + // if (size>1) + // { + // MPIProcessorGroup group3(comm_interface,0,size-2); + // MPIProcessorGroup group4(comm_interface,1,size-1); + // group3.intersect(group4); + // CPPUNIT_ASSERT_EQUAL(group3.size(),size-2); + // } +} + +void ParaMEDMEMTest::testMPIProcessorGroup_rank() +{ + int size; + MPI_Comm_size(MPI_COMM_WORLD, &size); + int rank; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + CommInterface comm_interface; + MPIProcessorGroup group(comm_interface,0,0); + MPIProcessorGroup group2(comm_interface,size-1,size-1); + ProcessorGroup* group_fuse=group2.fuse(group); + + if (group.containsMyRank()) + CPPUNIT_ASSERT_EQUAL (group.myRank(), rank); + + if (group2.containsMyRank()) + { + int trank=group_fuse->translateRank(&group2,0); + if (size==1) + CPPUNIT_ASSERT_EQUAL(trank,0); + else + CPPUNIT_ASSERT_EQUAL(trank,1); + } + delete group_fuse; +} diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_NonCoincidentDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_NonCoincidentDEC.cxx new file mode 100644 index 000000000..6ab7130df --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_NonCoincidentDEC.cxx @@ -0,0 +1,256 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifdef MED_ENABLE_FVM + +#include "ParaMEDMEMTest.hxx" +#include + +#include "MEDMEM_Exception.hxx" +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "DEC.hxx" +#include "NonCoincidentDEC.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "UnstructuredParaSUPPORT.hxx" +#include "ICoCoMEDField.hxx" + +#include + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + + +using namespace std; +using namespace ParaMEDMEM; +using namespace MEDMEM; + +/* + * Check methods defined in InterpKernelDEC.hxx + * + InterpKernelDEC(); + InterpKernelDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group); + virtual ~InterpKernelDEC(); + void synchronize(); + void recvData(); + void sendData(); +*/ + +void ParaMEDMEMTest::testNonCoincidentDEC_2D() +{ + + int size; + MPI_Comm_size(MPI_COMM_WORLD,&size); + + //the test is meant to run on five processors + if (size !=5) return ; + + testNonCoincidentDEC( "/share/salome/resources/med/square1_split", + "Mesh_2", + "/share/salome/resources/med/square2_split", + "Mesh_3", + 3, + 1e-6); +} + +void ParaMEDMEMTest::testNonCoincidentDEC_3D() +{ + int size; + MPI_Comm_size(MPI_COMM_WORLD,&size); + + //the test is meant to run on five processors + if (size !=4) return ; + + testNonCoincidentDEC( "/share/salome/resources/med/blade_12000_split2", + "Mesh_1", + "/share/salome/resources/med/blade_3000_split2", + "Mesh_1", + 2, + 1e4); +} + +void ParaMEDMEMTest::testNonCoincidentDEC(const string& filename1, + const string& meshname1, + const string& filename2, + const string& meshname2, + int nproc_source, + double epsilon) +{ + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + + set self_procs; + set procs_source; + set procs_target; + + for (int i=0; i* field; + ParaMEDMEM::ParaMESH* paramesh; + ParaMEDMEM::ParaFIELD* parafield; + + string filename_xml1 = getResourceFile(filename1); + string filename_xml2 = getResourceFile(filename2); + //string filename_seq_wr = makeTmpFile(""); + //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med"); + + // To remove tmp files from disk + ParaMEDMEMTest_TmpFilesRemover aRemover; + //aRemover.Register(filename_seq_wr); + //aRemover.Register(filename_seq_med); + MPI_Barrier(MPI_COMM_WORLD); + ICoCo::Field* icocofield; + if (source_group->containsMyRank()) + { + string master = filename_xml1; + + ostringstream strstream; + strstream <getNumberOfElements(MED_EN::MED_ALL_ELEMENTS); + double * value= new double[nb_local]; + for(int ielem=0; ielemgetField()->setValue(value); + + icocofield=new ICoCo::MEDField(paramesh,parafield); + + dec.attachLocalField(icocofield); + delete [] value; + } + + //loading the geometry for the target group + if (target_group->containsMyRank()) + { + string master= filename_xml2; + ostringstream strstream; + strstream << master<<(rank-nproc_source+1)<<".med"; + ostringstream meshname ; + meshname<< meshname2<<"_"<getNumberOfElements(MED_EN::MED_ALL_ELEMENTS); + double * value= new 
double[nb_local]; + for(int ielem=0; ielemgetField()->setValue(value); + icocofield=new ICoCo::MEDField(paramesh,parafield); + + dec.attachLocalField(icocofield); + delete [] value; + } + + + //attaching a DEC to the source group + double field_before_int; + double field_after_int; + + if (source_group->containsMyRank()) + { + field_before_int = parafield->getVolumeIntegral(1); + MPI_Bcast(&field_before_int, 1,MPI_DOUBLE, 0,MPI_COMM_WORLD); + dec.synchronize(); + cout<<"DEC usage"<write(MED_DRIVER,"./sourcesquarenc"); + //parafield->write(MED_DRIVER,"./sourcesquarenc","boundary"); + + + } + + //attaching a DEC to the target group + if (target_group->containsMyRank()) + { + MPI_Bcast(&field_before_int, 1,MPI_DOUBLE, 0,MPI_COMM_WORLD); + + dec.synchronize(); + dec.setOption("ForcedRenormalization",false); + dec.recvData(); + //paramesh->write(MED_DRIVER, "./targetsquarenc"); + //parafield->write(MED_DRIVER, "./targetsquarenc", "boundary"); + field_after_int = parafield->getVolumeIntegral(1); + + } + MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&field_after_int, 1,MPI_DOUBLE, size-1,MPI_COMM_WORLD); + + CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, epsilon); + + delete source_group; + delete target_group; + delete self_group; + delete icocofield; + delete paramesh; + delete parafield; + delete support; + delete parasupport; + delete mesh; + MPI_Barrier(MPI_COMM_WORLD); + +} +#endif diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx new file mode 100644 index 000000000..c7a610461 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx @@ -0,0 +1,212 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "OverlapDEC.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ComponentTopology.hxx" + +#include "MEDCouplingUMesh.hxx" + +#include + +void ParaMEDMEMTest::testOverlapDEC1() +{ + std::string srcM("P0"); + std::string targetM("P0"); + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + + if (size != 3) return ; + + int nproc = 3; + std::set procs; + + for (int i=0; isetMeshDimension(2); + ParaMEDMEM::DataArrayDouble *myCoords=ParaMEDMEM::DataArrayDouble::New(); + myCoords->alloc(5,2); + std::copy(coordsS,coordsS+10,myCoords->getPointer()); + meshS->setCoords(myCoords); + myCoords->decrRef(); + int connS[7]={0,3,4,1, 1,4,2}; + meshS->allocateCells(2); + meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS); + meshS->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connS+4); + meshS->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh"); + parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo); + parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint + double *valsS=parafieldS->getField()->getArray()->getPointer(); + valsS[0]=7.; valsS[1]=8.; + // + meshT=ParaMEDMEM::MEDCouplingUMesh::New(); + meshT->setMeshDimension(2); + myCoords=ParaMEDMEM::DataArrayDouble::New(); + myCoords->alloc(3,2); + std::copy(coordsT,coordsT+6,myCoords->getPointer()); + meshT->setCoords(myCoords); + myCoords->decrRef(); + int connT[3]={0,2,1}; + meshT->allocateCells(1); + meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT); + meshT->finishInsertingCells(); + parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh"); + parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo); + parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint + double *valsT=parafieldT->getField()->getArray()->getPointer(); + valsT[0]=7.; + } + // + if(rank==1) + { + const double coordsS[10]={1.,0.,0.5,0.5,1.,0.5,0.5,1.,1.,1.}; + const double coordsT[6]={0.,0.,0.5,0.5,0.,1.}; + meshS=ParaMEDMEM::MEDCouplingUMesh::New(); + meshS->setMeshDimension(2); + ParaMEDMEM::DataArrayDouble *myCoords=ParaMEDMEM::DataArrayDouble::New(); + myCoords->alloc(5,2); + std::copy(coordsS,coordsS+10,myCoords->getPointer()); + meshS->setCoords(myCoords); + myCoords->decrRef(); + int connS[7]={0,1,2, 1,3,4,2}; + meshS->allocateCells(2); + meshS->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connS); + meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS+3); + meshS->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh"); + parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo); + parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint + double *valsS=parafieldS->getField()->getArray()->getPointer(); + valsS[0]=9.; 
valsS[1]=11.; + // + meshT=ParaMEDMEM::MEDCouplingUMesh::New(); + meshT->setMeshDimension(2); + myCoords=ParaMEDMEM::DataArrayDouble::New(); + myCoords->alloc(3,2); + std::copy(coordsT,coordsT+6,myCoords->getPointer()); + meshT->setCoords(myCoords); + myCoords->decrRef(); + int connT[3]={0,2,1}; + meshT->allocateCells(1); + meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT); + meshT->finishInsertingCells(); + parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh"); + parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo); + parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint + double *valsT=parafieldT->getField()->getArray()->getPointer(); + valsT[0]=8.; + } + // + if(rank==2) + { + const double coordsS[8]={0.,0.5, 0.5,0.5, 0.,1., 0.5,1.}; + const double coordsT[6]={0.5,0.5,0.,1.,1.,1.}; + meshS=ParaMEDMEM::MEDCouplingUMesh::New(); + meshS->setMeshDimension(2); + ParaMEDMEM::DataArrayDouble *myCoords=ParaMEDMEM::DataArrayDouble::New(); + myCoords->alloc(4,2); + std::copy(coordsS,coordsS+8,myCoords->getPointer()); + meshS->setCoords(myCoords); + myCoords->decrRef(); + int connS[4]={0,2,3,1}; + meshS->allocateCells(1); + meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS); + meshS->finishInsertingCells(); + ParaMEDMEM::ComponentTopology comptopo; + parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh"); + parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo); + parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint + double *valsS=parafieldS->getField()->getArray()->getPointer(); + valsS[0]=10.; + // + meshT=ParaMEDMEM::MEDCouplingUMesh::New(); + meshT->setMeshDimension(2); + myCoords=ParaMEDMEM::DataArrayDouble::New(); + myCoords->alloc(3,2); + std::copy(coordsT,coordsT+6,myCoords->getPointer()); + meshT->setCoords(myCoords); + myCoords->decrRef(); + int connT[3]={0,1,2}; + meshT->allocateCells(1); + meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT); + meshT->finishInsertingCells(); + parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh"); + parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo); + parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint + double *valsT=parafieldT->getField()->getArray()->getPointer(); + valsT[0]=9.; + } + dec.attachSourceLocalField(parafieldS); + dec.attachTargetLocalField(parafieldT); + dec.synchronize(); + dec.sendRecvData(true); + // + if(rank==0) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(8.75,parafieldT->getField()->getArray()->getIJ(0,0),1e-12); + } + if(rank==1) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12); + } + if(rank==2) + { + CPPUNIT_ASSERT_DOUBLES_EQUAL(10.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12); + } + delete parafieldS; + delete parafieldT; + delete parameshS; + delete parameshT; + meshS->decrRef(); + meshT->decrRef(); + + MPI_Barrier(MPI_COMM_WORLD); +} + diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_StructuredCoincidentDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_StructuredCoincidentDEC.cxx new file mode 100644 index 000000000..491bbf9e9 --- /dev/null +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_StructuredCoincidentDEC.cxx @@ -0,0 +1,160 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU 
Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "ParaMEDMEMTest.hxx" +#include + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "DEC.hxx" +#include "StructuredCoincidentDEC.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ComponentTopology.hxx" +#include "ICoCoMEDField.hxx" +#include "MEDLoader.hxx" + +#include + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +/* + * Check methods defined in StructuredCoincidentDEC.hxx + * + StructuredCoincidentDEC(); + StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group); + virtual ~StructuredCoincidentDEC(); + void synchronize(); + void recvData(); + void sendData(); +*/ + +void ParaMEDMEMTest::testStructuredCoincidentDEC() { + string testname="ParaMEDMEM - testStructured CoincidentDEC"; + // MPI_Init(&argc, &argv); + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (size<4) { + return; + } + + ParaMEDMEM::CommInterface interface; + + ParaMEDMEM::MPIProcessorGroup self_group (interface,rank,rank); + ParaMEDMEM::MPIProcessorGroup target_group(interface,3,size-1); + ParaMEDMEM::MPIProcessorGroup source_group (interface,0,2); + + ParaMEDMEM::MEDCouplingUMesh* mesh; + ParaMEDMEM::ParaMESH* paramesh; + ParaMEDMEM::ParaFIELD* parafield; + + string filename_xml1 = getResourceFile("square1_split"); + string filename_2 = getResourceFile("square1.med"); + //string filename_seq_wr = makeTmpFile(""); + //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med"); + + // To remove tmp files from disk + ParaMEDMEMTest_TmpFilesRemover aRemover; + + //loading the geometry for the source group + + ParaMEDMEM::StructuredCoincidentDEC dec(source_group, target_group); + + MPI_Barrier(MPI_COMM_WORLD); + if (source_group.containsMyRank()) { + string master = filename_xml1; + + ostringstream strstream; + strstream <getNumberOfCells(); + const int* global_numbering = paramesh->getGlobalNumberingCell(); + + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetCellMesh(),parafield->getField()); + + dec.attachLocalField(parafield); + dec.synchronize(); + dec.sendData(); + //delete icocofield; + } + + //loading the geometry for the target group + if (target_group.containsMyRank()) { + + string meshname2("Mesh_2"); + mesh = MEDLoader::ReadUMeshFromFile(filename_2.c_str(),meshname2.c_str(),0); + + paramesh=new ParaMESH (mesh,self_group,"target mesh"); + ParaMEDMEM::ComponentTopology comptopo(6, &target_group); + + parafield = new 
ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + + int nb_local=mesh->getNumberOfCells(); + double *value=parafield->getField()->getArray()->getPointer(); + for (int ielem=0; ielemgetCellMesh(),parafield->getField()); + + dec.attachLocalField(parafield); + dec.synchronize(); + dec.recvData(); + + //checking validity of field + const double* recv_value = parafield->getField()->getArray()->getPointer(); + for (int i=0; i< nb_local; i++) { + int first = comptopo.firstLocalComponent(); + for (int icomp = 0; icomp < comptopo.nbLocalComponents(); icomp++) + CPPUNIT_ASSERT_DOUBLES_EQUAL(recv_value[i*comptopo.nbLocalComponents()+icomp],(double)(i*6+icomp+first),1e-12); + } + //delete icocofield; + } + delete parafield; + delete paramesh; + mesh->decrRef(); + + // MPI_Barrier(MPI_COMM_WORLD); + +} diff --git a/src/ParaMEDMEMTest/TestMPIAccess.cxx b/src/ParaMEDMEMTest/TestMPIAccess.cxx new file mode 100644 index 000000000..3b456a52e --- /dev/null +++ b/src/ParaMEDMEMTest/TestMPIAccess.cxx @@ -0,0 +1,30 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +// --- include all MPIAccess Test +// +#include "MPIAccessTest.hxx" + +// --- Registers the fixture into the 'registry' + +CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessTest ); + +// --- generic Main program from KERNEL_SRC/src/Basics/Test + +#include "MPIMainTest.hxx" diff --git a/src/ParaMEDMEMTest/TestMPIAccessDEC.cxx b/src/ParaMEDMEMTest/TestMPIAccessDEC.cxx new file mode 100644 index 000000000..15ed2081d --- /dev/null +++ b/src/ParaMEDMEMTest/TestMPIAccessDEC.cxx @@ -0,0 +1,30 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +// --- include all MPIAccessDEC Test +// +#include "MPIAccessDECTest.hxx" + +// --- Registers the fixture into the 'registry' + +CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessDECTest ); + +// --- generic Main program from KERNEL_SRC/src/Basics/Test + +#include "MPIMainTest.hxx" diff --git a/src/ParaMEDMEMTest/TestParaMEDMEM.cxx b/src/ParaMEDMEMTest/TestParaMEDMEM.cxx new file mode 100644 index 000000000..e1b804c9a --- /dev/null +++ b/src/ParaMEDMEMTest/TestParaMEDMEM.cxx @@ -0,0 +1,30 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +// --- include all MEDMEM Test +// +#include "ParaMEDMEMTest.hxx" + +// --- Registers the fixture into the 'registry' + +CPPUNIT_TEST_SUITE_REGISTRATION( ParaMEDMEMTest ); + +// --- generic Main program from KERNEL_SRC/src/Basics/Test + +#include "MPIMainTest.hxx" diff --git a/src/ParaMEDMEMTest/test_AllToAllDEC.cxx b/src/ParaMEDMEMTest/test_AllToAllDEC.cxx new file mode 100644 index 000000000..f5e702845 --- /dev/null +++ b/src/ParaMEDMEMTest/test_AllToAllDEC.cxx @@ -0,0 +1,170 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessDECTest.hxx" +#include +#include "MPIAccessDEC.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessDECTest::test_AllToAllDECSynchronousPointToPoint() { + test_AllToAllDEC( false ) ; +} +void MPIAccessDECTest::test_AllToAllDECAsynchronousPointToPoint() { + test_AllToAllDEC( true ) ; +} + +static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess mpi_access ) { + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + if ( sts != MPI_SUCCESS ) { + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + ostringstream strstream ; + strstream << "===========================================================" << endl + << "test_AllToAllDEC" << myrank << " KO" << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + return ; +} + +void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) { + + cout << "test_AllToAllDEC" << endl ; + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 || size > 11 ) { + ostringstream strstream ; + strstream << "usage :" << endl + << "mpirun -np test_AllToAllDEC" << endl + << " (nbprocs >=2)" << endl + << "test must be runned with more than 1 proc and less than 12 procs" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_AllToAllDEC" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + std::set sourceprocs; + std::set targetprocs; + int i ; + for ( i = 0 ; i < size/2 ; i++ ) { + sourceprocs.insert(i); + } + for ( i = size/2 ; i < size ; i++ ) { + targetprocs.insert(i); + } + + ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ; + ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ; + + MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup , + Asynchronous ) ; + + MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ; + +#define maxreq 100 +#define datamsglength 10 + + // int sts ; + int sendcount = datamsglength ; + int recvcount = datamsglength ; + int * recvbuf = new int[datamsglength*size] ; + + int ireq ; + for ( ireq = 0 ; ireq < maxreq ; ireq++ ) { + int * sendbuf = new int[datamsglength*size] ; + int j ; + for ( j = 0 ; j < datamsglength*size ; j++ ) { + sendbuf[j] = myrank*1000000 + ireq*1000 + j ; + recvbuf[j] = -1 ; + } + + MyMPIAccessDEC->allToAll( sendbuf, sendcount , MPI_INT , + recvbuf, recvcount , MPI_INT ) ; + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + int *ArrayOfRecvRequests = new int[nRecvReq] ; + int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ; + mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ; + 
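The buffers exchanged in the loop above are self-describing: with sendbuf[j] = myrank*1000000 + ireq*1000 + j, the millions identify the sending rank, the thousands the iteration, and the remainder the position inside the sender's buffer. With datamsglength = 10, for instance, the value 2005013 can only have been written by rank 2 during iteration 5 at offset j = 13, i.e. inside the block destined for rank 1 (13/10 = 1). The time-stepped variants of this test, further down in the patch, use the same idea to verify recvbuf element by element.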
mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ; + delete [] ArrayOfRecvRequests ; + } + + int nSendReq = mpi_access->sendRequestIdsSize() ; + cout << "test_AllToAllDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests" + << endl ; + if ( nSendReq ) { + int *ArrayOfSendRequests = new int[nSendReq] ; + int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ; + mpi_access->waitAll( nReq , ArrayOfSendRequests ) ; + delete [] ArrayOfSendRequests ; + } + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq ) { + ostringstream strstream ; + strstream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq + << " RecvRequests # 0 Error" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq + << " RecvRequests = 0 OK" << endl ; + } + + mpi_access->barrier() ; + + delete sourcegroup ; + delete targetgroup ; + delete MyMPIAccessDEC ; + delete [] recvbuf ; + + // MPI_Finalize(); + + cout << "test_AllToAllDEC" << myrank << " OK" << endl ; + + return ; +} diff --git a/src/ParaMEDMEMTest/test_AllToAllTimeDEC.cxx b/src/ParaMEDMEMTest/test_AllToAllTimeDEC.cxx new file mode 100644 index 000000000..a46114e8c --- /dev/null +++ b/src/ParaMEDMEMTest/test_AllToAllTimeDEC.cxx @@ -0,0 +1,267 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessDECTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccessDEC.hxx" +#include "LinearTimeInterpolator.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessDECTest::test_AllToAllTimeDECSynchronousPointToPoint() { + test_AllToAllTimeDEC( false ) ; +} +void MPIAccessDECTest::test_AllToAllTimeDECAsynchronousPointToPoint() { + test_AllToAllTimeDEC( true ) ; +} + +static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) { + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + if ( sts != MPI_SUCCESS ) { + mpi_access->errorString(sts, msgerr, &lenerr) ; + cout << "test_AllToAllTimeDEC" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + ostringstream strstream ; + strstream << "===========================================================" + << "test_AllToAllTimeDEC" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + return ; +} + +void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) { + + cout << "test_AllToAllTimeDEC" << endl ; + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 || size > 11 ) { + ostringstream strstream ; + strstream << "usage :" << endl + << "mpirun -np test_AllToAllTimeDEC" << endl + << " (nbprocs >=2)" << endl + << "test must be runned with more than 1 proc and less than 12 procs" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + // int Asynchronous = atoi(argv[1]); + + cout << "test_AllToAllTimeDEC" << myrank << " Asynchronous " << Asynchronous << endl ; + + ParaMEDMEM::CommInterface interface ; + std::set sourceprocs; + std::set targetprocs; + int i ; + for ( i = 0 ; i < size/2 ; i++ ) { + sourceprocs.insert(i); + } + for ( i = size/2 ; i < size ; i++ ) { + targetprocs.insert(i); + } + + ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ; + ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ; + + // LinearTimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ; + MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup , + Asynchronous ) ; + // Asynchronous , LinearInterp , 0.5 ) ; + MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ; + MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ; + + cout << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + cout << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ; + +#define maxproc 11 +#define maxreq 10000 +#define datamsglength 10 + + int sts ; + int sendcount = datamsglength ; + int recvcount = datamsglength ; + + 
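The time-stepped loop below advances each rank by its own deltatime[myrank] and calls allToAllTime on buffers filled as sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j, i.e. millions = sender rank, thousands = index of the datamsglength-value block, units = offset. The verification further down inverts this encoding: at position i of recvbuf, the rank r receiver expects (i/datamsglength)*1000000 + r*1000 + r*datamsglength + (i%datamsglength). With datamsglength = 10, rank 2 inspecting i = 31 therefore expects 3*1000000 + 2*1000 + 2*10 + 1 = 3002021, which is exactly what rank 3 stored at j = 21, the second entry of the block it addressed to rank 2.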
double time = 0 ; + // double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ; + double deltatime[maxproc] = {1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.} ; + double maxtime = maxreq ; + double nextdeltatime = deltatime[myrank] ; + // MyMPIAccessDEC->InitTime( time , deltatime[myrank] , maxtime ) ; + // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) { + for ( time = 0 ; time <= maxtime && nextdeltatime != 0 ; time+=nextdeltatime ) { + if ( time != 0 ) { + nextdeltatime = deltatime[myrank] ; + if ( time+nextdeltatime > maxtime ) { + nextdeltatime = 0 ; + } + // MyMPIAccessDEC->NextTime( nextdeltatime ) ; + } + MyMPIAccessDEC->setTime( time , nextdeltatime ) ; + cout << "test_AllToAllTimeDEC" << myrank << "=====TIME " << time << "=====DELTATIME " + << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ; + int * sendbuf = new int[datamsglength*size] ; + // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ; + int * recvbuf = new int[datamsglength*size] ; + int j ; + for ( j = 0 ; j < datamsglength*size ; j++ ) { + sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ; + recvbuf[j] = -1 ; + } + + int sts = MyMPIAccessDEC->allToAllTime( sendbuf, sendcount , MPI_INT , + recvbuf, recvcount , MPI_INT ) ; + chksts( sts , myrank , mpi_access ) ; + + // cout << "test_AllToAllTimeDEC" << myrank << " recvbuf before CheckSent" ; + // for ( i = 0 ; i < datamsglength*size ; i++ ) { + // cout << " " << recvbuf[i] ; + // } + // cout << endl ; + + // cout << "test_AllToAllTimeDEC" << myrank << " sendbuf " << sendbuf << endl ; + // MyMPIAccessDEC->CheckSent() ; + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq != 0 ) { + ostringstream strstream ; + strstream << "=============================================================" << endl + << "test_AllToAllTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR" + << endl << "=============================================================" + << endl ; + int *ArrayOfRecvRequests = new int[nRecvReq] ; + int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ; + mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ; + delete [] ArrayOfRecvRequests ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + // cout << "test_AllToAllTimeDEC" << myrank << " recvbuf" << endl ; + bool badrecvbuf = false ; + for ( i = 0 ; i < datamsglength*size ; i++ ) { + if ( recvbuf[i] != (i/datamsglength)*1000000 + myrank*1000 + + myrank*datamsglength+(i%datamsglength) ) { + badrecvbuf = true ; + cout << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] " + << recvbuf[i] << " # " << (i/datamsglength)*1000000 + myrank*1000 + + myrank*datamsglength+(i%datamsglength) << endl ; + } + else if ( badrecvbuf ) { + cout << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] " + << recvbuf[i] << " == " << (i/datamsglength)*1000000 + myrank*1000 + + myrank*datamsglength+(i%datamsglength) << endl ; + } + } + if ( badrecvbuf ) { + ostringstream strstream ; + strstream << "==============================================================" << endl + << "test_AllToAllTimeDEC" << myrank << " badrecvbuf" + << endl << "=============================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + delete [] recvbuf ; + } + + cout << "test_AllToAllTimeDEC" << myrank << " final CheckSent" << endl ; + sts = MyMPIAccessDEC->checkSent() ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + 
strstream << "================================================================" << endl + << "test_AllToAllTimeDEC" << myrank << " final CheckSent ERROR" + << endl << "================================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + int nSendReq = mpi_access->sendRequestIdsSize() ; + cout << "test_AllToAllTimeDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests" + << endl ; + if ( nSendReq ) { + int *ArrayOfSendRequests = new int[nSendReq] ; + int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ; + mpi_access->waitAll( nReq , ArrayOfSendRequests ) ; + delete [] ArrayOfSendRequests ; + } + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq ) { + ostringstream strstream ; + strstream << "===============================================================" << endl + << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq + << " RecvRequests # 0 Error" + << endl << "===============================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq + << " RecvRequests = 0 OK" << endl ; + } + + cout << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + cout << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ; + + delete sourcegroup ; + delete targetgroup ; + // delete aLinearInterpDEC ; + delete MyMPIAccessDEC ; + + // MPI_Finalize(); + + cout << "test_AllToAllTimeDEC" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_AllToAllvDEC.cxx b/src/ParaMEDMEMTest/test_AllToAllvDEC.cxx new file mode 100644 index 000000000..f0ec8e292 --- /dev/null +++ b/src/ParaMEDMEMTest/test_AllToAllvDEC.cxx @@ -0,0 +1,212 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessDECTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccessDEC.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessDECTest::test_AllToAllvDECSynchronousPointToPoint() { + test_AllToAllvDEC( false ) ; +} +void MPIAccessDECTest::test_AllToAllvDECAsynchronousPointToPoint() { + test_AllToAllvDEC( true ) ; +} + +static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess mpi_access ) { + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + if ( sts != MPI_SUCCESS ) { + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test_AllToAllvDEC" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + ostringstream strstream ; + strstream << "===========================================================" + << "test_AllToAllvDEC" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + return ; +} + +void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) { + + cout << "test_AllToAllvDEC" << endl ; + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 || size > 11 ) { + ostringstream strstream ; + strstream << "usage :" << endl + << "mpirun -np test_AllToAllvDEC" << endl + << " (nbprocs >=2)" << endl + << "test must be runned with more than 1 proc and less than 12 procs" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + // int Asynchronous = atoi(argv[1]); + + cout << "test_AllToAllvDEC" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + std::set sourceprocs; + std::set targetprocs; + int i ; + for ( i = 0 ; i < size/2 ; i++ ) { + sourceprocs.insert(i); + } + for ( i = size/2 ; i < size ; i++ ) { + targetprocs.insert(i); + } + + ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ; + ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ; + + MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup , + Asynchronous ) ; + + MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ; + +#define maxreq 100 +#define datamsglength 10 + + // int sts ; + int *sendcounts = new int[size] ; + int *sdispls = new int[size] ; + int *recvcounts = new int[size] ; + int *rdispls = new int[size] ; + for ( i = 0 ; i < size ; i++ ) { + sendcounts[i] = datamsglength-i; + sdispls[i] = i*datamsglength ; + recvcounts[i] = datamsglength-myrank; + rdispls[i] = i*datamsglength ; + } + int * recvbuf = new int[datamsglength*size] ; + + int ireq ; + for ( ireq = 0 ; ireq < maxreq ; ireq++ ) { + int * sendbuf = new int[datamsglength*size] ; + // int * sendbuf = (int *) malloc( sizeof(int)*datamsglength*size) ; + int j ; + 
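The variable-length layout prepared above is worth spelling out. sendcounts[i] = datamsglength - i with sdispls[i] = i*datamsglength means that, taking datamsglength = 10 and size = 3 as an example, every rank sends 10, 9 and 8 ints to ranks 0, 1 and 2, always read from offsets 0, 10 and 20 of its send buffer; symmetrically recvcounts[i] = datamsglength - myrank says that rank r expects 10 - r ints from each peer, which is consistent because every sender's count for destination r is also datamsglength - r. The fill loop that follows then applies the same rank/iteration/offset encoding as in test_AllToAllDEC.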
for ( j = 0 ; j < datamsglength*size ; j++ ) { + sendbuf[j] = myrank*1000000 + ireq*1000 + j ; + recvbuf[j] = -1 ; + } + + MyMPIAccessDEC->allToAllv( sendbuf, sendcounts , sdispls , MPI_INT , + recvbuf, recvcounts , rdispls , MPI_INT ) ; + + // cout << "test_AllToAllvDEC" << myrank << " recvbuf before CheckSent" ; + // for ( i = 0 ; i < datamsglength*size ; i++ ) { + // cout << " " << recvbuf[i] ; + // } + // cout << endl ; + + // cout << "test_AllToAllvDEC" << myrank << " sendbuf " << sendbuf << endl ; + // MyMPIAccessDEC->CheckSent() ; + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + // cout << "test_AllToAllvDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests" << endl ; + int *ArrayOfRecvRequests = new int[nRecvReq] ; + int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ; + mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ; + mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ; + delete [] ArrayOfRecvRequests ; + + // cout << "test_AllToAllvDEC" << myrank << " recvbuf" ; + // for ( i = 0 ; i < datamsglength*size ; i++ ) { + // cout << " " << recvbuf[i] ; + // } + // cout << endl ; + } + + // cout << "test_AllToAllvDEC" << myrank << " final CheckSent" << endl ; + // MyMPIAccessDEC->CheckSent() ; + + int nSendReq = mpi_access->sendRequestIdsSize() ; + cout << "test_AllToAllvDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests" + << endl ; + if ( nSendReq ) { + int *ArrayOfSendRequests = new int[nSendReq] ; + int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ; + mpi_access->waitAll( nReq , ArrayOfSendRequests ) ; + delete [] ArrayOfSendRequests ; + } + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq ) { + ostringstream strstream ; + strstream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq + << " RecvRequests # 0 Error" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq + << " RecvRequests = 0 OK" << endl ; + } + + mpi_access->barrier() ; + + delete sourcegroup ; + delete targetgroup ; + delete MyMPIAccessDEC ; + delete [] sendcounts ; + delete [] sdispls ; + delete [] recvcounts ; + delete [] rdispls ; + delete [] recvbuf ; + + // MPI_Finalize(); + + cout << "test_AllToAllvDEC" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_AllToAllvTimeDEC.cxx b/src/ParaMEDMEMTest/test_AllToAllvTimeDEC.cxx new file mode 100644 index 000000000..78d8d47c0 --- /dev/null +++ b/src/ParaMEDMEMTest/test_AllToAllvTimeDEC.cxx @@ -0,0 +1,363 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include +#include + +#include "MPIAccessDECTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccessDEC.hxx" +#include "LinearTimeInterpolator.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousNative() { + test_AllToAllvTimeDEC( false , true ) ; +} +void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousPointToPoint() { + test_AllToAllvTimeDEC( false , false ) ; +} +void MPIAccessDECTest::test_AllToAllvTimeDECAsynchronousPointToPoint() { + test_AllToAllvTimeDEC( true , false ) ; +} + +static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) { + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + if ( sts != MPI_SUCCESS ) { + mpi_access->errorString(sts, msgerr, &lenerr) ; + cout << "test_AllToAllvTimeDEC" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + ostringstream strstream ; + strstream << "===========================================================" + << "test_AllToAllvTimeDEC" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + return ; +} + +void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) { + + cout << "test_AllToAllvTimeDEC" << endl ; + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 || size > 11 ) { + ostringstream strstream ; + strstream << "usage :" << endl + << "mpirun -np test_AllToAllTimeDEC" << endl + << " (nbprocs >=2)" << endl + << "test must be runned with more than 1 proc and less than 12 procs" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + // int Asynchronous = atoi(argv[1]) ; + int UseMPI_Alltoallv = UseMPINative ; + // if ( argc == 3 ) { + // UseMPI_Alltoallv = atoi(argv[2]) ; + // } + + cout << "test_AllToAllvTimeDEC" << myrank << " Asynchronous " << Asynchronous + << " UseMPI_Alltoallv " << UseMPI_Alltoallv << endl ; + + ParaMEDMEM::CommInterface interface ; + std::set sourceprocs; + std::set targetprocs; + int i ; + for ( i = 0 ; i < size/2 ; i++ ) { + sourceprocs.insert(i); + } + for ( i = size/2 ; i < size ; i++ ) { + targetprocs.insert(i); + } + + ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ; + ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ; + + // TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ; + MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup , + Asynchronous ) ; + // Asynchronous , LinearInterp , 0.5 ) ; + MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp , 0.5 ) ; + MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ; + + 
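+  // The DEC couples the lower half of the ranks (sources) with the upper half
+  // (targets). A linear time interpolator (ratio 0.5) is attached for the
+  // time-stamped exchanges performed by allToAllvTime; when UseMPI_Alltoallv
+  // is true the test bypasses the DEC and exchanges the time messages and the
+  // data directly through CommInterface::allToAllV on the DEC communicator.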
cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ; + +#define maxproc 11 +#define maxreq 10000 +#define datamsglength 10 + + int sts ; + int *sendcounts = new int[size] ; + int *sdispls = new int[size] ; + int *recvcounts = new int[size] ; + int *rdispls = new int[size] ; + int *sendtimecounts = new int[size] ; + int *stimedispls = new int[size] ; + int *recvtimecounts = new int[size] ; + int *rtimedispls = new int[size] ; + for ( i = 0 ; i < size ; i++ ) { + sendcounts[i] = datamsglength-i ; + sdispls[i] = i*datamsglength ; + recvcounts[i] = datamsglength-myrank ; + rdispls[i] = i*datamsglength ; + sendtimecounts[i] = 1 ; + stimedispls[i] = 0 ; + recvtimecounts[i] = 1 ; + rtimedispls[i] = i ; + //rtimedispls[i] = i*mpi_access->TimeExtent() ; + } + + double timeLoc = 0 ; + double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ; + double maxtime ; + double nextdeltatime = deltatime[myrank] ; + if ( UseMPI_Alltoallv ) { + maxtime = maxreq*nextdeltatime - 0.1 ; + } + else { + maxtime = maxreq ; + // MyMPIAccessDEC->InitTime( time , nextdeltatime , maxtime ) ; + } + time_t begintime = time(NULL) ; + // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) { + for ( timeLoc = 0 ; timeLoc <= maxtime && nextdeltatime != 0 ; timeLoc+=nextdeltatime ) { + nextdeltatime = deltatime[myrank] ; + if ( timeLoc != 0 ) { + nextdeltatime = deltatime[myrank] ; + if ( timeLoc+nextdeltatime > maxtime ) { + nextdeltatime = 0 ; + } + // MyMPIAccessDEC->NextTime( nextdeltatime ) ; + } + MyMPIAccessDEC->setTime( timeLoc , nextdeltatime ) ; + cout << "test_AllToAllvTimeDEC" << myrank << "=====TIME " << time << "=====DELTATIME " + << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ; + int * sendbuf = new int[datamsglength*size] ; + // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ; + int * recvbuf = new int[datamsglength*size] ; + int j ; + for ( j = 0 ; j < datamsglength*size ; j++ ) { + sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ; + recvbuf[j] = -1 ; + } + + if ( UseMPI_Alltoallv ) { + const MPI_Comm* comm = MyMPIAccessDEC->getComm(); + TimeMessage * aSendTimeMessage = new TimeMessage ; + aSendTimeMessage->time = timeLoc ; + // aSendTimeMessage->deltatime = deltatime[myrank] ; + aSendTimeMessage->deltatime = nextdeltatime ; + // aSendTimeMessage->maxtime = maxtime ; + aSendTimeMessage->tag = (int ) (timeLoc/deltatime[myrank]) ; + TimeMessage * aRecvTimeMessage = new TimeMessage[size] ; + interface.allToAllV(aSendTimeMessage, sendtimecounts , stimedispls , + mpi_access->timeType() , + aRecvTimeMessage, recvtimecounts , rtimedispls , + mpi_access->timeType() , *comm ) ; + // for ( j = 0 ; j < size ; j++ ) { + // cout << "test_AllToAllvTimeDEC" << myrank << " TimeMessage received " << j << " " + // << aRecvTimeMessage[j] << endl ; + // } + delete aSendTimeMessage ; + delete [] aRecvTimeMessage ; + interface.allToAllV(sendbuf, sendcounts , sdispls , MPI_INT , + recvbuf, recvcounts , rdispls , MPI_INT , *comm ) ; + // free(sendbuf) ; + delete [] sendbuf ; + } + else { + int sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_INT , + recvbuf, recvcounts , rdispls , MPI_INT ) ; + chksts( sts , myrank , mpi_access ) ; + } + + // cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf before CheckSent" ; + // for ( i = 0 ; i < datamsglength*size ; i++ ) { + // cout << " " << recvbuf[i] ; + // } + // cout << endl ; + 
+ // cout << "test_AllToAllvTimeDEC" << myrank << " sendbuf " << sendbuf << endl ; + // MyMPIAccessDEC->CheckSent() ; + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq != 0 ) { + ostringstream strstream ; + strstream << "=============================================================" << endl + << "test_AllToAllvTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR" + << endl << "=============================================================" + << endl ; + int *ArrayOfRecvRequests = new int[nRecvReq] ; + int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ; + mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ; + delete [] ArrayOfRecvRequests ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + // cout << "test_AllToAllvTimeDEC" << myrank << " check of recvbuf" << endl ; + bool badrecvbuf = false ; + for ( i = 0 ; i < size ; i++ ) { + int j ; + for ( j = 0 ; j < datamsglength ; j++ ) { + int index = i*datamsglength+j ; + if ( j < recvcounts[i] ) { + if ( recvbuf[index] != (index/datamsglength)*1000000 + myrank*1000 + + myrank*datamsglength+(index%datamsglength) ) { + badrecvbuf = true ; + cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] " + << recvbuf[index] << " # " << (index/datamsglength)*1000000 + + myrank*1000 + + myrank*datamsglength+(index%datamsglength) << endl ; + } + else if ( badrecvbuf ) { + cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] " + << recvbuf[index] << " == " << (index/datamsglength)*1000000 + + myrank*1000 + + myrank*datamsglength+(index%datamsglength) << endl ; + } + } + else if ( recvbuf[index] != -1 ) { + badrecvbuf = true ; + cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] " + << recvbuf[index] << " # -1" << endl ; + } + } + } + if ( badrecvbuf ) { + ostringstream strstream ; + strstream << "==============================================================" << endl + << "test_AllToAllvTimeDEC" << myrank << " badrecvbuf" + << endl << "=============================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + delete [] recvbuf ; + } + + cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ; + + cout << "test_AllToAllvTimeDEC" << myrank << " CheckFinalSent" << endl ; + sts = MyMPIAccessDEC->checkFinalSent() ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "================================================================" << endl + << "test_AllToAllvTimeDEC" << myrank << " final CheckSent ERROR" + << endl << "================================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv" << endl ; + sts = MyMPIAccessDEC->checkFinalRecv() ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "================================================================" << endl + << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv ERROR" + << endl << "================================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq ) { + ostringstream strstream ; + strstream << 
"===============================================================" << endl + << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq + << " RecvRequests # 0 Error" + << endl << "===============================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq + << " RecvRequests = 0 OK" << endl ; + } + + time_t endtime = time(NULL) ; + cout << "test_AllToAllvTimeDEC" << myrank << " begintime " << begintime << " endtime " << endtime + << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank] + << " calls to AllToAll" << endl ; + + cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ; + + delete sourcegroup ; + delete targetgroup ; + delete MyMPIAccessDEC ; + // delete aLinearInterpDEC ; + + delete [] sendcounts ; + delete [] sdispls ; + delete [] recvcounts ; + delete [] rdispls ; + delete [] sendtimecounts ; + delete [] stimedispls ; + delete [] recvtimecounts ; + delete [] rtimedispls ; + + // MPI_Finalize(); + + endtime = time(NULL) ; + + cout << "test_AllToAllvTimeDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime + << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank] + << " calls to AllToAll" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_AllToAllvTimeDoubleDEC.cxx b/src/ParaMEDMEMTest/test_AllToAllvTimeDoubleDEC.cxx new file mode 100644 index 000000000..f69c44285 --- /dev/null +++ b/src/ParaMEDMEMTest/test_AllToAllvTimeDoubleDEC.cxx @@ -0,0 +1,337 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include +#include +#include + +#include "MPIAccessDECTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccessDEC.hxx" +#include "LinearTimeInterpolator.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessDECTest::test_AllToAllvTimeDoubleDECSynchronousPointToPoint() { + test_AllToAllvTimeDoubleDEC( false ) ; +} +void MPIAccessDECTest::test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() { + test_AllToAllvTimeDoubleDEC( true ) ; +} + +static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) { + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + if ( sts != MPI_SUCCESS ) { + mpi_access->errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + return ; +} + +void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) { + + cout << "test_AllToAllvTimeDoubleDEC" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 || size > 11 ) { + ostringstream strstream ; + strstream << "usage :" << endl + << "mpirun -np test_AllToAllTimeDEC" << endl + << " (nbprocs >=2)" << endl + << "test must be runned with more than 1 proc and less than 12 procs" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + +// int Asynchronous = atoi(argv[1]) ; + + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Asynchronous " << Asynchronous << endl ; + + ParaMEDMEM::CommInterface interface ; + std::set sourceprocs; + std::set targetprocs; + int i ; + for ( i = 0 ; i < size/2 ; i++ ) { + sourceprocs.insert(i); + } + for ( i = size/2 ; i < size ; i++ ) { + targetprocs.insert(i); + } + + ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ; + ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ; + +// TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0 ) ; + MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup , + Asynchronous ) ; +// Asynchronous , LinearInterp , 0.5 ) ; + MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ; + MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ; + + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + +#define maxproc 11 +#define maxreq 100 +#define datamsglength 10 + + int sts ; + int *sendcounts = new int[size] ; + int *sdispls = new int[size] ; + int *recvcounts = new int[size] ; + int 
*rdispls = new int[size] ; + int *sendtimecounts = new int[size] ; + int *stimedispls = new int[size] ; + int *recvtimecounts = new int[size] ; + int *rtimedispls = new int[size] ; + for ( i = 0 ; i < size ; i++ ) { + sendcounts[i] = datamsglength-i ; + sdispls[i] = i*datamsglength ; + recvcounts[i] = datamsglength-myrank ; + rdispls[i] = i*datamsglength ; + sendtimecounts[i] = 1 ; + stimedispls[i] = 0 ; + recvtimecounts[i] = 1 ; + rtimedispls[i] = i ; + } + + double timeLoc[maxproc] ; + double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ; + double maxtime[maxproc] ; + double nextdeltatime[maxproc] ; + for ( i = 0 ; i < size ; i++ ) { + timeLoc[i] = 0 ; + maxtime[i] = maxreq ; + nextdeltatime[i] = deltatime[i] ; + } + time_t begintime = time(NULL) ; + for ( timeLoc[myrank] = 0 ; timeLoc[myrank] <= maxtime[myrank] && nextdeltatime[myrank] != 0 ; + timeLoc[myrank]+=nextdeltatime[myrank] ) { +//local and target times + int target ; + for ( target = 0 ; target < size ; target++ ) { + nextdeltatime[target] = deltatime[target] ; + if ( timeLoc[target] != 0 ) { + if ( timeLoc[target]+nextdeltatime[target] > maxtime[target] ) { + nextdeltatime[target] = 0 ; + } + } + if ( target != myrank ) { + while ( timeLoc[myrank] >= timeLoc[target] ) { + timeLoc[target] += deltatime[target] ; + } + } + } + MyMPIAccessDEC->setTime( timeLoc[myrank] , nextdeltatime[myrank] ) ; + cout << "test" << myrank << "=====TIME " << timeLoc[myrank] << "=====DELTATIME " + << nextdeltatime[myrank] << "=====MAXTIME " << maxtime[myrank] << " ======" + << endl ; + double * sendbuf = new double[datamsglength*size] ; +// double * sendbuf = (double *) malloc(sizeof(double)*datamsglength*size) ; + double * recvbuf = new double[datamsglength*size] ; + int j ; + //cout << "test_AllToAllvTimeDoubleDEC" << myrank << " sendbuf" ; + for ( target = 0 ; target < size ; target++ ) { + for ( j = 0 ; j < datamsglength ; j++ ) { + //sendbuf[j] = myrank*10000 + (j/datamsglength)*100 + j ; + sendbuf[target*datamsglength+j] = myrank*1000000 + target*10000 + + (timeLoc[myrank]/deltatime[myrank])*100 + j ; + //cout << " " << (int ) sendbuf[target*datamsglength+j] ; + recvbuf[target*datamsglength+j] = -1 ; + } + //cout << endl ; + } + + int sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_DOUBLE , + recvbuf, recvcounts , rdispls , MPI_DOUBLE ) ; + chksts( sts , myrank , mpi_access ) ; + +// cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf before CheckSent" ; +// for ( i = 0 ; i < datamsglength*size ; i++ ) { +// cout << " " << recvbuf[i] ; +// } +// cout << endl ; + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq != 0 ) { + ostringstream strstream ; + strstream << "=============================================================" << endl + << "test_AllToAllvTimeDoubleDEC" << myrank << " WaitAllRecv " + << nRecvReq << " Requests # 0 ERROR" + << endl << "============================================================" + << endl ; + int *ArrayOfRecvRequests = new int[nRecvReq] ; + int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ; + mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ; + delete [] ArrayOfRecvRequests ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + +// cout << "test_AllToAllvTimeDoubleDEC" << myrank << " check of recvbuf" << endl ; + bool badrecvbuf = false ; + for ( target = 0 ; target < size ; target++ ) { + int j ; + for ( j = 0 ; j < datamsglength ; j++ ) { + int index = target*datamsglength+j ; + if ( j < 
recvcounts[target] ) { + if ( fabs(recvbuf[index] - (target*1000000 + myrank*10000 + + (timeLoc[target]/deltatime[target])*100 + j)) > 101) { + badrecvbuf = true ; + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " target " << target << " timeLoc[target] " + << timeLoc[target] << " recvbuf[" << index << "] " << (int ) recvbuf[index] + << " # " << (int ) (target*1000000 + + myrank*10000 + (timeLoc[target]/deltatime[target])*100 + j) + << endl ; + } + else if ( badrecvbuf ) { + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] " + << recvbuf[index] << " ~= " << (int ) (target*1000000 + + myrank*10000 + (timeLoc[target]/deltatime[target])*100 + j) << endl ; + } + } + else if ( recvbuf[index] != -1 ) { + badrecvbuf = true ; + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] " + << recvbuf[index] << " # -1" << endl ; + } + } + } + if ( badrecvbuf ) { + ostringstream strstream ; + strstream << "==================================================================" << endl + << "test_AllToAllvTimeDoubleDEC" << myrank << " badrecvbuf" + << endl << "==================================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + delete [] recvbuf ; + } + + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent" << endl ; + sts = MyMPIAccessDEC->checkFinalSent() ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "=================================================================" << endl + << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent ERROR" + << endl << "=================================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv" << endl ; + sts = MyMPIAccessDEC->checkFinalRecv() ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "=================================================================" << endl + << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv ERROR" + << endl << "================================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + int nRecvReq = mpi_access->recvRequestIdsSize() ; + if ( nRecvReq ) { + ostringstream strstream ; + strstream << "===============================================================" << endl + << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq + << " RecvRequests # 0 Error" + << endl << "===============================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq + << " RecvRequests = 0 OK" << endl ; + } + + time_t endtime = time(NULL) ; + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " begintime " << begintime << " endtime " << endtime + << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank] + << " calls to AllToAll" << endl ; + + cout << "test" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + + delete sourcegroup ; + delete targetgroup ; + delete MyMPIAccessDEC ; +// delete aLinearInterpDEC ; + + delete [] sendcounts ; + delete [] sdispls ; + delete [] recvcounts ; + 
delete [] rdispls ; + delete [] sendtimecounts ; + delete [] stimedispls ; + delete [] recvtimecounts ; + delete [] rtimedispls ; + +// MPI_Finalize(); + + endtime = time(NULL) ; + + cout << "test_AllToAllvTimeDoubleDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime + << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank] + << " calls to AllToAll" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Cancel.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Cancel.cxx new file mode 100644 index 000000000..133a2c658 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Cancel.cxx @@ -0,0 +1,325 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include +#include + +#ifndef WIN32 +#include +#endif + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_Cancel() { + + cout << "test_MPI_Access_Cancel" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "test_MPI_Access_Cancel must be runned with 2 procs" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_MPI_Access_Cancel" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int intsendbuf[5] ; + double doublesendbuf[10] ; + int RequestId[10] ; + int sts ; + int i , j ; + for ( j = 0 ; j < 3 ; j++ ) { + for ( i = 0 ; i < 10 ; i++ ) { + cout << "test" << myrank << " ============================ i " << i + << "============================" << endl ; + if ( myrank == 0 ) { + if ( i < 5 ) { + intsendbuf[i] = i ; + sts = mpi_access.ISend(&intsendbuf[i],1,MPI_INT,target, RequestId[i]) ; + cout << "test" << myrank << " Send MPI_INT RequestId " << RequestId[i] + << endl ; + } + else { + doublesendbuf[i] = i ; + sts = mpi_access.ISend(&doublesendbuf[i],1,MPI_DOUBLE,target, + RequestId[i]) ; + cout << "test" << myrank << " Send MPI_DOUBLE 
RequestId " << RequestId[i] + << endl ; + } + } + else { + int flag = false ; + while ( !flag ) { + int source, tag, outcount ; + MPI_Datatype datatype ; + sts = mpi_access.IProbe(target, source, tag, datatype, outcount, + flag ) ; + if ( flag ) { + cout << "test" << myrank << " " << i << " IProbe target " << target + << " source " << source << " tag " << tag + << " outcount " << outcount << " flag " << flag << endl ; + } + else { + cout << "test" << myrank << " flag " << flag << endl ; + sleep( 1 ) ; + } + if ( flag ) { + int recvbuf ; + sts = mpi_access.IRecv(&recvbuf,outcount,MPI_INT,source, + RequestId[i] ) ; + if ( datatype == MPI_INT ) { + int source, tag, error, outcount ; + mpi_access.wait( RequestId[i] ) ; + mpi_access.status( RequestId[i], source, tag, error, outcount, + true ) ; + if ( (outcount != 1) | (recvbuf != i) ) { + ostringstream strstream ; + strstream << "======================================================" + << endl << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " KO" << endl + << "======================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + cout << "========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " OK" << endl + << "========================================================" + << endl ; + } + } + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + + if ( myrank != 0 ) { + int iprobe ; + for ( iprobe = 5 ; iprobe < 10 ; iprobe++ ) { + cout << "test" << myrank << " ============================ iprobe " + << iprobe << "============================" << endl ; + int source, tag, outcount ; + MPI_Datatype datatype ; + int probeflag = false ; + while ( !probeflag ) { + sts = mpi_access.IProbe( target, source, tag, datatype, outcount, + probeflag ) ; + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " IProbe iprobe " << iprobe + << " target " << target << " probeflag " << probeflag + << " tag " << tag << " outcount " << outcount << " datatype " + << datatype << " lenerr " << lenerr << " " << msgerr << endl ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "==========================================================" + << endl << "test" << myrank << " IProbe KO iprobe " << iprobe + << endl + << "==========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + if ( !probeflag ) { + //cout << "========================================================" + // << endl << "test" << myrank << " IProbe KO(OK) iprobe " << iprobe + // << " probeflag " << probeflag << endl + // << "========================================================" + // << endl ; + } + else { + cout << "test" << myrank << " " << iprobe << " IProbe target " + << target << " source " << source << " tag " << tag + << " outcount " << 
outcount << " probeflag " << probeflag + << endl ; + if ( datatype != MPI_DOUBLE ) { + ostringstream strstream ; + strstream << "========================================================" + << endl << "test" << myrank << " MPI_DOUBLE KO" << endl + << "========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + int flag ; + sts = mpi_access.cancel( source, tag, datatype, outcount, flag ) ; + if ( sts != MPI_SUCCESS || !flag ) { + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "======================================================" + << endl << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl << "test" << myrank + << " Cancel PendingIrecv KO flag " << flag << " iprobe " + << iprobe << " Irecv completed" << endl + << "======================================================" + << endl ; + //return 1 ; + } + else { + cout << "======================================================" + << endl << "test" << myrank + << " Cancel PendingIrecv OK RequestId " << " flag " + << flag << " iprobe " << iprobe << endl + << "======================================================" + << endl ; + } + } + int Reqtarget, Reqtag, Reqerror, Reqoutcount ; + mpi_access.status( RequestId[iprobe], Reqtarget, Reqtag, Reqerror, + Reqoutcount, true ) ; + cout << "test" << myrank << " Status Reqtarget "<< Reqtarget + << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount + << endl ; + int Reqflag ; + sts = mpi_access.cancel( RequestId[iprobe] , Reqflag ) ; + cout << "test" << myrank << " " << iprobe + << " Cancel Irecv done Reqtarget " << Reqtarget + << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount + << " Reqflag " << Reqflag << endl ; + if ( sts != MPI_SUCCESS || !Reqflag ) { + mpi_access.errorString(sts, msgerr, &lenerr) ; + ostringstream strstream ; + strstream << "========================================================" + << endl << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl << "test" << myrank + << " Cancel Irecv KO Reqflag " << Reqflag << " iprobe " + << iprobe << endl + << "========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "========================================================" + << endl << "test" << myrank + << " Cancel Irecv OK RequestId " << RequestId[iprobe] + << " Reqflag " << Reqflag << " iprobe " << iprobe << endl + << "========================================================" + << endl ; + probeflag = Reqflag ; + } + } + } + } + } + mpi_access.waitAll(10,RequestId) ; + mpi_access.deleteRequests(10,RequestId) ; + } + + int source, tag, outcount, flag ; + MPI_Datatype datatype ; + sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ; + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + if ( sts != MPI_SUCCESS || flag ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " IProbe KO flag " << flag + << " remaining unread/cancelled message :" << endl + << " source " << source << " tag " << tag << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + mpi_access.testAll(10,RequestId,flag) ; + 
mpi_access.waitAll(10,RequestId) ; + mpi_access.deleteRequests(10,RequestId) ; + mpi_access.testAll(10,RequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Cyclic_ISend_IRecv.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Cyclic_ISend_IRecv.cxx new file mode 100644 index 000000000..ef785ab0f --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Cyclic_ISend_IRecv.cxx @@ -0,0 +1,270 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_Cyclic_ISend_IRecv() { + + cout << "test_MPI_Access_Cyclic_ISend_IRecv" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 3 ) { + cout << "test_MPI_Access_Cyclic_ISend_IRecv must be runned with 3 procs" << endl ; + CPPUNIT_FAIL("test_MPI_Access_Cyclic_ISend_IRecv must be runned with 3 procs") ; + } + + cout << "test_MPI_Access_Cyclic_ISend_IRecv" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + +#define maxsend 100 + + if ( myrank >= 3 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int alltarget[3] = {1 , 2 , 0 } ; + int allsource[3] = {2 , 0 , 1 } ; + int SendRequestId[maxsend] ; + int RecvRequestId[maxsend] ; + int sendbuf[maxsend] ; + int recvbuf[maxsend] ; + int sts ; + int i = 0 ; + if ( myrank == 0 ) { + sendbuf[i] = i ; + sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank], + SendRequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ; + } + for ( i = 0 ; i < maxsend ; i++ ) { + recvbuf[i] = -1 ; + sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,allsource[myrank], + 
RecvRequestId[i]) ; + cout << "test" << myrank << " Recv RequestId " << RecvRequestId[i] + << " tag " << mpi_access.recvMPITag(allsource[myrank]) << endl ; + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr + << " " << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + int j ; + for (j = 0 ; j <= i ; j++) { + int flag ; + if ( j < i ) { + cout << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j] + << ")" << endl ; + mpi_access.test( SendRequestId[j], flag ) ; + if ( flag ) { + int target, tag, error, outcount ; + mpi_access.status( SendRequestId[j], target, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Send RequestId " << SendRequestId[j] + << " target " << target << " tag " << tag << " error " << error + << endl ; + mpi_access.deleteRequest( SendRequestId[j] ) ; + } + } + cout << "test" << myrank << " " << j << " -> Test-Recv("<< SendRequestId[j] + << ")" << endl ; + mpi_access.test( RecvRequestId[j], flag ) ; + if ( flag ) { + int source, tag, error, outcount ; + mpi_access.status( RecvRequestId[j], source, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Recv RequestId" << j << " " + << RecvRequestId[j] << " source " << source << " tag " << tag + << " error " << error << " outcount " << outcount << endl ; + if ( (outcount != 1) | (recvbuf[j] != j) ) { + ostringstream strstream ; + strstream << "=====================================================" + << endl << "test" << myrank << " outcount " + << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO" + << endl << "=====================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + } + if ( myrank == 0 ) { + if ( i != maxsend-1 ) { + sendbuf[i+1] = i + 1 ; + sts = mpi_access.ISend(&sendbuf[i+1],1,MPI_INT,alltarget[myrank], + SendRequestId[i+1]) ; + cout << "test" << myrank << " Send RequestId " << SendRequestId[i+1] + << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ; + } + } + else { + sendbuf[i] = i ; + sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank], + SendRequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ; + } + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr + << " " << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + + int flag ; + mpi_access.testAll(maxsend,SendRequestId,flag) ; + mpi_access.testAll(maxsend,RecvRequestId,flag) ; + mpi_access.waitAll(maxsend,SendRequestId) ; + mpi_access.deleteRequests(maxsend,SendRequestId) ; + mpi_access.waitAll(maxsend,RecvRequestId) ; + mpi_access.deleteRequests(maxsend,RecvRequestId) ; + mpi_access.check() ; + mpi_access.testAll(maxsend,SendRequestId,flag) ; + if ( !flag ) { + 
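+    // All send requests were waited on and deleted above, so testAll is
+    // expected to report completion here; reaching this branch means a send
+    // is still pending and the cyclic test is marked KO.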
ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " TestAllSendflag " << flag << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " TestAllSendflag " << flag << " OK" << endl + << "=========================================================" << endl ; + } + mpi_access.testAll(maxsend,RecvRequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " TestAllRecvflag " << flag << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " TestAllRecvflag " << flag << " OK" << endl + << "=========================================================" << endl ; + } + + int sendrequests[maxsend] ; + int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , maxsend , + sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + int source, tag, error, outcount ; + mpi_access.status(sendrequests[0], source, tag, error, outcount, true) ; + cout << "test" << myrank << " RequestId " << sendrequests[0] + << " source " << source << " tag " << tag << " error " << error + << " outcount " << outcount << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + int recvrequests[maxsend] ; + int recvreqsize = mpi_access.sendRequestIds( allsource[myrank] , maxsend , + recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Cyclic_Send_Recv.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Cyclic_Send_Recv.cxx new file mode 100644 index 000000000..21ee606e8 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Cyclic_Send_Recv.cxx @@ -0,0 +1,187 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published 
by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() { + + cout << "test_MPI_Access_Cyclic_Send_Recv" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 3 ) { + cout << "test_MPI_Access_Send_Recv must be runned with 3 procs" << endl ; + CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be runned with 3 procs") ; + } + + cout << "test_MPI_Access_Cyclic_Send_Recv" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 3 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int alltarget[3] = {1 , 2 , 0 } ; + int allsource[3] = {2 , 0 , 1 } ; + int RequestId[10] ; + int sts ; + int i = 0 ; + if ( myrank == 0 ) { + sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << RequestId[i] + << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ; + } + for ( i = 0 ; i < 10 ; i++ ) { + int recvbuf ; + int outcount ; + if ( i & 1 ) { + outcount = 0 ; + sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i], + &outcount) ; + } + else { + sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i]) ; + outcount = 1 ; + } + //int source, tag, error, outcount ; + //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ; + cout << "test" << myrank << " Recv RequestId " << RequestId[i] + << " tag " << mpi_access.recvMPITag(allsource[myrank]) + << " outcount " << outcount << endl ; + if ( (outcount != 1) | (recvbuf != i) ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " outcount " + << outcount << " recvbuf " << recvbuf << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + if ( myrank == 0 ) { + if ( i != 9 ) { + int ii = i + 1 ; + sts = mpi_access.send(&ii,1,MPI_INT,alltarget[myrank], RequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << RequestId[i] + << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ; + } + } + else { + sts = 
mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << RequestId[i] + << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ; + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr + << " " << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + + int flag ; + mpi_access.testAll(10,RequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.waitAll(10,RequestId) ; + mpi_access.check() ; + + int sendrequests[10] ; + int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , 10 , + sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + int recvrequests[10] ; + int recvreqsize = mpi_access.sendRequestIds( allsource[myrank] , 10 , + recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_IProbe.cxx b/src/ParaMEDMEMTest/test_MPI_Access_IProbe.cxx new file mode 100644 index 000000000..92fdcf2f2 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_IProbe.cxx @@ -0,0 +1,172 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include +#include + +#ifndef WIN32 +#include +#endif + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_IProbe() { + + cout << "test_MPI_Access_IProbe" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "test_MPI_Access_IProbe must be runned with 2 procs" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_MPI_Access_IProbe" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int sendbuf[10] ; + int RequestId[10] ; + int sts ; + int i ; + for ( i = 0 ; i < 10 ; i++ ) { + if ( myrank == 0 ) { + sendbuf[i] = i ; + sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, RequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << RequestId[i] + << endl ; + } + else { + int flag = false ; + while ( !flag ) { + int source, tag, outcount ; + MPI_Datatype datatype ; + sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ; + if ( flag ) { + cout << "test" << myrank << " " << i << " IProbe target " << target + << " source " << source << " tag " << tag + << " outcount " << outcount << " flag " << flag << endl ; + } + else { + cout << "test" << myrank << " IProbe flag " << flag << endl ; + sleep( 1 ) ; + } + if ( flag ) { + int recvbuf ; + sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i], + &outcount) ; + if ( (outcount != 1) | (recvbuf != i) ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " KO" << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + cout << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " OK" << endl + << "===========================================================" + << endl ; + } + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << 
"===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + int flag ; + mpi_access.testAll(10,RequestId,flag) ; + mpi_access.waitAll(10,RequestId) ; + mpi_access.deleteRequests(10,RequestId) ; + mpi_access.testAll(10,RequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_ISendRecv.cxx b/src/ParaMEDMEMTest/test_MPI_Access_ISendRecv.cxx new file mode 100644 index 000000000..07dcb41aa --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_ISendRecv.cxx @@ -0,0 +1,216 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_ISendRecv() { + + cout << "test_MPI_Access_ISendRecv" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + cout << "test_MPI_Access_ISendRecv must be runned with 2 procs" << endl ; + CPPUNIT_FAIL("test_MPI_Access_ISendRecv must be runned with 2 procs") ; + } + + cout << "test_MPI_Access_ISendRecv" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int SendRequestId[10] ; + int RecvRequestId[10] ; + int sendbuf[10] ; + int recvbuf[10] ; + int sts ; + int i ; + for ( i = 0 ; i < 10 ; i++ ) { + sendbuf[i] = i ; + sts = mpi_access.ISendRecv(&sendbuf[i],1,MPI_INT,target, SendRequestId[i], + &recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ; + cout << "test" << myrank << " Send sendRequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(target) + << " recvRequestId " << 
RecvRequestId[i] + << " tag " << mpi_access.recvMPITag(target) << endl ; + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr + << " " << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + int j ; + for (j = 0 ; j <= i ; j++) { + int flag ; + if ( j < i ) { + cout << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j] + << ")" << endl ; + mpi_access.test( SendRequestId[j], flag ) ; + if ( flag ) { + int target, tag, error, outcount ; + mpi_access.status( SendRequestId[j], target, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Send RequestId " << SendRequestId[j] + << " target " << target << " tag " << tag << " error " << error + << endl ; + mpi_access.deleteRequest( SendRequestId[j] ) ; + } + } + cout << "test" << myrank << " " << j << " -> Test-Recv("<< SendRequestId[j] + << ")" << endl ; + mpi_access.test( RecvRequestId[j], flag ) ; + if ( flag ) { + int source, tag, error, outcount ; + mpi_access.status( RecvRequestId[j], source, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Recv RequestId" << j << " " + << RecvRequestId[j] << " source " << source << " tag " << tag + << " error " << error << " outcount " << outcount << endl ; + if ( (outcount != 1) | (recvbuf[j] != j) ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " outcount " + << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + } + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + mpi_access.check() ; + } + + int flag ; + mpi_access.testAll(10,SendRequestId,flag) ; + mpi_access.waitAll(10,SendRequestId) ; + mpi_access.deleteRequests(10,SendRequestId) ; + mpi_access.testAll(10,SendRequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + mpi_access.testAll(10,RecvRequestId,flag) ; + mpi_access.waitAll(10,RecvRequestId) ; + mpi_access.deleteRequests(10,RecvRequestId) ; + mpi_access.testAll(10,RecvRequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + + int sendrequests[10] ; + int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << 
sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + int recvrequests[10] ; + int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv.cxx b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv.cxx new file mode 100644 index 000000000..3498074df --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv.cxx @@ -0,0 +1,221 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_ISend_IRecv() { + + cout << "test_MPI_Access_ISend_IRecv" << endl ; + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + cout << "test_MPI_Access_ISend_IRecv must be runned with 2 procs" << endl ; + CPPUNIT_FAIL("test_MPI_Access_ISend_IRecv must be runned with 2 procs") ; + } + + cout << "test_MPI_Access_ISend_IRecv" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + +#define maxreq 100 + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int SendRequestId[maxreq] ; + int RecvRequestId[maxreq] ; + int sts ; + int sendbuf[maxreq] ; + int recvbuf[maxreq] ; + int i ; + for ( i = 0 ; i < maxreq ; i++ ) { + if ( myrank == 0 ) { + sendbuf[i] = i ; + sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ; + cout << "test" << myrank << " ISend RequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(target) << endl ; + } + else { + sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ; + cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i] + << " tag " << mpi_access.recvMPITag(target) << endl ; + } + int j ; + for (j = 0 ; j <= i ; j++) { + int flag ; + if ( myrank == 0 ) { + mpi_access.test( SendRequestId[j], flag ) ; + } + else { + mpi_access.test( RecvRequestId[j], flag ) ; + } + if ( flag ) { + int target,source, tag, error, outcount ; + if ( myrank == 0 ) { + mpi_access.status( SendRequestId[j], target, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j] + << ") : target " << target << " tag " << tag << " error " << error + << " flag " << flag << endl ; + } + else { + mpi_access.status( RecvRequestId[j], source, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Recv RequestId " + << RecvRequestId[j] << ") : source " << source << " tag " << tag + << " error " << error << " outcount " << outcount + << " flag " << flag << endl ; + if ( (outcount != 1) | (recvbuf[j] != j) ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " outcount " + << outcount << " recvbuf " << recvbuf[j] << " KO" << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + //else { + // cout << 
"===========================================================" + // << endl << "test" << myrank << " outcount " << outcount + // << " RequestId " << RecvRequestId[j] << " recvbuf " + // << recvbuf[j] << " OK" << endl + // << "===========================================================" + // << endl ; + //} + } + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + + mpi_access.check() ; + if ( myrank == 0 ) { + mpi_access.waitAll(maxreq, SendRequestId) ; + mpi_access.deleteRequests(maxreq, SendRequestId) ; + } + else { + mpi_access.waitAll(maxreq, RecvRequestId) ; + mpi_access.deleteRequests(maxreq, RecvRequestId) ; + } + mpi_access.check() ; + + if ( myrank == 0 ) { + int sendrequests[maxreq] ; + int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ; + int i ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + for ( i = 0 ; i < sendreqsize ; i++ ) { + cout << "test" << myrank << " sendrequests[ " << i << " ] = " + << sendrequests[i] << endl ; + } + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + else { + int recvrequests[maxreq] ; + int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + + mpi_access.barrier() ; + + delete group ; + + // MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_BottleNeck.cxx b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_BottleNeck.cxx new file mode 100644 index 000000000..5bdf39a5f --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_BottleNeck.cxx @@ -0,0 +1,225 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() { + + cout << "test_MPI_Access_ISend_IRecv_BottleNeck" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "test_MPI_Access_ISend_IRecv_BottleNeck must be runned with 2 procs" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_MPI_Access_ISend_IRecv_BottleNeck" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + +#define maxreq 10000 + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int SendRequestId[maxreq] ; + int RecvRequestId[maxreq] ; + int sts ; + int sendbuf[maxreq] ; + int recvbuf[maxreq] ; + int i ; + for ( i = 0 ; i < maxreq ; i++ ) { + if ( myrank == 0 ) { + sendbuf[i] = i ; + sts = mpi_access.ISend(sendbuf,i,MPI_INT,target, SendRequestId[i]) ; + cout << "test" << myrank << " ISend RequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(target) << endl ; + } + else { + //sleep( 1 ) ; + sts = mpi_access.IRecv(recvbuf,i,MPI_INT,target, RecvRequestId[i]) ; + cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i] + << " tag " << mpi_access.recvMPITag(target) << endl ; + int recvreqsize = mpi_access.recvRequestIdsSize() ; + int * recvrequests = new int[ recvreqsize ] ; + recvreqsize = mpi_access.recvRequestIds( target , recvreqsize , recvrequests ) ; + int j ; + for (j = 0 ; j < recvreqsize ; j++) { + int flag ; + mpi_access.test( recvrequests[j], flag ) ; + if ( flag ) { + int source, tag, error, outcount ; + mpi_access.status( recvrequests[j], source, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Recv RequestId " + << recvrequests[j] << ") : source " << source << " tag " << tag + << " error " << error << " outcount " << outcount + << " flag " << flag << " : DeleteRequest" << endl ; + mpi_access.deleteRequest( recvrequests[j] ) ; + } + else { +// cout << "test" << myrank << " Test(Recv RequestId " +// << recvrequests[j] << ") flag " << flag << endl ; + } + } + delete [] recvrequests ; + } + if ( sts != MPI_SUCCESS ) { + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + 
mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + } + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + + mpi_access.check() ; + if ( myrank == 0 ) { + int size = mpi_access.sendRequestIdsSize() ; + cout << "test" << myrank << " before WaitAll sendreqsize " << size << endl ; + mpi_access.waitAll(maxreq, SendRequestId) ; + size = mpi_access.sendRequestIdsSize() ; + cout << "test" << myrank << " after WaitAll sendreqsize " << size << endl ; + int * ArrayOfSendRequests = new int[ size ] ; + int nSendRequest = mpi_access.sendRequestIds( size , ArrayOfSendRequests ) ; + int i ; + for ( i = 0 ; i < nSendRequest ; i++ ) { + mpi_access.deleteRequest( ArrayOfSendRequests[i] ) ; + } + delete [] ArrayOfSendRequests ; + } + else { + int size = mpi_access.recvRequestIdsSize() ; + cout << "test" << myrank << " before WaitAll recvreqsize " << size << endl ; + mpi_access.waitAll(maxreq, RecvRequestId) ; + size = mpi_access.recvRequestIdsSize() ; + cout << "test" << myrank << " after WaitAll recvreqsize " << size << endl ; + int * ArrayOfRecvRequests = new int[ size ] ; + int nRecvRequest = mpi_access.recvRequestIds( size , ArrayOfRecvRequests ) ; + int i ; + for ( i = 0 ; i < nRecvRequest ; i++ ) { + mpi_access.deleteRequest( ArrayOfRecvRequests[i] ) ; + } + delete [] ArrayOfRecvRequests ; + } + mpi_access.check() ; + + if ( myrank == 0 ) { + int sendrequests[maxreq] ; + int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ; + int i ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + for ( i = 0 ; i < sendreqsize ; i++ ) { + cout << "test" << myrank << " sendrequests[ " << i << " ] = " + << sendrequests[i] << endl ; + } + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + else { + int recvrequests[maxreq] ; + int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length.cxx 
b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length.cxx new file mode 100644 index 000000000..16c575557 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length.cxx @@ -0,0 +1,234 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length() { + + cout << "test_MPI_Access_ISend_IRecv_Length" << endl ; + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "test_MPI_Access_ISend_IRecv_Length must be runned with 2 procs" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_MPI_Access_ISend_IRecv_Length" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + +#define maxreq 10 + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int SendRequestId[maxreq] ; + int RecvRequestId[maxreq] ; + int sts ; + int sendbuf[1000*(maxreq-1)] ; + int recvbuf[maxreq-1][1000*(maxreq-1)] ; + int i ; + for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) { + sendbuf[i] = i ; + } + for ( i = 0 ; i < maxreq ; i++ ) { + if ( myrank == 0 ) { + sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ; + cout << "test" << myrank << " ISend RequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(target) << endl ; + } + else { + sts = mpi_access.IRecv( recvbuf[i], 1000*i, MPI_INT, target, + RecvRequestId[i] ) ; + cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i] + << " tag " << mpi_access.recvMPITag(target) << endl ; + } + int j ; + for (j = 0 ; j <= i ; j++) { + int flag ; + if ( myrank == 0 ) { + mpi_access.test( SendRequestId[j], flag ) ; + } + else { + mpi_access.test( RecvRequestId[j], flag ) ; + } + if ( flag ) { + int target,source, tag, error, outcount ; + if ( myrank == 0 ) { + mpi_access.status( SendRequestId[j], target, tag, error, outcount, + true ) ; + cout << "test" << myrank << " 
Test(Send RequestId " << SendRequestId[j] + << ") : target " << target << " tag " << tag << " error " << error + << " flag " << flag << endl ; + } + else { + mpi_access.status( RecvRequestId[j], source, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Recv RequestId " + << RecvRequestId[j] << ") : source " << source << " tag " << tag + << " error " << error << " outcount " << outcount + << " flag " << flag << endl ; + if ( outcount != 0 ) { + if ( (outcount != 1000*j) | + (recvbuf[j][outcount-1] != (outcount-1)) ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " outcount " + << outcount << " recvbuf " << recvbuf[j][outcount-1] << " KO" + << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " RequestId " << RecvRequestId[j] << " recvbuf " + << recvbuf[j][outcount-1] << " OK" << endl + << "===========================================================" + << endl ; + } + } + else { + cout << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " RequestId " << RecvRequestId[j] << " OK" << endl + << "===========================================================" + << endl ; + } + } + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + + mpi_access.check() ; + cout << "test" << myrank << " WaitAll" << endl ; + if ( myrank == 0 ) { + mpi_access.waitAll(maxreq, SendRequestId) ; + mpi_access.deleteRequests(maxreq, SendRequestId) ; + } + else { + mpi_access.waitAll(maxreq, RecvRequestId) ; + mpi_access.deleteRequests(maxreq, RecvRequestId) ; + } + mpi_access.check() ; + + if ( myrank == 0 ) { + int sendrequests[maxreq] ; + int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ; + sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + else { + int recvrequests[maxreq] ; + int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << 
"=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + + mpi_access.barrier() ; + + delete group ; + + // MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length_1.cxx b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length_1.cxx new file mode 100644 index 000000000..888c961d2 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_ISend_IRecv_Length_1.cxx @@ -0,0 +1,305 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length_1() { + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "test_MPI_Access_ISend_IRecv_Length_1 must be runned with 2 procs" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_MPI_Access_ISend_IRecv_Length_1" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + +#define maxreq 10 + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int SendRequestId[maxreq] ; + int RecvRequestId[maxreq] ; + int sts ; + int sendbuf[1000*(maxreq-1)] ; + int recvbuf[maxreq-1][1000*(maxreq-1)] ; + int maxirecv = 1 ; + int i ; + RecvRequestId[0] = -1 ; + for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) { + sendbuf[i] = i ; + } + for ( i = 0 ; i < maxreq ; i++ ) { + sts = MPI_SUCCESS ; + if ( myrank == 0 ) { + sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ; + cout << "test" << myrank << " ISend RequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(target) << 
endl ; + } + int j ; + for (j = 1 ; j <= i ; j++) { + int source ; + MPI_Datatype datatype ; + int outcount ; + int flag ; + if ( myrank == 0 ) { + mpi_access.test( SendRequestId[j], flag ) ; + } + else { + int MPITag ; + sts = mpi_access.IProbe( target , source, MPITag, datatype, + outcount, flag) ; + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " IProbe lenerr " << lenerr << " " + << msgerr << endl ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " IProbe KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + cout << "test" << myrank << " IProbe i/j " << i << "/" << j + << " MPITag " << MPITag << " datatype " << datatype + << " outcount " << outcount << " flag " << flag << endl ; + } + if ( flag ) { + if ( myrank == 0 ) { + int target, tag, error, outcount ; + mpi_access.status( SendRequestId[j], target, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j] + << ") : target " << target << " tag " << tag << " error " << error + << " flag " << flag << endl ; + } + else { + sts = mpi_access.IRecv( recvbuf[maxirecv], outcount, datatype, source, + RecvRequestId[maxirecv] ) ; + cout << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId " + << RecvRequestId[maxirecv] << " source " << source + << " outcount " << outcount << " tag " + << mpi_access.recvMPITag(target) << endl ; + maxirecv = maxirecv + 1 ; + } + } + else if ( myrank == 1 && i == maxreq-1 && j >= maxirecv ) { + sts = mpi_access.IRecv( recvbuf[j], 1000*j, MPI_INT, target, + RecvRequestId[j] ) ; + cout << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId " + << RecvRequestId[j] << " target " << target << " length " << 1000*j + << " tag " << mpi_access.recvMPITag(target) << endl ; + maxirecv = maxirecv + 1 ; + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " KO" << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + + mpi_access.check() ; + int flag ; + if ( myrank == 0 ) { + mpi_access.testAll( maxreq, SendRequestId, flag ) ; + cout << "test" << myrank << " TestAll SendRequest flag " << flag << endl ; + } + else { + int i ; + int source ; + int outcount ; + int flag ; + if ( maxirecv != maxreq ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " KO" << " maxirecv " << maxirecv + << " != maxreq " << maxreq << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + while ( maxirecv > 0 ) { + for ( i = 1 ; i < maxreq ; i++ ) { + cout << "test" << myrank << " IProbe : " << endl ; + sts = mpi_access.test( RecvRequestId[i] , flag ) ; + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" 
<< myrank << " flag " << flag << " lenerr " + << lenerr << " " << msgerr << " maxirecv " << maxirecv << endl ; + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + cout << "test" << myrank << " Test flag " << flag << endl ; + if ( flag ) { + int tag, error ; + mpi_access.status( RecvRequestId[i] , source , tag , error , + outcount ) ; + if ( i != 0 ) { + if ( outcount != 1000*i | + (recvbuf[i][outcount-1] != (outcount-1)) ) { + ostringstream strstream ; + strstream << "========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " KO" << " i " << i + << " recvbuf " << recvbuf[i][outcount-1] << endl + << "========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + else if ( outcount != 0 ) { + ostringstream strstream ; + strstream << "========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " KO" << " i " << i << endl + << "========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + maxirecv = maxirecv - 1 ; + } + } + } + mpi_access.testAll( maxreq, RecvRequestId, flag ) ; + cout << "test" << myrank << " TestAll RecvRequest flag " << flag << endl ; + } + mpi_access.check() ; + cout << "test" << myrank << " WaitAll :" << endl ; + if ( myrank == 0 ) { + mpi_access.waitAll( maxreq, SendRequestId ) ; + mpi_access.deleteRequests( maxreq, SendRequestId ) ; + } + else { + mpi_access.waitAll( maxreq, RecvRequestId ) ; + mpi_access.deleteRequests( maxreq, RecvRequestId ) ; + } + + if ( myrank == 0 ) { + int sendrequests[maxreq] ; + int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ; + sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + else { + int recvrequests[maxreq] ; + int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + + mpi_access.barrier() ; + + delete group 
; + + // MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Probe.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Probe.cxx new file mode 100644 index 000000000..8e3bdadda --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Probe.cxx @@ -0,0 +1,144 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_Probe() { + + cout << "test_MPI_Access_Probe" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + cout << "test_MPI_Access_Probe must be runned with 2 procs" << endl ; + CPPUNIT_FAIL("test_MPI_Access_Probe must be runned with 2 procs") ; + } + + cout << "test_MPI_Access_Probe" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int RequestId[10] ; + int sts ; + int i ; + for ( i = 0 ; i < 10 ; i++ ) { + if ( myrank == 0 ) { + sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << RequestId[i] + << endl ; + } + else { + int source, tag, outcount ; + MPI_Datatype datatype ; + sts = mpi_access.probe(target, source, tag, datatype, outcount ) ; + cout << "test" << myrank << " Probe target " << target << " source " << source + << " tag " << tag << " outcount " << outcount << endl ; + int recvbuf ; + sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i], + &outcount) ; + if ( (outcount != 1) | (recvbuf != i) ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + 
mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + int flag ; + mpi_access.testAll(10,RequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.waitAll(10,RequestId) ; + mpi_access.check() ; + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_SendRecv.cxx b/src/ParaMEDMEMTest/test_MPI_Access_SendRecv.cxx new file mode 100644 index 000000000..6055857e9 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_SendRecv.cxx @@ -0,0 +1,180 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_SendRecv() { + + cout << "MPIAccessTest::test_MPI_Access_SendRecv" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + cout << "MPIAccessTest::test_MPI_Access_SendRecv must be runned with 2 procs" << endl ; + CPPUNIT_FAIL("test_MPI_Access_SendRecv must be runned with 2 procs") ; + } + + cout << "MPIAccessTest::test_MPI_Access_SendRecv" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int sendRequestId[10] ; + int recvRequestId[10] ; + int sts ; + int i ; + for ( i = 0 ; i < 10 ; i++ ) { + int recvbuf ; + int outcount ; + if ( i & 1 ) { + outcount = -1 ; + sts = 
mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i], + &recvbuf,1,MPI_INT,target, recvRequestId[i], + &outcount) ; + } + else { + sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i], + &recvbuf,1,MPI_INT,target, recvRequestId[i]) ; +// outcount = mpi_access.MPIOutCount( recvRequestId[i] ) ; + outcount = 1 ; + } + cout << "test" << myrank << " Send sendRequestId " << sendRequestId[i] + << " tag " << mpi_access.sendMPITag(target) + << " recvRequestId " << recvRequestId[i] + << " tag " << mpi_access.recvMPITag(target) + << " outcount " << outcount << " MPIOutCount " + << mpi_access.MPIOutCount( recvRequestId[i] ) << endl ; + if ( (outcount != 1) | (recvbuf != i) ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + + int flag ; + mpi_access.testAll(10,sendRequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.waitAll(10,sendRequestId) ; + mpi_access.testAll(10,recvRequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.waitAll(10,recvRequestId) ; + mpi_access.check() ; + + int sendrequests[10] ; + int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + int recvrequests[10] ; + int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Send_Recv.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Send_Recv.cxx new file mode 100644 index 000000000..547df625c --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Send_Recv.cxx @@ -0,0 +1,166 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library 
is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_Send_Recv() { + + cout << "test_MPI_Access_Send_Recv" << endl ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + cout << "test_MPI_Access_Send_Recv must be runned with 2 procs" << endl ; + CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be runned with 2 procs") ; + } + + cout << "test_MPI_Access_Send_Recv" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int RequestId[10] ; + int sts ; + int i ; + for ( i = 0 ; i < 10 ; i++ ) { + if ( myrank == 0 ) { + sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ; + cout << "test" << myrank << " Send RequestId " << RequestId[i] + << " tag " << mpi_access.sendMPITag(target) << endl ; + } + else { + int recvbuf ; + int outcount ; + sts = mpi_access.recv(&recvbuf,1,MPI_INT,target, RequestId[i],&outcount) ; + //int source, tag, error, outcount ; + //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ; + cout << "test" << myrank << " Recv RequestId " << RequestId[i] + << " tag " << mpi_access.recvMPITag(target) + << " outcount " << outcount << endl ; + if ( (outcount != 1) | (recvbuf != i) ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + 
CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + int flag ; + mpi_access.testAll(10,RequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.waitAll(10,RequestId) ; + mpi_access.check() ; + + if ( myrank == 0 ) { + int sendrequests[10] ; + int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + else { + int recvrequests[10] ; + int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Send_Recv_Length.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Send_Recv_Length.cxx new file mode 100644 index 000000000..3e034b661 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Send_Recv_Length.cxx @@ -0,0 +1,190 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_Send_Recv_Length() { + + cout << "test_MPI_Access_Send_Recv_Length" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "test_MPI_Access_Send_Recv_Length must be runned with 2 procs" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_MPI_Access_Send_Recv_Length" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + + if ( myrank >= 2 ) { + mpi_access.barrier() ; + delete group ; + return ; + } + + int target = 1 - myrank ; + int RequestId[10] ; + int sendbuf[9000] ; + int recvbuf[9000] ; + bool recvbufok ; + int sts ; + int i , j ; + for ( i = 0 ; i < 9000 ; i++ ) { + sendbuf[i] = i ; + } + for ( i = 0 ; i < 10 ; i++ ) { + if ( myrank == 0 ) { + sts = mpi_access.send( sendbuf, 1000*i, MPI_INT, target, RequestId[i] ) ; + cout << "test" << myrank << " Send RequestId " << RequestId[i] + << " tag " << mpi_access.sendMPITag(target) << endl ; + } + else { + sts = MPI_SUCCESS ; + RequestId[i] = -1 ; + int outcount = 0 ; + if ( i != 0 ) { + sts = mpi_access.recv( recvbuf,1000*i+1,MPI_INT,target, RequestId[i], + &outcount ) ; + } + //int source, tag, error, outcount ; + //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ; + cout << "test" << myrank << " Recv RequestId " << RequestId[i] + << " tag " << mpi_access.recvMPITag(target) + << " outcount " << outcount << endl ; + recvbufok = true ; + for ( j = 0 ; j < outcount ; j++ ) { + if ( recvbuf[j] != j ) { + cout << "test" << myrank << " recvbuf[ " << j << " ] = " << recvbuf[j] + << endl ; + recvbufok = false ; + break ; + } + } + if ( (outcount != 1000*i) | !recvbufok ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " recvbuf " << recvbuf << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; 
+ cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.check() ; + } + int flag ; + mpi_access.testAll(10,RequestId,flag) ; + if ( !flag ) { + ostringstream strstream ; + strstream << "test" << myrank << " flag " << flag << " KO" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + mpi_access.waitAll(10,RequestId) ; + mpi_access.check() ; + + if ( myrank == 0 ) { + int sendrequests[10] ; + int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + else { + int recvrequests[10] ; + int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + } + + mpi_access.barrier() ; + + delete group ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Time.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Time.cxx new file mode 100644 index 000000000..d8e89e555 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Time.cxx @@ -0,0 +1,290 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void MPIAccessTest::test_MPI_Access_Time() { + + cout << "test_MPI_Access_Time" << endl ; + + // MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "test_MPI_Access_Time must be runned with 2 procs" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + + cout << "test_MPI_Access_Time" << myrank << endl ; + + ParaMEDMEM::CommInterface interface ; + + ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ; + + ParaMEDMEM::MPIAccess mpi_access( group ) ; + +#define maxreq 10 + + if ( myrank >= 2 ) { + cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ; + mpi_access.barrier() ; + cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ; + delete group ; + cout << "test_MPI_Access_Time" << myrank << " OK" << endl ; + return ; + } + + int target = 1 - myrank ; + int SendTimeRequestId[maxreq] ; + int RecvTimeRequestId[maxreq] ; + int SendRequestId[maxreq] ; + int RecvRequestId[maxreq] ; + int sts ; + int sendbuf[maxreq] ; + int recvbuf[maxreq] ; + int i = 0 ; + ParaMEDMEM::TimeMessage aSendTimeMsg[maxreq] ; + ParaMEDMEM::TimeMessage aRecvTimeMsg[maxreq] ; + double t ; + double dt = 1. ; + double maxt = 10. 
; + for ( t = 0 ; t < maxt ; t = t+dt ) { + if ( myrank == 0 ) { + aSendTimeMsg[i].time = t ; + aSendTimeMsg[i].deltatime = dt ; + //aSendTimeMsg[i].maxtime = maxt ; + //sts = mpi_access.ISend( &aSendTimeMsg , mpi_access.timeExtent() , + sts = mpi_access.ISend( &aSendTimeMsg[i] , 1 , + mpi_access.timeType() , target , + SendTimeRequestId[i]) ; + cout << "test" << myrank << " ISend RequestId " << SendTimeRequestId[i] + << " tag " << mpi_access.sendMPITag(target) << endl ; + sendbuf[i] = i ; + sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ; + cout << "test" << myrank << " ISend RequestId " << SendRequestId[i] + << " tag " << mpi_access.sendMPITag(target) << endl ; + } + else { + //sts = mpi_access.IRecv( &aRecvTimeMsg , mpi_access.timeExtent() , + sts = mpi_access.IRecv( &aRecvTimeMsg[i] , 1 , + mpi_access.timeType() , target , + RecvTimeRequestId[i]) ; + cout << "test" << myrank << " IRecv RequestId " << RecvTimeRequestId[i] + << " tag " << mpi_access.recvMPITag(target) << endl ; + sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ; + cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i] + << " tag " << mpi_access.recvMPITag(target) << endl ; + } + int j ; + for (j = 0 ; j <= i ; j++) { + int flag ; + if ( myrank == 0 ) { + mpi_access.test( SendTimeRequestId[j], flag ) ; + } + else { + mpi_access.test( RecvTimeRequestId[j], flag ) ; + } + if ( flag ) { + int target,source, tag, error, outcount ; + if ( myrank == 0 ) { + mpi_access.status( SendTimeRequestId[j], target, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Send TimeRequestId " << SendTimeRequestId[j] + << ") : target " << target << " tag " << tag << " error " << error + << " flag " << flag << aSendTimeMsg[j] << endl ; + } + else { + mpi_access.status( RecvTimeRequestId[j], source, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Recv TimeRequestId " + << RecvTimeRequestId[j] << ") : source " << source << " tag " << tag + << " error " << error << " outcount " << outcount + << " flag " << flag << aRecvTimeMsg[j] << endl ; + if ( (outcount != 1) | (aRecvTimeMsg[j].time != j) ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount << " KO" + << " RecvTimeRequestId " << RecvTimeRequestId[j] << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " RecvTimeRequestId " << RecvTimeRequestId[j] << " OK" << endl + << "===========================================================" + << endl ; + } + } + } + if ( myrank == 0 ) { + mpi_access.test( SendRequestId[j], flag ) ; + } + else { + mpi_access.test( RecvRequestId[j], flag ) ; + } + if ( flag ) { + int target,source, tag, error, outcount ; + if ( myrank == 0 ) { + mpi_access.status( SendRequestId[j], target, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j] + << ") : target " << target << " tag " << tag << " error " << error + << " flag " << flag << endl ; + } + else { + mpi_access.status( RecvRequestId[j], source, tag, error, outcount, + true ) ; + cout << "test" << myrank << " Test(Recv RequestId " + << RecvRequestId[j] << ") : source " << source << " tag " << tag + << " error 
" << error << " outcount " << outcount + << " flag " << flag << endl ; + if ( (outcount != 1) | (recvbuf[j] != j) ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " outcount " + << outcount << " recvbuf " << recvbuf[j] << " KO" << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "===========================================================" + << endl << "test" << myrank << " outcount " << outcount + << " RequestId " << RecvRequestId[j] << " OK" << endl + << "===========================================================" + << endl ; + } + } + } + } + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + mpi_access.errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + + if ( sts != MPI_SUCCESS ) { + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + i = i + 1 ; + } + + mpi_access.check() ; + if ( myrank == 0 ) { + mpi_access.waitAll(maxreq, SendTimeRequestId) ; + mpi_access.deleteRequests(maxreq, SendTimeRequestId) ; + mpi_access.waitAll(maxreq, SendRequestId) ; + mpi_access.deleteRequests(maxreq, SendRequestId) ; + } + else { + mpi_access.waitAll(maxreq, RecvTimeRequestId) ; + mpi_access.deleteRequests(maxreq, RecvTimeRequestId) ; + mpi_access.waitAll(maxreq, RecvRequestId) ; + mpi_access.deleteRequests(maxreq, RecvRequestId) ; + } + mpi_access.check() ; + + if ( myrank == 0 ) { + int sendrequests[2*maxreq] ; + int sendreqsize = mpi_access.sendRequestIds( target , 2*maxreq , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + else { + int recvrequests[2*maxreq] ; + int recvreqsize = mpi_access.sendRequestIds( target , 2*maxreq , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + + cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ; + mpi_access.barrier() ; + cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ; + + delete group ; + + // MPI_Finalize(); + + cout << 
"test_MPI_Access_Time" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_MPI_Access_Time_0.cxx b/src/ParaMEDMEMTest/test_MPI_Access_Time_0.cxx new file mode 100644 index 000000000..087814673 --- /dev/null +++ b/src/ParaMEDMEMTest/test_MPI_Access_Time_0.cxx @@ -0,0 +1,471 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include +#include + +#include "MPIAccessTest.hxx" +#include + +//#include "CommInterface.hxx" +//#include "ProcessorGroup.hxx" +//#include "MPIProcessorGroup.hxx" +#include "MPIAccess.hxx" + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +using namespace std; +using namespace ParaMEDMEM; + +void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) { + char msgerr[MPI_MAX_ERROR_STRING] ; + int lenerr ; + if ( sts != MPI_SUCCESS ) { + mpi_access->errorString(sts, msgerr, &lenerr) ; + cout << "test" << myrank << " lenerr " << lenerr << " " + << msgerr << endl ; + ostringstream strstream ; + strstream << "===========================================================" + << "test" << myrank << " KO" + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } +return ; +} + +void MPIAccessTest::test_MPI_Access_Time_0() { + + cout << "test_MPI_Access_Time_0" << endl ; + +// MPI_Init(&argc, &argv) ; + + int size ; + int myrank ; + MPI_Comm_size(MPI_COMM_WORLD,&size) ; + MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ; + + if ( size < 2 ) { + ostringstream strstream ; + strstream << "usage :" << endl + << "mpirun -np test_MPI_Access_Time_0" <= 2 ) { + cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ; + mpi_access->barrier() ; + cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ; + cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ; + mpi_access->barrier() ; + cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ; + delete group ; + delete mpi_access ; + cout << "test_MPI_Access_Time" << myrank << " OK" << endl ; + return ; + } + + int target = 1 - myrank ; + int SendTimeRequestId[maxreq] ; + int RecvTimeRequestId[maxreq] ; + int SendRequestId[maxreq] ; + int RecvRequestId[maxreq] ; + int sts ; + int sendbuf[maxreq] ; + int recvbuf[maxreq] ; + ParaMEDMEM::TimeMessage aSendTimeMsg[maxreq] ; + int lasttime = -1 ; + ParaMEDMEM::TimeMessage RecvTimeMessages[maxreq+1] ; + 
ParaMEDMEM::TimeMessage *aRecvTimeMsg = &RecvTimeMessages[1] ; +// mpi_access->Trace() ; + int istep = 0 ; + for ( t = 0 ; t < maxt ; t = t+dt[myrank] ) { + cout << "test" << myrank << " ==========================TIME " << t + << " ==========================" << endl ; + if ( myrank == 0 ) { + aSendTimeMsg[istep].time = t ; + aSendTimeMsg[istep].deltatime = dt[myrank] ; + //aSendTimeMsg[istep].maxtime = maxt ; + if ( t+dt[myrank] >= maxt ) { + aSendTimeMsg[istep].deltatime = 0 ; + } + sts = mpi_access->ISend( &aSendTimeMsg[istep] , 1 , + mpi_access->timeType() , target , + SendTimeRequestId[istep]) ; + cout << "test" << myrank << " ISend TimeRequestId " << SendTimeRequestId[istep] + << " tag " << mpi_access->MPITag(SendTimeRequestId[istep]) << endl ; + chksts( sts , myrank , mpi_access ) ; + sendbuf[istep] = istep ; + sts = mpi_access->ISend(&sendbuf[istep],1,MPI_INT,target, SendRequestId[istep]) ; + cout << "test" << myrank << " ISend Data RequestId " << SendRequestId[istep] + << " tag " << mpi_access->MPITag(SendRequestId[istep]) << endl ; + chksts( sts , myrank , mpi_access ) ; +//CheckSent +//========= + int sendrequests[2*maxreq] ; + int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , + sendrequests ) ; + int j , flag ; + for ( j = 0 ; j < sendreqsize ; j++ ) { + sts = mpi_access->test( sendrequests[j] , flag ) ; + chksts( sts , myrank , mpi_access ) ; + if ( flag ) { + mpi_access->deleteRequest( sendrequests[j] ) ; + cout << "test" << myrank << " " << j << ". " << sendrequests[j] + << " sendrequest deleted" << endl ; + } + } + } + else { +//InitRecv +//======== + if ( t == 0 ) { + aRecvTimeMsg[lasttime].time = 0 ; + sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 , + mpi_access->timeType() , + target , RecvTimeRequestId[lasttime+1]) ; + cout << "test" << myrank << " t == 0 IRecv TimeRequestId " + << RecvTimeRequestId[lasttime+1] + << " MPITag " << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] ) + << " MPICompleted " + << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] ) << endl ; + chksts( sts , myrank , mpi_access ) ; + } + else { + cout << "test" << myrank << " t # 0 lasttime " << lasttime << endl ; +//InitialOutTime +//============== + bool outtime = false ; + if ( lasttime != -1 ) { + if ( t <= aRecvTimeMsg[lasttime-1].time ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " t " << t << " <= " + << "aRecvTimeMsg[ " << lasttime << "-1 ].time " + << aRecvTimeMsg[lasttime-1].time << " KO" << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "===========================================================" + << endl << "test" << myrank << " t " << t << " > " + << "aRecvTimeMsg[ " << lasttime << "-1 ].time " + << aRecvTimeMsg[lasttime-1].time << " OK" << endl + << "===========================================================" + << endl ; + } + //outtime = ((aRecvTimeMsg[lasttime].time + + // aRecvTimeMsg[lasttime].deltatime) >= + // aRecvTimeMsg[lasttime].maxtime) ; + outtime = aRecvTimeMsg[lasttime].deltatime == 0 ; + } +// CheckRecv - CheckTime +// On a lasttime tel que : +// aRecvTimeMsg[ lasttime-1 ].time < T(i-1) <= aRecvTimeMsg[ lasttime ].time +// On cherche lasttime tel que : +// aRecvTimeMsg[ lasttime-1 ].time < T(i) <= aRecvTimeMsg[ lasttime ].time + if ( t <= aRecvTimeMsg[lasttime].time ) { + outtime = false ; + } + cout << 
"test" << myrank << " while outtime( " << outtime << " && t " << t + << " > aRecvTimeMsg[ " << lasttime << " ] " + << aRecvTimeMsg[lasttime].time << " )" << endl ; + while ( !outtime && (t > aRecvTimeMsg[lasttime].time) ) { + lasttime += 1 ; +//TimeMessage +//=========== + sts = mpi_access->wait( RecvTimeRequestId[lasttime] ) ; + chksts( sts , myrank , mpi_access ) ; + cout << "test" << myrank << " Wait done RecvTimeRequestId " + << RecvTimeRequestId[lasttime] << " lasttime " << lasttime + << " tag " << mpi_access->MPITag(RecvTimeRequestId[lasttime]) + << aRecvTimeMsg[lasttime] << endl ; + if ( lasttime == 0 ) { + aRecvTimeMsg[lasttime-1] = aRecvTimeMsg[lasttime] ; + } + mpi_access->deleteRequest( RecvTimeRequestId[lasttime] ) ; + + double deltatime = aRecvTimeMsg[lasttime].deltatime ; + //double maxtime = aRecvTimeMsg[lasttime].maxtime ; + double nexttime = aRecvTimeMsg[lasttime].time + deltatime ; + cout << "test" << myrank << " t " << t << " lasttime " << lasttime + << " deltatime " << deltatime + << " nexttime " << nexttime << endl ; + //if ( nexttime < maxtime && t > nexttime ) { + if ( deltatime != 0 && t > nexttime ) { +//CheckRecv : +//========= + //while ( nexttime < maxtime && t > nexttime ) { + while ( deltatime != 0 && t > nexttime ) { + int source, MPITag, outcount ; + MPI_Datatype datatype ; + sts = mpi_access->probe( target , source, MPITag, datatype, + outcount ) ; + chksts( sts , myrank , mpi_access ) ; +// Cancel DataMessages jusqu'a un TimeMessage + int cancelflag ; + while ( !mpi_access->isTimeMessage( MPITag ) ) { + sts = mpi_access->cancel( source, MPITag, datatype, outcount , + //sts = mpi_access->cancel( source, datatype, outcount , + //RecvRequestId[lasttime] , + cancelflag ) ; + cout << "test" << myrank << " Recv TO CANCEL RequestId " + << RecvRequestId[lasttime] + << " tag " << mpi_access->recvMPITag( target ) + << " cancelflag " << cancelflag << endl ; + chksts( sts , myrank , mpi_access ) ; + sts = mpi_access->probe( target , source, MPITag, datatype, + outcount ) ; + chksts( sts , myrank , mpi_access ) ; + } +//On peut avancer en temps + nexttime += deltatime ; + //if ( nexttime < maxtime && t > nexttime ) { + if ( deltatime != 0 && t > nexttime ) { +// Cancel du TimeMessage + sts = mpi_access->cancel( source, MPITag, datatype, outcount , + //sts = mpi_access->cancel( source, datatype, outcount , + //RecvRequestId[lasttime] , + cancelflag ) ; + cout << "test" << myrank << " Time TO CANCEL RequestId " + << RecvRequestId[lasttime] + << " tag " << mpi_access->recvMPITag( target ) + << " cancelflag " << cancelflag << endl ; + chksts( sts , myrank , mpi_access ) ; + } + } + } + else { +//DoRecv +//====== + cout << "test" << myrank << " Recv target " << target + << " lasttime " << lasttime + << " lasttime-1 " << aRecvTimeMsg[lasttime-1] + << " lasttime " << aRecvTimeMsg[lasttime] + << endl ; + sts = mpi_access->recv(&recvbuf[lasttime],1,MPI_INT,target, + RecvRequestId[lasttime]) ; + cout << "test" << myrank << " Recv RequestId " + << RecvRequestId[lasttime] + << " tag " << mpi_access->recvMPITag( target ) + << endl ; + chksts( sts , myrank , mpi_access ) ; + } + //outtime = ((aRecvTimeMsg[lasttime].time + + // aRecvTimeMsg[lasttime].deltatime) >= + // aRecvTimeMsg[lasttime].maxtime) ; + outtime = aRecvTimeMsg[lasttime].deltatime == 0 ; + if ( !outtime ) { +// Une lecture asynchrone d'un message temps a l'avance + sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 , + mpi_access->timeType() , target , + RecvTimeRequestId[lasttime+1]) ; + cout << "test" << myrank 
<< " IRecv TimeRequestId " + << RecvTimeRequestId[lasttime+1] << " MPITag " + << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] ) + << " MPICompleted " + << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] ) + << endl ; + chksts( sts , myrank , mpi_access ) ; + } + else if ( t <= aRecvTimeMsg[lasttime].time ) { + outtime = false ; + } + } + + //printf("DEBUG t %.15f Msg[lasttime-1] %.15f Msg[lasttime] %.15f \n",t, + // aRecvTimeMsg[lasttime-1].time,aRecvTimeMsg[lasttime].time) ; + if ( ((t <= aRecvTimeMsg[lasttime-1].time) || + (t > aRecvTimeMsg[lasttime].time)) && !outtime ) { + ostringstream strstream ; + strstream << "===========================================================" + << endl << "test" << myrank << " t " << t << " <= " + << "aRecvTimeMsg[ " << lasttime << "-1 ].time " + << aRecvTimeMsg[lasttime-1].time << " ou t " << t << " > " + << "aRecvTimeMsg[ " << lasttime << " ].time " + << aRecvTimeMsg[lasttime].time << endl + << " ou bien outtime " << outtime << " KO RequestTimeIds " + << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime] + << " RequestIds " + << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl + << "===========================================================" + << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "===========================================================" + << endl << "test" << myrank + << " aRecvTimeMsg[ " << lasttime << "-1 ].time " + << aRecvTimeMsg[lasttime-1].time << " < t " << t << " <= " + << "aRecvTimeMsg[ " << lasttime << " ].time " + << aRecvTimeMsg[lasttime].time << endl + << " ou bien outtime " << outtime << " OK RequestTimeIds " + << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime] + << " RequestIds " + << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl + << "===========================================================" + << endl ; + } + } + } + chksts( sts , myrank , mpi_access ) ; + istep = istep + 1 ; + } + + cout << "test" << myrank << " Barrier :" << endl ; + mpi_access->barrier() ; + + mpi_access->check() ; + + if ( myrank == 0 ) { +//CheckFinalSent +//============== + cout << "test" << myrank << " CheckFinalSent :" << endl ; + int sendrequests[2*maxreq] ; + int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ; + int j ; + for ( j = 0 ; j < sendreqsize ; j++ ) { + sts = mpi_access->wait( sendrequests[j] ) ; + chksts( sts , myrank , mpi_access ) ; + mpi_access->deleteRequest( sendrequests[j] ) ; + cout << "test" << myrank << " " << j << ". " << sendrequests[j] << " deleted" + << endl ; + } + } + else { + cout << "test" << myrank << " CheckFinalRecv :" << endl ; + int recvrequests[2*maxreq] ; + int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ; + int cancelflag ; + int j ; + for ( j = 0 ; j < recvreqsize ; j++ ) { + sts = mpi_access->cancel( recvrequests[j] , cancelflag ) ; + chksts( sts , myrank , mpi_access ) ; + mpi_access->deleteRequest( recvrequests[j] ) ; + cout << "test" << myrank << " " << j << ". 
" << recvrequests[j] << " deleted" + << " cancelflag " << cancelflag << endl ; + } + int source, MPITag, outcount , flag ; + MPI_Datatype datatype ; + sts = mpi_access->IProbe( target , source, MPITag, datatype, + outcount , flag ) ; + chksts( sts , myrank , mpi_access ) ; + while ( flag ) { + sts = mpi_access->cancel( source, MPITag, datatype, outcount , + //sts = mpi_access->cancel( source, datatype, outcount , + //RecvRequestId[lasttime] , + cancelflag ) ; + cout << "test" << myrank << " TO CANCEL RequestId " + << RecvRequestId[lasttime] + << " tag " << mpi_access->recvMPITag( target ) + << " cancelflag " << cancelflag << endl ; + chksts( sts , myrank , mpi_access ) ; + sts = mpi_access->IProbe( target , source, MPITag, datatype, + outcount , flag ) ; + chksts( sts , myrank , mpi_access ) ; + } + } + mpi_access->check() ; + + if ( myrank == 0 ) { + int sendrequests[2*maxreq] ; + int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ; + if ( sendreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + else { + int recvrequests[2*maxreq] ; + int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ; + if ( recvreqsize != 0 ) { + ostringstream strstream ; + strstream << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl + << "=========================================================" << endl ; + cout << strstream.str() << endl ; + CPPUNIT_FAIL( strstream.str() ) ; + } + else { + cout << "=========================================================" << endl + << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl + << "=========================================================" << endl ; + } + } + + int i ; + for ( i = 0 ; i <= lasttime ; i++ ) { + cout << "test" << myrank << " " << i << ". RecvTimeMsg " + << aRecvTimeMsg[i].time << " recvbuf " << recvbuf[i] << endl ; + } + + cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ; + mpi_access->barrier() ; + cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ; + + delete group ; + delete mpi_access ; + +// MPI_Finalize(); + + cout << "test" << myrank << " OK" << endl ; + + return ; +} + + + + diff --git a/src/ParaMEDMEMTest/test_perf.cxx b/src/ParaMEDMEMTest/test_perf.cxx new file mode 100644 index 000000000..2250280c0 --- /dev/null +++ b/src/ParaMEDMEMTest/test_perf.cxx @@ -0,0 +1,337 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include +#include +#include +#include "ParaMEDMEMTest.hxx" +#include + +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "DEC.hxx" +#include "MxN_Mapping.hxx" +#include "InterpKernelDEC.hxx" +#include "ParaMESH.hxx" +#include "ParaFIELD.hxx" +#include "ComponentTopology.hxx" +#include "ICoCoMEDField.hxx" +#include "MEDLoader.hxx" + +#include +#include + +// use this define to enable lines, execution of which leads to Segmentation Fault +#define ENABLE_FAULTS + +// use this define to enable CPPUNIT asserts and fails, showing bugs +#define ENABLE_FORCED_FAILURES + +#ifndef CLK_TCK +#include +#define CLK_TCK sysconf(_SC_CLK_TCK); +#endif + +using namespace std; +using namespace ParaMEDMEM; + +void testInterpKernelDEC_2D(const string& filename1, const string& meshname1, + const string& filename2, const string& meshname2, + int nproc_source, double epsilon, bool tri, bool all); +void get_time( float *telps, float *tuser, float *tsys, float *tcpu ); + +int main(int argc, char *argv[]) +{ + string filename1, filename2; + string meshname1, meshname2; + int nproc_source=1, rank; + double epsilon=1.e-6; + int count=0; + bool tri=false; + bool all=false; + + MPI_Init(&argc,&argv); + + for(int i=1;i self_procs; + set procs_source; + set procs_target; + + for (int i=0; icontainsMyRank()){ + string master = filename_xml1; + + ostringstream strstream; + if( nproc_source == 1 ) + strstream <getNumberOfCells(); + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + + dec.attachLocalField(icocofield); + } + + //loading the geometry for the target group + if (target_group->containsMyRank()){ + string master= filename_xml2; + ostringstream strstream; + if( (size-nproc_source) == 1 ) + strstream << master<<".med"; + else + strstream << master<<(rank-nproc_source+1)<<".med"; + ostringstream meshname ; + if( (size-nproc_source) == 1 ) + meshname<< meshname2; + else + meshname<< meshname2<<"_"<incrRef(); + + paramesh=new ParaMESH (mesh,*target_group,"target mesh"); + ParaMEDMEM::ComponentTopology comptopo; + parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo); + + int nb_local=mesh->getNumberOfCells(); + double *value=parafield->getField()->getArray()->getPointer(); + for(int ielem=0; ielemgetField()); + + dec.attachLocalField(icocofield); + } + + + //attaching a DEC to the source group + double field_before_int; + double field_after_int; + + if (source_group->containsMyRank()){ + field_before_int = parafield->getVolumeIntegral(0,true); + get_time( &telps, &tcpu_u, &tcpu_s, &tcpu ); + dec.synchronize(); + get_time( &telps, &tcpu_u, &tcpu_s, &tcpu ); + if( rank == 0 ) + cout << "SYNCHRONIZE : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl; + cout<<"DEC usage"< + +using namespace ParaMEDMEM; +using namespace ICoCo; + +enum mpi_constants { mpi_comm_world, mpi_comm_self, mpi_double, mpi_int }; +%} + +%include "CommInterface.hxx" +%include "ProcessorGroup.hxx" +%include "DECOptions.hxx" +%include "ParaMESH.hxx" +%include "ParaFIELD.hxx" +%include 
"MPIProcessorGroup.hxx" +%include "ComponentTopology.hxx" +%include "DEC.hxx" +%include "InterpKernelDEC.hxx" +%include "StructuredCoincidentDEC.hxx" + +%rename(ICoCoMEDField) ICoCo::MEDField; +%include "ICoCoMEDField.hxx" + +%nodefaultctor; + +/* This object can be used only if MED_ENABLE_FVM is defined*/ +#ifdef MED_ENABLE_FVM +class NonCoincidentDEC : public DEC +{ +public: + NonCoincidentDEC(ProcessorGroup& source, ProcessorGroup& target); +}; +#endif + +%extend ParaMEDMEM::ParaMESH +{ + PyObject *getGlobalNumberingCell2() const + { + const int *tmp=self->getGlobalNumberingCell(); + int size=self->getCellMesh()->getNumberOfCells(); + PyObject *ret=PyList_New(size); + for(int i=0;igetGlobalNumberingFace(); + int size=self->getFaceMesh()->getNumberOfCells(); + PyObject *ret=PyList_New(size); + for(int i=0;igetGlobalNumberingNode(); + int size=self->getCellMesh()->getNumberOfNodes(); + PyObject *ret=PyList_New(size); + for(int i=0;i MPI_COMM_WORLD and MPI_COMM_SELF +%typemap(in) MPI_Comm +{ + switch (PyInt_AsLong($input)) + { + case mpi_comm_world: $1 = MPI_COMM_WORLD; break; + case mpi_comm_self: $1 = MPI_COMM_SELF; break; + default: + PyErr_SetString(PyExc_TypeError,"unexpected value of MPI_Comm"); + return NULL; + } +} +// Map mpi_double and mpi_int -> MPI_DOUBLE and MPI_INT +%typemap(in) MPI_Datatype +{ + switch (PyInt_AsLong($input)) + { + case mpi_double: $1 = MPI_DOUBLE; break; + case mpi_int: $1 = MPI_INT; break; + default: + PyErr_SetString(PyExc_TypeError,"unexpected value of MPI_Datatype"); + return NULL; + } +} +// The following code gets inserted into the result python file: +// create needed python symbols +%pythoncode %{ +MPI_COMM_WORLD = mpi_comm_world +MPI_COMM_SELF = mpi_comm_self +MPI_DOUBLE = mpi_double +MPI_INT = mpi_int +%} +//============================================================================================= + +// ============== +// MPI_Comm_size +// ============== +%inline %{ PyObject* MPI_Comm_size(MPI_Comm comm) + { + int res = 0; + int err = MPI_Comm_size(comm, &res); + if ( err != MPI_SUCCESS ) + { + PyErr_SetString(PyExc_RuntimeError,"Erorr in MPI_Comm_size()"); + return NULL; + } + return PyInt_FromLong( res ); + } %} + +// ============== +// MPI_Comm_rank +// ============== +%inline %{ PyObject* MPI_Comm_rank(MPI_Comm comm) + { + int res = 0; + int err = MPI_Comm_rank(comm, &res); + if ( err != MPI_SUCCESS ) + { + PyErr_SetString(PyExc_RuntimeError,"Erorr in MPI_Comm_rank()"); + return NULL; + } + return PyInt_FromLong( res ); + } + %} + +int MPI_Init(int *argc, char ***argv ); +int MPI_Barrier(MPI_Comm comm); +int MPI_Finalize(); + +// ========== +// MPI_Bcast +// ========== + +%inline %{ PyObject* MPI_Bcast(PyObject* buffer, int nb, MPI_Datatype type, int root, MPI_Comm c) + { + // buffer must be a list + if (!PyList_Check(buffer)) + { + PyErr_SetString(PyExc_TypeError, "buffer is expected to be a list"); + return NULL; + } + // check list size + int aSize = PyList_Size(buffer); + if ( aSize != nb ) + { + std::ostringstream stream; stream << "buffer is expected to be of size " << nb; + PyErr_SetString(PyExc_ValueError, stream.str().c_str()); + return NULL; + } + // allocate and fill a buffer + void* aBuf = 0; + int* intBuf = 0; + double* dblBuf = 0; + if ( type == MPI_DOUBLE ) + { + aBuf = (void*) ( dblBuf = new double[ nb ] ); + for ( int i = 0; i < aSize; ++i ) + dblBuf[i] = PyFloat_AS_DOUBLE( PyList_GetItem( buffer, i )); + } + else if ( type == MPI_INT ) + { + aBuf = (void*) ( intBuf = new int[ nb ] ); + for ( int i = 0; i < aSize; ++i ) + 
intBuf[i] = int( PyInt_AS_LONG( PyList_GetItem( buffer, i ))); + } + else + { + PyErr_SetString(PyExc_TypeError, "Only MPI_DOUBLE and MPI_INT supported"); + return NULL; + } + // call MPI_Bcast + int err = MPI_Bcast(aBuf, nb, type, root, c); + // treat error + if ( err != MPI_SUCCESS ) + { + PyErr_SetString(PyExc_RuntimeError,"Erorr in MPI_Bcast()"); + delete [] intBuf; delete [] dblBuf; + return NULL; + } + // put recieved data into the list + int pyerr = 0; + if ( type == MPI_DOUBLE ) + { + for ( int i = 0; i < aSize && !pyerr; ++i ) + pyerr = PyList_SetItem(buffer, i, PyFloat_FromDouble( dblBuf[i] )); + delete [] dblBuf; + } + else + { + for ( int i = 0; i < aSize && !pyerr; ++i ) + pyerr = PyList_SetItem(buffer, i, PyInt_FromLong( intBuf[i] )); + delete [] intBuf; + } + if ( pyerr ) + { + PyErr_SetString(PyExc_RuntimeError, "Error of PyList_SetItem()"); + return NULL; + } + return PyInt_FromLong( err ); + + } + %} + +%pythoncode %{ +def ParaMEDMEMDataArrayDoublenew(cls,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDouble____new___(cls,args) +def ParaMEDMEMDataArrayDoubleIadd(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDouble____iadd___(self, self, *args) +def ParaMEDMEMDataArrayDoubleIsub(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDouble____isub___(self, self, *args) +def ParaMEDMEMDataArrayDoubleImul(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDouble____imul___(self, self, *args) +def ParaMEDMEMDataArrayDoubleIdiv(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDouble____idiv___(self, self, *args) +def ParaMEDMEMDataArrayDoubleIpow(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDouble____ipow___(self, self, *args) +def ParaMEDMEMDataArrayDoubleTupleIadd(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDoubleTuple____iadd___(self, self, *args) +def ParaMEDMEMDataArrayDoubleTupleIsub(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDoubleTuple____isub___(self, self, *args) +def ParaMEDMEMDataArrayDoubleTupleImul(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDoubleTuple____imul___(self, self, *args) +def ParaMEDMEMDataArrayDoubleTupleIdiv(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayDoubleTuple____idiv___(self, self, *args) +def ParaMEDMEMMEDCouplingFieldDoublenew(cls,*args): + import _ParaMEDMEM + return _ParaMEDMEM.MEDCouplingFieldDouble____new___(cls,args) +def ParaMEDMEMMEDCouplingFieldDoubleIadd(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.MEDCouplingFieldDouble____iadd___(self, self, *args) +def ParaMEDMEMMEDCouplingFieldDoubleIsub(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.MEDCouplingFieldDouble____isub___(self, self, *args) +def ParaMEDMEMMEDCouplingFieldDoubleImul(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.MEDCouplingFieldDouble____imul___(self, self, *args) +def ParaMEDMEMMEDCouplingFieldDoubleIdiv(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.MEDCouplingFieldDouble____idiv___(self, self, *args) +def ParaMEDMEMMEDCouplingFieldDoubleIpow(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.MEDCouplingFieldDouble____ipow___(self, self, *args) +def ParaMEDMEMDataArrayIntnew(cls,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayInt____new___(cls,args) +def ParaMEDMEMDataArrayIntIadd(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayInt____iadd___(self, self, *args) +def ParaMEDMEMDataArrayIntIsub(self,*args): + import _ParaMEDMEM + return 
_ParaMEDMEM.DataArrayInt____isub___(self, self, *args) +def ParaMEDMEMDataArrayIntImul(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayInt____imul___(self, self, *args) +def ParaMEDMEMDataArrayIntIdiv(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayInt____idiv___(self, self, *args) +def ParaMEDMEMDataArrayIntImod(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayInt____imod___(self, self, *args) +def ParaMEDMEMDataArrayIntIpow(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayInt____ipow___(self, self, *args) +def ParaMEDMEMDataArrayIntTupleIadd(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayIntTuple____iadd___(self, self, *args) +def ParaMEDMEMDataArrayIntTupleIsub(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayIntTuple____isub___(self, self, *args) +def ParaMEDMEMDataArrayIntTupleImul(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayIntTuple____imul___(self, self, *args) +def ParaMEDMEMDataArrayIntTupleIdiv(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayIntTuple____idiv___(self, self, *args) +def ParaMEDMEMDataArrayIntTupleImod(self,*args): + import _ParaMEDMEM + return _ParaMEDMEM.DataArrayIntTuple____imod___(self, self, *args) +%} + +%include "MEDCouplingFinalize.i" diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEM.typemap b/src/ParaMEDMEM_Swig/ParaMEDMEM.typemap new file mode 100644 index 000000000..80eb7259c --- /dev/null +++ b/src/ParaMEDMEM_Swig/ParaMEDMEM.typemap @@ -0,0 +1,84 @@ +// Copyright (C) 2007-2015 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +%include std_set.i +%include std_string.i + +%template() std::set; + +// Creates "int *argc, char ***argv" parameters from input list +%typemap(in) (int *argc, char ***argv) { + int i; + if (!PyList_Check($input)) { + PyErr_SetString(PyExc_ValueError, "Expecting a list"); + return NULL; + } + int aSize = PyList_Size($input); + $1 = &aSize; + char** aStrs = (char **) malloc((aSize+1)*sizeof(char *)); + for (i = 0; i < aSize; i++) { + PyObject *s = PyList_GetItem($input,i); + if (!PyString_Check(s)) { + free(aStrs); + PyErr_SetString(PyExc_ValueError, "List items must be strings"); + return NULL; + } + aStrs[i] = PyString_AsString(s); + } + aStrs[i] = 0; + $2 = &aStrs; +} + +%typemap(freearg) (int *argc, char ***argv) { + if ($2) free(*($2)); +} + +/* MACRO: IN typemap for std::set C++ object */ +%define TYPEMAP_INPUT_SET_BY_VALUE( TYPE ) +{ + /* typemap in for set */ + /* Check if is a list */ + if (PyList_Check($input)) + { + int size = PyList_Size($input); + std::set< TYPE > tmpSet; + + for (int i=0; i < size; i++) + { + PyObject * tmp = PyList_GetItem($input,i); + TYPE elem = PyInt_AsLong(tmp); + tmpSet.insert(elem); + } + $1 = tmpSet; + } + else + { + PyErr_SetString(PyExc_TypeError,"not a list"); + return NULL; + } +} +%enddef + +%typemap(in) std::set +{ + TYPEMAP_INPUT_SET_BY_VALUE( int ) +} +%typecheck(SWIG_TYPECHECK_POINTER) std::set { + $1 = PyList_Check($input) ? 1 : 0; +} diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py new file mode 100755 index 000000000..0d6d6cdb6 --- /dev/null +++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +# -*- coding: iso-8859-1 -*- +# Copyright (C) 2007-2015 CEA/DEN, EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# + +from ParaMEDMEM import * +import sys, os +import unittest +import math + +class ParaMEDMEMBasicsTest(unittest.TestCase): + def testInterpKernelDEC_2D(self): + MPI_Init(sys.argv) + size = MPI_Comm_size(MPI_COMM_WORLD) + rank = MPI_Comm_rank(MPI_COMM_WORLD) + if size != 5: + raise RuntimeError, "Expect MPI_COMM_WORLD size == 5" + print rank + nproc_source = 3 + procs_source = range( nproc_source ) + procs_target = range( size - nproc_source + 1, size) + + interface = CommInterface() + target_group = MPIProcessorGroup(interface, procs_target) + source_group = MPIProcessorGroup(interface, procs_source) + dec = InterpKernelDEC(source_group, target_group) + + mesh =0 + support =0 + paramesh =0 + parafield =0 + icocofield =0 + data_dir = os.environ['MED_ROOT_DIR'] + tmp_dir = os.environ['TMP'] + + if not tmp_dir or len(tmp_dir)==0: + tmp_dir = "/tmp" + pass + + filename_xml1 = os.path.join(data_dir, "share/salome/resources/med/square1_split") + filename_xml2 = os.path.join(data_dir, "share/salome/resources/med/square2_split") + + MPI_Barrier(MPI_COMM_WORLD) + if source_group.containsMyRank(): + filename = filename_xml1 + str(rank+1) + ".med" + meshname = "Mesh_2_" + str(rank+1) + mesh=MEDLoader.ReadUMeshFromFile(filename,meshname,0) + paramesh=ParaMESH(mesh,source_group,"source mesh") + comptopo = ComponentTopology() + parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo) + parafield.getField().setNature(ConservativeVolumic) + nb_local=mesh.getNumberOfCells() + value = [1.0]*nb_local + parafield.getField().setValues(value) + icocofield = ICoCoMEDField(mesh,parafield.getField()) + dec.setMethod("P0") + dec.attachLocalField(icocofield) + pass + else: + filename = filename_xml2 + str(rank - nproc_source + 1) + ".med" + meshname = "Mesh_3_" + str(rank - nproc_source + 1) + mesh=MEDLoader.ReadUMeshFromFile(filename,meshname,0) + paramesh=ParaMESH(mesh,target_group,"target mesh") + comptopo = ComponentTopology() + parafield = ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo) + parafield.getField().setNature(ConservativeVolumic) + nb_local=mesh.getNumberOfCells() + value = [0.0]*nb_local + parafield.getField().setValues(value) + icocofield = ICoCoMEDField(mesh,parafield.getField()) + dec.setMethod("P0") + dec.attachLocalField(icocofield) + pass + + if source_group.containsMyRank(): + field_before_int = parafield.getVolumeIntegral(0,True) + dec.synchronize() + dec.setForcedRenormalization(False) + dec.sendData() + dec.recvData() + field_after_int=parafield.getVolumeIntegral(0,True); + self.failUnless(math.fabs(field_after_int-field_before_int)<1e-8) + pass + else: + dec.synchronize() + dec.setForcedRenormalization(False) + dec.recvData() + dec.sendData() + pass + ## end + interface = 0 + target_group = 0 + source_group = 0 + dec = 0 + mesh =0 + support =0 + paramesh =0 + parafield =0 + icocofield =0 + MPI_Barrier(MPI_COMM_WORLD) + MPI_Finalize() + pass + pass + +unittest.main() diff --git a/src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py b/src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py new file mode 100755 index 000000000..acf78aa5e --- /dev/null +++ b/src/ParaMEDMEM_Swig/test_NonCoincidentDEC.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# -*- coding: iso-8859-1 -*- +# Copyright (C) 2007-2015 CEA/DEN, 
EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# + +from ParaMEDMEM import * +import sys, os + +MPI_Init(sys.argv) + +size = MPI_Comm_size(MPI_COMM_WORLD) +rank = MPI_Comm_rank(MPI_COMM_WORLD) +if size != 5: + raise RuntimeError, "Expect MPI_COMM_WORLD size == 5" + +nproc_source = 3 +procs_source = range( nproc_source ) +procs_target = range( size - nproc_source + 1, size) + +interface = CommInterface() + +target_group = MPIProcessorGroup(interface, procs_target) +source_group = MPIProcessorGroup(interface, procs_source) + +source_mesh= 0 +target_mesh= 0 +parasupport= 0 +mesh = 0 +support = 0 +field = 0 +paramesh = 0 +parafield = 0 +icocofield = 0 + +dec = NonCoincidentDEC(source_group, target_group) + +data_dir = os.environ['MED_ROOT_DIR'] +tmp_dir = os.environ['TMP'] +if tmp_dir == '': + tmp_dir = "/tmp" + pass + +filename_xml1 = data_dir + "/share/salome/resources/med/square1_split" +filename_xml2 = data_dir + "/share/salome/resources/med/square2_split" + +MPI_Barrier(MPI_COMM_WORLD) + +if source_group.containsMyRank(): + + filename = filename_xml1 + str(rank+1) + ".med" + meshname = "Mesh_2_" + str(rank+1) + + mesh = MESH(MED_DRIVER, filename, meshname) + support = SUPPORT(mesh, "all elements", MED_CELL) + paramesh = ParaMESH(mesh, source_group, "source mesh") + + parasupport = UnstructuredParaSUPPORT( support, source_group) + comptopo = ComponentTopology() + + parafield = ParaFIELD(parasupport, comptopo) + + nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS); + + value = [1.0]*nb_local + + parafield.getField().setValue(value) + icocofield = ICoCo_MEDField(paramesh,parafield) + dec.attachLocalField(icocofield,'P0') + pass + +if target_group.containsMyRank(): + + filename = filename_xml2 + str(rank - nproc_source + 1) + ".med" + meshname = "Mesh_3_" + str(rank - nproc_source + 1) + + mesh = MESH(MED_DRIVER, filename, meshname) + support = SUPPORT(mesh, "all elements", MED_CELL) + paramesh = ParaMESH(mesh, target_group, "target mesh") + + parasupport = UnstructuredParaSUPPORT( support, target_group) + comptopo = ComponentTopology() + parafield = ParaFIELD(parasupport, comptopo) + + nb_local = support.getNumberOfElements(MED_ALL_ELEMENTS) + value = [0.0]*nb_local + + parafield.getField().setValue(value) + icocofield = ICoCo_MEDField(paramesh,parafield) + + dec.attachLocalField(icocofield, 'P0') + pass + +field_before_int = [0.0] +field_after_int = [0.0] + +if source_group.containsMyRank(): + + field_before_int = [parafield.getVolumeIntegral(1)] + MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); + dec.synchronize() + print "DEC usage" + dec.setForcedRenormalization(False) + + dec.sendData() + pass + +if target_group.containsMyRank(): + + MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD) + 
dec.synchronize() + dec.setForcedRenormalization(False) + dec.recvData() + field_after_int = [parafield.getVolumeIntegral(1)] + pass + +MPI_Bcast(field_before_int, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD) +MPI_Bcast(field_after_int , 1, MPI_DOUBLE, size-1, MPI_COMM_WORLD) + +epsilon = 1e-6 +if abs(field_before_int[0] - field_after_int[0]) > epsilon: + print "Field before is not equal field after: %s != %s"%\ + (field_before_int[0],field_after_int[0]) + pass + + +MPI_Barrier(MPI_COMM_WORLD) +MPI_Finalize() +print "# End of testNonCoincidentDEC" diff --git a/src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py b/src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py new file mode 100755 index 000000000..90c9aad2f --- /dev/null +++ b/src/ParaMEDMEM_Swig/test_StructuredCoincidentDEC.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# -*- coding: iso-8859-1 -*- +# Copyright (C) 2007-2015 CEA/DEN, EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# + +from ParaMEDMEM import * +import sys, os +import unittest +import math + +class ParaMEDMEMBasicsTest2(unittest.TestCase): + def testStructuredCoincidentDEC(self): + MPI_Init(sys.argv) + # + size = MPI_Comm_size(MPI_COMM_WORLD) + rank = MPI_Comm_rank(MPI_COMM_WORLD) + # + if size < 4: + raise RuntimeError, "Expect MPI_COMM_WORLD size >= 4" + # + interface = CommInterface() + # + self_group = MPIProcessorGroup(interface, rank, rank) + target_group = MPIProcessorGroup(interface, 3, size-1) + source_group = MPIProcessorGroup(interface, 0, 2) + # + mesh = 0 + support = 0 + paramesh = 0 + parafield = 0 + comptopo = 0 + icocofield= 0 + # + data_dir = os.environ['MED_ROOT_DIR'] + tmp_dir = os.environ['TMP'] + if tmp_dir == '': + tmp_dir = "/tmp" + pass + + filename_xml1 = data_dir + "/share/salome/resources/med/square1_split" + filename_2 = data_dir + "/share/salome/resources/med/square1.med" + filename_seq_wr = tmp_dir + "/" + filename_seq_med = tmp_dir + "/myWrField_seq_pointe221.med" + + dec = StructuredCoincidentDEC(source_group, target_group) + MPI_Barrier(MPI_COMM_WORLD) + if source_group.containsMyRank(): + filename = filename_xml1 + str(rank+1) + ".med" + meshname = "Mesh_2_" + str(rank+1) + mesh=MEDLoader.ReadUMeshFromFile(filename,meshname,0) + paramesh=ParaMESH(mesh,source_group,"source mesh") + comptopo=ComponentTopology(6) + parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh,comptopo) + parafield.getField().setNature(ConservativeVolumic) + nb_local=mesh.getNumberOfCells() + global_numbering=paramesh.getGlobalNumberingCell2() + value = [] + for ielem in range(nb_local): + for icomp in range(6): + value.append(global_numbering[ielem]*6.0+icomp); + pass + pass + parafield.getField().setValues(value) + icocofield = ICoCoMEDField(mesh,parafield.getField()) + dec.setMethod("P0") + 
dec.attachLocalField(parafield) + dec.synchronize() + dec.sendData() + pass + + if target_group.containsMyRank(): + meshname2 = "Mesh_2" + mesh=MEDLoader.ReadUMeshFromFile(filename_2, meshname2,0) + paramesh=ParaMESH(mesh, self_group, "target mesh") + comptopo=ComponentTopology(6,target_group) + parafield=ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo) + parafield.getField().setNature(ConservativeVolumic) + nb_local=mesh.getNumberOfCells() + value = [0.0]*(nb_local*comptopo.nbLocalComponents()) + parafield.getField().setValues(value) + icocofield = ICoCoMEDField(mesh,parafield.getField()) + dec.setMethod("P0") + dec.attachLocalField(parafield) + dec.synchronize() + dec.recvData() + recv_value = parafield.getField().getArray().getValues() + for i in range(nb_local): + first=comptopo.firstLocalComponent() + for icomp in range(comptopo.nbLocalComponents()): + self.failUnless(math.fabs(recv_value[i*comptopo.nbLocalComponents()+icomp]- + (float)(i*6+icomp+first))<1e-12) + pass + pass + pass + comptopo=0 + interface = 0 + mesh =0 + support =0 + paramesh =0 + parafield =0 + icocofield =0 + dec=0 + self_group =0 + target_group = 0 + source_group = 0 + MPI_Barrier(MPI_COMM_WORLD) + MPI_Finalize() + print "End of test StructuredCoincidentDEC" + pass + + +unittest.main() diff --git a/src/RENUMBER/testRenumbering.py b/src/RENUMBER/testRenumbering.py index c18ba6795..5454f1357 100755 --- a/src/RENUMBER/testRenumbering.py +++ b/src/RENUMBER/testRenumbering.py @@ -189,21 +189,9 @@ class RenumberingTest(unittest.TestCase): pass def setUp(self): - srcdir = os.getenv("srcdir") - med_root = os.getenv("MED_ROOT_DIR") - if srcdir: - # make test is being performed - self.dir_renumber="./renumber" - self.dir_mesh = os.path.join( srcdir, "../../resources") - elif med_root: - # hope renumber has been already installed - self.dir_renumber=os.path.join( med_root, "bin/salome/renumber") - self.dir_mesh = os.path.join( med_root, "share/salome/resources/med") - else: - # initial version - self.dir_renumber="../../../MED_INSTALL/bin/salome/renumber" - self.dir_mesh="../../resources" - pass + med_root_dir=os.getenv("MEDTOOL_ROOT_DIR") + self.dir_renumber=os.path.join(med_root_dir, "bin/renumber") + self.dir_mesh=os.path.join(med_root_dir, "share","resources","med") pass pass diff --git a/src/RENUMBER_Swig/CMakeLists.txt b/src/RENUMBER_Swig/CMakeLists.txt index ca5363830..afd126fcb 100644 --- a/src/RENUMBER_Swig/CMakeLists.txt +++ b/src/RENUMBER_Swig/CMakeLists.txt @@ -66,9 +66,9 @@ INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/MEDRenumber.py DESTINATION ${MEDTOOL_I INSTALL(FILES MEDRenumber.i MEDRenumberCommon.i DESTINATION ${MEDTOOL_INSTALL_HEADERS}) INSTALL(FILES MEDRenumberTest.py DESTINATION ${MEDTOOL_INSTALL_SCRIPT_PYTHON}) -#SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env) ADD_TEST(MEDRenumberTest ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MEDRenumberTest.py) -SET_TESTS_PROPERTIES(MEDRenumberTest PROPERTIES ENVIRONMENT "${tests_env}") +SET(MEDRenumberTest_PYTHONPATH "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}:${CMAKE_CURRENT_BINARY_DIR}/../MEDCoupling_Swig") +SET_TESTS_PROPERTIES(MEDRenumberTest PROPERTIES ENVIRONMENT "${MEDRenumberTest_PYTHONPATH}") # Application tests -- 2.39.2
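For illustration only, and not part of the patch above: the new ParaMEDMEM_Swig scripts (test_InterpKernelDEC.py, test_NonCoincidentDEC.py, test_StructuredCoincidentDEC.py) are MPI programs that must be launched under an MPI runner with a suitable number of ranks (the InterpKernelDEC case checks for exactly 5 processes). Below is a minimal sketch of how such a test could be registered with CTest, assuming the standard FindMPI variables MPIEXEC and MPIEXEC_NUMPROC_FLAG are available and reusing the ADD_TEST / SET_TESTS_PROPERTIES pattern of the RENUMBER_Swig hunk; the test name and PYTHONPATH value are placeholders, not taken from the patch.

# Hypothetical CTest registration of one of the new MPI-based Python tests (sketch only).
# MPIEXEC and MPIEXEC_NUMPROC_FLAG are assumed to be provided by FindMPI;
# the "5" matches the MPI_COMM_WORLD size check done inside test_InterpKernelDEC.py.
ADD_TEST(ParaMEDMEM_InterpKernelDEC_Py
         ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} 5
         ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_InterpKernelDEC.py)
# The interpreter must be able to import the ParaMEDMEM module built in this tree;
# the exact PYTHONPATH is an assumption and depends on the build layout.
SET_TESTS_PROPERTIES(ParaMEDMEM_InterpKernelDEC_Py PROPERTIES
                     ENVIRONMENT "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}")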