SALOME platform Git repositories - tools/solverlab.git/commitdiff
Added C MPI tests
author    michael <michael@localhost.localdomain>
          Fri, 19 Nov 2021 12:29:43 +0000 (13:29 +0100)
committer michael <michael@localhost.localdomain>
          Fri, 19 Nov 2021 12:29:43 +0000 (13:29 +0100)
CoreFlows/examples/C/CMakeLists.txt
CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FE_MPI.cxx [new file with mode: 0755]
CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FV_MPI.cxx [new file with mode: 0755]
CoreFlows/examples/C/MEDCouplingSendRecvFieldDifferentMeshes_MPI.cxx [new file with mode: 0644]
CoreFlows/examples/C/MEDCouplingSendRecvFieldSameMesh_MPI.cxx [new file with mode: 0644]
CoreFlows/examples/C/TransportEquation_1DHeatedChannel_MPI.cxx [new file with mode: 0755]

index 1b27de338cc07abf41c0bdd78b1e984178dbac05..e92764da27131e005c2daa1b946a2856f45b81f8 100755 (executable)
@@ -108,25 +108,58 @@ CreateTestExecAndInstall(IsothermalTwoFluid_2DInclinedSedimentation.cxx  "${libs
 CreateTestExecAndInstall(IsothermalTwoFluid_2DVidangeReservoir.cxx  "${libs_for_tests}" )
 
  
-  add_executable(WaveSystem_FV_SphericalExplosion_CDMATH.exe WaveSystem_FV_SphericalExplosion_CDMATH.cxx)
-  target_link_libraries(WaveSystem_FV_SphericalExplosion_CDMATH.exe CoreFlowsLibs )
-  install(TARGETS WaveSystem_FV_SphericalExplosion_CDMATH.exe DESTINATION share/examples)
-  add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_SQUARE         COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe")     
-  add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_HEXAGON        COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshHexagonWithTriangles10.med)
-  add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_CUBE           COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshCube.med)     
-  add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_TETRAHEDRON    COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshTetrahedron10.med)     
-
-if( SOLVERLAB_WITH_MPI )
+add_executable(WaveSystem_FV_SphericalExplosion_CDMATH.exe WaveSystem_FV_SphericalExplosion_CDMATH.cxx)
+target_link_libraries(WaveSystem_FV_SphericalExplosion_CDMATH.exe CoreFlowsLibs )
+install(TARGETS WaveSystem_FV_SphericalExplosion_CDMATH.exe DESTINATION share/examples)
+
+add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_SQUARE         COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe")     
+add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_HEXAGON        COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshHexagonWithTriangles10.med)
+add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_CUBE           COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshCube.med)     
+add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_TETRAHEDRON    COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshTetrahedron10.med)     
+
+if( SOLVERLAB_WITH_MPI ) # Parallel tests
+  add_executable(MEDCouplingSendRecvFieldSameMesh_MPI.exe MEDCouplingSendRecvFieldSameMesh_MPI.cxx)
+  target_link_libraries(MEDCouplingSendRecvFieldSameMesh_MPI.exe ${MPI_LIBRARY} medcoupling medloader paramedmem)
+  install(TARGETS MEDCouplingSendRecvFieldSameMesh_MPI.exe DESTINATION share/examples)
+
+  add_test(NAME MEDCouplingSendRecvFieldSameMesh_MPI_4Procs.exe                      COMMAND "${MPIEXEC}" "-n" "4" "./MEDCouplingSendRecvFieldSameMesh_MPI.exe" ) 
+
+  add_executable(MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe MEDCouplingSendRecvFieldDifferentMeshes_MPI.cxx)
+  target_link_libraries(MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe ${MPI_LIBRARY} medcoupling medloader paramedmem)
+  install(TARGETS MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe DESTINATION share/examples)
+
+  add_test(NAME MEDCouplingSendRecvFieldDifferentMeshes_MPI_4Procs.exe                      COMMAND "${MPIEXEC}" "-n" "4" "./MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe" ) 
+
+  add_executable(DiffusionEquation_1DHeatedRod_FE_MPI.exe DiffusionEquation_1DHeatedRod_FE_MPI.cxx)                    # compilation of the testxxx.exe 
+  target_link_libraries(DiffusionEquation_1DHeatedRod_FE_MPI.exe CoreFlowsLibs ${MPI_LIBRARY})              # provide required lib for testxxx.exe 
+  install(TARGETS DiffusionEquation_1DHeatedRod_FE_MPI.exe DESTINATION share/examples)
+
+  add_test(NAME DiffusionEquation_1DHeatedRod_FE_MPI_2Procs.exe         COMMAND "${MPIEXEC}" "-n" "2" "./DiffusionEquation_1DHeatedRod_FE_MPI.exe")     
+
+  add_executable(DiffusionEquation_1DHeatedRod_FV_MPI.exe DiffusionEquation_1DHeatedRod_FV_MPI.cxx)                    # compilation of the testxxx.exe 
+  target_link_libraries(DiffusionEquation_1DHeatedRod_FV_MPI.exe CoreFlowsLibs ${MPI_LIBRARY})              # provide required lib for testxxx.exe 
+  install(TARGETS DiffusionEquation_1DHeatedRod_FV_MPI.exe DESTINATION share/examples)
+
+  add_test(NAME DiffusionEquation_1DHeatedRod_FV_MPI_2Procs.exe         COMMAND "${MPIEXEC}" "-n" "2" "./DiffusionEquation_1DHeatedRod_FV_MPI.exe")     
+
+  add_executable(TransportEquation_1DHeatedChannel_MPI.exe TransportEquation_1DHeatedChannel_MPI.cxx)                    # compilation of the testxxx.exe 
+  target_link_libraries(TransportEquation_1DHeatedChannel_MPI.exe CoreFlowsLibs ${MPI_LIBRARY})              # provide required lib for testxxx.exe 
+  install(TARGETS TransportEquation_1DHeatedChannel_MPI.exe DESTINATION share/examples)
+
+  add_test(NAME TransportEquation_1DHeatedChannel_MPI_2Procs.exe         COMMAND "${MPIEXEC}" "-n" "2" "./TransportEquation_1DHeatedChannel_MPI.exe")     
+
   add_executable(WaveSystem_FV_SphericalExplosion_MPI.exe WaveSystem_FV_SphericalExplosion_MPI.cxx)                    # compilation of the testxxx.exe 
-  target_link_libraries(WaveSystem_FV_SphericalExplosion_MPI.exe CoreFlowsLibs  ${MPI_LIBRARY})              # provide required lib for testxxx.exe 
+  target_link_libraries(WaveSystem_FV_SphericalExplosion_MPI.exe CoreFlowsLibs ${MPI_LIBRARY})              # provide required lib for testxxx.exe 
   install(TARGETS WaveSystem_FV_SphericalExplosion_MPI.exe DESTINATION share/examples)
-  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_SEQ_SQUARE         COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe")     
-  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_SEQ_HEXAGON        COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med) 
-  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_SEQ_CUBE           COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med) 
-  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_SEQ_TETRAHEDRON    COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med) 
-  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_PAR_SQUARE         COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe")
-  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_PAR_HEXAGON        COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med)
-  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_PAR_CUBE           COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med) 
-  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_PAR_TETRAHEDRON    COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med) 
+
+  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_1Proc_SQUARE.exe         COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe")     
+  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_1Proc_HEXAGON.exe        COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med) 
+  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_1Proc_CUBE.exe           COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med) 
+  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_1Proc_TETRAHEDRON.exe    COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med) 
+  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_2Procs_SQUARE.exe         COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe")
+  add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_2Procs_HEXAGON.exe        COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med)
+  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_2Procs_CUBE.exe           COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med) 
+  add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_2Procs_TETRAHEDRON.exe    COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med) 
+
 endif( SOLVERLAB_WITH_MPI )
 
diff --git a/CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FE_MPI.cxx b/CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FE_MPI.cxx
new file mode 100755 (executable)
index 0000000..ddb8a4c
--- /dev/null
@@ -0,0 +1,101 @@
+#include "DiffusionEquation.hxx"\r
+\r
+using namespace std;\r
+\r
+#define PI 3.14159265\r
+\r
+void power_field_diffusionTest(Field & Phi){\r
+       double L=4.2;\r
+       double lambda=0.2;\r
+       double phi=1e5;\r
+       double x;\r
+       Mesh M = Phi.getMesh();\r
+       int nbNodes = M.getNumberOfNodes();\r
+       for (int j = 0; j < nbNodes; j++) {\r
+               x=M.getNode(j).x();\r
+               Phi(j) = phi*cos(PI*(x-L/2)/(L+lambda));\r
+       }\r
+}\r
+\r
+int main(int argc, char** argv)\r
+{\r
+       //Preprocessing: mesh and group creation\r
+       double xinf=0.0;\r
+       double xsup=4.2;\r
+       int nx=10;\r
+       cout << "Building of a 1D mesh with "<<nx<<" cells" << endl;\r
+       Mesh M(xinf,xsup,nx);\r
+       double eps=1.E-6;\r
+       M.setGroupAtPlan(xsup,0,eps,"Neumann");\r
+       M.setGroupAtPlan(xinf,0,eps,"Neumann");\r
+       int spaceDim = M.getSpaceDimension();\r
+\r
+\r
+       //Solid parameters\r
+       double cp_ur=300;//Uranium specific heat\r
+       double rho_ur=10000;//Uranium density\r
+       double lambda_ur=5;\r
\r
+    bool FEcalculation=true;\r
+       DiffusionEquation  myProblem(spaceDim,FEcalculation,rho_ur,cp_ur,lambda_ur);\r
+       Field VV("Solid temperature", NODES, M, 1);\r
+\r
+       //Set fluid temperature (temperature du fluide)\r
+       double fluidTemp=573;//fluid mean temperature\r
+       double heatTransfertCoeff=1000;//fluid/solid exchange coefficient\r
+       myProblem.setFluidTemperature(fluidTemp);\r
+       myProblem.setHeatTransfertCoeff(heatTransfertCoeff);\r
+       //Set heat source\r
+       Field Phi("Heat power field", NODES, M, 1);\r
+       power_field_diffusionTest(Phi);\r
+       myProblem.setHeatPowerField(Phi);\r
+       Phi.writeVTK("1DheatPowerField");\r
+\r
+       //Initial field creation\r
+       Vector VV_Constant(1);\r
+       VV_Constant(0) = 623;//Rod clad temperature\r
+\r
+       cout << "Building initial data" << endl;\r
+       myProblem.setInitialFieldConstant(M,VV_Constant,NODES);\r
+\r
+       //set the boundary conditions\r
+       myProblem.setNeumannBoundaryCondition("Neumann");\r
+\r
+       // set the numerical method\r
+       myProblem.setTimeScheme( Explicit);\r
+\r
+       // name result file\r
+       string fileName = "1DRodTemperature_FE";\r
+\r
+       // parameters calculation\r
+       unsigned MaxNbOfTimeStep =3;\r
+       int freqSave = 1;\r
+       double cfl = 0.5;\r
+       double maxTime = 1000000;\r
+       double precision = 1e-6;\r
+\r
+       myProblem.setCFL(cfl);\r
+       myProblem.setPrecision(precision);\r
+       myProblem.setMaxNbOfTimeStep(MaxNbOfTimeStep);\r
+       myProblem.setTimeMax(maxTime);\r
+       myProblem.setFreqSave(freqSave);\r
+       myProblem.setFileName(fileName);\r
+\r
+       // set display option to monitor the calculation\r
+       myProblem.setVerbose( true);\r
+       //set file saving format\r
+       myProblem.setSaveFileFormat(CSV);\r
+\r
+       // evolution\r
+       myProblem.initialize();\r
+       bool ok = myProblem.run();\r
+       if (ok)\r
+               cout << "Simulation "<<fileName<<" is successful !" << endl;\r
+       else\r
+               cout << "Simulation "<<fileName<<"  failed ! " << endl;\r
+\r
+       cout << "------------ End of calculation -----------" << endl;\r
+       myProblem.terminate();\r
+\r
+       return EXIT_SUCCESS;\r
+}\r
diff --git a/CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FV_MPI.cxx b/CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FV_MPI.cxx
new file mode 100755 (executable)
index 0000000..0c275b2
--- /dev/null
@@ -0,0 +1,101 @@
+#include "DiffusionEquation.hxx"\r
+\r
+using namespace std;\r
+\r
+#define PI 3.14159265\r
+\r
+void power_field_diffusionTest(Field & Phi){\r
+       double L=4.2;\r
+       double lambda=0.2;\r
+       double phi=1e5;\r
+       double x;\r
+       Mesh M = Phi.getMesh();\r
+       int nbCells = M.getNumberOfCells();\r
+       for (int j = 0; j < nbCells; j++) {\r
+               x=M.getCell(j).x();\r
+               Phi(j) = phi*cos(PI*(x-L/2)/(L+lambda));\r
+       }\r
+}\r
+\r
+int main(int argc, char** argv)\r
+{\r
+       //Preprocessing: mesh and group creation\r
+       double xinf=0.0;\r
+       double xsup=4.2;\r
+       int nx=10;\r
+       cout << "Building of a 1D mesh with "<<nx<<" cells" << endl;\r
+       Mesh M(xinf,xsup,nx);\r
+       double eps=1.E-6;\r
+       M.setGroupAtPlan(xsup,0,eps,"Neumann");\r
+       M.setGroupAtPlan(xinf,0,eps,"Neumann");\r
+       int spaceDim = M.getSpaceDimension();\r
+\r
+\r
+       //Solid parameters\r
+       double cp_ur=300;//Uranium specific heat\r
+       double rho_ur=10000;//Uranium density\r
+       double lambda_ur=5;\r
\r
+    bool FEcalculation=false;\r
+       DiffusionEquation  myProblem(spaceDim,FEcalculation,rho_ur,cp_ur,lambda_ur);\r
+       Field VV("Solid temperature", CELLS, M, 1);\r
+\r
+       //Set fluid temperature (temperature du fluide)\r
+       double fluidTemp=573;//fluid mean temperature\r
+       double heatTransfertCoeff=1000;//fluid/solid exchange coefficient\r
+       myProblem.setFluidTemperature(fluidTemp);\r
+       myProblem.setHeatTransfertCoeff(heatTransfertCoeff);\r
+       //Set heat source\r
+       Field Phi("Heat power field", CELLS, M, 1);\r
+       power_field_diffusionTest(Phi);\r
+       myProblem.setHeatPowerField(Phi);\r
+       Phi.writeVTK("1DheatPowerField");\r
+\r
+       //Initial field creation\r
+       Vector VV_Constant(1);\r
+       VV_Constant(0) = 623;//Rod clad temperature\r
+\r
+       cout << "Building initial data" << endl;\r
+       myProblem.setInitialFieldConstant(M,VV_Constant);\r
+\r
+       //set the boundary conditions\r
+       myProblem.setNeumannBoundaryCondition("Neumann");\r
+\r
+       // set the numerical method\r
+       myProblem.setTimeScheme( Explicit);\r
+\r
+       // name result file\r
+       string fileName = "1DRodTemperature_FV";\r
+\r
+       // parameters calculation\r
+       unsigned MaxNbOfTimeStep =3;\r
+       int freqSave = 1;\r
+       double cfl = 0.5;\r
+       double maxTime = 1000000;\r
+       double precision = 1e-6;\r
+\r
+       myProblem.setCFL(cfl);\r
+       myProblem.setPrecision(precision);\r
+       myProblem.setMaxNbOfTimeStep(MaxNbOfTimeStep);\r
+       myProblem.setTimeMax(maxTime);\r
+       myProblem.setFreqSave(freqSave);\r
+       myProblem.setFileName(fileName);\r
+\r
+       // set display option to monitor the calculation\r
+       myProblem.setVerbose( true);\r
+       //set file saving format\r
+       myProblem.setSaveFileFormat(CSV);\r
+\r
+       // evolution\r
+       myProblem.initialize();\r
+       bool ok = myProblem.run();\r
+       if (ok)\r
+               cout << "Simulation "<<fileName<<" is successful !" << endl;\r
+       else\r
+               cout << "Simulation "<<fileName<<"  failed ! " << endl;\r
+\r
+       cout << "------------ End of calculation -----------" << endl;\r
+       myProblem.terminate();\r
+\r
+       return EXIT_SUCCESS;\r
+}\r
diff --git a/CoreFlows/examples/C/MEDCouplingSendRecvFieldDifferentMeshes_MPI.cxx b/CoreFlows/examples/C/MEDCouplingSendRecvFieldDifferentMeshes_MPI.cxx
new file mode 100644 (file)
index 0000000..0a27ccb
--- /dev/null
@@ -0,0 +1,112 @@
+//============================================================================
+// Name        : Test of using a subcommunicator to send and receive a 2D MEDCoupling field on cells (P0) lying on different meshes between two groups of two processors
+// Author      : Michael NDJINGA
+// Date        : November 2021
+// Description : Use of the parallel Data Exchange Channel InterpKernelDEC of MEDCoupling
+//============================================================================
+
+#include <iostream>
+#include <string>
+#include <set>
+
+#include "InterpKernelDEC.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "MEDCouplingCMesh.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDLoader.hxx"
+
+#include <mpi.h>
+#include <cassert>
+
+using namespace std;
+
+int main(int argc, char *argv[])
+{
+       /* MPI initialisation */
+       MPI_Init(&argc, &argv);
+       int    size;        /* size of communicator */
+       int    rank;        /* processor rank */
+       int sub_rank, sub_size;/* rank in subcommunicator */
+       int color;/* tells if I belong to the sub_communicator */
+       MPI_Comm sub_comm ;/*subcommunicator will be used in exchanges */
+       std::set<int> procs_source, procs_target;/* group of procs that will send or receive data */
+       MEDCoupling::MEDCouplingFieldDouble * field;/*field used to send or receive data */
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+       if(size!=4)
+               printf("Processor %d : aborting.\n Simulation should be run on four processors.\n %d processors given\n",rank,size);
+       
+       color=rank/2;
+               
+       printf("My rank is %d among %d processors, my color is %d\n",rank, size,color);
+       
+       MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub_comm); /* two groups (0,1) and (2,3) */
+       MPI_Comm_rank(sub_comm, &sub_rank);
+       MPI_Comm_size(sub_comm, &sub_size);
+       
+       printf("WORLD RANK/SIZE: %d/%d \t subcommunicator RANK/SIZE: %d/%d\n",  rank, size, sub_rank, sub_size);
+
+       
+       procs_source.insert(0);/* sub rank 0 will send data */
+       procs_target.insert(1);/* sub rank 1 will receive data */
+
+       MEDCoupling::CommInterface interface = MEDCoupling::CommInterface();
+       MEDCoupling::MPIProcessorGroup source_group = MEDCoupling::MPIProcessorGroup(interface, procs_source,sub_comm);
+       MEDCoupling::MPIProcessorGroup target_group = MEDCoupling::MPIProcessorGroup(interface, procs_target,sub_comm);
+       MEDCoupling::InterpKernelDEC dec = MEDCoupling::InterpKernelDEC(source_group, target_group);
+
+       //Create a MEDCouplingUMesh from a 2D cartesian mesh
+       MEDCoupling::DataArrayDouble * xarr=MEDCoupling::DataArrayDouble::New();
+       xarr->alloc(11,1);
+       xarr->iota(0.);
+       MEDCoupling::MEDCouplingCMesh * cmesh=MEDCoupling::MEDCouplingCMesh::New("My2D_CMesh");
+       cmesh->setCoords(xarr,xarr);
+       MEDCoupling::MEDCouplingUMesh * mesh=cmesh->buildUnstructured();
+       mesh->setName("RegularSquare");
+       mesh->simplexize(sub_rank);//The squares are cut in two right triangles according to one of the two possible diagonals
+
+       if(sub_rank == 0)
+       {
+               field = mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)");
+               field->setName("SourceField");
+               field->setNature(MEDCoupling::IntensiveMaximum);
+               MEDCoupling::WriteField("source_field"+to_string(rank)+".med", field, true);
+               printf("Processor with global rank %d has created and saved the source field\n", rank);
+       }
+       else
+       {
+               field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"0");
+               field->setName("TargetField");
+               field->setNature(MEDCoupling::IntensiveMaximum);
+               printf("Processor with global rank %d has created the target field\n", rank);
+       }
+       
+       dec.attachLocalField(field);
+       dec.synchronize();
+       
+       if(sub_rank == 0)
+       {
+               dec.sendData();
+               printf("Processor with global rank %d has sent the source field\n", rank);
+       }
+       else
+       {
+               dec.recvData();
+               printf("Processor with global rank %d has received the source field on the target mesh\n", rank);
+               MEDCoupling::MEDCouplingFieldDouble * exact_field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)");
+               exact_field->setName("ExactField");
+               //To do : compare the target and exact fields (maximum value, etc.); one possible check is sketched after this file
+               //double error=((*field)-(*exact_field))->normMax(0)/exact_field->normMax(0);
+               //printf("Processor with global rank %d received a source field that differs from the theoretical value by %g (maximum relative norm on cells)\n", rank, error );
+               //assert( fabs(error)<1.e-6 );
+               MEDCoupling::WriteField("target_field"+to_string(rank)+".med", field, true);
+               MEDCoupling::WriteField("exact_field"+to_string(rank)+".med", exact_field, true);       
+       }
+
+       MPI_Comm_free(&sub_comm);
+       MPI_Finalize();
+    return 0;
+}
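
The comparison left as a "to do" in the receiving branch above could be done as sketched below. This is only a sketch, not part of the committed file: it assumes <algorithm> and <cmath> in addition to the headers already included, uses only basic MEDCoupling accessors (getArray(), getNumberOfTuples(), getConstPointer()), and reuses the 1.e-6 tolerance of the commented-out assert, which is an assumption rather than a validated threshold for the P0 interpolation between the two simplexized meshes.

       /* Sketch: compare the received field with the analytic solution cell by cell on the target mesh */
       const double* recv  = field->getArray()->getConstPointer();
       const double* exact = exact_field->getArray()->getConstPointer();
       double maxAbsDiff = 0., maxAbsExact = 0.;
       for (int i = 0; i < field->getArray()->getNumberOfTuples(); i++)
       {
               maxAbsDiff  = std::max(maxAbsDiff,  std::fabs(recv[i] - exact[i]));
               maxAbsExact = std::max(maxAbsExact, std::fabs(exact[i]));
       }
       double error = maxAbsDiff / maxAbsExact;
       printf("Processor with global rank %d received a source field that differs from the theoretical value by %g (maximum relative norm on cells)\n", rank, error);
       assert( fabs(error) < 1.e-6 );
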
diff --git a/CoreFlows/examples/C/MEDCouplingSendRecvFieldSameMesh_MPI.cxx b/CoreFlows/examples/C/MEDCouplingSendRecvFieldSameMesh_MPI.cxx
new file mode 100644 (file)
index 0000000..b6ba1e6
--- /dev/null
@@ -0,0 +1,109 @@
+//============================================================================
+// Name        : Test of using a subcommunicator to send and receive a 3D MEDCoupling field on cells (P0) lying on the same mesh between two groups of two processors
+// Author      : Michael NDJINGA
+// Date        : November 2021
+// Description : Use of the parallel Data Exchange Channel StructuredCoincidentDEC of MEDCoupling
+//============================================================================
+
+#include <iostream>
+#include <string>
+#include <set>
+
+#include "StructuredCoincidentDEC.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "MEDCouplingCMesh.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDLoader.hxx"
+
+#include <mpi.h>
+#include <cassert>
+
+using namespace std;
+
+int main(int argc, char *argv[])
+{
+       /* MPI initialisation */
+       MPI_Init(&argc, &argv);
+       int    size;        /* size of communicator */
+       int    rank;        /* processor rank */
+       int sub_rank, sub_size;/* rank in subcommunicator */
+       int color;/* tells if I belong to the sub_communicator */
+       MPI_Comm sub_comm ;/*subcommunicator will be used in exchanges */
+       std::set<int> procs_source, procs_target;/* group of procs that will send or receive data */
+       MEDCoupling::MEDCouplingFieldDouble * field;/*field used to send or receive data */
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+       if(size!=4)
+               printf("Processor %d : aborting.\n Simulation should be run on four processors.\n %d processors given\n",rank,size);
+       
+       color=rank/2;
+               
+       printf("My rank is %d among %d processors, my color is %d\n",rank, size,color);
+       
+       MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub_comm); /* two groups (0,1) and (2,3) */
+       MPI_Comm_rank(sub_comm, &sub_rank);
+       MPI_Comm_size(sub_comm, &sub_size);
+       
+       printf("WORLD RANK/SIZE: %d/%d \t subcommunicator RANK/SIZE: %d/%d\n",  rank, size, sub_rank, sub_size);
+
+       
+       procs_source.insert(0);/* sub rank 0 will send data */
+       procs_target.insert(1);/* sub rank 1 will receive data */
+
+       MEDCoupling::CommInterface interface = MEDCoupling::CommInterface();
+       MEDCoupling::MPIProcessorGroup source_group = MEDCoupling::MPIProcessorGroup(interface, procs_source,sub_comm);
+       MEDCoupling::MPIProcessorGroup target_group = MEDCoupling::MPIProcessorGroup(interface, procs_target,sub_comm);
+       MEDCoupling::StructuredCoincidentDEC dec = MEDCoupling::StructuredCoincidentDEC(source_group, target_group);
+
+       //Create a MEDCouplingUMesh from a 3D cartesian mesh
+       MEDCoupling::DataArrayDouble * xarr=MEDCoupling::DataArrayDouble::New();
+       xarr->alloc(11,1);
+       xarr->iota(0.);
+       MEDCoupling::MEDCouplingCMesh * cmesh=MEDCoupling::MEDCouplingCMesh::New("My2D_CMesh");
+       cmesh->setCoords(xarr,xarr,xarr);
+       MEDCoupling::MEDCouplingUMesh * mesh=cmesh->buildUnstructured();
+       mesh->setName("RegularSquare");
+
+       if(sub_rank == 0)
+       {
+               field = mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)");
+               field->setName("SourceField");
+               MEDCoupling::WriteField("source_field"+to_string(rank)+".med", field, true);
+               printf("Processor with global rank %d has created and saved the source field\n", rank);
+       }
+       else
+       {
+               field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"0");
+               field->setName("TargetField");
+               printf("Processor with global rank %d has created the target field\n", rank);
+       }
+       
+       dec.attachLocalField(field);
+       dec.synchronize();
+       
+       if(sub_rank == 0)
+       {
+               dec.sendData();
+               printf("Processor with global rank %d has sent the source field\n", rank);
+       }
+       else
+       {
+               dec.recvData();
+               printf("Processor with global rank %d has received the source field on the target mesh\n", rank);
+               /* Solve the bug in StructuredCoincidentDEC then uncomment the lines below to check the result */
+               //MEDCoupling::MEDCouplingFieldDouble * exact_field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)");
+               //exact_field->setName("ExactField");
+               //double error=((*field)-(*exact_field))->normMax(0)/exact_field->normMax(0);
+               //printf("Processor with global rank %d received a source field that differs from the theoretical value by %g (maximum relative norm on cells)\n", rank, error );
+               //assert( fabs(error)<1.e-6 );
+               //MEDCoupling::WriteField("target_field"+to_string(rank)+".med", field, true);
+               //MEDCoupling::WriteField("exact_field"+to_string(rank)+".med", exact_field, true);     
+       }
+
+       MPI_Comm_free(&sub_comm);
+       MPI_Finalize();
+    return 0;
+}
diff --git a/CoreFlows/examples/C/TransportEquation_1DHeatedChannel_MPI.cxx b/CoreFlows/examples/C/TransportEquation_1DHeatedChannel_MPI.cxx
new file mode 100755 (executable)
index 0000000..2cd886d
--- /dev/null
@@ -0,0 +1,93 @@
+#include "TransportEquation.hxx"\r
+\r
+using namespace std;\r
+\r
+\r
+int main(int argc, char** argv)\r
+{\r
+       //Preprocessing: mesh and group creation\r
+       double xinf=0.0;\r
+       double xsup=4.2;\r
+       int nx=10;\r
+       cout << "Building a 1D mesh with "<<nx<<" cells" << endl;\r
+       Mesh M(xinf,xsup,nx);\r
+       double eps=1.E-8;\r
+       M.setGroupAtPlan(xsup,0,eps,"Neumann");\r
+       M.setGroupAtPlan(xinf,0,eps,"Inlet");\r
+       int spaceDim = M.getSpaceDimension();\r
+\r
+       // Boundary conditions\r
+       map<string, LimitFieldTransport> boundaryFields;\r
+\r
+       LimitFieldTransport limitNeumann;\r
+       limitNeumann.bcType=NeumannTransport;\r
+       boundaryFields["Neumann"] = limitNeumann;\r
+\r
+       LimitFieldTransport limitInlet;\r
+       limitInlet.bcType=InletTransport;\r
+       limitInlet.h =1.3e6;//Inlet water enthalpy\r
+       boundaryFields["Inlet"] = limitInlet;\r
+\r
+       //Set the fluid transport velocity\r
+       vector<double> transportVelocity(1,5);//fluid velocity vector\r
+\r
+       TransportEquation  myProblem(LiquidPhase,around155bars600KTransport,transportVelocity);\r
+       Field VV("Enthalpy", CELLS, M, 1);\r
+\r
+       //Set rod temperature and heat exchamge coefficient\r
+       double rodTemp=623;//Rod clad temperature\r
+       double heatTransfertCoeff=1000;//fluid/solid exchange coefficient \r
+       myProblem.setRodTemperature(rodTemp);\r
+       myProblem.setHeatTransfertCoeff(heatTransfertCoeff);\r
+\r
+       //Initial field creation\r
+       Vector VV_Constant(1);//initial enthalpy\r
+       VV_Constant(0) = 1.3e6;\r
+\r
+       cout << "Building the initial data " << endl;\r
+\r
+       // generate initial condition\r
+       myProblem.setInitialFieldConstant(M,VV_Constant);\r
+\r
+       //set the boundary conditions\r
+       myProblem.setBoundaryFields(boundaryFields);\r
+\r
+       // set the numerical method\r
+       myProblem.setTimeScheme( Explicit);\r
+\r
+       // name result file\r
+       string fileName = "1DFluidEnthalpy";\r
+\r
+       // parameters calculation\r
+       unsigned MaxNbOfTimeStep =3;\r
+       int freqSave = 1;\r
+       double cfl = 0.95;\r
+       double maxTime = 5;\r
+       double precision = 1e-6;\r
+\r
+       myProblem.setCFL(cfl);\r
+       myProblem.setPrecision(precision);\r
+       myProblem.setMaxNbOfTimeStep(MaxNbOfTimeStep);\r
+       myProblem.setTimeMax(maxTime);\r
+       myProblem.setFreqSave(freqSave);\r
+       myProblem.setFileName(fileName);\r
+\r
+       // set display option to monitor the calculation\r
+       bool computation=true;\r
+       bool system=true;\r
+       myProblem.setVerbose( computation, system);\r
+       myProblem.setSaveFileFormat(CSV);\r
+\r
+       // evolution\r
+       myProblem.initialize();\r
+       bool ok = myProblem.run();\r
+       if (ok)\r
+               cout << "Simulation "<<fileName<<" is successful !" << endl;\r
+       else\r
+               cout << "Simulation "<<fileName<<"  failed ! " << endl;\r
+\r
+       cout << "------------ End of calculation -----------" << endl;\r
+       myProblem.terminate();\r
+\r
+       return EXIT_SUCCESS;\r
+}\r