From: michael
Date: Fri, 19 Nov 2021 12:29:43 +0000 (+0100)
Subject: Added C MPI tests
X-Git-Tag: V9_8_0~42
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=92f5029a3f44269125d9ab0b4c84c8e9a1752a9a;p=tools%2Fsolverlab.git

Added C MPI tests
---

diff --git a/CoreFlows/examples/C/CMakeLists.txt b/CoreFlows/examples/C/CMakeLists.txt
index 1b27de3..e92764d 100755
--- a/CoreFlows/examples/C/CMakeLists.txt
+++ b/CoreFlows/examples/C/CMakeLists.txt
@@ -108,25 +108,58 @@ CreateTestExecAndInstall(IsothermalTwoFluid_2DInclinedSedimentation.cxx "${libs
 CreateTestExecAndInstall(IsothermalTwoFluid_2DVidangeReservoir.cxx "${libs_for_tests}" )
 
-    add_executable(WaveSystem_FV_SphericalExplosion_CDMATH.exe WaveSystem_FV_SphericalExplosion_CDMATH.cxx)
-    target_link_libraries(WaveSystem_FV_SphericalExplosion_CDMATH.exe CoreFlowsLibs )
-    install(TARGETS WaveSystem_FV_SphericalExplosion_CDMATH.exe DESTINATION share/examples)
-    add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_SQUARE COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe")
-    add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_HEXAGON COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshHexagonWithTriangles10.med)
-    add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_CUBE COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshCube.med)
-    add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_TETRAHEDRON COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshTetrahedron10.med)
-
-if( SOLVERLAB_WITH_MPI )
+add_executable(WaveSystem_FV_SphericalExplosion_CDMATH.exe WaveSystem_FV_SphericalExplosion_CDMATH.cxx)
+target_link_libraries(WaveSystem_FV_SphericalExplosion_CDMATH.exe CoreFlowsLibs )
+install(TARGETS WaveSystem_FV_SphericalExplosion_CDMATH.exe DESTINATION share/examples)
+
+add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_SQUARE COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe")
+add_test(NAME WaveSystem_2DFV_SphericalExplosion_CDMATH_HEXAGON COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshHexagonWithTriangles10.med)
+add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_CUBE COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshCube.med)
+add_test(NAME WaveSystem_3DFV_SphericalExplosion_CDMATH_TETRAHEDRON COMMAND "./WaveSystem_FV_SphericalExplosion_CDMATH.exe" resources/meshTetrahedron10.med)
+
+if( SOLVERLAB_WITH_MPI ) # Parallel tests
+    add_executable(MEDCouplingSendRecvFieldSameMesh_MPI.exe MEDCouplingSendRecvFieldSameMesh_MPI.cxx)
+    target_link_libraries(MEDCouplingSendRecvFieldSameMesh_MPI.exe ${MPI_LIBRARY} medcoupling medloader paramedmem)
+    install(TARGETS MEDCouplingSendRecvFieldSameMesh_MPI.exe DESTINATION share/examples)
+
+    add_test(NAME MEDCouplingSendRecvFieldSameMesh_MPI_4Procs.exe COMMAND "${MPIEXEC}" "-n" "4" "./MEDCouplingSendRecvFieldSameMesh_MPI.exe" )
+
+    add_executable(MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe MEDCouplingSendRecvFieldDifferentMeshes_MPI.cxx)
+    target_link_libraries(MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe ${MPI_LIBRARY} medcoupling medloader paramedmem)
+    install(TARGETS MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe DESTINATION share/examples)
+
+    add_test(NAME MEDCouplingSendRecvFieldDifferentMeshes_MPI_4Procs.exe COMMAND "${MPIEXEC}" "-n" "4" "./MEDCouplingSendRecvFieldDifferentMeshes_MPI.exe" )
+
+    add_executable(DiffusionEquation_1DHeatedRod_FE_MPI.exe DiffusionEquation_1DHeatedRod_FE_MPI.cxx) # compilation of the testxxx.exe
+    target_link_libraries(DiffusionEquation_1DHeatedRod_FE_MPI.exe CoreFlowsLibs ${MPI_LIBRARY}) # provide required lib for testxxx.exe
+    install(TARGETS DiffusionEquation_1DHeatedRod_FE_MPI.exe DESTINATION share/examples)
+
+    add_test(NAME DiffusionEquation_1DHeatedRod_FE_MPI_2Procs.exe COMMAND "${MPIEXEC}" "-n" "2" "./DiffusionEquation_1DHeatedRod_FE_MPI.exe")
+
+    add_executable(DiffusionEquation_1DHeatedRod_FV_MPI.exe DiffusionEquation_1DHeatedRod_FV_MPI.cxx) # compilation of the testxxx.exe
+    target_link_libraries(DiffusionEquation_1DHeatedRod_FV_MPI.exe CoreFlowsLibs ${MPI_LIBRARY}) # provide required lib for testxxx.exe
+    install(TARGETS DiffusionEquation_1DHeatedRod_FV_MPI.exe DESTINATION share/examples)
+
+    add_test(NAME DiffusionEquation_1DHeatedRod_FV_MPI_2Procs.exe COMMAND "${MPIEXEC}" "-n" "2" "./DiffusionEquation_1DHeatedRod_FV_MPI.exe")
+
+    add_executable(TransportEquation_1DHeatedChannel_MPI.exe TransportEquation_1DHeatedChannel_MPI.cxx) # compilation of the testxxx.exe
+    target_link_libraries(TransportEquation_1DHeatedChannel_MPI.exe CoreFlowsLibs ${MPI_LIBRARY}) # provide required lib for testxxx.exe
+    install(TARGETS TransportEquation_1DHeatedChannel_MPI.exe DESTINATION share/examples)
+
+    add_test(NAME TransportEquation_1DHeatedChannel_MPI_2Procs.exe COMMAND "${MPIEXEC}" "-n" "2" "./TransportEquation_1DHeatedChannel_MPI.exe")
+
 add_executable(WaveSystem_FV_SphericalExplosion_MPI.exe WaveSystem_FV_SphericalExplosion_MPI.cxx) # compilation of the testxxx.exe
-    target_link_libraries(WaveSystem_FV_SphericalExplosion_MPI.exe CoreFlowsLibs ${MPI_LIBRARY}) # provide required lib for testxxx.exe
+    target_link_libraries(WaveSystem_FV_SphericalExplosion_MPI.exe CoreFlowsLibs ${MPI_LIBRARY}) # provide required lib for testxxx.exe
 install(TARGETS WaveSystem_FV_SphericalExplosion_MPI.exe DESTINATION share/examples)
-    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_SEQ_SQUARE COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe")
-    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_SEQ_HEXAGON COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med)
-    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_SEQ_CUBE COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med)
-    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_SEQ_TETRAHEDRON COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med)
-    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_PAR_SQUARE COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe")
-    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_PAR_HEXAGON COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med)
-    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_PAR_CUBE COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med)
-    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_PAR_TETRAHEDRON COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med)
+
+    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_1Proc_SQUARE.exe COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe")
+    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_1Proc_HEXAGON.exe COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med)
+    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_1Proc_CUBE.exe COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med)
+    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_1Proc_TETRAHEDRON.exe COMMAND "${MPIEXEC}" "-n" "1" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med)
+    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_2Procs_SQUARE.exe COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe")
+    add_test(NAME WaveSystem_2DFV_SphericalExplosion_MPI_2Procs_HEXAGON.exe COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshHexagonWithTriangles10.med)
+    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_2Procs_CUBE.exe COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshCube.med)
+    add_test(NAME WaveSystem_3DFV_SphericalExplosion_MPI_2Procs_TETRAHEDRON.exe COMMAND "${MPIEXEC}" "-n" "2" "./WaveSystem_FV_SphericalExplosion_MPI.exe" resources/meshTetrahedron10.med)
+
 endif( SOLVERLAB_WITH_MPI )
diff --git a/CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FE_MPI.cxx b/CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FE_MPI.cxx
new file mode 100755
index 0000000..ddb8a4c
--- /dev/null
+++ b/CoreFlows/examples/C/DiffusionEquation_1DHeatedRod_FE_MPI.cxx
@@ -0,0 +1,101 @@
+#include "DiffusionEquation.hxx"
+
+using namespace std;
+
+#define PI 3.14159265
+
+void power_field_diffusionTest(Field & Phi){
+    double L=4.2;
+    double lambda=0.2;
+    double phi=1e5;
+    double x;
+    Mesh M = Phi.getMesh();
+    int nbNodes = M.getNumberOfNodes();
+    for (int j = 0; j < nbNodes; j++) {
+        x=M.getNode(j).x();
+        Phi(j) = phi*cos(PI*(x-L/2)/(L+lambda));
+    }
+}
+
+int main(int argc, char** argv)
+{
+    //Preprocessing: mesh and group creation
+    double xinf=0.0;
+    double xsup=4.2;
+    int nx=10;
+    cout << "Building a 1D mesh with "<
+#include 
+#include 
+
+#include "InterpKernelDEC.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "MEDCouplingCMesh.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDLoader.hxx"
+
+#include 
+#include 
+
+using namespace std;
+
+
+int main(int argc, char *argv[])
+{
+    /* MPI initialisation */
+    MPI_Init(&argc, &argv);
+    int size; /* size of communicator */
+    int rank; /* processor rank */
+    int sub_rank, sub_size;/* rank in subcommunicator */
+    int color;/* tells if I belong to the sub_communicator */
+    MPI_Comm sub_comm ;/*subcommunicator will be used in exchanges */
+    std::set<int> procs_source, procs_target;/* group of procs that will send or receive data */
+    MEDCoupling::MEDCouplingFieldDouble * field;/*field used to send or receive data */
+    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+    MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+    if(size!=4)
+        printf("Processor %d : aborting.\n Simulation should be run on four processors.\n %d processors given\n",rank,size);
+
+    color=rank/2;
+
+    printf("My rank is %d among %d processors, my color is %d\n",rank, size,color);
+
+    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub_comm); /* two groups (0,1) and (2,3) */
+    MPI_Comm_rank(sub_comm, &sub_rank);
+    MPI_Comm_size(sub_comm, &sub_size);
+
+    printf("WORLD RANK/SIZE: %d/%d \t subcommunicator RANK/SIZE: %d/%d\n", rank, size, sub_rank, sub_size);
+
+
+    procs_source.insert(0);/* sub rank 0 will send data */
+    procs_target.insert(1);/* sub rank 1 will receive data */
+
+    MEDCoupling::CommInterface interface = MEDCoupling::CommInterface();
+    MEDCoupling::MPIProcessorGroup source_group = MEDCoupling::MPIProcessorGroup(interface, procs_source,sub_comm);
+    MEDCoupling::MPIProcessorGroup target_group = MEDCoupling::MPIProcessorGroup(interface, procs_target,sub_comm);
+    MEDCoupling::InterpKernelDEC dec = MEDCoupling::InterpKernelDEC(source_group, target_group);
+
+    //Create a MEDCouplingUMesh from a 2D cartesian mesh
+    MEDCoupling::DataArrayDouble * xarr=MEDCoupling::DataArrayDouble::New();
+    xarr->alloc(11,1);
+    xarr->iota(0.);
+    MEDCoupling::MEDCouplingCMesh * cmesh=MEDCoupling::MEDCouplingCMesh::New("My2D_CMesh");
+    cmesh->setCoords(xarr,xarr);
+    MEDCoupling::MEDCouplingUMesh * mesh=cmesh->buildUnstructured();
+    mesh->setName("RegularSquare");
+    mesh->simplexize(sub_rank);//The squares are cut in two right triangles according to one of the two possible diagonals
+
+    if(sub_rank == 0)
+    {
+        field = mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)");
+        field->setName("SourceField");
+        field->setNature(MEDCoupling::IntensiveMaximum);
+        MEDCoupling::WriteField("source_field"+to_string(rank)+".med", field, true);
+        printf("Processor with global rank %d has created and saved the source field\n", rank);
+    }
+    else
+    {
+        field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"0");
+        field->setName("TargetField");
+        field->setNature(MEDCoupling::IntensiveMaximum);
+        printf("Processor with global rank %d has created the target field\n", rank);
+    }
+
+    dec.attachLocalField(field);
+    dec.synchronize();
+
+    if(sub_rank == 0)
+    {
+        dec.sendData();
+        printf("Processor with global rank %d has sent the source field\n", rank);
+    }
+    else
+    {
+        dec.recvData();
+        printf("Processor with global rank %d has received the source field on the target mesh\n", rank);
+        MEDCoupling::MEDCouplingFieldDouble * exact_field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)");
+        exact_field->setName("ExactField");
+        //To do : compare the target and exact fields (maximum value, etc.)
+        //double error=((*field)-(*exact_field))->normMax(0)/exact_field->normMax(0);
+        //printf("Processor with global rank %d received a source field that differs from the theoretical value by %e (maximum relative norm on cells)\n", rank, error );
+        //assert( fabs(error)<1.e-6 );
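+        //Illustrative sketch (not part of the original commit): one way the comparison requested in
+        //the "To do" above could be written with basic DataArrayDouble accessors (getArray,
+        //getNumberOfTuples, getIJ); it would also need <algorithm> and <cmath>. It is left commented
+        //out, like the original check, because the accuracy to expect from the interpolated field is
+        //not established here and the 1.e-6 tolerance is only an assumed value.
+        //double max_diff = 0., max_exact = 0.;
+        //const MEDCoupling::DataArrayDouble *recv_arr  = field->getArray();
+        //const MEDCoupling::DataArrayDouble *exact_arr = exact_field->getArray();
+        //for (int i = 0; i < recv_arr->getNumberOfTuples(); i++)
+        //{
+        //    max_diff  = std::max(max_diff,  fabs(recv_arr->getIJ(i,0) - exact_arr->getIJ(i,0)));
+        //    max_exact = std::max(max_exact, fabs(exact_arr->getIJ(i,0)));
+        //}
+        //double relative_error = max_diff/max_exact;
+        //printf("Processor with global rank %d : maximum relative error on cells = %e\n", rank, relative_error);
+        //assert( relative_error < 1.e-6 );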
+        MEDCoupling::WriteField("target_field"+to_string(rank)+".med", field, true);
+        MEDCoupling::WriteField("exact_field"+to_string(rank)+".med", exact_field, true);
+    }
+
+    MPI_Comm_free(&sub_comm);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/CoreFlows/examples/C/MEDCouplingSendRecvFieldSameMesh_MPI.cxx b/CoreFlows/examples/C/MEDCouplingSendRecvFieldSameMesh_MPI.cxx
new file mode 100644
index 0000000..b6ba1e6
--- /dev/null
+++ b/CoreFlows/examples/C/MEDCouplingSendRecvFieldSameMesh_MPI.cxx
@@ -0,0 +1,109 @@
+//============================================================================
+// Name        : Tests of using a subcommunicator for sending and receiving a 3D MEDCoupling field on cells (P0) lying on the same mesh between two groups of two processors
+// Author      : Michael NDJINGA
+// Date        : November 2021
+// Description : Use of the parallel Data Exchange Channel StructuredCoincidentDEC of MEDCoupling
+//============================================================================
+
+#include 
+#include 
+#include 
+
+#include "StructuredCoincidentDEC.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "MEDCouplingCMesh.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDLoader.hxx"
+
+#include 
+#include 
+
+using namespace std;
+
+
+int main(int argc, char *argv[])
+{
+    /* MPI initialisation */
+    MPI_Init(&argc, &argv);
+    int size; /* size of communicator */
+    int rank; /* processor rank */
+    int sub_rank, sub_size;/* rank in subcommunicator */
+    int color;/* tells if I belong to the sub_communicator */
+    MPI_Comm sub_comm ;/*subcommunicator will be used in exchanges */
+    std::set<int> procs_source, procs_target;/* group of procs that will send or receive data */
+    MEDCoupling::MEDCouplingFieldDouble * field;/*field used to send or receive data */
+    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+    MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+    if(size!=4)
+        printf("Processor %d : aborting.\n Simulation should be run on four processors.\n %d processors given\n",rank,size);
+
+    color=rank/2;
+
+    printf("My rank is %d among %d processors, my color is %d\n",rank, size,color);
+
+    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub_comm); /* two groups (0,1) and (2,3) */
+    MPI_Comm_rank(sub_comm, &sub_rank);
+    MPI_Comm_size(sub_comm, &sub_size);
+
+    printf("WORLD RANK/SIZE: %d/%d \t subcommunicator RANK/SIZE: %d/%d\n", rank, size, sub_rank, sub_size);
+
+
+    procs_source.insert(0);/* sub rank 0 will send data */
+    procs_target.insert(1);/* sub rank 1 will receive data */
+
+    MEDCoupling::CommInterface interface = MEDCoupling::CommInterface();
+    MEDCoupling::MPIProcessorGroup source_group = MEDCoupling::MPIProcessorGroup(interface, procs_source,sub_comm);
+    MEDCoupling::MPIProcessorGroup target_group = MEDCoupling::MPIProcessorGroup(interface, procs_target,sub_comm);
+    MEDCoupling::StructuredCoincidentDEC dec = MEDCoupling::StructuredCoincidentDEC(source_group, target_group);
+
+    //Create a MEDCouplingUMesh from a 3D cartesian mesh
+    MEDCoupling::DataArrayDouble * xarr=MEDCoupling::DataArrayDouble::New();
+    xarr->alloc(11,1);
+    xarr->iota(0.);
+    MEDCoupling::MEDCouplingCMesh * cmesh=MEDCoupling::MEDCouplingCMesh::New("My2D_CMesh");
+    cmesh->setCoords(xarr,xarr,xarr);
+    MEDCoupling::MEDCouplingUMesh * mesh=cmesh->buildUnstructured();
+    mesh->setName("RegularSquare");
+
+    if(sub_rank == 0)
+    {
+        field = mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)");
+        field->setName("SourceField");
+        MEDCoupling::WriteField("source_field"+to_string(rank)+".med", field, true);
+        printf("Processor with global rank %d has created and saved the source field\n", rank);
+    }
+    else
+    {
+        field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"0");
+        field->setName("TargetField");
+        printf("Processor with global rank %d has created the target field\n", rank);
+    }
+
+    dec.attachLocalField(field);
+    dec.synchronize();
+
+    if(sub_rank == 0)
+    {
+        dec.sendData();
+        printf("Processor with global rank %d has sent the source field\n", rank);
+    }
+    else
+    {
+        dec.recvData();
+        printf("Processor with global rank %d has received the source field on the target mesh\n", rank);
+        /* Solve the bug in StructuredCoincidentDEC then uncomment the lines below to check the result */
+        //MEDCoupling::MEDCouplingFieldDouble * exact_field=mesh->fillFromAnalytic(MEDCoupling::ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)");
+        //exact_field->setName("ExactField");
+        //double error=((*field)-(*exact_field))->normMax(0)/exact_field->normMax(0);
+        //printf("Processor with global rank %d received a source field that differs from the theoretical value by %e (maximum relative norm on cells)\n", rank, error );
+        //assert( fabs(error)<1.e-6 );
+        //MEDCoupling::WriteField("target_field"+to_string(rank)+".med", field, true);
+        //MEDCoupling::WriteField("exact_field"+to_string(rank)+".med", exact_field, true);
+    }
+
+    MPI_Comm_free(&sub_comm);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/CoreFlows/examples/C/TransportEquation_1DHeatedChannel_MPI.cxx b/CoreFlows/examples/C/TransportEquation_1DHeatedChannel_MPI.cxx
new file mode 100755
index 0000000..2cd886d
--- /dev/null
+++ b/CoreFlows/examples/C/TransportEquation_1DHeatedChannel_MPI.cxx
@@ -0,0 +1,93 @@
+#include "TransportEquation.hxx"
+
+using namespace std;
+
+
+int main(int argc, char** argv)
+{
+    //Preprocessing: mesh and group creation
+    double xinf=0.0;
+    double xsup=4.2;
+    int nx=10;
+    cout << "Building a 1D mesh with "< boundaryFields;
+
+    LimitFieldTransport limitNeumann;
+    limitNeumann.bcType=NeumannTransport;
+    boundaryFields["Neumann"] = limitNeumann;
+
+    LimitFieldTransport limitInlet;
+    limitInlet.bcType=InletTransport;
+    limitInlet.h =1.3e6;//Inlet water enthalpy
+    boundaryFields["Inlet"] = limitInlet;
+
+    //Set the fluid transport velocity
+    vector<double> transportVelocity(1,5);//fluid velocity vector
+
+    TransportEquation myProblem(LiquidPhase,around155bars600KTransport,transportVelocity);
+    Field VV("Enthalpy", CELLS, M, 1);
+
+    //Set rod temperature and heat exchange coefficient
+    double rodTemp=623;//Rod clad temperature
+    double heatTransfertCoeff=1000;//fluid/solid exchange coefficient
+    myProblem.setRodTemperature(rodTemp);
+    myProblem.setHeatTransfertCoeff(heatTransfertCoeff);
+
+    //Initial field creation
+    Vector VV_Constant(1);//initial enthalpy
+    VV_Constant(0) = 1.3e6;
+
+    cout << "Building the initial data " << endl;
+
+    // generate initial condition
+    myProblem.setInitialFieldConstant(M,VV_Constant);
+
+    //set the boundary conditions
+    myProblem.setBoundaryFields(boundaryFields);
+
+    // set the numerical method
+    myProblem.setTimeScheme( Explicit);
+
+    // name result file
+    string fileName = "1DFluidEnthalpy";
+
+    // parameters calculation
+    unsigned MaxNbOfTimeStep =3;
+    int freqSave = 1;
+    double cfl = 0.95;
+    double maxTime = 5;
+    double precision = 1e-6;
+
+    myProblem.setCFL(cfl);
+    myProblem.setPrecision(precision);
+    myProblem.setMaxNbOfTimeStep(MaxNbOfTimeStep);
+    myProblem.setTimeMax(maxTime);
+    myProblem.setFreqSave(freqSave);
+    myProblem.setFileName(fileName);
+
+    // set display option to monitor the calculation
+    bool computation=true;
+    bool system=true;
+    myProblem.setVerbose( computation, system);
+    myProblem.setSaveFileFormat(CSV);
+
+    // evolution
+    myProblem.initialize();
+    bool ok = myProblem.run();
+    if (ok)
+        cout << "Simulation "<