//============================================================================
// Author      : Michael NDJINGA
// Date        : November 2020
// Description : multiD linear wave system run in parallel with calls to MPI
//               Test used in the MPI version of the Salome platform
//============================================================================
#include <iostream>
#include <string>
#include <cmath>

#include <petscksp.h>

#include "Mesh.hxx"
#include "Cell.hxx"
#include "Face.hxx"
#include "Field.hxx"
#include "CdmathException.hxx"

using namespace std;
double p0  =155e5;      //reference pressure in a pressurised nuclear vessel
double c0  =700.;       //reference sound speed for water at 155 bars, 600K
double rho0=p0/(c0*c0); //reference density
double precision=1e-5;
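
/* The code solves the linear wave system written in the conservative variables
 * (p, q), with q = rho0*u the momentum:
 *     dp/dt + c0^2 * div(q) = 0
 *     dq/dt + grad(p)       = 0
 * Each cell therefore carries nbComp = dim+1 unknowns, stored as
 * U = (p, q_1, ..., q_dim); p0, c0 and rho0 above fix the reference state. */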
void initial_conditions_shock(const Mesh& my_mesh, Field& pressure_field, Field& velocity_field)
{
    double x=0, y=0, z=0;
    double val, valX, valY, valZ;

    int dim    =my_mesh.getMeshDimension();
    int nbCells=my_mesh.getNumberOfCells();

    for (int j=0 ; j<nbCells ; j++)
    {
        x = my_mesh.getCell(j).x() ;
        if(dim>1)
            y = my_mesh.getCell(j).y() ;
        if(dim>2)
            z = my_mesh.getCell(j).z() ;
        valX=(x-xcentre)*(x-xcentre);
        if(dim==1)
            val=sqrt(valX);
        else if(dim==2)
        {
            valY=(y-ycentre)*(y-ycentre);
            val=sqrt(valX+valY);
        }
        else
        {
            valY=(y-ycentre)*(y-ycentre);
            valZ=(z-zcentre)*(z-zcentre);
            val=sqrt(valX+valY+valZ);
        }
        for(int idim=0; idim<dim; idim++)
            velocity_field(j,idim)=0;
        if (val<rayon)//rayon: assumed name for the radius of the initial pressurised ball
            pressure_field(j) = 155e5;
        else
            pressure_field(j) = 70e5;
    }
}
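
/* addValue accumulates the dense (dim+1)x(dim+1) block M into the global PETSc
 * matrix; i and j are the global indices of the top-left coefficient of the
 * block. ADD_VALUES lets the contributions of the different faces of a cell
 * add up in the same rows of the matrix. */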
void addValue( int i, int j, Matrix M, Mat * mat )
{
    for (int k=0; k<M.getNumberOfRows(); k++)
        for (int l=0; l<M.getNumberOfColumns(); l++)
        {
            int I = i + k;//global row index
            int J = j + l;//global column index
            MatSetValues( *mat, 1, &I, 1, &J, &M(k,l), ADD_VALUES);
        }
}
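
/* jacobianMatrices returns the negative part A^- = (A - |A|)/2, scaled by
 * coeff, of the flux Jacobian A of the wave system in the direction of the
 * unit outward normal n (eigenvalues -c0, +c0 and 0 with multiplicity dim-1):
 *     A = [ 0    c0^2 n^T ]        |A| = c0 * [ 1   0     ]
 *         [ n    0        ]                   [ 0   n n^T ]
 * For instance in 1D (n = (1)) the function returns
 *     coeff/2 * [ -c0    c0^2 ]
 *               [  1     -c0  ]
 * Only A^- is needed: the upwind flux balance of a cell is written below as a
 * sum of terms A^- * (U_neighbour - U_cell). */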
Matrix jacobianMatrices(Vector normal, double coeff)
{
    int dim=normal.size();
    Matrix    A(dim+1,dim+1);
    Matrix absA(dim+1,dim+1);

    absA(0,0)=c0*coeff;
    for(int i=0 ; i<dim; i++)
    {
        A(i+1,0)=      normal[i]*coeff;
        A(0,i+1)=c0*c0*normal[i]*coeff;
        for( int j=0 ; j<dim; j++)
            absA(i+1,j+1)=c0*normal[i]*normal[j]*coeff;
    }
    return (A - absA)*(1./2);
}
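
/* computeDivergenceMatrix assembles the global matrix of the upwind scheme:
 * for every face F of every cell j, row block j receives
 *     +Am on the columns of the neighbouring cell, and
 *     -Am on the columns of cell j,
 * with Am = A^-(n) * dt*|F|/|C_j|. The product divMat*Un therefore yields the
 * upwind flux balance of each cell, which the time loop subtracts from Un. */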
void computeDivergenceMatrix(const Mesh& my_mesh, Mat * implMat, double dt)
{
    int nbCells = my_mesh.getNumberOfCells();
    int dim     = my_mesh.getMeshDimension();
    int nbComp  = dim+1;
    int cellAutre;//index of the cell on the other side of a face
    Vector normal(dim);
    Matrix idMoinsJacCL(nbComp);

    for(int j=0; j<nbCells; j++)//Loop over the cells
    {
        Cell Cj = my_mesh.getCell(j);
        int nbFaces = Cj.getNumberOfFaces();

        for(int k=0; k<nbFaces; k++)
        {
            int indexFace = Cj.getFacesId()[k];
            Face Fk = my_mesh.getFace(indexFace);
            for(int i =0; i<dim ; i++)
                normal[i] = Cj.getNormalVector(k, i);//outward normal

            Matrix Am=jacobianMatrices( normal, dt*Fk.getMeasure()/Cj.getMeasure());
            if ( not Fk.isBorder())
            {
                /* Assumption: the cell of index indexC1 is the current cell of index j */
                if (Fk.getCellsId()[0] == j)
                    // assumption verified
                    cellAutre = Fk.getCellsId()[1];
                else if(Fk.getCellsId()[1] == j)
                    // assumption not verified
                    cellAutre = Fk.getCellsId()[0];
                else
                    throw CdmathException("computeDivergenceMatrix: problem with mesh, unknown cell number");

                addValue(j*nbComp,cellAutre*nbComp,Am      ,implMat);
                addValue(j*nbComp,        j*nbComp,Am*(-1.),implMat);
            }
            else if( Fk.getGroupName() != "Periodic" && Fk.getGroupName() != "Neumann")//Wall boundary condition unless Periodic/Neumann is specified explicitly
            {
                Vector v(dim+1);
                for(int i=0; i<dim; i++)
                    v[i+1]=normal[i];
                idMoinsJacCL=v.tensProduct(v)*2;//Id - (Id - 2 n n^T): the wall ghost state is U_ghost = (Id - 2 n n^T) U

                addValue(j*nbComp,j*nbComp,Am*(-1.)*idMoinsJacCL,implMat);
            }
            else if( Fk.getGroupName() == "Periodic")//Periodic boundary condition
            {
                int indexFP=my_mesh.getIndexFacePeriodic(indexFace);
                Face Fp = my_mesh.getFace(indexFP);
                cellAutre = Fp.getCellsId()[0];

                addValue(j*nbComp,cellAutre*nbComp,Am      ,implMat);
                addValue(j*nbComp,        j*nbComp,Am*(-1.),implMat);
            }
            else if(Fk.getGroupName() != "Neumann")//Nothing to do for a Neumann boundary condition
            {
                cout<< Fk.getGroupName() <<endl;
                throw CdmathException("computeDivergenceMatrix: Unknown boundary condition name");
            }
        }
    }
}
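
/* WaveSystem drives the explicit time stepping. Process 0 owns the mesh and
 * the fields: it builds the initial data and fills the matrix and the initial
 * vector; PETSc assembly then distributes them over the communicator, every
 * process takes part in the products dUn = divMat*Un, and the solution is
 * scattered back to process 0 for the MED output. */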
void WaveSystem(double tmax, int ntmax, double cfl, int output_freq, const Mesh& my_mesh, const string file, int rank, int size, string resultDirectory)
{
    /* Time iteration variables */
    bool isStationary=false;
    int it=0;
    double time=0., dt=0.;
    double norm;

    /* PETSc variables */
    int globalNbUnknowns, localNbUnknowns;
    int d_nnz, o_nnz;//number of nonzeros per row, diagonal and off-diagonal blocks
    Vec Un, dUn, Un_seq;
    Mat divMat;
    VecScatter scat;

    /* Mesh parameters managed only by proc 0 */
    int nbCells=0, dim=0, nbComp=0;
    int idx;//Index where to add the vector values
    double value;//value to add in the vector
    Field pressure_field, velocity_field;
    std::string meshName;

    if(rank == 0)
        globalNbUnknowns=my_mesh.getNumberOfCells()*(my_mesh.getMeshDimension()+1);//nbCells*nbComp
    MPI_Bcast(&globalNbUnknowns, 1, MPI_INT, 0, MPI_COMM_WORLD);
    /* iteration vectors */
    VecCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,globalNbUnknowns,&Un);
    VecDuplicate (Un,&dUn);

    VecCreateSeq(PETSC_COMM_SELF,globalNbUnknowns,&Un_seq);//For saving results on proc 0

    VecScatterCreateToZero(Un,&scat,&Un_seq);

    VecGetLocalSize(Un, &localNbUnknowns);

    if(rank == 0)
    {
        int nbVoisinsMax = my_mesh.getMaxNbNeighbours(CELLS);
        d_nnz=(nbVoisinsMax+1)*(my_mesh.getMeshDimension()+1);//(nbVoisinsMax+1)*nbComp
        o_nnz= nbVoisinsMax   *(my_mesh.getMeshDimension()+1);// nbVoisinsMax   *nbComp
    }
    MPI_Bcast(&d_nnz, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&o_nnz, 1, MPI_INT, 0, MPI_COMM_WORLD);

    MatCreateAIJ(PETSC_COMM_WORLD,localNbUnknowns,localNbUnknowns,globalNbUnknowns,globalNbUnknowns,d_nnz,NULL,o_nnz,NULL,&divMat);
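    /* d_nnz/o_nnz above give, for each local row, the number of nonzeros held
     * in the diagonal block (columns owned by this process) and in the
     * off-diagonal block. Overestimating slightly wastes a little memory;
     * underestimating would trigger costly reallocations during assembly. */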
    if(rank == 0)
    {
        /* Retrieve mesh data */
        nbCells = my_mesh.getNumberOfCells();
        dim     = my_mesh.getMeshDimension();
        nbComp  = dim+1;
        meshName=my_mesh.getName();
        double dx_min=my_mesh.minRatioVolSurf();
        dt = cfl * dx_min / c0;
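        /* c0*dt/dx_min = cfl is the acoustic CFL number of the explicit
         * scheme; dx_min, the smallest volume/surface ratio of the cells,
         * plays the role of a mesh size. main() passes cfl = 1/spaceDimension,
         * a classical sufficient stability bound for the multidimensional
         * upwind scheme. */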
        /* Initial conditions */
        cout<<"Building the initial condition on processor 0" << endl;

        pressure_field=Field("Pressure",CELLS,my_mesh,1) ;
        velocity_field=Field("Velocity",CELLS,my_mesh,dim) ;
        initial_conditions_shock(my_mesh,pressure_field,velocity_field);

        cout << "Saving the solution at T=" << time <<" on processor 0"<<endl;
        pressure_field.setTime(time,it);
        pressure_field.writeMED(resultDirectory+"/WaveSystem"+to_string(dim)+"DUpwind_"+to_string(size)+"Procs_"+meshName+"_pressure");
        velocity_field.setTime(time,it);
        velocity_field.writeMED(resultDirectory+"/WaveSystem"+to_string(dim)+"DUpwind_"+to_string(size)+"Procs_"+meshName+"_velocity");
        /* --------------------------------------------- */
        for(int k =0; k<nbCells; k++)
        {
            idx = k*nbComp;
            value=pressure_field(k);//value to add in the vector
            VecSetValues(Un,1,&idx,&value,INSERT_VALUES);
            for(int idim =0; idim<dim; idim++)
            {
                idx = k*nbComp+1+idim;
                value=rho0*velocity_field(k,idim);
                VecSetValues(Un,1,&idx,&value,INSERT_VALUES);
            }
        }
        computeDivergenceMatrix(my_mesh,&divMat,dt);
    }
    VecAssemblyBegin(Un);
    VecAssemblyEnd(Un);
    //VecView(Un, PETSC_VIEWER_STDOUT_WORLD );

    MatAssemblyBegin(divMat, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd( divMat, MAT_FINAL_ASSEMBLY);
    //MatView(divMat, PETSC_VIEWER_STDOUT_WORLD );

    MPI_Bcast(&dt, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
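    //dt was computed from the mesh on process 0 only: broadcast it so that all processes advance in time identically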
    PetscPrintf(PETSC_COMM_WORLD,"Starting computation of the linear wave system on all processors : \n\n");

    /* Time loop */
    while (it<ntmax && time <= tmax && !isStationary)
    {
        MatMult(divMat,Un,dUn);
        VecAXPY(Un,-1,dUn);//explicit update: U^{n+1} = U^n - divMat*U^n
        time=time+dt;
        it=it+1;

        VecNorm(dUn,NORM_2,&norm);
        isStationary = norm<precision;

        if(it%output_freq==0 or it>=ntmax or isStationary or time >=tmax )
        {
            PetscPrintf(PETSC_COMM_WORLD,"-- Iteration: %d, Time: %f, dt: %f, saving results on processor 0 \n", it, time, dt);
            VecScatterBegin(scat,Un,Un_seq,INSERT_VALUES,SCATTER_FORWARD);
            VecScatterEnd(  scat,Un,Un_seq,INSERT_VALUES,SCATTER_FORWARD);
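            //Un_seq now holds a full copy of the distributed solution, on process 0 only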
            if(rank == 0)
            {
                for(int k=0; k<nbCells; k++)
                {
                    idx = k*nbComp;
                    VecGetValues(Un_seq,1,&idx,&value);
                    pressure_field(k)=value;
                    for(int idim =0; idim<dim; idim++)
                    {
                        idx = k*nbComp+1+idim;
                        VecGetValues(Un_seq,1,&idx,&value);
                        velocity_field(k,idim) = value/rho0;
                    }
                }
                pressure_field.setTime(time,it);
                pressure_field.writeMED(resultDirectory+"/WaveSystem"+to_string(dim)+"DUpwind_"+to_string(size)+"Procs_"+meshName+"_pressure",false);
                velocity_field.setTime(time,it);
                velocity_field.writeMED(resultDirectory+"/WaveSystem"+to_string(dim)+"DUpwind_"+to_string(size)+"Procs_"+meshName+"_velocity",false);
            }
        }
    }
    PetscPrintf(PETSC_COMM_WORLD,"\n End of calculations at iteration: %d, and time: %f\n", it, time);
    if(it>=ntmax)
        PetscPrintf(PETSC_COMM_WORLD, "Maximum number of time steps ntmax= %d reached\n", ntmax);
    else if(isStationary)
        PetscPrintf(PETSC_COMM_WORLD, "Stationary regime reached at time step %d, t= %f\n", it, time);
    else
        PetscPrintf(PETSC_COMM_WORLD, "Maximum time tmax= %f reached\n", tmax);
}
int main(int argc, char *argv[])
{
    /* PETSc initialisation */
    PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
    PetscMPIInt size; /* size of communicator */
    PetscMPIInt rank; /* processor rank */
    MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
    MPI_Comm_size(PETSC_COMM_WORLD,&size);
    string fileOutPut="SphericalWave";
    string resultDirectory="./";

    if(size>1)
    {
        PetscPrintf(PETSC_COMM_WORLD,"---- More than one processor detected : running a parallel simulation ----\n");
        PetscPrintf(PETSC_COMM_WORLD,"---- Limited parallelism : input and output remain sequential ----\n");
        PetscPrintf(PETSC_COMM_WORLD,"---- Only the matrix-vector products are done in parallel ----\n");
        PetscPrintf(PETSC_COMM_WORLD,"---- Processor 0 is in charge of building the mesh, saving the results, filling and then distributing the matrix to the other processors.\n\n");
    }
    cout << "-- Starting the RESOLUTION OF THE 2D WAVE SYSTEM on "<< size <<" processors"<<endl;
    cout << "- Numerical scheme : Upwind explicit scheme" << endl;
    cout << "- Boundary conditions : WALL" << endl;
    /* Read or create mesh */
    Mesh myMesh;
    if(argc<2)
    {
        cout << "- DOMAIN : SQUARE" << endl;
        cout << "- MESH : CARTESIAN, GENERATED INTERNALLY WITH CDMATH" << endl<< endl;
        cout << "Construction of a cartesian mesh on processor 0" << endl;
        myMesh=Mesh(xinf,xsup,nx,yinf,ysup,ny);

        double eps=precision;
        myMesh.setGroupAtPlan(xsup,0,eps,"RightEdge");
        myMesh.setGroupAtPlan(xinf,0,eps,"LeftEdge");
        myMesh.setGroupAtPlan(yinf,1,eps,"BottomEdge");
        myMesh.setGroupAtPlan(ysup,1,eps,"TopEdge");
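
        /* None of these group names is "Periodic" or "Neumann", so
         * computeDivergenceMatrix applies the wall boundary condition on the
         * four edges of the square. */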
    }
    else
    {
        cout << "- MESH: GENERATED EXTERNALLY WITH SALOME" << endl<< endl;
        cout << "Loading of a mesh named "<<argv[1]<<" on processor 0" << endl;
        string filename = argv[1];
        myMesh=Mesh(filename);
    }
    /* Detect the directory where to save the results */
    if(argc>2)
        resultDirectory = argv[2];
    double cfl=1./myMesh.getSpaceDimension();
    WaveSystem(tmax,ntmax,cfl,freqSortie,myMesh,fileOutPut, rank, size, resultDirectory);

    cout << "Simulation complete." << endl;