--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+#===============================================================================================================================
+# Name : Tests of the library mpi4py from MPI4PY tutorial
+# Author : Michaël Ndjinga
+# Copyright : CEA Saclay 2021
+# Description : https://mpi4py.readthedocs.io/en/stable/tutorial.html
+#================================================================================================================================
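+
+# These examples require at least two MPI processes (the point-to-point tests send from rank 0 to rank 1).
+# A typical launch, assuming the script is saved as test_mpi4py_tutorial.py (placeholder name):
+#     mpiexec -n 4 python3 test_mpi4py_tutorial.py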
+
+from mpi4py import MPI
+import numpy as np
+
+# Tests from MPI4PY tutorial https://mpi4py.readthedocs.io/en/stable/tutorial.html
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+print("My rank is ", rank, " among ", size, "processors ")
+
+###Point-to-Point Communication
+
+#Python objects (pickle under the hood):
+
+if rank == 0:
+ data = {'a': 7, 'b': 3.14}
+ comm.send(data, dest=1, tag=11)
+elif rank == 1:
+ data = comm.recv(source=0, tag=11)
+
+#Python objects with non-blocking communication:
+
+if rank == 0:
+ data = {'a': 7, 'b': 3.14}
+ req = comm.isend(data, dest=1, tag=11)
+ req.wait()
+elif rank == 1:
+ req = comm.irecv(source=0, tag=11)
+ data = req.wait()
+
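+# Note: lowercase send/recv/isend/irecv (above) communicate arbitrary Python objects via pickle,
+# while the capitalized Send/Recv variants (below) work directly on buffer-like objects such as
+# NumPy arrays, avoiding the pickling overhead.
+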
+# passing MPI datatypes explicitly
+if rank == 0:
+ data = np.arange(1000, dtype='i')
+ comm.Send([data, MPI.INT], dest=1, tag=77)
+elif rank == 1:
+ data = np.empty(1000, dtype='i')
+ comm.Recv([data, MPI.INT], source=0, tag=77)
+
+# automatic MPI datatype discovery
+if rank == 0:
+ data = np.arange(100, dtype=np.float64)
+ comm.Send(data, dest=1, tag=13)
+elif rank == 1:
+ data = np.empty(100, dtype=np.float64)
+ comm.Recv(data, source=0, tag=13)
+
+###Collective Communication
+
+#Broadcasting a Python dictionary:
+
+if rank == 0:
+ data = {'key1' : [7, 2.72, 2+3j],
+ 'key2' : ( 'abc', 'xyz')}
+else:
+ data = None
+data = comm.bcast(data, root=0)
+
+#Scattering Python objects:
+
+if rank == 0:
+ data = [(i+1)**2 for i in range(size)]
+else:
+ data = None
+data = comm.scatter(data, root=0)
+assert data == (rank+1)**2
+
+#Gathering Python objects:
+
+data = (rank+1)**2
+data = comm.gather(data, root=0)
+if rank == 0:
+ for i in range(size):
+ assert data[i] == (i+1)**2
+else:
+ assert data is None
+
+# Broadcasting a NumPy array:
+
+if rank == 0:
+ data = np.arange(100, dtype='i')
+else:
+ data = np.empty(100, dtype='i')
+comm.Bcast(data, root=0)
+for i in range(100):
+ assert data[i] == i
+
+#Scattering NumPy arrays:
+
+sendbuf = None
+if rank == 0:
+ sendbuf = np.empty([size, 100], dtype='i')
+ sendbuf.T[:,:] = range(size)
+recvbuf = np.empty(100, dtype='i')
+comm.Scatter(sendbuf, recvbuf, root=0)
+assert np.allclose(recvbuf, rank)
+
+#Gathering NumPy arrays:
+
+sendbuf = np.zeros(100, dtype='i') + rank
+recvbuf = None
+if rank == 0:
+ recvbuf = np.empty([size, 100], dtype='i')
+comm.Gather(sendbuf, recvbuf, root=0)
+if rank == 0:
+ for i in range(size):
+ assert np.allclose(recvbuf[i,:], i)
+
+#Parallel matrix-vector product:
+
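+# A minimal sketch of a distributed matrix-vector product in the spirit of the tutorial:
+# each process owns a block of rows of A together with the matching slice of x, gathers the
+# full vector with Allgather, then multiplies its local block. The function name, the local
+# block size (3 rows per process) and the test values below are illustrative assumptions.
+def matvec(comm, A, x):
+    m = A.shape[0]                                    # number of rows owned by this process
+    p = comm.Get_size()
+    xg = np.zeros(m*p, dtype='d')
+    comm.Allgather([x, MPI.DOUBLE], [xg, MPI.DOUBLE]) # every process assembles the full vector
+    y = np.dot(A, xg)                                 # local rows of A times the full vector
+    return y
+
+A_local = np.ones((3, 3*size), dtype='d') * rank      # 3 local rows of a (3*size) x (3*size) matrix
+x_local = np.ones(3, dtype='d')                       # local slice of the global vector of ones
+y_local = matvec(comm, A_local, x_local)
+assert np.allclose(y_local, rank * 3 * size)          # each local row sums 'rank' over 3*size entries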
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+#===============================================================================================================================
+# Name : Tests of sending and receiving 2D MEDCoupling fields on nodes (P1) lying on different meshes between two processors
+# Author : Michaël Ndjinga
+# Copyright : CEA Saclay 2021
+# Description : Use of the parallel Data Exchange Channel InterpKernelDEC of MEDCoupling
+#================================================================================================================================
+
+from mpi4py import MPI
+import medcoupling as mc
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+if size != 2:
+    raise ValueError("Processor "+str(rank)+" : aborting.\n Simulation should be done on two processors.\n "+str(size)+" processors given")
+
+print("My rank is ", rank, " among ", size, "processors")
+
+procs_source = [0]
+procs_target = [1]
+
+interface = mc.CommInterface()
+source_group = mc.MPIProcessorGroup(interface, procs_source)
+target_group = mc.MPIProcessorGroup(interface, procs_target)
+dec = mc.InterpKernelDEC(source_group, target_group)
+
+# Create a MEDCouplingUMesh from a 2D Cartesian mesh
+xarr=mc.DataArrayDouble.New(11,1)
+xarr.iota(0.)
+cmesh=mc.MEDCouplingCMesh.New()
+cmesh.setCoords(xarr,xarr)
+mesh=cmesh.buildUnstructured()
+mesh.setName("RegularSquare")
+mesh.simplexize(rank)#The squares are cut in two right triangles according to one of the two possible diagonals
+
+#Create a field by application of an analytic function
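+#Note: the field nature drives the remapping formula used by the DEC. IntensiveMaximum treats the field as an
+#intensive quantity (e.g. a temperature) whose point values are interpolated, whereas ExtensiveConservation
+#would treat it as an extensive quantity (e.g. a power) whose integral over the domain is conserved.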
+if source_group.containsMyRank():
+ field=mesh.fillFromAnalytic(mc.ON_NODES,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)")
+ field.setName("SourceField")
+ #field.setNature(mc.ExtensiveConservation)
+ field.setNature(mc.IntensiveMaximum)
+ mc.WriteField("source_field.med", field, True)
+ print("Processor ", rank, " has created and saved the source field")
+else:
+ field=mesh.fillFromAnalytic(mc.ON_NODES,1,"0")
+ field.setName("TargetField")
+ #field.setNature(mc.ExtensiveConservation)
+ field.setNature(mc.IntensiveMaximum)
+ print("Processor ", rank, " has created the target field")
+
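+#Typical InterpKernelDEC sequence: choose the interpolation method ("P1" = node-based, piecewise linear),
+#attach the local field on each side, synchronize() to build the interpolation matrix in parallel,
+#then sendData()/recvData() to exchange the remapped values.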
+dec.setMethod("P1")
+dec.attachLocalField(field)
+dec.synchronize()
+
+if source_group.containsMyRank():
+ dec.sendData()
+ print("Processor ", rank, " has sent the source field")
+else:
+ dec.recvData()
+ print("Processor ", rank, " has received the source field on the target mesh")
+ exact_field=mesh.fillFromAnalytic(mc.ON_NODES,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)")
+ exact_field.setName("ExactField")
+ #Computing maximum error
+ coordsArr = mesh.getCoords()
+ values=field.getValueOnMulti(coordsArr)
+ exact_values=exact_field.getValueOnMulti(coordsArr)
+ error=(values-exact_values).normMax()/exact_values.normMax()
+ print("Processor ", rank, " received source field differs from theoretical value by less than", error, " (maximum relative norm on node values)" )
+ assert error<1.e-1
+ mc.WriteField("target_field.med", field, True)
+ mc.WriteField("exact_field.med", exact_field, True)
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+#===============================================================================================================================
+# Name : Tests of sending and receiving a 3D MEDCoupling field on cells (P0) lying on the same mesh between two processors
+# Author : Michaël Ndjinga
+# Copyright : CEA Saclay 2021
+# Description : Use of the parallel Data Exchange Channel StructuredCoincidentDEC of MEDCoupling
+#================================================================================================================================
+
+from mpi4py import MPI
+import medcoupling as mc
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+if size != 2:
+    raise ValueError("Processor "+str(rank)+" : aborting.\n Simulation should be done on two processors.\n "+str(size)+" processors given")
+
+print("My rank is ", rank, " among ", size, "processors")
+
+procs_source = [0]
+procs_target = [1]
+
+interface = mc.CommInterface()
+source_group = mc.MPIProcessorGroup(interface, procs_source)
+target_group = mc.MPIProcessorGroup(interface, procs_target)
+dec = mc.StructuredCoincidentDEC(source_group, target_group)
+
+# Create a MEDCouplingUMesh from a 3D cartesian mesh
+xarr=mc.DataArrayDouble.New(11,1)
+xarr.iota(0.)
+cmesh=mc.MEDCouplingCMesh.New()
+cmesh.setCoords(xarr,xarr,xarr)
+mesh=cmesh.buildUnstructured()
+mesh.setName("RegularSquare")
+
+#Create a field by application of an analytic function
+if source_group.containsMyRank():
+ field=mesh.fillFromAnalytic(mc.ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)")
+ field.setName("SourceField")
+ mc.WriteField("source_field.med", field, True)
+ print("Processor ", rank, " has created and saved the source field")
+else:
+ field=mesh.fillFromAnalytic(mc.ON_CELLS,1,"0")
+ field.setName("TargetField")
+ print("Processor ", rank, " has created the target field")
+
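+#Unlike InterpKernelDEC, StructuredCoincidentDEC performs no interpolation: source and target lie on the
+#same (coincident) mesh, so the DEC only redistributes the cell values between the two processor groups,
+#which is why no setMethod() call appears here.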
+dec.attachLocalField(field)
+dec.synchronize()
+
+if source_group.containsMyRank():
+ dec.sendData()
+ print("Processor ", rank, " has sent the source field")
+else:
+ dec.recvData()
+ print("Processor ", rank, " has received the source field on the target mesh")
+ exact_field=mesh.fillFromAnalytic(mc.ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)")
+ exact_field.setName("ExactField")
+ error=(field-exact_field).normL2()[0]
+ print("Processor ", rank, " received source field differs from theoretical value by ", error, " (L2 norm on cells)" )
+ assert abs(error)<1.e-6
+ mc.WriteField("target_field.med", field, True)
+ mc.WriteField("exact_field.med", exact_field, True)
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+#===============================================================================================================================
+# Name : Tests of using a subcommunicator for sending and receiving a 3D MEDCoupling field on cells (P0) lying on the same mesh between two processors
+# Author : Michaël Ndjinga
+# Copyright : CEA Saclay 2021
+# Description : Use of the parallel Data Exchange Channel StructuredCoincidentDEC of MEDCoupling
+#================================================================================================================================
+
+from mpi4py import MPI
+import medcoupling as mc
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+if size != 3:
+    raise ValueError("Processor "+str(rank)+" : aborting.\n Simulation should be done on three processors.\n "+str(size)+" processors given")
+
+print("My rank is ", rank, " among ", size, "processors")
+
+procs_source = [0]
+procs_target = [1]
+procs_idle = [2]
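+#Rank 2 belongs to neither processor group: the DEC only couples ranks 0 and 1, and rank 2 stays idle.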
+
+interface = mc.CommInterface()
+source_group = mc.MPIProcessorGroup(interface, procs_source)
+target_group = mc.MPIProcessorGroup(interface, procs_target)
+dec = mc.StructuredCoincidentDEC(source_group, target_group)
+
+# Create a MEDCouplingUMesh from a 3D cartesian mesh
+xarr=mc.DataArrayDouble.New(11,1)
+xarr.iota(0.)
+cmesh=mc.MEDCouplingCMesh.New()
+cmesh.setCoords(xarr,xarr,xarr)
+mesh=cmesh.buildUnstructured()
+mesh.setName("RegularSquare")
+
+#Create a field by application of an analytic function
+if source_group.containsMyRank():
+ field=mesh.fillFromAnalytic(mc.ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)")
+ field.setName("SourceField")
+ mc.WriteField("source_field.med", field, True)
+ print("Processor ", rank, " has created and saved the source field")
+else:
+ field=mesh.fillFromAnalytic(mc.ON_CELLS,1,"0")
+ field.setName("TargetField")
+ print("Processor ", rank, " has created the target field")
+
+dec.attachLocalField(field)
+dec.synchronize()
+
+if source_group.containsMyRank():
+ dec.sendData()
+ print("Processor ", rank, " has sent the source field")
+elif target_group.containsMyRank():
+ dec.recvData()
+ print("Processor ", rank, " has received the source field on the target mesh")
+ exact_field=mesh.fillFromAnalytic(mc.ON_CELLS,1,"(x-5.)*(x-5.)+(y-5.)*(y-5.)+(z-5.)*(z-5.)")
+ exact_field.setName("ExactField")
+ error=(field-exact_field).normL2()[0]
+ print("Processor ", rank, " received source field differs from theoretical value by ", error, " (L2 norm on cells)" )
+ assert abs(error)<1.e-6
+ mc.WriteField("target_field.med", field, True)
+ mc.WriteField("exact_field.med", exact_field, True)
+else:
+ print("Processor ", rank, " did nothing" )
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+#===============================================================================================================================
+# Name : Tests of launching two independent simulations in parallel
+# Author : Michaël Ndjinga
+# Copyright : CEA Saclay 2021
+# Description : Each processor runs an independent solverlab StationaryDiffusionEquation simulation; the relative errors are then exchanged with mpi4py
+#================================================================================================================================
+
+from mpi4py import MPI
+import numpy as np
+import solverlab
+from math import sin, pi
+
+def StationaryDiffusionEquation_2DEF_StructuredTriangles_par(split_direction, rank):
+ spaceDim = 2;
+ # Prepare for the mesh
+ print("Processor ", rank, " : Building mesh " );
+ xinf = 0 ;
+ xsup=1.0;
+ yinf=0.0;
+ ysup=1.0;
+ nx=20;
+ ny=20;
+ M=solverlab.Mesh(xinf,xsup,nx,yinf,ysup,ny,split_direction)#Regular triangular mesh
+ # set the limit field for each boundary
+ eps=1e-6;
+ M.setGroupAtPlan(xsup,0,eps,"Bord1")
+ M.setGroupAtPlan(xinf,0,eps,"Bord2")
+ M.setGroupAtPlan(ysup,1,eps,"Bord3")
+ M.setGroupAtPlan(yinf,1,eps,"Bord4")
+
+ print("Processor ", rank, " : Built a regular triangular 2D mesh from a square mesh with ", nx,"x" ,ny, " cells.")
+ print("Processor ", rank, " : Each square was split in two in direction ",split_direction)
+ FEComputation=True
+ myProblem = solverlab.StationaryDiffusionEquation(spaceDim,FEComputation);
+ myProblem.setMesh(M);
+
+ # set the limit value for each boundary
+ T1=0;
+ T2=0;
+ T3=0;
+ T4=0;
+
+ myProblem.setDirichletBoundaryCondition("Bord1",T1)
+ myProblem.setDirichletBoundaryCondition("Bord2",T2)
+ myProblem.setDirichletBoundaryCondition("Bord3",T3)
+ myProblem.setDirichletBoundaryCondition("Bord4",T4)
+
+ #Set the right hand side function
+ my_RHSfield = solverlab.Field("RHS_field", solverlab.NODES, M, 1)
+ for i in range(M.getNumberOfNodes()):
+ Ni= M.getNode(i)
+ x = Ni.x()
+ y = Ni.y()
+
+        my_RHSfield[i]=2*pi*pi*sin(pi*x)*sin(pi*y)# the function defined as the right-hand side of the PDE
+
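+    # With homogeneous Dirichlet conditions on the unit square, -Laplacian(u) = 2*pi*pi*sin(pi*x)*sin(pi*y)
+    # has the exact solution u(x,y) = sin(pi*x)*sin(pi*y); the postprocessing below relies on this.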
+ myProblem.setHeatPowerField(my_RHSfield)
+ myProblem.setLinearSolver(solverlab.GMRES,solverlab.ILU);
+
+ # name of result file
+ fileName = "StationnaryDiffusion_2DEF_StructuredTriangles"+str(rank);
+
+ # computation parameters
+ myProblem.setFileName(fileName);
+
+ # Run the computation
+ myProblem.initialize();
+ print("Processor ", rank, " : Running python "+ fileName );
+
+ ok = myProblem.solveStationaryProblem();
+ if (not ok):
+ print( "Python simulation of " + fileName + " failed ! " );
+        raise RuntimeError("Python simulation of " + fileName + " failed")  # fail loudly instead of reaching the return below with an undefined error
+ else:
+ ####################### Postprocessing #########################
+ my_ResultField = myProblem.getOutputTemperatureField()
+        #The following formulas use the fact that the exact solution is equal to the right-hand side divided by 2*pi*pi
+ max_abs_sol_exacte=max(my_RHSfield.max(),-my_RHSfield.min())/(2*pi*pi)
+ max_sol_num=my_ResultField.max()
+ min_sol_num=my_ResultField.min()
+ erreur_abs=0
+ for i in range(M.getNumberOfNodes()) :
+ if erreur_abs < abs(my_RHSfield[i]/(2*pi*pi) - my_ResultField[i]) :
+ erreur_abs = abs(my_RHSfield[i]/(2*pi*pi) - my_ResultField[i])
+
+ print("Processor ", rank, " : Absolute error = max(| exact solution - numerical solution |) = ",erreur_abs )
+ print("Processor ", rank, " : Relative error = max(| exact solution - numerical solution |)/max(| exact solution |) = ",erreur_abs/max_abs_sol_exacte)
+ print("Processor ", rank, " : Maximum numerical solution = ", max_sol_num, " Minimum numerical solution = ", min_sol_num)
+
+ assert erreur_abs/max_abs_sol_exacte <1.
+ pass
+
+ print("Processor ", rank, " : ------------ !!! End of calculation !!! -----------" );
+
+ myProblem.terminate();
+ return erreur_abs/max_abs_sol_exacte
+
+if __name__ == "__main__":
+    comm = MPI.COMM_WORLD
+    size = comm.Get_size()
+    rank = comm.Get_rank()
+
+    if size != 2:
+        raise ValueError("Processor "+str(rank)+" : aborting.\n Simulation should be done on two processors.\n "+str(size)+" processors given")
+
+    print("My rank is ", rank, " among ", size, " processors")
+
+    # Each processor runs its own simulation; the rank is also reused as the mesh splitting direction
+    my_relative_error = StationaryDiffusionEquation_2DEF_StructuredTriangles_par(rank, rank)
+
+    # Exchange the relative errors between the two processors with point-to-point messages
+    if rank == 0:
+        comm.send(my_relative_error, dest=1, tag=11)
+        other_relative_error = comm.recv(source=1, tag=17)
+    elif rank == 1:
+        other_relative_error = comm.recv(source=0, tag=11)
+        comm.send(my_relative_error, dest=0, tag=17)
+
+    print("Processor ", rank, " : Difference between the two processor relative errors is ", abs(my_relative_error-other_relative_error) )
+