1 // Copyright (C) 2005 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
2 // CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License.
9 // This library is distributed in the hope that it will be useful
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 %module libParaMEDMEM_Swig
22 %include "libParaMEDMEM_Swig.typemap"
23 %include "../MEDMEM_SWIG/libMEDMEM_Swig.i"
26 #include <CommInterface.hxx>
27 #include <ProcessorGroup.hxx>
28 #include <Topology.hxx>
29 #include <MPIProcessorGroup.hxx>
31 #include <IntersectionDEC.hxx>
32 #include <NonCoincidentDEC.hxx>
33 #include <StructuredCoincidentDEC.hxx>
34 #include <ParaMESH.hxx>
35 #include <UnstructuredParaSUPPORT.hxx>
36 #include <StructuredParaSUPPORT.hxx>
37 #include <ParaFIELD.hxx>
38 #include <ICoCoMEDField.hxx>
42 using namespace ParaMEDMEM;
43 using namespace ICoCo;
// Placeholder constants standing in for MPI_COMM_WORLD, MPI_COMM_SELF,
// MPI_DOUBLE and MPI_INT on the Python side; the %typemap blocks further
// down translate them into the real, implementation-specific MPI values.
enum mpi_constants { mpi_comm_world, mpi_comm_self, mpi_double, mpi_int };
// Abstract base class of processor groups; wrapped so it can serve as a
// base type for MPIProcessorGroup below.
class ProcessorGroup {
// avoid error when SWIG creates a default constructor of an abstract class
ProcessorGroup() { return NULL; }
// ProcessorGroup implementation built on top of an MPI communicator.
class MPIProcessorGroup: public ProcessorGroup {
MPIProcessorGroup(const CommInterface& interface);
// Group restricted to an explicit set of process ranks.
MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids);
// Group over a contiguous rank range pstart..pend (inclusivity not visible here -- confirm).
MPIProcessorGroup(const CommInterface& interface,int pstart, int pend);
// Translate 'rank' (a rank within 'group') into the corresponding rank in this group.
int translateRank(const ProcessorGroup* group, int rank) const;
// Presumably the group of all processes NOT in this group -- TODO confirm against C++ docs.
ProcessorGroup* createComplementProcGroup() const;
// Presumably the union of this group and the argument -- TODO confirm against C++ docs.
ProcessorGroup* fuse (const ProcessorGroup&) const;
// True if the calling process belongs to this group.
bool containsMyRank() const;
// Expose ICoCo::MEDField to Python under the name ICoCo_MEDField.
%rename(ICoCo_MEDField) ICoCo::MEDField;

// ICoCo field wrapper coupling a ParaMESH with a ParaFIELD.
class MEDField: public Field {
MEDField(ParaMESH* mesh, ParaFIELD* field);
// avoid error when SWIG creates a default constructor of an abstract class
DEC() { return NULL; };

// Attach the field that this process contributes to the exchange.
// The 'method' string defaults to "P0"; its accepted values are defined by
// the underlying DEC implementation -- not visible here.
void attachLocalField(FIELD<double>* field, const char* method="P0");
void attachLocalField(const ParaFIELD* field, const char* method="P0");
void attachLocalField(const ICoCo::Field* field, const char* method="P0");

void renormalizeTargetField();
// Time interpolation policy, used by IntersectionDEC::setTimeInterpolationMethod.
typedef enum{WithoutTimeInterp,LinearTimeInterp} TimeInterpolationMethod;

// Communication scheme, used by IntersectionDEC::setAllToAllMethod.
typedef enum{Native,PointToPoint} AllToAllMethod;
// DEC exchanging data between a local and a distant processor group
// (intersection-based coupling; see the C++ IntersectionDEC documentation).
class IntersectionDEC: public DEC {
IntersectionDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group);

// Receive field data for the given time.
void recvData( double time );
// Send field data for the given time / time step.
void sendData( double time, double deltatime );

void setTimeInterpolationMethod(TimeInterpolationMethod it);
TimeInterpolationMethod getTimeInterpolationMethod();

// Toggle asynchronous exchange mode.
void setAsynchronous( bool dr);
bool getAsynchronous();

AllToAllMethod getAllToAllMethod();
void setAllToAllMethod(AllToAllMethod sp);

// Forced renormalization of the target field (cf. DEC::renormalizeTargetField).
bool getForcedRenormalization();
void setForcedRenormalization( bool dr);
/* This object can be used only if MED_ENABLE_FVM is defined*/
#ifdef MED_ENABLE_FVM
// DEC for meshes that do not coincide between the two groups.
class NonCoincidentDEC: public DEC {
NonCoincidentDEC(ProcessorGroup& source, ProcessorGroup& target);
// DEC for structured, coincident source/target meshes.
class StructuredCoincidentDEC: public DEC {
StructuredCoincidentDEC(ProcessorGroup& source, ProcessorGroup& target);
// Prepare the source/target data exchange structures before transfer.
void prepareSourceDE();
void prepareTargetDE();
// Build a distributed mesh by reading 'file_name' through the given driver,
// distributed over the processes of 'group'.
ParaMESH(driverTypes driver_type, const char* file_name, const ProcessorGroup& group);
// Wrap an existing local MESH as this process's subdomain of a parallel mesh.
ParaMESH(MESH& subdomain_mesh, const ProcessorGroup& proc_group, const char* name);

// Write the mesh with the given driver; empty fileName's meaning is driver-defined.
void write(driverTypes driverType, const char* fileName="");
// Access the underlying sequential MESH of this process.
MESH* getMesh() const;
// avoid error when SWIG creates a default constructor of an abstract class
ParaSUPPORT() { return NULL; }

// Return the global numbering of the supported elements as a Python list of ints.
PyObject* getGlobalNumbering() {
const int * numIndex = self->getGlobalNumbering();
// NOTE(review): magic argument 999 looks like an "all elements" sentinel
// (MED_ALL_ELEMENTS?) -- confirm against the MEDMEM headers.
int aSize = self->getSupport()->getNumberOfElements(999);
TYPEMAP_OUTPUT_ARRAY(numIndex, aSize, PyInt_FromLong, ParaSUPPORT::getGlobalNumbering);
// ParaSUPPORT on an unstructured parallel mesh.
class UnstructuredParaSUPPORT: public ParaSUPPORT {
// Support given explicitly on a distributed mesh.
UnstructuredParaSUPPORT(const ParaMESH* const mesh, const SUPPORT* support );
// Support covering a whole entity (cells, nodes, ...) of a distributed mesh.
UnstructuredParaSUPPORT(const ParaMESH* const mesh, const medEntityMesh entity);
// Support built from a sequential SUPPORT for a processor group.
UnstructuredParaSUPPORT(const SUPPORT* support, const ProcessorGroup& group);
// ParaSUPPORT on a structured parallel mesh/grid.
class StructuredParaSUPPORT: public ParaSUPPORT {
StructuredParaSUPPORT(const ParaGRID* const grid, const medEntityMesh entity);
StructuredParaSUPPORT(const ParaMESH* const mesh, const medEntityMesh entity);
// Describes how the components of a field are laid out (per block and/or
// per processor group).
class ComponentTopology {
ComponentTopology(int nb_comp);
ComponentTopology(int nb_comp, int nb_blocks);
ComponentTopology(int nb_comp, ProcessorGroup* group);

// Number of components local to this process.
int nbLocalComponents();
// Field distributed over a ParaSUPPORT with the given component layout.
ParaFIELD(const ParaSUPPORT* support,
          const ComponentTopology& component_topology);

// Read the field from 'file_name' through the named driver.
ParaFIELD(driverTypes driver_type,
          const char* file_name,
          const char* driver_name,
          const ComponentTopology& component_topology);

// Wrap an existing sequential FIELDDOUBLE for the given processor group.
ParaFIELD(FIELDDOUBLE* field, const ProcessorGroup& group);

// Write the field with the given driver; empty names' meaning is driver-defined.
void write(driverTypes driverType,
           const char* fileName="",
           const char* meshName="");

// Access the underlying sequential field of this process.
FIELDDOUBLE* getField() const;

// Volume integral of component 'icomp' (see the C++ ParaFIELD documentation
// for the exact definition, not visible here).
double getVolumeIntegral(int icomp) const;
//=============================================================================================
// Interface for MPI-implementation-specific constants such as MPI_COMM_WORLD.

// The type and value of constants like MPI_COMM_WORLD depend on the MPI
// implementation, and such "constants" are usually macros. To expose these
// symbols in Python and translate them into the correct values, we use the
// following technique: we define our own constants (enum mpi_constants), map
// them onto the real MPI values using typemaps, and create the corresponding
// Python symbols, equal to the 'mpi_constants' values, via the %pythoncode
// directive.
235 // Constants corresponding to similar MPI definitions
enum mpi_constants { mpi_comm_world, mpi_comm_self, mpi_double, mpi_int }; // translated to real MPI values by the typemaps below
238 // Map mpi_comm_world and mpi_comm_self -> MPI_COMM_WORLD and MPI_COMM_SELF
// Input typemap: convert a Python int holding one of the mpi_comm_* placeholder
// constants into a real MPI_Comm.
%typemap(in) MPI_Comm
switch (PyInt_AsLong($input)) {
case mpi_comm_world: $1 = MPI_COMM_WORLD; break;
case mpi_comm_self: $1 = MPI_COMM_SELF; break;
// Any other value is rejected with a TypeError.
PyErr_SetString(PyExc_TypeError,"unexpected value of MPI_Comm");
249 // Map mpi_double and mpi_int -> MPI_DOUBLE and MPI_INT
// Input typemap: convert a Python int holding mpi_double/mpi_int into a real
// MPI_Datatype.
%typemap(in) MPI_Datatype
switch (PyInt_AsLong($input)) {
case mpi_double: $1 = MPI_DOUBLE; break;
case mpi_int: $1 = MPI_INT; break;
// Any other value is rejected with a TypeError.
PyErr_SetString(PyExc_TypeError,"unexpected value of MPI_Datatype");
260 // The following code gets inserted into the result python file:
261 // create needed python symbols
263 MPI_COMM_WORLD = mpi_comm_world
264 MPI_COMM_SELF = mpi_comm_self
265 MPI_DOUBLE = mpi_double
268 //=============================================================================================
// Python-callable wrapper around MPI_Comm_size; returns the communicator
// size as a Python int, or sets a RuntimeError on MPI failure.
%inline %{ PyObject* MPI_Comm_size(MPI_Comm comm) {
int err = MPI_Comm_size(comm, &res);
if ( err != MPI_SUCCESS ) {
// NOTE(review): typo in the message -- "Erorr" should read "Error".
PyErr_SetString(PyExc_RuntimeError,"Erorr in MPI_Comm_size()");
return PyInt_FromLong( res );
// Python-callable wrapper around MPI_Comm_rank; returns the calling process's
// rank as a Python int, or sets a RuntimeError on MPI failure.
%inline %{ PyObject* MPI_Comm_rank(MPI_Comm comm) {
int err = MPI_Comm_rank(comm, &res);
if ( err != MPI_SUCCESS ) {
// NOTE(review): typo in the message -- "Erorr" should read "Error".
PyErr_SetString(PyExc_RuntimeError,"Erorr in MPI_Comm_rank()");
return PyInt_FromLong( res );
// Plain MPI entry points exposed to Python with their native signatures.
int MPI_Init(int *argc, char ***argv );
int MPI_Barrier(MPI_Comm comm);
// Python-callable MPI_Bcast: broadcasts the contents of a Python list of nb
// numbers (MPI_DOUBLE or MPI_INT only) from rank 'root' over communicator 'c';
// on the other ranks the list elements are overwritten with the received data.
// Returns the MPI error code as a Python int.
%inline %{ PyObject* MPI_Bcast(PyObject* buffer, int nb, MPI_Datatype type, int root, MPI_Comm c) {
// buffer must be a list
if (!PyList_Check(buffer)) {
PyErr_SetString(PyExc_TypeError, "buffer is expected to be a list");
int aSize = PyList_Size(buffer);
// The list length must match the requested element count nb.
PyErr_SetString(PyExc_ValueError, MEDMEM::STRING("buffer is expected to be of size ")<<nb);
// allocate and fill a buffer
// (a raw C array of nb doubles or ints mirroring the Python list)
if ( type == MPI_DOUBLE ) {
aBuf = (void*) ( dblBuf = new double[ nb ] );
for ( int i = 0; i < aSize; ++i )
dblBuf[i] = PyFloat_AS_DOUBLE( PyList_GetItem( buffer, i ));
else if ( type == MPI_INT ) {
aBuf = (void*) ( intBuf = new int[ nb ] );
for ( int i = 0; i < aSize; ++i )
intBuf[i] = int( PyInt_AS_LONG( PyList_GetItem( buffer, i )));
PyErr_SetString(PyExc_TypeError, "Only MPI_DOUBLE and MPI_INT supported");
// Perform the actual broadcast on the raw buffer.
int err = MPI_Bcast(aBuf, nb, type, root, c);
if ( err != MPI_SUCCESS ) {
// NOTE(review): typo in the message -- "Erorr" should read "Error".
PyErr_SetString(PyExc_RuntimeError,"Erorr in MPI_Bcast()");
// Free the temporary C buffer on failure (one of the two is non-null).
delete [] intBuf; delete [] dblBuf;
// put received data into the list
if ( type == MPI_DOUBLE ) {
for ( int i = 0; i < aSize && !pyerr; ++i )
pyerr = PyList_SetItem(buffer, i, PyFloat_FromDouble( dblBuf[i] ));
for ( int i = 0; i < aSize && !pyerr; ++i )
pyerr = PyList_SetItem(buffer, i, PyInt_FromLong( intBuf[i] ));
PyErr_SetString(PyExc_RuntimeError, "Error of PyList_SetItem()");
return PyInt_FromLong( err );