-// Copyright (C) 2007-2014 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
using namespace std;
-namespace ParaMEDMEM
+namespace MEDCoupling
{
- /*! \defgroup mpi_access MPIAccess
- Class \a MPIAccess is the gateway to the MPI library.
+ /*!
+ \anchor MPIAccess-det
+ \class MPIAccess
+
+ The class \a MPIAccess is the gateway to the MPI library.
It is a helper class that gathers the calls to the MPI
library that are made in the ParaMEDMEM library. This centralization
allows easier gathering of information about the communication
{
//initialization
MPI_Init(&argc, &argv);
- ParaMEDMEM::CommInterface comm_interface;
+ MEDCoupling::CommInterface comm_interface;
//setting up a processor group with proc 0
set<int> procs;
procs.insert(0);
- ParaMEDMEM::ProcessorGroup group(procs, comm_interface);
+ MEDCoupling::ProcessorGroup group(procs, comm_interface);
- ParaMEDMEM::MPIAccess mpi_access(group);
+ MEDCoupling::MPIAccess mpi_access(group);
//cleanup
MPI_Finalize();
array_of_displacements[0] = 0 ;
array_of_displacements[1] = sizeof(double) ;
array_of_displacements[2] = 2*sizeof(double) ;
- MPI_Type_struct(3, array_of_blocklengths, array_of_displacements,
+ MPI_Type_create_struct(3, array_of_blocklengths, array_of_displacements,
array_of_types, &_MPI_TIME) ;
MPI_Type_commit(&_MPI_TIME) ;
}
"_MapOfRequestStruct".
That structure RequestStruct gives the possibility to manage
the structures MPI_Request and MPI_Status * of MPI. It gives
- also the possibility to get informations about that request :
+ also the possibility to get information about that request :
target, send/recv, tag, [a]synchronous, type, outcount.
. That identifier is used to control an asynchronous request
The default is [ 0 , MPI_TAG_UB], MPI_TAG_UB being the maximum
value in an implementation of MPI (minimum 32767 = 2**15-1).
With the LAM implementation (on the machine awa) the MPI_TAG_UB value is
- 7353944. The norma MPI specify that value is the same in all
+ 7353944. The MPI norm specifies that this value is the same in all
processes started by mpirun.
In the case of the use of the same IntraCommunicator in a process
for several distinct data flows (or for several IntraCommunicators
- with common processes), that permits to avoid ambibuity
+ with common processes), that permits to avoid ambiguity
and may help debug.
. In MPIAccess the tags have two parts (#define MODULO_TAG 10) :
+ The last decimal digit corresponds to MPI_DataType ( 1 for
TimeMessages, 2 for MPI_INT and 3 for MPI_DOUBLE)
- + The value of other digits correspond to a circular numero for each
+ + The value of other digits correspond to a circular number for each
message.
- + A TimeMessage and the associated DataMessage have the same numero
+ + A TimeMessage and the associated DataMessage have the same number
(but the types are different and the tags also).
. For a Send of a message from a process "source" to a process
"target", we have _send_MPI_tag[target] in the process
- source (it contains the last "tag" used for the Send of a pour l'envoi de
+ source (it contains the last "tag" used for the Send of a
message to the process target).
And in the process "target" which receive that message, we have
_recv_MPI_Tag[source] (it contains the last "tag" used for the Recv
of messages from the process source).
- Naturally in the MPI norma the values of that tags must be the same.
+ Naturally in the MPI norm the values of those tags must be the same.
*/
int MPIAccess::newSendTag( MPI_Datatype datatype, int destrank , int method ,
bool asynchronous, int &RequestId )
{
int size = 0;
for (int i = 0 ; i < _processor_group_size ; i++ )
- size += _send_requests[ i ].size() ;
+ size += (int)_send_requests[ i ].size() ;
return size ;
}
{
int size = 0 ;
for (int i = 0 ; i < _processor_group_size ; i++ )
- size += _recv_requests[ i ].size() ;
+ size += (int)_recv_requests[ i ].size() ;
return size ;
}
list< int >::const_iterator iter ;
for (iter = _send_requests[ destrank ].begin() ; iter != _send_requests[destrank].end() ; iter++ )
ArrayOfSendRequests[i++] = *iter ;
- return _send_requests[destrank].size() ;
+ return (int)_send_requests[destrank].size() ;
}
// Returns in ArrayOfRecvRequests with the dimension "size" all the
_recv_requests[ sourcerank ] ;
for (iter = _recv_requests[ sourcerank ].begin() ; iter != _recv_requests[sourcerank].end() ; iter++ )
ArrayOfRecvRequests[i++] = *iter ;
- return _recv_requests[sourcerank].size() ;
+ return (int)_recv_requests[sourcerank].size() ;
}
// Send in synchronous mode count values of type datatype from buffer to target
// Receive (read) in synchronous mode count values of type datatype in buffer from source
// (returns RequestId identifier even if the corresponding structure is deleted :
// it is only in order to have the same signature as the asynchronous mode)
- // The output argument OutCount is optionnal : *OutCount <= count
+ // The output argument OutCount is optional : *OutCount <= count
int MPIAccess::recv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId, int *OutCount)
{
int sts = MPI_SUCCESS ;
int outcount = 0 ;
if ( sts == MPI_SUCCESS )
{
- MPI_Datatype datatype = MPIDatatype( RequestId ) ;
- _comm_interface.getCount(MPIStatus( RequestId ), datatype, &outcount ) ;
+ MPI_Datatype datatype2 = MPIDatatype( RequestId ) ;
+ _comm_interface.getCount(MPIStatus( RequestId ), datatype2, &outcount ) ;
setMPIOutCount( RequestId , outcount ) ;
setMPICompleted( RequestId , true ) ;
deleteStatus( RequestId ) ;
source = aMPIStatus.MPI_SOURCE ;
MPITag = aMPIStatus.MPI_TAG ;
int MethodId = (MPITag % MODULO_TAG) ;
- myDatatype = datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ;
+ myDatatype = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
_comm_interface.getCount(&aMPIStatus, myDatatype, &outcount ) ;
if ( _trace )
cout << "MPIAccess::Probe" << _my_rank << " FromSource " << FromSource
source = aMPIStatus.MPI_SOURCE ;
MPITag = aMPIStatus.MPI_TAG ;
int MethodId = (MPITag % MODULO_TAG) ;
- myDataType = datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ;
+ myDataType = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
_comm_interface.getCount(&aMPIStatus, myDataType, &outcount ) ;
if ( _trace )
cout << "MPIAccess::IProbe" << _my_rank << " FromSource " << FromSource
int MPIAccess::cancel( int source, int theMPITag, MPI_Datatype datatype, int outcount, int &flag )
{
int sts ;
- MPI_Aint extent ;
+ MPI_Aint extent, lbound ;
flag = 0 ;
- sts = MPI_Type_extent( datatype , &extent ) ;
+ sts = MPI_Type_get_extent( datatype , &lbound, &extent ) ;
if ( sts == MPI_SUCCESS )
{
void * recvbuf = malloc( extent*outcount ) ;
return _comm_interface.requestFree( request ) ;
}
- // Print all informations of all known requests for debugging purpose
+ // Print all information about all known requests, for debugging purposes
void MPIAccess::check() const
{
int i = 0 ;
}
}
+ // Returns the extent (size in bytes, as reported by MPI_Type_get_extent,
+ // not MPI_Type_size) of the user-defined _MPI_TIME datatype for TimeMessages
+ MPI_Aint MPIAccess::timeExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( _MPI_TIME , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the extent (size in bytes) of the MPI_INT datatype
+ MPI_Aint MPIAccess::intExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_INT , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the extent (size in bytes) of the MPI_LONG datatype
+ MPI_Aint MPIAccess::longExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_LONG , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the extent (size in bytes) of the MPI_DOUBLE datatype
+ MPI_Aint MPIAccess::doubleExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_DOUBLE , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
// Outputs fields of a TimeMessage structure
ostream & operator<< (ostream & f ,const TimeMessage & aTimeMsg )
{