-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2019 CEA/DEN, EDF R&D
//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
+
#include "MPIAccess.hxx"
#include "InterpolationUtils.hxx"
using namespace std;
-namespace ParaMEDMEM
+namespace MEDCoupling
{
- /*! \defgroup mpi_access MPIAccess
- Class \a MPIAccess is the gateway to the MPI library.
+ /*!
+ \anchor MPIAccess-det
+ \class MPIAccess
+
+ The class \a MPIAccess is the gateway to the MPI library.
It is a helper class that gathers the calls to the MPI
library that are made in the ParaMEDMEM library. This gathering
allows easier gathering of information about the communication
{
//initialization
MPI_Init(&argc, &argv);
- ParaMEDMEM::CommInterface comm_interface;
+ MEDCoupling::CommInterface comm_interface;
//setting up a processor group with proc 0
set<int> procs;
procs.insert(0);
- ParaMEDMEM::ProcessorGroup group(procs, comm_interface);
+ MEDCoupling::ProcessorGroup group(procs, comm_interface);
- ParaMEDMEM::MPIAccess mpi_access(group);
+ MEDCoupling::MPIAccess mpi_access(group);
//cleanup
MPI_Finalize();
_comm_interface( ProcessorGroup->getCommInterface() ) ,
_intra_communicator( ProcessorGroup->getComm() )
{
+ void *v ;
int mpitagub ;
int flag ;
- //MPI_Attr_get does not run with _IntraCommunicator ???
- //MPI_Attr_get(*_IntraCommunicator,MPI_TAG_UB,&mpitagub,&flag) ;
- MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&mpitagub,&flag) ;
- mpitagub=abs(mpitagub);
+ //MPI_Comm_get_attr does not run with _IntraCommunicator ???
+ //MPI_Comm_get_attr(*_IntraCommunicator,MPI_TAG_UB,&v,&flag) ;
+ MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&v,&flag) ;
+ mpitagub=*(reinterpret_cast<int*>(v));
if ( BaseTag != 0 )
BaseTag = (BaseTag/MODULO_TAG)*MODULO_TAG ;
if ( MaxTag == 0 )
MaxTag = (mpitagub/MODULO_TAG-1)*MODULO_TAG ;
MPI_Comm_rank( *_intra_communicator, &_my_rank ) ;
- cout << "MPIAccess::MPIAccess" << _my_rank << " this " << this << " BaseTag " << BaseTag
- << " MaxTag " << MaxTag << " mpitagub " << mpitagub << " (minimum 32767) "
- << " flag " << flag << endl ;
if ( !flag | (BaseTag < 0) | (BaseTag >= MaxTag) | (MaxTag > mpitagub) )
throw INTERP_KERNEL::Exception("wrong call to MPIAccess constructor");
_processor_group = ProcessorGroup ;
_processor_group_size = _processor_group->size() ;
_trace = false ;
-
- cout << "MPIAccess::MPIAccess" << _my_rank << " _processor_group_size "
- << _processor_group_size << endl ;
-
+
_base_request = -1 ;
_max_request = std::numeric_limits<int>::max() ;
_request = _base_request ;
array_of_displacements[0] = 0 ;
array_of_displacements[1] = sizeof(double) ;
array_of_displacements[2] = 2*sizeof(double) ;
- MPI_Type_struct(3, array_of_blocklengths, array_of_displacements,
+ MPI_Type_create_struct(3, array_of_blocklengths, array_of_displacements,
array_of_types, &_MPI_TIME) ;
MPI_Type_commit(&_MPI_TIME) ;
}
MPIAccess::~MPIAccess()
{
- cout << "MPIAccess::~MPIAccess" << _my_rank << " this " << this << endl ;
delete [] _send_request ;
delete [] _recv_request ;
delete [] _send_MPI_tag ;
delete [] _recv_MPI_Tag ;
MPI_Type_free(&_MPI_TIME) ;
- cout << "End of MPIAccess::~MPIAccess" << _my_rank << " this " << this << endl ;
}
/*
"_MapOfRequestStruct".
That structure RequestStruct give the possibility to manage
the structures MPI_Request and MPI_Status * of MPI. It give
- also the possibility to get informations about that request :
+ also the possibility to get information about that request :
target, send/recv, tag, [a]synchronous, type, outcount.
. That identifier is used to control an asynchronous request
The default is [ 0 , MPI_TAG_UB], MPI_TAG_UB being the maximum
value in an implementation of MPI (minimum 32767 = 2**15-1).
On awa with the implementation lam MPI_TAG_UB value is
- 7353944. The norma MPI specify that value is the same in all
+ 7353944. The MPI norm specifies that this value is the same in all
processes started by mpirun.
In the case of the use of the same IntraCommunicator in a process
for several distinct data flows (or for several IntraCommunicators
- with common processes), that permits to avoid ambibuity
+ with common processes), this makes it possible to avoid ambiguity
and may help debug.
. In MPIAccess the tags have two parts (#define MODULO_TAG 10) :
+ The last decimal digit decimal correspond to MPI_DataType ( 1 for
TimeMessages, 2 for MPI_INT and 3 for MPI_DOUBLE)
- + The value of other digits correspond to a circular numero for each
+ + The value of the other digits corresponds to a circular number for each
message.
- + A TimeMessage and the associated DataMessage have the same numero
+ + A TimeMessage and the associated DataMessage have the same number
(but the types are different and the tags also).
. For a Send of a message from a process "source" to a process
"target", we have _send_MPI_tag[target] in the process
- source (it contains the last "tag" used for the Send of a pour l'envoi de
+ source (it contains the last "tag" used for the Send of a
message to the process target).
And in the process "target" which receive that message, we have
_recv_MPI_Tag[source] (it contains the last "tag" used for the Recv
of messages from the process source).
- Naturally in the MPI norma the values of that tags must be the same.
+ Naturally in the MPI norm the values of these tags must be the same.
*/
int MPIAccess::newSendTag( MPI_Datatype datatype, int destrank , int method ,
bool asynchronous, int &RequestId )
// SendRequestIds to a destination rank
int MPIAccess::sendRequestIds(int destrank, int size, int *ArrayOfSendRequests)
{
- if (size < _send_requests[destrank].size() )
+ if (size < (int)_send_requests[destrank].size() )
throw INTERP_KERNEL::Exception("wrong call to MPIAccess::SendRequestIds");
int i = 0 ;
list< int >::const_iterator iter ;
// RecvRequestIds from a sourcerank
int MPIAccess::recvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests)
{
- if (size < _recv_requests[sourcerank].size() )
+ if (size < (int)_recv_requests[sourcerank].size() )
throw INTERP_KERNEL::Exception("wrong call to MPIAccess::RecvRequestIds");
int i = 0 ;
list< int >::const_iterator iter ;
// Receive (read) in synchronous mode count values of type datatype in buffer from source
// (returns RequestId identifier even if the corresponding structure is deleted :
// it is only in order to have the same signature as the asynchronous mode)
- // The output argument OutCount is optionnal : *OutCount <= count
+ // The output argument OutCount is optional : *OutCount <= count
int MPIAccess::recv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId, int *OutCount)
{
int sts = MPI_SUCCESS ;
source = aMPIStatus.MPI_SOURCE ;
MPITag = aMPIStatus.MPI_TAG ;
int MethodId = (MPITag % MODULO_TAG) ;
- myDatatype = datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ;
+ myDatatype = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
_comm_interface.getCount(&aMPIStatus, myDatatype, &outcount ) ;
if ( _trace )
cout << "MPIAccess::Probe" << _my_rank << " FromSource " << FromSource
source = aMPIStatus.MPI_SOURCE ;
MPITag = aMPIStatus.MPI_TAG ;
int MethodId = (MPITag % MODULO_TAG) ;
- myDataType = datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ;
+ myDataType = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
_comm_interface.getCount(&aMPIStatus, myDataType, &outcount ) ;
if ( _trace )
cout << "MPIAccess::IProbe" << _my_rank << " FromSource " << FromSource
int MPIAccess::cancel( int source, int theMPITag, MPI_Datatype datatype, int outcount, int &flag )
{
int sts ;
- MPI_Aint extent ;
+ MPI_Aint extent, lbound ;
flag = 0 ;
- sts = MPI_Type_extent( datatype , &extent ) ;
+ sts = MPI_Type_get_extent( datatype , &lbound, &extent ) ;
if ( sts == MPI_SUCCESS )
{
void * recvbuf = malloc( extent*outcount ) ;
return _comm_interface.requestFree( request ) ;
}
- // Print all informations of all known requests for debugging purpose
+ // Print all information of all known requests for debugging purposes
void MPIAccess::check() const
{
int i = 0 ;
}
}
+ // Returns the MPI size of a TimeMessage
+ MPI_Aint MPIAccess::timeExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( _MPI_TIME , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the MPI size of a MPI_INT
+ MPI_Aint MPIAccess::intExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_INT , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the MPI size of a MPI_DOUBLE
+ MPI_Aint MPIAccess::doubleExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_DOUBLE , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
// Outputs fields of a TimeMessage structure
ostream & operator<< (ostream & f ,const TimeMessage & aTimeMsg )
{