-// Copyright (C) 2007-2014 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
using namespace std;
-namespace ParaMEDMEM
+namespace MEDCoupling
{
/*!
\param source_group working side ProcessorGroup
\param target_group lazy side ProcessorGroup
\param Asynchronous Communication mode (default asynchronous)
- \param nStepBefore Number of Time step needed for the interpolation before current time
- \param nStepAfter Number of Time step needed for the interpolation after current time
-
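+
+ Illustrative call sketch (the groups built below are an assumption of this
+ comment, not something imposed by MPIAccessDEC itself) :
+ \code
+ MEDCoupling::CommInterface interface ;
+ std::set<int> src_ids , tgt_ids ;
+ src_ids.insert(0) ; src_ids.insert(1) ;
+ tgt_ids.insert(2) ; tgt_ids.insert(3) ;
+ MEDCoupling::MPIProcessorGroup source_group( interface , src_ids ) ;
+ MEDCoupling::MPIProcessorGroup target_group( interface , tgt_ids ) ;
+ MPIAccessDEC dec( source_group , target_group , true ) ; // Asynchronous mode
+ \endcode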
*/
-
MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
const ProcessorGroup& target_group,
bool Asynchronous )
procs.insert(i) ;
}
MPIProcessorGroup *mpilg = static_cast<MPIProcessorGroup *>(const_cast<ProcessorGroup *>(&source_group));
- _MPI_union_group = new ParaMEDMEM::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
+ _MPI_union_group = new MEDCoupling::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
delete union_group ;
_my_rank = _MPI_union_group->myRank() ;
_group_size = _MPI_union_group->size() ;
sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
target , SendRequestId ) ;
}
+ else if ( sendtype == MPI_LONG )
+ {
+ sts = _MPI_access->ISend( &((long *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
else
{
sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
target , SendRequestId ) ;
}
+ else if ( sendtype == MPI_LONG )
+ {
+ sts = _MPI_access->send( &((long *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
else
{
sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
target , RecvRequestId ) ;
}
+ else if ( recvtype == MPI_LONG )
+ {
+ sts = _MPI_access->IRecv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
else
{
sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
target , RecvRequestId ) ;
}
+ else if ( recvtype == MPI_LONG )
+ {
+ sts = _MPI_access->recv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
else
{
sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
{
return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
}
- int sts ;
+ int sts = 0;
int target ;
int sendoffset = 0 ;
int recvoffset = 0 ;
return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
recvbuf, recvcounts, rdispls, recvtype ) ;
}
- int sts ;
+ int sts = 0;
int target ;
int SendRequestId ;
int RecvRequestId ;
- . We assume that buffers are allocated with a new double[]. so a
+ . We assume that buffers are allocated with a new double[], so a
delete [] is done.
- . The structure SendBuffStruct permit to keep the adress of the buffer
- and to manage a reference counter of that buffer. It contains
- also MPI_Datatype for the delete [] (double *) ... when the counter
- is null.
+ . The structure SendBuffStruct makes it possible to keep the address of the buffer
+ and to manage a reference counter of that buffer. It also contains
+ the MPI_Datatype needed for the delete [] (double *) ... when the counter
+ drops to zero.
- . The map _MapOfSendBuffers etablish the correspondance between each
+ . The map _MapOfSendBuffers establishes the correspondence between each
RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
for each "target" of a part of the buffer.
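+ . As an illustrative sketch only (the exact declarations are assumed here,
+ they live in MPIAccessDEC.hxx), that bookkeeping boils down to :
+ struct SendBuffStruct { void *SendBuffer ; int Counter ; MPI_Datatype DataType ; } ;
+ std::map< int , SendBuffStruct * > _MapOfSendBuffers ; // RequestId -> shared buffer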
int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
void* recvbuf, int recvcount , MPI_Datatype recvtype )
{
- int sts ;
+ int sts = 0;
int target ;
int sendoffset = 0 ;
int SendTimeRequestId ;
{
for ( target = 0 ; target < _group_size ; target++ )
{
- int recvsize = recvcount*_MPI_access->extent( recvtype ) ;
+ int recvsize = (int)(recvcount*_MPI_access->extent( recvtype ));
checkTime( recvcount , recvtype , target , false ) ;
//===========================================================================
- //TODO : it is assumed actually that we have only 1 timestep before nad after
+ //TODO : it is currently assumed that we have only 1 timestep before and after
//===========================================================================
if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
{
void* recvbuf, int* recvcounts, int* rdispls,
MPI_Datatype recvtype )
{
- int sts ;
+ int sts = 0;
int target ;
int SendTimeRequestId ;
int SendDataRequestId ;
{
if ( recvcounts[target] )
{
- int recvsize = recvcounts[target]*_MPI_access->extent( recvtype ) ;
+ int recvsize = (int)(recvcounts[target]*_MPI_access->extent( recvtype ));
checkTime( recvcounts[target] , recvtype , target , false ) ;
//===========================================================================
- //TODO : it is assumed actually that we have only 1 timestep before nad after
+ //TODO : it is currently assumed that we have only 1 timestep before and after
{
if ( WithWait )
{
- cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
- << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
- << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
- << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
+ if (SendTrace)
+ {
+ cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
+ << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
+ << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
+ << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
+ }
sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
}
else