INCLUDE_DIRECTORIES(
${MPI_INCLUDE_DIRS}
${CMAKE_CURRENT_SOURCE_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/MPIAccess
${CMAKE_CURRENT_SOURCE_DIR}/../MEDCoupling
${CMAKE_CURRENT_SOURCE_DIR}/../ICoCo
${CMAKE_CURRENT_SOURCE_DIR}/../INTERP_KERNEL
)
SET(paramedmem_SOURCES
- ProcessorGroup.cxx
- MPIProcessorGroup.cxx
- ParaMESH.cxx
+ BlockTopology.cxx
CommInterface.cxx
- ParaUMesh.cxx
- ParaSkyLineArray.cxx
- ParaDataArray.cxx
ComponentTopology.cxx
- MPIAccess.cxx
- InterpolationMatrix.cxx
- OverlapInterpolationMatrix.cxx
- StructuredCoincidentDEC.cxx
- ExplicitCoincidentDEC.cxx
- InterpKernelDEC.cxx
- ElementLocator.cxx
- OverlapElementLocator.cxx
- MPIAccessDEC.cxx
- TimeInterpolator.cxx
- LinearTimeInterpolator.cxx
DEC.cxx
DisjointDEC.cxx
- OverlapDEC.cxx
+ ElementLocator.cxx
+ ExplicitCoincidentDEC.cxx
+ ExplicitMapping.cxx
ExplicitTopology.cxx
+ InterpKernelDEC.cxx
+ InterpolationMatrix.cxx
+ LinearTimeInterpolator.cxx
+ MPIProcessorGroup.cxx
MxN_Mapping.cxx
+ OverlapDEC.cxx
+ OverlapElementLocator.cxx
+ OverlapInterpolationMatrix.cxx
OverlapMapping.cxx
+ ParaDataArray.cxx
ParaFIELD.cxx
ParaGRID.cxx
- BlockTopology.cxx
- ExplicitMapping.cxx
- )
+ ParaMESH.cxx
+ ParaSkyLineArray.cxx
+ ParaUMesh.cxx
+ ProcessorGroup.cxx
+ StructuredCoincidentDEC.cxx
+ TimeInterpolator.cxx
+ MPIAccess/MPIAccess.cxx
+ MPIAccess/MPIAccessDEC.cxx
+)
ADD_LIBRARY(paramedmem ${paramedmem_SOURCES})
TARGET_LINK_LIBRARIES(paramedmem medcouplingcpp ${MPI_LIBRARIES})
INSTALL(TARGETS paramedmem EXPORT ${PROJECT_NAME}TargetGroup DESTINATION ${MEDCOUPLING_INSTALL_LIBS})
FILE(GLOB paramedmem_HEADERS_HXX "${CMAKE_CURRENT_SOURCE_DIR}/*.hxx")
+FILE(GLOB mpiaccess_HEADERS_HXX "${CMAKE_CURRENT_SOURCE_DIR}/MPIAccess/*.hxx")
+LIST(APPEND paramedmem_HEADERS_HXX ${mpiaccess_HEADERS_HXX})
INSTALL(FILES ${paramedmem_HEADERS_HXX} DESTINATION ${MEDCOUPLING_INSTALL_HEADERS})
FILE(GLOB paramedmem_HEADERS_TXX "${CMAKE_CURRENT_SOURCE_DIR}/*.txx")
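
Note on the build change above: ${CMAKE_CURRENT_SOURCE_DIR}/MPIAccess joins the
include path, and the MPIAccess/*.hxx headers are appended to the same list as
the other ParaMEDMEM headers, so everything is still installed flat into
${MEDCOUPLING_INSTALL_HEADERS}. A minimal consumer sketch (an illustration
under those assumptions, not part of the patch):

    // The bare include keeps working because headers are installed flat;
    // in-tree, library sources use the qualified "MPIAccess/MPIAccess.hxx".
    #include "MPIAccess.hxx"

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      // ... set up a MEDCoupling::MPIProcessorGroup and an MPIAccess here ...
      MPI_Finalize();
      return 0;
    }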
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include "MPIAccess.hxx"
-#include "InterpolationUtils.hxx"
-
-#include <iostream>
-
-using namespace std;
-
-namespace MEDCoupling
-{
- /**!
- \anchor MPIAccess-det
- \class MPIAccess
-
- The class \a MPIAccess is the gateway to the MPI library.
- It is a helper class that gathers the calls to the MPI
- library that are made in the ParaMEDMEM library. This gathering
- allows easier gathering of information about the communication
- in the library. With MPIAccess, tags are managed automatically
- and asynchronous operations are easier.
-
- It is typically called after the MPI_Init() call in a program. It is afterwards passed as a parameter to the constructors of ParaMEDMEM objects so that they access the MPI library via the MPIAccess.
-
- As an example, the following code initializes a processor group made of the zero processor.
-
- \verbatim
- #include "MPIAccess.hxx"
- #include "ProcessorGroup.hxx"
-
- int main(int argc, char** argv)
- {
- //initialization
- MPI_Init(&argc, &argv);
- MEDCoupling::CommInterface comm_interface;
-
- //setting up a processor group with proc 0
- set<int> procs;
- procs.insert(0);
- MEDCoupling::ProcessorGroup group(procs, comm_interface);
-
- MEDCoupling::MPIAccess mpi_access(group);
-
- //cleanup
- MPI_Finalize();
- }
- \endverbatim
- */
-
-
- /*! Creates a MPIAccess that is based on the processors included in \a ProcessorGroup.
- This class may be called for easier use of MPI API.
-
- \param ProcessorGroup MPIProcessorGroup object giving access to group management
- \param BaseTag and MaxTag define the range of tags to be used.
- Tags are managed by MPIAccess. They are cyclically incremented.
- When there is a Send or a Receive operation there is a new RequestId tag returned
- to the caller. That RequestId may be used to manage the operation Wait, Check of
- status etc... The MPITag internally managed by MPIAccess is used as "tag" argument
- in MPI call.
- */
-
- MPIAccess::MPIAccess(MPIProcessorGroup * ProcessorGroup, int BaseTag, int MaxTag) :
- _comm_interface( ProcessorGroup->getCommInterface() ) ,
- _intra_communicator( ProcessorGroup->getComm() )
- {
- void *v ;
- int mpitagub ;
- int flag ;
- //MPI_Comm_get_attr does not run with _IntraCommunicator ???
- //MPI_Comm_get_attr(*_IntraCommunicator,MPID_TAG_UB,&mpitagub,&flag) ;
- MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&v,&flag) ;
- mpitagub=*(reinterpret_cast<int*>(v));
- if ( BaseTag != 0 )
- BaseTag = (BaseTag/MODULO_TAG)*MODULO_TAG ;
- if ( MaxTag == 0 )
- MaxTag = (mpitagub/MODULO_TAG-1)*MODULO_TAG ;
- MPI_Comm_rank( *_intra_communicator, &_my_rank ) ;
- if ( !flag | (BaseTag < 0) | (BaseTag >= MaxTag) | (MaxTag > mpitagub) )
- throw INTERP_KERNEL::Exception("wrong call to MPIAccess constructor");
-
- _processor_group = ProcessorGroup ;
- _processor_group_size = _processor_group->size() ;
- _trace = false ;
-
- _base_request = -1 ;
- _max_request = std::numeric_limits<int>::max() ;
- _request = _base_request ;
-
- _base_MPI_tag = BaseTag ;
- _max_MPI_tag = MaxTag ;
-
- _send_request = new int[ _processor_group_size ] ;
- _recv_request = new int[ _processor_group_size ] ;
-
- _send_requests.resize( _processor_group_size ) ;
- _recv_requests.resize( _processor_group_size ) ;
-
- _send_MPI_tag = new int[ _processor_group_size ] ;
- _recv_MPI_Tag = new int[ _processor_group_size ] ;
- int i ;
- for (i = 0 ; i < _processor_group_size ; i++ )
- {
- _send_request[ i ] = _max_request ;
- _recv_request[ i ] = _max_request ;
- _send_requests[ i ].resize(0) ;
- _recv_requests[ i ].resize(0) ;
- _send_MPI_tag[ i ] = _max_MPI_tag ;
- _recv_MPI_Tag[ i ] = _max_MPI_tag ;
- }
- MPI_Datatype array_of_types[3] ;
- array_of_types[0] = MPI_DOUBLE ;
- array_of_types[1] = MPI_DOUBLE ;
- array_of_types[2] = MPI_INT ;
- int array_of_blocklengths[3] ;
- array_of_blocklengths[0] = 1 ;
- array_of_blocklengths[1] = 1 ;
- array_of_blocklengths[2] = 1 ;
- MPI_Aint array_of_displacements[3] ;
- array_of_displacements[0] = 0 ;
- array_of_displacements[1] = sizeof(double) ;
- array_of_displacements[2] = 2*sizeof(double) ;
- MPI_Type_create_struct(3, array_of_blocklengths, array_of_displacements,
- array_of_types, &_MPI_TIME) ;
- MPI_Type_commit(&_MPI_TIME) ;
- }
-
- MPIAccess::~MPIAccess()
- {
- delete [] _send_request ;
- delete [] _recv_request ;
- delete [] _send_MPI_tag ;
- delete [] _recv_MPI_Tag ;
- MPI_Type_free(&_MPI_TIME) ;
- }
-
- /*
- MPIAccess and "RequestIds" :
- ============================
-
- . WARNING : In the specification document, the distinction
- between "MPITags" and "RequestIds" is not clear. "MPITags"
- are arguments of calls to MPI. "RequestIds" does not concern
- calls to MPI. "RequestIds" are named "tag"as arguments in/out
- in the MPIAccess API in the specification documentation.
- But in the implementation we have the right name RequestId (or
- RecvRequestId/SendRequestId).
-
- . When we have a MPI write/read request via MPIAccess, we get
- an identifier "RequestId".
- That identifier matches a structure RequestStruct of
- MPIAccess. The access to that structure is done with the map
- "_MapOfRequestStruct".
- That structure RequestStruct give the possibility to manage
- the structures MPI_Request and MPI_Status * of MPI. It give
- also the possibility to get information about that request :
- target, send/recv, tag, [a]synchronous, type, outcount.
-
- . That identifier is used to control an asynchronous request
- via MPIAccess : Wait, Test, Probe, etc...
-
- . In practise "RequestId" is simply an integer fo the interval
- [0 , 2**32-1]. There is only one such a cyclic for
- [I]Sends and [I]Recvs.
-
- . That "RequestIds" and their associated structures give an easy
- way to manage asynchronous communications.
- For example we have mpi_access->Wait( int RequestId ) instead of
- MPI_Wait(MPI_Request *request, MPI_Status *status).
-
- . The API of MPIAccess may give the "SendRequestIds" of a "target",
- the "RecvRequestIds" from a "source" or the "SendRequestIds" of
- all "targets" or the "RecvRequestIds" of all "sources".
- That avoid to manage them in Presentation-ParaMEDMEM.
- */
-
- int MPIAccess::newRequest( MPI_Datatype datatype, int tag , int destsourcerank ,
- bool fromsourcerank , bool asynchronous )
- {
- RequestStruct *mpiaccessstruct = new RequestStruct;
- mpiaccessstruct->MPITag = tag ;
- mpiaccessstruct->MPIDatatype = datatype ;
- mpiaccessstruct->MPITarget = destsourcerank ;
- mpiaccessstruct->MPIIsRecv = fromsourcerank ;
- MPI_Status *aStatus = new MPI_Status ;
- mpiaccessstruct->MPIStatus = aStatus ;
- mpiaccessstruct->MPIAsynchronous = asynchronous ;
- mpiaccessstruct->MPICompleted = !asynchronous ;
- mpiaccessstruct->MPIOutCount = -1 ;
- if ( !asynchronous )
- {
- mpiaccessstruct->MPIRequest = MPI_REQUEST_NULL ;
- mpiaccessstruct->MPIStatus->MPI_SOURCE = destsourcerank ;
- mpiaccessstruct->MPIStatus->MPI_TAG = tag ;
- mpiaccessstruct->MPIStatus->MPI_ERROR = MPI_SUCCESS ;
- }
- if ( _request == _max_request )
- _request = _base_request ;
- _request += 1 ;
- _map_of_request_struct[_request] = mpiaccessstruct ;
- if ( fromsourcerank )
- _recv_request[ destsourcerank ] = _request;
- else
- _send_request[ destsourcerank ] = _request;
- if ( _trace )
- cout << "NewRequest" << _my_rank << "( " << _request << " ) "
- << mpiaccessstruct << endl ;
- return _request ;
- }
-
- /*
- MPIAccess and "tags" (or "MPITags") :
- =====================================
-
- . The constructor give the possibility to choose an interval of
- tags to use : [BaseTag , MaxTag].
- The default is [ 0 , MPI_TAG_UB], MPI_TAG_UB being the maximum
- value in an implementation of MPI (minimum 32767 = 2**15-1).
- On awa with the implementation lam MPI_TAG_UB value is
- 7353944. The norm MPI specify that value is the same in all
- processes started by mpirun.
- In the case of the use of the same IntraCommunicator in a process
- for several distinct data flows (or for several IntraCommunicators
- with common processes), that permits to avoid ambiguity
- and may help debug.
-
- . In MPIAccess the tags have two parts (#define MODULO_TAG 10) :
- + The last decimal digit decimal correspond to MPI_DataType ( 1 for
- TimeMessages, 2 for MPI_INT and 3 for MPI_DOUBLE)
- + The value of other digits correspond to a circular number for each
- message.
- + A TimeMessage and the associated DataMessage have the same number
- (but the types are different and the tags also).
-
- . For a Send of a message from a process "source" to a process
- "target", we have _send_MPI_tag[target] in the process
- source (it contains the last "tag" used for the Send of a
- message to the process target).
- And in the process "target" which receive that message, we have
- _recv_MPI_Tag[source] (it contains the last "tag" used for the Recv
- of messages from the process source).
- Naturally in the MPI norm the values of that tags must be the same.
- */
- int MPIAccess::newSendTag( MPI_Datatype datatype, int destrank , int method ,
- bool asynchronous, int &RequestId )
- {
- int tag ;
- tag = incrTag( _send_MPI_tag[destrank] ) ;
- tag = valTag( tag, method ) ;
- _send_MPI_tag[ destrank ] = tag ;
- RequestId = newRequest( datatype, tag, destrank , false , asynchronous ) ;
- _send_request[ destrank ] = RequestId ;
- _send_requests[ destrank ].push_back( RequestId ) ;
- return tag ;
- }
-
- int MPIAccess::newRecvTag( MPI_Datatype datatype, int sourcerank , int method ,
- bool asynchronous, int &RequestId )
- {
- int tag ;
- tag = incrTag( _recv_MPI_Tag[sourcerank] ) ;
- tag = valTag( tag, method ) ;
- _recv_MPI_Tag[ sourcerank ] = tag ;
- RequestId = newRequest( datatype, tag , sourcerank , true , asynchronous ) ;
- _recv_request[ sourcerank ] = RequestId ;
- _recv_requests[ sourcerank ].push_back( RequestId ) ;
- return tag ;
- }
-
- // Returns the number of all SendRequestIds that may be used to allocate
- // ArrayOfSendRequests for the call to SendRequestIds
- int MPIAccess::sendRequestIdsSize()
- {
- int size = 0;
- for (int i = 0 ; i < _processor_group_size ; i++ )
- size += (int)_send_requests[ i ].size() ;
- return size ;
- }
-
- // Returns in ArrayOfSendRequests with the dimension "size" all the
- // SendRequestIds
- int MPIAccess::sendRequestIds(int size, int *ArrayOfSendRequests)
- {
- int destrank ;
- int i = 0 ;
- for ( destrank = 0 ; destrank < _processor_group_size ; destrank++ )
- {
- list< int >::const_iterator iter ;
- for (iter = _send_requests[ destrank ].begin() ; iter != _send_requests[destrank].end() ; iter++ )
- ArrayOfSendRequests[i++] = *iter ;
- }
- return i ;
- }
-
- // Returns the number of all RecvRequestIds that may be used to allocate
- // ArrayOfRecvRequests for the call to RecvRequestIds
- int MPIAccess::recvRequestIdsSize()
- {
- int size = 0 ;
- for (int i = 0 ; i < _processor_group_size ; i++ )
- size += (int)_recv_requests[ i ].size() ;
- return size ;
- }
-
- // Returns in ArrayOfRecvRequests with the dimension "size" all the
- // RecvRequestIds
- int MPIAccess::recvRequestIds(int size, int *ArrayOfRecvRequests)
- {
- int sourcerank ;
- int i = 0 ;
- for ( sourcerank = 0 ; sourcerank < _processor_group_size ; sourcerank++ )
- {
- list< int >::const_iterator iter ;
- for (iter = _recv_requests[ sourcerank ].begin() ; iter != _recv_requests[sourcerank].end() ; iter++ )
- ArrayOfRecvRequests[i++] = *iter ;
- }
- return i ;
- }
-
- // Returns in ArrayOfSendRequests with the dimension "size" all the
- // SendRequestIds to a destination rank
- int MPIAccess::sendRequestIds(int destrank, int size, int *ArrayOfSendRequests)
- {
- if (size < (int)_send_requests[destrank].size() )
- throw INTERP_KERNEL::Exception("wrong call to MPIAccess::SendRequestIds");
- int i = 0 ;
- list< int >::const_iterator iter ;
- for (iter = _send_requests[ destrank ].begin() ; iter != _send_requests[destrank].end() ; iter++ )
- ArrayOfSendRequests[i++] = *iter ;
- return (int)_send_requests[destrank].size() ;
- }
-
- // Returns in ArrayOfRecvRequests with the dimension "size" all the
- // RecvRequestIds from a sourcerank
- int MPIAccess::recvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests)
- {
- if (size < (int)_recv_requests[sourcerank].size() )
- throw INTERP_KERNEL::Exception("wrong call to MPIAccess::RecvRequestIds");
- int i = 0 ;
- list< int >::const_iterator iter ;
- _recv_requests[ sourcerank ] ;
- for (iter = _recv_requests[ sourcerank ].begin() ; iter != _recv_requests[sourcerank].end() ; iter++ )
- ArrayOfRecvRequests[i++] = *iter ;
- return (int)_recv_requests[sourcerank].size() ;
- }
-
- // Send in synchronous mode count values of type datatype from buffer to target
- // (returns RequestId identifier even if the corresponding structure is deleted :
- // it is only in order to have the same signature as the asynchronous mode)
- int MPIAccess::send(void* buffer, int count, MPI_Datatype datatype, int target, int &RequestId)
- {
- int sts = MPI_SUCCESS ;
- RequestId = -1 ;
- if ( count )
- {
- _MessageIdent aMethodIdent = methodId( datatype ) ;
- int MPItag = newSendTag( datatype, target , aMethodIdent , false , RequestId ) ;
- if ( aMethodIdent == _message_time )
- {
- TimeMessage *aTimeMsg = (TimeMessage *) buffer ;
- aTimeMsg->tag = MPItag ;
- }
- deleteRequest( RequestId ) ;
- sts = _comm_interface.send(buffer, count, datatype, target, MPItag,
- *_intra_communicator ) ;
- if ( _trace )
- cout << "MPIAccess::Send" << _my_rank << " SendRequestId "
- << RequestId << " count " << count << " target " << target
- << " MPItag " << MPItag << endl ;
- }
- return sts ;
- }
-
- // Receive (read) in synchronous mode count values of type datatype in buffer from source
- // (returns RequestId identifier even if the corresponding structure is deleted :
- // it is only in order to have the same signature as the asynchronous mode)
- // The output argument OutCount is optional : *OutCount <= count
- int MPIAccess::recv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId, int *OutCount)
- {
- int sts = MPI_SUCCESS ;
- RequestId = -1 ;
- if ( OutCount != NULL )
- *OutCount = -1 ;
- if ( count )
- {
- _MessageIdent aMethodIdent = methodId( datatype ) ;
- int MPItag = newRecvTag( datatype, source , aMethodIdent , false , RequestId ) ;
- sts = _comm_interface.recv(buffer, count, datatype, source, MPItag,
- *_intra_communicator , MPIStatus( RequestId ) ) ;
- int outcount = 0 ;
- if ( sts == MPI_SUCCESS )
- {
- MPI_Datatype datatype2 = MPIDatatype( RequestId ) ;
- _comm_interface.getCount(MPIStatus( RequestId ), datatype2, &outcount ) ;
- setMPIOutCount( RequestId , outcount ) ;
- setMPICompleted( RequestId , true ) ;
- deleteStatus( RequestId ) ;
- }
- if ( OutCount != NULL )
- *OutCount = outcount ;
- if ( _trace )
- cout << "MPIAccess::Recv" << _my_rank << " RecvRequestId "
- << RequestId << " count " << count << " source " << source
- << " MPItag " << MPItag << endl ;
- deleteRequest( RequestId ) ;
- }
- return sts ;
- }
-
- // Send in asynchronous mode count values of type datatype from buffer to target
- // Returns RequestId identifier.
- int MPIAccess::ISend(void* buffer, int count, MPI_Datatype datatype, int target, int &RequestId)
- {
- int sts = MPI_SUCCESS ;
- RequestId = -1 ;
- if ( count )
- {
- _MessageIdent aMethodIdent = methodId( datatype ) ;
- int MPItag = newSendTag( datatype, target , aMethodIdent , true , RequestId ) ;
- if ( aMethodIdent == _message_time )
- {
- TimeMessage *aTimeMsg = (TimeMessage *) buffer ;
- aTimeMsg->tag = MPItag ;
- }
- MPI_Request *aSendRequest = MPIRequest( RequestId ) ;
- if ( _trace )
- {
- cout << "MPIAccess::ISend" << _my_rank << " ISendRequestId "
- << RequestId << " count " << count << " target " << target
- << " MPItag " << MPItag << endl ;
- if ( MPItag == 1 )
- cout << "MPIAccess::ISend" << _my_rank << " time "
- << ((TimeMessage *)buffer)->time << " " << ((TimeMessage *)buffer)->deltatime
- << endl ;
- }
- sts = _comm_interface.Isend(buffer, count, datatype, target, MPItag,
- *_intra_communicator , aSendRequest) ;
- }
- return sts ;
- }
-
- // Receive (read) in asynchronous mode count values of type datatype in buffer from source
- // returns RequestId identifier.
- int MPIAccess::IRecv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId)
- {
- int sts = MPI_SUCCESS ;
- RequestId = -1 ;
- if ( count )
- {
- _MessageIdent aMethodIdent = methodId( datatype ) ;
- int MPItag = newRecvTag( datatype, source , aMethodIdent , true , RequestId ) ;
- MPI_Request *aRecvRequest = MPIRequest( RequestId ) ;
- if ( _trace )
- {
- cout << "MPIAccess::IRecv" << _my_rank << " IRecvRequestId "
- << RequestId << " count " << count << " source " << source
- << " MPItag " << MPItag << endl ;
- if ( MPItag == 1 )
- cout << "MPIAccess::ISend" << _my_rank << " time "
- << ((TimeMessage *)buffer)->time << " " << ((TimeMessage *)buffer)->deltatime
- << endl ;
- }
- sts = _comm_interface.Irecv(buffer, count, datatype, source, MPItag,
- *_intra_communicator , aRecvRequest) ;
- }
- return sts ;
- }
-
- // Perform a Send and a Recv in synchronous mode
- int MPIAccess::sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
- int dest, int &SendRequestId,
- void* recvbuf, int recvcount, MPI_Datatype recvtype,
- int source, int &RecvRequestId, int *OutCount)
- {
- int sts = MPI_SUCCESS ;
- SendRequestId = -1 ;
- RecvRequestId = -1 ;
- if ( recvcount )
- sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ;
- int outcount = -1 ;
- if ( _trace )
- cout << "MPIAccess::SendRecv" << _my_rank << " IRecv RecvRequestId "
- << RecvRequestId << endl ;
- if ( sts == MPI_SUCCESS )
- {
- if ( sendcount )
- sts = send(sendbuf, sendcount, sendtype, dest, SendRequestId) ;
- if ( _trace )
- cout << "MPIAccess::SendRecv" << _my_rank << " Send SendRequestId "
- << SendRequestId << endl ;
- if ( sts == MPI_SUCCESS && recvcount )
- {
- sts = wait( RecvRequestId ) ;
- outcount = MPIOutCount( RecvRequestId ) ;
- if ( _trace )
- cout << "MPIAccess::SendRecv" << _my_rank << " IRecv RecvRequestId "
- << RecvRequestId << " outcount " << outcount << endl ;
- }
- }
- if ( OutCount != NULL )
- {
- *OutCount = outcount ;
- if ( _trace )
- cout << "MPIAccess::SendRecv" << _my_rank << " *OutCount = " << *OutCount
- << endl ;
- }
- deleteRequest( RecvRequestId ) ;
- return sts ;
- }
-
- // Perform a Send and a Recv in asynchronous mode
- int MPIAccess::ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
- int dest, int &SendRequestId,
- void* recvbuf, int recvcount, MPI_Datatype recvtype,
- int source, int &RecvRequestId)
- {
- int sts = MPI_SUCCESS ;
- SendRequestId = -1 ;
- RecvRequestId = -1 ;
- if ( recvcount )
- sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ;
- if ( sts == MPI_SUCCESS )
- if ( sendcount )
- sts = ISend(sendbuf, sendcount, sendtype, dest, SendRequestId) ;
- return sts ;
- }
-
- // Perform a wait of a Send or Recv asynchronous Request
- // Do nothing for a synchronous Request
- // Manage MPI_Request * and MPI_Status * structure
- int MPIAccess::wait( int RequestId )
- {
- int status = MPI_SUCCESS ;
- if ( !MPICompleted( RequestId ) )
- {
- if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL )
- {
- if ( _trace )
- cout << "MPIAccess::Wait" << _my_rank << " -> wait( " << RequestId
- << " ) MPIRequest " << MPIRequest( RequestId ) << " MPIStatus "
- << MPIStatus( RequestId ) << " MPITag " << MPITag( RequestId )
- << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ;
- status = _comm_interface.wait(MPIRequest( RequestId ), MPIStatus( RequestId )) ;
- }
- else
- {
- if ( _trace )
- cout << "MPIAccess::Wait" << _my_rank << " MPIRequest == MPI_REQUEST_NULL"
- << endl ;
- }
- setMPICompleted( RequestId , true ) ;
- if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) )
- {
- MPI_Datatype datatype = MPIDatatype( RequestId ) ;
- int outcount ;
- status = _comm_interface.getCount(MPIStatus( RequestId ), datatype,
- &outcount ) ;
- if ( status == MPI_SUCCESS )
- {
- setMPIOutCount( RequestId , outcount ) ;
- deleteStatus( RequestId ) ;
- if ( _trace )
- cout << "MPIAccess::Wait" << _my_rank << " RequestId " << RequestId
- << "MPIIsRecv " << MPIIsRecv( RequestId ) << " outcount " << outcount
- << endl ;
- }
- else
- {
- if ( _trace )
- cout << "MPIAccess::Wait" << _my_rank << " MPIIsRecv "
- << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
- }
- }
- else
- {
- if ( _trace )
- cout << "MPIAccess::Wait" << _my_rank << " MPIIsRecv " << MPIIsRecv( RequestId )
- << " MPIOutCount " << MPIOutCount( RequestId ) << endl ;
- }
- }
- if ( _trace )
- cout << "MPIAccess::Wait" << _my_rank << " RequestId " << RequestId
- << " Request " << MPIRequest( RequestId )
- << " Status " << MPIStatus( RequestId ) << " MPICompleted "
- << MPICompleted( RequestId ) << " MPIOutCount " << MPIOutCount( RequestId )
- << endl ;
- return status ;
- }
-
- // Perform a "test" of a Send or Recv asynchronous Request
- // If the request is done, returns true in the flag argument
- // If the request is not finished, returns false in the flag argument
- // Do nothing for a synchronous Request
- // Manage MPI_request * and MPI_status * structure
- int MPIAccess::test(int RequestId, int &flag)
- {
- int status = MPI_SUCCESS ;
- flag = MPICompleted( RequestId ) ;
- if ( _trace )
- cout << "MPIAccess::Test" << _my_rank << " flag " << flag ;
- if ( MPIIsRecv( RequestId ) )
- {
- if ( _trace )
- cout << " Recv" ;
- }
- else
- {
- if ( _trace )
- cout << " Send" ;
- }
- if( _trace )
- cout << "Request" << RequestId << " " << MPIRequest( RequestId )
- << " Status " << MPIStatus( RequestId ) << endl ;
- if ( !flag )
- {
- if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL )
- {
- if ( _trace )
- cout << "MPIAccess::Test" << _my_rank << " -> test( " << RequestId
- << " ) MPIRequest " << MPIRequest( RequestId )
- << " MPIStatus " << MPIStatus( RequestId )
- << " MPITag " << MPITag( RequestId )
- << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ;
- status = _comm_interface.test(MPIRequest( RequestId ), &flag,
- MPIStatus( RequestId )) ;
- }
- else
- {
- if ( _trace )
- cout << "MPIAccess::Test" << _my_rank << " MPIRequest == MPI_REQUEST_NULL"
- << endl ;
- }
- if ( flag )
- {
- setMPICompleted( RequestId , true ) ;
- if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) )
- {
- int outcount ;
- MPI_Datatype datatype = MPIDatatype( RequestId ) ;
- status = _comm_interface.getCount( MPIStatus( RequestId ), datatype,
- &outcount ) ;
- if ( status == MPI_SUCCESS )
- {
- setMPIOutCount( RequestId , outcount ) ;
- deleteStatus( RequestId ) ;
- if ( _trace )
- cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv "
- << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
- }
- else
- {
- if ( _trace )
- cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv "
- << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
- }
- }
- else
- {
- if ( _trace )
- cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv "
- << MPIIsRecv( RequestId ) << " MPIOutCount "
- << MPIOutCount( RequestId ) << endl ;
- }
- }
- }
- if ( _trace )
- cout << "MPIAccess::Test" << _my_rank << " RequestId " << RequestId
- << " flag " << flag << " MPICompleted " << MPICompleted( RequestId )
- << " MPIOutCount " << MPIOutCount( RequestId ) << endl ;
- return status ;
- }
-
- int MPIAccess::waitAny(int count, int *array_of_RequestIds, int &RequestId)
- {
- int status = MPI_ERR_OTHER ;
- RequestId = -1 ;
- cout << "MPIAccess::WaitAny not yet implemented" << endl ;
- return status ;
- }
-
- int MPIAccess::testAny(int count, int *array_of_RequestIds, int &RequestId, int &flag)
- {
- int status = MPI_ERR_OTHER ;
- RequestId = -1 ;
- flag = 0 ;
- cout << "MPIAccess::TestAny not yet implemented" << endl ;
- return status ;
- }
-
- // Perform a wait of each Send or Recv asynchronous Request of the array
- // array_of_RequestIds of size "count".
- // That array may be filled with a call to SendRequestIdsSize or RecvRequestIdsSize
- // Do nothing for a synchronous Request
- // Manage MPI_Request * and MPI_Status * structure
- int MPIAccess::waitAll(int count, int *array_of_RequestIds)
- {
- if ( _trace )
- cout << "WaitAll" << _my_rank << " : count " << count << endl ;
- int status ;
- int retstatus = MPI_SUCCESS ;
- int i ;
- for ( i = 0 ; i < count ; i++ )
- {
- if ( _trace )
- cout << "WaitAll" << _my_rank << " " << i << " -> Wait( "
- << array_of_RequestIds[i] << " )" << endl ;
- status = wait( array_of_RequestIds[i] ) ;
- if ( status != MPI_SUCCESS )
- retstatus = status ;
- }
- if ( _trace )
- cout << "EndWaitAll" << _my_rank << endl ;
- return retstatus ;
- }
-
- // Perform a "test" of each Send or Recv asynchronous Request of the array
- // array_of_RequestIds of size "count".
- // That array may be filled with a call to SendRequestIdsSize or RecvRequestIdsSize
- // If all requests are done, returns true in the flag argument
- // If all requests are not finished, returns false in the flag argument
- // Do nothing for a synchronous Request
- // Manage MPI_Request * and MPI_Status * structure
- int MPIAccess::testAll(int count, int *array_of_RequestIds, int &flag)
- {
- if ( _trace )
- cout << "TestAll" << _my_rank << " : count " << count << endl ;
- int status ;
- int retstatus = MPI_SUCCESS ;
- bool retflag = true ;
- int i ;
- for ( i = 0 ; i < count ; i++ )
- {
- status = test( array_of_RequestIds[i] , flag ) ;
- retflag = retflag && (flag != 0) ;
- if ( status != MPI_SUCCESS )
- retstatus = status ;
- }
- flag = retflag ;
- if ( _trace )
- cout << "EndTestAll" << _my_rank << endl ;
- return retstatus ;
- }
-
- int MPIAccess::waitSome(int count, int *array_of_RequestIds, int outcount,
- int *outarray_of_RequestIds)
- {
- int status = MPI_ERR_OTHER ;
- cout << "MPIAccess::WaitSome not yet implemented" << endl ;
- return status ;
- }
-
- int MPIAccess::testSome(int count, int *array_of_RequestIds, int outcounts,
- int *outarray_of_RequestIds)
- {
- int status = MPI_ERR_OTHER ;
- cout << "MPIAccess::TestSome not yet implemented" << endl ;
- return status ;
- }
-
- // Probe checks if a message is available for read from FromSource rank.
- // Returns the corresponding source, MPITag, datatype and outcount
- // Probe is a blocking call which wait until a message is available
- int MPIAccess::probe(int FromSource, int &source, int &MPITag,
- MPI_Datatype &myDatatype, int &outcount)
- {
- MPI_Status aMPIStatus ;
- int sts = _comm_interface.probe( FromSource, MPI_ANY_TAG,
- *_intra_communicator , &aMPIStatus ) ;
- if ( sts == MPI_SUCCESS )
- {
- source = aMPIStatus.MPI_SOURCE ;
- MPITag = aMPIStatus.MPI_TAG ;
- int MethodId = (MPITag % MODULO_TAG) ;
- myDatatype = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
- _comm_interface.getCount(&aMPIStatus, myDatatype, &outcount ) ;
- if ( _trace )
- cout << "MPIAccess::Probe" << _my_rank << " FromSource " << FromSource
- << " source " << source << " MPITag " << MPITag << " MethodId "
- << MethodId << " datatype " << myDatatype << " outcount " << outcount
- << endl ;
- }
- else
- {
- source = -1 ;
- MPITag = -1 ;
- myDatatype = 0 ;
- outcount = -1 ;
- }
- return sts ;
- }
-
- // IProbe checks if a message is available for read from FromSource rank.
- // If there is a message available, returns the corresponding source,
- // MPITag, datatype and outcount with flag = true
- // If not, returns flag = false
- int MPIAccess::IProbe(int FromSource, int &source, int &MPITag,
- MPI_Datatype &myDataType, int &outcount, int &flag)
- {
- MPI_Status aMPIStatus ;
- int sts = _comm_interface.Iprobe( FromSource, MPI_ANY_TAG,
- *_intra_communicator , &flag,
- &aMPIStatus ) ;
- if ( sts == MPI_SUCCESS && flag )
- {
- source = aMPIStatus.MPI_SOURCE ;
- MPITag = aMPIStatus.MPI_TAG ;
- int MethodId = (MPITag % MODULO_TAG) ;
- myDataType = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
- _comm_interface.getCount(&aMPIStatus, myDataType, &outcount ) ;
- if ( _trace )
- cout << "MPIAccess::IProbe" << _my_rank << " FromSource " << FromSource
- << " source " << source << " MPITag " << MPITag << " MethodId "
- << MethodId << " datatype " << myDataType << " outcount " << outcount
- << " flag " << flag << endl ;
- }
- else
- {
- source = -1 ;
- MPITag = -1 ;
- myDataType = 0 ;
- outcount = -1 ;
- }
- return sts ;
- }
-
- // Cancel concerns a "posted" asynchronous IRecv
- // Returns flag = true if the receiving request was successfully canceled
- // Returns flag = false if the receiving request was finished but not canceled
- // Use cancel, wait and test_cancelled of the MPI API
- int MPIAccess::cancel( int RecvRequestId, int &flag )
- {
- flag = 0 ;
- int sts = _comm_interface.cancel( MPIRequest( RecvRequestId ) ) ;
- if ( sts == MPI_SUCCESS )
- {
- sts = _comm_interface.wait( MPIRequest( RecvRequestId ) ,
- MPIStatus( RecvRequestId ) ) ;
- if ( sts == MPI_SUCCESS )
- sts = _comm_interface.testCancelled( MPIStatus( RecvRequestId ) , &flag ) ;
- }
- return sts ;
- }
-
- // Cancel concerns a "pending" receiving message (without IRecv "posted")
- // Returns flag = true if the message was successfully canceled
- // Returns flag = false if the receiving request was finished but not canceled
- // Use Irecv, cancel, wait and test_cancelled of the MPI API
- int MPIAccess::cancel( int source, int theMPITag, MPI_Datatype datatype, int outcount, int &flag )
- {
- int sts ;
- MPI_Aint extent, lbound ;
- flag = 0 ;
- sts = MPI_Type_get_extent( datatype , &lbound, &extent ) ;
- if ( sts == MPI_SUCCESS )
- {
- void * recvbuf = malloc( extent*outcount ) ;
- MPI_Request aRecvRequest ;
- if ( _trace )
- cout << "MPIAccess::Cancel" << _my_rank << " Irecv extent " << extent
- << " datatype " << datatype << " source " << source << " theMPITag "
- << theMPITag << endl ;
- sts = _comm_interface.Irecv( recvbuf, outcount, datatype, source, theMPITag,
- *_intra_communicator , &aRecvRequest ) ;
- if ( sts == MPI_SUCCESS )
- {
- sts = _comm_interface.cancel( &aRecvRequest ) ;
- if ( _trace )
- cout << "MPIAccess::Cancel" << _my_rank << " theMPITag " << theMPITag
- << " cancel done" << endl ;
- if ( sts == MPI_SUCCESS )
- {
- MPI_Status aStatus ;
- if ( _trace )
- cout << "MPIAccess::Cancel" << _my_rank << " wait" << endl ;
- sts = _comm_interface.wait( &aRecvRequest , &aStatus ) ;
- if ( sts == MPI_SUCCESS )
- {
- if ( _trace )
- cout << "MPIAccess::Cancel" << _my_rank << " test_cancelled" << endl ;
- sts = _comm_interface.testCancelled( &aStatus , &flag ) ;
- }
- }
- }
- if ( _trace && datatype == timeType() )
- cout << "MPIAccess::Cancel" << _my_rank << " time "
- << ((TimeMessage *) recvbuf)->time << " "
- << ((TimeMessage *) recvbuf)->deltatime << endl ;
- free( recvbuf ) ;
- }
- if ( _trace )
- cout << "MPIAccess::Cancel" << _my_rank << " flag " << flag << endl ;
- return sts ;
- }
-
-
- // CancelAll concerns all "pending" receiving message (without IRecv "posted")
- // CancelAll use IProbe and Cancel (see obove)
- int MPIAccess::cancelAll()
- {
- int sts = MPI_SUCCESS ;
- int target ;
- int source ;
- int MPITag ;
- MPI_Datatype datatype ;
- int outcount ;
- int flag ;
- for ( target = 0 ; target < _processor_group_size ; target++ )
- {
- sts = IProbe(target, source, MPITag, datatype, outcount, flag) ;
- if ( sts == MPI_SUCCESS && flag )
- {
- sts = cancel(source, MPITag, datatype, outcount, flag) ;
- if ( _trace )
- cout << "MPIAccess::CancelAll" << _my_rank << " source " << source
- << " MPITag " << MPITag << " datatype " << datatype
- << " outcount " << outcount << " Cancel flag " << flag << endl ;
- if ( sts != MPI_SUCCESS )
- break ;
- }
- else if ( sts != MPI_SUCCESS )
- break ;
- }
- return sts ;
- }
-
- // Same as barrier of MPI API
- int MPIAccess::barrier()
- {
- int status = _comm_interface.barrier( *_intra_communicator ) ;
- return status ;
- }
-
- // Same as Error_string of MPI API
- int MPIAccess::errorString(int errorcode, char *string, int *resultlen) const
- {
- return _comm_interface.errorString( errorcode, string, resultlen) ;
- }
-
- // Returns source, tag, error and outcount corresponding to receiving RequestId
- // By default the corresponding structure of RequestId is deleted
- int MPIAccess::status(int RequestId, int &source, int &tag, int &error,
- int &outcount, bool keepRequestStruct)
- {
- MPI_Status *myStatus = MPIStatus( RequestId ) ;
- if ( _trace )
- cout << "MPIAccess::status" << _my_rank << " RequestId " << RequestId
- << " status " << myStatus << endl ;
- if ( myStatus != NULL && MPIAsynchronous( RequestId ) &&
- MPICompleted( RequestId ) )
- {
- if ( MPIIsRecv( RequestId ) )
- {
- source = myStatus->MPI_SOURCE ;
- tag = myStatus->MPI_TAG ;
- error = myStatus->MPI_ERROR ;
- MPI_Datatype datatype = MPIDatatype( RequestId ) ;
- _comm_interface.getCount(myStatus, datatype, &outcount ) ;
- if ( _trace )
- cout << "MPIAccess::status" << _my_rank << " RequestId " << RequestId
- << " status " << myStatus << " outcount " << outcount << endl ;
- setMPIOutCount( RequestId , outcount ) ;
- }
- else
- {
- source = MPITarget( RequestId ) ;
- tag = MPITag( RequestId ) ;
- error = 0 ;
- outcount = MPIOutCount( RequestId ) ;
- }
- if ( !keepRequestStruct )
- deleteRequest( RequestId ) ;
- return MPI_SUCCESS ;
- }
- else
- {
- source = MPITarget( RequestId ) ;
- tag = MPITag( RequestId ) ;
- error = 0 ;
- outcount = MPIOutCount( RequestId ) ;
- }
- return MPI_SUCCESS ;
- }
-
- int MPIAccess::requestFree( MPI_Request *request )
- {
- return _comm_interface.requestFree( request ) ;
- }
-
- // Print all information of all known requests for debugging purpose
- void MPIAccess::check() const
- {
- int i = 0 ;
- map< int , RequestStruct * >::const_iterator MapOfRequestStructiterator ;
- cout << "MPIAccess::Check" << _my_rank << "_map_of_request_struct_size "
- << _map_of_request_struct.size() << endl ;
- for ( MapOfRequestStructiterator = _map_of_request_struct.begin() ;
- MapOfRequestStructiterator != _map_of_request_struct.end() ;
- MapOfRequestStructiterator++ )
- {
- if ( MapOfRequestStructiterator->second != NULL )
- {
- cout << " Check" << _my_rank << " " << i << ". Request"
- << MapOfRequestStructiterator->first << "-->" ;
- if ( (MapOfRequestStructiterator->second)->MPIAsynchronous )
- cout << "I" ;
- if ( (MapOfRequestStructiterator->second)->MPIIsRecv )
- cout << "Recv from " ;
- else
- cout << "Send to " ;
- cout << (MapOfRequestStructiterator->second)->MPITarget
- << " MPITag " << (MapOfRequestStructiterator->second)->MPITag
- << " DataType " << (MapOfRequestStructiterator->second)->MPIDatatype
- << " Request " << (MapOfRequestStructiterator->second)->MPIRequest
- << " Status " << (MapOfRequestStructiterator->second)->MPIStatus
- << " Completed " << (MapOfRequestStructiterator->second)->MPICompleted
- << endl ;
- }
- i++ ;
- }
- }
-
- // Returns the MPI size of a TimeMessage
- MPI_Aint MPIAccess::timeExtent() const
- {
- MPI_Aint aextent, lbound ;
- MPI_Type_get_extent( _MPI_TIME , &lbound, &aextent ) ;
- return aextent ;
- }
-
- // Returns the MPI size of a MPI_INT
- MPI_Aint MPIAccess::intExtent() const
- {
- MPI_Aint aextent, lbound ;
- MPI_Type_get_extent( MPI_INT , &lbound, &aextent ) ;
- return aextent ;
- }
-
- // Returns the MPI size of a MPI_LONG
- MPI_Aint MPIAccess::longExtent() const
- {
- MPI_Aint aextent, lbound ;
- MPI_Type_get_extent( MPI_LONG , &lbound, &aextent ) ;
- return aextent ;
- }
-
- // Returns the MPI size of a MPI_DOUBLE
- MPI_Aint MPIAccess::doubleExtent() const
- {
- MPI_Aint aextent, lbound ;
- MPI_Type_get_extent( MPI_DOUBLE , &lbound, &aextent ) ;
- return aextent ;
- }
-
- // Outputs fields of a TimeMessage structure
- ostream & operator<< (ostream & f ,const TimeMessage & aTimeMsg )
- {
- f << " time " << aTimeMsg.time << " deltatime " << aTimeMsg.deltatime
- << " tag " << aTimeMsg.tag ;
- return f;
- }
-
- // Outputs the DataType coded in a Tag
- ostream & operator<< (ostream & f ,const _MessageIdent & methodtype )
- {
- switch (methodtype)
- {
- case _message_time :
- f << " MethodTime ";
- break;
- case _message_int :
- f << " MPI_INT ";
- break;
- case _message_double :
- f << " MPI_DOUBLE ";
- break;
- default :
- f << " UnknownMethodType ";
- break;
- }
- return f;
- }
-}
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#ifndef __MPIACCESS_HXX__
-#define __MPIACCESS_HXX__
-
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-
-#include <map>
-#include <list>
-#include <vector>
-#include <iostream>
-
-namespace MEDCoupling
-{
- typedef struct
- {
- double time ;
- double deltatime ;
- int tag ;
- } TimeMessage;
-
- static MPI_Request mpirequestnull = MPI_REQUEST_NULL ;
- enum _MessageIdent { _message_unknown, _message_time, _message_int, _message_double } ;
-
- class MPIAccess
- {
- private:
- struct RequestStruct
- {
- int MPITarget ;
- bool MPIIsRecv ;
- int MPITag ;
- bool MPIAsynchronous ;
- bool MPICompleted ;
- MPI_Datatype MPIDatatype ;
- MPI_Request MPIRequest ;
- MPI_Status *MPIStatus ;
- int MPIOutCount ;
- };
- public:
- MPIAccess(MPIProcessorGroup * ProcessorGroup, int BaseTag=0, int MaxTag=0) ;
- virtual ~MPIAccess() ;
-
- void trace( bool trace = true ) ;
-
- void deleteRequest( int RequestId ) ;
- void deleteRequests(int size , int *ArrayOfSendRequests ) ;
-
- int sendMPITag(int destrank) ;
- int recvMPITag(int sourcerank) ;
-
- int sendRequestIdsSize() ;
- int sendRequestIds(int size, int *ArrayOfSendRequests) ;
- int recvRequestIdsSize() ;
- int recvRequestIds(int size, int *ArrayOfRecvRequests) ;
-
- int sendRequestIdsSize(int destrank) ;
- int sendRequestIds(int destrank, int size, int *ArrayOfSendRequests) ;
- int recvRequestIdsSize(int sourcerank) ;
- int recvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests) ;
-
- int send(void* buffer, int count, MPI_Datatype datatype, int target,
- int &RequestId) ;
- int ISend(void* buffer, int count, MPI_Datatype datatype, int target,
- int &RequestId) ;
- int recv(void* buffer, int count, MPI_Datatype datatype, int source,
- int &RequestId, int *OutCount=NULL) ;
- int IRecv(void* buffer, int count, MPI_Datatype datatype, int source,
- int &RequestId) ;
- int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest,
- int &SendRequestId, void* recvbuf, int recvcount,
- MPI_Datatype recvtype, int source,
- int &RecvRequestId, int *OutCount=NULL) ;
- int ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest,
- int &SendRequestId, void* recvbuf, int recvcount,
- MPI_Datatype recvtype, int source, int &RecvRequestId) ;
-
- int wait(int RequestId) ;
- int test(int RequestId, int &flag) ;
- int waitAny(int count, int *array_of_RequestIds, int &RequestId) ;
- int testAny(int count, int *array_of_RequestIds, int &RequestId, int &flag) ;
- int waitAll(int count, int *array_of_RequestIds) ;
- int testAll(int count, int *array_of_RequestIds, int &flag) ;
- int waitSome(int count, int *array_of_RequestIds, int outcount,
- int *outarray_of_RequestIds) ;
- int testSome(int count, int *array_of_RequestIds, int outcounts,
- int *outarray_of_RequestIds) ;
- int probe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype,
- int &outcount) ;
- int IProbe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype,
- int &outcount, int &flag) ;
- int cancel( int RecvRequestId, int &flag ) ;
- int cancel( int source, int MPITag, MPI_Datatype datatype, int outcount,
- int &flag ) ;
- int cancelAll() ;
- int barrier() ;
- int errorString(int errorcode, char *string, int *resultlen) const ;
- int status(int RequestId, int &source, int &tag, int &error, int &outcount,
- bool keepRequestStruct=false) ;
- int requestFree( MPI_Request *request ) ;
-
- void check() const ;
-
- MPI_Datatype timeType() const ;
- bool isTimeMessage( int MPITag ) const ;
- MPI_Aint timeExtent() const ;
- MPI_Aint intExtent() const ;
- MPI_Aint longExtent() const ;
- MPI_Aint doubleExtent() const ;
- MPI_Aint extent( MPI_Datatype datatype ) const ;
-
- int MPITag( int RequestId ) ;
- int MPITarget( int RequestId ) ;
- bool MPIIsRecv( int RequestId ) ;
- bool MPIAsynchronous( int RequestId ) ;
- bool MPICompleted( int RequestId ) ;
- MPI_Datatype MPIDatatype( int RequestId ) ;
- int MPIOutCount( int RequestId ) ;
-
- private:
- int newRequest( MPI_Datatype datatype, int tag , int destsourcerank ,
- bool fromsourcerank , bool asynchronous ) ;
- int newSendTag( MPI_Datatype datatype, int destrank , int method ,
- bool asynchronous, int &RequestId ) ;
- int newRecvTag( MPI_Datatype datatype, int sourcerank , int method ,
- bool asynchronous, int &RequestId ) ;
- int incrTag( int prevtag ) ;
- int valTag( int tag, int method ) ;
-
- void deleteSendRecvRequest( int RequestId ) ;
-
- void deleteStatus( int RequestId ) ;
-
- MPI_Request *MPIRequest( int RequestId ) ;
- MPI_Status *MPIStatus( int RequestId ) ;
- void setMPICompleted( int RequestId , bool completed ) ;
- void setMPIOutCount( int RequestId , int outcount ) ;
- void clearMPIStatus( int RequestId ) ;
-
- _MessageIdent methodId( MPI_Datatype datatype ) const ;
- MPI_Datatype datatype( _MessageIdent aMethodIdent ) const ;
- private:
- const CommInterface &_comm_interface ;
- const MPI_Comm* _intra_communicator ;
- MPIProcessorGroup * _processor_group ;
- int _processor_group_size ;
- int _my_rank ;
- bool _trace ;
- int _base_request ;
- int _max_request ;
- int _request ;
- int * _send_request ;
- int * _recv_request ;
- std::vector< std::list< int > > _send_requests ;
- std::vector< std::list< int > > _recv_requests ;
- int _base_MPI_tag ;
- int _max_MPI_tag ;
- int * _send_MPI_tag ;
- int * _recv_MPI_Tag ;
- MPI_Datatype _MPI_TIME ;
- static const int MODULO_TAG=10;
- std::map< int , RequestStruct * > _map_of_request_struct ;
-
- };
-
- inline void MPIAccess::trace( bool atrace )
- {
- _trace = atrace ;
- }
-
- // Delete the structure Request corresponding to RequestId identifier after
- // the deletion of the structures MPI_Request * and MPI_Status *
- // remove it from _MapOfRequestStruct (erase)
- inline void MPIAccess::deleteRequest( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- {
- if ( _trace )
- std::cout << "MPIAccess::DeleteRequest" << _my_rank << "( " << RequestId << " ) "
- << aRequestStruct << " MPIRequest " << aRequestStruct->MPIRequest
- << " MPIIsRecv " << aRequestStruct->MPIIsRecv << std::endl ;
- if ( _map_of_request_struct[RequestId]->MPIRequest != MPI_REQUEST_NULL )
- requestFree( &_map_of_request_struct[RequestId]->MPIRequest ) ;
- deleteSendRecvRequest( RequestId ) ;
- deleteStatus( RequestId ) ;
- _map_of_request_struct.erase( RequestId ) ;
- delete aRequestStruct ;
- }
- else
- {
- if ( _trace )
- std::cout << "MPIAccess::DeleteRequest" << _my_rank << "( " << RequestId
- << " ) Request not found" << std::endl ;
- }
- }
-
- // Delete all requests of the array ArrayOfSendRequests
- inline void MPIAccess::deleteRequests(int size , int *ArrayOfSendRequests )
- {
- for (int i = 0 ; i < size ; i++ )
- deleteRequest( ArrayOfSendRequests[i] ) ;
- }
-
- // Returns the last MPITag of the destination rank destrank
- inline int MPIAccess::sendMPITag(int destrank)
- {
- return _send_MPI_tag[destrank] ;
- }
-
- // Returns the last MPITag of the source rank sourcerank
- inline int MPIAccess::recvMPITag(int sourcerank)
- {
- return _recv_MPI_Tag[sourcerank] ;
- }
-
- // Returns the number of all SendRequestIds matching a destination rank. It may be
- // used to allocate ArrayOfSendRequests for the call to SendRequestIds
- inline int MPIAccess::sendRequestIdsSize(int destrank)
- {
- return (int)_send_requests[destrank].size() ;
- }
-
- // Returns the number of all RecvRequestIds matching a source rank. It may be
- // used to allocate ArrayOfRecvRequests for the call to RecvRequestIds
- inline int MPIAccess::recvRequestIdsSize(int sourcerank)
- {
- return (int)_recv_requests[sourcerank].size() ;
- }
-
- // Returns the MPI_Datatype (registered in MPI in the constructor with
- // MPI_Type_struct and MPI_Type_commit) for TimeMessages
- inline MPI_Datatype MPIAccess::timeType() const
- {
- return _MPI_TIME ;
- }
-
- // Returns true if the tag MPITag corresponds to a TimeMessage
- inline bool MPIAccess::isTimeMessage( int aMPITag ) const
- {
- return ((aMPITag%MODULO_TAG) == _message_time) ;
- }
-
- // Returns the MPI size of the MPI_Datatype datatype
- inline MPI_Aint MPIAccess::extent( MPI_Datatype adatatype ) const
- {
- if ( adatatype == _MPI_TIME )
- return timeExtent() ;
- if ( adatatype == MPI_INT )
- return intExtent() ;
- if ( adatatype == MPI_LONG )
- return longExtent() ;
- if ( adatatype == MPI_DOUBLE )
- return doubleExtent() ;
- return 0 ;
- }
-
- // Returns the MPITag of the request corresponding to RequestId identifier
- inline int MPIAccess::MPITag( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return aRequestStruct->MPITag ;
- return -1 ;
- }
-
- // Returns the MPITarget of the request corresponding to RequestId identifier
- inline int MPIAccess::MPITarget( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return aRequestStruct->MPITarget ;
- return -1 ;
- }
-
- // Returns true if the request corresponding to RequestId identifier was [I]Recv
- inline bool MPIAccess::MPIIsRecv( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return aRequestStruct->MPIIsRecv ;
- return false ;
- }
-
- // Returns true if the request corresponding to RequestId identifier was asynchronous
- inline bool MPIAccess::MPIAsynchronous( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return aRequestStruct->MPIAsynchronous ;
- return false ;
- }
-
- // Returns true if the request corresponding to RequestId identifier was completed
- inline bool MPIAccess::MPICompleted( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return aRequestStruct->MPICompleted;
- return true ;
- }
-
- // Returns the MPI_datatype of the request corresponding to RequestId identifier
- inline MPI_Datatype MPIAccess::MPIDatatype( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return aRequestStruct->MPIDatatype;
- return MPI_DATATYPE_NULL;
- }
-
- // Returns the size of the receiving message of the request corresponding to
- // RequestId identifier
- inline int MPIAccess::MPIOutCount( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return aRequestStruct->MPIOutCount;
- return 0 ;
- }
-
- // Increments the previous tag value (cyclically)
- // Look at MPIAccess::NewSendTag/NewRecvTag in MPIAccess.cxx
- inline int MPIAccess::incrTag( int prevtag )
- {
- int tag;
- if ( (prevtag % MODULO_TAG) == _message_time )
- tag = ((prevtag/MODULO_TAG)*MODULO_TAG);
- else
- tag = ((prevtag/MODULO_TAG + 1)*MODULO_TAG);
- if ( tag > _max_MPI_tag )
- tag = _base_MPI_tag ;
- return tag ;
- }
-
- // Returns the MPITag with the method-type field
- // Look at MPIAccess::NewSendTag/NewRecvTag in MPIAccess.cxx
- inline int MPIAccess::valTag( int tag, int method )
- {
- return ((tag/MODULO_TAG)*MODULO_TAG) + method;
- }
-
- // Remove a Request identifier from the list _RecvRequests/_SendRequests for
- // the corresponding target.
- inline void MPIAccess::deleteSendRecvRequest( int RequestId )
- {
- if ( _trace )
- std::cout << "MPIAccess::DeleteSendRecvRequest" << _my_rank
- << "( " << RequestId << " ) " << std::endl ;
- if ( MPIIsRecv( RequestId ) )
- _recv_requests[ MPITarget( RequestId ) ].remove( RequestId );
- else
- _send_requests[ MPITarget( RequestId ) ].remove( RequestId );
- }
-
- // Delete the MPI structure MPI_status * of a ReaquestId
- inline void MPIAccess::deleteStatus( int RequestId )
- {
- if ( _map_of_request_struct[RequestId]->MPIStatus != NULL )
- {
- delete _map_of_request_struct[RequestId]->MPIStatus ;
- clearMPIStatus( RequestId ) ;
- }
- }
-
- // Returns the MPI structure MPI_request * of a RequestId
- inline MPI_Request * MPIAccess::MPIRequest( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- return &aRequestStruct->MPIRequest;
- return &mpirequestnull ;
- }
-
- // Returns the MPI structure MPI_status * of a RequestId
- inline MPI_Status * MPIAccess::MPIStatus( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ];
- if ( aRequestStruct )
- return aRequestStruct->MPIStatus;
- return NULL ;
- }
-
- // Set the MPICompleted field of the structure Request corresponding to RequestId
- // identifier with the value completed
- inline void MPIAccess::setMPICompleted( int RequestId , bool completed )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- aRequestStruct->MPICompleted = completed;
- }
-
- // Set the MPIOutCount field of the structure Request corresponding to RequestId
- // identifier with the value outcount
- inline void MPIAccess::setMPIOutCount( int RequestId , int outcount )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- aRequestStruct->MPIOutCount = outcount;
- }
-
- // Nullify the MPIStatusfield of the structure Request corresponding to RequestId
- // identifier
- inline void MPIAccess::clearMPIStatus( int RequestId )
- {
- struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
- if ( aRequestStruct )
- aRequestStruct->MPIStatus = NULL ;
- }
-
- // Returns the _MessageIdent enum value corresponding to the MPI_Datatype datatype
- // Look at MPIAccess::NewSendTag/NewRecvTag in MPIAccess.cxx
- inline _MessageIdent MPIAccess::methodId( MPI_Datatype adatatype ) const
- {
- _MessageIdent aMethodIdent ;
- if ( adatatype == _MPI_TIME )
- aMethodIdent = _message_time;
- else if ( adatatype == MPI_INT )
- aMethodIdent = _message_int ;
- else if ( adatatype == MPI_DOUBLE )
- aMethodIdent = _message_double ;
- else
- aMethodIdent = _message_unknown ;
- return aMethodIdent ;
- }
-
- // Returns the MPI_Datatype corresponding to the _MessageIdent enum aMethodIdent
- inline MPI_Datatype MPIAccess::datatype( _MessageIdent aMethodIdent ) const
- {
- MPI_Datatype aDataType ;
- switch( aMethodIdent )
- {
- case _message_time :
- aDataType = _MPI_TIME ;
- break ;
- case _message_int :
- aDataType = MPI_INT ;
- break ;
- case _message_double :
- aDataType = MPI_DOUBLE ;
- break ;
- default :
- aDataType = (MPI_Datatype) -1 ;
- break ;
- }
- return aDataType ;
- }
-
- std::ostream & operator<< (std::ostream &,const _MessageIdent &);
-
- std::ostream & operator<< (std::ostream &,const TimeMessage &);
-
-}
-
-#endif
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "MPIAccess/MPIAccess.hxx"
+
+#include "InterpolationUtils.hxx"
+
+#include <iostream>
+
+using namespace std;
+
+namespace MEDCoupling
+{
+ /*!
+ \anchor MPIAccess-det
+ \class MPIAccess
+
+ The class \a MPIAccess is the gateway to the MPI library.
+ It is a helper class that gathers the calls to the MPI
+ library that are made in the ParaMEDMEM library. This centralization
+ makes it easier to collect information about the communication
+ performed in the library. With MPIAccess, tags are managed automatically
+ and asynchronous operations are easier to handle.
+
+ An MPIAccess is typically created after the MPI_Init() call in a program
+ and is afterwards passed as a parameter to the constructors of ParaMEDMEM
+ objects, so that they access the MPI library through it.
+
+ As an example, the following code sets up a processor group made of process 0 and creates an MPIAccess for it.
+
+ \verbatim
+ #include "MPIAccess.hxx"
+ #include "ProcessorGroup.hxx"
+
+ int main(int argc, char** argv)
+ {
+ //initialization
+ MPI_Init(&argc, &argv);
+ MEDCoupling::CommInterface comm_interface;
+
+ //setting up a processor group with proc 0
+ set<int> procs;
+ procs.insert(0);
+ MEDCoupling::ProcessorGroup group(procs, comm_interface);
+
+ MEDCoupling::MPIAccess mpi_access(group);
+
+ //cleanup
+ MPI_Finalize();
+ }
+ \endverbatim
+ */
+
+
+ /*! Creates an MPIAccess that is based on the processors included in \a ProcessorGroup.
+ This class may be used for easier access to the MPI API.
+
+ \param ProcessorGroup MPIProcessorGroup object giving access to group management
+ \param BaseTag and MaxTag define the range of tags to be used.
+ Tags are managed by MPIAccess and are cyclically incremented.
+ Each Send or Receive operation returns a new RequestId to the caller.
+ That RequestId may be used to manage the operation (Wait, Test, status
+ checks, etc.). The MPITag internally managed by MPIAccess is used as the
+ "tag" argument in the MPI calls.
+ */
+
+ MPIAccess::MPIAccess(MPIProcessorGroup * ProcessorGroup, int BaseTag, int MaxTag) :
+ _comm_interface( ProcessorGroup->getCommInterface() ) ,
+ _intra_communicator( ProcessorGroup->getComm() )
+ {
+ void *v ;
+ int mpitagub ;
+ int flag ;
+ //MPI_Comm_get_attr does not run with _IntraCommunicator ???
+ //MPI_Comm_get_attr(*_IntraCommunicator,MPID_TAG_UB,&mpitagub,&flag) ;
+ MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&v,&flag) ;
+ mpitagub=*(reinterpret_cast<int*>(v));
+ if ( BaseTag != 0 )
+ BaseTag = (BaseTag/MODULO_TAG)*MODULO_TAG ;
+ if ( MaxTag == 0 )
+ MaxTag = (mpitagub/MODULO_TAG-1)*MODULO_TAG ;
+ MPI_Comm_rank( *_intra_communicator, &_my_rank ) ;
+    if ( !flag || (BaseTag < 0) || (BaseTag >= MaxTag) || (MaxTag > mpitagub) )
+ throw INTERP_KERNEL::Exception("wrong call to MPIAccess constructor");
+
+ _processor_group = ProcessorGroup ;
+ _processor_group_size = _processor_group->size() ;
+ _trace = false ;
+
+ _base_request = -1 ;
+ _max_request = std::numeric_limits<int>::max() ;
+ _request = _base_request ;
+
+ _base_MPI_tag = BaseTag ;
+ _max_MPI_tag = MaxTag ;
+
+ _send_request = new int[ _processor_group_size ] ;
+ _recv_request = new int[ _processor_group_size ] ;
+
+ _send_requests.resize( _processor_group_size ) ;
+ _recv_requests.resize( _processor_group_size ) ;
+
+ _send_MPI_tag = new int[ _processor_group_size ] ;
+ _recv_MPI_Tag = new int[ _processor_group_size ] ;
+ int i ;
+ for (i = 0 ; i < _processor_group_size ; i++ )
+ {
+ _send_request[ i ] = _max_request ;
+ _recv_request[ i ] = _max_request ;
+ _send_requests[ i ].resize(0) ;
+ _recv_requests[ i ].resize(0) ;
+ _send_MPI_tag[ i ] = _max_MPI_tag ;
+ _recv_MPI_Tag[ i ] = _max_MPI_tag ;
+ }
+ MPI_Datatype array_of_types[3] ;
+ array_of_types[0] = MPI_DOUBLE ;
+ array_of_types[1] = MPI_DOUBLE ;
+ array_of_types[2] = MPI_INT ;
+ int array_of_blocklengths[3] ;
+ array_of_blocklengths[0] = 1 ;
+ array_of_blocklengths[1] = 1 ;
+ array_of_blocklengths[2] = 1 ;
+ MPI_Aint array_of_displacements[3] ;
+ array_of_displacements[0] = 0 ;
+ array_of_displacements[1] = sizeof(double) ;
+ array_of_displacements[2] = 2*sizeof(double) ;
+ MPI_Type_create_struct(3, array_of_blocklengths, array_of_displacements,
+ array_of_types, &_MPI_TIME) ;
+ MPI_Type_commit(&_MPI_TIME) ;
+ }
+
+ MPIAccess::~MPIAccess()
+ {
+ delete [] _send_request ;
+ delete [] _recv_request ;
+ delete [] _send_MPI_tag ;
+ delete [] _recv_MPI_Tag ;
+ MPI_Type_free(&_MPI_TIME) ;
+ }
+
+  /*
+    MPIAccess and "RequestIds" :
+    ============================
+
+    . WARNING : In the specification document, the distinction
+    between "MPITags" and "RequestIds" is not clear. "MPITags"
+    are arguments of calls to MPI. "RequestIds" do not concern
+    calls to MPI; they are named "tag" as in/out arguments of the
+    MPIAccess API in the specification documentation.
+    In the implementation, however, they carry the proper name
+    RequestId (or RecvRequestId/SendRequestId).
+
+    . Each MPI write/read request issued via MPIAccess returns an
+    identifier, the "RequestId".
+    That identifier maps to a RequestStruct structure of
+    MPIAccess; the structure is reached through the map
+    "_map_of_request_struct".
+    The RequestStruct structure makes it possible to manage the
+    MPI_Request and MPI_Status * structures of MPI, and to obtain
+    information about the request :
+    target, send/recv, tag, [a]synchronous, type, outcount.
+
+    . That identifier is used to control an asynchronous request
+    via MPIAccess : Wait, Test, Probe, etc.
+
+    . In practice a "RequestId" is simply an integer in the interval
+    [0 , 2**31-1]. There is a single cyclic counter shared by
+    [I]Sends and [I]Recvs.
+
+    . The "RequestIds" and their associated structures give an easy
+    way to manage asynchronous communications.
+    For example we have mpi_access->wait( int RequestId ) instead of
+    MPI_Wait(MPI_Request *request, MPI_Status *status).
+
+    . The MPIAccess API can return the "SendRequestIds" of a "target",
+    the "RecvRequestIds" from a "source", the "SendRequestIds" of
+    all "targets" or the "RecvRequestIds" of all "sources".
+    That avoids having to manage them in Presentation-ParaMEDMEM.
+  */
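+
+  /* A minimal sketch of RequestId-based control, assuming an initialized
+     MPIAccess object mpi_access and a hypothetical peer rank :
+
+       int req_id ;
+       double buf[10] ;
+       mpi_access.IRecv( buf, 10, MPI_DOUBLE, peer, req_id ) ; // req_id is a RequestId
+       // ... overlap computation with the communication ...
+       mpi_access.wait( req_id ) ;  // instead of MPI_Wait(&request, &status)
+       int outcount = mpi_access.MPIOutCount( req_id ) ;
+  */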
+
+ int MPIAccess::newRequest( MPI_Datatype datatype, int tag , int destsourcerank ,
+ bool fromsourcerank , bool asynchronous )
+ {
+ RequestStruct *mpiaccessstruct = new RequestStruct;
+ mpiaccessstruct->MPITag = tag ;
+ mpiaccessstruct->MPIDatatype = datatype ;
+ mpiaccessstruct->MPITarget = destsourcerank ;
+ mpiaccessstruct->MPIIsRecv = fromsourcerank ;
+ MPI_Status *aStatus = new MPI_Status ;
+ mpiaccessstruct->MPIStatus = aStatus ;
+ mpiaccessstruct->MPIAsynchronous = asynchronous ;
+ mpiaccessstruct->MPICompleted = !asynchronous ;
+ mpiaccessstruct->MPIOutCount = -1 ;
+ if ( !asynchronous )
+ {
+ mpiaccessstruct->MPIRequest = MPI_REQUEST_NULL ;
+ mpiaccessstruct->MPIStatus->MPI_SOURCE = destsourcerank ;
+ mpiaccessstruct->MPIStatus->MPI_TAG = tag ;
+ mpiaccessstruct->MPIStatus->MPI_ERROR = MPI_SUCCESS ;
+ }
+ if ( _request == _max_request )
+ _request = _base_request ;
+ _request += 1 ;
+ _map_of_request_struct[_request] = mpiaccessstruct ;
+ if ( fromsourcerank )
+ _recv_request[ destsourcerank ] = _request;
+ else
+ _send_request[ destsourcerank ] = _request;
+ if ( _trace )
+ cout << "NewRequest" << _my_rank << "( " << _request << " ) "
+ << mpiaccessstruct << endl ;
+ return _request ;
+ }
+
+  /*
+    MPIAccess and "tags" (or "MPITags") :
+    =====================================
+
+    . The constructor makes it possible to choose the interval of
+    tags to use : [BaseTag , MaxTag].
+    The default is [ 0 , MPI_TAG_UB], MPI_TAG_UB being the maximum
+    value allowed by the MPI implementation (at least 32767 = 2**15-1).
+    On the machine "awa" with the LAM implementation, for example,
+    the MPI_TAG_UB value is 7353944. The MPI standard specifies that
+    this value is the same in all processes started by mpirun.
+    When the same IntraCommunicator is used in a process
+    for several distinct data flows (or when several IntraCommunicators
+    share processes), choosing distinct intervals avoids ambiguity
+    and may help debugging.
+
+    . In MPIAccess the tags have two parts (MODULO_TAG = 10) :
+    + The last decimal digit corresponds to the MPI_Datatype ( 1 for
+    TimeMessages, 2 for MPI_INT and 3 for MPI_DOUBLE).
+    + The value of the other digits is a cyclic number incremented for
+    each message.
+    + A TimeMessage and the associated DataMessage share the same cyclic
+    number (but the types differ and therefore the tags too).
+
+    . For a Send of a message from a process "source" to a process
+    "target", the process source holds _send_MPI_tag[target]
+    (it contains the last "tag" used for the Send of a
+    message to the process target).
+    And the process "target" which receives that message holds
+    _recv_MPI_Tag[source] (it contains the last "tag" used for the Recv
+    of messages from the process source).
+    Naturally, per the MPI standard, these tag values must match.
+  */
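+
+  /* A worked example of the tag layout (MODULO_TAG = 10) : tag 43 decomposes
+     into the cyclic message number 4 (43/10) and the method digit 3 (43%10,
+     i.e. _message_double). The TimeMessage associated with that exchange
+     carries tag 41 : same cyclic number 4, method digit 1 (_message_time).
+  */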
+ int MPIAccess::newSendTag( MPI_Datatype datatype, int destrank , int method ,
+ bool asynchronous, int &RequestId )
+ {
+ int tag ;
+ tag = incrTag( _send_MPI_tag[destrank] ) ;
+ tag = valTag( tag, method ) ;
+ _send_MPI_tag[ destrank ] = tag ;
+ RequestId = newRequest( datatype, tag, destrank , false , asynchronous ) ;
+ _send_request[ destrank ] = RequestId ;
+ _send_requests[ destrank ].push_back( RequestId ) ;
+ return tag ;
+ }
+
+ int MPIAccess::newRecvTag( MPI_Datatype datatype, int sourcerank , int method ,
+ bool asynchronous, int &RequestId )
+ {
+ int tag ;
+ tag = incrTag( _recv_MPI_Tag[sourcerank] ) ;
+ tag = valTag( tag, method ) ;
+ _recv_MPI_Tag[ sourcerank ] = tag ;
+ RequestId = newRequest( datatype, tag , sourcerank , true , asynchronous ) ;
+ _recv_request[ sourcerank ] = RequestId ;
+ _recv_requests[ sourcerank ].push_back( RequestId ) ;
+ return tag ;
+ }
+
+  // Returns the total number of SendRequestIds; it may be used to allocate
+  // ArrayOfSendRequests for the call to sendRequestIds
+ int MPIAccess::sendRequestIdsSize()
+ {
+ int size = 0;
+ for (int i = 0 ; i < _processor_group_size ; i++ )
+ size += (int)_send_requests[ i ].size() ;
+ return size ;
+ }
+
+  // Fills ArrayOfSendRequests, of dimension "size", with all the
+  // SendRequestIds and returns their number
+ int MPIAccess::sendRequestIds(int size, int *ArrayOfSendRequests)
+ {
+ int destrank ;
+ int i = 0 ;
+ for ( destrank = 0 ; destrank < _processor_group_size ; destrank++ )
+ {
+ list< int >::const_iterator iter ;
+ for (iter = _send_requests[ destrank ].begin() ; iter != _send_requests[destrank].end() ; iter++ )
+ ArrayOfSendRequests[i++] = *iter ;
+ }
+ return i ;
+ }
+
+  // Returns the total number of RecvRequestIds; it may be used to allocate
+  // ArrayOfRecvRequests for the call to recvRequestIds
+ int MPIAccess::recvRequestIdsSize()
+ {
+ int size = 0 ;
+ for (int i = 0 ; i < _processor_group_size ; i++ )
+ size += (int)_recv_requests[ i ].size() ;
+ return size ;
+ }
+
+  // Fills ArrayOfRecvRequests, of dimension "size", with all the
+  // RecvRequestIds and returns their number
+ int MPIAccess::recvRequestIds(int size, int *ArrayOfRecvRequests)
+ {
+ int sourcerank ;
+ int i = 0 ;
+ for ( sourcerank = 0 ; sourcerank < _processor_group_size ; sourcerank++ )
+ {
+ list< int >::const_iterator iter ;
+ for (iter = _recv_requests[ sourcerank ].begin() ; iter != _recv_requests[sourcerank].end() ; iter++ )
+ ArrayOfRecvRequests[i++] = *iter ;
+ }
+ return i ;
+ }
+
+  // Fills ArrayOfSendRequests, of dimension "size", with all the
+  // SendRequestIds bound for a destination rank, and returns their number
+ int MPIAccess::sendRequestIds(int destrank, int size, int *ArrayOfSendRequests)
+ {
+ if (size < (int)_send_requests[destrank].size() )
+ throw INTERP_KERNEL::Exception("wrong call to MPIAccess::SendRequestIds");
+ int i = 0 ;
+ list< int >::const_iterator iter ;
+ for (iter = _send_requests[ destrank ].begin() ; iter != _send_requests[destrank].end() ; iter++ )
+ ArrayOfSendRequests[i++] = *iter ;
+ return (int)_send_requests[destrank].size() ;
+ }
+
+  // Fills ArrayOfRecvRequests, of dimension "size", with all the
+  // RecvRequestIds coming from a source rank, and returns their number
+ int MPIAccess::recvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests)
+ {
+ if (size < (int)_recv_requests[sourcerank].size() )
+ throw INTERP_KERNEL::Exception("wrong call to MPIAccess::RecvRequestIds");
+ int i = 0 ;
+ list< int >::const_iterator iter ;
+ for (iter = _recv_requests[ sourcerank ].begin() ; iter != _recv_requests[sourcerank].end() ; iter++ )
+ ArrayOfRecvRequests[i++] = *iter ;
+ return (int)_recv_requests[sourcerank].size() ;
+ }
+
+ // Send in synchronous mode count values of type datatype from buffer to target
+  // (returns a RequestId identifier even though the corresponding structure is
+  // deleted : this is only to keep the same signature as the asynchronous mode)
+ int MPIAccess::send(void* buffer, int count, MPI_Datatype datatype, int target, int &RequestId)
+ {
+ int sts = MPI_SUCCESS ;
+ RequestId = -1 ;
+ if ( count )
+ {
+ _MessageIdent aMethodIdent = methodId( datatype ) ;
+ int MPItag = newSendTag( datatype, target , aMethodIdent , false , RequestId ) ;
+ if ( aMethodIdent == _message_time )
+ {
+ TimeMessage *aTimeMsg = (TimeMessage *) buffer ;
+ aTimeMsg->tag = MPItag ;
+ }
+ deleteRequest( RequestId ) ;
+ sts = _comm_interface.send(buffer, count, datatype, target, MPItag,
+ *_intra_communicator ) ;
+ if ( _trace )
+ cout << "MPIAccess::Send" << _my_rank << " SendRequestId "
+ << RequestId << " count " << count << " target " << target
+ << " MPItag " << MPItag << endl ;
+ }
+ return sts ;
+ }
+
+ // Receive (read) in synchronous mode count values of type datatype in buffer from source
+  // (returns a RequestId identifier even though the corresponding structure is
+  // deleted : this is only to keep the same signature as the asynchronous mode)
+ // The output argument OutCount is optional : *OutCount <= count
+ int MPIAccess::recv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId, int *OutCount)
+ {
+ int sts = MPI_SUCCESS ;
+ RequestId = -1 ;
+ if ( OutCount != NULL )
+ *OutCount = -1 ;
+ if ( count )
+ {
+ _MessageIdent aMethodIdent = methodId( datatype ) ;
+ int MPItag = newRecvTag( datatype, source , aMethodIdent , false , RequestId ) ;
+ sts = _comm_interface.recv(buffer, count, datatype, source, MPItag,
+ *_intra_communicator , MPIStatus( RequestId ) ) ;
+ int outcount = 0 ;
+ if ( sts == MPI_SUCCESS )
+ {
+ MPI_Datatype datatype2 = MPIDatatype( RequestId ) ;
+ _comm_interface.getCount(MPIStatus( RequestId ), datatype2, &outcount ) ;
+ setMPIOutCount( RequestId , outcount ) ;
+ setMPICompleted( RequestId , true ) ;
+ deleteStatus( RequestId ) ;
+ }
+ if ( OutCount != NULL )
+ *OutCount = outcount ;
+ if ( _trace )
+ cout << "MPIAccess::Recv" << _my_rank << " RecvRequestId "
+ << RequestId << " count " << count << " source " << source
+ << " MPItag " << MPItag << endl ;
+ deleteRequest( RequestId ) ;
+ }
+ return sts ;
+ }
+
+ // Send in asynchronous mode count values of type datatype from buffer to target
+ // Returns RequestId identifier.
+ int MPIAccess::ISend(void* buffer, int count, MPI_Datatype datatype, int target, int &RequestId)
+ {
+ int sts = MPI_SUCCESS ;
+ RequestId = -1 ;
+ if ( count )
+ {
+ _MessageIdent aMethodIdent = methodId( datatype ) ;
+ int MPItag = newSendTag( datatype, target , aMethodIdent , true , RequestId ) ;
+ if ( aMethodIdent == _message_time )
+ {
+ TimeMessage *aTimeMsg = (TimeMessage *) buffer ;
+ aTimeMsg->tag = MPItag ;
+ }
+ MPI_Request *aSendRequest = MPIRequest( RequestId ) ;
+ if ( _trace )
+ {
+ cout << "MPIAccess::ISend" << _my_rank << " ISendRequestId "
+ << RequestId << " count " << count << " target " << target
+ << " MPItag " << MPItag << endl ;
+ if ( MPItag == 1 )
+ cout << "MPIAccess::ISend" << _my_rank << " time "
+ << ((TimeMessage *)buffer)->time << " " << ((TimeMessage *)buffer)->deltatime
+ << endl ;
+ }
+ sts = _comm_interface.Isend(buffer, count, datatype, target, MPItag,
+ *_intra_communicator , aSendRequest) ;
+ }
+ return sts ;
+ }
+
+ // Receive (read) in asynchronous mode count values of type datatype in buffer from source
+ // returns RequestId identifier.
+ int MPIAccess::IRecv(void* buffer, int count, MPI_Datatype datatype, int source, int &RequestId)
+ {
+ int sts = MPI_SUCCESS ;
+ RequestId = -1 ;
+ if ( count )
+ {
+ _MessageIdent aMethodIdent = methodId( datatype ) ;
+ int MPItag = newRecvTag( datatype, source , aMethodIdent , true , RequestId ) ;
+ MPI_Request *aRecvRequest = MPIRequest( RequestId ) ;
+ if ( _trace )
+ {
+ cout << "MPIAccess::IRecv" << _my_rank << " IRecvRequestId "
+ << RequestId << " count " << count << " source " << source
+ << " MPItag " << MPItag << endl ;
+ if ( MPItag == 1 )
+ cout << "MPIAccess::ISend" << _my_rank << " time "
+ << ((TimeMessage *)buffer)->time << " " << ((TimeMessage *)buffer)->deltatime
+ << endl ;
+ }
+ sts = _comm_interface.Irecv(buffer, count, datatype, source, MPItag,
+ *_intra_communicator , aRecvRequest) ;
+ }
+ return sts ;
+ }
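+
+  // A minimal asynchronous exchange sketch (mpi_access, peer, in, out and n
+  // are hypothetical names) :
+  //   int send_id, recv_id ;
+  //   mpi_access.ISend( out, n, MPI_DOUBLE, peer, send_id ) ;
+  //   mpi_access.IRecv( in, n, MPI_DOUBLE, peer, recv_id ) ;
+  //   int ids[2] = { send_id, recv_id } ;
+  //   mpi_access.waitAll( 2, ids ) ;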
+
+ // Perform a Send and a Recv in synchronous mode
+ int MPIAccess::sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dest, int &SendRequestId,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int source, int &RecvRequestId, int *OutCount)
+ {
+ int sts = MPI_SUCCESS ;
+ SendRequestId = -1 ;
+ RecvRequestId = -1 ;
+ if ( recvcount )
+ sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ;
+ int outcount = -1 ;
+ if ( _trace )
+ cout << "MPIAccess::SendRecv" << _my_rank << " IRecv RecvRequestId "
+ << RecvRequestId << endl ;
+ if ( sts == MPI_SUCCESS )
+ {
+ if ( sendcount )
+ sts = send(sendbuf, sendcount, sendtype, dest, SendRequestId) ;
+ if ( _trace )
+ cout << "MPIAccess::SendRecv" << _my_rank << " Send SendRequestId "
+ << SendRequestId << endl ;
+ if ( sts == MPI_SUCCESS && recvcount )
+ {
+ sts = wait( RecvRequestId ) ;
+ outcount = MPIOutCount( RecvRequestId ) ;
+ if ( _trace )
+ cout << "MPIAccess::SendRecv" << _my_rank << " IRecv RecvRequestId "
+ << RecvRequestId << " outcount " << outcount << endl ;
+ }
+ }
+ if ( OutCount != NULL )
+ {
+ *OutCount = outcount ;
+ if ( _trace )
+ cout << "MPIAccess::SendRecv" << _my_rank << " *OutCount = " << *OutCount
+ << endl ;
+ }
+ deleteRequest( RecvRequestId ) ;
+ return sts ;
+ }
+
+ // Perform a Send and a Recv in asynchronous mode
+ int MPIAccess::ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dest, int &SendRequestId,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int source, int &RecvRequestId)
+ {
+ int sts = MPI_SUCCESS ;
+ SendRequestId = -1 ;
+ RecvRequestId = -1 ;
+ if ( recvcount )
+ sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ;
+ if ( sts == MPI_SUCCESS )
+ if ( sendcount )
+ sts = ISend(sendbuf, sendcount, sendtype, dest, SendRequestId) ;
+ return sts ;
+ }
+
+ // Perform a wait of a Send or Recv asynchronous Request
+ // Do nothing for a synchronous Request
+ // Manage MPI_Request * and MPI_Status * structure
+ int MPIAccess::wait( int RequestId )
+ {
+ int status = MPI_SUCCESS ;
+ if ( !MPICompleted( RequestId ) )
+ {
+ if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL )
+ {
+ if ( _trace )
+ cout << "MPIAccess::Wait" << _my_rank << " -> wait( " << RequestId
+ << " ) MPIRequest " << MPIRequest( RequestId ) << " MPIStatus "
+ << MPIStatus( RequestId ) << " MPITag " << MPITag( RequestId )
+ << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ;
+ status = _comm_interface.wait(MPIRequest( RequestId ), MPIStatus( RequestId )) ;
+ }
+ else
+ {
+ if ( _trace )
+ cout << "MPIAccess::Wait" << _my_rank << " MPIRequest == MPI_REQUEST_NULL"
+ << endl ;
+ }
+ setMPICompleted( RequestId , true ) ;
+ if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) )
+ {
+ MPI_Datatype datatype = MPIDatatype( RequestId ) ;
+ int outcount ;
+ status = _comm_interface.getCount(MPIStatus( RequestId ), datatype,
+ &outcount ) ;
+ if ( status == MPI_SUCCESS )
+ {
+ setMPIOutCount( RequestId , outcount ) ;
+ deleteStatus( RequestId ) ;
+ if ( _trace )
+ cout << "MPIAccess::Wait" << _my_rank << " RequestId " << RequestId
+ << "MPIIsRecv " << MPIIsRecv( RequestId ) << " outcount " << outcount
+ << endl ;
+ }
+ else
+ {
+ if ( _trace )
+ cout << "MPIAccess::Wait" << _my_rank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
+ }
+ }
+ else
+ {
+ if ( _trace )
+ cout << "MPIAccess::Wait" << _my_rank << " MPIIsRecv " << MPIIsRecv( RequestId )
+ << " MPIOutCount " << MPIOutCount( RequestId ) << endl ;
+ }
+ }
+ if ( _trace )
+ cout << "MPIAccess::Wait" << _my_rank << " RequestId " << RequestId
+ << " Request " << MPIRequest( RequestId )
+ << " Status " << MPIStatus( RequestId ) << " MPICompleted "
+ << MPICompleted( RequestId ) << " MPIOutCount " << MPIOutCount( RequestId )
+ << endl ;
+ return status ;
+ }
+
+ // Perform a "test" of a Send or Recv asynchronous Request
+ // If the request is done, returns true in the flag argument
+ // If the request is not finished, returns false in the flag argument
+ // Do nothing for a synchronous Request
+  // Manage MPI_Request * and MPI_Status * structure
+ int MPIAccess::test(int RequestId, int &flag)
+ {
+ int status = MPI_SUCCESS ;
+ flag = MPICompleted( RequestId ) ;
+ if ( _trace )
+ cout << "MPIAccess::Test" << _my_rank << " flag " << flag ;
+ if ( MPIIsRecv( RequestId ) )
+ {
+ if ( _trace )
+ cout << " Recv" ;
+ }
+ else
+ {
+ if ( _trace )
+ cout << " Send" ;
+ }
+ if( _trace )
+ cout << "Request" << RequestId << " " << MPIRequest( RequestId )
+ << " Status " << MPIStatus( RequestId ) << endl ;
+ if ( !flag )
+ {
+ if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL )
+ {
+ if ( _trace )
+ cout << "MPIAccess::Test" << _my_rank << " -> test( " << RequestId
+ << " ) MPIRequest " << MPIRequest( RequestId )
+ << " MPIStatus " << MPIStatus( RequestId )
+ << " MPITag " << MPITag( RequestId )
+ << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ;
+ status = _comm_interface.test(MPIRequest( RequestId ), &flag,
+ MPIStatus( RequestId )) ;
+ }
+ else
+ {
+ if ( _trace )
+ cout << "MPIAccess::Test" << _my_rank << " MPIRequest == MPI_REQUEST_NULL"
+ << endl ;
+ }
+ if ( flag )
+ {
+ setMPICompleted( RequestId , true ) ;
+ if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) )
+ {
+ int outcount ;
+ MPI_Datatype datatype = MPIDatatype( RequestId ) ;
+ status = _comm_interface.getCount( MPIStatus( RequestId ), datatype,
+ &outcount ) ;
+ if ( status == MPI_SUCCESS )
+ {
+ setMPIOutCount( RequestId , outcount ) ;
+ deleteStatus( RequestId ) ;
+ if ( _trace )
+ cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
+ }
+ else
+ {
+ if ( _trace )
+ cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
+ }
+ }
+ else
+ {
+ if ( _trace )
+ cout << "MPIAccess::Test" << _my_rank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " MPIOutCount "
+ << MPIOutCount( RequestId ) << endl ;
+ }
+ }
+ }
+ if ( _trace )
+ cout << "MPIAccess::Test" << _my_rank << " RequestId " << RequestId
+ << " flag " << flag << " MPICompleted " << MPICompleted( RequestId )
+ << " MPIOutCount " << MPIOutCount( RequestId ) << endl ;
+ return status ;
+ }
+
+ int MPIAccess::waitAny(int count, int *array_of_RequestIds, int &RequestId)
+ {
+ int status = MPI_ERR_OTHER ;
+ RequestId = -1 ;
+ cout << "MPIAccess::WaitAny not yet implemented" << endl ;
+ return status ;
+ }
+
+ int MPIAccess::testAny(int count, int *array_of_RequestIds, int &RequestId, int &flag)
+ {
+ int status = MPI_ERR_OTHER ;
+ RequestId = -1 ;
+ flag = 0 ;
+ cout << "MPIAccess::TestAny not yet implemented" << endl ;
+ return status ;
+ }
+
+ // Perform a wait of each Send or Recv asynchronous Request of the array
+ // array_of_RequestIds of size "count".
+ // That array may be filled with a call to SendRequestIdsSize or RecvRequestIdsSize
+ // Do nothing for a synchronous Request
+ // Manage MPI_Request * and MPI_Status * structure
+ int MPIAccess::waitAll(int count, int *array_of_RequestIds)
+ {
+ if ( _trace )
+ cout << "WaitAll" << _my_rank << " : count " << count << endl ;
+ int status ;
+ int retstatus = MPI_SUCCESS ;
+ int i ;
+ for ( i = 0 ; i < count ; i++ )
+ {
+ if ( _trace )
+ cout << "WaitAll" << _my_rank << " " << i << " -> Wait( "
+ << array_of_RequestIds[i] << " )" << endl ;
+ status = wait( array_of_RequestIds[i] ) ;
+ if ( status != MPI_SUCCESS )
+ retstatus = status ;
+ }
+ if ( _trace )
+ cout << "EndWaitAll" << _my_rank << endl ;
+ return retstatus ;
+ }
+
+ // Perform a "test" of each Send or Recv asynchronous Request of the array
+ // array_of_RequestIds of size "count".
+ // That array may be filled with a call to SendRequestIdsSize or RecvRequestIdsSize
+ // If all requests are done, returns true in the flag argument
+ // If all requests are not finished, returns false in the flag argument
+ // Do nothing for a synchronous Request
+ // Manage MPI_Request * and MPI_Status * structure
+ int MPIAccess::testAll(int count, int *array_of_RequestIds, int &flag)
+ {
+ if ( _trace )
+ cout << "TestAll" << _my_rank << " : count " << count << endl ;
+ int status ;
+ int retstatus = MPI_SUCCESS ;
+ bool retflag = true ;
+ int i ;
+ for ( i = 0 ; i < count ; i++ )
+ {
+ status = test( array_of_RequestIds[i] , flag ) ;
+ retflag = retflag && (flag != 0) ;
+ if ( status != MPI_SUCCESS )
+ retstatus = status ;
+ }
+ flag = retflag ;
+ if ( _trace )
+ cout << "EndTestAll" << _my_rank << endl ;
+ return retstatus ;
+ }
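+
+  // testAll can drive a non-blocking completion loop (hypothetical sketch) :
+  //   int flag = 0 ;
+  //   while ( !flag )
+  //     {
+  //       mpi_access.testAll( n, ids, flag ) ;
+  //       // ... do useful work while some requests are still in flight ...
+  //     }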
+
+ int MPIAccess::waitSome(int count, int *array_of_RequestIds, int outcount,
+ int *outarray_of_RequestIds)
+ {
+ int status = MPI_ERR_OTHER ;
+ cout << "MPIAccess::WaitSome not yet implemented" << endl ;
+ return status ;
+ }
+
+ int MPIAccess::testSome(int count, int *array_of_RequestIds, int outcounts,
+ int *outarray_of_RequestIds)
+ {
+ int status = MPI_ERR_OTHER ;
+ cout << "MPIAccess::TestSome not yet implemented" << endl ;
+ return status ;
+ }
+
+ // Probe checks if a message is available for read from FromSource rank.
+ // Returns the corresponding source, MPITag, datatype and outcount
+  // Probe is a blocking call which waits until a message is available
+ int MPIAccess::probe(int FromSource, int &source, int &MPITag,
+ MPI_Datatype &myDatatype, int &outcount)
+ {
+ MPI_Status aMPIStatus ;
+ int sts = _comm_interface.probe( FromSource, MPI_ANY_TAG,
+ *_intra_communicator , &aMPIStatus ) ;
+ if ( sts == MPI_SUCCESS )
+ {
+ source = aMPIStatus.MPI_SOURCE ;
+ MPITag = aMPIStatus.MPI_TAG ;
+ int MethodId = (MPITag % MODULO_TAG) ;
+ myDatatype = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
+ _comm_interface.getCount(&aMPIStatus, myDatatype, &outcount ) ;
+ if ( _trace )
+ cout << "MPIAccess::Probe" << _my_rank << " FromSource " << FromSource
+ << " source " << source << " MPITag " << MPITag << " MethodId "
+ << MethodId << " datatype " << myDatatype << " outcount " << outcount
+ << endl ;
+ }
+ else
+ {
+ source = -1 ;
+ MPITag = -1 ;
+        myDatatype = MPI_DATATYPE_NULL ;
+ outcount = -1 ;
+ }
+ return sts ;
+ }
+
+ // IProbe checks if a message is available for read from FromSource rank.
+ // If there is a message available, returns the corresponding source,
+ // MPITag, datatype and outcount with flag = true
+ // If not, returns flag = false
+ int MPIAccess::IProbe(int FromSource, int &source, int &MPITag,
+ MPI_Datatype &myDataType, int &outcount, int &flag)
+ {
+ MPI_Status aMPIStatus ;
+ int sts = _comm_interface.Iprobe( FromSource, MPI_ANY_TAG,
+ *_intra_communicator , &flag,
+ &aMPIStatus ) ;
+ if ( sts == MPI_SUCCESS && flag )
+ {
+ source = aMPIStatus.MPI_SOURCE ;
+ MPITag = aMPIStatus.MPI_TAG ;
+ int MethodId = (MPITag % MODULO_TAG) ;
+ myDataType = datatype( (MEDCoupling::_MessageIdent) MethodId ) ;
+ _comm_interface.getCount(&aMPIStatus, myDataType, &outcount ) ;
+ if ( _trace )
+ cout << "MPIAccess::IProbe" << _my_rank << " FromSource " << FromSource
+ << " source " << source << " MPITag " << MPITag << " MethodId "
+ << MethodId << " datatype " << myDataType << " outcount " << outcount
+ << " flag " << flag << endl ;
+ }
+ else
+ {
+ source = -1 ;
+ MPITag = -1 ;
+        myDataType = MPI_DATATYPE_NULL ;
+ outcount = -1 ;
+ }
+ return sts ;
+ }
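+
+  // A polling sketch built on IProbe (hypothetical names) : check whether a
+  // message is pending, then read it with recv using the probed metadata.
+  //   int source, tag, outcount, flag ;
+  //   MPI_Datatype dtype ;
+  //   mpi_access.IProbe( MPI_ANY_SOURCE, source, tag, dtype, outcount, flag ) ;
+  //   if ( flag )
+  //     {
+  //       int req_id ;
+  //       mpi_access.recv( buffer, outcount, dtype, source, req_id ) ;
+  //     }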
+
+ // Cancel concerns a "posted" asynchronous IRecv
+ // Returns flag = true if the receiving request was successfully canceled
+ // Returns flag = false if the receiving request was finished but not canceled
+ // Use cancel, wait and test_cancelled of the MPI API
+ int MPIAccess::cancel( int RecvRequestId, int &flag )
+ {
+ flag = 0 ;
+ int sts = _comm_interface.cancel( MPIRequest( RecvRequestId ) ) ;
+ if ( sts == MPI_SUCCESS )
+ {
+ sts = _comm_interface.wait( MPIRequest( RecvRequestId ) ,
+ MPIStatus( RecvRequestId ) ) ;
+ if ( sts == MPI_SUCCESS )
+ sts = _comm_interface.testCancelled( MPIStatus( RecvRequestId ) , &flag ) ;
+ }
+ return sts ;
+ }
+
+ // Cancel concerns a "pending" receiving message (without IRecv "posted")
+ // Returns flag = true if the message was successfully canceled
+ // Returns flag = false if the receiving request was finished but not canceled
+ // Use Irecv, cancel, wait and test_cancelled of the MPI API
+ int MPIAccess::cancel( int source, int theMPITag, MPI_Datatype datatype, int outcount, int &flag )
+ {
+ int sts ;
+ MPI_Aint extent, lbound ;
+ flag = 0 ;
+ sts = MPI_Type_get_extent( datatype , &lbound, &extent ) ;
+ if ( sts == MPI_SUCCESS )
+ {
+ void * recvbuf = malloc( extent*outcount ) ;
+ MPI_Request aRecvRequest ;
+ if ( _trace )
+ cout << "MPIAccess::Cancel" << _my_rank << " Irecv extent " << extent
+ << " datatype " << datatype << " source " << source << " theMPITag "
+ << theMPITag << endl ;
+ sts = _comm_interface.Irecv( recvbuf, outcount, datatype, source, theMPITag,
+ *_intra_communicator , &aRecvRequest ) ;
+ if ( sts == MPI_SUCCESS )
+ {
+ sts = _comm_interface.cancel( &aRecvRequest ) ;
+ if ( _trace )
+ cout << "MPIAccess::Cancel" << _my_rank << " theMPITag " << theMPITag
+ << " cancel done" << endl ;
+ if ( sts == MPI_SUCCESS )
+ {
+ MPI_Status aStatus ;
+ if ( _trace )
+ cout << "MPIAccess::Cancel" << _my_rank << " wait" << endl ;
+ sts = _comm_interface.wait( &aRecvRequest , &aStatus ) ;
+ if ( sts == MPI_SUCCESS )
+ {
+ if ( _trace )
+ cout << "MPIAccess::Cancel" << _my_rank << " test_cancelled" << endl ;
+ sts = _comm_interface.testCancelled( &aStatus , &flag ) ;
+ }
+ }
+ }
+ if ( _trace && datatype == timeType() )
+ cout << "MPIAccess::Cancel" << _my_rank << " time "
+ << ((TimeMessage *) recvbuf)->time << " "
+ << ((TimeMessage *) recvbuf)->deltatime << endl ;
+ free( recvbuf ) ;
+ }
+ if ( _trace )
+ cout << "MPIAccess::Cancel" << _my_rank << " flag " << flag << endl ;
+ return sts ;
+ }
+
+
+  // CancelAll concerns all "pending" receiving messages (without IRecv "posted")
+  // CancelAll uses IProbe and Cancel (see above)
+ int MPIAccess::cancelAll()
+ {
+ int sts = MPI_SUCCESS ;
+ int target ;
+ int source ;
+ int MPITag ;
+ MPI_Datatype datatype ;
+ int outcount ;
+ int flag ;
+ for ( target = 0 ; target < _processor_group_size ; target++ )
+ {
+ sts = IProbe(target, source, MPITag, datatype, outcount, flag) ;
+ if ( sts == MPI_SUCCESS && flag )
+ {
+ sts = cancel(source, MPITag, datatype, outcount, flag) ;
+ if ( _trace )
+ cout << "MPIAccess::CancelAll" << _my_rank << " source " << source
+ << " MPITag " << MPITag << " datatype " << datatype
+ << " outcount " << outcount << " Cancel flag " << flag << endl ;
+ if ( sts != MPI_SUCCESS )
+ break ;
+ }
+ else if ( sts != MPI_SUCCESS )
+ break ;
+ }
+ return sts ;
+ }
+
+  // Same as MPI_Barrier of the MPI API
+ int MPIAccess::barrier()
+ {
+ int status = _comm_interface.barrier( *_intra_communicator ) ;
+ return status ;
+ }
+
+  // Same as MPI_Error_string of the MPI API
+ int MPIAccess::errorString(int errorcode, char *string, int *resultlen) const
+ {
+ return _comm_interface.errorString( errorcode, string, resultlen) ;
+ }
+
+ // Returns source, tag, error and outcount corresponding to receiving RequestId
+ // By default the corresponding structure of RequestId is deleted
+ int MPIAccess::status(int RequestId, int &source, int &tag, int &error,
+ int &outcount, bool keepRequestStruct)
+ {
+ MPI_Status *myStatus = MPIStatus( RequestId ) ;
+ if ( _trace )
+ cout << "MPIAccess::status" << _my_rank << " RequestId " << RequestId
+ << " status " << myStatus << endl ;
+ if ( myStatus != NULL && MPIAsynchronous( RequestId ) &&
+ MPICompleted( RequestId ) )
+ {
+ if ( MPIIsRecv( RequestId ) )
+ {
+ source = myStatus->MPI_SOURCE ;
+ tag = myStatus->MPI_TAG ;
+ error = myStatus->MPI_ERROR ;
+ MPI_Datatype datatype = MPIDatatype( RequestId ) ;
+ _comm_interface.getCount(myStatus, datatype, &outcount ) ;
+ if ( _trace )
+ cout << "MPIAccess::status" << _my_rank << " RequestId " << RequestId
+ << " status " << myStatus << " outcount " << outcount << endl ;
+ setMPIOutCount( RequestId , outcount ) ;
+ }
+ else
+ {
+ source = MPITarget( RequestId ) ;
+ tag = MPITag( RequestId ) ;
+ error = 0 ;
+ outcount = MPIOutCount( RequestId ) ;
+ }
+ if ( !keepRequestStruct )
+ deleteRequest( RequestId ) ;
+ return MPI_SUCCESS ;
+ }
+ else
+ {
+ source = MPITarget( RequestId ) ;
+ tag = MPITag( RequestId ) ;
+ error = 0 ;
+ outcount = MPIOutCount( RequestId ) ;
+ }
+ return MPI_SUCCESS ;
+ }
+
+ int MPIAccess::requestFree( MPI_Request *request )
+ {
+ return _comm_interface.requestFree( request ) ;
+ }
+
+ // Print all information of all known requests for debugging purpose
+ void MPIAccess::check() const
+ {
+ int i = 0 ;
+ map< int , RequestStruct * >::const_iterator MapOfRequestStructiterator ;
+ cout << "MPIAccess::Check" << _my_rank << "_map_of_request_struct_size "
+ << _map_of_request_struct.size() << endl ;
+ for ( MapOfRequestStructiterator = _map_of_request_struct.begin() ;
+ MapOfRequestStructiterator != _map_of_request_struct.end() ;
+ MapOfRequestStructiterator++ )
+ {
+ if ( MapOfRequestStructiterator->second != NULL )
+ {
+ cout << " Check" << _my_rank << " " << i << ". Request"
+ << MapOfRequestStructiterator->first << "-->" ;
+ if ( (MapOfRequestStructiterator->second)->MPIAsynchronous )
+ cout << "I" ;
+ if ( (MapOfRequestStructiterator->second)->MPIIsRecv )
+ cout << "Recv from " ;
+ else
+ cout << "Send to " ;
+ cout << (MapOfRequestStructiterator->second)->MPITarget
+ << " MPITag " << (MapOfRequestStructiterator->second)->MPITag
+ << " DataType " << (MapOfRequestStructiterator->second)->MPIDatatype
+ << " Request " << (MapOfRequestStructiterator->second)->MPIRequest
+ << " Status " << (MapOfRequestStructiterator->second)->MPIStatus
+ << " Completed " << (MapOfRequestStructiterator->second)->MPICompleted
+ << endl ;
+ }
+ i++ ;
+ }
+ }
+
+ // Returns the MPI size of a TimeMessage
+ MPI_Aint MPIAccess::timeExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( _MPI_TIME , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the MPI size of a MPI_INT
+ MPI_Aint MPIAccess::intExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_INT , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the MPI size of a MPI_LONG
+ MPI_Aint MPIAccess::longExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_LONG , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Returns the MPI size of a MPI_DOUBLE
+ MPI_Aint MPIAccess::doubleExtent() const
+ {
+ MPI_Aint aextent, lbound ;
+ MPI_Type_get_extent( MPI_DOUBLE , &lbound, &aextent ) ;
+ return aextent ;
+ }
+
+ // Outputs fields of a TimeMessage structure
+ ostream & operator<< (ostream & f ,const TimeMessage & aTimeMsg )
+ {
+ f << " time " << aTimeMsg.time << " deltatime " << aTimeMsg.deltatime
+ << " tag " << aTimeMsg.tag ;
+ return f;
+ }
+
+ // Outputs the DataType coded in a Tag
+ ostream & operator<< (ostream & f ,const _MessageIdent & methodtype )
+ {
+ switch (methodtype)
+ {
+ case _message_time :
+ f << " MethodTime ";
+ break;
+ case _message_int :
+ f << " MPI_INT ";
+ break;
+ case _message_double :
+ f << " MPI_DOUBLE ";
+ break;
+ default :
+ f << " UnknownMethodType ";
+ break;
+ }
+ return f;
+ }
+}
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __MPIACCESS_HXX__
+#define __MPIACCESS_HXX__
+
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+
+#include <map>
+#include <list>
+#include <vector>
+#include <iostream>
+
+namespace MEDCoupling
+{
+ typedef struct
+ {
+ double time ;
+ double deltatime ;
+ int tag ;
+ } TimeMessage;
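+
+  // Note : a TimeMessage travels on the wire as the _MPI_TIME datatype built
+  // in the MPIAccess constructor, an MPI struct of two MPI_DOUBLE fields
+  // (time, deltatime) followed by one MPI_INT field (tag).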
+
+ static MPI_Request mpirequestnull = MPI_REQUEST_NULL ;
+ enum _MessageIdent { _message_unknown, _message_time, _message_int, _message_double } ;
+
+ class MPIAccess
+ {
+ private:
+ struct RequestStruct
+ {
+ int MPITarget ;
+ bool MPIIsRecv ;
+ int MPITag ;
+ bool MPIAsynchronous ;
+ bool MPICompleted ;
+ MPI_Datatype MPIDatatype ;
+ MPI_Request MPIRequest ;
+ MPI_Status *MPIStatus ;
+ int MPIOutCount ;
+ };
+ public:
+ MPIAccess(MPIProcessorGroup * ProcessorGroup, int BaseTag=0, int MaxTag=0) ;
+ virtual ~MPIAccess() ;
+
+ void trace( bool trace = true ) ;
+
+ void deleteRequest( int RequestId ) ;
+ void deleteRequests(int size , int *ArrayOfSendRequests ) ;
+
+ int sendMPITag(int destrank) ;
+ int recvMPITag(int sourcerank) ;
+
+ int sendRequestIdsSize() ;
+ int sendRequestIds(int size, int *ArrayOfSendRequests) ;
+ int recvRequestIdsSize() ;
+ int recvRequestIds(int size, int *ArrayOfRecvRequests) ;
+
+ int sendRequestIdsSize(int destrank) ;
+ int sendRequestIds(int destrank, int size, int *ArrayOfSendRequests) ;
+ int recvRequestIdsSize(int sourcerank) ;
+ int recvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests) ;
+
+ int send(void* buffer, int count, MPI_Datatype datatype, int target,
+ int &RequestId) ;
+ int ISend(void* buffer, int count, MPI_Datatype datatype, int target,
+ int &RequestId) ;
+ int recv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int &RequestId, int *OutCount=NULL) ;
+ int IRecv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int &RequestId) ;
+ int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest,
+ int &SendRequestId, void* recvbuf, int recvcount,
+ MPI_Datatype recvtype, int source,
+ int &RecvRequestId, int *OutCount=NULL) ;
+ int ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest,
+ int &SendRequestId, void* recvbuf, int recvcount,
+ MPI_Datatype recvtype, int source, int &RecvRequestId) ;
+
+ int wait(int RequestId) ;
+ int test(int RequestId, int &flag) ;
+ int waitAny(int count, int *array_of_RequestIds, int &RequestId) ;
+ int testAny(int count, int *array_of_RequestIds, int &RequestId, int &flag) ;
+ int waitAll(int count, int *array_of_RequestIds) ;
+ int testAll(int count, int *array_of_RequestIds, int &flag) ;
+ int waitSome(int count, int *array_of_RequestIds, int outcount,
+ int *outarray_of_RequestIds) ;
+ int testSome(int count, int *array_of_RequestIds, int outcounts,
+ int *outarray_of_RequestIds) ;
+ int probe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype,
+ int &outcount) ;
+ int IProbe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype,
+ int &outcount, int &flag) ;
+ int cancel( int RecvRequestId, int &flag ) ;
+ int cancel( int source, int MPITag, MPI_Datatype datatype, int outcount,
+ int &flag ) ;
+ int cancelAll() ;
+ int barrier() ;
+ int errorString(int errorcode, char *string, int *resultlen) const ;
+ int status(int RequestId, int &source, int &tag, int &error, int &outcount,
+ bool keepRequestStruct=false) ;
+ int requestFree( MPI_Request *request ) ;
+
+ void check() const ;
+
+ MPI_Datatype timeType() const ;
+ bool isTimeMessage( int MPITag ) const ;
+ MPI_Aint timeExtent() const ;
+ MPI_Aint intExtent() const ;
+ MPI_Aint longExtent() const ;
+ MPI_Aint doubleExtent() const ;
+ MPI_Aint extent( MPI_Datatype datatype ) const ;
+
+ int MPITag( int RequestId ) ;
+ int MPITarget( int RequestId ) ;
+ bool MPIIsRecv( int RequestId ) ;
+ bool MPIAsynchronous( int RequestId ) ;
+ bool MPICompleted( int RequestId ) ;
+ MPI_Datatype MPIDatatype( int RequestId ) ;
+ int MPIOutCount( int RequestId ) ;
+
+ private:
+ int newRequest( MPI_Datatype datatype, int tag , int destsourcerank ,
+ bool fromsourcerank , bool asynchronous ) ;
+ int newSendTag( MPI_Datatype datatype, int destrank , int method ,
+ bool asynchronous, int &RequestId ) ;
+ int newRecvTag( MPI_Datatype datatype, int sourcerank , int method ,
+ bool asynchronous, int &RequestId ) ;
+ int incrTag( int prevtag ) ;
+ int valTag( int tag, int method ) ;
+
+ void deleteSendRecvRequest( int RequestId ) ;
+
+ void deleteStatus( int RequestId ) ;
+
+ MPI_Request *MPIRequest( int RequestId ) ;
+ MPI_Status *MPIStatus( int RequestId ) ;
+ void setMPICompleted( int RequestId , bool completed ) ;
+ void setMPIOutCount( int RequestId , int outcount ) ;
+ void clearMPIStatus( int RequestId ) ;
+
+ _MessageIdent methodId( MPI_Datatype datatype ) const ;
+ MPI_Datatype datatype( _MessageIdent aMethodIdent ) const ;
+ private:
+ const CommInterface &_comm_interface ;
+ const MPI_Comm* _intra_communicator ;
+ MPIProcessorGroup * _processor_group ;
+ int _processor_group_size ;
+ int _my_rank ;
+ bool _trace ;
+ int _base_request ;
+ int _max_request ;
+ int _request ;
+ int * _send_request ;
+ int * _recv_request ;
+ std::vector< std::list< int > > _send_requests ;
+ std::vector< std::list< int > > _recv_requests ;
+ int _base_MPI_tag ;
+ int _max_MPI_tag ;
+ int * _send_MPI_tag ;
+ int * _recv_MPI_Tag ;
+ MPI_Datatype _MPI_TIME ;
+ static const int MODULO_TAG=10;
+ std::map< int , RequestStruct * > _map_of_request_struct ;
+
+ };
+
+ inline void MPIAccess::trace( bool atrace )
+ {
+ _trace = atrace ;
+ }
+
+  // Deletes the RequestStruct corresponding to the RequestId identifier after
+  // deleting its MPI_Request * and MPI_Status * structures, and removes
+  // (erases) it from _map_of_request_struct
+ inline void MPIAccess::deleteRequest( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ {
+ if ( _trace )
+ std::cout << "MPIAccess::DeleteRequest" << _my_rank << "( " << RequestId << " ) "
+ << aRequestStruct << " MPIRequest " << aRequestStruct->MPIRequest
+ << " MPIIsRecv " << aRequestStruct->MPIIsRecv << std::endl ;
+ if ( _map_of_request_struct[RequestId]->MPIRequest != MPI_REQUEST_NULL )
+ requestFree( &_map_of_request_struct[RequestId]->MPIRequest ) ;
+ deleteSendRecvRequest( RequestId ) ;
+ deleteStatus( RequestId ) ;
+ _map_of_request_struct.erase( RequestId ) ;
+ delete aRequestStruct ;
+ }
+ else
+ {
+ if ( _trace )
+ std::cout << "MPIAccess::DeleteRequest" << _my_rank << "( " << RequestId
+ << " ) Request not found" << std::endl ;
+ }
+ }
+
+ // Delete all requests of the array ArrayOfSendRequests
+ inline void MPIAccess::deleteRequests(int size , int *ArrayOfSendRequests )
+ {
+ for (int i = 0 ; i < size ; i++ )
+ deleteRequest( ArrayOfSendRequests[i] ) ;
+ }
+
+ // Returns the last MPITag of the destination rank destrank
+ inline int MPIAccess::sendMPITag(int destrank)
+ {
+ return _send_MPI_tag[destrank] ;
+ }
+
+ // Returns the last MPITag of the source rank sourcerank
+ inline int MPIAccess::recvMPITag(int sourcerank)
+ {
+ return _recv_MPI_Tag[sourcerank] ;
+ }
+
+ // Returns the number of all SendRequestIds matching a destination rank. It may be
+ // used to allocate ArrayOfSendRequests for the call to SendRequestIds
+ inline int MPIAccess::sendRequestIdsSize(int destrank)
+ {
+ return (int)_send_requests[destrank].size() ;
+ }
+
+ // Returns the number of all RecvRequestIds matching a source rank. It may be
+ // used to allocate ArrayOfRecvRequests for the call to RecvRequestIds
+ inline int MPIAccess::recvRequestIdsSize(int sourcerank)
+ {
+ return (int)_recv_requests[sourcerank].size() ;
+ }
+
+  // Returns the MPI_Datatype (registered in MPI in the constructor with
+  // MPI_Type_create_struct and MPI_Type_commit) for TimeMessages
+ inline MPI_Datatype MPIAccess::timeType() const
+ {
+ return _MPI_TIME ;
+ }
+
+ // Returns true if the tag MPITag corresponds to a TimeMessage
+ inline bool MPIAccess::isTimeMessage( int aMPITag ) const
+ {
+ return ((aMPITag%MODULO_TAG) == _message_time) ;
+ }
+
+ // Returns the MPI size of the MPI_Datatype datatype
+ inline MPI_Aint MPIAccess::extent( MPI_Datatype adatatype ) const
+ {
+ if ( adatatype == _MPI_TIME )
+ return timeExtent() ;
+ if ( adatatype == MPI_INT )
+ return intExtent() ;
+ if ( adatatype == MPI_LONG )
+ return longExtent() ;
+ if ( adatatype == MPI_DOUBLE )
+ return doubleExtent() ;
+ return 0 ;
+ }
+
+ // Returns the MPITag of the request corresponding to RequestId identifier
+ inline int MPIAccess::MPITag( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return aRequestStruct->MPITag ;
+ return -1 ;
+ }
+
+ // Returns the MPITarget of the request corresponding to RequestId identifier
+ inline int MPIAccess::MPITarget( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return aRequestStruct->MPITarget ;
+ return -1 ;
+ }
+
+ // Returns true if the request corresponding to RequestId identifier was [I]Recv
+ inline bool MPIAccess::MPIIsRecv( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return aRequestStruct->MPIIsRecv ;
+ return false ;
+ }
+
+ // Returns true if the request corresponding to RequestId identifier was asynchronous
+ inline bool MPIAccess::MPIAsynchronous( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return aRequestStruct->MPIAsynchronous ;
+ return false ;
+ }
+
+ // Returns true if the request corresponding to RequestId identifier was completed
+ inline bool MPIAccess::MPICompleted( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return aRequestStruct->MPICompleted;
+ return true ;
+ }
+
+ // Returns the MPI_datatype of the request corresponding to RequestId identifier
+ inline MPI_Datatype MPIAccess::MPIDatatype( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return aRequestStruct->MPIDatatype;
+ return MPI_DATATYPE_NULL;
+ }
+
+  // Returns the size of the received message of the request corresponding to
+  // the RequestId identifier
+ inline int MPIAccess::MPIOutCount( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return aRequestStruct->MPIOutCount;
+ return 0 ;
+ }
+
+ // Increments the previous tag value (cyclically)
+  // See MPIAccess::newSendTag/newRecvTag in MPIAccess.cxx
+ inline int MPIAccess::incrTag( int prevtag )
+ {
+ int tag;
+ if ( (prevtag % MODULO_TAG) == _message_time )
+ tag = ((prevtag/MODULO_TAG)*MODULO_TAG);
+ else
+ tag = ((prevtag/MODULO_TAG + 1)*MODULO_TAG);
+ if ( tag > _max_MPI_tag )
+ tag = _base_MPI_tag ;
+ return tag ;
+ }
+
+ // Returns the MPITag with the method-type field
+  // See MPIAccess::newSendTag/newRecvTag in MPIAccess.cxx
+ inline int MPIAccess::valTag( int tag, int method )
+ {
+ return ((tag/MODULO_TAG)*MODULO_TAG) + method;
+ }
+
+  // Removes a Request identifier from the list _recv_requests/_send_requests
+  // of the corresponding target
+ inline void MPIAccess::deleteSendRecvRequest( int RequestId )
+ {
+ if ( _trace )
+ std::cout << "MPIAccess::DeleteSendRecvRequest" << _my_rank
+ << "( " << RequestId << " ) " << std::endl ;
+ if ( MPIIsRecv( RequestId ) )
+ _recv_requests[ MPITarget( RequestId ) ].remove( RequestId );
+ else
+ _send_requests[ MPITarget( RequestId ) ].remove( RequestId );
+ }
+
+  // Deletes the MPI structure MPI_Status * of a RequestId
+ inline void MPIAccess::deleteStatus( int RequestId )
+ {
+ if ( _map_of_request_struct[RequestId]->MPIStatus != NULL )
+ {
+ delete _map_of_request_struct[RequestId]->MPIStatus ;
+ clearMPIStatus( RequestId ) ;
+ }
+ }
+
+  // Returns the MPI structure MPI_Request * of a RequestId
+ inline MPI_Request * MPIAccess::MPIRequest( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ return &aRequestStruct->MPIRequest;
+ return &mpirequestnull ;
+ }
+
+  // Returns the MPI structure MPI_Status * of a RequestId
+ inline MPI_Status * MPIAccess::MPIStatus( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ];
+ if ( aRequestStruct )
+ return aRequestStruct->MPIStatus;
+ return NULL ;
+ }
+
+  // Sets the MPICompleted field of the RequestStruct corresponding to the
+  // RequestId identifier to the value completed
+ inline void MPIAccess::setMPICompleted( int RequestId , bool completed )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ aRequestStruct->MPICompleted = completed;
+ }
+
+  // Sets the MPIOutCount field of the RequestStruct corresponding to the
+  // RequestId identifier to the value outcount
+ inline void MPIAccess::setMPIOutCount( int RequestId , int outcount )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ aRequestStruct->MPIOutCount = outcount;
+ }
+
+  // Nullifies the MPIStatus field of the RequestStruct corresponding to the
+  // RequestId identifier
+ inline void MPIAccess::clearMPIStatus( int RequestId )
+ {
+ struct RequestStruct *aRequestStruct = _map_of_request_struct[ RequestId ] ;
+ if ( aRequestStruct )
+ aRequestStruct->MPIStatus = NULL ;
+ }
+
+ // Returns the _MessageIdent enum value corresponding to the MPI_Datatype datatype
+  // See MPIAccess::newSendTag/newRecvTag in MPIAccess.cxx
+ inline _MessageIdent MPIAccess::methodId( MPI_Datatype adatatype ) const
+ {
+ _MessageIdent aMethodIdent ;
+ if ( adatatype == _MPI_TIME )
+ aMethodIdent = _message_time;
+ else if ( adatatype == MPI_INT )
+ aMethodIdent = _message_int ;
+ else if ( adatatype == MPI_DOUBLE )
+ aMethodIdent = _message_double ;
+ else
+ aMethodIdent = _message_unknown ;
+ return aMethodIdent ;
+ }
+
+ // Returns the MPI_Datatype corresponding to the _MessageIdent enum aMethodIdent
+ inline MPI_Datatype MPIAccess::datatype( _MessageIdent aMethodIdent ) const
+ {
+ MPI_Datatype aDataType ;
+ switch( aMethodIdent )
+ {
+ case _message_time :
+ aDataType = _MPI_TIME ;
+ break ;
+ case _message_int :
+ aDataType = MPI_INT ;
+ break ;
+ case _message_double :
+ aDataType = MPI_DOUBLE ;
+ break ;
+ default :
+        aDataType = MPI_DATATYPE_NULL ;
+ break ;
+ }
+ return aDataType ;
+ }
+
+ std::ostream & operator<< (std::ostream &,const _MessageIdent &);
+
+ std::ostream & operator<< (std::ostream &,const TimeMessage &);
+
+}
+
+#endif
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "MPIAccess/MPIAccessDEC.hxx"
+
+#include <cstring>
+
+using namespace std;
+
+namespace MEDCoupling
+{
+
+  /*!
+    This constructor creates an MPIAccessDEC which has \a source_group as its working side
+    and \a target_group as its idle side.
+    The constructor must be called synchronously on all processors of both processor groups.
+
+    \param source_group working side ProcessorGroup
+    \param target_group idle (lazy) side ProcessorGroup
+    \param Asynchronous communication mode (default asynchronous)
+  */
+ MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
+ const ProcessorGroup& target_group,
+ bool Asynchronous )
+ {
+
+ ProcessorGroup * union_group = source_group.fuse(target_group) ;
+ int i ;
+ std::set<int> procs;
+ for ( i = 0 ; i < union_group->size() ; i++ )
+ {
+ procs.insert(i) ;
+ }
+ MPIProcessorGroup *mpilg = static_cast<MPIProcessorGroup *>(const_cast<ProcessorGroup *>(&source_group));
+ _MPI_union_group = new MEDCoupling::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
+ delete union_group ;
+ _my_rank = _MPI_union_group->myRank() ;
+ _group_size = _MPI_union_group->size() ;
+ _MPI_access = new MPIAccess( _MPI_union_group ) ;
+ _asynchronous = Asynchronous ;
+ _time_messages = new vector< vector< TimeMessage > > ;
+ _time_messages->resize( _group_size ) ;
+ _out_of_time = new vector< bool > ;
+ _out_of_time->resize( _group_size ) ;
+ _data_messages_recv_count = new vector< int > ;
+ _data_messages_recv_count->resize( _group_size ) ;
+ for ( i = 0 ; i < _group_size ; i++ )
+ {
+ (*_out_of_time)[i] = false ;
+ (*_data_messages_recv_count)[i] = 0 ;
+ }
+ _data_messages_type = new vector< MPI_Datatype > ;
+ _data_messages_type->resize( _group_size ) ;
+ _data_messages = new vector< vector< void * > > ;
+ _data_messages->resize( _group_size ) ;
+ _time_interpolator = NULL ;
+ _map_of_send_buffers = new map< int , SendBuffStruct * > ;
+ }
+
+ MPIAccessDEC::~MPIAccessDEC()
+ {
+ checkFinalSent() ;
+ checkFinalRecv() ;
+ delete _MPI_union_group ;
+ delete _MPI_access ;
+ if ( _time_interpolator )
+ delete _time_interpolator ;
+ if ( _time_messages )
+ delete _time_messages ;
+ if ( _out_of_time )
+ delete _out_of_time ;
+ if ( _data_messages_recv_count )
+ delete _data_messages_recv_count ;
+ if ( _data_messages_type )
+ delete _data_messages_type ;
+ if ( _data_messages )
+ delete _data_messages ;
+ if ( _map_of_send_buffers )
+ delete _map_of_send_buffers ;
+ }
+
+ void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
+ double InterpPrecision, int nStepBefore,
+ int nStepAfter )
+ {
+ if ( _time_interpolator )
+ delete _time_interpolator ;
+ switch ( aTimeInterp )
+ {
+ case WithoutTimeInterp :
+ _time_interpolator = NULL ;
+ _n_step_before = 0 ;
+ _n_step_after = 0 ;
+ break ;
+ case LinearTimeInterp :
+ _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
+ nStepAfter ) ;
+ _n_step_before = nStepBefore ;
+ _n_step_after = nStepAfter ;
+ int i ;
+ for ( i = 0 ; i < _group_size ; i++ )
+ {
+ (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
+ (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
+ int j ;
+ for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ )
+ {
+ (*_time_messages)[ i ][ j ].time = -1 ;
+ (*_time_messages)[ i ][ j ].deltatime = -1 ;
+ (*_data_messages)[ i ][ j ] = NULL ;
+ }
+ }
+ break ;
+ }
+ }
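+
+  // A minimal sketch of enabling linear time interpolation on a DEC
+  // (the group names are hypothetical) :
+  //   MPIAccessDEC dec( source_group, target_group, true ) ;
+  //   dec.setTimeInterpolator( LinearTimeInterp, 1e-12, 1, 1 ) ;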
+
+ /*!
+    Sends sendcount data values from sendbuf[offset] with type sendtype to target of IntraCommunicator
+ (Internal Protected method)
+
+ Returns the request identifier SendRequestId
+
+ */
+ int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset ,
+ MPI_Datatype sendtype , int target , int &SendRequestId )
+ {
+ int sts ;
+ if ( _asynchronous )
+ {
+ if ( sendtype == MPI_INT )
+ {
+ sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ else if ( sendtype == MPI_LONG )
+ {
+ sts = _MPI_access->ISend( &((long *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ }
+ else
+ {
+ if ( sendtype == MPI_INT )
+ {
+ sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ else if ( sendtype == MPI_LONG )
+ {
+ sts = _MPI_access->send( &((long *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ }
+ return sts ;
+ }
+
+ /*!
+    Receives recvcount data values into recvbuf[offset] with type recvtype from target of IntraCommunicator
+ (Internal Protected method)
+
+ Returns the request identifier RecvRequestId
+
+ */
+ int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset ,
+ MPI_Datatype recvtype , int target , int &RecvRequestId )
+ {
+ int sts ;
+ if ( _asynchronous )
+ {
+ if ( recvtype == MPI_INT )
+ {
+ sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ else if ( recvtype == MPI_LONG )
+ {
+ sts = _MPI_access->IRecv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ }
+ else
+ {
+ if ( recvtype == MPI_INT )
+ {
+ sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ else if ( recvtype == MPI_LONG )
+ {
+ sts = _MPI_access->recv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ }
+ return sts ;
+ }
+
+  /*!
+    Sends sendcount data values from sendbuf[sendoffset], with type sendtype, to
+    the given target of the IntraCommunicator, and receives recvcount data values
+    into recvbuf[recvoffset], with type recvtype, from that same target.
+    (Internal protected method.)
+
+    Returns the request identifiers SendRequestId and RecvRequestId.
+  */
+ int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset ,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , int recvoffset ,
+ MPI_Datatype recvtype , int target ,
+ int &SendRequestId , int &RecvRequestId )
+ {
+ int sts ;
+ if ( _asynchronous )
+ {
+ if ( sendtype == MPI_INT )
+ {
+ if ( recvtype == MPI_INT )
+ {
+ sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] , recvcount ,
+ recvtype , target , RecvRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ else
+ {
+ if ( recvtype == MPI_INT )
+ {
+ sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ }
+ else
+ {
+ if ( sendtype == MPI_INT )
+ {
+ if ( recvtype == MPI_INT )
+ {
+ sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] , recvcount ,
+ recvtype , target , RecvRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ else
+ {
+ if ( recvtype == MPI_INT )
+ {
+ sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ else
+ {
+ sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ }
+ return sts ;
+ }
+
+  /*!
+    Sends sendcount data values from sendbuf[offset], with type sendtype, to every
+    target of the IntraCommunicator.
+    Receives recvcount data values into recvbuf[offset], with type recvtype, from
+    every target of the IntraCommunicator.
+  */
+ int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype )
+ {
+ if ( _time_interpolator )
+ {
+ return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
+ }
+ int sts = 0;
+ int target ;
+ int sendoffset = 0 ;
+ int recvoffset = 0 ;
+ int SendRequestId ;
+ int RecvRequestId ;
+
+    //Freeing of SendBuffers
+ if ( _asynchronous )
+ checkSent() ;
+
+ //DoSend + DoRecv : SendRecv
+ SendBuffStruct * aSendDataStruct = NULL ;
+ if ( _asynchronous && sendbuf )
+ {
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ for ( target = 0 ; target < _group_size ; target++ )
+ {
+ sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype ,
+ recvbuf , recvcount , recvoffset , recvtype ,
+ target , SendRequestId , RecvRequestId ) ;
+ if ( _asynchronous && sendbuf && sendcount )
+ {
+ aSendDataStruct->Counter += 1 ;
+ (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
+ }
+ sendoffset += sendcount ;
+ recvoffset += recvcount ;
+ }
+ if ( !_asynchronous && sendbuf )
+ {
+ if ( sendtype == MPI_INT )
+ {
+ delete [] (int *) sendbuf ;
+ }
+ else
+ {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ return sts ;
+ }
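+
+  /*
+    Example (an editorial illustration, not part of the original sources) :
+    on a group of size 3,
+      allToAll( sendbuf , 2 , MPI_DOUBLE , recvbuf , 2 , MPI_DOUBLE ) ;
+    sends sendbuf[0..1] to rank 0, sendbuf[2..3] to rank 1 and sendbuf[4..5]
+    to rank 2 (sendoffset grows by sendcount per target), and receives
+    symmetrically into recvbuf[0..1], recvbuf[2..3] and recvbuf[4..5].
+  */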
+
+  /*!
+    Sends sendcounts[target] data values from sendbuf[sdispls[target]], with type
+    sendtype, to every target of the IntraCommunicator.
+    Receives recvcounts[target] data values into recvbuf[rdispls[target]], with
+    type recvtype, from every target of the IntraCommunicator.
+  */
+ int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls,
+ MPI_Datatype recvtype )
+ {
+ if ( _time_interpolator )
+ {
+ return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
+ recvbuf, recvcounts, rdispls, recvtype ) ;
+ }
+ int sts = 0;
+ int target ;
+ int SendRequestId ;
+ int RecvRequestId ;
+
+    //Freeing of SendBuffers
+ if ( _asynchronous )
+ {
+ checkSent() ;
+ }
+
+ //DoSend + DoRecv : SendRecv
+ SendBuffStruct * aSendDataStruct = NULL ;
+ if ( _asynchronous && sendbuf )
+ {
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ for ( target = 0 ; target < _group_size ; target++ )
+ {
+ if ( sendcounts[target] || recvcounts[target] )
+ {
+ sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
+ recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
+ target , SendRequestId , RecvRequestId ) ;
+ if ( _asynchronous && sendbuf && sendcounts[target])
+ {
+ aSendDataStruct->Counter += 1 ;
+ (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
+ }
+ }
+ }
+ if ( !_asynchronous && sendbuf )
+ {
+ if ( sendtype == MPI_INT )
+ {
+ delete [] (int *) sendbuf ;
+ }
+ else
+ {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ return sts ;
+ }
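+
+  /*
+    Example (an editorial illustration, not part of the original sources) :
+    with sendcounts = {1,0,2} and sdispls = {0,1,1} on a group of size 3,
+    rank 0 receives sendbuf[0] and rank 2 receives sendbuf[1..2] ; a target is
+    skipped entirely only when both its sendcount and its recvcount are 0
+    (so here target 1 is skipped, assuming recvcounts[1] is also 0).
+  */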
+
+  /*
+    MPIAccessDEC and the management of SendBuffers :
+    =================================================
+
+    . In the collective communications we send only parts of the same
+    buffer to each "target". So in asynchronous mode all of these parts
+    must be free before the buffer may be deleted/freed.
+
+    . We assume that buffers are allocated with a new double[], so a
+    delete [] is done.
+
+    . The structure SendBuffStruct keeps the address of the buffer
+    and manages a reference counter for that buffer. It also contains
+    the MPI_Datatype needed for the delete [] (double *) ... when the
+    counter reaches zero.
+
+    . The map _MapOfSendBuffers establishes the correspondence between each
+    RequestId given by MPI_Access->ISend(...) and a SendBuffStruct
+    for each "target" of a part of the buffer.
+
+    . All of that concerns only asynchronous Send. In synchronous mode,
+    we delete sendbuf just after the Send.
+  */
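+
+  /*
+    Life-cycle sketch of a SendBuffStruct (editorial summary of the above) :
+      ISend to each target       : Counter += 1 per pending request ;
+      checkSent() / Test or Wait : Counter -= 1 per completed request ;
+      Counter == 0               : free the buffer (delete for a TimeMessage,
+                                   delete [] for int/double data, according to
+                                   DataType) and erase the entry from
+                                   _MapOfSendBuffers.
+  */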
+
+  /*
+    MPIAccessDEC and the management of RecvBuffers :
+    =================================================
+
+    If there is no interpolation, no special action is done.
+
+    With interpolation, for each target :
+    -------------------------------------
+    . We have _time_messages[target], which is a vector of TimeMessages.
+    With a linear interpolation we have 2 TimeMessages in our case.
+    They contain the previous time(t0)/deltatime and the last
+    time(t1)/deltatime.
+
+    . We have _data_messages[target], which is a vector of DataMessages.
+    With a linear interpolation we have 2 DataMessages in our case.
+    They contain the data of the previous step, at time(t0)/deltatime, and
+    of the last step, at time(t1)/deltatime.
+
+    . At the time _t(t*) of the current process we interpolate the
+    values of the 2 DataMessages ; the result is returned in the part of
+    recvbuf corresponding to the target, with t0 < t* <= t1.
+
+    . Because the "deltatimes" differ between processes, we
+    may have t0 < t1 < t*, in which case this is an extrapolation.
+
+    . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
+    contain, for each target, true if t* > the last t1, plus the recvcount
+    and the MPI_Datatype needed to finalize the messages at the end.
+  */
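+
+  /*
+    Interpolation formula in the linear case (an editorial illustration,
+    assuming LinearTimeInterpolator implements standard linear interpolation) :
+      v(t*) = v(t0) + ( v(t1) - v(t0) ) * ( t* - t0 ) / ( t1 - t0 )
+    applied componentwise to the two DataMessages ; when t0 < t1 < t* the same
+    formula extrapolates and (*_out_of_time)[target] is set to true.
+  */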
+
+  /*!
+    Sends a TimeMessage to every target of the IntraCommunicator and receives
+    the TimeMessages from the targets if necessary.
+
+    Sends sendcount data values from sendbuf[offset], with type sendtype, to
+    every target of the IntraCommunicator.
+    Returns recvcount data values in recvbuf[offset], with type recvtype, after
+    an interpolation of the data received from every target of the
+    IntraCommunicator.
+  */
+ int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype )
+ {
+ int sts = 0;
+ int target ;
+ int sendoffset = 0 ;
+ int SendTimeRequestId ;
+ int SendDataRequestId ;
+
+ if ( _time_interpolator == NULL )
+ {
+ return MPI_ERR_OTHER ;
+ }
+
+    //Freeing of SendBuffers
+ if ( _asynchronous )
+ {
+ checkSent() ;
+ }
+
+ //DoSend : Time + SendBuff
+ SendBuffStruct * aSendTimeStruct = NULL ;
+ SendBuffStruct * aSendDataStruct = NULL ;
+ if ( sendbuf && sendcount )
+ {
+ TimeMessage * aSendTimeMessage = new TimeMessage ;
+ if ( _asynchronous )
+ {
+ aSendTimeStruct = new SendBuffStruct ;
+ aSendTimeStruct->SendBuffer = aSendTimeMessage ;
+ aSendTimeStruct->Counter = 0 ;
+ aSendTimeStruct->DataType = _MPI_access->timeType() ;
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ aSendTimeMessage->time = _t ;
+ aSendTimeMessage->deltatime = _dt ;
+ for ( target = 0 ; target < _group_size ; target++ )
+ {
+ sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
+ SendTimeRequestId ) ;
+ sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ;
+ if ( _asynchronous )
+ {
+ aSendTimeStruct->Counter += 1 ;
+ (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
+ aSendDataStruct->Counter += 1 ;
+ (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
+ }
+ sendoffset += sendcount ;
+ }
+ if ( !_asynchronous )
+ {
+ delete aSendTimeMessage ;
+ if ( sendtype == MPI_INT )
+ {
+ delete [] (int *) sendbuf ;
+ }
+ else
+ {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ }
+
+ //CheckTime + DoRecv + DoInterp
+ if ( recvbuf && recvcount )
+ {
+ for ( target = 0 ; target < _group_size ; target++ )
+ {
+ int recvsize = (int)(recvcount*_MPI_access->extent( recvtype ));
+ checkTime( recvcount , recvtype , target , false ) ;
+ //===========================================================================
+          //TODO : it is currently assumed that we have only 1 timestep before and after
+ //===========================================================================
+ if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
+ {
+ if ( (*_out_of_time)[target] )
+ {
+ cout << " =====================================================" << endl
+ << "Recv" << _my_rank << " <-- target " << target << " t0 "
+ << (*_time_messages)[target][0].time << " < t1 "
+ << (*_time_messages)[target][1].time << " < t* " << _t << endl
+ << " =====================================================" << endl ;
+ }
+ if ( recvtype == MPI_INT )
+ {
+ _time_interpolator->doInterp( (*_time_messages)[target][0].time,
+ (*_time_messages)[target][1].time, _t, recvcount ,
+ _n_step_before, _n_step_after,
+ (int **) &(*_data_messages)[target][0],
+ (int **) &(*_data_messages)[target][1],
+ &((int *)recvbuf)[target*recvcount] ) ;
+ }
+ else
+ {
+ _time_interpolator->doInterp( (*_time_messages)[target][0].time,
+ (*_time_messages)[target][1].time, _t, recvcount ,
+ _n_step_before, _n_step_after,
+ (double **) &(*_data_messages)[target][0],
+ (double **) &(*_data_messages)[target][1],
+ &((double *)recvbuf)[target*recvcount] ) ;
+ }
+ }
+ else
+ {
+ char * buffdest = (char *) recvbuf ;
+ char * buffsrc = (char *) (*_data_messages)[target][1] ;
+ memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
+ }
+ }
+ }
+
+ return sts ;
+ }
+
+ int MPIAccessDEC::allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls,
+ MPI_Datatype recvtype )
+ {
+ int sts = 0;
+ int target ;
+ int SendTimeRequestId ;
+ int SendDataRequestId ;
+
+ if ( _time_interpolator == NULL )
+ {
+ return MPI_ERR_OTHER ;
+ }
+
+    //Freeing of SendBuffers
+ if ( _asynchronous )
+ {
+ checkSent() ;
+ }
+
+    /*
+      . DoSend :
+      + We create a TimeMessage (see that structure in MPI_Access).
+      + If we are in asynchronous mode, we create two SendBuffStruct
+      structures, aSendTimeStruct and aSendDataStruct, and fill them.
+      + We fill the structure aSendTimeMessage with the time/deltatime of
+      the current process. "deltatime" must be zero if this is the last
+      time step.
+      + After that, for each "target", we send the TimeMessage and the part
+      of sendbuf corresponding to that target.
+      + If we are in asynchronous mode, we increment the counters and add
+      aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the
+      identifiers SendTimeRequestId and SendDataRequestId returned by
+      MPI_Access->Send(...).
+      + And if we are in synchronous mode we delete the SendMessages.
+    */
+ //DoSend : Time + SendBuff
+ SendBuffStruct * aSendTimeStruct = NULL ;
+ SendBuffStruct * aSendDataStruct = NULL ;
+ if ( sendbuf )
+ {
+ TimeMessage * aSendTimeMessage = new TimeMessage ;
+ if ( _asynchronous )
+ {
+ aSendTimeStruct = new SendBuffStruct ;
+ aSendTimeStruct->SendBuffer = aSendTimeMessage ;
+ aSendTimeStruct->Counter = 0 ;
+ aSendTimeStruct->DataType = _MPI_access->timeType() ;
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ aSendTimeMessage->time = _t ;
+ aSendTimeMessage->deltatime = _dt ;
+ for ( target = 0 ; target < _group_size ; target++ )
+ {
+ if ( sendcounts[target] )
+ {
+ sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
+ SendTimeRequestId ) ;
+ sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
+ SendDataRequestId ) ;
+ if ( _asynchronous )
+ {
+ aSendTimeStruct->Counter += 1 ;
+ (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
+ aSendDataStruct->Counter += 1 ;
+ (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
+ }
+ }
+ }
+ if ( !_asynchronous )
+ {
+ delete aSendTimeMessage ;
+ if ( sendtype == MPI_INT )
+ {
+ delete [] (int *) sendbuf ;
+ }
+ else
+ {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ }
+
+    /*
+      . CheckTime + DoRecv + DoInterp
+      + For each target we call CheckTime.
+      + If there is a TimeInterpolator and the TimeMessage of the target
+      is not the first one, we call the interpolator, which returns its
+      results in the part of the recv buffer corresponding to the "target".
+      + If not, the data received for that first time step are copied
+      into the part of the recv buffer corresponding to the "target".
+    */
+ //CheckTime + DoRecv + DoInterp
+ if ( recvbuf )
+ {
+ for ( target = 0 ; target < _group_size ; target++ )
+ {
+ if ( recvcounts[target] )
+ {
+ int recvsize = (int)(recvcounts[target]*_MPI_access->extent( recvtype ));
+ checkTime( recvcounts[target] , recvtype , target , false ) ;
+ //===========================================================================
+            //TODO : it is currently assumed that we have only 1 timestep before and after
+ //===========================================================================
+ if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
+ {
+ if ( (*_out_of_time)[target] )
+ {
+ cout << " =====================================================" << endl
+ << "Recv" << _my_rank << " <-- target " << target << " t0 "
+ << (*_time_messages)[target][0].time << " < t1 "
+ << (*_time_messages)[target][1].time << " < t* " << _t << endl
+ << " =====================================================" << endl ;
+ }
+ if ( recvtype == MPI_INT )
+ {
+ _time_interpolator->doInterp( (*_time_messages)[target][0].time,
+ (*_time_messages)[target][1].time, _t,
+ recvcounts[target] , _n_step_before, _n_step_after,
+ (int **) &(*_data_messages)[target][0],
+ (int **) &(*_data_messages)[target][1],
+ &((int *)recvbuf)[rdispls[target]] ) ;
+ }
+ else
+ {
+ _time_interpolator->doInterp( (*_time_messages)[target][0].time,
+ (*_time_messages)[target][1].time, _t,
+ recvcounts[target] , _n_step_before, _n_step_after,
+ (double **) &(*_data_messages)[target][0],
+ (double **) &(*_data_messages)[target][1],
+ &((double *)recvbuf)[rdispls[target]] ) ;
+ }
+ }
+ else
+ {
+ char * buffdest = (char *) recvbuf ;
+ char * buffsrc = (char *) (*_data_messages)[target][1] ;
+ memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc ,
+ recvsize ) ;
+ }
+ }
+ }
+ }
+
+ return sts ;
+ }
+
+  /*
+    . CheckTime(recvcount , recvtype , target , UntilEnd)
+    + At the beginning, we read the first TimeMessage in
+    &(*_TimeMessages)[target][1] and the first DataMessage
+    in the allocated buffer (*_DataMessages)[target][1].
+    + The deltatime of a TimeMessage must be zero if it is the last one.
+    + While loop : _t(t*) is the current time of the process.
+    "while _t(t*) is greater than the time of the "target"
+    (*_TimeMessages)[target][1].time and
+    (*_TimeMessages)[target][1].deltatime is not zero",
+    so at the end of the while loop we have :
+    _t(t*) <= (*_TimeMessages)[target][1].time with
+    _t(t*) > (*_TimeMessages)[target][0].time
+    or we have the last TimeMessage of the "target".
+    + If this is the finalization of the recv of the TimeMessages and
+    DataMessages (UntilEnd value is true), we execute the while loop
+    until (*_TimeMessages)[target][1].deltatime is zero.
+    + In the while loop :
+    we copy the last TimeMessage into the previous TimeMessage and
+    read a new TimeMessage ;
+    we delete the previous DataMessage ;
+    we copy the last DataMessage pointer into the previous one ;
+    we allocate a new last DataMessage buffer
+    (*_DataMessages)[target][1] and read the corresponding
+    data into that buffer.
+    + If the current time of the current process is greater than the
+    last time (*_TimeMessages)[target][1].time of the target, we set
+    (*_OutOfTime)[target] to true ;
+    (*_TimeMessages)[target][1].deltatime is then zero.
+  */
+ int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target ,
+ bool UntilEnd )
+ {
+ int sts = MPI_SUCCESS ;
+ int RecvTimeRequestId ;
+ int RecvDataRequestId ;
+    //For now we look for _time_messages[target][0] < _t <= _time_messages[target][1]
+    //===========================================================================
+    //TODO : it is currently assumed that we have only 1 timestep before and after,
+    //       instead of _n_step_before and _n_step_after ...
+ //===========================================================================
+ (*_data_messages_recv_count)[target] = recvcount ;
+ (*_data_messages_type)[target] = recvtype ;
+ if ( (*_time_messages)[target][1].time == -1 )
+ {
+ (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
+ sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
+ target , RecvTimeRequestId ) ;
+ (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
+ if ( recvtype == MPI_INT )
+ {
+ (*_data_messages)[target][1] = new int[recvcount] ;
+ }
+ else
+ {
+ (*_data_messages)[target][1] = new double[recvcount] ;
+ }
+ sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
+ RecvDataRequestId ) ;
+ }
+ else
+ {
+ while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) &&
+ (*_time_messages)[target][1].deltatime != 0 )
+ {
+ (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
+ sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
+ target , RecvTimeRequestId ) ;
+ if ( UntilEnd )
+ {
+ cout << "CheckTime" << _my_rank << " TimeMessage target " << target
+ << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag "
+ << _MPI_access->recvMPITag(target) << endl ;
+ }
+ if ( recvtype == MPI_INT )
+ {
+ delete [] (int *) (*_data_messages)[target][0] ;
+ }
+ else
+ {
+ delete [] (double *) (*_data_messages)[target][0] ;
+ }
+ (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
+ if ( recvtype == MPI_INT )
+ {
+ (*_data_messages)[target][1] = new int[recvcount] ;
+ }
+ else
+ {
+ (*_data_messages)[target][1] = new double[recvcount] ;
+ }
+ sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
+ RecvDataRequestId ) ;
+ if ( UntilEnd )
+ {
+ cout << "CheckTime" << _my_rank << " DataMessage target " << target
+ << " RecvDataRequestId " << RecvDataRequestId << " MPITag "
+ << _MPI_access->recvMPITag(target) << endl ;
+ }
+ }
+
+        if ( !( _t > (*_time_messages)[target][0].time &&
+                _t <= (*_time_messages)[target][1].time ) )
+          {
+            (*_out_of_time)[target] = true ;
+          }
+ }
+ return sts ;
+ }
+
+  /*
+    . CheckSent() :
+    + calls SendRequestIds of MPI_Access in order to get all
+    RequestIds of the SendMessages to all "targets".
+    + For each RequestId, CheckSent calls "Test" of MPI_Access in order
+    to know if the buffer is "free" (flag = true). If it is the
+    FinalCheckSent (WithWait = true), we call Wait instead of Test.
+    + If the buffer is "free", the counter of the SendBuffStruct
+    (from _MapOfSendBuffers) is decremented.
+    + If that counter reaches zero, we delete the TimeMessage or the
+    SendBuffer according to the DataType.
+    + And we delete the SendBuffStruct before erasing that item
+    of _MapOfSendBuffers.
+  */
+ int MPIAccessDEC::checkSent(bool WithWait)
+ {
+ int sts = MPI_SUCCESS ;
+ int flag = WithWait ;
+ int size = _MPI_access->sendRequestIdsSize() ;
+ int * ArrayOfSendRequests = new int[ size ] ;
+ int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ;
+ bool SendTrace = false ;
+ int i ;
+ for ( i = 0 ; i < nSendRequest ; i++ )
+ {
+ if ( WithWait )
+ {
+ if (SendTrace)
+ {
+ cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
+ << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
+ << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
+ << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
+ }
+ sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
+ }
+ else
+ {
+ sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ;
+ }
+ if ( flag )
+ {
+ _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ;
+ if ( SendTrace )
+ {
+ cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
+ << " SendRequestId " << ArrayOfSendRequests[i]
+ << " flag " << flag
+ << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
+ << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
+ << endl ;
+ }
+ (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
+ if ( SendTrace )
+ {
+ if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
+ _MPI_access->timeType() )
+ {
+ cout << "CheckTimeSent" << _my_rank << " Request " ;
+ }
+ else
+ {
+ cout << "CheckDataSent" << _my_rank << " Request " ;
+ }
+ cout << ArrayOfSendRequests[i]
+ << " _map_of_send_buffers->SendBuffer "
+ << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
+ << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
+ << endl ;
+ }
+ if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter == 0 )
+ {
+ if ( SendTrace )
+ {
+ cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i]
+ << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
+ << " flag " << flag << " SendBuffer "
+ << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
+ << " deleted. Erase in _map_of_send_buffers :" << endl ;
+ }
+ if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
+ _MPI_access->timeType() )
+ {
+ delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
+ }
+ else
+ {
+ if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT )
+ {
+ delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
+ }
+ else
+ {
+ delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
+ }
+ }
+ delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ;
+ }
+ if ( SendTrace )
+ {
+ cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId "
+ << ArrayOfSendRequests[i] << endl ;
+ }
+ (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ;
+ }
+ else if ( SendTrace )
+ {
+ cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
+ << " SendRequestId " << ArrayOfSendRequests[i]
+ << " flag " << flag
+ << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
+ << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
+ << endl ;
+ }
+ }
+ if ( SendTrace )
+ {
+ _MPI_access->check() ;
+ }
+ delete [] ArrayOfSendRequests ;
+ return sts ;
+ }
+
+ int MPIAccessDEC::checkFinalRecv()
+ {
+ int sts = MPI_SUCCESS ;
+ if ( _time_interpolator )
+ {
+ int target ;
+ for ( target = 0 ; target < _group_size ; target++ )
+ {
+ if ( (*_data_messages)[target][0] != NULL )
+ {
+ sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] ,
+ target , true ) ;
+ if ( (*_data_messages_type)[target] == MPI_INT )
+ {
+ delete [] (int *) (*_data_messages)[target][0] ;
+ }
+ else
+ {
+ delete [] (double *) (*_data_messages)[target][0] ;
+ }
+ (*_data_messages)[target][0] = NULL ;
+ if ( (*_data_messages)[target][1] != NULL )
+ {
+ if ( (*_data_messages_type)[target] == MPI_INT )
+ {
+ delete [] (int *) (*_data_messages)[target][1] ;
+ }
+ else
+ {
+ delete [] (double *) (*_data_messages)[target][1] ;
+ }
+ (*_data_messages)[target][1] = NULL ;
+ }
+ }
+ }
+ }
+ return sts ;
+ }
+
+ ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod )
+ {
+ switch (interpolationmethod)
+ {
+ case WithoutTimeInterp :
+ f << " WithoutTimeInterpolation ";
+ break;
+ case LinearTimeInterp :
+ f << " LinearTimeInterpolation ";
+ break;
+ default :
+ f << " UnknownTimeInterpolation ";
+ break;
+ }
+
+ return f;
+ }
+}
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __MPIACCESSDEC_HXX__
+#define __MPIACCESSDEC_HXX__
+
+#include "DEC.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+#include <map>
+#include <iostream>
+#include "MPIAccess.hxx"
+
+namespace MEDCoupling
+{
+ /*
+ * Internal class, not part of the public API.
+ *
+ * Another gateway to the MPI library?
+ */
+ class MPIAccessDEC
+ {
+ public:
+ MPIAccessDEC( const ProcessorGroup& local_group, const ProcessorGroup& distant_group,
+ bool Asynchronous = true );
+ virtual ~MPIAccessDEC();
+ MPIAccess * getMPIAccess() { return _MPI_access; }
+ const MPI_Comm* getComm() { return _MPI_union_group->getComm(); }
+ void asynchronous( bool Asynchronous = true ) { _asynchronous = Asynchronous; }
+ void setTimeInterpolator( TimeInterpolationMethod anInterp , double InterpPrecision=0 ,
+                             int nStepBefore=1, int nStepAfter=1 );
+
+ void setTime( double t ) { _t = t; _dt = -1; }
+ void setTime( double t , double dt ) { _t = t; _dt = dt; }
+ bool outOfTime( int target ) { return (*_out_of_time)[target]; }
+
+ int send( void* sendbuf, int sendcount , MPI_Datatype sendtype , int target );
+ int recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target );
+ int recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ,
+ int &RecvRequestId , bool Asynchronous=false );
+ int sendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype , int target );
+
+ int allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype );
+ int allToAllv( void* sendbuf, int* sendcounts, int* sdispls, MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls, MPI_Datatype recvtype );
+
+ int allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype );
+ int allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls,
+ MPI_Datatype recvtype );
+ int checkTime( int recvcount , MPI_Datatype recvtype , int target , bool UntilEnd );
+ int checkSent(bool WithWait=false);
+ int checkFinalSent() { return checkSent( true ); }
+ int checkFinalRecv();
+ protected:
+ int send( void* sendbuf, int sendcount , int sendoffset , MPI_Datatype sendtype ,
+ int target, int &SendRequestId );
+ int recv( void* recvbuf, int recvcount , int recvoffset , MPI_Datatype recvtype ,
+ int target, int &RecvRequestId );
+ int sendRecv( void* sendbuf, int sendcount , int sendoffset ,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , int recvoffset ,
+ MPI_Datatype recvtype , int target ,
+ int &SendRequestId ,int &RecvRequestId );
+ private :
+ bool _asynchronous;
+ MPIProcessorGroup* _MPI_union_group;
+
+ TimeInterpolator* _time_interpolator;
+ int _n_step_before;
+ int _n_step_after;
+
+ int _my_rank;
+ int _group_size;
+ MPIAccess* _MPI_access;
+
+ // Current time and deltatime of current process
+ double _t;
+ double _dt;
+
+ // TimeMessages from each target _TimeMessages[target][Step] : TimeMessage
+ std::vector< std::vector< TimeMessage > > *_time_messages;
+ // Corresponding DataMessages from each target _DataMessages[target][~TimeStep]
+ std::vector< bool >* _out_of_time;
+ std::vector< int >* _data_messages_recv_count;
+ std::vector< MPI_Datatype >* _data_messages_type;
+ std::vector< std::vector< void * > >* _data_messages;
+
+    typedef struct
+    {
+      void * SendBuffer;
+      int Counter;
+      MPI_Datatype DataType;
+    } SendBuffStruct;
+ std::map< int , SendBuffStruct * > *_map_of_send_buffers;
+ };
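+
+  /* Typical use (an editorial sketch ; the ProcessorGroups are assumed to be
+     built elsewhere, e.g. two MPIProcessorGroups sharing the same world
+     communicator) :
+
+       MPIAccessDEC dec( source_group , target_group , true ) ; // asynchronous
+       dec.setTimeInterpolator( LinearTimeInterp ) ;
+       dec.setTime( t , dt ) ;
+       dec.allToAllTime( sendbuf , n , MPI_DOUBLE , recvbuf , n , MPI_DOUBLE ) ;
+       ...
+       dec.checkFinalSent() ;  // wait for pending sends, free send buffers
+       dec.checkFinalRecv() ;  // drain the remaining time/data messages
+  */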
+
+ inline int MPIAccessDEC::send( void* sendbuf, int sendcount , MPI_Datatype sendtype , int target )
+ {
+ int SendRequestId;
+ int sts;
+ if ( _asynchronous )
+ {
+ sts = _MPI_access->ISend( sendbuf , sendcount , sendtype , target ,
+ SendRequestId );
+ }
+ else
+ {
+ sts = _MPI_access->send( sendbuf , sendcount , sendtype , target ,
+ SendRequestId );
+ if ( sts == MPI_SUCCESS )
+ free( sendbuf );
+ }
+ return sts;
+ }
+
+ inline int MPIAccessDEC::recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target )
+ {
+ int RecvRequestId;
+ int sts;
+ if ( _asynchronous )
+ sts = _MPI_access->IRecv( recvbuf , recvcount , recvtype , target , RecvRequestId );
+ else
+ sts = _MPI_access->recv( recvbuf , recvcount , recvtype , target , RecvRequestId );
+ return sts;
+ }
+
+ inline int MPIAccessDEC::recv( void* recvbuf, int recvcount , MPI_Datatype recvtype ,
+ int target , int &RecvRequestId , bool Asynchronous )
+ {
+ int sts;
+ if ( Asynchronous )
+ sts = _MPI_access->IRecv( recvbuf , recvcount , recvtype , target ,
+ RecvRequestId );
+ else
+ sts = _MPI_access->recv( recvbuf , recvcount , recvtype , target ,
+ RecvRequestId );
+ return sts;
+ }
+
+ inline int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype ,
+ int target )
+ {
+ int SendRequestId;
+ int RecvRequestId;
+ int sts;
+ if ( _asynchronous )
+ sts = _MPI_access->ISendRecv( sendbuf , sendcount , sendtype , target ,
+ SendRequestId ,
+ recvbuf , recvcount , recvtype , target ,
+ RecvRequestId );
+ else
+ sts = _MPI_access->sendRecv( sendbuf , sendcount , sendtype , target ,
+ SendRequestId ,
+ recvbuf , recvcount , recvtype , target ,
+ RecvRequestId );
+ return sts;
+ }
+
+ std::ostream & operator<< (std::ostream &,const TimeInterpolationMethod &);
+}
+
+#endif
--- /dev/null
+MPIAccess is not used anywhere in ParaMEDMEM or the rest of MEDCoupling.
+Keep it here in case we find a use for it later ...
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include "MPIAccessDEC.hxx"
-
-#include <cstring>
-
-using namespace std;
-
-namespace MEDCoupling
-{
-
- /*!
- This constructor creates an MPIAccessDEC which has \a source_group as a working side
- and \a target_group as an idle side.
- The constructor must be called synchronously on all processors of both processor groups.
-
- \param source_group working side ProcessorGroup
- \param target_group lazy side ProcessorGroup
- \param Asynchronous Communication mode (default asynchronous)
- */
- MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
- const ProcessorGroup& target_group,
- bool Asynchronous )
- {
-
- ProcessorGroup * union_group = source_group.fuse(target_group) ;
- int i ;
- std::set<int> procs;
- for ( i = 0 ; i < union_group->size() ; i++ )
- {
- procs.insert(i) ;
- }
- MPIProcessorGroup *mpilg = static_cast<MPIProcessorGroup *>(const_cast<ProcessorGroup *>(&source_group));
- _MPI_union_group = new MEDCoupling::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
- delete union_group ;
- _my_rank = _MPI_union_group->myRank() ;
- _group_size = _MPI_union_group->size() ;
- _MPI_access = new MPIAccess( _MPI_union_group ) ;
- _asynchronous = Asynchronous ;
- _time_messages = new vector< vector< TimeMessage > > ;
- _time_messages->resize( _group_size ) ;
- _out_of_time = new vector< bool > ;
- _out_of_time->resize( _group_size ) ;
- _data_messages_recv_count = new vector< int > ;
- _data_messages_recv_count->resize( _group_size ) ;
- for ( i = 0 ; i < _group_size ; i++ )
- {
- (*_out_of_time)[i] = false ;
- (*_data_messages_recv_count)[i] = 0 ;
- }
- _data_messages_type = new vector< MPI_Datatype > ;
- _data_messages_type->resize( _group_size ) ;
- _data_messages = new vector< vector< void * > > ;
- _data_messages->resize( _group_size ) ;
- _time_interpolator = NULL ;
- _map_of_send_buffers = new map< int , SendBuffStruct * > ;
- }
-
- MPIAccessDEC::~MPIAccessDEC()
- {
- checkFinalSent() ;
- checkFinalRecv() ;
- delete _MPI_union_group ;
- delete _MPI_access ;
- if ( _time_interpolator )
- delete _time_interpolator ;
- if ( _time_messages )
- delete _time_messages ;
- if ( _out_of_time )
- delete _out_of_time ;
- if ( _data_messages_recv_count )
- delete _data_messages_recv_count ;
- if ( _data_messages_type )
- delete _data_messages_type ;
- if ( _data_messages )
- delete _data_messages ;
- if ( _map_of_send_buffers )
- delete _map_of_send_buffers ;
- }
-
- void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
- double InterpPrecision, int nStepBefore,
- int nStepAfter )
- {
- if ( _time_interpolator )
- delete _time_interpolator ;
- switch ( aTimeInterp )
- {
- case WithoutTimeInterp :
- _time_interpolator = NULL ;
- _n_step_before = 0 ;
- _n_step_after = 0 ;
- break ;
- case LinearTimeInterp :
- _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
- nStepAfter ) ;
- _n_step_before = nStepBefore ;
- _n_step_after = nStepAfter ;
- int i ;
- for ( i = 0 ; i < _group_size ; i++ )
- {
- (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
- (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
- int j ;
- for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ )
- {
- (*_time_messages)[ i ][ j ].time = -1 ;
- (*_time_messages)[ i ][ j ].deltatime = -1 ;
- (*_data_messages)[ i ][ j ] = NULL ;
- }
- }
- break ;
- }
- }
-
- /*!
- Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
- (Internal Protected method)
-
- Returns the request identifier SendRequestId
-
- */
- int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset ,
- MPI_Datatype sendtype , int target , int &SendRequestId )
- {
- int sts ;
- if ( _asynchronous )
- {
- if ( sendtype == MPI_INT )
- {
- sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
- target , SendRequestId ) ;
- }
- else if ( sendtype == MPI_LONG )
- {
- sts = _MPI_access->ISend( &((long *) sendbuf)[offset] , sendcount , sendtype ,
- target , SendRequestId ) ;
- }
- else
- {
- sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
- target , SendRequestId ) ;
- }
- }
- else
- {
- if ( sendtype == MPI_INT )
- {
- sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
- target , SendRequestId ) ;
- }
- else if ( sendtype == MPI_LONG )
- {
- sts = _MPI_access->send( &((long *) sendbuf)[offset] , sendcount , sendtype ,
- target , SendRequestId ) ;
- }
- else
- {
- sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
- target , SendRequestId ) ;
- }
- }
- return sts ;
- }
-
- /*!
- Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
- (Internal Protected method)
-
- Returns the request identifier RecvRequestId
-
- */
- int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset ,
- MPI_Datatype recvtype , int target , int &RecvRequestId )
- {
- int sts ;
- if ( _asynchronous )
- {
- if ( recvtype == MPI_INT )
- {
- sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
- target , RecvRequestId ) ;
- }
- else if ( recvtype == MPI_LONG )
- {
- sts = _MPI_access->IRecv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
- target , RecvRequestId ) ;
- }
- else
- {
- sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
- target , RecvRequestId ) ;
- }
- }
- else
- {
- if ( recvtype == MPI_INT )
- {
- sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
- target , RecvRequestId ) ;
- }
- else if ( recvtype == MPI_LONG )
- {
- sts = _MPI_access->recv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
- target , RecvRequestId ) ;
- }
- else
- {
- sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
- target , RecvRequestId ) ;
- }
- }
- return sts ;
- }
-
- /*!
- Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
- Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
- (Internal Protected method)
-
- Returns the request identifier SendRequestId
- Returns the request identifier RecvRequestId
-
- */
- int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset ,
- MPI_Datatype sendtype ,
- void* recvbuf, int recvcount , int recvoffset ,
- MPI_Datatype recvtype , int target ,
- int &SendRequestId , int &RecvRequestId )
- {
- int sts ;
- if ( _asynchronous )
- {
- if ( sendtype == MPI_INT )
- {
- if ( recvtype == MPI_INT )
- {
- sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((int *) recvbuf)[recvoffset] , recvcount ,
- recvtype , target , RecvRequestId ) ;
- }
- else
- {
- sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((double *) recvbuf)[recvoffset] ,
- recvcount , recvtype , target , RecvRequestId ) ;
- }
- }
- else
- {
- if ( recvtype == MPI_INT )
- {
- sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((int *) recvbuf)[recvoffset] ,
- recvcount , recvtype , target , RecvRequestId ) ;
- }
- else
- {
- sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((double *) recvbuf)[recvoffset] ,
- recvcount , recvtype , target , RecvRequestId ) ;
- }
- }
- }
- else
- {
- if ( sendtype == MPI_INT )
- {
- if ( recvtype == MPI_INT )
- {
- sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((int *) recvbuf)[recvoffset] , recvcount ,
- recvtype , target , RecvRequestId ) ;
- }
- else
- {
- sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((double *) recvbuf)[recvoffset] ,
- recvcount , recvtype , target , RecvRequestId ) ;
- }
- }
- else
- {
- if ( recvtype == MPI_INT )
- {
- sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((int *) recvbuf)[recvoffset] ,
- recvcount , recvtype , target , RecvRequestId ) ;
- }
- else
- {
- sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
- sendtype , target , SendRequestId ,
- &((double *) recvbuf)[recvoffset] ,
- recvcount , recvtype , target , RecvRequestId ) ;
- }
- }
- }
- return sts ;
- }
-
- /*!
- Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
- Receive recvcount datas to recvbuf[offset] with type recvtype from all targets of IntraCommunicator
-
- */
- int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
- void* recvbuf, int recvcount, MPI_Datatype recvtype )
- {
- if ( _time_interpolator )
- {
- return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
- }
- int sts = 0;
- int target ;
- int sendoffset = 0 ;
- int recvoffset = 0 ;
- int SendRequestId ;
- int RecvRequestId ;
-
- //Free of SendBuffers
- if ( _asynchronous )
- checkSent() ;
-
- //DoSend + DoRecv : SendRecv
- SendBuffStruct * aSendDataStruct = NULL ;
- if ( _asynchronous && sendbuf )
- {
- aSendDataStruct = new SendBuffStruct ;
- aSendDataStruct->SendBuffer = sendbuf ;
- aSendDataStruct->Counter = 0 ;
- aSendDataStruct->DataType = sendtype ;
- }
- for ( target = 0 ; target < _group_size ; target++ )
- {
- sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype ,
- recvbuf , recvcount , recvoffset , recvtype ,
- target , SendRequestId , RecvRequestId ) ;
- if ( _asynchronous && sendbuf && sendcount )
- {
- aSendDataStruct->Counter += 1 ;
- (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
- }
- sendoffset += sendcount ;
- recvoffset += recvcount ;
- }
- if ( !_asynchronous && sendbuf )
- {
- if ( sendtype == MPI_INT )
- {
- delete [] (int *) sendbuf ;
- }
- else
- {
- delete [] (double *) sendbuf ;
- }
- }
- return sts ;
- }
-
- /*!
- Send sendcounts[target] datas from sendbuf[sdispls[target]] with type sendtype to all targets of IntraCommunicator
- Receive recvcounts[target] datas to recvbuf[rdispls[target]] with type recvtype from all targets of IntraCommunicator
-
- */
- int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls,
- MPI_Datatype sendtype ,
- void* recvbuf, int* recvcounts, int* rdispls,
- MPI_Datatype recvtype )
- {
- if ( _time_interpolator )
- {
- return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
- recvbuf, recvcounts, rdispls, recvtype ) ;
- }
- int sts = 0;
- int target ;
- int SendRequestId ;
- int RecvRequestId ;
-
- //Free of SendBuffers
- if ( _asynchronous )
- {
- checkSent() ;
- }
-
- //DoSend + DoRecv : SendRecv
- SendBuffStruct * aSendDataStruct = NULL ;
- if ( _asynchronous && sendbuf )
- {
- aSendDataStruct = new SendBuffStruct ;
- aSendDataStruct->SendBuffer = sendbuf ;
- aSendDataStruct->Counter = 0 ;
- aSendDataStruct->DataType = sendtype ;
- }
- for ( target = 0 ; target < _group_size ; target++ )
- {
- if ( sendcounts[target] || recvcounts[target] )
- {
- sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
- recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
- target , SendRequestId , RecvRequestId ) ;
- if ( _asynchronous && sendbuf && sendcounts[target])
- {
- aSendDataStruct->Counter += 1 ;
- (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
- }
- }
- }
- if ( !_asynchronous && sendbuf )
- {
- if ( sendtype == MPI_INT )
- {
- delete [] (int *) sendbuf ;
- }
- else
- {
- delete [] (double *) sendbuf ;
- }
- }
- return sts ;
- }
-
- /*
- MPIAccessDEC and the management of SendBuffers :
- =================================================
-
- . In the collective communications collectives we send only parts of
- the same buffer to each "target". So in asynchronous mode it is
- necessary that all parts are free before to delete/free the
- buffer.
-
- . We assume that buffers are allocated with a new double[]. so a
- delete [] is done.
-
- . The structure SendBuffStruct permit to keep the address of the buffer
- and to manage a reference counter of that buffer. It contains
- also MPI_Datatype for the delete [] (double *) ... when the counter
- is null.
-
- . The map _MapOfSendBuffers establish the correspondence between each
- RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
- for each "target" of a part of the buffer.
-
- . All that concerns only asynchronous Send. In synchronous mode,
- we delete senbuf just after the Send.
- */
-
- /*
- MPIAccessDEC and the management of RecvBuffers :
- =================================================
-
- If there is no interpolation, no special action is done.
-
- With interpolation for each target :
- ------------------------------------
- . We have _time_messages[target] which is a vector of TimesMessages.
- We have 2 TimesMessages in our case with a linear interpolation.
- They contain the previous time(t0)/deltatime and the last
- time(t1)/deltatime.
-
- . We have _data_messages[target] which is a vector of DatasMessages.
- We have 2 DatasMessages in our case with a linear interpolation.
- They contain the previous datas at time(t0)/deltatime and at last
- time(t1)/deltatime.
-
- . At time _t(t*) of current processus we do the interpolation of
- the values of the 2 DatasMessages which are returned in the part of
- recvbuf corresponding to the target with t0 < t* <= t1.
-
- . Because of the difference of "deltatimes" between processes, we
- may have t0 < t1 < t* and there is an extrapolation.
-
- . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
- contain for each target true if t* > last t1, recvcount and
- MPI_Datatype for the finalize of messages at the end.
- */
-
- /*!
- Send a TimeMessage to all targets of IntraCommunicator
- Receive the TimeMessages from targets of IntraCommunicator if necessary.
-
- Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
- Returns recvcount datas to recvbuf[offset] with type recvtype after an interpolation
- with datas received from all targets of IntraCommunicator.
-
- */
- int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
- void* recvbuf, int recvcount , MPI_Datatype recvtype )
- {
- int sts = 0;
- int target ;
- int sendoffset = 0 ;
- int SendTimeRequestId ;
- int SendDataRequestId ;
-
- if ( _time_interpolator == NULL )
- {
- return MPI_ERR_OTHER ;
- }
-
- //Free of SendBuffers
- if ( _asynchronous )
- {
- checkSent() ;
- }
-
- //DoSend : Time + SendBuff
- SendBuffStruct * aSendTimeStruct = NULL ;
- SendBuffStruct * aSendDataStruct = NULL ;
- if ( sendbuf && sendcount )
- {
- TimeMessage * aSendTimeMessage = new TimeMessage ;
- if ( _asynchronous )
- {
- aSendTimeStruct = new SendBuffStruct ;
- aSendTimeStruct->SendBuffer = aSendTimeMessage ;
- aSendTimeStruct->Counter = 0 ;
- aSendTimeStruct->DataType = _MPI_access->timeType() ;
- aSendDataStruct = new SendBuffStruct ;
- aSendDataStruct->SendBuffer = sendbuf ;
- aSendDataStruct->Counter = 0 ;
- aSendDataStruct->DataType = sendtype ;
- }
- aSendTimeMessage->time = _t ;
- aSendTimeMessage->deltatime = _dt ;
- for ( target = 0 ; target < _group_size ; target++ )
- {
- sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
- SendTimeRequestId ) ;
- sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ;
- if ( _asynchronous )
- {
- aSendTimeStruct->Counter += 1 ;
- (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
- aSendDataStruct->Counter += 1 ;
- (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
- }
- sendoffset += sendcount ;
- }
- if ( !_asynchronous )
- {
- delete aSendTimeMessage ;
- if ( sendtype == MPI_INT )
- {
- delete [] (int *) sendbuf ;
- }
- else
- {
- delete [] (double *) sendbuf ;
- }
- }
- }
-
- //CheckTime + DoRecv + DoInterp
- if ( recvbuf && recvcount )
- {
- for ( target = 0 ; target < _group_size ; target++ )
- {
- int recvsize = (int)(recvcount*_MPI_access->extent( recvtype ));
- checkTime( recvcount , recvtype , target , false ) ;
- //===========================================================================
- //TODO : it is assumed actually that we have only 1 timestep before and after
- //===========================================================================
- if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
- {
- if ( (*_out_of_time)[target] )
- {
- cout << " =====================================================" << endl
- << "Recv" << _my_rank << " <-- target " << target << " t0 "
- << (*_time_messages)[target][0].time << " < t1 "
- << (*_time_messages)[target][1].time << " < t* " << _t << endl
- << " =====================================================" << endl ;
- }
- if ( recvtype == MPI_INT )
- {
- _time_interpolator->doInterp( (*_time_messages)[target][0].time,
- (*_time_messages)[target][1].time, _t, recvcount ,
- _n_step_before, _n_step_after,
- (int **) &(*_data_messages)[target][0],
- (int **) &(*_data_messages)[target][1],
- &((int *)recvbuf)[target*recvcount] ) ;
- }
- else
- {
- _time_interpolator->doInterp( (*_time_messages)[target][0].time,
- (*_time_messages)[target][1].time, _t, recvcount ,
- _n_step_before, _n_step_after,
- (double **) &(*_data_messages)[target][0],
- (double **) &(*_data_messages)[target][1],
- &((double *)recvbuf)[target*recvcount] ) ;
- }
- }
- else
- {
- char * buffdest = (char *) recvbuf ;
- char * buffsrc = (char *) (*_data_messages)[target][1] ;
- memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
- }
- }
- }
-
- return sts ;
- }
-
- int MPIAccessDEC::allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
- MPI_Datatype sendtype ,
- void* recvbuf, int* recvcounts, int* rdispls,
- MPI_Datatype recvtype )
- {
- int sts = 0;
- int target ;
- int SendTimeRequestId ;
- int SendDataRequestId ;
-
- if ( _time_interpolator == NULL )
- {
- return MPI_ERR_OTHER ;
- }
-
- //Free of SendBuffers
- if ( _asynchronous )
- {
- checkSent() ;
- }
-
- /*
- . DoSend :
- + We create a TimeMessage (look at that structure in MPI_Access).
- + If we are in asynchronous mode, we create two structures SendBuffStruct
- aSendTimeStruct and aSendDataStruct that we fill.
- + We fill the structure aSendTimeMessage with time/deltatime of
- the current process. "deltatime" must be nul if it is the last step of
- Time.
- + After that for each "target", we Send the TimeMessage and the part
- of sendbuf corresponding to that target.
- + If we are in asynchronous mode, we increment the counter and we add
- aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the
- identifiers SendTimeRequestId and SendDataRequestId returned by
- MPI_Access->Send(...).
- + And if we are in synchronous mode we delete the SendMessages.
- */
- //DoSend : Time + SendBuff
- SendBuffStruct * aSendTimeStruct = NULL ;
- SendBuffStruct * aSendDataStruct = NULL ;
- if ( sendbuf )
- {
- TimeMessage * aSendTimeMessage = new TimeMessage ;
- if ( _asynchronous )
- {
- aSendTimeStruct = new SendBuffStruct ;
- aSendTimeStruct->SendBuffer = aSendTimeMessage ;
- aSendTimeStruct->Counter = 0 ;
- aSendTimeStruct->DataType = _MPI_access->timeType() ;
- aSendDataStruct = new SendBuffStruct ;
- aSendDataStruct->SendBuffer = sendbuf ;
- aSendDataStruct->Counter = 0 ;
- aSendDataStruct->DataType = sendtype ;
- }
- aSendTimeMessage->time = _t ;
- aSendTimeMessage->deltatime = _dt ;
- for ( target = 0 ; target < _group_size ; target++ )
- {
- if ( sendcounts[target] )
- {
- sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
- SendTimeRequestId ) ;
- sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
- SendDataRequestId ) ;
- if ( _asynchronous )
- {
- aSendTimeStruct->Counter += 1 ;
- (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
- aSendDataStruct->Counter += 1 ;
- (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
- }
- }
- }
- if ( !_asynchronous )
- {
- delete aSendTimeMessage ;
- if ( sendtype == MPI_INT )
- {
- delete [] (int *) sendbuf ;
- }
- else
- {
- delete [] (double *) sendbuf ;
- }
- }
- }
-
- /*
- . CheckTime + DoRecv + DoInterp
- + For each target we call CheckTime
- + If there is a TimeInterpolator and if the TimeMessage of the target
- is not the first, we call the interpolator which return its
- results in the part of the recv buffer corresponding to the "target".
- + If not, there is a copy of received datas for that first step of time
- in the part of the recv buffer corresponding to the "target".
- */
- //CheckTime + DoRecv + DoInterp
- if ( recvbuf )
- {
- for ( target = 0 ; target < _group_size ; target++ )
- {
- if ( recvcounts[target] )
- {
- int recvsize = (int)(recvcounts[target]*_MPI_access->extent( recvtype ));
- checkTime( recvcounts[target] , recvtype , target , false ) ;
- //===========================================================================
- //TODO : it is assumed actually that we have only 1 timestep before nad after
- //===========================================================================
- if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
- {
- if ( (*_out_of_time)[target] )
- {
- cout << " =====================================================" << endl
- << "Recv" << _my_rank << " <-- target " << target << " t0 "
- << (*_time_messages)[target][0].time << " < t1 "
- << (*_time_messages)[target][1].time << " < t* " << _t << endl
- << " =====================================================" << endl ;
- }
- if ( recvtype == MPI_INT )
- {
- _time_interpolator->doInterp( (*_time_messages)[target][0].time,
- (*_time_messages)[target][1].time, _t,
- recvcounts[target] , _n_step_before, _n_step_after,
- (int **) &(*_data_messages)[target][0],
- (int **) &(*_data_messages)[target][1],
- &((int *)recvbuf)[rdispls[target]] ) ;
- }
- else
- {
- _time_interpolator->doInterp( (*_time_messages)[target][0].time,
- (*_time_messages)[target][1].time, _t,
- recvcounts[target] , _n_step_before, _n_step_after,
- (double **) &(*_data_messages)[target][0],
- (double **) &(*_data_messages)[target][1],
- &((double *)recvbuf)[rdispls[target]] ) ;
- }
- }
- else
- {
- char * buffdest = (char *) recvbuf ;
- char * buffsrc = (char *) (*_data_messages)[target][1] ;
- memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc ,
- recvsize ) ;
- }
- }
- }
- }
-
- return sts ;
- }
-
-  /*
-    . CheckTime(recvcount , recvtype , target , UntilEnd)
-    + At the beginning, we read the first TimeMessage into
-    &(*_TimeMessages)[target][1] and the first DataMessage
-    into the allocated buffer (*_DataMessages)[target][1].
-    + The deltatime of a TimeMessage must be zero if it is the last one.
-    + While loop : _t(t*) is the current time of the process.
-    We loop "while _t(t*) is greater than the time of the "target"
-    (*_TimeMessages)[target][1].time and
-    (*_TimeMessages)[target][1].deltatime is not zero",
-    so at the end of the while loop we have :
-    _t(t*) <= (*_TimeMessages)[target][1].time with
-    _t(t*) > (*_TimeMessages)[target][0].time
-    or we have the last TimeMessage of the "target".
-    + If this is the finalization of the recv of TimeMessages and
-    DataMessages (UntilEnd value is true), we execute the while loop
-    until (*_TimeMessages)[target][1].deltatime is zero.
-    + In the while loop :
-    We copy the last TimeMessage into the previous TimeMessage and
-    we read a new TimeMessage.
-    We delete the previous DataMessage.
-    We copy the last DataMessage pointer into the previous one.
-    We allocate a new last DataMessage buffer
-    (*_DataMessages)[target][1] and we read the corresponding
-    data into that buffer.
-    + If the current time of the current process is greater than the
-    last time (*_TimeMessages)[target][1].time of the target, we set
-    (*_OutOfTime)[target] to true.
-    (*_TimeMessages)[target][1].deltatime is then zero.
-  */
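-  // Worked example : if the target advances by deltatime = 1.0 and the
-  // current time is _t = 2.5, the while loop receives TimeMessages until
-  // (*_TimeMessages)[target][0].time = 2.0 and
-  // (*_TimeMessages)[target][1].time = 3.0, i.e. t0 < _t <= t1.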
- int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target ,
- bool UntilEnd )
- {
- int sts = MPI_SUCCESS ;
- int RecvTimeRequestId ;
- int RecvDataRequestId ;
-    //For now we look for _time_messages[target][0].time < _t <= _time_messages[target][1].time
- //===========================================================================
-    //TODO : it is currently assumed that we have only 1 timestep before and after
- // instead of _n_step_before and _n_step_after ...
- //===========================================================================
- (*_data_messages_recv_count)[target] = recvcount ;
- (*_data_messages_type)[target] = recvtype ;
- if ( (*_time_messages)[target][1].time == -1 )
- {
- (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
- sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
- target , RecvTimeRequestId ) ;
- (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
- if ( recvtype == MPI_INT )
- {
- (*_data_messages)[target][1] = new int[recvcount] ;
- }
- else
- {
- (*_data_messages)[target][1] = new double[recvcount] ;
- }
- sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
- RecvDataRequestId ) ;
- }
- else
- {
- while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) &&
- (*_time_messages)[target][1].deltatime != 0 )
- {
- (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
- sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
- target , RecvTimeRequestId ) ;
- if ( UntilEnd )
- {
- cout << "CheckTime" << _my_rank << " TimeMessage target " << target
- << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag "
- << _MPI_access->recvMPITag(target) << endl ;
- }
- if ( recvtype == MPI_INT )
- {
- delete [] (int *) (*_data_messages)[target][0] ;
- }
- else
- {
- delete [] (double *) (*_data_messages)[target][0] ;
- }
- (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
- if ( recvtype == MPI_INT )
- {
- (*_data_messages)[target][1] = new int[recvcount] ;
- }
- else
- {
- (*_data_messages)[target][1] = new double[recvcount] ;
- }
- sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
- RecvDataRequestId ) ;
- if ( UntilEnd )
- {
- cout << "CheckTime" << _my_rank << " DataMessage target " << target
- << " RecvDataRequestId " << RecvDataRequestId << " MPITag "
- << _MPI_access->recvMPITag(target) << endl ;
- }
- }
-
-        if ( !( _t > (*_time_messages)[target][0].time &&
-                _t <= (*_time_messages)[target][1].time ) )
-          {
-            (*_out_of_time)[target] = true ;
-          }
- }
- return sts ;
- }
-
-  /*
-    . CheckSent() :
-    + Calls SendRequestIds of MPI_Access in order to get all
-    RequestIds of the SendMessages of all "targets".
-    + For each RequestId, CheckSent calls "Test" of MPI_Access in order
-    to know if the buffer is "free" (flag = true). If it is the
-    FinalCheckSent (WithWait = true), we call Wait instead of Test.
-    + If the buffer is "free", the counter of the SendBuffStruct
-    structure (from _MapOfSendBuffers) is decremented.
-    + If that counter reaches zero, we delete the TimeMessage or the
-    SendBuffer according to the DataType.
-    + Then we delete the SendBuffStruct structure before erasing that
-    item from _MapOfSendBuffers.
-  */
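-  // Example : a buffer shared by N send requests is registered N times in
-  // _MapOfSendBuffers with Counter = N ; each completed request decrements
-  // Counter, and the buffer is deleted only when Counter reaches zero.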
- int MPIAccessDEC::checkSent(bool WithWait)
- {
- int sts = MPI_SUCCESS ;
- int flag = WithWait ;
- int size = _MPI_access->sendRequestIdsSize() ;
- int * ArrayOfSendRequests = new int[ size ] ;
- int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ;
- bool SendTrace = false ;
- int i ;
- for ( i = 0 ; i < nSendRequest ; i++ )
- {
- if ( WithWait )
- {
- if (SendTrace)
- {
- cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
- << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
- << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
- << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
- }
- sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
- }
- else
- {
- sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ;
- }
- if ( flag )
- {
- _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ;
- if ( SendTrace )
- {
- cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
- << " SendRequestId " << ArrayOfSendRequests[i]
- << " flag " << flag
- << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
- << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
- << endl ;
- }
- (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
- if ( SendTrace )
- {
- if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
- _MPI_access->timeType() )
- {
- cout << "CheckTimeSent" << _my_rank << " Request " ;
- }
- else
- {
- cout << "CheckDataSent" << _my_rank << " Request " ;
- }
- cout << ArrayOfSendRequests[i]
- << " _map_of_send_buffers->SendBuffer "
- << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
- << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
- << endl ;
- }
- if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter == 0 )
- {
- if ( SendTrace )
- {
- cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i]
- << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
- << " flag " << flag << " SendBuffer "
- << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
- << " deleted. Erase in _map_of_send_buffers :" << endl ;
- }
- if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
- _MPI_access->timeType() )
- {
- delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
- }
- else
- {
- if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT )
- {
- delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
- }
- else
- {
- delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
- }
- }
- delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ;
- }
- if ( SendTrace )
- {
- cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId "
- << ArrayOfSendRequests[i] << endl ;
- }
- (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ;
- }
- else if ( SendTrace )
- {
- cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
- << " SendRequestId " << ArrayOfSendRequests[i]
- << " flag " << flag
- << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
- << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
- << endl ;
- }
- }
- if ( SendTrace )
- {
- _MPI_access->check() ;
- }
- delete [] ArrayOfSendRequests ;
- return sts ;
- }
-
- int MPIAccessDEC::checkFinalRecv()
- {
- int sts = MPI_SUCCESS ;
- if ( _time_interpolator )
- {
- int target ;
- for ( target = 0 ; target < _group_size ; target++ )
- {
- if ( (*_data_messages)[target][0] != NULL )
- {
- sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] ,
- target , true ) ;
- if ( (*_data_messages_type)[target] == MPI_INT )
- {
- delete [] (int *) (*_data_messages)[target][0] ;
- }
- else
- {
- delete [] (double *) (*_data_messages)[target][0] ;
- }
- (*_data_messages)[target][0] = NULL ;
- if ( (*_data_messages)[target][1] != NULL )
- {
- if ( (*_data_messages_type)[target] == MPI_INT )
- {
- delete [] (int *) (*_data_messages)[target][1] ;
- }
- else
- {
- delete [] (double *) (*_data_messages)[target][1] ;
- }
- (*_data_messages)[target][1] = NULL ;
- }
- }
- }
- }
- return sts ;
- }
-
- ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod )
- {
- switch (interpolationmethod)
- {
- case WithoutTimeInterp :
- f << " WithoutTimeInterpolation ";
- break;
- case LinearTimeInterp :
- f << " LinearTimeInterpolation ";
- break;
- default :
- f << " UnknownTimeInterpolation ";
- break;
- }
-
- return f;
- }
-}
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#ifndef __MPIACCESSDEC_HXX__
-#define __MPIACCESSDEC_HXX__
-
-#include "MPIAccess.hxx"
-#include "DEC.hxx"
-#include "LinearTimeInterpolator.hxx"
-
-#include <map>
-#include <iostream>
-
-namespace MEDCoupling
-{
-  /*
-   * Internal class, not part of the public API.
-   *
-   * Time-aware layer on top of MPIAccess : exchanges data between a local
-   * and a distant ProcessorGroup, with optional time interpolation.
-   */
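-  /*
-   * Minimal usage sketch (illustration only, based on the API below) :
-   *
-   *   MPIAccessDEC dec( local_group , distant_group , true ) ; // asynchronous
-   *   dec.setTimeInterpolator( LinearTimeInterp ) ;
-   *   dec.setTime( t , dt ) ;
-   *   dec.allToAllTime( sendbuf , sendcount , MPI_DOUBLE ,
-   *                     recvbuf , recvcount , MPI_DOUBLE ) ;
-   *   dec.checkFinalSent() ;
-   *   dec.checkFinalRecv() ;
-   */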
- class MPIAccessDEC
- {
- public:
- MPIAccessDEC( const ProcessorGroup& local_group, const ProcessorGroup& distant_group,
- bool Asynchronous = true );
- virtual ~MPIAccessDEC();
- MPIAccess * getMPIAccess() { return _MPI_access; }
- const MPI_Comm* getComm() { return _MPI_union_group->getComm(); }
- void asynchronous( bool Asynchronous = true ) { _asynchronous = Asynchronous; }
- void setTimeInterpolator( TimeInterpolationMethod anInterp , double InterpPrecision=0 ,
- int n_step_before=1, int nStepAfter=1 );
-
- void setTime( double t ) { _t = t; _dt = -1; }
- void setTime( double t , double dt ) { _t = t; _dt = dt; }
- bool outOfTime( int target ) { return (*_out_of_time)[target]; }
-
- int send( void* sendbuf, int sendcount , MPI_Datatype sendtype , int target );
- int recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target );
- int recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ,
- int &RecvRequestId , bool Asynchronous=false );
- int sendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
- void* recvbuf, int recvcount , MPI_Datatype recvtype , int target );
-
- int allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
- void* recvbuf, int recvcount, MPI_Datatype recvtype );
- int allToAllv( void* sendbuf, int* sendcounts, int* sdispls, MPI_Datatype sendtype ,
- void* recvbuf, int* recvcounts, int* rdispls, MPI_Datatype recvtype );
-
- int allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
- void* recvbuf, int recvcount , MPI_Datatype recvtype );
- int allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
- MPI_Datatype sendtype ,
- void* recvbuf, int* recvcounts, int* rdispls,
- MPI_Datatype recvtype );
- int checkTime( int recvcount , MPI_Datatype recvtype , int target , bool UntilEnd );
- int checkSent(bool WithWait=false);
- int checkFinalSent() { return checkSent( true ); }
- int checkFinalRecv();
- protected:
- int send( void* sendbuf, int sendcount , int sendoffset , MPI_Datatype sendtype ,
- int target, int &SendRequestId );
- int recv( void* recvbuf, int recvcount , int recvoffset , MPI_Datatype recvtype ,
- int target, int &RecvRequestId );
- int sendRecv( void* sendbuf, int sendcount , int sendoffset ,
- MPI_Datatype sendtype ,
- void* recvbuf, int recvcount , int recvoffset ,
- MPI_Datatype recvtype , int target ,
- int &SendRequestId ,int &RecvRequestId );
- private :
- bool _asynchronous;
- MPIProcessorGroup* _MPI_union_group;
-
- TimeInterpolator* _time_interpolator;
- int _n_step_before;
- int _n_step_after;
-
- int _my_rank;
- int _group_size;
- MPIAccess* _MPI_access;
-
- // Current time and deltatime of current process
- double _t;
- double _dt;
-
-    // TimeMessages from each target : _time_messages[target][step]
-    std::vector< std::vector< TimeMessage > > *_time_messages;
-    std::vector< bool >* _out_of_time;
-    std::vector< int >* _data_messages_recv_count;
-    std::vector< MPI_Datatype >* _data_messages_type;
-    // Corresponding DataMessages from each target : _data_messages[target][~time step]
-    std::vector< std::vector< void * > >* _data_messages;
-
-    typedef struct
-      {
-        void * SendBuffer;
-        int Counter;
-        MPI_Datatype DataType;
-      } SendBuffStruct;
- std::map< int , SendBuffStruct * > *_map_of_send_buffers;
- };
-
- inline int MPIAccessDEC::send( void* sendbuf, int sendcount , MPI_Datatype sendtype , int target )
- {
- int SendRequestId;
- int sts;
- if ( _asynchronous )
- {
- sts = _MPI_access->ISend( sendbuf , sendcount , sendtype , target ,
- SendRequestId );
- }
- else
- {
- sts = _MPI_access->send( sendbuf , sendcount , sendtype , target ,
- SendRequestId );
- if ( sts == MPI_SUCCESS )
- free( sendbuf );
- }
- return sts;
- }
-
- inline int MPIAccessDEC::recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target )
- {
- int RecvRequestId;
- int sts;
- if ( _asynchronous )
- sts = _MPI_access->IRecv( recvbuf , recvcount , recvtype , target , RecvRequestId );
- else
- sts = _MPI_access->recv( recvbuf , recvcount , recvtype , target , RecvRequestId );
- return sts;
- }
-
- inline int MPIAccessDEC::recv( void* recvbuf, int recvcount , MPI_Datatype recvtype ,
- int target , int &RecvRequestId , bool Asynchronous )
- {
- int sts;
- if ( Asynchronous )
- sts = _MPI_access->IRecv( recvbuf , recvcount , recvtype , target ,
- RecvRequestId );
- else
- sts = _MPI_access->recv( recvbuf , recvcount , recvtype , target ,
- RecvRequestId );
- return sts;
- }
-
- inline int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
- void* recvbuf, int recvcount , MPI_Datatype recvtype ,
- int target )
- {
- int SendRequestId;
- int RecvRequestId;
- int sts;
- if ( _asynchronous )
- sts = _MPI_access->ISendRecv( sendbuf , sendcount , sendtype , target ,
- SendRequestId ,
- recvbuf , recvcount , recvtype , target ,
- RecvRequestId );
- else
- sts = _MPI_access->sendRecv( sendbuf , sendcount , sendtype , target ,
- SendRequestId ,
- recvbuf , recvcount , recvtype , target ,
- RecvRequestId );
- return sts;
- }
-
- std::ostream & operator<< (std::ostream &,const TimeInterpolationMethod &);
-}
-
-#endif
#include "Interpolation2D3D.txx"
#include "Interpolation2D1D.txx"
#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingFieldDouble.hxx"
#include "MEDCouplingNormalizedUnstructuredMesh.txx"
#include "InterpolationOptions.hxx"
#include "NormalizedUnstructuredMesh.hxx"
mpirun -np 2 xterm -e gdb src/ParaMEDMEMTest/TestMPIAccess
-
+[ABN-2021] MPIAccess is now isolated - it is only used in MxN_Mapping
+=====================================================================
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
+ADD_SUBDIRECTORY(MPIAccess) # maybe to delete one day ...
+
ADD_DEFINITIONS(${MPI_DEFINITIONS} ${CPPUNIT_DEFINITIONS})
INCLUDE_DIRECTORIES(
${CPPUNIT_INCLUDE_DIRS}
${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDLoader
${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM
+ ${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM/MPIAccess
${CMAKE_CURRENT_SOURCE_DIR}/../MEDLoader
${CMAKE_CURRENT_SOURCE_DIR}/../MEDCoupling
${CMAKE_CURRENT_SOURCE_DIR}/../ICoCo
ParaMEDMEMTest_FabienAPI.cxx
ParaMEDMEMTest_NonCoincidentDEC.cxx
ParaMEDMEMTest_OverlapDEC.cxx
- MPIAccessDECTest.cxx
- test_AllToAllDEC.cxx
- test_AllToAllvDEC.cxx
- test_AllToAllTimeDEC.cxx
- test_AllToAllvTimeDEC.cxx
- test_AllToAllvTimeDoubleDEC.cxx
- MPIAccessTest.cxx
- test_MPI_Access_Send_Recv.cxx
- test_MPI_Access_Cyclic_Send_Recv.cxx
- test_MPI_Access_SendRecv.cxx
- test_MPI_Access_ISend_IRecv.cxx
- test_MPI_Access_Cyclic_ISend_IRecv.cxx
- test_MPI_Access_ISendRecv.cxx
- test_MPI_Access_Probe.cxx
- test_MPI_Access_IProbe.cxx
- test_MPI_Access_Cancel.cxx
- test_MPI_Access_Send_Recv_Length.cxx
- test_MPI_Access_ISend_IRecv_Length.cxx
- test_MPI_Access_ISend_IRecv_Length_1.cxx
- test_MPI_Access_Time.cxx
- test_MPI_Access_Time_0.cxx
- test_MPI_Access_ISend_IRecv_BottleNeck.cxx
- )
+)
ADD_LIBRARY(ParaMEDMEMTest ${ParaMEDMEMTest_SOURCES})
SET_TARGET_PROPERTIES(ParaMEDMEMTest PROPERTIES COMPILE_FLAGS "")
SET(TestParaMEDMEM_SOURCES
TestParaMEDMEM.cxx
)
-SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} TestParaMEDMEM)
-
-SET(TestMPIAccessDEC_SOURCES
- TestMPIAccessDEC.cxx
- )
-SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} TestMPIAccessDEC)
-
-SET(TestMPIAccess_SOURCES
- TestMPIAccess.cxx
- )
-SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} TestMPIAccess)
+LIST(APPEND TESTSParaMEDMEM TestParaMEDMEM)
SET(test_perf_SOURCES
test_perf.cxx
)
-SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} test_perf)
+LIST(APPEND TESTSParaMEDMEM test_perf)
IF(MPI2_IS_OK)
SET(ParaMEDMEMTestMPI2_1_SOURCES
MPI2Connector.cxx
ParaMEDMEMTestMPI2_1.cxx
)
- SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} ParaMEDMEMTestMPI2_1)
+ LIST(APPEND TESTSParaMEDMEM ParaMEDMEMTestMPI2_1)
SET(ParaMEDMEMTestMPI2_2_SOURCES
MPI2Connector.cxx
ParaMEDMEMTestMPI2_2.cxx
)
- SET(TESTSParaMEDMEM ${TESTSParaMEDMEM} ParaMEDMEMTestMPI2_2)
+ LIST(APPEND TESTSParaMEDMEM ParaMEDMEMTestMPI2_2)
ENDIF(MPI2_IS_OK)
FOREACH(bintestparamem ${TESTSParaMEDMEM})
ADD_TEST(NAME TestParaMEDMEM_Proc5 COMMAND ${MPIEXEC} -np 5 ${_oversub_opt} $<TARGET_FILE:TestParaMEDMEM>)
SET_TESTS_PROPERTIES(TestParaMEDMEM_Proc5 PROPERTIES ENVIRONMENT "${tests_env}")
-ADD_TEST(NAME TestMPIAccess_Proc2 COMMAND ${MPIEXEC} -np 2 ${_oversub_opt} $<TARGET_FILE:TestMPIAccess>)
-SET_TESTS_PROPERTIES(TestMPIAccess_Proc2 PROPERTIES ENVIRONMENT "${tests_env}")
-ADD_TEST(NAME TestMPIAccess_Proc3 COMMAND ${MPIEXEC} -np 3 ${_oversub_opt} $<TARGET_FILE:TestMPIAccess>)
-SET_TESTS_PROPERTIES(TestMPIAccess_Proc3 PROPERTIES ENVIRONMENT "${tests_env}")
-
-ADD_TEST(NAME TestMPIAccessDEC_Proc4 COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} $<TARGET_FILE:TestMPIAccessDEC>)
-SET_TESTS_PROPERTIES(TestMPIAccessDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
-
# Installation rules
INSTALL(TARGETS ${TESTSParaMEDMEM} DESTINATION ${MEDCOUPLING_INSTALL_BINS})
SET(COMMON_HEADERS_HXX
MPIMainTest.hxx
- MPIAccessDECTest.hxx
- MPIAccessTest.hxx
ParaMEDMEMTest.hxx
MPI2Connector.hxx
)
--- /dev/null
+# Copyright (C) 2012-2020 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+ADD_DEFINITIONS(${MPI_DEFINITIONS} ${CPPUNIT_DEFINITIONS})
+
+INCLUDE_DIRECTORIES(
+ ${MPI_INCLUDE_DIRS}
+ ${CPPUNIT_INCLUDE_DIRS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/..
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../ParaMEDMEM
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../ParaMEDMEM/MPIAccess
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../MEDCoupling
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../INTERP_KERNEL
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../INTERP_KERNEL/Bases
+)
+
+SET(MPIAccessTest_SOURCES
+ MPIAccessDECTest.cxx
+ test_AllToAllDEC.cxx
+ test_AllToAllvDEC.cxx
+ test_AllToAllTimeDEC.cxx
+ test_AllToAllvTimeDEC.cxx
+ test_AllToAllvTimeDoubleDEC.cxx
+ MPIAccessTest.cxx
+ test_MPI_Access_Send_Recv.cxx
+ test_MPI_Access_Cyclic_Send_Recv.cxx
+ test_MPI_Access_SendRecv.cxx
+ test_MPI_Access_ISend_IRecv.cxx
+ test_MPI_Access_Cyclic_ISend_IRecv.cxx
+ test_MPI_Access_ISendRecv.cxx
+ test_MPI_Access_Probe.cxx
+ test_MPI_Access_IProbe.cxx
+ test_MPI_Access_Cancel.cxx
+ test_MPI_Access_Send_Recv_Length.cxx
+ test_MPI_Access_ISend_IRecv_Length.cxx
+ test_MPI_Access_ISend_IRecv_Length_1.cxx
+ test_MPI_Access_Time.cxx
+ test_MPI_Access_Time_0.cxx
+ test_MPI_Access_ISend_IRecv_BottleNeck.cxx
+ )
+
+ADD_LIBRARY(MPIAccessTest ${MPIAccessTest_SOURCES})
+SET_TARGET_PROPERTIES(MPIAccessTest PROPERTIES COMPILE_FLAGS "")
+TARGET_LINK_LIBRARIES(MPIAccessTest paramedmem ${CPPUNIT_LIBRARIES})
+INSTALL(TARGETS MPIAccessTest DESTINATION ${MEDCOUPLING_INSTALL_LIBS})
+
+SET(TESTSMPIAccess)
+SET(TestMPIAccessDEC_SOURCES
+ TestMPIAccessDEC.cxx
+ )
+LIST(APPEND TESTSMPIAccess TestMPIAccessDEC)
+
+SET(TestMPIAccess_SOURCES
+ TestMPIAccess.cxx
+ )
+LIST(APPEND TESTSMPIAccess TestMPIAccess)
+
+FOREACH(bintest ${TESTSMPIAccess})
+ ADD_EXECUTABLE(${bintest} ${${bintest}_SOURCES})
+ TARGET_LINK_LIBRARIES(${bintest} MPIAccessTest)
+ENDFOREACH()
+
+# Now add CMake tests
+SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env)
+
+# -- some tests require 2, 3, 4 or 5 procs --
+# MPICH does not support --oversubscribe:
+IF(NOT ${MPIEXEC_EXECUTABLE} MATCHES "mpich")
+ SET(_oversub_opt "--oversubscribe")
+ENDIF()
+
+ADD_TEST(NAME TestMPIAccess_Proc2 COMMAND ${MPIEXEC} -np 2 ${_oversub_opt} $<TARGET_FILE:TestMPIAccess>)
+SET_TESTS_PROPERTIES(TestMPIAccess_Proc2 PROPERTIES ENVIRONMENT "${tests_env}")
+ADD_TEST(NAME TestMPIAccess_Proc3 COMMAND ${MPIEXEC} -np 3 ${_oversub_opt} $<TARGET_FILE:TestMPIAccess>)
+SET_TESTS_PROPERTIES(TestMPIAccess_Proc3 PROPERTIES ENVIRONMENT "${tests_env}")
+
+ADD_TEST(NAME TestMPIAccessDEC_Proc4 COMMAND ${MPIEXEC} -np 4 ${_oversub_opt} $<TARGET_FILE:TestMPIAccessDEC>)
+SET_TESTS_PROPERTIES(TestMPIAccessDEC_Proc4 PROPERTIES ENVIRONMENT "${tests_env}")
+
+# Installation rules
+INSTALL(TARGETS ${TESTSMPIAccess} DESTINATION ${MEDCOUPLING_INSTALL_BINS})
+SET(COMMON_HEADERS_HXX
+ MPIAccessDECTest.hxx
+ MPIAccessTest.hxx
+)
+INSTALL(FILES ${COMMON_HEADERS_HXX} DESTINATION ${MEDCOUPLING_INSTALL_HEADERS})
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include <sstream>
+#include <cmath>
+
+#ifndef WIN32
+#include <unistd.h>
+#endif
+
+using namespace std;
+
+
+
+/*!
+ * Tool to remove temporary files.
+ * Allows automatic removal of temporary files in case of test failure.
+ */
+MPIAccessDECTest_TmpFilesRemover::~MPIAccessDECTest_TmpFilesRemover()
+{
+ set<string>::iterator it = myTmpFiles.begin();
+ for (; it != myTmpFiles.end(); it++) {
+ if (access((*it).data(), F_OK) == 0)
+ remove((*it).data());
+ }
+ myTmpFiles.clear();
+ //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
+}
+
+bool MPIAccessDECTest_TmpFilesRemover::Register(const string theTmpFile)
+{
+ return (myTmpFiles.insert(theTmpFile)).second;
+}
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef _MPIACCESSDECTEST_HXX_
+#define _MPIACCESSDECTEST_HXX_
+
+#include <cppunit/extensions/HelperMacros.h>
+
+#include <set>
+#include <string>
+#include <iostream>
+#include "mpi.h"
+
+// (ABN): too much text output in the MPIAccessTest - this renders
+// the analysis complicated:
+#define MPI_ACCESS_VERBOSE 0
+#define debugStream \
+ if (!MPI_ACCESS_VERBOSE) {} \
+ else std::cout
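+// Example : the statement below prints only when MPI_ACCESS_VERBOSE is 1 ;
+// otherwise the stream operands are not even evaluated :
+//   debugStream << "rank " << myrank << " step " << i << std::endl ;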
+
+class MPIAccessDECTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE( MPIAccessDECTest );
+ // CPPUNIT_TEST( test_AllToAllDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllDECAsynchronousPointToPoint ) ;
+ //CPPUNIT_TEST( test_AllToAllvDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvDECAsynchronousPointToPoint ) ;
+ //CPPUNIT_TEST( test_AllToAllTimeDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllTimeDECAsynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousNative ) ;
+ //CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvTimeDECAsynchronousPointToPoint ) ;
+ //CPPUNIT_TEST( test_AllToAllvTimeDoubleDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvTimeDoubleDECAsynchronousPointToPoint ) ;
+ CPPUNIT_TEST_SUITE_END();
+
+
+public:
+
+ MPIAccessDECTest():CppUnit::TestFixture(){}
+ ~MPIAccessDECTest(){}
+ void setUp(){}
+ void tearDown(){}
+ void test_AllToAllDECSynchronousPointToPoint() ;
+ void test_AllToAllDECAsynchronousPointToPoint() ;
+ void test_AllToAllvDECSynchronousPointToPoint() ;
+ void test_AllToAllvDECAsynchronousPointToPoint() ;
+ void test_AllToAllTimeDECSynchronousPointToPoint() ;
+ void test_AllToAllTimeDECAsynchronousPointToPoint() ;
+ void test_AllToAllvTimeDECSynchronousNative() ;
+ void test_AllToAllvTimeDECSynchronousPointToPoint() ;
+ void test_AllToAllvTimeDECAsynchronousPointToPoint() ;
+ void test_AllToAllvTimeDoubleDECSynchronousPointToPoint() ;
+ void test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() ;
+
+private:
+ void test_AllToAllDEC( bool Asynchronous ) ;
+ void test_AllToAllvDEC( bool Asynchronous ) ;
+ void test_AllToAllTimeDEC( bool Asynchronous ) ;
+ void test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) ;
+ void test_AllToAllvTimeDoubleDEC( bool Asynchronous ) ;
+ };
+
+// to automatically remove temporary files from disk
+class MPIAccessDECTest_TmpFilesRemover
+{
+public:
+ MPIAccessDECTest_TmpFilesRemover() {}
+ ~MPIAccessDECTest_TmpFilesRemover();
+ bool Register(const std::string theTmpFile);
+
+private:
+ std::set<std::string> myTmpFiles;
+};
+
+/*!
+ * Tool to print array to stream.
+ */
+template<class T>
+void MPIAccessDECTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
+{
+ stream << text << ": {";
+ if (length > 0) {
+ stream << array[0];
+ for (int i = 1; i < length; i++) {
+ stream << ", " << array[i];
+ }
+ }
+ stream << "}" << std::endl;
+}
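+// Example : MPIAccessDECTest_DumpArray( std::cout , recvbuf , 3 , "recvbuf" ) ;
+// would print : recvbuf: {recvbuf[0], recvbuf[1], recvbuf[2]}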
+
+#endif
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include <sstream>
+#include <cmath>
+
+#ifndef WIN32
+#include <unistd.h>
+#endif
+
+using namespace std;
+
+
+
+/*!
+ * Tool to remove temporary files.
+ * Allows automatic removal of temporary files in case of test failure.
+ */
+MPIAccessTest_TmpFilesRemover::~MPIAccessTest_TmpFilesRemover()
+{
+ set<string>::iterator it = myTmpFiles.begin();
+ for (; it != myTmpFiles.end(); it++) {
+ if (access((*it).data(), F_OK) == 0)
+ remove((*it).data());
+ }
+ myTmpFiles.clear();
+ //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
+}
+
+bool MPIAccessTest_TmpFilesRemover::Register(const string theTmpFile)
+{
+ return (myTmpFiles.insert(theTmpFile)).second;
+}
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef _MPIACCESSTEST_HXX_
+#define _MPIACCESSTEST_HXX_
+
+#include <cppunit/extensions/HelperMacros.h>
+
+#include <set>
+#include <string>
+#include <iostream>
+#include "mpi.h"
+
+// (ABN): too much text output in the MPIAccessTest - this renders
+// the analysis complicated:
+#define MPI_ACCESS_VERBOSE 0
+#define debugStream \
+ if (!MPI_ACCESS_VERBOSE) {} \
+ else std::cout
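+// Example : debugStream << "test" << myrank << " OK" << std::endl ;
+// compiles to a dead branch when MPI_ACCESS_VERBOSE is 0.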
+
+class MPIAccessTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE( MPIAccessTest );
+ CPPUNIT_TEST( test_MPI_Access_Send_Recv ) ;
+ CPPUNIT_TEST( test_MPI_Access_Cyclic_Send_Recv ) ;
+ CPPUNIT_TEST( test_MPI_Access_SendRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_Cyclic_ISend_IRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISendRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_Probe ) ;
+ CPPUNIT_TEST( test_MPI_Access_IProbe ) ;
+ CPPUNIT_TEST( test_MPI_Access_Cancel ) ;
+ CPPUNIT_TEST( test_MPI_Access_Send_Recv_Length ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length_1 ) ;
+ CPPUNIT_TEST( test_MPI_Access_Time ) ;
+ CPPUNIT_TEST( test_MPI_Access_Time_0 ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_BottleNeck ) ;
+ CPPUNIT_TEST_SUITE_END();
+
+
+public:
+
+ MPIAccessTest():CppUnit::TestFixture(){}
+ ~MPIAccessTest(){}
+ void setUp(){}
+ void tearDown(){}
+ void test_MPI_Access_Send_Recv() ;
+ void test_MPI_Access_Cyclic_Send_Recv() ;
+ void test_MPI_Access_SendRecv() ;
+ void test_MPI_Access_ISend_IRecv() ;
+ void test_MPI_Access_Cyclic_ISend_IRecv() ;
+ void test_MPI_Access_ISendRecv() ;
+ void test_MPI_Access_Probe() ;
+ void test_MPI_Access_IProbe() ;
+ void test_MPI_Access_Cancel() ;
+ void test_MPI_Access_Send_Recv_Length() ;
+ void test_MPI_Access_ISend_IRecv_Length() ;
+ void test_MPI_Access_ISend_IRecv_Length_1() ;
+ void test_MPI_Access_Time() ;
+ void test_MPI_Access_Time_0() ;
+ void test_MPI_Access_ISend_IRecv_BottleNeck() ;
+
+private:
+ };
+
+// to automatically remove temporary files from disk
+class MPIAccessTest_TmpFilesRemover
+{
+public:
+ MPIAccessTest_TmpFilesRemover() {}
+ ~MPIAccessTest_TmpFilesRemover();
+ bool Register(const std::string theTmpFile);
+
+private:
+ std::set<std::string> myTmpFiles;
+};
+
+/*!
+ * Tool to print array to stream.
+ */
+template<class T>
+void MPIAccessTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
+{
+ stream << text << ": {";
+ if (length > 0) {
+ stream << array[0];
+ for (int i = 1; i < length; i++) {
+ stream << ", " << array[i];
+ }
+ }
+ stream << "}" << std::endl;
+}
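+// Example : MPIAccessTest_DumpArray( std::cout , sendbuf , 3 , "sendbuf" ) ;
+// would print : sendbuf: {sendbuf[0], sendbuf[1], sendbuf[2]}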
+
+#endif
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+// --- include all MPIAccess Test
+//
+#include "MPIAccessTest.hxx"
+
+// --- Registers the fixture into the 'registry'
+
+CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessTest );
+
+// --- generic Main program from KERNEL_SRC/src/Basics/Test
+
+#include "MPIMainTest.hxx"
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+// --- include all MPIAccessDEC Test
+//
+#include "MPIAccessDECTest.hxx"
+
+// --- Registers the fixture into the 'registry'
+
+CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessDECTest );
+
+// --- generic Main program from KERNEL_SRC/src/Basics/Test
+
+#include "MPIMainTest.hxx"
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "../ParaMEDMEM/MPIAccess/MPIAccessDEC.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessDECTest::test_AllToAllDECSynchronousPointToPoint() {
+ test_AllToAllDEC( false ) ;
+}
+void MPIAccessDECTest::test_AllToAllDECAsynchronousPointToPoint() {
+ test_AllToAllDEC( true ) ;
+}
+
+static void chksts( int sts , int myrank , MEDCoupling::MPIAccess & mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+    debugStream << "test_AllToAllDEC" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "===========================================================" << endl
+ << "test_AllToAllDEC" << myrank << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) {
+
+ debugStream << "test_AllToAllDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be run with more than 1 proc and less than 12 procs"
+ << endl ;
+ cerr << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ debugStream << "test_AllToAllDEC" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+ MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+#define maxreq 100
+#define datamsglength 10
+
+ // int sts ;
+ int sendcount = datamsglength ;
+ int recvcount = datamsglength ;
+ int * recvbuf = new int[datamsglength*size] ;
+
+ int ireq ;
+ for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
+ int * sendbuf = new int[datamsglength*size] ;
+ int j ;
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
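+    // Each value encodes its origin : sending rank (millions), request
+    // number (thousands) and index in the buffer (units).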
+
+ MyMPIAccessDEC->allToAll( sendbuf, sendcount , MPI_INT ,
+ recvbuf, recvcount , MPI_INT ) ;
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ }
+
+ int nSendReq = mpi_access->sendRequestIdsSize() ;
+ debugStream << "test_AllToAllDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
+ << endl ;
+ if ( nSendReq ) {
+ int *ArrayOfSendRequests = new int[nSendReq] ;
+ int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
+ delete [] ArrayOfSendRequests ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ mpi_access->barrier() ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+ delete [] recvbuf ;
+
+ // MPI_Finalize();
+
+ debugStream << "test_AllToAllDEC" << myrank << " OK" << endl ;
+
+ return ;
+}
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "../ParaMEDMEM/MPIAccess/MPIAccessDEC.hxx"
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessDECTest::test_AllToAllTimeDECSynchronousPointToPoint() {
+ test_AllToAllTimeDEC( false ) ;
+}
+void MPIAccessDECTest::test_AllToAllTimeDECAsynchronousPointToPoint() {
+ test_AllToAllTimeDEC( true ) ;
+}
+
+static void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access->errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test_AllToAllTimeDEC" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+    strstream << "===========================================================" << endl
+              << "test_AllToAllTimeDEC" << myrank << " KO" << endl
+              << "==========================================================="
+              << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
+
+ debugStream << "test_AllToAllTimeDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be run with more than 1 proc and less than 12 procs"
+ << endl ;
+ cerr << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // int Asynchronous = atoi(argv[1]);
+
+ debugStream << "test_AllToAllTimeDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
+
+ MEDCoupling::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+ MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+
+ // LinearTimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+ // Asynchronous , LinearInterp , 0.5 ) ;
+ MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+ debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
+
+#define maxproc 11
+#define maxreq 10000
+#define datamsglength 10
+
+ int sts ;
+ int sendcount = datamsglength ;
+ int recvcount = datamsglength ;
+
+ double time = 0 ;
+ // double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+ double deltatime[maxproc] = {1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.} ;
+ double maxtime = maxreq ;
+ double nextdeltatime = deltatime[myrank] ;
+ // MyMPIAccessDEC->InitTime( time , deltatime[myrank] , maxtime ) ;
+ // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
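+  // Each process advances with its own step deltatime[myrank] ; when the
+  // next step would pass maxtime, nextdeltatime is set to 0, which marks
+  // the last TimeMessage and terminates the loop.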
+ for ( time = 0 ; time <= maxtime && nextdeltatime != 0 ; time+=nextdeltatime ) {
+ if ( time != 0 ) {
+ nextdeltatime = deltatime[myrank] ;
+ if ( time+nextdeltatime > maxtime ) {
+ nextdeltatime = 0 ;
+ }
+ // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
+ }
+ MyMPIAccessDEC->setTime( time , nextdeltatime ) ;
+ debugStream << "test_AllToAllTimeDEC" << myrank << "=====TIME " << time << "=====DELTATIME "
+ << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
+ int * sendbuf = new int[datamsglength*size] ;
+ // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
+ int * recvbuf = new int[datamsglength*size] ;
+ int j ;
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
+
+ sts = MyMPIAccessDEC->allToAllTime( sendbuf, sendcount , MPI_INT ,
+ recvbuf, recvcount , MPI_INT ) ;
+ chksts( sts , myrank , mpi_access ) ;
+
+ // debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf before CheckSent" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // debugStream << " " << recvbuf[i] ;
+ // }
+ // debugStream << endl ;
+
+ // debugStream << "test_AllToAllTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq != 0 ) {
+ ostringstream strstream ;
+ strstream << "=============================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
+ << endl << "============================================================="
+ << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf" << endl ;
+ bool badrecvbuf = false ;
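+    // Expected layout : block i/datamsglength of recvbuf comes from the
+    // sender of that rank and holds the slice of its sendbuf addressed
+    // to this process.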
+ for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ if ( recvbuf[i] != (i/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(i%datamsglength) ) {
+ badrecvbuf = true ;
+ debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
+ << recvbuf[i] << " # " << (i/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(i%datamsglength) << endl ;
+ }
+ else if ( badrecvbuf ) {
+ debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
+ << recvbuf[i] << " == " << (i/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(i%datamsglength) << endl ;
+ }
+ }
+ if ( badrecvbuf ) {
+ ostringstream strstream ;
+ strstream << "==============================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " badrecvbuf"
+ << endl << "============================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ delete [] recvbuf ;
+ }
+
+ debugStream << "test_AllToAllTimeDEC" << myrank << " final CheckSent" << endl ;
+ sts = MyMPIAccessDEC->checkSent() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "================================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " final CheckSent ERROR"
+ << endl << "================================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ int nSendReq = mpi_access->sendRequestIdsSize() ;
+ debugStream << "test_AllToAllTimeDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
+ << endl ;
+ if ( nSendReq ) {
+ int *ArrayOfSendRequests = new int[nSendReq] ;
+ int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
+ delete [] ArrayOfSendRequests ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "===============================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error"
+ << endl << "==============================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ // delete aLinearInterpDEC ;
+ delete MyMPIAccessDEC ;
+
+ // MPI_Finalize();
+
+ debugStream << "test_AllToAllTimeDEC" << myrank << " OK" << endl ;
+
+ return ;
+}
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "../ParaMEDMEM/MPIAccess/MPIAccessDEC.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessDECTest::test_AllToAllvDECSynchronousPointToPoint() {
+ test_AllToAllvDEC( false ) ;
+}
+void MPIAccessDECTest::test_AllToAllvDECAsynchronousPointToPoint() {
+ test_AllToAllvDEC( true ) ;
+}
+
+static void chksts( int sts , int myrank , MEDCoupling::MPIAccess & mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test_AllToAllvDEC" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+    strstream << "===========================================================" << endl
+              << "test_AllToAllvDEC" << myrank << " KO" << endl
+              << "==========================================================="
+              << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) {
+
+ debugStream << "test_AllToAllvDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllvDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be run with more than 1 proc and less than 12 procs"
+ << endl ;
+ cerr << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // int Asynchronous = atoi(argv[1]);
+
+ debugStream << "test_AllToAllvDEC" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+ MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+#define maxreq 100
+#define datamsglength 10
+
+ // int sts ;
+ int *sendcounts = new int[size] ;
+ int *sdispls = new int[size] ;
+ int *recvcounts = new int[size] ;
+ int *rdispls = new int[size] ;
+ for ( i = 0 ; i < size ; i++ ) {
+ sendcounts[i] = datamsglength-i;
+ sdispls[i] = i*datamsglength ;
+ recvcounts[i] = datamsglength-myrank;
+ rdispls[i] = i*datamsglength ;
+ }
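+  // Variable-length pattern : this process sends datamsglength-i values to
+  // target i, so each receiver gets datamsglength-myrank values per sender.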
+ int * recvbuf = new int[datamsglength*size] ;
+
+ int ireq ;
+ for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
+ int * sendbuf = new int[datamsglength*size] ;
+ // int * sendbuf = (int *) malloc( sizeof(int)*datamsglength*size) ;
+ int j ;
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
+
+ MyMPIAccessDEC->allToAllv( sendbuf, sendcounts , sdispls , MPI_INT ,
+ recvbuf, recvcounts , rdispls , MPI_INT ) ;
+
+ // debugStream << "test_AllToAllvDEC" << myrank << " recvbuf before CheckSent" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // debugStream << " " << recvbuf[i] ;
+ // }
+ // debugStream << endl ;
+
+ // debugStream << "test_AllToAllvDEC" << myrank << " sendbuf " << sendbuf << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ // debugStream << "test_AllToAllvDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests" << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+
+ // debugStream << "test_AllToAllvDEC" << myrank << " recvbuf" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // debugStream << " " << recvbuf[i] ;
+ // }
+ // debugStream << endl ;
+ }
+
+ // debugStream << "test_AllToAllvDEC" << myrank << " final CheckSent" << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ int nSendReq = mpi_access->sendRequestIdsSize() ;
+ debugStream << "test_AllToAllvDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
+ << endl ;
+ if ( nSendReq ) {
+ int *ArrayOfSendRequests = new int[nSendReq] ;
+ int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
+ delete [] ArrayOfSendRequests ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ mpi_access->barrier() ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+ delete [] sendcounts ;
+ delete [] sdispls ;
+ delete [] recvcounts ;
+ delete [] rdispls ;
+ delete [] recvbuf ;
+
+ // MPI_Finalize();
+
+ debugStream << "test_AllToAllvDEC" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+#include <ctime>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "../ParaMEDMEM/MPIAccess/MPIAccessDEC.hxx"
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails that expose known bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousNative() {
+ test_AllToAllvTimeDEC( false , true ) ;
+}
+void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousPointToPoint() {
+ test_AllToAllvTimeDEC( false , false ) ;
+}
+void MPIAccessDECTest::test_AllToAllvTimeDECAsynchronousPointToPoint() {
+ test_AllToAllvTimeDEC( true , false ) ;
+}
+
+static void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access->errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test_AllToAllvTimeDEC" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) {
+
+ debugStream << "test_AllToAllvTimeDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be run with more than 1 proc and less than 12 procs"
+ << endl ;
+ cerr << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // int Asynchronous = atoi(argv[1]) ;
+ int UseMPI_Alltoallv = UseMPINative ;
+ // if ( argc == 3 ) {
+ // UseMPI_Alltoallv = atoi(argv[2]) ;
+ // }
+
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " Asynchronous " << Asynchronous
+ << " UseMPI_Alltoallv " << UseMPI_Alltoallv << endl ;
+
+ MEDCoupling::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+ MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+
+ // TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+ // Asynchronous , LinearInterp , 0.5 ) ;
+ MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp , 0.5 ) ;
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
+
+#define maxproc 11
+#define maxreq 10000
+#define datamsglength 10
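+  // maxproc bounds the communicator size (checked above): deltatime below holds one step per rank.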
+
+ int sts ;
+ int *sendcounts = new int[size] ;
+ int *sdispls = new int[size] ;
+ int *recvcounts = new int[size] ;
+ int *rdispls = new int[size] ;
+ int *sendtimecounts = new int[size] ;
+ int *stimedispls = new int[size] ;
+ int *recvtimecounts = new int[size] ;
+ int *rtimedispls = new int[size] ;
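+  // One TimeMessage accompanies each data exchange: time counts are 1 per peer,
+  // with per-peer receive displacements.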
+ for ( i = 0 ; i < size ; i++ ) {
+ sendcounts[i] = datamsglength-i ;
+ sdispls[i] = i*datamsglength ;
+ recvcounts[i] = datamsglength-myrank ;
+ rdispls[i] = i*datamsglength ;
+ sendtimecounts[i] = 1 ;
+ stimedispls[i] = 0 ;
+ recvtimecounts[i] = 1 ;
+ rtimedispls[i] = i ;
+ //rtimedispls[i] = i*mpi_access->TimeExtent() ;
+ }
+
+ double timeLoc = 0 ;
+ double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+ double maxtime ;
+ double nextdeltatime = deltatime[myrank] ;
+ if ( UseMPI_Alltoallv ) {
+ maxtime = maxreq*nextdeltatime - 0.1 ;
+ }
+ else {
+ maxtime = maxreq ;
+ // MyMPIAccessDEC->InitTime( time , nextdeltatime , maxtime ) ;
+ }
+ time_t begintime = time(NULL) ;
+ // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
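+  // Each rank advances its own clock by deltatime[myrank]; a zero nextdeltatime
+  // flags the final step and terminates the loop at maxtime.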
+ for ( timeLoc = 0 ; timeLoc <= maxtime && nextdeltatime != 0 ; timeLoc+=nextdeltatime ) {
+    nextdeltatime = deltatime[myrank] ;
+    if ( timeLoc != 0 ) {
+      if ( timeLoc+nextdeltatime > maxtime ) {
+        nextdeltatime = 0 ;
+      }
+      // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
+    }
+ MyMPIAccessDEC->setTime( timeLoc , nextdeltatime ) ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << "=====TIME " << timeLoc << "=====DELTATIME "
+ << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
+ int * sendbuf = new int[datamsglength*size] ;
+ // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
+ int * recvbuf = new int[datamsglength*size] ;
+ int j ;
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
+
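+    // Native path: exchange a TimeMessage per peer, then the data, through raw
+    // MPI_Alltoallv via CommInterface; otherwise MPIAccessDEC::allToAllvTime
+    // carries the time stamps itself.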
+ if ( UseMPI_Alltoallv ) {
+ const MPI_Comm* comm = MyMPIAccessDEC->getComm();
+ TimeMessage * aSendTimeMessage = new TimeMessage ;
+ aSendTimeMessage->time = timeLoc ;
+ // aSendTimeMessage->deltatime = deltatime[myrank] ;
+ aSendTimeMessage->deltatime = nextdeltatime ;
+ // aSendTimeMessage->maxtime = maxtime ;
+ aSendTimeMessage->tag = (int ) (timeLoc/deltatime[myrank]) ;
+ TimeMessage * aRecvTimeMessage = new TimeMessage[size] ;
+ interface.allToAllV(aSendTimeMessage, sendtimecounts , stimedispls ,
+ mpi_access->timeType() ,
+ aRecvTimeMessage, recvtimecounts , rtimedispls ,
+ mpi_access->timeType() , *comm ) ;
+ // for ( j = 0 ; j < size ; j++ ) {
+ // debugStream << "test_AllToAllvTimeDEC" << myrank << " TimeMessage received " << j << " "
+ // << aRecvTimeMessage[j] << endl ;
+ // }
+ delete aSendTimeMessage ;
+ delete [] aRecvTimeMessage ;
+ interface.allToAllV(sendbuf, sendcounts , sdispls , MPI_INT ,
+ recvbuf, recvcounts , rdispls , MPI_INT , *comm ) ;
+ // free(sendbuf) ;
+ delete [] sendbuf ;
+ }
+ else {
+ int sts2 = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_INT ,
+ recvbuf, recvcounts , rdispls , MPI_INT ) ;
+ chksts( sts2 , myrank , mpi_access ) ;
+ }
+
+ // debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf before CheckSent" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // debugStream << " " << recvbuf[i] ;
+ // }
+ // debugStream << endl ;
+
+ // debugStream << "test_AllToAllvTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq != 0 ) {
+ ostringstream strstream ;
+ strstream << "=============================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
+ << endl << "============================================================="
+ << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // debugStream << "test_AllToAllvTimeDEC" << myrank << " check of recvbuf" << endl ;
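+    // Expected payload encodes the sender rank (x1000000), this rank (x1000) and
+    // the source-side index; slots beyond recvcounts must keep the -1 sentinel.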
+ bool badrecvbuf = false ;
+ for ( i = 0 ; i < size ; i++ ) {
+ for ( int jj = 0 ; jj < datamsglength ; jj++ ) {
+ int index = i*datamsglength+jj ;
+ if ( jj < recvcounts[i] ) {
+ if ( recvbuf[index] != (index/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(index%datamsglength) ) {
+ badrecvbuf = true ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " # " << (index/datamsglength)*1000000 +
+ myrank*1000 +
+ myrank*datamsglength+(index%datamsglength) << endl ;
+ }
+ else if ( badrecvbuf ) {
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " == " << (index/datamsglength)*1000000 +
+ myrank*1000 +
+ myrank*datamsglength+(index%datamsglength) << endl ;
+ }
+ }
+ else if ( recvbuf[index] != -1 ) {
+ badrecvbuf = true ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " # -1" << endl ;
+ }
+ }
+ }
+ if ( badrecvbuf ) {
+ ostringstream strstream ;
+ strstream << "==============================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " badrecvbuf"
+ << endl << "============================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ delete [] recvbuf ;
+ }
+
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
+
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " CheckFinalSent" << endl ;
+ sts = MyMPIAccessDEC->checkFinalSent() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "================================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " final CheckSent ERROR"
+ << endl << "================================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv" << endl ;
+ sts = MyMPIAccessDEC->checkFinalRecv() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "================================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv ERROR"
+ << endl << "================================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "===============================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error"
+ << endl << "==============================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ time_t endtime = time(NULL) ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+ // delete aLinearInterpDEC ;
+
+ delete [] sendcounts ;
+ delete [] sdispls ;
+ delete [] recvcounts ;
+ delete [] rdispls ;
+ delete [] sendtimecounts ;
+ delete [] stimedispls ;
+ delete [] recvtimecounts ;
+ delete [] rtimedispls ;
+
+ // MPI_Finalize();
+
+ endtime = time(NULL) ;
+
+ debugStream << "test_AllToAllvTimeDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <math.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+#include <ctime>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "../ParaMEDMEM/MPIAccess/MPIAccessDEC.hxx"
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails that expose known bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessDECTest::test_AllToAllvTimeDoubleDECSynchronousPointToPoint() {
+ test_AllToAllvTimeDoubleDEC( false ) ;
+}
+void MPIAccessDECTest::test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() {
+ test_AllToAllvTimeDoubleDEC( true ) ;
+}
+
+static void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access->errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
+
+ debugStream << "test_AllToAllvTimeDoubleDEC" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be run with more than 1 proc and less than 12 procs"
+ << endl ;
+ cerr << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+// int Asynchronous = atoi(argv[1]) ;
+
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
+
+ MEDCoupling::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+ MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+
+// TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0 ) ;
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+// Asynchronous , LinearInterp , 0.5 ) ;
+ MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+#define maxproc 11
+#define maxreq 100
+#define datamsglength 10
+
+ int sts ;
+ int *sendcounts = new int[size] ;
+ int *sdispls = new int[size] ;
+ int *recvcounts = new int[size] ;
+ int *rdispls = new int[size] ;
+ int *sendtimecounts = new int[size] ;
+ int *stimedispls = new int[size] ;
+ int *recvtimecounts = new int[size] ;
+ int *rtimedispls = new int[size] ;
+ for ( i = 0 ; i < size ; i++ ) {
+ sendcounts[i] = datamsglength-i ;
+ sdispls[i] = i*datamsglength ;
+ recvcounts[i] = datamsglength-myrank ;
+ rdispls[i] = i*datamsglength ;
+ sendtimecounts[i] = 1 ;
+ stimedispls[i] = 0 ;
+ recvtimecounts[i] = 1 ;
+ rtimedispls[i] = i ;
+ }
+
+ double timeLoc[maxproc] ;
+ double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+ double maxtime[maxproc] ;
+ double nextdeltatime[maxproc] ;
+ for ( i = 0 ; i < size ; i++ ) {
+ timeLoc[i] = 0 ;
+ maxtime[i] = maxreq ;
+ nextdeltatime[i] = deltatime[i] ;
+ }
+ time_t begintime = time(NULL) ;
+ for ( timeLoc[myrank] = 0 ; timeLoc[myrank] <= maxtime[myrank] && nextdeltatime[myrank] != 0 ;
+ timeLoc[myrank]+=nextdeltatime[myrank] ) {
+    // Advance the local clock and mirror every target rank's clock so the
+    // time stamps expected in recvbuf can be recomputed locally below.
+ int target ;
+ for ( target = 0 ; target < size ; target++ ) {
+ nextdeltatime[target] = deltatime[target] ;
+ if ( timeLoc[target] != 0 ) {
+ if ( timeLoc[target]+nextdeltatime[target] > maxtime[target] ) {
+ nextdeltatime[target] = 0 ;
+ }
+ }
+ if ( target != myrank ) {
+ while ( timeLoc[myrank] >= timeLoc[target] ) {
+ timeLoc[target] += deltatime[target] ;
+ }
+ }
+ }
+ MyMPIAccessDEC->setTime( timeLoc[myrank] , nextdeltatime[myrank] ) ;
+ debugStream << "test" << myrank << "=====TIME " << timeLoc[myrank] << "=====DELTATIME "
+ << nextdeltatime[myrank] << "=====MAXTIME " << maxtime[myrank] << " ======"
+ << endl ;
+ double * sendbuf = new double[datamsglength*size] ;
+// double * sendbuf = (double *) malloc(sizeof(double)*datamsglength*size) ;
+ double * recvbuf = new double[datamsglength*size] ;
+ int j ;
+ //debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " sendbuf" ;
+ for ( target = 0 ; target < size ; target++ ) {
+ for ( j = 0 ; j < datamsglength ; j++ ) {
+ //sendbuf[j] = myrank*10000 + (j/datamsglength)*100 + j ;
+ sendbuf[target*datamsglength+j] = myrank*1000000 + target*10000 +
+ (timeLoc[myrank]/deltatime[myrank])*100 + j ;
+ //debugStream << " " << (int ) sendbuf[target*datamsglength+j] ;
+ recvbuf[target*datamsglength+j] = -1 ;
+ }
+ //debugStream << endl ;
+ }
+
+ sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_DOUBLE ,
+ recvbuf, recvcounts , rdispls , MPI_DOUBLE ) ;
+ chksts( sts , myrank , mpi_access ) ;
+
+// debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf before CheckSent" ;
+// for ( i = 0 ; i < datamsglength*size ; i++ ) {
+// debugStream << " " << recvbuf[i] ;
+// }
+// debugStream << endl ;
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq != 0 ) {
+ ostringstream strstream ;
+ strstream << "=============================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " WaitAllRecv "
+ << nRecvReq << " Requests # 0 ERROR"
+ << endl << "============================================================"
+ << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+// debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " check of recvbuf" << endl ;
+ bool badrecvbuf = false ;
+ for ( target = 0 ; target < size ; target++ ) {
+ for ( int jj = 0 ; jj < datamsglength ; jj++ ) {
+ int index = target*datamsglength+jj ;
+ if ( jj < recvcounts[target] ) {
+ if ( fabs(recvbuf[index] - (target*1000000 + myrank*10000 +
+ (timeLoc[target]/deltatime[target])*100 + jj)) > 101) {
+ badrecvbuf = true ;
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " target " << target << " timeLoc[target] "
+ << timeLoc[target] << " recvbuf[" << index << "] " << (int ) recvbuf[index]
+ << " # " << (int ) (target*1000000 +
+ myrank*10000 + (timeLoc[target]/deltatime[target])*100 + jj)
+ << endl ;
+ }
+ else if ( badrecvbuf ) {
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " ~= " << (int ) (target*1000000 +
+ myrank*10000 + (timeLoc[target]/deltatime[target])*100 + jj) << endl ;
+ }
+ }
+ else if ( recvbuf[index] != -1 ) {
+ badrecvbuf = true ;
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " # -1" << endl ;
+ }
+ }
+ }
+ if ( badrecvbuf ) {
+ ostringstream strstream ;
+ strstream << "==================================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " badrecvbuf"
+ << endl << "=================================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ delete [] recvbuf ;
+ }
+
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent" << endl ;
+ sts = MyMPIAccessDEC->checkFinalSent() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "=================================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent ERROR"
+ << endl << "================================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv" << endl ;
+ sts = MyMPIAccessDEC->checkFinalRecv() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "=================================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv ERROR"
+ << endl << "================================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "===============================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error"
+ << endl << "==============================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ time_t endtime = time(NULL) ;
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ debugStream << "test" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+// delete aLinearInterpDEC ;
+
+ delete [] sendcounts ;
+ delete [] sdispls ;
+ delete [] recvcounts ;
+ delete [] rdispls ;
+ delete [] sendtimecounts ;
+ delete [] stimedispls ;
+ delete [] recvtimecounts ;
+ delete [] rtimedispls ;
+
+// MPI_Finalize();
+
+ endtime = time(NULL) ;
+
+ debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <time.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#ifndef WIN32
+#include <unistd.h>
+#endif
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails that expose known bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_Cancel() {
+
+ debugStream << "test_MPI_Access_Cancel" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_Cancel must be run with 2 procs" << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_Cancel" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int intsendbuf[5] ;
+ double doublesendbuf[10] ;
+ int RequestId[10] ;
+ int sts = 0;
+ int i , j ;
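+  // Three rounds: rank 0 sends five MPI_INT then five MPI_DOUBLE messages;
+  // rank 1 completes only the int receives, leaving the double ones pending.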
+ for ( j = 0 ; j < 3 ; j++ ) {
+ for ( i = 0 ; i < 10 ; i++ ) {
+ debugStream << "test" << myrank << " ============================ i " << i
+ << "============================" << endl ;
+ if ( myrank == 0 ) {
+ if ( i < 5 ) {
+ intsendbuf[i] = i ;
+ sts = mpi_access.ISend(&intsendbuf[i],1,MPI_INT,target, RequestId[i]) ;
+ debugStream << "test" << myrank << " Send MPI_INT RequestId " << RequestId[i]
+ << endl ;
+ }
+ else {
+ doublesendbuf[i] = i ;
+ sts = mpi_access.ISend(&doublesendbuf[i],1,MPI_DOUBLE,target,
+ RequestId[i]) ;
+ debugStream << "test" << myrank << " Send MPI_DOUBLE RequestId " << RequestId[i]
+ << endl ;
+ }
+ }
+ else {
+ int flag = false ;
+ while ( !flag ) {
+ int source, tag, outcount ;
+ MPI_Datatype datatype ;
+ sts = mpi_access.IProbe(target, source, tag, datatype, outcount,
+ flag ) ;
+ if ( flag ) {
+ debugStream << "test" << myrank << " " << i << " IProbe target " << target
+ << " source " << source << " tag " << tag
+ << " outcount " << outcount << " flag " << flag << endl ;
+ }
+ else {
+ debugStream << "test" << myrank << " flag " << flag << endl ;
+ sleep( 1 ) ;
+ }
+ if ( flag ) {
+ int recvbuf ;
+ sts = mpi_access.IRecv(&recvbuf,outcount,MPI_INT,source,
+ RequestId[i] ) ;
+ if ( datatype == MPI_INT ) {
+ int error;
+ mpi_access.wait( RequestId[i] ) ;
+ mpi_access.status( RequestId[i], source, tag, error, outcount,
+ true ) ;
+          if ( (outcount != 1) || (recvbuf != i) ) {
+ ostringstream strstream ;
+ strstream << "======================================================"
+ << endl << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " KO" << endl
+ << "======================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ debugStream << "========================================================"
+ << endl << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " OK" << endl
+ << "========================================================"
+ << endl ;
+ }
+ }
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
+
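+    // The five MPI_DOUBLE messages were never completed on the receive side:
+    // probe them, cancel the probed message and the pending IRecv request,
+    // then verify that no unread message remains.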
+ if ( myrank != 0 ) {
+ int iprobe ;
+ for ( iprobe = 5 ; iprobe < 10 ; iprobe++ ) {
+ debugStream << "test" << myrank << " ============================ iprobe "
+ << iprobe << "============================" << endl ;
+ int source, tag, outcount ;
+ MPI_Datatype datatype ;
+ int probeflag = false ;
+ while ( !probeflag ) {
+ sts = mpi_access.IProbe( target, source, tag, datatype, outcount,
+ probeflag ) ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " IProbe iprobe " << iprobe
+ << " target " << target << " probeflag " << probeflag
+ << " tag " << tag << " outcount " << outcount << " datatype "
+ << datatype << " lenerr " << lenerr << " " << msgerr << endl ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "=========================================================="
+ << endl << "test" << myrank << " IProbe KO iprobe " << iprobe
+ << endl
+ << "=========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if ( !probeflag ) {
+ //debugStream << "========================================================"
+ // << endl << "test" << myrank << " IProbe KO(OK) iprobe " << iprobe
+ // << " probeflag " << probeflag << endl
+ // << "========================================================"
+ // << endl ;
+ }
+ else {
+ debugStream << "test" << myrank << " " << iprobe << " IProbe target "
+ << target << " source " << source << " tag " << tag
+ << " outcount " << outcount << " probeflag " << probeflag
+ << endl ;
+ if ( datatype != MPI_DOUBLE ) {
+ ostringstream strstream ;
+ strstream << "========================================================"
+ << endl << "test" << myrank << " MPI_DOUBLE KO" << endl
+ << "========================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ int flag ;
+ sts = mpi_access.cancel( source, tag, datatype, outcount, flag ) ;
+ if ( sts != MPI_SUCCESS || !flag ) {
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "======================================================"
+ << endl << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl << "test" << myrank
+ << " Cancel PendingIrecv KO flag " << flag << " iprobe "
+ << iprobe << " Irecv completed" << endl
+ << "======================================================"
+ << endl ;
+ //return 1 ;
+ }
+ else {
+ debugStream << "======================================================"
+ << endl << "test" << myrank
+ << " Cancel PendingIrecv OK RequestId " << " flag "
+ << flag << " iprobe " << iprobe << endl
+ << "======================================================"
+ << endl ;
+ }
+ }
+ int Reqtarget, Reqtag, Reqerror, Reqoutcount ;
+ mpi_access.status( RequestId[iprobe], Reqtarget, Reqtag, Reqerror,
+ Reqoutcount, true ) ;
+ debugStream << "test" << myrank << " Status Reqtarget "<< Reqtarget
+ << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
+ << endl ;
+ int Reqflag ;
+ sts = mpi_access.cancel( RequestId[iprobe] , Reqflag ) ;
+ debugStream << "test" << myrank << " " << iprobe
+ << " Cancel Irecv done Reqtarget " << Reqtarget
+ << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
+ << " Reqflag " << Reqflag << endl ;
+ if ( sts != MPI_SUCCESS || !Reqflag ) {
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ ostringstream strstream ;
+ strstream << "========================================================"
+ << endl << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl << "test" << myrank
+ << " Cancel Irecv KO Reqflag " << Reqflag << " iprobe "
+ << iprobe << endl
+ << "========================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "========================================================"
+ << endl << "test" << myrank
+ << " Cancel Irecv OK RequestId " << RequestId[iprobe]
+ << " Reqflag " << Reqflag << " iprobe " << iprobe << endl
+ << "========================================================"
+ << endl ;
+ probeflag = Reqflag ;
+ }
+ }
+ }
+ }
+ }
+ mpi_access.waitAll(10,RequestId) ;
+ mpi_access.deleteRequests(10,RequestId) ;
+ }
+
+ int source, tag, outcount, flag ;
+ MPI_Datatype datatype ;
+ sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ if ( sts != MPI_SUCCESS || flag ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " IProbe KO flag " << flag
+ << " remaining unread/cancelled message :" << endl
+ << " source " << source << " tag " << tag << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ mpi_access.testAll(10,RequestId,flag) ;
+ mpi_access.waitAll(10,RequestId) ;
+ mpi_access.deleteRequests(10,RequestId) ;
+ mpi_access.testAll(10,RequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails that expose known bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_Cyclic_ISend_IRecv() {
+
+ debugStream << "test_MPI_Access_Cyclic_ISend_IRecv" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 3 ) {
+ cerr << "test_MPI_Access_Cyclic_ISend_IRecv must be run with 3 procs" << endl ;
+ //CPPUNIT_FAIL("test_MPI_Access_Cyclic_ISend_IRecv must be run with 3 procs") ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_Cyclic_ISend_IRecv" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+#define maxsend 100
+
+ if ( myrank >= 3 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
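+  // Ring topology: 0 -> 1 -> 2 -> 0; alltarget/allsource give each rank its
+  // successor and predecessor.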
+ int alltarget[3] = {1 , 2 , 0 } ;
+ int allsource[3] = {2 , 0 , 1 } ;
+ int SendRequestId[maxsend] ;
+ int RecvRequestId[maxsend] ;
+ int sendbuf[maxsend] ;
+ int recvbuf[maxsend] ;
+ int sts ;
+ int i = 0 ;
+ if ( myrank == 0 ) {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
+ SendRequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+ }
+ for ( i = 0 ; i < maxsend ; i++ ) {
+ recvbuf[i] = -1 ;
+ sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,allsource[myrank],
+ RecvRequestId[i]) ;
+ debugStream << "test" << myrank << " Recv RequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(allsource[myrank]) << endl ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr
+ << " " << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
+ int flag ;
+ if ( j < i ) {
+ debugStream << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
+ << ")" << endl ;
+ mpi_access.test( SendRequestId[j], flag ) ;
+ if ( flag ) {
+ int target, tag, error, outcount ;
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Send RequestId " << SendRequestId[j]
+ << " target " << target << " tag " << tag << " error " << error
+ << endl ;
+ mpi_access.deleteRequest( SendRequestId[j] ) ;
+ }
+ }
+ debugStream << "test" << myrank << " " << j << " -> Test-Recv("<< SendRequestId[j]
+ << ")" << endl ;
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ if ( flag ) {
+ int source, tag, error, outcount ;
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Recv RequestId" << j << " "
+ << RecvRequestId[j] << " source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount << endl ;
+        if ( (outcount != 1) || (recvbuf[j] != j) ) {
+ ostringstream strstream ;
+ strstream << "====================================================="
+ << endl << "test" << myrank << " outcount "
+ << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
+ << endl << "====================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ }
+ if ( myrank == 0 ) {
+ if ( i != maxsend-1 ) {
+ sendbuf[i+1] = i + 1 ;
+ sts = mpi_access.ISend(&sendbuf[i+1],1,MPI_INT,alltarget[myrank],
+ SendRequestId[i+1]) ;
+ debugStream << "test" << myrank << " Send RequestId " << SendRequestId[i+1]
+ << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+ }
+ }
+ else {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
+ SendRequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+ }
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr
+ << " " << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
+
+ int flag ;
+ mpi_access.testAll(maxsend,SendRequestId,flag) ;
+ mpi_access.testAll(maxsend,RecvRequestId,flag) ;
+ mpi_access.waitAll(maxsend,SendRequestId) ;
+ mpi_access.deleteRequests(maxsend,SendRequestId) ;
+ mpi_access.waitAll(maxsend,RecvRequestId) ;
+ mpi_access.deleteRequests(maxsend,RecvRequestId) ;
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ mpi_access.testAll(maxsend,SendRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " TestAllSendflag " << flag << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " TestAllSendflag " << flag << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ mpi_access.testAll(maxsend,RecvRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " TestAllRecvflag " << flag << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " TestAllRecvflag " << flag << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+
+ int sendrequests[maxsend] ;
+ int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , maxsend ,
+ sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ int source, tag, error, outcount ;
+ mpi_access.status(sendrequests[0], source, tag, error, outcount, true) ;
+ debugStream << "test" << myrank << " RequestId " << sendrequests[0]
+ << " source " << source << " tag " << tag << " error " << error
+ << " outcount " << outcount << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ int recvrequests[maxsend] ;
+  int recvreqsize = mpi_access.recvRequestIds( allsource[myrank] , maxsend ,
+ recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails that expose known bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
+
+ debugStream << "test_MPI_Access_Cyclic_Send_Recv" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 3 ) {
+ cerr << "test_MPI_Access_Send_Recv must be run with 3 procs" << endl ;
+ //CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be run with 3 procs") ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_Cyclic_Send_Recv" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 3 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
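+  // Same ring as the ISend/IRecv variant (0 -> 1 -> 2 -> 0), with blocking calls.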
+ int alltarget[3] = {1 , 2 , 0 } ;
+ int allsource[3] = {2 , 0 , 1 } ;
+ int RequestId[10] ;
+ int sts ;
+ int i = 0 ;
+ if ( myrank == 0 ) {
+ sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
+ << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+ }
+ for ( i = 0 ; i < 10 ; i++ ) {
+ int recvbuf ;
+ int outcount ;
+ if ( i & 1 ) {
+ outcount = 0 ;
+ sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i],
+ &outcount) ;
+ }
+ else {
+ sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i]) ;
+ outcount = 1 ;
+ }
+ //int source, tag, error, outcount ;
+ //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
+ debugStream << "test" << myrank << " Recv RequestId " << RequestId[i]
+ << " tag " << mpi_access.recvMPITag(allsource[myrank])
+ << " outcount " << outcount << endl ;
+    if ( (outcount != 1) || (recvbuf != i) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " outcount "
+ << outcount << " recvbuf " << recvbuf << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if ( myrank == 0 ) {
+ if ( i != 9 ) {
+ int ii = i + 1 ;
+ sts = mpi_access.send(&ii,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
+ << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+ }
+ }
+ else {
+ sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
+ << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr
+ << " " << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
+
+ int flag ;
+ mpi_access.testAll(10,RequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ mpi_access.waitAll(10,RequestId) ;
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ int sendrequests[10] ;
+ int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , 10 ,
+ sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ int recvrequests[10] ;
+  int recvreqsize = mpi_access.recvRequestIds( allsource[myrank] , 10 ,
+ recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <time.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#ifndef WIN32
+#include <unistd.h>
+#endif
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails that expose known bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_IProbe() {
+
+ debugStream << "test_MPI_Access_IProbe" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_IProbe must be run with 2 procs" << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_IProbe" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int sendbuf[10] ;
+ int RequestId[10] ;
+ int sts = 0;
+ int i ;
+ for ( i = 0 ; i < 10 ; i++ ) {
+ if ( myrank == 0 ) {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, RequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
+ << endl ;
+ }
+ else {
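+      // Poll with IProbe until a message is announced, then receive it with a
+      // blocking recv sized from the probe's outcount.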
+ int flag = false ;
+ while ( !flag ) {
+ int source, tag, outcount ;
+ MPI_Datatype datatype ;
+ sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
+ if ( flag ) {
+ debugStream << "test" << myrank << " " << i << " IProbe target " << target
+ << " source " << source << " tag " << tag
+ << " outcount " << outcount << " flag " << flag << endl ;
+ }
+ else {
+ debugStream << "test" << myrank << " IProbe flag " << flag << endl ;
+ sleep( 1 ) ;
+ }
+ if ( flag ) {
+ int recvbuf ;
+ sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
+ &outcount) ;
+          if ( (outcount != 1) || (recvbuf != i) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ debugStream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
+ int flag ;
+ mpi_access.testAll(10,RequestId,flag) ;
+ mpi_access.waitAll(10,RequestId) ;
+ mpi_access.deleteRequests(10,RequestId) ;
+ mpi_access.testAll(10,RequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_ISendRecv() {
+
+ debugStream << "test_MPI_Access_ISendRecv" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ cerr << "test_MPI_Access_ISendRecv must be run with 2 procs" << endl ;
+ //CPPUNIT_FAIL("test_MPI_Access_ISendRecv must be run with 2 procs") ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_ISendRecv" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[10] ;
+ int RecvRequestId[10] ;
+ int sendbuf[10] ;
+ int recvbuf[10] ;
+ int sts ;
+ int i ;
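+  // Each iteration posts a combined non-blocking send/receive via ISendRecv,
+  // then sweeps every request posted so far with test(), reading its status
+  // and deleting completed send requests as it goes.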
+ for ( i = 0 ; i < 10 ; i++ ) {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISendRecv(&sendbuf[i],1,MPI_INT,target, SendRequestId[i],
+ &recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
+ debugStream << "test" << myrank << " Send sendRequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target)
+ << " recvRequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr
+ << " " << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
+ int flag ;
+ if ( j < i ) {
+ debugStream << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
+ << ")" << endl ;
+ mpi_access.test( SendRequestId[j], flag ) ;
+ if ( flag ) {
+ int tag, error, outcount ;
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Send RequestId " << SendRequestId[j]
+ << " target " << target << " tag " << tag << " error " << error
+ << endl ;
+ mpi_access.deleteRequest( SendRequestId[j] ) ;
+ }
+ }
+      debugStream << "test" << myrank << " " << j << " -> Test-Recv("<< RecvRequestId[j]
+ << ")" << endl ;
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ if ( flag ) {
+ int source, tag, error, outcount ;
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Recv RequestId" << j << " "
+ << RecvRequestId[j] << " source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount << endl ;
+        if ( (outcount != 1) || (recvbuf[j] != j) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " outcount "
+ << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ }
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
+
+ int flag ;
+ mpi_access.testAll(10,SendRequestId,flag) ;
+ mpi_access.waitAll(10,SendRequestId) ;
+ mpi_access.deleteRequests(10,SendRequestId) ;
+ mpi_access.testAll(10,SendRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ mpi_access.testAll(10,RecvRequestId,flag) ;
+ mpi_access.waitAll(10,RecvRequestId) ;
+ mpi_access.deleteRequests(10,RecvRequestId) ;
+ mpi_access.testAll(10,RecvRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ int sendrequests[10] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ int recvrequests[10] ;
+  int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_ISend_IRecv() {
+
+ debugStream << "test_MPI_Access_ISend_IRecv" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ cerr << "test_MPI_Access_ISend_IRecv must be run with 2 procs" << endl ;
+ //CPPUNIT_FAIL("test_MPI_Access_ISend_IRecv must be run with 2 procs") ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_ISend_IRecv" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+#define maxreq 100
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[maxreq] ;
+ int recvbuf[maxreq] ;
+ int i ;
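+  // Rank 0 posts maxreq ISend requests and rank 1 the matching IRecv requests;
+  // after each post, all requests issued so far are polled with test() and,
+  // once complete, their status (source, tag, error, outcount) is checked.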
+ for ( i = 0 ; i < maxreq ; i++ ) {
+ if ( myrank == 0 ) {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
+ debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
+ debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.test( SendRequestId[j], flag ) ;
+ }
+ else {
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ }
+ if ( flag ) {
+ int source, tag, error, outcount ;
+ if ( myrank == 0 ) {
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+ << ") : target " << target << " tag " << tag << " error " << error
+ << " flag " << flag << endl ;
+ }
+ else {
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Recv RequestId "
+ << RecvRequestId[j] << ") : source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount
+ << " flag " << flag << endl ;
+          if ( (outcount != 1) || (recvbuf[j] != j) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount "
+ << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ //else {
+ // debugStream << "==========================================================="
+ // << endl << "test" << myrank << " outcount " << outcount
+ // << " RequestId " << RecvRequestId[j] << " recvbuf "
+ // << recvbuf[j] << " OK" << endl
+ // << "==========================================================="
+ // << endl ;
+ //}
+ }
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ if ( myrank == 0 ) {
+ mpi_access.waitAll(maxreq, SendRequestId) ;
+ mpi_access.deleteRequests(maxreq, SendRequestId) ;
+ }
+ else {
+ mpi_access.waitAll(maxreq, RecvRequestId) ;
+ mpi_access.deleteRequests(maxreq, RecvRequestId) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ for ( i = 0 ; i < sendreqsize ; i++ ) {
+ debugStream << "test" << myrank << " sendrequests[ " << i << " ] = "
+ << sendrequests[i] << endl ;
+ }
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[maxreq] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+ // MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <time.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {
+
+ debugStream << "test_MPI_Access_ISend_IRecv_BottleNeck" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_ISend_IRecv_BottleNeck must be run with 2 procs"
+ << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_ISend_IRecv_BottleNeck" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+#define maxreq 10000
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[maxreq] ;
+ int recvbuf[maxreq] ;
+ int i ;
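+  // Bottleneck scenario: rank 0 floods maxreq (10000) non-blocking sends of
+  // growing length while rank 1 posts its IRecv requests and opportunistically
+  // reaps completed ones via recvRequestIds()/test()/deleteRequest(), so the
+  // sender's request table fills much faster than the receiver drains it.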
+ for ( i = 0 ; i < maxreq ; i++ ) {
+ if ( myrank == 0 ) {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISend(sendbuf,i,MPI_INT,target, SendRequestId[i]) ;
+ debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ //sleep( 1 ) ;
+ sts = mpi_access.IRecv(recvbuf,i,MPI_INT,target, RecvRequestId[i]) ;
+ debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ int recvreqsize = mpi_access.recvRequestIdsSize() ;
+ int * recvrequests = new int[ recvreqsize ] ;
+ recvreqsize = mpi_access.recvRequestIds( target , recvreqsize , recvrequests ) ;
+ int j ;
+ for (j = 0 ; j < recvreqsize ; j++) {
+ int flag ;
+ mpi_access.test( recvrequests[j], flag ) ;
+ if ( flag ) {
+ int source, tag, error, outcount ;
+ mpi_access.status( recvrequests[j], source, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Recv RequestId "
+ << recvrequests[j] << ") : source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount
+ << " flag " << flag << " : DeleteRequest" << endl ;
+ mpi_access.deleteRequest( recvrequests[j] ) ;
+ }
+ else {
+// debugStream << "test" << myrank << " Test(Recv RequestId "
+// << recvrequests[j] << ") flag " << flag << endl ;
+ }
+ }
+ delete [] recvrequests ;
+ }
+ if ( sts != MPI_SUCCESS ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ }
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ if ( myrank == 0 ) {
+ int size2 = mpi_access.sendRequestIdsSize() ;
+ debugStream << "test" << myrank << " before WaitAll sendreqsize " << size2 << endl ;
+ mpi_access.waitAll(maxreq, SendRequestId) ;
+ size2 = mpi_access.sendRequestIdsSize() ;
+ debugStream << "test" << myrank << " after WaitAll sendreqsize " << size2 << endl ;
+ int * ArrayOfSendRequests = new int[ size2 ] ;
+ int nSendRequest = mpi_access.sendRequestIds( size2 , ArrayOfSendRequests ) ;
+ for ( i = 0 ; i < nSendRequest ; i++ ) {
+ mpi_access.deleteRequest( ArrayOfSendRequests[i] ) ;
+ }
+ delete [] ArrayOfSendRequests ;
+ }
+ else {
+ int size2 = mpi_access.recvRequestIdsSize() ;
+ debugStream << "test" << myrank << " before WaitAll recvreqsize " << size2 << endl ;
+ mpi_access.waitAll(maxreq, RecvRequestId) ;
+ size2 = mpi_access.recvRequestIdsSize() ;
+ debugStream << "test" << myrank << " after WaitAll recvreqsize " << size2 << endl ;
+ int * ArrayOfRecvRequests = new int[ size2 ] ;
+ int nRecvRequest = mpi_access.recvRequestIds( size2 , ArrayOfRecvRequests ) ;
+ for ( i = 0 ; i < nRecvRequest ; i++ ) {
+ mpi_access.deleteRequest( ArrayOfRecvRequests[i] ) ;
+ }
+ delete [] ArrayOfRecvRequests ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ for ( i = 0 ; i < sendreqsize ; i++ ) {
+ debugStream << "test" << myrank << " sendrequests[ " << i << " ] = "
+ << sendrequests[i] << endl ;
+ }
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[maxreq] ;
+ int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length() {
+
+ debugStream << "test_MPI_Access_ISend_IRecv_Length" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_ISend_IRecv_Length must be run with 2 procs" << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_ISend_IRecv_Length" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+#define maxreq 10
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[1000*(maxreq-1)] ;
+ int recvbuf[maxreq][1000*(maxreq-1)] ;
+ int i ;
+ for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
+ sendbuf[i] = i ;
+ }
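+  // Messages of increasing length: iteration i transfers 1000*i ints, and the
+  // receiver checks that the reported outcount equals 1000*j and that the last
+  // element received equals outcount-1.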
+ for ( i = 0 ; i < maxreq ; i++ ) {
+ if ( myrank == 0 ) {
+ sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
+ debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ sts = mpi_access.IRecv( recvbuf[i], 1000*i, MPI_INT, target,
+ RecvRequestId[i] ) ;
+ debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.test( SendRequestId[j], flag ) ;
+ }
+ else {
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ }
+ if ( flag ) {
+        int target2, source, tag, error, outcount ;
+ if ( myrank == 0 ) {
+ mpi_access.status( SendRequestId[j], target2, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+ << ") : target " << target2 << " tag " << tag << " error " << error
+ << " flag " << flag << endl ;
+ }
+ else {
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Recv RequestId "
+ << RecvRequestId[j] << ") : source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount
+ << " flag " << flag << endl ;
+ if ( outcount != 0 ) {
+          if ( (outcount != 1000*j) ||
+               (recvbuf[j][outcount-1] != (outcount-1)) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount "
+ << outcount << " recvbuf " << recvbuf[j][outcount-1] << " KO"
+ << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " RequestId " << RecvRequestId[j] << " recvbuf "
+ << recvbuf[j][outcount-1] << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ else {
+ debugStream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " RequestId " << RecvRequestId[j] << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ debugStream << "test" << myrank << " WaitAll" << endl ;
+ if ( myrank == 0 ) {
+ mpi_access.waitAll(maxreq, SendRequestId) ;
+ mpi_access.deleteRequests(maxreq, SendRequestId) ;
+ }
+ else {
+ mpi_access.waitAll(maxreq, RecvRequestId) ;
+ mpi_access.deleteRequests(maxreq, RecvRequestId) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[maxreq] ;
+ int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+ // MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length_1() {
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_ISend_IRecv_Length_1 must be run with 2 procs" << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_ISend_IRecv_Length_1" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+#define maxreq 10
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[1000*(maxreq-1)] ;
+ int recvbuf[maxreq][1000*(maxreq-1)] ;
+ int maxirecv = 1 ;
+ int i ;
+ RecvRequestId[0] = -1 ;
+ for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
+ sendbuf[i] = i ;
+ }
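+  // Variant where the receiver sizes its IRecv from a probe: rank 1 uses
+  // IProbe to learn the source, datatype and outcount of the pending message
+  // and only then posts an IRecv of exactly that length into recvbuf[maxirecv].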
+ for ( i = 0 ; i < maxreq ; i++ ) {
+ sts = MPI_SUCCESS ;
+ if ( myrank == 0 ) {
+ sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
+ debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ int j ;
+ for (j = 1 ; j <= i ; j++) {
+ int source = 0;
+ MPI_Datatype datatype = 0;
+ int outcount = 0;
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.test( SendRequestId[j], flag ) ;
+ }
+ else {
+ int MPITag ;
+ sts = mpi_access.IProbe( target , source, MPITag, datatype,
+ outcount, flag) ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " IProbe lenerr " << lenerr << " "
+ << msgerr << endl ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " IProbe KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ debugStream << "test" << myrank << " IProbe i/j " << i << "/" << j
+ << " MPITag " << MPITag << " datatype " << datatype
+ << " outcount " << outcount << " flag " << flag << endl ;
+ }
+ if ( flag ) {
+ if ( myrank == 0 ) {
+ int tag, error ;
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+ << ") : target " << target << " tag " << tag << " error " << error
+ << " flag " << flag << endl ;
+ }
+ else {
+ sts = mpi_access.IRecv( recvbuf[maxirecv], outcount, datatype, source,
+ RecvRequestId[maxirecv] ) ;
+ debugStream << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
+ << RecvRequestId[maxirecv] << " source " << source
+ << " outcount " << outcount << " tag "
+ << mpi_access.recvMPITag(target) << endl ;
+ maxirecv = maxirecv + 1 ;
+ }
+ }
+ else if ( myrank == 1 && i == maxreq-1 && j >= maxirecv ) {
+ sts = mpi_access.IRecv( recvbuf[j], 1000*j, MPI_INT, target,
+ RecvRequestId[j] ) ;
+ debugStream << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
+ << RecvRequestId[j] << " target " << target << " length " << 1000*j
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ maxirecv = maxirecv + 1 ;
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.testAll( maxreq, SendRequestId, flag ) ;
+ debugStream << "test" << myrank << " TestAll SendRequest flag " << flag << endl ;
+ }
+ else {
+ int source ;
+ int outcount ;
+ if ( maxirecv != maxreq ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " KO" << " maxirecv " << maxirecv
+ << " != maxreq " << maxreq << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ while ( maxirecv > 0 ) {
+ for ( i = 1 ; i < maxreq ; i++ ) {
+        debugStream << "test" << myrank << " Test(Recv RequestId " << RecvRequestId[i] << ") :" << endl ;
+ sts = mpi_access.test( RecvRequestId[i] , flag ) ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " flag " << flag << " lenerr "
+ << lenerr << " " << msgerr << " maxirecv " << maxirecv << endl ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ debugStream << "test" << myrank << " Test flag " << flag << endl ;
+ if ( flag ) {
+ int tag, error ;
+ mpi_access.status( RecvRequestId[i] , source , tag , error ,
+ outcount ) ;
+ if ( i != 0 ) {
+ if (( outcount != 1000*i ) ||
+ ((recvbuf[i][outcount-1] != (outcount-1)))) {
+ ostringstream strstream ;
+ strstream << "========================================================"
+ << endl << "test" << myrank << " outcount " << outcount
+ << " KO" << " i " << i
+ << " recvbuf " << recvbuf[i][outcount-1] << endl
+ << "========================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ else if ( outcount != 0 ) {
+ ostringstream strstream ;
+ strstream << "========================================================"
+ << endl << "test" << myrank << " outcount " << outcount
+ << " KO" << " i " << i << endl
+ << "========================================================"
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ maxirecv = maxirecv - 1 ;
+ }
+ }
+ }
+ mpi_access.testAll( maxreq, RecvRequestId, flag ) ;
+ debugStream << "test" << myrank << " TestAll RecvRequest flag " << flag << endl ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ debugStream << "test" << myrank << " WaitAll :" << endl ;
+ if ( myrank == 0 ) {
+ mpi_access.waitAll( maxreq, SendRequestId ) ;
+ mpi_access.deleteRequests( maxreq, SendRequestId ) ;
+ }
+ else {
+ mpi_access.waitAll( maxreq, RecvRequestId ) ;
+ mpi_access.deleteRequests( maxreq, RecvRequestId ) ;
+ }
+
+ if ( myrank == 0 ) {
+ int sendrequests[maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[maxreq] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+ // MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_Probe() {
+
+ debugStream << "test_MPI_Access_Probe" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ cerr << "test_MPI_Access_Probe must be run with 2 procs" << endl ;
+ //CPPUNIT_FAIL("test_MPI_Access_Probe must be run with 2 procs") ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_Probe" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int RequestId[10] ;
+ int sts ;
+ int i ;
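+  // Blocking variant: rank 0 issues ten blocking sends while rank 1 calls
+  // probe() to learn source, tag, datatype and outcount before each recv().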
+ for ( i = 0 ; i < 10 ; i++ ) {
+ if ( myrank == 0 ) {
+ sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
+ << endl ;
+ }
+ else {
+ int source, tag, outcount ;
+ MPI_Datatype datatype ;
+ sts = mpi_access.probe(target, source, tag, datatype, outcount ) ;
+ debugStream << "test" << myrank << " Probe target " << target << " source " << source
+ << " tag " << tag << " outcount " << outcount << endl ;
+ int recvbuf ;
+ sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
+ &outcount) ;
+      if ( (outcount != 1) || (recvbuf != i) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
+ int flag ;
+ mpi_access.testAll(10,RequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ mpi_access.waitAll(10,RequestId) ;
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_SendRecv() {
+
+ debugStream << "MPIAccessTest::test_MPI_Access_SendRecv" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ cerr << "MPIAccessTest::test_MPI_Access_SendRecv must be run with 2 procs" << endl ;
+ //CPPUNIT_FAIL("test_MPI_Access_SendRecv must be run with 2 procs") ;
+ return;
+ }
+
+ debugStream << "MPIAccessTest::test_MPI_Access_SendRecv" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int sendRequestId[10] ;
+ int recvRequestId[10] ;
+ int sts ;
+ int i ;
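+  // Combined blocking sendRecv: on odd iterations the received length is
+  // returned through the optional outcount argument; on even iterations the
+  // argument is omitted and the length is taken to be 1.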
+ for ( i = 0 ; i < 10 ; i++ ) {
+ int recvbuf ;
+ int outcount ;
+ if ( i & 1 ) {
+ outcount = -1 ;
+ sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
+ &recvbuf,1,MPI_INT,target, recvRequestId[i],
+ &outcount) ;
+ }
+ else {
+ sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
+ &recvbuf,1,MPI_INT,target, recvRequestId[i]) ;
+// outcount = mpi_access.MPIOutCount( recvRequestId[i] ) ;
+ outcount = 1 ;
+ }
+ debugStream << "test" << myrank << " Send sendRequestId " << sendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target)
+ << " recvRequestId " << recvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target)
+ << " outcount " << outcount << " MPIOutCount "
+ << mpi_access.MPIOutCount( recvRequestId[i] ) << endl ;
+    if ( (outcount != 1) || (recvbuf != i) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
+
+ int flag ;
+ mpi_access.testAll(10,sendRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ mpi_access.waitAll(10,sendRequestId) ;
+ mpi_access.testAll(10,recvRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ mpi_access.waitAll(10,recvRequestId) ;
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ int sendrequests[10] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ int recvrequests[10] ;
+  int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_Send_Recv() {
+
+ debugStream << "test_MPI_Access_Send_Recv" << endl ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ cerr << "test_MPI_Access_Send_Recv must be run with 2 procs" << endl ;
+ //CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be run with 2 procs") ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_Send_Recv" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int RequestId[10] ;
+ int sts ;
+ int i ;
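+  // Ten blocking exchanges: rank 0 sends the loop index and rank 1 receives
+  // it, checking that outcount is 1 and that the payload equals the index.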
+ for ( i = 0 ; i < 10 ; i++ ) {
+ if ( myrank == 0 ) {
+ sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
+ debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ int recvbuf ;
+ int outcount ;
+ sts = mpi_access.recv(&recvbuf,1,MPI_INT,target, RequestId[i],&outcount) ;
+ //int source, tag, error, outcount ;
+ //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
+ debugStream << "test" << myrank << " Recv RequestId " << RequestId[i]
+ << " tag " << mpi_access.recvMPITag(target)
+ << " outcount " << outcount << endl ;
+      if ( (outcount != 1) || (recvbuf != i) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check();
+ }
+ int flag ;
+ mpi_access.testAll(10,RequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ mpi_access.waitAll(10,RequestId) ;
+ if(MPI_ACCESS_VERBOSE) mpi_access.check();
+
+ if ( myrank == 0 ) {
+ int sendrequests[10] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ else {
+ int recvrequests[10] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// Define this to compile in lines whose execution leads to a segmentation fault.
+#define ENABLE_FAULTS
+
+// Define this to enable CPPUNIT asserts and failures that expose known bugs.
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_Send_Recv_Length() {
+
+ debugStream << "test_MPI_Access_Send_Recv_Length" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_Send_Recv_Length must be run with 2 procs" << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_Send_Recv_Length" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int RequestId[10] ;
+  int sendbuf[9000] ;
+  int recvbuf[9001] ; // one extra slot: the recv below is posted with count 1000*i+1
+ bool recvbufok ;
+ int sts ;
+ int i , j ;
+ for ( i = 0 ; i < 9000 ; i++ ) {
+ sendbuf[i] = i ;
+ }
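+  // Rank 0 sends slices of increasing length (0, 1000, ..., 9000 ints);
+  // rank 1 posts each receive with a larger count (1000*i+1) and checks that
+  // outcount reports the number of ints actually sent.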
+ for ( i = 0 ; i < 10 ; i++ ) {
+ if ( myrank == 0 ) {
+ sts = mpi_access.send( sendbuf, 1000*i, MPI_INT, target, RequestId[i] ) ;
+ debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ sts = MPI_SUCCESS ;
+ RequestId[i] = -1 ;
+ int outcount = 0 ;
+ if ( i != 0 ) {
+ sts = mpi_access.recv( recvbuf,1000*i+1,MPI_INT,target, RequestId[i],
+ &outcount ) ;
+ }
+ //int source, tag, error, outcount ;
+ //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
+ debugStream << "test" << myrank << " Recv RequestId " << RequestId[i]
+ << " tag " << mpi_access.recvMPITag(target)
+ << " outcount " << outcount << endl ;
+ recvbufok = true ;
+ for ( j = 0 ; j < outcount ; j++ ) {
+ if ( recvbuf[j] != j ) {
+ debugStream << "test" << myrank << " recvbuf[ " << j << " ] = " << recvbuf[j]
+ << endl ;
+ recvbufok = false ;
+ break ;
+ }
+ }
+      if ( (outcount != 1000*i) || !recvbufok ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " recvbuf " << recvbuf << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ }
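+  // All ten exchanges are issued; check that every request has completed,
+  // then wait on all of them before the final bookkeeping checks.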
+ int flag ;
+ mpi_access.testAll(10,RequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ mpi_access.waitAll(10,RequestId) ;
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[10] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ else {
+ int recvrequests[10] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void MPIAccessTest::test_MPI_Access_Time() {
+
+ debugStream << "test_MPI_Access_Time" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_Time must be run with 2 procs" << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+ debugStream << "test_MPI_Access_Time" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess mpi_access( group ) ;
+
+#define maxreq 10
+
+ if ( myrank >= 2 ) {
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
+ mpi_access.barrier() ;
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
+ delete group ;
+ debugStream << "test_MPI_Access_Time" << myrank << " OK" << endl ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendTimeRequestId[maxreq] ;
+ int RecvTimeRequestId[maxreq] ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[maxreq] ;
+ int recvbuf[maxreq] ;
+ int i = 0 ;
+ MEDCoupling::TimeMessage aSendTimeMsg[maxreq] ;
+ MEDCoupling::TimeMessage aRecvTimeMsg[maxreq] ;
+ double t ;
+ double dt = 1. ;
+ double maxt = 10. ;
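+  // One iteration per time step: rank 0 sends a TimeMessage (time, deltatime)
+  // followed by one int of data; rank 1 posts the matching receives.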
+ for ( t = 0 ; t < maxt ; t = t+dt ) {
+ if ( myrank == 0 ) {
+ aSendTimeMsg[i].time = t ;
+ aSendTimeMsg[i].deltatime = dt ;
+ //aSendTimeMsg[i].maxtime = maxt ;
+ //sts = mpi_access.ISend( &aSendTimeMsg , mpi_access.timeExtent() ,
+ sts = mpi_access.ISend( &aSendTimeMsg[i] , 1 ,
+ mpi_access.timeType() , target ,
+ SendTimeRequestId[i]) ;
+ debugStream << "test" << myrank << " ISend RequestId " << SendTimeRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ sendbuf[i] = i ;
+ sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
+ debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ //sts = mpi_access.IRecv( &aRecvTimeMsg , mpi_access.timeExtent() ,
+ sts = mpi_access.IRecv( &aRecvTimeMsg[i] , 1 ,
+ mpi_access.timeType() , target ,
+ RecvTimeRequestId[i]) ;
+ debugStream << "test" << myrank << " IRecv RequestId " << RecvTimeRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
+ debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
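+      // Poll every request issued so far; for each completed one, fetch its
+      // status and, on the receiver side, check outcount and the received values.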
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.test( SendTimeRequestId[j], flag ) ;
+ }
+ else {
+ mpi_access.test( RecvTimeRequestId[j], flag ) ;
+ }
+ if ( flag ) {
+ int source, tag, error, outcount ;
+ if ( myrank == 0 ) {
+ mpi_access.status( SendTimeRequestId[j], target, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Send TimeRequestId " << SendTimeRequestId[j]
+ << ") : target " << target << " tag " << tag << " error " << error
+ << " flag " << flag << aSendTimeMsg[j] << endl ;
+ }
+ else {
+ mpi_access.status( RecvTimeRequestId[j], source, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Recv TimeRequestId "
+ << RecvTimeRequestId[j] << ") : source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount
+ << " flag " << flag << aRecvTimeMsg[j] << endl ;
+          if ( (outcount != 1) || (aRecvTimeMsg[j].time != j) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount << " KO"
+ << " RecvTimeRequestId " << RecvTimeRequestId[j] << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " RecvTimeRequestId " << RecvTimeRequestId[j] << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ }
+ if ( myrank == 0 ) {
+ mpi_access.test( SendRequestId[j], flag ) ;
+ }
+ else {
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ }
+ if ( flag ) {
+ int source, tag, error, outcount ;
+ if ( myrank == 0 ) {
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+ << ") : target " << target << " tag " << tag << " error " << error
+ << " flag " << flag << endl ;
+ }
+ else {
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ debugStream << "test" << myrank << " Test(Recv RequestId "
+ << RecvRequestId[j] << ") : source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount
+ << " flag " << flag << endl ;
+          if ( (outcount != 1) || (recvbuf[j] != j) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount "
+ << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " RequestId " << RecvRequestId[j] << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ i = i + 1 ;
+ }
+
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+ if ( myrank == 0 ) {
+ mpi_access.waitAll(maxreq, SendTimeRequestId) ;
+ mpi_access.deleteRequests(maxreq, SendTimeRequestId) ;
+ mpi_access.waitAll(maxreq, SendRequestId) ;
+ mpi_access.deleteRequests(maxreq, SendRequestId) ;
+ }
+ else {
+ mpi_access.waitAll(maxreq, RecvTimeRequestId) ;
+ mpi_access.deleteRequests(maxreq, RecvTimeRequestId) ;
+ mpi_access.waitAll(maxreq, RecvRequestId) ;
+ mpi_access.deleteRequests(maxreq, RecvRequestId) ;
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[2*maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , 2*maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[2*maxreq] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , 2*maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
+ mpi_access.barrier() ;
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
+
+ delete group ;
+
+ // MPI_Finalize();
+
+ debugStream << "test_MPI_Access_Time" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines whose execution leads to a Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace MEDCoupling;
+
+void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access->errorString(sts, msgerr, &lenerr) ;
+ debugStream << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+  return ;
+}
+
+void MPIAccessTest::test_MPI_Access_Time_0() {
+
+ debugStream << "test_MPI_Access_Time_0" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_MPI_Access_Time_0" <<endl
+ << " nbprocs =2" << endl
+ << "test must be run with 2 procs" << endl ;
+ cerr << strstream.str() << endl ;
+ //CPPUNIT_FAIL( strstream.str() ) ;
+ return;
+ }
+
+#define maxreq 100
+
+ double t ;
+ double dt[2] = {2., 1.} ;
+ double maxt = maxreq/dt[myrank] ;
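+  // The two ranks advance with different time steps (dt = 2. on rank 0, the
+  // sender, and 1. on rank 1, the receiver); for each local time t the
+  // receiver looks for the pair of TimeMessages bracketing t, receiving or
+  // cancelling pending messages as needed.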
+
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << endl ;
+
+ MEDCoupling::CommInterface interface ;
+
+ MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+
+ MEDCoupling::MPIAccess * mpi_access = new MEDCoupling::MPIAccess( group ) ;
+
+ if ( myrank >= 2 ) {
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
+ delete group ;
+ delete mpi_access ;
+ debugStream << "test_MPI_Access_Time" << myrank << " OK" << endl ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendTimeRequestId[maxreq] ;
+ int RecvTimeRequestId[maxreq] ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts = 0;
+ int sendbuf[maxreq] ;
+ int recvbuf[maxreq] ;
+ MEDCoupling::TimeMessage aSendTimeMsg[maxreq] ;
+ int lasttime = -1 ;
+ MEDCoupling::TimeMessage RecvTimeMessages[maxreq+1] ;
+ MEDCoupling::TimeMessage *aRecvTimeMsg = &RecvTimeMessages[1] ;
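+  // aRecvTimeMsg points one element past the start of RecvTimeMessages, so
+  // aRecvTimeMsg[-1] (i.e. RecvTimeMessages[0]) is a valid sentinel slot while
+  // lasttime is still -1.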
+// mpi_access->Trace() ;
+ int istep = 0 ;
+ for ( t = 0 ; t < maxt ; t = t+dt[myrank] ) {
+ debugStream << "test" << myrank << " ==========================TIME " << t
+ << " ==========================" << endl ;
+ if ( myrank == 0 ) {
+ aSendTimeMsg[istep].time = t ;
+ aSendTimeMsg[istep].deltatime = dt[myrank] ;
+ //aSendTimeMsg[istep].maxtime = maxt ;
+ if ( t+dt[myrank] >= maxt ) {
+ aSendTimeMsg[istep].deltatime = 0 ;
+ }
+ sts = mpi_access->ISend( &aSendTimeMsg[istep] , 1 ,
+ mpi_access->timeType() , target ,
+ SendTimeRequestId[istep]) ;
+ debugStream << "test" << myrank << " ISend TimeRequestId " << SendTimeRequestId[istep]
+ << " tag " << mpi_access->MPITag(SendTimeRequestId[istep]) << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ sendbuf[istep] = istep ;
+ sts = mpi_access->ISend(&sendbuf[istep],1,MPI_INT,target, SendRequestId[istep]) ;
+ debugStream << "test" << myrank << " ISend Data RequestId " << SendRequestId[istep]
+ << " tag " << mpi_access->MPITag(SendRequestId[istep]) << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+//CheckSent
+//=========
+ int sendrequests[2*maxreq] ;
+ int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq ,
+ sendrequests ) ;
+ int j , flag ;
+ for ( j = 0 ; j < sendreqsize ; j++ ) {
+ sts = mpi_access->test( sendrequests[j] , flag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ if ( flag ) {
+ mpi_access->deleteRequest( sendrequests[j] ) ;
+ debugStream << "test" << myrank << " " << j << ". " << sendrequests[j]
+ << " sendrequest deleted" << endl ;
+ }
+ }
+ }
+ else {
+//InitRecv
+//========
+ if ( t == 0 ) {
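+      // First step: initialise the sentinel entry at index -1 and post the
+      // first asynchronous receive of a TimeMessage.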
+ aRecvTimeMsg[lasttime].time = 0 ;
+ sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
+ mpi_access->timeType() ,
+ target , RecvTimeRequestId[lasttime+1]) ;
+ debugStream << "test" << myrank << " t == 0 IRecv TimeRequestId "
+ << RecvTimeRequestId[lasttime+1]
+ << " MPITag " << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
+ << " MPICompleted "
+ << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] ) << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ else {
+ debugStream << "test" << myrank << " t # 0 lasttime " << lasttime << endl ;
+//InitialOutTime
+//==============
+ bool outtime = false ;
+ if ( lasttime != -1 ) {
+ if ( t <= aRecvTimeMsg[lasttime-1].time ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " t " << t << " <= "
+ << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "==========================================================="
+ << endl << "test" << myrank << " t " << t << " > "
+ << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ //outtime = ((aRecvTimeMsg[lasttime].time +
+ // aRecvTimeMsg[lasttime].deltatime) >=
+ // aRecvTimeMsg[lasttime].maxtime) ;
+ outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
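+        // deltatime == 0 is the sender's end-of-time marker (set on rank 0
+        // when t+dt would reach maxt).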
+ }
+// CheckRecv - CheckTime
+// We have lasttime such that:
+// aRecvTimeMsg[ lasttime-1 ].time < T(i-1) <= aRecvTimeMsg[ lasttime ].time
+// We look for lasttime such that:
+// aRecvTimeMsg[ lasttime-1 ].time < T(i) <= aRecvTimeMsg[ lasttime ].time
+ if ( t <= aRecvTimeMsg[lasttime].time ) {
+ outtime = false ;
+ }
+ debugStream << "test" << myrank << " while outtime( " << outtime << " && t " << t
+ << " > aRecvTimeMsg[ " << lasttime << " ] "
+ << aRecvTimeMsg[lasttime].time << " )" << endl ;
+ while ( !outtime && (t > aRecvTimeMsg[lasttime].time) ) {
+ lasttime += 1 ;
+//TimeMessage
+//===========
+ sts = mpi_access->wait( RecvTimeRequestId[lasttime] ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ debugStream << "test" << myrank << " Wait done RecvTimeRequestId "
+ << RecvTimeRequestId[lasttime] << " lasttime " << lasttime
+ << " tag " << mpi_access->MPITag(RecvTimeRequestId[lasttime])
+ << aRecvTimeMsg[lasttime] << endl ;
+ if ( lasttime == 0 ) {
+ aRecvTimeMsg[lasttime-1] = aRecvTimeMsg[lasttime] ;
+ }
+ mpi_access->deleteRequest( RecvTimeRequestId[lasttime] ) ;
+
+ double deltatime = aRecvTimeMsg[lasttime].deltatime ;
+ //double maxtime = aRecvTimeMsg[lasttime].maxtime ;
+ double nexttime = aRecvTimeMsg[lasttime].time + deltatime ;
+ debugStream << "test" << myrank << " t " << t << " lasttime " << lasttime
+ << " deltatime " << deltatime
+ << " nexttime " << nexttime << endl ;
+ //if ( nexttime < maxtime && t > nexttime ) {
+ if ( deltatime != 0 && t > nexttime ) {
+//CheckRecv :
+//=========
+ //while ( nexttime < maxtime && t > nexttime ) {
+ while ( deltatime != 0 && t > nexttime ) {
+ int source, MPITag, outcount ;
+ MPI_Datatype datatype ;
+ sts = mpi_access->probe( target , source, MPITag, datatype,
+ outcount ) ;
+ chksts( sts , myrank , mpi_access ) ;
+// Cancel DataMessages until a TimeMessage is reached
+ int cancelflag ;
+ while ( !mpi_access->isTimeMessage( MPITag ) ) {
+ sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
+ //sts = mpi_access->cancel( source, datatype, outcount ,
+ //RecvRequestId[lasttime] ,
+ cancelflag ) ;
+ debugStream << "test" << myrank << " Recv TO CANCEL RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << " cancelflag " << cancelflag << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ sts = mpi_access->probe( target , source, MPITag, datatype,
+ outcount ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+// We can now advance in time
+ nexttime += deltatime ;
+ //if ( nexttime < maxtime && t > nexttime ) {
+ if ( deltatime != 0 && t > nexttime ) {
+// Cancel the TimeMessage
+ sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
+ //sts = mpi_access->cancel( source, datatype, outcount ,
+ //RecvRequestId[lasttime] ,
+ cancelflag ) ;
+ debugStream << "test" << myrank << " Time TO CANCEL RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << " cancelflag " << cancelflag << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ }
+ }
+ else {
+//DoRecv
+//======
+ debugStream << "test" << myrank << " Recv target " << target
+ << " lasttime " << lasttime
+ << " lasttime-1 " << aRecvTimeMsg[lasttime-1]
+ << " lasttime " << aRecvTimeMsg[lasttime]
+ << endl ;
+ sts = mpi_access->recv(&recvbuf[lasttime],1,MPI_INT,target,
+ RecvRequestId[lasttime]) ;
+ debugStream << "test" << myrank << " Recv RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ //outtime = ((aRecvTimeMsg[lasttime].time +
+ // aRecvTimeMsg[lasttime].deltatime) >=
+ // aRecvTimeMsg[lasttime].maxtime) ;
+ outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
+ if ( !outtime ) {
+// Asynchronously read one time message ahead
+ sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
+ mpi_access->timeType() , target ,
+ RecvTimeRequestId[lasttime+1]) ;
+ debugStream << "test" << myrank << " IRecv TimeRequestId "
+ << RecvTimeRequestId[lasttime+1] << " MPITag "
+ << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
+ << " MPICompleted "
+ << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] )
+ << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ else if ( t <= aRecvTimeMsg[lasttime].time ) {
+ outtime = false ;
+ }
+ }
+
+ //printf("DEBUG t %.15f Msg[lasttime-1] %.15f Msg[lasttime] %.15f \n",t,
+ // aRecvTimeMsg[lasttime-1].time,aRecvTimeMsg[lasttime].time) ;
+ if ( ((t <= aRecvTimeMsg[lasttime-1].time) ||
+ (t > aRecvTimeMsg[lasttime].time)) && !outtime ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " t " << t << " <= "
+ << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " ou t " << t << " > "
+ << "aRecvTimeMsg[ " << lasttime << " ].time "
+ << aRecvTimeMsg[lasttime].time << endl
+ << " ou bien outtime " << outtime << " KO RequestTimeIds "
+ << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
+ << " RequestIds "
+ << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
+ << "==========================================================="
+ << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "==========================================================="
+ << endl << "test" << myrank
+ << " aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " < t " << t << " <= "
+ << "aRecvTimeMsg[ " << lasttime << " ].time "
+ << aRecvTimeMsg[lasttime].time << endl
+                  << " or outtime " << outtime << " OK RequestTimeIds "
+ << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
+ << " RequestIds "
+ << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ }
+ chksts( sts , myrank , mpi_access ) ;
+ istep = istep + 1 ;
+ }
+
+ debugStream << "test" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+ if (MPI_ACCESS_VERBOSE) mpi_access->check() ;
+
+ if ( myrank == 0 ) {
+//CheckFinalSent
+//==============
+ debugStream << "test" << myrank << " CheckFinalSent :" << endl ;
+ int sendrequests[2*maxreq] ;
+ int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
+ int j ;
+ for ( j = 0 ; j < sendreqsize ; j++ ) {
+ sts = mpi_access->wait( sendrequests[j] ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ mpi_access->deleteRequest( sendrequests[j] ) ;
+ debugStream << "test" << myrank << " " << j << ". " << sendrequests[j] << " deleted"
+ << endl ;
+ }
+ }
+ else {
+ debugStream << "test" << myrank << " CheckFinalRecv :" << endl ;
+ int recvrequests[2*maxreq] ;
+ int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
+ int cancelflag ;
+ int j ;
+ for ( j = 0 ; j < recvreqsize ; j++ ) {
+ sts = mpi_access->cancel( recvrequests[j] , cancelflag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ mpi_access->deleteRequest( recvrequests[j] ) ;
+ debugStream << "test" << myrank << " " << j << ". " << recvrequests[j] << " deleted"
+ << " cancelflag " << cancelflag << endl ;
+ }
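+    // Drain anything still on the wire: probe for pending messages and cancel
+    // them until IProbe reports nothing left.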
+ int source, MPITag, outcount , flag ;
+ MPI_Datatype datatype ;
+ sts = mpi_access->IProbe( target , source, MPITag, datatype,
+ outcount , flag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ while ( flag ) {
+ sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
+ //sts = mpi_access->cancel( source, datatype, outcount ,
+ //RecvRequestId[lasttime] ,
+ cancelflag ) ;
+ debugStream << "test" << myrank << " TO CANCEL RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << " cancelflag " << cancelflag << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ sts = mpi_access->IProbe( target , source, MPITag, datatype,
+ outcount , flag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ }
+ if(MPI_ACCESS_VERBOSE) mpi_access->check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[2*maxreq] ;
+ int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[2*maxreq] ;
+ int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ debugStream << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ debugStream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ int i ;
+ for ( i = 0 ; i <= lasttime ; i++ ) {
+ debugStream << "test" << myrank << " " << i << ". RecvTimeMsg "
+ << aRecvTimeMsg[i].time << " recvbuf " << recvbuf[i] << endl ;
+ }
+
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
+ mpi_access->barrier() ;
+ debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
+
+ delete group ;
+ delete mpi_access ;
+
+// MPI_Finalize();
+
+ debugStream << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include <sstream>
-#include <cmath>
-
-#ifndef WIN32
-#include <unistd.h>
-#endif
-
-using namespace std;
-
-
-
-/*!
- * Tool to remove temporary files.
- * Allows automatique removal of temporary files in case of test failure.
- */
-MPIAccessDECTest_TmpFilesRemover::~MPIAccessDECTest_TmpFilesRemover()
-{
- set<string>::iterator it = myTmpFiles.begin();
- for (; it != myTmpFiles.end(); it++) {
- if (access((*it).data(), F_OK) == 0)
- remove((*it).data());
- }
- myTmpFiles.clear();
- //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
-}
-
-bool MPIAccessDECTest_TmpFilesRemover::Register(const string theTmpFile)
-{
- return (myTmpFiles.insert(theTmpFile)).second;
-}
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#ifndef _MPIACCESSDECTEST_HXX_
-#define _MPIACCESSDECTEST_HXX_
-
-#include <cppunit/extensions/HelperMacros.h>
-
-#include <set>
-#include <string>
-#include <iostream>
-#include "mpi.h"
-
-// (ABN]: too many text output in the MPIAccesTest - this renders
-// the analysis complicated:
-#define MPI_ACCESS_VERBOSE 0
-#define debugStream \
- if (!MPI_ACCESS_VERBOSE) {} \
- else std::cout
-
-class MPIAccessDECTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( MPIAccessDECTest );
- // CPPUNIT_TEST( test_AllToAllDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllDECAsynchronousPointToPoint ) ;
- //CPPUNIT_TEST( test_AllToAllvDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvDECAsynchronousPointToPoint ) ;
- //CPPUNIT_TEST( test_AllToAllTimeDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllTimeDECAsynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousNative ) ;
- //CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvTimeDECAsynchronousPointToPoint ) ;
- //CPPUNIT_TEST( test_AllToAllvTimeDoubleDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvTimeDoubleDECAsynchronousPointToPoint ) ;
- CPPUNIT_TEST_SUITE_END();
-
-
-public:
-
- MPIAccessDECTest():CppUnit::TestFixture(){}
- ~MPIAccessDECTest(){}
- void setUp(){}
- void tearDown(){}
- void test_AllToAllDECSynchronousPointToPoint() ;
- void test_AllToAllDECAsynchronousPointToPoint() ;
- void test_AllToAllvDECSynchronousPointToPoint() ;
- void test_AllToAllvDECAsynchronousPointToPoint() ;
- void test_AllToAllTimeDECSynchronousPointToPoint() ;
- void test_AllToAllTimeDECAsynchronousPointToPoint() ;
- void test_AllToAllvTimeDECSynchronousNative() ;
- void test_AllToAllvTimeDECSynchronousPointToPoint() ;
- void test_AllToAllvTimeDECAsynchronousPointToPoint() ;
- void test_AllToAllvTimeDoubleDECSynchronousPointToPoint() ;
- void test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() ;
-
-private:
- void test_AllToAllDEC( bool Asynchronous ) ;
- void test_AllToAllvDEC( bool Asynchronous ) ;
- void test_AllToAllTimeDEC( bool Asynchronous ) ;
- void test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) ;
- void test_AllToAllvTimeDoubleDEC( bool Asynchronous ) ;
- };
-
-// to automatically remove temporary files from disk
-class MPIAccessDECTest_TmpFilesRemover
-{
-public:
- MPIAccessDECTest_TmpFilesRemover() {}
- ~MPIAccessDECTest_TmpFilesRemover();
- bool Register(const std::string theTmpFile);
-
-private:
- std::set<std::string> myTmpFiles;
-};
-
-/*!
- * Tool to print array to stream.
- */
-template<class T>
-void MPIAccessDECTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
-{
- stream << text << ": {";
- if (length > 0) {
- stream << array[0];
- for (int i = 1; i < length; i++) {
- stream << ", " << array[i];
- }
- }
- stream << "}" << std::endl;
-};
-
-#endif
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include <sstream>
-#include <cmath>
-
-#ifndef WIN32
-#include <unistd.h>
-#endif
-
-using namespace std;
-
-
-
-/*!
- * Tool to remove temporary files.
- * Allows automatique removal of temporary files in case of test failure.
- */
-MPIAccessTest_TmpFilesRemover::~MPIAccessTest_TmpFilesRemover()
-{
- set<string>::iterator it = myTmpFiles.begin();
- for (; it != myTmpFiles.end(); it++) {
- if (access((*it).data(), F_OK) == 0)
- remove((*it).data());
- }
- myTmpFiles.clear();
- //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
-}
-
-bool MPIAccessTest_TmpFilesRemover::Register(const string theTmpFile)
-{
- return (myTmpFiles.insert(theTmpFile)).second;
-}
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#ifndef _MPIACCESSTEST_HXX_
-#define _MPIACCESSTEST_HXX_
-
-#include <cppunit/extensions/HelperMacros.h>
-
-#include <set>
-#include <string>
-#include <iostream>
-#include "mpi.h"
-
-// (ABN]: too many text output in the MPIAccesTest - this renders
-// the analysis complicated:
-#define MPI_ACCESS_VERBOSE 0
-#define debugStream \
- if (!MPI_ACCESS_VERBOSE) {} \
- else std::cout
-
-class MPIAccessTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( MPIAccessTest );
- CPPUNIT_TEST( test_MPI_Access_Send_Recv ) ;
- CPPUNIT_TEST( test_MPI_Access_Cyclic_Send_Recv ) ;
- CPPUNIT_TEST( test_MPI_Access_SendRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_Cyclic_ISend_IRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_ISendRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_Probe ) ;
- CPPUNIT_TEST( test_MPI_Access_IProbe ) ;
- CPPUNIT_TEST( test_MPI_Access_Cancel ) ;
- CPPUNIT_TEST( test_MPI_Access_Send_Recv_Length ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length_1 ) ;
- CPPUNIT_TEST( test_MPI_Access_Time ) ;
- CPPUNIT_TEST( test_MPI_Access_Time_0 ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_BottleNeck ) ;
- CPPUNIT_TEST_SUITE_END();
-
-
-public:
-
- MPIAccessTest():CppUnit::TestFixture(){}
- ~MPIAccessTest(){}
- void setUp(){}
- void tearDown(){}
- void test_MPI_Access_Send_Recv() ;
- void test_MPI_Access_Cyclic_Send_Recv() ;
- void test_MPI_Access_SendRecv() ;
- void test_MPI_Access_ISend_IRecv() ;
- void test_MPI_Access_Cyclic_ISend_IRecv() ;
- void test_MPI_Access_ISendRecv() ;
- void test_MPI_Access_Probe() ;
- void test_MPI_Access_IProbe() ;
- void test_MPI_Access_Cancel() ;
- void test_MPI_Access_Send_Recv_Length() ;
- void test_MPI_Access_ISend_IRecv_Length() ;
- void test_MPI_Access_ISend_IRecv_Length_1() ;
- void test_MPI_Access_Time() ;
- void test_MPI_Access_Time_0() ;
- void test_MPI_Access_ISend_IRecv_BottleNeck() ;
-
-private:
- };
-
-// to automatically remove temporary files from disk
-class MPIAccessTest_TmpFilesRemover
-{
-public:
- MPIAccessTest_TmpFilesRemover() {}
- ~MPIAccessTest_TmpFilesRemover();
- bool Register(const std::string theTmpFile);
-
-private:
- std::set<std::string> myTmpFiles;
-};
-
-/*!
- * Tool to print array to stream.
- */
-template<class T>
-void MPIAccessTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
-{
- stream << text << ": {";
- if (length > 0) {
- stream << array[0];
- for (int i = 1; i < length; i++) {
- stream << ", " << array[i];
- }
- }
- stream << "}" << std::endl;
-}
-
-#endif
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-// --- include all MPIAccess Test
-//
-#include "MPIAccessTest.hxx"
-
-// --- Registers the fixture into the 'registry'
-
-CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessTest );
-
-// --- generic Main program from KERNEL_SRC/src/Basics/Test
-
-#include "MPIMainTest.hxx"
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-// --- include all MPIAccessDEC Test
-//
-#include "MPIAccessDECTest.hxx"
-
-// --- Registers the fixture into the 'registry'
-
-CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessDECTest );
-
-// --- generic Main program from KERNEL_SRC/src/Basics/Test
-
-#include "MPIMainTest.hxx"
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-#include "MPIAccessDEC.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessDECTest::test_AllToAllDECSynchronousPointToPoint() {
- test_AllToAllDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllDECAsynchronousPointToPoint() {
- test_AllToAllDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , MEDCoupling::MPIAccess mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "===========================================================" << endl
- << "test_AllToAllDEC" << myrank << " KO" << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) {
-
- debugStream << "test_AllToAllDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be run with more than 1 proc and less than 12 procs"
- << endl ;
- cerr << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- debugStream << "test_AllToAllDEC" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
- MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
-
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
-
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
-#define maxreq 100
-#define datamsglength 10
-
- // int sts ;
- int sendcount = datamsglength ;
- int recvcount = datamsglength ;
- int * recvbuf = new int[datamsglength*size] ;
-
- int ireq ;
- for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
- int * sendbuf = new int[datamsglength*size] ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- MyMPIAccessDEC->allToAll( sendbuf, sendcount , MPI_INT ,
- recvbuf, recvcount , MPI_INT ) ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- }
-
- int nSendReq = mpi_access->sendRequestIdsSize() ;
- debugStream << "test_AllToAllDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
- << endl ;
- if ( nSendReq ) {
- int *ArrayOfSendRequests = new int[nSendReq] ;
- int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
- delete [] ArrayOfSendRequests ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- mpi_access->barrier() ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
- delete [] recvbuf ;
-
- // MPI_Finalize();
-
- debugStream << "test_AllToAllDEC" << myrank << " OK" << endl ;
-
- return ;
-}
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-#include "LinearTimeInterpolator.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessDECTest::test_AllToAllTimeDECSynchronousPointToPoint() {
- test_AllToAllTimeDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllTimeDECAsynchronousPointToPoint() {
- test_AllToAllTimeDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- debugStream << "test_AllToAllTimeDEC" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test_AllToAllTimeDEC" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
-
- debugStream << "test_AllToAllTimeDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be run with more than 1 proc and less than 12 procs"
- << endl ;
- cerr << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // int Asynchronous = atoi(argv[1]);
-
- debugStream << "test_AllToAllTimeDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
-
- MEDCoupling::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
- MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
-
- // LinearTimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
- // Asynchronous , LinearInterp , 0.5 ) ;
- MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
- debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
-
-#define maxproc 11
-#define maxreq 10000
-#define datamsglength 10
-
- int sts ;
- int sendcount = datamsglength ;
- int recvcount = datamsglength ;
-
- double time = 0 ;
- // double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
- double deltatime[maxproc] = {1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.} ;
- double maxtime = maxreq ;
- double nextdeltatime = deltatime[myrank] ;
- // MyMPIAccessDEC->InitTime( time , deltatime[myrank] , maxtime ) ;
- // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
- for ( time = 0 ; time <= maxtime && nextdeltatime != 0 ; time+=nextdeltatime ) {
- if ( time != 0 ) {
- nextdeltatime = deltatime[myrank] ;
- if ( time+nextdeltatime > maxtime ) {
- nextdeltatime = 0 ;
- }
- // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
- }
- MyMPIAccessDEC->setTime( time , nextdeltatime ) ;
- debugStream << "test_AllToAllTimeDEC" << myrank << "=====TIME " << time << "=====DELTATIME "
- << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
- int * sendbuf = new int[datamsglength*size] ;
- // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
- int * recvbuf = new int[datamsglength*size] ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- sts = MyMPIAccessDEC->allToAllTime( sendbuf, sendcount , MPI_INT ,
- recvbuf, recvcount , MPI_INT ) ;
- chksts( sts , myrank , mpi_access ) ;
-
- // debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf before CheckSent" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // debugStream << " " << recvbuf[i] ;
- // }
- // debugStream << endl ;
-
- // debugStream << "test_AllToAllTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq != 0 ) {
- ostringstream strstream ;
- strstream << "=============================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
- << endl << "============================================================="
- << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf" << endl ;
- bool badrecvbuf = false ;
- for ( i = 0 ; i < datamsglength*size ; i++ ) {
- if ( recvbuf[i] != (i/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(i%datamsglength) ) {
- badrecvbuf = true ;
- debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
- << recvbuf[i] << " # " << (i/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(i%datamsglength) << endl ;
- }
- else if ( badrecvbuf ) {
- debugStream << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
- << recvbuf[i] << " == " << (i/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(i%datamsglength) << endl ;
- }
- }
- if ( badrecvbuf ) {
- ostringstream strstream ;
- strstream << "==============================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " badrecvbuf"
- << endl << "============================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- delete [] recvbuf ;
- }
-
- debugStream << "test_AllToAllTimeDEC" << myrank << " final CheckSent" << endl ;
- sts = MyMPIAccessDEC->checkSent() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "================================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " final CheckSent ERROR"
- << endl << "================================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- int nSendReq = mpi_access->sendRequestIdsSize() ;
- debugStream << "test_AllToAllTimeDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
- << endl ;
- if ( nSendReq ) {
- int *ArrayOfSendRequests = new int[nSendReq] ;
- int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
- delete [] ArrayOfSendRequests ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "===============================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error"
- << endl << "==============================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
-
- delete sourcegroup ;
- delete targetgroup ;
- // delete aLinearInterpDEC ;
- delete MyMPIAccessDEC ;
-
- // MPI_Finalize();
-
- debugStream << "test_AllToAllTimeDEC" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-
-// define this to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// define this to enable CPPUNIT asserts and failures that expose known bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessDECTest::test_AllToAllvDECSynchronousPointToPoint() {
- test_AllToAllvDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllvDECAsynchronousPointToPoint() {
- test_AllToAllvDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , MEDCoupling::MPIAccess &mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test_AllToAllvDEC" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test_AllToAllvDEC" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) {
-
- debugStream << "test_AllToAllvDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllvDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be run with more than 1 proc and less than 12 procs"
- << endl ;
- cerr << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // int Asynchronous = atoi(argv[1]);
-
- debugStream << "test_AllToAllvDEC" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
- MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
-
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
-
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
-#define maxreq 100
-#define datamsglength 10
-
- // int sts ;
- int *sendcounts = new int[size] ;
- int *sdispls = new int[size] ;
- int *recvcounts = new int[size] ;
- int *rdispls = new int[size] ;
- for ( i = 0 ; i < size ; i++ ) {
- sendcounts[i] = datamsglength-i;
- sdispls[i] = i*datamsglength ;
- recvcounts[i] = datamsglength-myrank;
- rdispls[i] = i*datamsglength ;
- }
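- // Variable-length pattern: every sender posts datamsglength-i items for
- // target i, so this rank receives datamsglength-myrank items from each
- // source; the displacements keep fixed slots of datamsglength entries.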
- int * recvbuf = new int[datamsglength*size] ;
-
- int ireq ;
- for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
- int * sendbuf = new int[datamsglength*size] ;
- // int * sendbuf = (int *) malloc( sizeof(int)*datamsglength*size) ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- MyMPIAccessDEC->allToAllv( sendbuf, sendcounts , sdispls , MPI_INT ,
- recvbuf, recvcounts , rdispls , MPI_INT ) ;
-
- // debugStream << "test_AllToAllvDEC" << myrank << " recvbuf before CheckSent" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // debugStream << " " << recvbuf[i] ;
- // }
- // debugStream << endl ;
-
- // debugStream << "test_AllToAllvDEC" << myrank << " sendbuf " << sendbuf << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- // debugStream << "test_AllToAllvDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests" << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
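- // Waiting on and deleting the receive requests inside the loop keeps the
- // request table from growing over the maxreq iterations.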
-
- // debugStream << "test_AllToAllvDEC" << myrank << " recvbuf" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // debugStream << " " << recvbuf[i] ;
- // }
- // debugStream << endl ;
- }
-
- // debugStream << "test_AllToAllvDEC" << myrank << " final CheckSent" << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nSendReq = mpi_access->sendRequestIdsSize() ;
- debugStream << "test_AllToAllvDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
- << endl ;
- if ( nSendReq ) {
- int *ArrayOfSendRequests = new int[nSendReq] ;
- int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
- delete [] ArrayOfSendRequests ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- mpi_access->barrier() ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
- delete [] sendcounts ;
- delete [] sdispls ;
- delete [] recvcounts ;
- delete [] rdispls ;
- delete [] recvbuf ;
-
- // MPI_Finalize();
-
- debugStream << "test_AllToAllvDEC" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-#include <ctime>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-#include "LinearTimeInterpolator.hxx"
-
-// define this to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// define this to enable CPPUNIT asserts and failures that expose known bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousNative() {
- test_AllToAllvTimeDEC( false , true ) ;
-}
-void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousPointToPoint() {
- test_AllToAllvTimeDEC( false , false ) ;
-}
-void MPIAccessDECTest::test_AllToAllvTimeDECAsynchronousPointToPoint() {
- test_AllToAllvTimeDEC( true , false ) ;
-}
-
-static void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test_AllToAllvTimeDEC" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) {
-
- debugStream << "test_AllToAllvTimeDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be run with more than 1 proc and less than 12 procs"
- << endl ;
- cerr << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // int Asynchronous = atoi(argv[1]) ;
- int UseMPI_Alltoallv = UseMPINative ;
- // if ( argc == 3 ) {
- // UseMPI_Alltoallv = atoi(argv[2]) ;
- // }
-
- debugStream << "test_AllToAllvTimeDEC" << myrank << " Asynchronous " << Asynchronous
- << " UseMPI_Alltoallv " << UseMPI_Alltoallv << endl ;
-
- MEDCoupling::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
- MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
-
- // TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
- // Asynchronous , LinearInterp , 0.5 ) ;
- MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp , 0.5 ) ;
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
- debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
-
-#define maxproc 11
-#define maxreq 10000
-#define datamsglength 10
-
- int sts ;
- int *sendcounts = new int[size] ;
- int *sdispls = new int[size] ;
- int *recvcounts = new int[size] ;
- int *rdispls = new int[size] ;
- int *sendtimecounts = new int[size] ;
- int *stimedispls = new int[size] ;
- int *recvtimecounts = new int[size] ;
- int *rtimedispls = new int[size] ;
- for ( i = 0 ; i < size ; i++ ) {
- sendcounts[i] = datamsglength-i ;
- sdispls[i] = i*datamsglength ;
- recvcounts[i] = datamsglength-myrank ;
- rdispls[i] = i*datamsglength ;
- sendtimecounts[i] = 1 ;
- stimedispls[i] = 0 ;
- recvtimecounts[i] = 1 ;
- rtimedispls[i] = i ;
- //rtimedispls[i] = i*mpi_access->TimeExtent() ;
- }
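- // Besides the data counts, one TimeMessage per peer is exchanged: send
- // counts/displacements are 1/0 and each source gets its own receive slot.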
-
- double timeLoc = 0 ;
- double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
- double maxtime ;
- double nextdeltatime = deltatime[myrank] ;
- if ( UseMPI_Alltoallv ) {
- maxtime = maxreq*nextdeltatime - 0.1 ;
- }
- else {
- maxtime = maxreq ;
- // MyMPIAccessDEC->InitTime( time , nextdeltatime , maxtime ) ;
- }
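- // The native MPI_Alltoallv path scales the bound by this rank's step so
- // that roughly maxreq exchanges are performed; the point-to-point path
- // simply runs until time maxreq.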
- time_t begintime = time(NULL) ;
- // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
- for ( timeLoc = 0 ; timeLoc <= maxtime && nextdeltatime != 0 ; timeLoc+=nextdeltatime ) {
- nextdeltatime = deltatime[myrank] ;
- if ( timeLoc != 0 ) {
- nextdeltatime = deltatime[myrank] ;
- if ( timeLoc+nextdeltatime > maxtime ) {
- nextdeltatime = 0 ;
- }
- // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
- }
- MyMPIAccessDEC->setTime( timeLoc , nextdeltatime ) ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << "=====TIME " << timeLoc << "=====DELTATIME "
- << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
- int * sendbuf = new int[datamsglength*size] ;
- // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
- int * recvbuf = new int[datamsglength*size] ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- if ( UseMPI_Alltoallv ) {
- const MPI_Comm* comm = MyMPIAccessDEC->getComm();
- TimeMessage * aSendTimeMessage = new TimeMessage ;
- aSendTimeMessage->time = timeLoc ;
- // aSendTimeMessage->deltatime = deltatime[myrank] ;
- aSendTimeMessage->deltatime = nextdeltatime ;
- // aSendTimeMessage->maxtime = maxtime ;
- aSendTimeMessage->tag = (int ) (timeLoc/deltatime[myrank]) ;
- TimeMessage * aRecvTimeMessage = new TimeMessage[size] ;
- interface.allToAllV(aSendTimeMessage, sendtimecounts , stimedispls ,
- mpi_access->timeType() ,
- aRecvTimeMessage, recvtimecounts , rtimedispls ,
- mpi_access->timeType() , *comm ) ;
- // for ( j = 0 ; j < size ; j++ ) {
- // debugStream << "test_AllToAllvTimeDEC" << myrank << " TimeMessage received " << j << " "
- // << aRecvTimeMessage[j] << endl ;
- // }
- delete aSendTimeMessage ;
- delete [] aRecvTimeMessage ;
- interface.allToAllV(sendbuf, sendcounts , sdispls , MPI_INT ,
- recvbuf, recvcounts , rdispls , MPI_INT , *comm ) ;
- // free(sendbuf) ;
- delete [] sendbuf ;
- }
- else {
- int sts2 = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_INT ,
- recvbuf, recvcounts , rdispls , MPI_INT ) ;
- chksts( sts2 , myrank , mpi_access ) ;
- }
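- // The point-to-point branch goes through allToAllvTime(), which attaches
- // the time window set via setTime() to each message so the receiver can
- // apply the LinearTimeInterpolator configured above.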
-
- // debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf before CheckSent" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // debugStream << " " << recvbuf[i] ;
- // }
- // debugStream << endl ;
-
- // debugStream << "test_AllToAllvTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq != 0 ) {
- ostringstream strstream ;
- strstream << "=============================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
- << endl << "============================================================="
- << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // debugStream << "test_AllToAllvTimeDEC" << myrank << " check of recvbuf" << endl ;
- bool badrecvbuf = false ;
- for ( i = 0 ; i < size ; i++ ) {
- for ( int jj = 0 ; jj < datamsglength ; jj++ ) {
- int index = i*datamsglength+jj ;
- if ( jj < recvcounts[i] ) {
- if ( recvbuf[index] != (index/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(index%datamsglength) ) {
- badrecvbuf = true ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " # " << (index/datamsglength)*1000000 +
- myrank*1000 +
- myrank*datamsglength+(index%datamsglength) << endl ;
- }
- else if ( badrecvbuf ) {
- debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " == " << (index/datamsglength)*1000000 +
- myrank*1000 +
- myrank*datamsglength+(index%datamsglength) << endl ;
- }
- }
- else if ( recvbuf[index] != -1 ) {
- badrecvbuf = true ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " # -1" << endl ;
- }
- }
- }
- if ( badrecvbuf ) {
- ostringstream strstream ;
- strstream << "==============================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " badrecvbuf"
- << endl << "============================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- delete [] recvbuf ;
- }
-
- debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
-
- debugStream << "test_AllToAllvTimeDEC" << myrank << " CheckFinalSent" << endl ;
- sts = MyMPIAccessDEC->checkFinalSent() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "================================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " final CheckSent ERROR"
- << endl << "================================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- debugStream << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv" << endl ;
- sts = MyMPIAccessDEC->checkFinalRecv() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "================================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv ERROR"
- << endl << "================================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "===============================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error"
- << endl << "==============================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- time_t endtime = time(NULL) ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << " begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
- // delete aLinearInterpDEC ;
-
- delete [] sendcounts ;
- delete [] sdispls ;
- delete [] recvcounts ;
- delete [] rdispls ;
- delete [] sendtimecounts ;
- delete [] stimedispls ;
- delete [] recvtimecounts ;
- delete [] rtimedispls ;
-
- // MPI_Finalize();
-
- endtime = time(NULL) ;
-
- debugStream << "test_AllToAllvTimeDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <math.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-#include <ctime>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-#include "LinearTimeInterpolator.hxx"
-
-// define this to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// define this to enable CPPUNIT asserts and failures that expose known bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessDECTest::test_AllToAllvTimeDoubleDECSynchronousPointToPoint() {
- test_AllToAllvTimeDoubleDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() {
- test_AllToAllvTimeDoubleDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
-
- debugStream << "test_AllToAllvTimeDoubleDEC" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be run with more than 1 proc and less than 12 procs"
- << endl ;
- cerr << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
-// int Asynchronous = atoi(argv[1]) ;
-
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
-
- MEDCoupling::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
- MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
-
-// TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0 ) ;
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
-// Asynchronous , LinearInterp , 0.5 ) ;
- MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
-#define maxproc 11
-#define maxreq 100
-#define datamsglength 10
-
- int sts ;
- int *sendcounts = new int[size] ;
- int *sdispls = new int[size] ;
- int *recvcounts = new int[size] ;
- int *rdispls = new int[size] ;
- int *sendtimecounts = new int[size] ;
- int *stimedispls = new int[size] ;
- int *recvtimecounts = new int[size] ;
- int *rtimedispls = new int[size] ;
- for ( i = 0 ; i < size ; i++ ) {
- sendcounts[i] = datamsglength-i ;
- sdispls[i] = i*datamsglength ;
- recvcounts[i] = datamsglength-myrank ;
- rdispls[i] = i*datamsglength ;
- sendtimecounts[i] = 1 ;
- stimedispls[i] = 0 ;
- recvtimecounts[i] = 1 ;
- rtimedispls[i] = i ;
- }
-
- double timeLoc[maxproc] ;
- double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
- double maxtime[maxproc] ;
- double nextdeltatime[maxproc] ;
- for ( i = 0 ; i < size ; i++ ) {
- timeLoc[i] = 0 ;
- maxtime[i] = maxreq ;
- nextdeltatime[i] = deltatime[i] ;
- }
- time_t begintime = time(NULL) ;
- for ( timeLoc[myrank] = 0 ; timeLoc[myrank] <= maxtime[myrank] && nextdeltatime[myrank] != 0 ;
- timeLoc[myrank]+=nextdeltatime[myrank] ) {
-//local and target times
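- // Every peer's clock is replayed locally from the shared deltatime table,
- // so the recvbuf check below can predict which time step a message from
- // 'target' belongs to.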
- int target ;
- for ( target = 0 ; target < size ; target++ ) {
- nextdeltatime[target] = deltatime[target] ;
- if ( timeLoc[target] != 0 ) {
- if ( timeLoc[target]+nextdeltatime[target] > maxtime[target] ) {
- nextdeltatime[target] = 0 ;
- }
- }
- if ( target != myrank ) {
- while ( timeLoc[myrank] >= timeLoc[target] ) {
- timeLoc[target] += deltatime[target] ;
- }
- }
- }
- MyMPIAccessDEC->setTime( timeLoc[myrank] , nextdeltatime[myrank] ) ;
- debugStream << "test" << myrank << "=====TIME " << timeLoc[myrank] << "=====DELTATIME "
- << nextdeltatime[myrank] << "=====MAXTIME " << maxtime[myrank] << " ======"
- << endl ;
- double * sendbuf = new double[datamsglength*size] ;
-// double * sendbuf = (double *) malloc(sizeof(double)*datamsglength*size) ;
- double * recvbuf = new double[datamsglength*size] ;
- int j ;
- //debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " sendbuf" ;
- for ( target = 0 ; target < size ; target++ ) {
- for ( j = 0 ; j < datamsglength ; j++ ) {
- //sendbuf[j] = myrank*10000 + (j/datamsglength)*100 + j ;
- sendbuf[target*datamsglength+j] = myrank*1000000 + target*10000 +
- (timeLoc[myrank]/deltatime[myrank])*100 + j ;
- //debugStream << " " << (int ) sendbuf[target*datamsglength+j] ;
- recvbuf[target*datamsglength+j] = -1 ;
- }
- //debugStream << endl ;
- }
-
- sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_DOUBLE ,
- recvbuf, recvcounts , rdispls , MPI_DOUBLE ) ;
- chksts( sts , myrank , mpi_access ) ;
-
-// debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf before CheckSent" ;
-// for ( i = 0 ; i < datamsglength*size ; i++ ) {
-// debugStream << " " << recvbuf[i] ;
-// }
-// debugStream << endl ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq != 0 ) {
- ostringstream strstream ;
- strstream << "=============================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " WaitAllRecv "
- << nRecvReq << " Requests # 0 ERROR"
- << endl << "============================================================"
- << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
-// debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " check of recvbuf" << endl ;
- bool badrecvbuf = false ;
- for ( target = 0 ; target < size ; target++ ) {
- for ( int jj = 0 ; jj < datamsglength ; jj++ ) {
- int index = target*datamsglength+jj ;
- if ( jj < recvcounts[target] ) {
- if ( fabs(recvbuf[index] - (target*1000000 + myrank*10000 +
- (timeLoc[target]/deltatime[target])*100 + jj)) > 101) {
- badrecvbuf = true ;
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " target " << target << " timeLoc[target] "
- << timeLoc[target] << " recvbuf[" << index << "] " << (int ) recvbuf[index]
- << " # " << (int ) (target*1000000 +
- myrank*10000 + (timeLoc[target]/deltatime[target])*100 + jj)
- << endl ;
- }
- else if ( badrecvbuf ) {
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " ~= " << (int ) (target*1000000 +
- myrank*10000 + (timeLoc[target]/deltatime[target])*100 + jj) << endl ;
- }
- }
- else if ( recvbuf[index] != -1 ) {
- badrecvbuf = true ;
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " # -1" << endl ;
- }
- }
- }
- if ( badrecvbuf ) {
- ostringstream strstream ;
- strstream << "==================================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " badrecvbuf"
- << endl << "=================================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- delete [] recvbuf ;
- }
-
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent" << endl ;
- sts = MyMPIAccessDEC->checkFinalSent() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "=================================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent ERROR"
- << endl << "================================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv" << endl ;
- sts = MyMPIAccessDEC->checkFinalRecv() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "=================================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv ERROR"
- << endl << "================================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "===============================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error"
- << endl << "==============================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- time_t endtime = time(NULL) ;
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- debugStream << "test" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
-// delete aLinearInterpDEC ;
-
- delete [] sendcounts ;
- delete [] sdispls ;
- delete [] recvcounts ;
- delete [] rdispls ;
- delete [] sendtimecounts ;
- delete [] stimedispls ;
- delete [] recvtimecounts ;
- delete [] rtimedispls ;
-
-// MPI_Finalize();
-
- endtime = time(NULL) ;
-
- debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <time.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#ifndef WIN32
-#include <unistd.h>
-#endif
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// define this to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// define this to enable CPPUNIT asserts and failures that expose known bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_Cancel() {
-
- debugStream << "test_MPI_Access_Cancel" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_Cancel must be run with 2 procs" << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
- debugStream << "test_MPI_Access_Cancel" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int intsendbuf[5] ;
- double doublesendbuf[10] ;
- int RequestId[10] ;
- int sts = 0;
- int i , j ;
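- // Scenario: rank 0 ISends five MPI_INTs followed by five MPI_DOUBLEs;
- // rank 1 receives and checks the ints, leaves the remaining IRecvs
- // pending, then probes the unread doubles below and cancels both the
- // probed messages and its own pending requests.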
- for ( j = 0 ; j < 3 ; j++ ) {
- for ( i = 0 ; i < 10 ; i++ ) {
- debugStream << "test" << myrank << " ============================ i " << i
- << "============================" << endl ;
- if ( myrank == 0 ) {
- if ( i < 5 ) {
- intsendbuf[i] = i ;
- sts = mpi_access.ISend(&intsendbuf[i],1,MPI_INT,target, RequestId[i]) ;
- debugStream << "test" << myrank << " Send MPI_INT RequestId " << RequestId[i]
- << endl ;
- }
- else {
- doublesendbuf[i] = i ;
- sts = mpi_access.ISend(&doublesendbuf[i],1,MPI_DOUBLE,target,
- RequestId[i]) ;
- debugStream << "test" << myrank << " Send MPI_DOUBLE RequestId " << RequestId[i]
- << endl ;
- }
- }
- else {
- int flag = false ;
- while ( !flag ) {
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access.IProbe(target, source, tag, datatype, outcount,
- flag ) ;
- if ( flag ) {
- debugStream << "test" << myrank << " " << i << " IProbe target " << target
- << " source " << source << " tag " << tag
- << " outcount " << outcount << " flag " << flag << endl ;
- }
- else {
- debugStream << "test" << myrank << " flag " << flag << endl ;
- sleep( 1 ) ;
- }
- if ( flag ) {
- int recvbuf ;
- sts = mpi_access.IRecv(&recvbuf,outcount,MPI_INT,source,
- RequestId[i] ) ;
- if ( datatype == MPI_INT ) {
- int error;
- mpi_access.wait( RequestId[i] ) ;
- mpi_access.status( RequestId[i], source, tag, error, outcount,
- true ) ;
- if ( (outcount != 1) || (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "======================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO" << endl
- << "======================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- debugStream << "========================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " OK" << endl
- << "========================================================"
- << endl ;
- }
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
-
- if ( myrank != 0 ) {
- int iprobe ;
- for ( iprobe = 5 ; iprobe < 10 ; iprobe++ ) {
- debugStream << "test" << myrank << " ============================ iprobe "
- << iprobe << "============================" << endl ;
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- int probeflag = false ;
- while ( !probeflag ) {
- sts = mpi_access.IProbe( target, source, tag, datatype, outcount,
- probeflag ) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " IProbe iprobe " << iprobe
- << " target " << target << " probeflag " << probeflag
- << " tag " << tag << " outcount " << outcount << " datatype "
- << datatype << " lenerr " << lenerr << " " << msgerr << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "=========================================================="
- << endl << "test" << myrank << " IProbe KO iprobe " << iprobe
- << endl
- << "=========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if ( !probeflag ) {
- //debugStream << "========================================================"
- // << endl << "test" << myrank << " IProbe KO(OK) iprobe " << iprobe
- // << " probeflag " << probeflag << endl
- // << "========================================================"
- // << endl ;
- }
- else {
- debugStream << "test" << myrank << " " << iprobe << " IProbe target "
- << target << " source " << source << " tag " << tag
- << " outcount " << outcount << " probeflag " << probeflag
- << endl ;
- if ( datatype != MPI_DOUBLE ) {
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " MPI_DOUBLE KO" << endl
- << "========================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- int flag ;
- sts = mpi_access.cancel( source, tag, datatype, outcount, flag ) ;
- if ( sts != MPI_SUCCESS || !flag ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "======================================================"
- << endl << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl << "test" << myrank
- << " Cancel PendingIrecv KO flag " << flag << " iprobe "
- << iprobe << " Irecv completed" << endl
- << "======================================================"
- << endl ;
- //return 1 ;
- }
- else {
- debugStream << "======================================================"
- << endl << "test" << myrank
- << " Cancel PendingIrecv OK RequestId " << " flag "
- << flag << " iprobe " << iprobe << endl
- << "======================================================"
- << endl ;
- }
- }
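- // A second cancel now targets the IRecv posted earlier through its
- // request id, after dumping its status for the debug trace.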
- int Reqtarget, Reqtag, Reqerror, Reqoutcount ;
- mpi_access.status( RequestId[iprobe], Reqtarget, Reqtag, Reqerror,
- Reqoutcount, true ) ;
- debugStream << "test" << myrank << " Status Reqtarget "<< Reqtarget
- << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
- << endl ;
- int Reqflag ;
- sts = mpi_access.cancel( RequestId[iprobe] , Reqflag ) ;
- debugStream << "test" << myrank << " " << iprobe
- << " Cancel Irecv done Reqtarget " << Reqtarget
- << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
- << " Reqflag " << Reqflag << endl ;
- if ( sts != MPI_SUCCESS || !Reqflag ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl << "test" << myrank
- << " Cancel Irecv KO Reqflag " << Reqflag << " iprobe "
- << iprobe << endl
- << "========================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "========================================================"
- << endl << "test" << myrank
- << " Cancel Irecv OK RequestId " << RequestId[iprobe]
- << " Reqflag " << Reqflag << " iprobe " << iprobe << endl
- << "========================================================"
- << endl ;
- probeflag = Reqflag ;
- }
- }
- }
- }
- }
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.deleteRequests(10,RequestId) ;
- }
-
- int source, tag, outcount, flag ;
- MPI_Datatype datatype ;
- sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- if ( sts != MPI_SUCCESS || flag ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " IProbe KO flag " << flag
- << " remaining unread/cancelled message :" << endl
- << " source " << source << " tag " << tag << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.testAll(10,RequestId,flag) ;
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.deleteRequests(10,RequestId) ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// define this to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// define this to enable CPPUNIT asserts and failures that expose known bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_Cyclic_ISend_IRecv() {
-
- debugStream << "test_MPI_Access_Cyclic_ISend_IRecv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 3 ) {
- cerr << "test_MPI_Access_Cyclic_ISend_IRecv must be run with 3 procs" << endl ;
- //CPPUNIT_FAIL("test_MPI_Access_Cyclic_ISend_IRecv must be run with 3 procs") ;
- return;
- }
-
- debugStream << "test_MPI_Access_Cyclic_ISend_IRecv" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
-#define maxsend 100
-
- if ( myrank >= 3 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int alltarget[3] = {1 , 2 , 0 } ;
- int allsource[3] = {2 , 0 , 1 } ;
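- // Ring topology on 3 procs: rank r sends to alltarget[r] = (r+1)%3 and
- // receives from allsource[r] = (r+2)%3, so each value makes one trip
- // around the ring, seeded by rank 0.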
- int SendRequestId[maxsend] ;
- int RecvRequestId[maxsend] ;
- int sendbuf[maxsend] ;
- int recvbuf[maxsend] ;
- int sts ;
- int i = 0 ;
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
- SendRequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- for ( i = 0 ; i < maxsend ; i++ ) {
- recvbuf[i] = -1 ;
- sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,allsource[myrank],
- RecvRequestId[i]) ;
- debugStream << "test" << myrank << " Recv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(allsource[myrank]) << endl ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( j < i ) {
- debugStream << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
- << ")" << endl ;
- mpi_access.test( SendRequestId[j], flag ) ;
- if ( flag ) {
- int target, tag, error, outcount ;
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Send RequestId " << SendRequestId[j]
- << " target " << target << " tag " << tag << " error " << error
- << endl ;
- mpi_access.deleteRequest( SendRequestId[j] ) ;
- }
- }
- debugStream << "test" << myrank << " " << j << " -> Test-Recv("<< SendRequestId[j]
- << ")" << endl ;
- mpi_access.test( RecvRequestId[j], flag ) ;
- if ( flag ) {
- int source, tag, error, outcount ;
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Recv RequestId" << j << " "
- << RecvRequestId[j] << " source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount << endl ;
- if ( (outcount != 1) || (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "====================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
- << endl << "====================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- }
- if ( myrank == 0 ) {
- if ( i != maxsend-1 ) {
- sendbuf[i+1] = i + 1 ;
- sts = mpi_access.ISend(&sendbuf[i+1],1,MPI_INT,alltarget[myrank],
- SendRequestId[i+1]) ;
- debugStream << "test" << myrank << " Send RequestId " << SendRequestId[i+1]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- }
- else {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
- SendRequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(maxsend,SendRequestId,flag) ;
- mpi_access.testAll(maxsend,RecvRequestId,flag) ;
- mpi_access.waitAll(maxsend,SendRequestId) ;
- mpi_access.deleteRequests(maxsend,SendRequestId) ;
- mpi_access.waitAll(maxsend,RecvRequestId) ;
- mpi_access.deleteRequests(maxsend,RecvRequestId) ;
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- mpi_access.testAll(maxsend,SendRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " TestAllSendflag " << flag << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " TestAllSendflag " << flag << " OK" << endl
- << "=========================================================" << endl ;
- }
- mpi_access.testAll(maxsend,RecvRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " TestAllRecvflag " << flag << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " TestAllRecvflag " << flag << " OK" << endl
- << "=========================================================" << endl ;
- }
-
- int sendrequests[maxsend] ;
- int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , maxsend ,
- sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- int source, tag, error, outcount ;
- mpi_access.status(sendrequests[0], source, tag, error, outcount, true) ;
- debugStream << "test" << myrank << " RequestId " << sendrequests[0]
- << " source " << source << " tag " << tag << " error " << error
- << " outcount " << outcount << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- int recvrequests[maxsend] ;
- int recvreqsize = mpi_access.recvRequestIds( allsource[myrank] , maxsend ,
- recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// define this to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// define this to enable CPPUNIT asserts and failures that expose known bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
-
- debugStream << "test_MPI_Access_Cyclic_Send_Recv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 3 ) {
-    cerr << "test_MPI_Access_Cyclic_Send_Recv must be run with 3 procs" << endl ;
-    //CPPUNIT_FAIL("test_MPI_Access_Cyclic_Send_Recv must be run with 3 procs") ;
- return;
- }
-
- debugStream << "test_MPI_Access_Cyclic_Send_Recv" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 3 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int alltarget[3] = {1 , 2 , 0 } ;
- int allsource[3] = {2 , 0 , 1 } ;
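-  // Ring exchange among the first three ranks : each rank sends to
-  // alltarget[myrank] and receives from allsource[myrank] (0 -> 1 -> 2 -> 0).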
- int RequestId[10] ;
- int sts ;
- int i = 0 ;
- if ( myrank == 0 ) {
- sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- for ( i = 0 ; i < 10 ; i++ ) {
- int recvbuf ;
- int outcount ;
- if ( i & 1 ) {
- outcount = 0 ;
- sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i],
- &outcount) ;
- }
- else {
- sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i]) ;
- outcount = 1 ;
- }
- //int source, tag, error, outcount ;
- //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
- debugStream << "test" << myrank << " Recv RequestId " << RequestId[i]
- << " tag " << mpi_access.recvMPITag(allsource[myrank])
- << " outcount " << outcount << endl ;
-    if ( (outcount != 1) || (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if ( myrank == 0 ) {
- if ( i != 9 ) {
- int ii = i + 1 ;
- sts = mpi_access.send(&ii,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- }
- else {
- sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , 10 ,
- sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int recvrequests[10] ;
-  int recvreqsize = mpi_access.recvRequestIds( allsource[myrank] , 10 ,
- recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <time.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#ifndef WIN32
-#include <unistd.h>
-#endif
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_IProbe() {
-
- debugStream << "test_MPI_Access_IProbe" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_IProbe must be run with 2 procs" << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
- debugStream << "test_MPI_Access_IProbe" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int sendbuf[10] ;
- int RequestId[10] ;
- int sts = 0;
- int i ;
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, RequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
- << endl ;
- }
- else {
- int flag = false ;
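-      // Poll with IProbe until a message from target is pending, then
-      // receive it using the probed source, datatype and outcount.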
- while ( !flag ) {
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
- if ( flag ) {
- debugStream << "test" << myrank << " " << i << " IProbe target " << target
- << " source " << source << " tag " << tag
- << " outcount " << outcount << " flag " << flag << endl ;
- }
- else {
- debugStream << "test" << myrank << " IProbe flag " << flag << endl ;
- sleep( 1 ) ;
- }
- if ( flag ) {
- int recvbuf ;
- sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
- &outcount) ;
-          if ( (outcount != 1) || (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO" << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- debugStream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.deleteRequests(10,RequestId) ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_ISendRecv() {
-
- debugStream << "test_MPI_Access_ISendRecv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cerr << "test_MPI_Access_ISendRecv must be run with 2 procs" << endl ;
- //CPPUNIT_FAIL("test_MPI_Access_ISendRecv must be run with 2 procs") ;
- return;
- }
-
- debugStream << "test_MPI_Access_ISendRecv" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[10] ;
- int RecvRequestId[10] ;
- int sendbuf[10] ;
- int recvbuf[10] ;
- int sts ;
- int i ;
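-  // Each iteration posts one combined ISendRecv with the peer, then
-  // test()s every request posted so far and deletes completed sends.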
- for ( i = 0 ; i < 10 ; i++ ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISendRecv(&sendbuf[i],1,MPI_INT,target, SendRequestId[i],
- &recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
- debugStream << "test" << myrank << " Send sendRequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target)
- << " recvRequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( j < i ) {
- debugStream << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
- << ")" << endl ;
- mpi_access.test( SendRequestId[j], flag ) ;
- if ( flag ) {
- int tag, error, outcount ;
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Send RequestId " << SendRequestId[j]
- << " target " << target << " tag " << tag << " error " << error
- << endl ;
- mpi_access.deleteRequest( SendRequestId[j] ) ;
- }
- }
-      debugStream << "test" << myrank << " " << j << " -> Test-Recv("<< RecvRequestId[j]
- << ")" << endl ;
- mpi_access.test( RecvRequestId[j], flag ) ;
- if ( flag ) {
- int source, tag, error, outcount ;
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Recv RequestId" << j << " "
- << RecvRequestId[j] << " source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount << endl ;
-        if ( (outcount != 1) || (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount "
- << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- }
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(10,SendRequestId,flag) ;
- mpi_access.waitAll(10,SendRequestId) ;
- mpi_access.deleteRequests(10,SendRequestId) ;
- mpi_access.testAll(10,SendRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.testAll(10,RecvRequestId,flag) ;
- mpi_access.waitAll(10,RecvRequestId) ;
- mpi_access.deleteRequests(10,RecvRequestId) ;
- mpi_access.testAll(10,RecvRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- int recvrequests[10] ;
-  int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv() {
-
- debugStream << "test_MPI_Access_ISend_IRecv" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cerr << "test_MPI_Access_ISend_IRecv must be run with 2 procs" << endl ;
- //CPPUNIT_FAIL("test_MPI_Access_ISend_IRecv must be run with 2 procs") ;
- return;
- }
-
- debugStream << "test_MPI_Access_ISend_IRecv" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
-#define maxreq 100
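-// rank 0 posts maxreq ISends while rank 1 posts maxreq IRecvs, calling
-// test() on all requests posted so far after each new one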
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- int i ;
- for ( i = 0 ; i < maxreq ; i++ ) {
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
- debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
- debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvRequestId[j], flag ) ;
- }
- if ( flag ) {
- int source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Recv RequestId "
- << RecvRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << endl ;
-          if ( (outcount != 1) || (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- //else {
- // debugStream << "==========================================================="
- // << endl << "test" << myrank << " outcount " << outcount
- // << " RequestId " << RecvRequestId[j] << " recvbuf "
- // << recvbuf[j] << " OK" << endl
- // << "==========================================================="
- // << endl ;
- //}
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- if ( myrank == 0 ) {
- mpi_access.waitAll(maxreq, SendRequestId) ;
- mpi_access.deleteRequests(maxreq, SendRequestId) ;
- }
- else {
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvRequestId) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- for ( i = 0 ; i < sendreqsize ; i++ ) {
- debugStream << "test" << myrank << " sendrequests[ " << i << " ] = "
- << sendrequests[i] << endl ;
- }
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
- int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
- // MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <time.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {
-
- debugStream << "test_MPI_Access_ISend_IRecv_BottleNeck" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_ISend_IRecv_BottleNeck must be run with 2 procs"
- << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
- debugStream << "test_MPI_Access_ISend_IRecv_BottleNeck" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10000
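-// bottleneck scenario : rank 0 floods rank 1 with maxreq ISends while
-// rank 1 drains them with IRecvs and deletes completed requests on the fly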
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- int i ;
- for ( i = 0 ; i < maxreq ; i++ ) {
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(sendbuf,i,MPI_INT,target, SendRequestId[i]) ;
- debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- //sleep( 1 ) ;
- sts = mpi_access.IRecv(recvbuf,i,MPI_INT,target, RecvRequestId[i]) ;
- debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- int recvreqsize = mpi_access.recvRequestIdsSize() ;
- int * recvrequests = new int[ recvreqsize ] ;
- recvreqsize = mpi_access.recvRequestIds( target , recvreqsize , recvrequests ) ;
- int j ;
- for (j = 0 ; j < recvreqsize ; j++) {
- int flag ;
- mpi_access.test( recvrequests[j], flag ) ;
- if ( flag ) {
- int source, tag, error, outcount ;
- mpi_access.status( recvrequests[j], source, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Recv RequestId "
- << recvrequests[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << " : DeleteRequest" << endl ;
- mpi_access.deleteRequest( recvrequests[j] ) ;
- }
- else {
-// debugStream << "test" << myrank << " Test(Recv RequestId "
-// << recvrequests[j] << ") flag " << flag << endl ;
- }
- }
- delete [] recvrequests ;
- }
- if ( sts != MPI_SUCCESS ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- }
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- if ( myrank == 0 ) {
- int size2 = mpi_access.sendRequestIdsSize() ;
- debugStream << "test" << myrank << " before WaitAll sendreqsize " << size2 << endl ;
- mpi_access.waitAll(maxreq, SendRequestId) ;
- size2 = mpi_access.sendRequestIdsSize() ;
- debugStream << "test" << myrank << " after WaitAll sendreqsize " << size2 << endl ;
- int * ArrayOfSendRequests = new int[ size2 ] ;
- int nSendRequest = mpi_access.sendRequestIds( size2 , ArrayOfSendRequests ) ;
- for ( i = 0 ; i < nSendRequest ; i++ ) {
- mpi_access.deleteRequest( ArrayOfSendRequests[i] ) ;
- }
- delete [] ArrayOfSendRequests ;
- }
- else {
- int size2 = mpi_access.recvRequestIdsSize() ;
- debugStream << "test" << myrank << " before WaitAll recvreqsize " << size2 << endl ;
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- size2 = mpi_access.recvRequestIdsSize() ;
- debugStream << "test" << myrank << " after WaitAll recvreqsize " << size2 << endl ;
- int * ArrayOfRecvRequests = new int[ size2 ] ;
- int nRecvRequest = mpi_access.recvRequestIds( size2 , ArrayOfRecvRequests ) ;
- for ( i = 0 ; i < nRecvRequest ; i++ ) {
- mpi_access.deleteRequest( ArrayOfRecvRequests[i] ) ;
- }
- delete [] ArrayOfRecvRequests ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- for ( i = 0 ; i < sendreqsize ; i++ ) {
- debugStream << "test" << myrank << " sendrequests[ " << i << " ] = "
- << sendrequests[i] << endl ;
- }
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
- int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length() {
-
- debugStream << "test_MPI_Access_ISend_IRecv_Length" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_ISend_IRecv_Length must be run with 2 procs" << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
- debugStream << "test_MPI_Access_ISend_IRecv_Length" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[1000*(maxreq-1)] ;
- int recvbuf[maxreq][1000*(maxreq-1)] ;
- int i ;
- for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
- sendbuf[i] = i ;
- }
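-  // message i carries 1000*i ints, so lengths grow from 0 up to
-  // 1000*(maxreq-1) across the maxreq exchanges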
- for ( i = 0 ; i < maxreq ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
- debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- sts = mpi_access.IRecv( recvbuf[i], 1000*i, MPI_INT, target,
- RecvRequestId[i] ) ;
- debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvRequestId[j], flag ) ;
- }
- if ( flag ) {
- int target2,source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendRequestId[j], target2, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target2 << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Recv RequestId "
- << RecvRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << endl ;
- if ( outcount != 0 ) {
-            if ( (outcount != 1000*j) ||
- (recvbuf[j][outcount-1] != (outcount-1)) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf[j][outcount-1] << " KO"
- << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RequestId " << RecvRequestId[j] << " recvbuf "
- << recvbuf[j][outcount-1] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- else {
- debugStream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RequestId " << RecvRequestId[j] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- debugStream << "test" << myrank << " WaitAll" << endl ;
- if ( myrank == 0 ) {
- mpi_access.waitAll(maxreq, SendRequestId) ;
- mpi_access.deleteRequests(maxreq, SendRequestId) ;
- }
- else {
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvRequestId) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
- int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
- // MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length_1() {
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_ISend_IRecv_Length_1 must be run with 2 procs" << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
- debugStream << "test_MPI_Access_ISend_IRecv_Length_1" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[1000*(maxreq-1)] ;
- int recvbuf[maxreq][1000*(maxreq-1)] ;
- int maxirecv = 1 ;
- int i ;
- RecvRequestId[0] = -1 ;
- for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
- sendbuf[i] = i ;
- }
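-  // variant of the Length test : the receiver IProbes for the next pending
-  // message and sizes its IRecv from the probed outcount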
- for ( i = 0 ; i < maxreq ; i++ ) {
- sts = MPI_SUCCESS ;
- if ( myrank == 0 ) {
- sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
- debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- int j ;
- for (j = 1 ; j <= i ; j++) {
- int source = 0;
- MPI_Datatype datatype = 0;
- int outcount = 0;
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- int MPITag ;
- sts = mpi_access.IProbe( target , source, MPITag, datatype,
- outcount, flag) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " IProbe lenerr " << lenerr << " "
- << msgerr << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " IProbe KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- debugStream << "test" << myrank << " IProbe i/j " << i << "/" << j
- << " MPITag " << MPITag << " datatype " << datatype
- << " outcount " << outcount << " flag " << flag << endl ;
- }
- if ( flag ) {
- if ( myrank == 0 ) {
- int tag, error ;
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- sts = mpi_access.IRecv( recvbuf[maxirecv], outcount, datatype, source,
- RecvRequestId[maxirecv] ) ;
- debugStream << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
- << RecvRequestId[maxirecv] << " source " << source
- << " outcount " << outcount << " tag "
- << mpi_access.recvMPITag(target) << endl ;
- maxirecv = maxirecv + 1 ;
- }
- }
- else if ( myrank == 1 && i == maxreq-1 && j >= maxirecv ) {
- sts = mpi_access.IRecv( recvbuf[j], 1000*j, MPI_INT, target,
- RecvRequestId[j] ) ;
- debugStream << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
- << RecvRequestId[j] << " target " << target << " length " << 1000*j
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- maxirecv = maxirecv + 1 ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " KO" << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.testAll( maxreq, SendRequestId, flag ) ;
- debugStream << "test" << myrank << " TestAll SendRequest flag " << flag << endl ;
- }
- else {
- int source ;
- int outcount ;
- if ( maxirecv != maxreq ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " KO" << " maxirecv " << maxirecv
- << " != maxreq " << maxreq << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- while ( maxirecv > 0 ) {
- for ( i = 1 ; i < maxreq ; i++ ) {
-        debugStream << "test" << myrank << " Test : " << endl ;
- sts = mpi_access.test( RecvRequestId[i] , flag ) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " flag " << flag << " lenerr "
- << lenerr << " " << msgerr << " maxirecv " << maxirecv << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- debugStream << "test" << myrank << " Test flag " << flag << endl ;
- if ( flag ) {
- int tag, error ;
- mpi_access.status( RecvRequestId[i] , source , tag , error ,
- outcount ) ;
- if ( i != 0 ) {
- if (( outcount != 1000*i ) ||
- ((recvbuf[i][outcount-1] != (outcount-1)))) {
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " KO" << " i " << i
- << " recvbuf " << recvbuf[i][outcount-1] << endl
- << "========================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- else if ( outcount != 0 ) {
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " KO" << " i " << i << endl
- << "========================================================"
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- maxirecv = maxirecv - 1 ;
- }
- }
- }
- mpi_access.testAll( maxreq, RecvRequestId, flag ) ;
- debugStream << "test" << myrank << " TestAll RecvRequest flag " << flag << endl ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- debugStream << "test" << myrank << " WaitAll :" << endl ;
- if ( myrank == 0 ) {
- mpi_access.waitAll( maxreq, SendRequestId ) ;
- mpi_access.deleteRequests( maxreq, SendRequestId ) ;
- }
- else {
- mpi_access.waitAll( maxreq, RecvRequestId ) ;
- mpi_access.deleteRequests( maxreq, RecvRequestId ) ;
- }
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
-    int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
- // MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_Probe() {
-
- debugStream << "test_MPI_Access_Probe" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cerr << "test_MPI_Access_Probe must be run with 2 procs" << endl ;
- //CPPUNIT_FAIL("test_MPI_Access_Probe must be run with 2 procs") ;
- return;
- }
-
- debugStream << "test_MPI_Access_Probe" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int RequestId[10] ;
- int sts ;
- int i ;
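-  // the receiver uses the blocking probe() to learn source, tag, datatype
-  // and outcount before posting the matching recv()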
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
- << endl ;
- }
- else {
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access.probe(target, source, tag, datatype, outcount ) ;
- debugStream << "test" << myrank << " Probe target " << target << " source " << source
- << " tag " << tag << " outcount " << outcount << endl ;
- int recvbuf ;
- sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
- &outcount) ;
-      if ( (outcount != 1) || (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable the CPPUNIT asserts and fails that expose bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_SendRecv() {
-
- debugStream << "MPIAccessTest::test_MPI_Access_SendRecv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cerr << "MPIAccessTest::test_MPI_Access_SendRecv must be run with 2 procs" << endl ;
- //CPPUNIT_FAIL("test_MPI_Access_SendRecv must be run with 2 procs") ;
- return;
- }
-
- debugStream << "MPIAccessTest::test_MPI_Access_SendRecv" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int sendRequestId[10] ;
- int recvRequestId[10] ;
- int sts ;
- int i ;
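-  // each iteration exchanges one int in both directions with a combined
-  // sendRecv ; odd iterations also ask for the receive outcount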
- for ( i = 0 ; i < 10 ; i++ ) {
- int recvbuf ;
- int outcount ;
- if ( i & 1 ) {
- outcount = -1 ;
- sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
- &recvbuf,1,MPI_INT,target, recvRequestId[i],
- &outcount) ;
- }
- else {
- sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
- &recvbuf,1,MPI_INT,target, recvRequestId[i]) ;
-// outcount = mpi_access.MPIOutCount( recvRequestId[i] ) ;
- outcount = 1 ;
- }
- debugStream << "test" << myrank << " Send sendRequestId " << sendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target)
- << " recvRequestId " << recvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target)
- << " outcount " << outcount << " MPIOutCount "
- << mpi_access.MPIOutCount( recvRequestId[i] ) << endl ;
-    if ( (outcount != 1) || (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(10,sendRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,sendRequestId) ;
- mpi_access.testAll(10,recvRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,recvRequestId) ;
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int recvrequests[10] ;
-  int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
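test_MPI_Access_SendRecv, removed above, exercised the combined send/receive call between two ranks, checking outcount and the received value on every iteration. A minimal sketch of the plain-MPI analogue (the zero tags are an assumption; MPIAccess::sendRecv assigns tags itself):

    // Sketch: exchange one int with the peer rank; assumes an initialized
    // MPI program where myrank is 0 or 1.
    int i = 0, recvbuf = -1, outcount = 0;
    int target = 1 - myrank;
    MPI_Status status;
    MPI_Sendrecv(&i, 1, MPI_INT, target, 0,
                 &recvbuf, 1, MPI_INT, target, 0,
                 MPI_COMM_WORLD, &status);
    MPI_Get_count(&status, MPI_INT, &outcount);   // expected to be 1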
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails that reveal bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_Send_Recv() {
-
- debugStream << "test_MPI_Access_Send_Recv" << endl ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cerr << "test_MPI_Access_Send_Recv must be run with 2 procs" << endl ;
- //CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be run with 2 procs") ;
- return;
- }
-
- debugStream << "test_MPI_Access_Send_Recv" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int RequestId[10] ;
- int sts ;
- int i ;
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
- debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- int recvbuf ;
- int outcount ;
- sts = mpi_access.recv(&recvbuf,1,MPI_INT,target, RequestId[i],&outcount) ;
- //int source, tag, error, outcount ;
- //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
- debugStream << "test" << myrank << " Recv RequestId " << RequestId[i]
- << " tag " << mpi_access.recvMPITag(target)
- << " outcount " << outcount << endl ;
-      if ( (outcount != 1) || (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check();
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- if(MPI_ACCESS_VERBOSE) mpi_access.check();
-
- if ( myrank == 0 ) {
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- else {
- int recvrequests[10] ;
-    int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
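test_MPI_Access_Send_Recv, removed above, split the same exchange across the ranks: rank 0 sends ten ints, rank 1 receives and validates each one. Sketched in raw MPI under the same two-rank assumption:

    // Sketch: one-directional send/recv loop; mirrors the KO checks above.
    for (int i = 0; i < 10; i++) {
      if (myrank == 0) {
        MPI_Send(&i, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
      }
      else {
        int recvbuf = -1;
        MPI_Status status;
        MPI_Recv(&recvbuf, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        if (recvbuf != i) { /* the test raised CPPUNIT_FAIL here */ }
      }
    }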
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails that reveal bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_Send_Recv_Length() {
-
- debugStream << "test_MPI_Access_Send_Recv_Length" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_Send_Recv_Length must be run with 2 procs" << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
- debugStream << "test_MPI_Access_Send_Recv_Length" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int RequestId[10] ;
- int sendbuf[9000] ;
- int recvbuf[9000] ;
- bool recvbufok ;
- int sts ;
- int i , j ;
- for ( i = 0 ; i < 9000 ; i++ ) {
- sendbuf[i] = i ;
- }
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.send( sendbuf, 1000*i, MPI_INT, target, RequestId[i] ) ;
- debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- sts = MPI_SUCCESS ;
- RequestId[i] = -1 ;
- int outcount = 0 ;
- if ( i != 0 ) {
- sts = mpi_access.recv( recvbuf,1000*i+1,MPI_INT,target, RequestId[i],
- &outcount ) ;
- }
- //int source, tag, error, outcount ;
- //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
- debugStream << "test" << myrank << " Recv RequestId " << RequestId[i]
- << " tag " << mpi_access.recvMPITag(target)
- << " outcount " << outcount << endl ;
- recvbufok = true ;
- for ( j = 0 ; j < outcount ; j++ ) {
- if ( recvbuf[j] != j ) {
- debugStream << "test" << myrank << " recvbuf[ " << j << " ] = " << recvbuf[j]
- << endl ;
- recvbufok = false ;
- break ;
- }
- }
-    if ( (outcount != 1000*i) || !recvbufok ) {
-      ostringstream strstream ;
-      strstream << "==========================================================="
-                << endl << "test" << myrank << " outcount " << outcount
-                << " recvbufok " << recvbufok << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- else {
- int recvrequests[10] ;
-    int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
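test_MPI_Access_Send_Recv_Length, removed above, relied on a receive posted with a count larger than the incoming message: the posted count is only an upper bound, and the true length is read back from the status. The underlying MPI behaviour, as a sketch using the test's own sizes:

    // Sketch: post room for 1001 ints, receive 1000, read the actual
    // count back; MPIAccess::recv() surfaces this as its outcount argument.
    int recvbuf[1001];
    MPI_Status status;
    MPI_Recv(recvbuf, 1001, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
    int outcount = 0;
    MPI_Get_count(&status, MPI_INT, &outcount);   // 1000, not 1001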
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails that reveal bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void MPIAccessTest::test_MPI_Access_Time() {
-
- debugStream << "test_MPI_Access_Time" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_Time must be run with 2 procs" << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
- debugStream << "test_MPI_Access_Time" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10
-
- if ( myrank >= 2 ) {
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
- mpi_access.barrier() ;
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
- delete group ;
- debugStream << "test_MPI_Access_Time" << myrank << " OK" << endl ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendTimeRequestId[maxreq] ;
- int RecvTimeRequestId[maxreq] ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- int i = 0 ;
- MEDCoupling::TimeMessage aSendTimeMsg[maxreq] ;
- MEDCoupling::TimeMessage aRecvTimeMsg[maxreq] ;
- double t ;
- double dt = 1. ;
- double maxt = 10. ;
- for ( t = 0 ; t < maxt ; t = t+dt ) {
- if ( myrank == 0 ) {
- aSendTimeMsg[i].time = t ;
- aSendTimeMsg[i].deltatime = dt ;
- //aSendTimeMsg[i].maxtime = maxt ;
- //sts = mpi_access.ISend( &aSendTimeMsg , mpi_access.timeExtent() ,
- sts = mpi_access.ISend( &aSendTimeMsg[i] , 1 ,
- mpi_access.timeType() , target ,
- SendTimeRequestId[i]) ;
- debugStream << "test" << myrank << " ISend RequestId " << SendTimeRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
- debugStream << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- //sts = mpi_access.IRecv( &aRecvTimeMsg , mpi_access.timeExtent() ,
- sts = mpi_access.IRecv( &aRecvTimeMsg[i] , 1 ,
- mpi_access.timeType() , target ,
- RecvTimeRequestId[i]) ;
- debugStream << "test" << myrank << " IRecv RequestId " << RecvTimeRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
- debugStream << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendTimeRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvTimeRequestId[j], flag ) ;
- }
- if ( flag ) {
- int source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendTimeRequestId[j], target, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Send TimeRequestId " << SendTimeRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << aSendTimeMsg[j] << endl ;
- }
- else {
- mpi_access.status( RecvTimeRequestId[j], source, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Recv TimeRequestId "
- << RecvTimeRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << aRecvTimeMsg[j] << endl ;
-          if ( (outcount != 1) || (aRecvTimeMsg[j].time != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount << " KO"
- << " RecvTimeRequestId " << RecvTimeRequestId[j] << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RecvTimeRequestId " << RecvTimeRequestId[j] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvRequestId[j], flag ) ;
- }
- if ( flag ) {
- int source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- debugStream << "test" << myrank << " Test(Recv RequestId "
- << RecvRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << endl ;
-          if ( (outcount != 1) || (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RequestId " << RecvRequestId[j] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- i = i + 1 ;
- }
-
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
- if ( myrank == 0 ) {
- mpi_access.waitAll(maxreq, SendTimeRequestId) ;
- mpi_access.deleteRequests(maxreq, SendTimeRequestId) ;
- mpi_access.waitAll(maxreq, SendRequestId) ;
- mpi_access.deleteRequests(maxreq, SendRequestId) ;
- }
- else {
- mpi_access.waitAll(maxreq, RecvTimeRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvTimeRequestId) ;
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvRequestId) ;
- }
- if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 2*maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[2*maxreq] ;
-    int recvreqsize = mpi_access.recvRequestIds( target , 2*maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
- mpi_access.barrier() ;
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
-
- delete group ;
-
- // MPI_Finalize();
-
- debugStream << "test_MPI_Access_Time" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
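test_MPI_Access_Time, removed above, interleaved a stream of TimeMessage structs with plain data messages, posting everything nonblocking and polling completion request by request. The polling idiom it builds on, sketched for a single raw-MPI request (buffer and tag are hypothetical):

    // Sketch: post a nonblocking receive and poll it to completion.
    // MPIAccess::test()/status() wrap this and record source, tag and
    // outcount per managed request id.
    int recvbuf = -1, flag = 0;
    MPI_Request req;
    MPI_Status status;
    MPI_Irecv(&recvbuf, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
    while (!flag)
      MPI_Test(&req, &flag, &status);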
+++ /dev/null
-// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines whose execution leads to a segmentation fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails that reveal bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace MEDCoupling;
-
-void chksts( int sts , int myrank , MEDCoupling::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- debugStream << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-  return ;
-}
-
-void MPIAccessTest::test_MPI_Access_Time_0() {
-
- debugStream << "test_MPI_Access_Time_0" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_MPI_Access_Time_0" <<endl
- << " nbprocs =2" << endl
- << "test must be run with 2 procs" << endl ;
- cerr << strstream.str() << endl ;
- //CPPUNIT_FAIL( strstream.str() ) ;
- return;
- }
-
-#define maxreq 100
-
- double t ;
- double dt[2] = {2., 1.} ;
- double maxt = maxreq/dt[myrank] ;
-
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << endl ;
-
- MEDCoupling::CommInterface interface ;
-
- MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
-
- MEDCoupling::MPIAccess * mpi_access = new MEDCoupling::MPIAccess( group ) ;
-
- if ( myrank >= 2 ) {
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
- delete group ;
- delete mpi_access ;
- debugStream << "test_MPI_Access_Time" << myrank << " OK" << endl ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendTimeRequestId[maxreq] ;
- int RecvTimeRequestId[maxreq] ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts = 0;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- MEDCoupling::TimeMessage aSendTimeMsg[maxreq] ;
- int lasttime = -1 ;
- MEDCoupling::TimeMessage RecvTimeMessages[maxreq+1] ;
- MEDCoupling::TimeMessage *aRecvTimeMsg = &RecvTimeMessages[1] ;
-// mpi_access->Trace() ;
- int istep = 0 ;
- for ( t = 0 ; t < maxt ; t = t+dt[myrank] ) {
- debugStream << "test" << myrank << " ==========================TIME " << t
- << " ==========================" << endl ;
- if ( myrank == 0 ) {
- aSendTimeMsg[istep].time = t ;
- aSendTimeMsg[istep].deltatime = dt[myrank] ;
- //aSendTimeMsg[istep].maxtime = maxt ;
- if ( t+dt[myrank] >= maxt ) {
- aSendTimeMsg[istep].deltatime = 0 ;
- }
- sts = mpi_access->ISend( &aSendTimeMsg[istep] , 1 ,
- mpi_access->timeType() , target ,
- SendTimeRequestId[istep]) ;
- debugStream << "test" << myrank << " ISend TimeRequestId " << SendTimeRequestId[istep]
- << " tag " << mpi_access->MPITag(SendTimeRequestId[istep]) << endl ;
- chksts( sts , myrank , mpi_access ) ;
- sendbuf[istep] = istep ;
- sts = mpi_access->ISend(&sendbuf[istep],1,MPI_INT,target, SendRequestId[istep]) ;
- debugStream << "test" << myrank << " ISend Data RequestId " << SendRequestId[istep]
- << " tag " << mpi_access->MPITag(SendRequestId[istep]) << endl ;
- chksts( sts , myrank , mpi_access ) ;
-//CheckSent
-//=========
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq ,
- sendrequests ) ;
- int j , flag ;
- for ( j = 0 ; j < sendreqsize ; j++ ) {
- sts = mpi_access->test( sendrequests[j] , flag ) ;
- chksts( sts , myrank , mpi_access ) ;
- if ( flag ) {
- mpi_access->deleteRequest( sendrequests[j] ) ;
- debugStream << "test" << myrank << " " << j << ". " << sendrequests[j]
- << " sendrequest deleted" << endl ;
- }
- }
- }
- else {
-//InitRecv
-//========
- if ( t == 0 ) {
- aRecvTimeMsg[lasttime].time = 0 ;
- sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
- mpi_access->timeType() ,
- target , RecvTimeRequestId[lasttime+1]) ;
- debugStream << "test" << myrank << " t == 0 IRecv TimeRequestId "
- << RecvTimeRequestId[lasttime+1]
- << " MPITag " << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
- << " MPICompleted "
- << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] ) << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- else {
- debugStream << "test" << myrank << " t # 0 lasttime " << lasttime << endl ;
-//InitialOutTime
-//==============
- bool outtime = false ;
- if ( lasttime != -1 ) {
- if ( t <= aRecvTimeMsg[lasttime-1].time ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " t " << t << " <= "
- << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
- << aRecvTimeMsg[lasttime-1].time << " KO" << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "==========================================================="
- << endl << "test" << myrank << " t " << t << " > "
- << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
- << aRecvTimeMsg[lasttime-1].time << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- //outtime = ((aRecvTimeMsg[lasttime].time +
- // aRecvTimeMsg[lasttime].deltatime) >=
- // aRecvTimeMsg[lasttime].maxtime) ;
- outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
- }
-// CheckRecv - CheckTime
-// We have lasttime such that:
-//   aRecvTimeMsg[ lasttime-1 ].time < T(i-1) <= aRecvTimeMsg[ lasttime ].time
-// We look for lasttime such that:
-//   aRecvTimeMsg[ lasttime-1 ].time < T(i) <= aRecvTimeMsg[ lasttime ].time
- if ( t <= aRecvTimeMsg[lasttime].time ) {
- outtime = false ;
- }
- debugStream << "test" << myrank << " while outtime( " << outtime << " && t " << t
- << " > aRecvTimeMsg[ " << lasttime << " ] "
- << aRecvTimeMsg[lasttime].time << " )" << endl ;
- while ( !outtime && (t > aRecvTimeMsg[lasttime].time) ) {
- lasttime += 1 ;
-//TimeMessage
-//===========
- sts = mpi_access->wait( RecvTimeRequestId[lasttime] ) ;
- chksts( sts , myrank , mpi_access ) ;
- debugStream << "test" << myrank << " Wait done RecvTimeRequestId "
- << RecvTimeRequestId[lasttime] << " lasttime " << lasttime
- << " tag " << mpi_access->MPITag(RecvTimeRequestId[lasttime])
- << aRecvTimeMsg[lasttime] << endl ;
- if ( lasttime == 0 ) {
- aRecvTimeMsg[lasttime-1] = aRecvTimeMsg[lasttime] ;
- }
- mpi_access->deleteRequest( RecvTimeRequestId[lasttime] ) ;
-
- double deltatime = aRecvTimeMsg[lasttime].deltatime ;
- //double maxtime = aRecvTimeMsg[lasttime].maxtime ;
- double nexttime = aRecvTimeMsg[lasttime].time + deltatime ;
- debugStream << "test" << myrank << " t " << t << " lasttime " << lasttime
- << " deltatime " << deltatime
- << " nexttime " << nexttime << endl ;
- //if ( nexttime < maxtime && t > nexttime ) {
- if ( deltatime != 0 && t > nexttime ) {
-//CheckRecv :
-//=========
- //while ( nexttime < maxtime && t > nexttime ) {
- while ( deltatime != 0 && t > nexttime ) {
- int source, MPITag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access->probe( target , source, MPITag, datatype,
- outcount ) ;
- chksts( sts , myrank , mpi_access ) ;
-// Cancel DataMessages until a TimeMessage is reached
- int cancelflag ;
- while ( !mpi_access->isTimeMessage( MPITag ) ) {
- sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
- //sts = mpi_access->cancel( source, datatype, outcount ,
- //RecvRequestId[lasttime] ,
- cancelflag ) ;
- debugStream << "test" << myrank << " Recv TO CANCEL RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << " cancelflag " << cancelflag << endl ;
- chksts( sts , myrank , mpi_access ) ;
- sts = mpi_access->probe( target , source, MPITag, datatype,
- outcount ) ;
- chksts( sts , myrank , mpi_access ) ;
- }
-//We can now advance in time
- nexttime += deltatime ;
- //if ( nexttime < maxtime && t > nexttime ) {
- if ( deltatime != 0 && t > nexttime ) {
-// Cancel the TimeMessage
- sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
- //sts = mpi_access->cancel( source, datatype, outcount ,
- //RecvRequestId[lasttime] ,
- cancelflag ) ;
- debugStream << "test" << myrank << " Time TO CANCEL RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << " cancelflag " << cancelflag << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- }
- }
- else {
-//DoRecv
-//======
- debugStream << "test" << myrank << " Recv target " << target
- << " lasttime " << lasttime
- << " lasttime-1 " << aRecvTimeMsg[lasttime-1]
- << " lasttime " << aRecvTimeMsg[lasttime]
- << endl ;
- sts = mpi_access->recv(&recvbuf[lasttime],1,MPI_INT,target,
- RecvRequestId[lasttime]) ;
- debugStream << "test" << myrank << " Recv RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- //outtime = ((aRecvTimeMsg[lasttime].time +
- // aRecvTimeMsg[lasttime].deltatime) >=
- // aRecvTimeMsg[lasttime].maxtime) ;
- outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
- if ( !outtime ) {
-// Asynchronously read the next time message in advance
- sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
- mpi_access->timeType() , target ,
- RecvTimeRequestId[lasttime+1]) ;
- debugStream << "test" << myrank << " IRecv TimeRequestId "
- << RecvTimeRequestId[lasttime+1] << " MPITag "
- << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
- << " MPICompleted "
- << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] )
- << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- else if ( t <= aRecvTimeMsg[lasttime].time ) {
- outtime = false ;
- }
- }
-
- //printf("DEBUG t %.15f Msg[lasttime-1] %.15f Msg[lasttime] %.15f \n",t,
- // aRecvTimeMsg[lasttime-1].time,aRecvTimeMsg[lasttime].time) ;
-    if ( ((t <= aRecvTimeMsg[lasttime-1].time) ||
-         (t > aRecvTimeMsg[lasttime].time)) && !outtime ) {
-      ostringstream strstream ;
-      strstream << "==========================================================="
-                << endl << "test" << myrank << " t " << t << " <= "
-                << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
-                << aRecvTimeMsg[lasttime-1].time << " or t " << t << " > "
-                << "aRecvTimeMsg[ " << lasttime << " ].time "
-                << aRecvTimeMsg[lasttime].time << endl
-                << " or else outtime " << outtime << " KO RequestTimeIds "
- << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
- << " RequestIds "
- << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
- << "==========================================================="
- << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "==========================================================="
- << endl << "test" << myrank
- << " aRecvTimeMsg[ " << lasttime << "-1 ].time "
- << aRecvTimeMsg[lasttime-1].time << " < t " << t << " <= "
- << "aRecvTimeMsg[ " << lasttime << " ].time "
- << aRecvTimeMsg[lasttime].time << endl
-                << " or else outtime " << outtime << " OK RequestTimeIds "
- << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
- << " RequestIds "
- << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- chksts( sts , myrank , mpi_access ) ;
- istep = istep + 1 ;
- }
-
- debugStream << "test" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
- if (MPI_ACCESS_VERBOSE) mpi_access->check() ;
-
- if ( myrank == 0 ) {
-//CheckFinalSent
-//==============
- debugStream << "test" << myrank << " CheckFinalSent :" << endl ;
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
- int j ;
- for ( j = 0 ; j < sendreqsize ; j++ ) {
- sts = mpi_access->wait( sendrequests[j] ) ;
- chksts( sts , myrank , mpi_access ) ;
- mpi_access->deleteRequest( sendrequests[j] ) ;
- debugStream << "test" << myrank << " " << j << ". " << sendrequests[j] << " deleted"
- << endl ;
- }
- }
- else {
- debugStream << "test" << myrank << " CheckFinalRecv :" << endl ;
- int recvrequests[2*maxreq] ;
- int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
- int cancelflag ;
- int j ;
- for ( j = 0 ; j < recvreqsize ; j++ ) {
- sts = mpi_access->cancel( recvrequests[j] , cancelflag ) ;
- chksts( sts , myrank , mpi_access ) ;
- mpi_access->deleteRequest( recvrequests[j] ) ;
- debugStream << "test" << myrank << " " << j << ". " << recvrequests[j] << " deleted"
- << " cancelflag " << cancelflag << endl ;
- }
- int source, MPITag, outcount , flag ;
- MPI_Datatype datatype ;
- sts = mpi_access->IProbe( target , source, MPITag, datatype,
- outcount , flag ) ;
- chksts( sts , myrank , mpi_access ) ;
- while ( flag ) {
- sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
- //sts = mpi_access->cancel( source, datatype, outcount ,
- //RecvRequestId[lasttime] ,
- cancelflag ) ;
- debugStream << "test" << myrank << " TO CANCEL RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << " cancelflag " << cancelflag << endl ;
- chksts( sts , myrank , mpi_access ) ;
- sts = mpi_access->IProbe( target , source, MPITag, datatype,
- outcount , flag ) ;
- chksts( sts , myrank , mpi_access ) ;
- }
- }
- if(MPI_ACCESS_VERBOSE) mpi_access->check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[2*maxreq] ;
- int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- debugStream << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- debugStream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- int i ;
- for ( i = 0 ; i <= lasttime ; i++ ) {
- debugStream << "test" << myrank << " " << i << ". RecvTimeMsg "
- << aRecvTimeMsg[i].time << " recvbuf " << recvbuf[i] << endl ;
- }
-
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
- mpi_access->barrier() ;
- debugStream << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
-
- delete group ;
- delete mpi_access ;
-
-// MPI_Finalize();
-
- debugStream << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
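test_MPI_Access_Time_0, removed above, is the most involved of the deleted tests: the slower rank probes for pending messages and cancels the ones its time loop has already passed, both mid-run and in the final cleanup. How cancelling a pending receive looks in raw MPI, as a sketch (whether MPIAccess::cancel() is implemented exactly this way is an assumption):

    // Sketch: cancel a pending nonblocking receive and check the outcome.
    MPI_Request req;
    MPI_Status status;
    int buf = 0, cancelled = 0;
    MPI_Irecv(&buf, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &req);
    MPI_Cancel(&req);
    MPI_Wait(&req, &status);                 // completes the request either way
    MPI_Test_cancelled(&status, &cancelled); // nonzero if nothing was matched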
${MPI_INCLUDE_DIRS}
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM
+ ${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM/MPIAccess
${CMAKE_CURRENT_SOURCE_DIR}/../ICoCo
${CMAKE_CURRENT_SOURCE_DIR}/../MEDCoupling_Swig
${CMAKE_CURRENT_SOURCE_DIR}/../MEDCoupling
${CMAKE_CURRENT_SOURCE_DIR}/../MEDPartitioner_Swig
${CMAKE_CURRENT_SOURCE_DIR}/../ICoCo
${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM
+ ${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM/MPIAccess
${CMAKE_CURRENT_SOURCE_DIR}/../ParaMEDMEM_Swig
${PROJECT_BINARY_DIR}/doc
)