public:
CommInterface(){}
virtual ~CommInterface(){}
- int commSize(MPI_Comm comm, int* size) const { return MPI_Comm_size(comm,size);}
- int commRank(MPI_Comm comm, int* rank) const { return MPI_Comm_rank(comm,rank);}
- int commGroup(MPI_Comm comm, MPI_Group* group) const
- {return MPI_Comm_group(comm, group);}
- int groupIncl(MPI_Group group, int size, int* ranks, MPI_Group* group_output) const
- {return MPI_Group_incl(group, size, ranks, group_output);}
- int commCreate(MPI_Comm comm, MPI_Group group, MPI_Comm* comm_output) const
- {return MPI_Comm_create(comm,group,comm_output);}
- int groupFree(MPI_Group* group) const {return MPI_Group_free(group);}
+ int worldSize() const {
+ int size;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ return size;}
+ int commSize(MPI_Comm comm, int* size) const {
+ return MPI_Comm_size(comm,size);}
+ int commRank(MPI_Comm comm, int* rank) const {
+ return MPI_Comm_rank(comm,rank);}
+ int commGroup(MPI_Comm comm, MPI_Group* group) const {
+ return MPI_Comm_group(comm, group);}
+ int groupIncl(MPI_Group group, int size, int* ranks,
+ MPI_Group* group_output) const {
+ return MPI_Group_incl(group, size, ranks, group_output);}
+ int commCreate(MPI_Comm comm, MPI_Group group, MPI_Comm* comm_output) const {
+ return MPI_Comm_create(comm,group,comm_output);}
+ int groupFree(MPI_Group* group) const {
+ return MPI_Group_free(group);}
int commFree(MPI_Comm* comm) const {return MPI_Comm_free(comm);}
- int broadcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)const
- {return MPI_Bcast(buffer, count, datatype, root, comm);}
- int send(void* buffer, int count, MPI_Datatype datatype, int target, int tag, MPI_Comm comm) const
- {return MPI_Send(buffer,count, datatype, target, tag, comm);}
- int recv(void* buffer, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status* status) const
- {return MPI_Recv(buffer,count, datatype, source, tag, comm, status);}
- int allToAllV(void* sendbuf, int* sendcounts, int* senddispls, MPI_Datatype sendtype,
- void* recvbuf, int* recvcounts, int* recvdispls, MPI_Datatype recvtype,
- MPI_Comm comm) const
- {return MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype,
- recvbuf, recvcounts, recvdispls, recvtype,
- comm);}
- int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype,
- void* recvbuf, int recvcount, MPI_Datatype recvtype,
- MPI_Comm comm) const
- {
- return MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
- }
+
+ int send(void* buffer, int count, MPI_Datatype datatype, int target,
+ int tag, MPI_Comm comm) const {
+ return MPI_Send(buffer,count, datatype, target, tag, comm);}
+ int recv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int tag, MPI_Comm comm, MPI_Status* status) const {
+ return MPI_Recv(buffer,count, datatype, source, tag, comm, status);}
+ int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dest, int sendtag, void* recvbuf, int recvcount,
+ MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm,
+ MPI_Status* status) {
+ return MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
+ recvbuf, recvcount, recvtype, source, recvtag,
+ comm,status); }
+
+ int Isend(void* buffer, int count, MPI_Datatype datatype, int target,
+ int tag, MPI_Comm comm, MPI_Request *request) const {
+ return MPI_Isend(buffer,count, datatype, target, tag, comm, request);}
+ int Irecv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int tag, MPI_Comm comm, MPI_Request* request) const {
+ return MPI_Irecv(buffer,count, datatype, source, tag, comm, request);}
+
+ int wait(MPI_Request *request, MPI_Status *status) const {
+ return MPI_Wait(request, status) ;}
+ int test(MPI_Request *request, int *flag, MPI_Status *status) const {
+ return MPI_Test(request, flag, status) ; }
+ int request_free(MPI_Request *request) const {
+ return MPI_Request_free(request) ; }
+ int waitany(int count, MPI_Request *array_of_requests, int *index,
+ MPI_Status *status) const {
+ return MPI_Waitany(count, array_of_requests, index, status) ; }
+ int testany(int count, MPI_Request *array_of_requests, int *index,
+ int *flag, MPI_Status *status) const {
+ return MPI_Testany(count, array_of_requests, index, flag, status) ; }
+ int waitall(int count, MPI_Request *array_of_requests,
+ MPI_Status *array_of_status) const {
+ return MPI_Waitall(count, array_of_requests, array_of_status) ;}
+ int testall(int count, MPI_Request *array_of_requests, int *flag,
+ MPI_Status *array_of_status) const {
+ return MPI_Testall(count, array_of_requests, flag, array_of_status) ; }
+ int waitsome(int incount, MPI_Request *array_of_requests,int *outcount,
+ int *array_of_indices, MPI_Status *array_of_status) const {
+ return MPI_Waitsome(incount, array_of_requests, outcount,
+ array_of_indices, array_of_status) ;}
+ int testsome(int incount, MPI_Request *array_of_requests, int *outcount,
+ int *array_of_indices, MPI_Status *array_of_status) const {
+ return MPI_Testsome(incount, array_of_requests, outcount,
+ array_of_indices, array_of_status) ; }
+ int probe(int source, int tag, MPI_Comm comm, MPI_Status *status) const {
+ return MPI_Probe(source, tag, comm, status) ; }
+ int Iprobe(int source, int tag, MPI_Comm comm, int *flag,
+ MPI_Status *status) const {
+ return MPI_Iprobe(source, tag, comm, flag, status) ; }
+ int cancel(MPI_Request *request) const {
+ return MPI_Cancel(request) ; }
+ int test_cancelled(MPI_Status *status, int *flag) const {
+ return MPI_Test_cancelled(status, flag) ; }
+ int barrier(MPI_Comm comm) const {
+ return MPI_Barrier(comm) ; }
+ int error_string(int errorcode, char *string, int *resultlen) const {
+ return MPI_Error_string(errorcode, string, resultlen) ; }
+ int get_count(MPI_Status *status, MPI_Datatype datatype, int *count) const {
+ return MPI_Get_count(status, datatype, count) ; }
+
+ int broadcast(void* buffer, int count, MPI_Datatype datatype, int root,
+ MPI_Comm comm) const {
+ return MPI_Bcast(buffer, count, datatype, root, comm);}
int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
- void* recvbuf, int recvcount, MPI_Datatype recvtype,
- MPI_Comm comm) const
- {return MPI_Allgather(sendbuf,sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
- }
- int sendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
- int dest, int sendtag, void* recvbuf, int recvcount,
- MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm,
- MPI_Status* status)
- {
- return
- MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf,
- recvcount, recvtype, source, recvtag, comm,status);
- }
- int worldSize() const {int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size;}
- int reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op,
- int root, MPI_Comm comm) const
- {return MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm);}
- int allReduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op,
- MPI_Comm comm) const
- {return MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm);}
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ MPI_Comm comm) const {
+ return MPI_Allgather(sendbuf,sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm); }
+ int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ MPI_Comm comm) const {
+ return MPI_Alltoall(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm); }
+ int allToAllV(void* sendbuf, int* sendcounts, int* senddispls,
+ MPI_Datatype sendtype, void* recvbuf, int* recvcounts,
+ int* recvdispls, MPI_Datatype recvtype,
+ MPI_Comm comm) const {
+ return MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype,
+ recvbuf, recvcounts, recvdispls, recvtype,
+ comm);}
+
+ int reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype,
+ MPI_Op op, int root, MPI_Comm comm) const {
+ return MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm);}
+ int allReduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype,
+ MPI_Op op, MPI_Comm comm) const {
+ return MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm);}
};
}
#include "MEDMEM_Meshing.hxx"
#include <set>
+using namespace std;
+
namespace ParaMEDMEM
{
{
_union_group = _local_group.fuse(distant_group);
_computeBoundingBoxes();
- registerOption(&_adjustment_eps,"BoundingBoxAdjustement",0.1);
+//JR registerOption(&_adjustment_eps,"BoundingBoxAdjustement",0.1);
+ registerOption(&_adjustment_eps,"BoundingBoxAdjustment",0.1);
}
ElementLocator::ElementLocator(const ParaSUPPORT& support, const ProcessorGroup& distant_group)
_distant_group(distant_group),
_union_group(_local_group.fuse(distant_group))
{
- registerOption(&_adjustment_eps,"BoundingBoxAdjustement",0.1);
+// registerOption(&_adjustment_eps,"BoundingBoxAdjustement",0.1);
+ registerOption(&_adjustment_eps,"BoundingBoxAdjustment",0.1);
throw ("Element Locator SUPPORT constructor not implemented yet");
}
target_value[i*nbcomp+icomp] /= _target_volume[i];
}
+ if (_target_group.containsMyRank())
+ {
+ int nbelems=field.getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
+ int nbcomp = field.getNumberOfComponents();
+ double* value = const_cast<double*> (field.getValue());
+ for (int i=0; i<nbelems*nbcomp;i++)
+ value[i]=0.0;
+ }
//on source side : sending T=VT^(-1).(W.S)
//on target side :: receiving T and storing it in field
_mapping.sendRecv(&target_value[0],field);
#define INTERPOLATIONMATRIX_HXX_
#include "MEDMEM_Field.hxx"
+#include "MPI_AccessDEC.hxx"
#include "MxN_Mapping.hxx"
namespace ParaMEDMEM
void transposeMultiply(MEDMEM::FIELD<double>&)const;
void prepare();
int getNbRows() const {return _row_offsets.size();}
-
+ void setAllToAllMethod(const AllToAllMethod& method)
+ { _mapping.setAllToAllMethod(method);}
+ MPI_AccessDEC* getAccessDEC(){return _mapping.getAccessDEC();}
+
private:
MEDMEM::FIELD<double>* InterpolationMatrix::getSupportVolumes(const MEDMEM::SUPPORT& field);
- * \section intersectiondec_options Options
+/* \section intersectiondec_options Options
* On top of \ref dec_options, options supported by IntersectionDEC objects are
*
* <TABLE BORDER=1 >
*/
IntersectionDEC::IntersectionDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):
- DEC(local_group, distant_group),_interpolation_matrix(0)
+ DEC(local_group, distant_group),_interpolation_matrix(0),_asynchronous(false),_timeinterpolationmethod(WithoutTimeInterp),_allToAllMethod(Native)
{
registerOption(&_method,string("Method"),string("P0"));
registerOption(&_bb_adjustment, "BoundingBoxAdjustment",0.1);
+ registerOption(&_asynchronous, "Asynchronous",false);
+ registerOption(&_timeinterpolationmethod,"TimeInterpolation",WithoutTimeInterp);
+ registerOption(&_allToAllMethod,"AllToAllMethod",Native);
}
IntersectionDEC::~IntersectionDEC()
const ParaMEDMEM::ParaMESH* para_mesh = _local_field->getSupport()->getMesh();
cout <<"size of Interpolation Matrix"<<sizeof(InterpolationMatrix)<<endl;
_interpolation_matrix = new InterpolationMatrix (*para_mesh, *_source_group,*_target_group,"P0");
+ _interpolation_matrix->setAllToAllMethod(_allToAllMethod);
+ _interpolation_matrix->getAccessDEC()->Asynchronous( _asynchronous ) ;
+ _interpolation_matrix->getAccessDEC()->SetTimeInterpolator( _timeinterpolationmethod ) ;
//setting up the communication DEC on both sides
if (_source_group->containsMyRank())
*/
void IntersectionDEC::recvData()
{
+
+// _interpolation_matrix->setAllToAllMethod(_allToAllMethod);
if (_source_group->containsMyRank())
_interpolation_matrix->transposeMultiply(*_local_field->getField());
else if (_target_group->containsMyRank())
}
+void IntersectionDEC::recvData( double time )
+{
+ _interpolation_matrix->getAccessDEC()->SetTime(time);
+ recvData() ;
+}
/*!
Sends the data whether the processor is on the working side or on the lazy side.
*/
void IntersectionDEC::sendData()
{
+// _interpolation_matrix->setAllToAllMethod(_allToAllMethod);
if (_source_group->containsMyRank())
{
+
_interpolation_matrix->multiply(*_local_field->getField());
if (_forced_renormalization_flag)
renormalizeTargetField();
_interpolation_matrix->transposeMultiply(*_local_field->getField());
}
+void IntersectionDEC::sendData( double time , double deltatime )
+{
+ _interpolation_matrix->getAccessDEC()->SetTime(time,deltatime);
+ sendData() ;
+}
+
/*! @} */
}
#define INTERSECTIONDEC_HXX_
#include "MEDMEM_OptionManager.hxx"
+#include "MPI_AccessDEC.hxx"
namespace ParaMEDMEM
{
class DEC;
class InterpolationMatrix;
-
+
class IntersectionDEC:public DEC
{
public:
void synchronize();
void recvData();
+ void recvData( double time );
void sendData();
+ void sendData( double time , double deltatime );
void prepareSourceDE(){};
void prepareTargetDE(){};
InterpolationMatrix* _interpolation_matrix;
double _bb_adjustment;
+
+ bool _asynchronous;
+ TimeInterpolationMethod _timeinterpolationmethod ;
+ AllToAllMethod _allToAllMethod;
};
}
--- /dev/null
+
+#include "LinearTimeInterpolator.hxx"
+
+using namespace std;
+
+namespace ParaMEDMEM {
+
+LinearTimeInterpolator::LinearTimeInterpolator( double InterpPrecision, int nStepBefore,
+ int nStepAfter ):
+ TimeInterpolator( InterpPrecision, nStepBefore, nStepAfter ) {
+}
+
+LinearTimeInterpolator::~LinearTimeInterpolator() {
+}
+
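+// DoInterp performs a standard linear interpolation between the values received
+// at time0 (recvbuff0) and at time1 (recvbuff1):
+//   result[i] = ( recvbuff0[0][i]*(time1 - time) + recvbuff1[0][i]*(time - time0) )
+//               / (time1 - time0)
+// The integer overload adds _InterpPrecision before truncating the result to int.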
+void LinearTimeInterpolator::DoInterp( double time0, double time1, double time,
+ int recvcount , int nbuff0, int nbuff1,
+ int **recvbuff0, int **recvbuff1, int *result ) {
+ int i ;
+ for ( i = 0 ; i < recvcount ; i++ ) {
+ result[i] = (int ) ((recvbuff0[0][i]*(time1 - time) + recvbuff1[0][i]*(time - time0))/(time1 - time0) + _InterpPrecision) ;
+ //cout << "DoInterpint time " << time << " time0 " << time0 << " time1 " << time1
+ // << " recvbuff0 " << recvbuff0[0][i] << " recvbuff1 " << recvbuff1[0][i]
+ // << " --> " << result[i] << endl ;
+ }
+}
+
+void LinearTimeInterpolator::DoInterp( double time0, double time1, double time,
+ int recvcount , int nbuff0, int nbuff1,
+ double **recvbuff0, double **recvbuff1,
+ double *result ) {
+ int i ;
+ for ( i = 0 ; i < recvcount ; i++ ) {
+ result[i] = (recvbuff0[0][i]*(time1 - time) + recvbuff1[0][i]*(time - time0))/(time1 - time0) ;
+ //cout << "DoInterpdouble time " << time << " time0 " << time0 << " time1 " << time1
+ // << " recvbuff0 " << recvbuff0[0][i] << " recvbuff1 " << recvbuff1[0][i]
+ // << " --> " << result[i] << endl ;
+ }
+}
+
+}
--- /dev/null
+#ifndef LINEARTIMEINTERPOLATOR_HXX_
+#define LINEARTIMEINTERPOLATOR_HXX_
+
+#include <map>
+#include <iostream>
+
+#include "TimeInterpolator.hxx"
+
+namespace ParaMEDMEM {
+
+ class DEC;
+
+ class LinearTimeInterpolator:public TimeInterpolator {
+
+ public:
+ LinearTimeInterpolator( double InterpPrecision=0, int nStepBefore=1,
+ int nStepAfter=1 ) ;
+ virtual ~LinearTimeInterpolator();
+
+ void DoInterp( double time0, double time1, double time, int recvcount,
+ int nbuff0, int nbuff1,
+ int **recvbuff0, int **recvbuff1, int *result ) ;
+ void DoInterp( double time0, double time1, double time, int recvcount,
+ int nbuff0, int nbuff1,
+ double **recvbuff0, double **recvbuff1, double *result ) ;
+
+ private :
+ };
+}
+
+#endif
--- /dev/null
+#include <iostream>
+
+#include "MPI_Access.hxx"
+#include "MEDMEM_Exception.hxx"
+
+using namespace std;
+
+namespace ParaMEDMEM
+{
+/*! \defgroup mpi_access MPI_Access
+ Class \a MPI_Access is the gateway to the MPI library.
+ It is a helper class that centralizes the calls to the MPI
+ library made by the ParaMEDMEM library. This centralization
+ makes it easier to collect information about the communication
+ performed by the library. With MPI_Access, tags are managed
+ automatically and asynchronous operations are easier to handle.
+
+An MPI_Access object is typically created after the MPI_Init() call in a
+program. It is then passed as a parameter to the constructors of ParaMEDMEM
+objects so that they access the MPI library through it.
+
+As an example, the following code initializes a processor group containing only processor 0 and creates an MPI_Access object for it.
+
+\verbatim
+#include "MPI_Access.hxx"
+#include "ProcessorGroup.hxx"
+
+int main(int argc, char** argv)
+{
+ //initialization
+ MPI_Init(&argc, &argv);
+ ParaMEDMEM::CommInterface comm_interface;
+
+ //setting up a processor group with proc 0
+ set<int> procs;
+ procs.insert(0);
+  ParaMEDMEM::MPIProcessorGroup group(comm_interface, procs);
+
+  ParaMEDMEM::MPI_Access mpi_access(&group);
+
+ //cleanup
+ MPI_Finalize();
+}
+\endverbatim
+*/
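+
+/*
+A minimal sketch of an asynchronous exchange through MPI_Access, assuming an
+MPIProcessorGroup \a group containing processors 0 and 1 and an MPI_Access
+\a mpi_access built on it. ISend and IRecv return request identifiers that are
+later passed to Wait:
+
+\verbatim
+  double values[10];
+  int send_req, recv_req;
+  if (group.myRank() == 0) {
+    mpi_access.ISend(values, 10, MPI_DOUBLE, 1, send_req);
+    mpi_access.Wait(send_req);
+  }
+  else if (group.myRank() == 1) {
+    mpi_access.IRecv(values, 10, MPI_DOUBLE, 0, recv_req);
+    mpi_access.Wait(recv_req);
+  }
+\endverbatim
+*/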
+
+
+ /*! Creates an MPI_Access that is based on the processors included in \a ProcessorGroup.
+This class may be used for easier access to the MPI API.
+
+\param ProcessorGroup MPIProcessorGroup object giving access to group management
+\param BaseTag lower bound of the range of tags to be used
+\param MaxTag upper bound of the range of tags to be used
+Tags are managed by MPI_Access and are cyclically incremented.
+Each Send or Receive operation returns a new RequestId to the caller. That
+RequestId may be used to manage the operation (Wait, Test, status checks, etc.).
+The MPITag managed internally by MPI_Access is used as the "tag" argument of the
+MPI call.
+ */
+
+MPI_Access::MPI_Access(MPIProcessorGroup * ProcessorGroup, int BaseTag, int MaxTag) :
+ _CommInterface( ProcessorGroup->getCommInterface() ) ,
+ _IntraCommunicator( ProcessorGroup->getComm() ) {
+ int mpitagub ;
+ int flag ;
+//MPI_Attr_get does not run with _IntraCommunicator ???
+ //MPI_Attr_get(*_IntraCommunicator,MPI_TAG_UB,&mpitagub,&flag) ;
+ MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&mpitagub,&flag) ;
+ if ( BaseTag != 0 ) {
+ BaseTag = (BaseTag/ModuloTag)*ModuloTag ;
+ }
+ if ( MaxTag == 0 ) {
+ MaxTag = (mpitagub/ModuloTag-1)*ModuloTag ;
+ }
+ MPI_Comm_rank( *_IntraCommunicator, &_MyRank ) ;
+ cout << "MPI_Access::MPI_Access" << _MyRank << " this " << this << " BaseTag " << BaseTag
+ << " MaxTag " << MaxTag << " mpitagub " << mpitagub << " (minimum 32767) "
+ << " flag " << flag << endl ;
+  if ( !flag || (BaseTag < 0) || (BaseTag >= MaxTag) || (MaxTag > mpitagub) )
+ throw MEDMEM::MEDEXCEPTION("wrong call to MPI_Access constructor");
+
+ _ProcessorGroup = ProcessorGroup ;
+ _ProcessorGroupSize = _ProcessorGroup->size() ;
+ _Trace = false ;
+
+ cout << "MPI_Access::MPI_Access" << _MyRank << " _ProcessorGroupSize "
+ << _ProcessorGroupSize << endl ;
+
+ _BaseRequest = -1 ;
+  _MaxRequest = 2147483647 ;  // largest int value; request ids wrap back to _BaseRequest when it is reached
+ _Request = _BaseRequest ;
+
+ _BaseMPITag = BaseTag ;
+ _MaxMPITag = MaxTag ;
+
+ _SendRequest = new int[ _ProcessorGroupSize ] ;
+ _RecvRequest = new int[ _ProcessorGroupSize ] ;
+
+ _SendRequests.resize( _ProcessorGroupSize ) ;
+ _RecvRequests.resize( _ProcessorGroupSize ) ;
+
+ _SendMPITag = new int[ _ProcessorGroupSize ] ;
+ _RecvMPITag = new int[ _ProcessorGroupSize ] ;
+ int i ;
+ for (i = 0 ; i < _ProcessorGroupSize ; i++ ) {
+ _SendRequest[ i ] = _MaxRequest ;
+ _RecvRequest[ i ] = _MaxRequest ;
+ _SendRequests[ i ].resize(0) ;
+ _RecvRequests[ i ].resize(0) ;
+ _SendMPITag[ i ] = _MaxMPITag ;
+ _RecvMPITag[ i ] = _MaxMPITag ;
+ }
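+  // Build the derived MPI datatype _MPI_TIME matching the TimeMessage struct:
+  // two doubles (time, deltatime) followed by one int (tag).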
+ MPI_Datatype array_of_types[3] ;
+ array_of_types[0] = MPI_DOUBLE ;
+ array_of_types[1] = MPI_DOUBLE ;
+ array_of_types[2] = MPI_INT ;
+ int array_of_blocklengths[3] ;
+ array_of_blocklengths[0] = 1 ;
+ array_of_blocklengths[1] = 1 ;
+ array_of_blocklengths[2] = 1 ;
+ MPI_Aint array_of_displacements[3] ;
+ array_of_displacements[0] = 0 ;
+ array_of_displacements[1] = sizeof(double) ;
+ array_of_displacements[2] = 2*sizeof(double) ;
+ MPI_Type_struct(3, array_of_blocklengths, array_of_displacements,
+ array_of_types, &_MPI_TIME) ;
+ MPI_Type_commit(&_MPI_TIME) ;
+}
+
+MPI_Access::~MPI_Access() {
+ cout << "MPI_Access::~MPI_Access" << _MyRank << " this " << this << endl ;
+ delete [] _SendRequest ;
+ delete [] _RecvRequest ;
+ delete [] _SendMPITag ;
+ delete [] _RecvMPITag ;
+ cout << "End of MPI_Access::~MPI_Access" << _MyRank << " this " << this << endl ;
+}
+
+int MPI_Access::NewRequest( MPI_Datatype datatype, int tag , int destsourcerank ,
+ bool fromsourcerank , bool asynchronous ) {
+ RequestStruct *mpiaccessstruct = new RequestStruct;
+ mpiaccessstruct->MPITag = tag ;
+ mpiaccessstruct->MPIDatatype = datatype ;
+ mpiaccessstruct->MPITarget = destsourcerank ;
+ mpiaccessstruct->MPIIsRecv = fromsourcerank ;
+ MPI_Status *aStatus = new MPI_Status ;
+ mpiaccessstruct->MPIStatus = aStatus ;
+ mpiaccessstruct->MPIAsynchronous = asynchronous ;
+ mpiaccessstruct->MPICompleted = !asynchronous ;
+ mpiaccessstruct->MPIOutCount = -1 ;
+ if ( !asynchronous ) {
+ mpiaccessstruct->MPIRequest = MPI_REQUEST_NULL ;
+ mpiaccessstruct->MPIStatus->MPI_SOURCE = destsourcerank ;
+ mpiaccessstruct->MPIStatus->MPI_TAG = tag ;
+ mpiaccessstruct->MPIStatus->MPI_ERROR = MPI_SUCCESS ;
+ }
+ //cout << "NewRequest old _Request" << _MyRank << " " << _Request
+ // << " _MaxRequest " << _MaxRequest << endl ;
+ if ( _Request == _MaxRequest ) {
+ _Request = _BaseRequest ;
+ }
+ _Request += 1 ;
+ _MapOfRequestStruct[_Request] = mpiaccessstruct ;
+ if ( fromsourcerank ) {
+ _RecvRequest[ destsourcerank ] = _Request ;
+ }
+ else {
+ _SendRequest[ destsourcerank ] = _Request ;
+ }
+ if ( _Trace )
+ cout << "NewRequest" << _MyRank << "( " << _Request << " ) "
+ << mpiaccessstruct << endl ;
+ return _Request ;
+}
+
+int MPI_Access::NewSendTag( MPI_Datatype datatype, int destrank , int method ,
+ bool asynchronous, int &RequestId ) {
+ int tag ;
+ if ( method == _MessageTime ) {
+ tag = method ;
+ }
+ else {
+ tag = IncrTag( _SendMPITag[destrank] ) ;
+ tag = ValTag( tag, method ) ;
+ _SendMPITag[ destrank ] = tag ;
+ }
+ RequestId = NewRequest( datatype, tag, destrank , false , asynchronous ) ;
+ _SendRequest[ destrank ] = RequestId ;
+ _SendRequests[ destrank ].push_back( RequestId ) ;
+ return tag ;
+}
+
+int MPI_Access::NewRecvTag( MPI_Datatype datatype, int sourcerank , int method ,
+ bool asynchronous, int &RequestId ) {
+ int tag ;
+ if ( method == _MessageTime ) {
+ tag = method ;
+ }
+ else {
+ tag = IncrTag( _RecvMPITag[sourcerank] ) ;
+ tag = ValTag( tag, method ) ;
+ _RecvMPITag[ sourcerank ] = tag ;
+ }
+ RequestId = NewRequest( datatype, tag , sourcerank , true , asynchronous ) ;
+ _RecvRequest[ sourcerank ] = RequestId ;
+ _RecvRequests[ sourcerank ].push_back( RequestId ) ;
+ return tag ;
+}
+
+int MPI_Access::SendRequestIdsSize() {
+ int size = 0 ;
+ int i ;
+ for (i = 0 ; i < _ProcessorGroupSize ; i++ ) {
+ size += _SendRequests[ i ].size() ;
+ }
+ return size ;
+}
+
+int MPI_Access::SendRequestIds(int size, int *ArrayOfSendRequests) {
+ int destrank ;
+ int i = 0 ;
+ for ( destrank = 0 ; destrank < _ProcessorGroupSize ; destrank++ ) {
+ list< int >::const_iterator iter ;
+ for (iter = _SendRequests[ destrank ].begin() ; iter != _SendRequests[destrank].end() ; iter++ ) {
+ ArrayOfSendRequests[i++] = *iter ;
+ }
+ }
+ return i ;
+}
+
+int MPI_Access::RecvRequestIdsSize() {
+ int size = 0 ;
+ int i ;
+ for (i = 0 ; i < _ProcessorGroupSize ; i++ ) {
+ size += _RecvRequests[ i ].size() ;
+ }
+ return size ;
+}
+
+int MPI_Access::RecvRequestIds(int size, int *ArrayOfRecvRequests) {
+ int sourcerank ;
+ int i = 0 ;
+ for ( sourcerank = 0 ; sourcerank < _ProcessorGroupSize ; sourcerank++ ) {
+ list< int >::const_iterator iter ;
+ for (iter = _RecvRequests[ sourcerank ].begin() ; iter != _RecvRequests[sourcerank].end() ; iter++ ) {
+ ArrayOfRecvRequests[i++] = *iter ;
+ }
+ }
+ return i ;
+}
+
+int MPI_Access::SendRequestIds(int destrank, int size, int *ArrayOfSendRequests) {
+ if (size < _SendRequests[destrank].size() ) throw MEDMEM::MEDEXCEPTION("wrong call to MPI_Access::SendRequestIds");
+ int i = 0 ;
+ list< int >::const_iterator iter ;
+ for (iter = _SendRequests[ destrank ].begin() ; iter != _SendRequests[destrank].end() ; iter++ ) {
+ ArrayOfSendRequests[i++] = *iter ;
+ }
+ return _SendRequests[destrank].size() ;
+}
+
+int MPI_Access::RecvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests) {
+ if (size < _RecvRequests[sourcerank].size() ) throw MEDMEM::MEDEXCEPTION("wrong call to MPI_Access::RecvRequestIds");
+ int i = 0 ;
+ list< int >::const_iterator iter ;
+ for (iter = _RecvRequests[ sourcerank ].begin() ; iter != _RecvRequests[sourcerank].end() ; iter++ ) {
+ ArrayOfRecvRequests[i++] = *iter ;
+ }
+ return _RecvRequests[sourcerank].size() ;
+}
+
+int MPI_Access::Send(void* buffer, int count, MPI_Datatype datatype, int target,
+ int &RequestId) {
+ int sts = MPI_SUCCESS ;
+ if ( count ) {
+ _MessageIdent aMethodIdent = MethodId( datatype ) ;
+ int MPItag = NewSendTag( datatype, target , aMethodIdent , false , RequestId ) ;
+ if ( aMethodIdent == _MessageTime ) {
+ TimeMessage *aTimeMsg = (TimeMessage *) buffer ;
+ aTimeMsg->tag = MPItag ;
+ }
+ DeleteRequest( RequestId ) ;
+ sts = _CommInterface.send(buffer, count, datatype, target, MPItag,
+ *_IntraCommunicator ) ;
+ }
+ return sts ;
+}
+
+int MPI_Access::Recv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int &RequestId, int *OutCount) {
+ int sts = MPI_SUCCESS ;
+ if ( count ) {
+ _MessageIdent aMethodIdent = MethodId( datatype ) ;
+ int MPItag = NewRecvTag( datatype, source , aMethodIdent , false , RequestId ) ;
+ sts = _CommInterface.recv(buffer, count, datatype, source, MPItag,
+ *_IntraCommunicator , MPIStatus( RequestId ) ) ;
+ int outcount = 0 ;
+ if ( sts == MPI_SUCCESS ) {
+ MPI_Datatype datatype = MPIDatatype( RequestId ) ;
+ _CommInterface.get_count(MPIStatus( RequestId ), datatype, &outcount ) ;
+ SetMPIOutCount( RequestId , outcount ) ;
+ SetMPICompleted( RequestId , true ) ;
+ DeleteStatus( RequestId ) ;
+ }
+ if ( OutCount != NULL ) {
+ *OutCount = outcount ;
+ }
+ DeleteRequest( RequestId ) ;
+ }
+ return sts ;
+}
+
+int MPI_Access::ISend(void* buffer, int count, MPI_Datatype datatype, int target,
+ int &RequestId) {
+ int sts = MPI_SUCCESS ;
+ if ( count ) {
+ _MessageIdent aMethodIdent = MethodId( datatype ) ;
+ int MPItag = NewSendTag( datatype, target , aMethodIdent , true , RequestId ) ;
+ if ( aMethodIdent == _MessageTime ) {
+ TimeMessage *aTimeMsg = (TimeMessage *) buffer ;
+ aTimeMsg->tag = MPItag ;
+ }
+ MPI_Request *aSendRequest = MPIRequest( RequestId ) ;
+ sts = _CommInterface.Isend(buffer, count, datatype, target, MPItag,
+ *_IntraCommunicator , aSendRequest) ;
+ }
+ return sts ;
+}
+
+int MPI_Access::IRecv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int &RequestId) {
+ int sts = MPI_SUCCESS ;
+ if ( count ) {
+ _MessageIdent aMethodIdent = MethodId( datatype ) ;
+ int MPItag = NewRecvTag( datatype, source , aMethodIdent , true , RequestId ) ;
+ MPI_Request *aRecvRequest = MPIRequest( RequestId ) ;
+ sts = _CommInterface.Irecv(buffer, count, datatype, source, MPItag,
+ *_IntraCommunicator , aRecvRequest) ;
+ }
+ return sts ;
+}
+
+int MPI_Access::SendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dest, int &SendRequestId,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int source, int &RecvRequestId, int *OutCount) {
+ int sts = MPI_SUCCESS ;
+ SendRequestId = -1 ;
+ RecvRequestId = -1 ;
+ if ( recvcount ) {
+ sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ;
+ }
+ int outcount = -1 ;
+ if ( _Trace )
+ cout << "MPI_Access::SendRecv" << _MyRank << " IRecv RecvRequestId "
+ << RecvRequestId << endl ;
+ if ( sts == MPI_SUCCESS ) {
+ if ( sendcount ) {
+ sts = Send(sendbuf, sendcount, sendtype, dest, SendRequestId) ;
+ }
+ if ( _Trace )
+ cout << "MPI_Access::SendRecv" << _MyRank << " Send SendRequestId "
+ << SendRequestId << endl ;
+ if ( sts == MPI_SUCCESS && recvcount ) {
+ sts = Wait( RecvRequestId ) ;
+ outcount = MPIOutCount( RecvRequestId ) ;
+ if ( _Trace )
+ cout << "MPI_Access::SendRecv" << _MyRank << " IRecv RecvRequestId "
+ << RecvRequestId << " outcount " << outcount << endl ;
+ }
+ }
+ if ( OutCount != NULL ) {
+ *OutCount = outcount ;
+ if ( _Trace )
+ cout << "MPI_Access::SendRecv" << _MyRank << " *OutCount = " << *OutCount
+ << endl ;
+ }
+ DeleteRequest( RecvRequestId ) ;
+ return sts ;
+}
+
+int MPI_Access::ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dest, int &SendRequestId,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int source, int &RecvRequestId) {
+ int sts = MPI_SUCCESS ;
+ SendRequestId = -1 ;
+ RecvRequestId = -1 ;
+ if ( recvcount ) {
+ sts = IRecv(recvbuf, recvcount, recvtype, source, RecvRequestId) ;
+ }
+ if ( sts == MPI_SUCCESS ) {
+ if ( sendcount ) {
+ sts = ISend(sendbuf, sendcount, sendtype, dest, SendRequestId) ;
+ }
+ }
+ return sts ;
+}
+
+int MPI_Access::Wait( int RequestId ) {
+ int status = MPI_SUCCESS ;
+ if ( !MPICompleted( RequestId ) ) {
+ if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL ) {
+ if ( _Trace )
+ cout << "MPI_Access::Wait" << _MyRank << " -> wait( " << RequestId
+ << " ) MPIRequest " << MPIRequest( RequestId ) << " MPIStatus "
+ << MPIStatus( RequestId ) << " MPITag " << MPITag( RequestId )
+ << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ;
+ status = _CommInterface.wait(MPIRequest( RequestId ), MPIStatus( RequestId )) ;
+ }
+ else {
+ if ( _Trace )
+ cout << "MPI_Access::Wait" << _MyRank << " MPIRequest == MPI_REQUEST_NULL"
+ << endl ;
+ }
+ SetMPICompleted( RequestId , true ) ;
+ if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) ) {
+ MPI_Datatype datatype = MPIDatatype( RequestId ) ;
+ int outcount ;
+ status = _CommInterface.get_count(MPIStatus( RequestId ), datatype,
+ &outcount ) ;
+ if ( status == MPI_SUCCESS ) {
+ SetMPIOutCount( RequestId , outcount ) ;
+ DeleteStatus( RequestId ) ;
+ if ( _Trace )
+ cout << "MPI_Access::Wait" << _MyRank << " RequestId " << RequestId
+ << "MPIIsRecv " << MPIIsRecv( RequestId ) << " outcount " << outcount
+ << endl ;
+ }
+ else {
+ if ( _Trace )
+ cout << "MPI_Access::Wait" << _MyRank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
+ }
+ }
+ else {
+ if ( _Trace )
+ cout << "MPI_Access::Wait" << _MyRank << " MPIIsRecv " << MPIIsRecv( RequestId )
+ << " MPIOutCount " << MPIOutCount( RequestId ) << endl ;
+ }
+ }
+ if ( _Trace )
+ cout << "MPI_Access::Wait" << _MyRank << " RequestId " << RequestId
+ << " Request " << MPIRequest( RequestId )
+ << " Status " << MPIStatus( RequestId ) << " MPICompleted "
+ << MPICompleted( RequestId ) << " MPIOutCount " << MPIOutCount( RequestId )
+ << endl ;
+ return status ;
+}
+
+int MPI_Access::Test(int RequestId, int &flag) {
+ int status = MPI_SUCCESS ;
+ flag = MPICompleted( RequestId ) ;
+ if ( _Trace )
+ cout << "MPI_Access::Test" << _MyRank << " flag " << flag ;
+ if ( MPIIsRecv( RequestId ) ) {
+ if ( _Trace )
+ cout << " Recv" ;
+ }
+ else {
+ if ( _Trace )
+ cout << " Send" ;
+ }
+ if ( _Trace )
+ cout << "Request" << RequestId << " " << MPIRequest( RequestId )
+ << " Status " << MPIStatus( RequestId ) << endl ;
+ if ( !flag ) {
+ if ( *MPIRequest( RequestId ) != MPI_REQUEST_NULL ) {
+ if ( _Trace )
+ cout << "MPI_Access::Test" << _MyRank << " -> test( " << RequestId
+ << " ) MPIRequest " << MPIRequest( RequestId )
+ << " MPIStatus " << MPIStatus( RequestId )
+ << " MPITag " << MPITag( RequestId )
+ << " MPIIsRecv " << MPIIsRecv( RequestId ) << endl ;
+ status = _CommInterface.test(MPIRequest( RequestId ), &flag,
+ MPIStatus( RequestId )) ;
+ }
+ else {
+ if ( _Trace )
+ cout << "MPI_Access::Test" << _MyRank << " MPIRequest == MPI_REQUEST_NULL"
+ << endl ;
+ }
+ if ( flag ) {
+ SetMPICompleted( RequestId , true ) ;
+ if ( MPIIsRecv( RequestId ) && MPIStatus( RequestId ) ) {
+ int outcount ;
+ MPI_Datatype datatype = MPIDatatype( RequestId ) ;
+ status = _CommInterface.get_count( MPIStatus( RequestId ), datatype,
+ &outcount ) ;
+ if ( status == MPI_SUCCESS ) {
+ SetMPIOutCount( RequestId , outcount ) ;
+ DeleteStatus( RequestId ) ;
+ if ( _Trace )
+ cout << "MPI_Access::Test" << _MyRank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
+ }
+ else {
+ if ( _Trace )
+ cout << "MPI_Access::Test" << _MyRank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " outcount " << outcount << endl ;
+ }
+ }
+ else {
+ if ( _Trace )
+ cout << "MPI_Access::Test" << _MyRank << " MPIIsRecv "
+ << MPIIsRecv( RequestId ) << " MPIOutCount "
+ << MPIOutCount( RequestId ) << endl ;
+ }
+ }
+ }
+ if ( _Trace )
+ cout << "MPI_Access::Test" << _MyRank << " RequestId " << RequestId
+ << " flag " << flag << " MPICompleted " << MPICompleted( RequestId )
+ << " MPIOutCount " << MPIOutCount( RequestId ) << endl ;
+ return status ;
+}
+
+// Not implemented yet: returns MPI_SUCCESS without waiting on any request.
+int MPI_Access::WaitAny(int count, int *array_of_RequestIds, int &RequestId) {
+  int status = MPI_SUCCESS ;
+  return status ;
+}
+
+// Not implemented yet: returns MPI_SUCCESS and does not set flag.
+int MPI_Access::TestAny(int count, int *array_of_RequestIds, int &RequestId,
+                        int &flag) {
+  int status = MPI_SUCCESS ;
+  return status ;
+}
+
+int MPI_Access::WaitAll(int count, int *array_of_RequestIds) {
+ if ( _Trace )
+ cout << "WaitAll" << _MyRank << " : count " << count << endl ;
+ int status ;
+ int retstatus = MPI_SUCCESS ;
+ int i ;
+ for ( i = 0 ; i < count ; i++ ) {
+ if ( _Trace )
+ cout << "WaitAll" << _MyRank << " " << i << " -> Wait( "
+ << array_of_RequestIds[i] << " )" << endl ;
+ status = Wait( array_of_RequestIds[i] ) ;
+ if ( status != MPI_SUCCESS ) {
+ retstatus = status ;
+ }
+ }
+ if ( _Trace )
+ cout << "EndWaitAll" << _MyRank << endl ;
+ return retstatus ;
+}
+
+int MPI_Access::TestAll(int count, int *array_of_RequestIds, int &flag) {
+ if ( _Trace )
+ cout << "TestAll" << _MyRank << " : count " << count << endl ;
+ int status ;
+ int retstatus = MPI_SUCCESS ;
+ bool retflag = true ;
+ int i ;
+ for ( i = 0 ; i < count ; i++ ) {
+ status = Test( array_of_RequestIds[i] , flag ) ;
+ retflag = retflag && (flag != 0) ;
+ if ( status != MPI_SUCCESS ) {
+ retstatus = status ;
+ }
+ }
+ flag = retflag ;
+ if ( _Trace )
+ cout << "EndTestAll" << _MyRank << endl ;
+ return retstatus ;
+}
+
+// Not implemented yet: returns MPI_SUCCESS without waiting on any request.
+int MPI_Access::WaitSome(int count, int *array_of_RequestIds, int outcount,
+                         int *outarray_of_RequestIds) {
+  int status = MPI_SUCCESS ;
+  return status ;
+}
+
+// Not implemented yet: returns MPI_SUCCESS without testing any request.
+int MPI_Access::TestSome(int count, int *array_of_RequestIds, int outcounts,
+                         int *outarray_of_RequestIds) {
+  int status = MPI_SUCCESS ;
+  return status ;
+}
+
+int MPI_Access::Probe(int FromSource, int &source, int &MPITag,
+ MPI_Datatype &datatype, int &outcount) {
+ MPI_Status aMPIStatus ;
+ int sts = _CommInterface.probe( FromSource, MPI_ANY_TAG,
+ *_IntraCommunicator , &aMPIStatus ) ;
+ if ( sts == MPI_SUCCESS ) {
+ source = aMPIStatus.MPI_SOURCE ;
+ MPITag = aMPIStatus.MPI_TAG ;
+ int MethodId = (MPITag % ModuloTag) ;
+ datatype = Datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ;
+ _CommInterface.get_count(&aMPIStatus, datatype, &outcount ) ;
+ if ( _Trace )
+ cout << "MPI_Access::Probe" << _MyRank << " FromSource " << FromSource
+ << " source " << source << " MPITag " << MPITag << " MethodId "
+ << MethodId << " datatype " << datatype << " outcount " << outcount
+ << endl ;
+ }
+ else {
+ source = -1 ;
+ MPITag = -1 ;
+ outcount = -1 ;
+ }
+ return sts ;
+}
+
+int MPI_Access::IProbe(int FromSource, int &source, int &MPITag,
+ MPI_Datatype &datatype, int &outcount, int &flag) {
+ MPI_Status aMPIStatus ;
+ int sts = _CommInterface.Iprobe( FromSource, MPI_ANY_TAG,
+ *_IntraCommunicator , &flag,
+ &aMPIStatus ) ;
+ if ( sts == MPI_SUCCESS && flag ) {
+ source = aMPIStatus.MPI_SOURCE ;
+ MPITag = aMPIStatus.MPI_TAG ;
+ int MethodId = (MPITag % ModuloTag) ;
+ datatype = Datatype( (ParaMEDMEM::_MessageIdent) MethodId ) ;
+ _CommInterface.get_count(&aMPIStatus, datatype, &outcount ) ;
+ if ( _Trace )
+ cout << "MPI_Access::IProbe" << _MyRank << " FromSource " << FromSource
+ << " source " << source << " MPITag " << MPITag << " MethodId "
+ << MethodId << " datatype " << datatype << " outcount " << outcount
+ << " flag " << flag << endl ;
+ }
+ else {
+ source = -1 ;
+ MPITag = -1 ;
+ outcount = -1 ;
+ }
+ return sts ;
+}
+
+int MPI_Access::Cancel( int RecvRequestId, int &flag ) {
+ flag = 0 ;
+ int sts = _CommInterface.cancel( MPIRequest( RecvRequestId ) ) ;
+ if ( sts == MPI_SUCCESS ) {
+ sts = _CommInterface.wait( MPIRequest( RecvRequestId ) ,
+ MPIStatus( RecvRequestId ) ) ;
+ if ( sts == MPI_SUCCESS ) {
+ sts = _CommInterface.test_cancelled( MPIStatus( RecvRequestId ) , &flag ) ;
+ }
+ }
+ return sts ;
+}
+
+int MPI_Access::Cancel( int source, int theMPITag, MPI_Datatype datatype,
+ int outcount, int &flag ) {
+ int sts ;
+ MPI_Aint extent ;
+ flag = 0 ;
+ sts = MPI_Type_extent( datatype , &extent ) ;
+ if ( sts == MPI_SUCCESS ) {
+ void * recvbuf = malloc( extent*outcount ) ;
+ MPI_Request aRecvRequest ;
+ if ( _Trace )
+ cout << "MPI_Access::Cancel" << _MyRank << " Irecv extent " << extent
+ << " datatype " << datatype << " source " << source << " theMPITag "
+ << theMPITag << endl ;
+ sts = _CommInterface.Irecv( recvbuf, outcount, datatype, source, theMPITag,
+ *_IntraCommunicator , &aRecvRequest ) ;
+ if ( sts == MPI_SUCCESS ) {
+ sts = _CommInterface.cancel( &aRecvRequest ) ;
+ if ( _Trace )
+ cout << "MPI_Access::Cancel" << _MyRank << " theMPITag " << theMPITag
+ << " cancel done" << endl ;
+ if ( sts == MPI_SUCCESS ) {
+ MPI_Status aStatus ;
+ if ( _Trace )
+ cout << "MPI_Access::Cancel" << _MyRank << " wait" << endl ;
+ sts = _CommInterface.wait( &aRecvRequest , &aStatus ) ;
+ if ( sts == MPI_SUCCESS ) {
+ if ( _Trace )
+ cout << "MPI_Access::Cancel" << _MyRank << " test_cancelled" << endl ;
+ sts = _CommInterface.test_cancelled( &aStatus , &flag ) ;
+ }
+ }
+ }
+ if ( _Trace && datatype == TimeType() )
+ cout << "MPI_Access::Cancel" << _MyRank << " time "
+ << ((TimeMessage *) recvbuf)->time << " "
+ << ((TimeMessage *) recvbuf)->deltatime << endl ;
+ free( recvbuf ) ;
+ }
+ if ( _Trace )
+ cout << "MPI_Access::Cancel" << _MyRank << " flag " << flag << endl ;
+ return sts ;
+}
+
+int MPI_Access::CancelAll() {
+ int sts = MPI_SUCCESS ;
+ int target ;
+ int source ;
+ int MPITag ;
+ MPI_Datatype datatype ;
+ int outcount ;
+ int flag ;
+ for ( target = 0 ; target < _ProcessorGroupSize ; target++ ) {
+ sts = IProbe(target, source, MPITag, datatype, outcount, flag) ;
+ if ( sts == MPI_SUCCESS && flag ) {
+ sts = Cancel(source, MPITag, datatype, outcount, flag) ;
+ if ( _Trace )
+ cout << "MPI_Access::CancelAll" << _MyRank << " source " << source
+ << " MPITag " << MPITag << " datatype " << datatype
+ << " outcount " << outcount << " Cancel flag " << flag << endl ;
+ if ( sts != MPI_SUCCESS ) {
+ break ;
+ }
+ }
+ else if ( sts != MPI_SUCCESS ) {
+ break ;
+ }
+ }
+ return sts ;
+}
+
+int MPI_Access::Barrier() {
+ int status = _CommInterface.barrier( *_IntraCommunicator ) ;
+ return status ;
+}
+
+int MPI_Access::Error_String(int errorcode, char *string, int *resultlen) const {
+ return _CommInterface.error_string( errorcode, string, resultlen) ;
+}
+
+int MPI_Access::Status(int RequestId, int &source, int &tag, int &error,
+ int &outcount, bool keepRequestStruct) {
+ MPI_Status *status = MPIStatus( RequestId ) ;
+ if ( _Trace )
+ cout << "MPI_Access::Status" << _MyRank << " RequestId " << RequestId
+ << " status " << status << endl ;
+ if ( status != NULL && MPIAsynchronous( RequestId ) &&
+ MPICompleted( RequestId ) ) {
+ if ( MPIIsRecv( RequestId ) ) {
+ source = status->MPI_SOURCE ;
+ tag = status->MPI_TAG ;
+ error = status->MPI_ERROR ;
+ MPI_Datatype datatype = MPIDatatype( RequestId ) ;
+ _CommInterface.get_count(status, datatype, &outcount ) ;
+ if ( _Trace )
+ cout << "MPI_Access::Status" << _MyRank << " RequestId " << RequestId
+ << " status " << status << " outcount " << outcount << endl ;
+ SetMPIOutCount( RequestId , outcount ) ;
+ }
+ else {
+ source = MPITarget( RequestId ) ;
+ tag = MPITag( RequestId ) ;
+ error = 0 ;
+ outcount = MPIOutCount( RequestId ) ;
+ }
+ if ( !keepRequestStruct ) {
+ DeleteRequest( RequestId ) ;
+ }
+ return MPI_SUCCESS ;
+ }
+ else {
+ source = MPITarget( RequestId ) ;
+ tag = MPITag( RequestId ) ;
+ error = 0 ;
+ outcount = MPIOutCount( RequestId ) ;
+ }
+ return MPI_SUCCESS ;
+}
+
+int MPI_Access::Request_Free( MPI_Request *request ) {
+ return _CommInterface.request_free( request ) ;
+}
+
+void MPI_Access::Check() const {
+ int i = 0 ;
+ map< int , RequestStruct * >::const_iterator MapOfRequestStructiterator ;
+ cout << "MPI_Access::Check" << _MyRank << "_MapOfRequestStructSize "
+ << _MapOfRequestStruct.size() << endl ;
+ for ( MapOfRequestStructiterator = _MapOfRequestStruct.begin() ;
+ MapOfRequestStructiterator != _MapOfRequestStruct.end() ;
+ MapOfRequestStructiterator++ ) {
+ cout << " Check" << _MyRank << " " << i << ". Request"
+ << MapOfRequestStructiterator->first << "-->" ;
+ if ( MapOfRequestStructiterator->second == NULL ) {
+ cout << " MapOfRequestStructiterator->second == NULL" << endl ;
+ }
+ else {
+ if ( (MapOfRequestStructiterator->second)->MPIAsynchronous ) {
+ cout << "I" ;
+ }
+ if ( (MapOfRequestStructiterator->second)->MPIIsRecv ) {
+ cout << "Recv from " ;
+ }
+ else {
+ cout << "Send to " ;
+ }
+ cout << (MapOfRequestStructiterator->second)->MPITarget
+ << " MPITag " << (MapOfRequestStructiterator->second)->MPITag
+ << " DataType " << (MapOfRequestStructiterator->second)->MPIDatatype
+ << " Request " << (MapOfRequestStructiterator->second)->MPIRequest
+ << " Status " << (MapOfRequestStructiterator->second)->MPIStatus
+ << " Completed " << (MapOfRequestStructiterator->second)->MPICompleted
+ << endl ;
+ }
+ i++ ;
+ }
+ cout << "EndCheck" << _MyRank << endl ;
+}
+
+ostream & operator<< (ostream & f ,const TimeMessage & aTimeMsg ) {
+ f << " time " << aTimeMsg.time << " deltatime " << aTimeMsg.deltatime
+ << " tag " << aTimeMsg.tag ;
+
+ return f;
+}
+
+ostream & operator<< (ostream & f ,const _MessageIdent & methodtype ) {
+ switch (methodtype) {
+ case _MessageTime :
+ f << " MethodTime ";
+ break;
+ case _MessageInt :
+ f << " MPI_INT ";
+ break;
+ case _MessageDouble :
+ f << " MPI_DOUBLE ";
+ break;
+ default :
+ f << " UnknownMethodType ";
+ break;
+ }
+
+ return f;
+}
+
+}
+
--- /dev/null
+#ifndef MPI_ACCESS_HXX_
+#define MPI_ACCESS_HXX_
+
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+
+#include <map>
+#include <list>
+#include <vector>
+
+namespace ParaMEDMEM
+{
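+// TimeMessage carries the current time, the time step (deltatime) and the MPI
+// tag of a message; it is exchanged with the derived MPI datatype _MPI_TIME
+// built in the MPI_Access constructor.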
+typedef struct { double time ;
+ double deltatime ;
+ int tag ; } TimeMessage ;
+
+static MPI_Request mpirequestnull = MPI_REQUEST_NULL ;
+enum _MessageIdent { _MessageUnknown, _MessageTime, _MessageInt, _MessageDouble } ;
+
+class MPI_Access {
+ public:
+#define ModuloTag 10
+ MPI_Access(MPIProcessorGroup * ProcessorGroup, int BaseTag=0,
+ int MaxTag=0) ;
+ virtual ~MPI_Access() ;
+
+ void Trace( bool trace = true ) ;
+
+ void DeleteRequest( int RequestId ) ;
+ void DeleteRequests(int size , int *ArrayOfSendRequests ) ;
+
+ int SendMPITag(int destrank) ;
+ int RecvMPITag(int sourcerank) ;
+
+ int SendRequestIdsSize() ;
+ int SendRequestIds(int size, int *ArrayOfSendRequests) ;
+ int RecvRequestIdsSize() ;
+ int RecvRequestIds(int size, int *ArrayOfRecvRequests) ;
+
+ int SendRequestIdsSize(int destrank) ;
+ int SendRequestIds(int destrank, int size, int *ArrayOfSendRequests) ;
+ int RecvRequestIdsSize(int sourcerank) ;
+ int RecvRequestIds(int sourcerank, int size, int *ArrayOfRecvRequests) ;
+
+ int Send(void* buffer, int count, MPI_Datatype datatype, int target,
+ int &RequestId) ;
+ int ISend(void* buffer, int count, MPI_Datatype datatype, int target,
+ int &RequestId) ;
+ int Recv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int &RequestId, int *OutCount=NULL) ;
+ int IRecv(void* buffer, int count, MPI_Datatype datatype, int source,
+ int &RequestId) ;
+ int SendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest,
+ int &SendRequestId, void* recvbuf, int recvcount,
+ MPI_Datatype recvtype, int source,
+ int &RecvRequestId, int *OutCount=NULL) ;
+ int ISendRecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dest,
+ int &SendRequestId, void* recvbuf, int recvcount,
+ MPI_Datatype recvtype, int source, int &RecvRequestId) ;
+
+ int Wait(int RequestId) ;
+ int Test(int RequestId, int &flag) ;
+ int WaitAny(int count, int *array_of_RequestIds, int &RequestId) ;
+ int TestAny(int count, int *array_of_RequestIds, int &RequestId, int &flag) ;
+ int WaitAll(int count, int *array_of_RequestIds) ;
+ int TestAll(int count, int *array_of_RequestIds, int &flag) ;
+ int WaitSome(int count, int *array_of_RequestIds, int outcount,
+ int *outarray_of_RequestIds) ;
+ int TestSome(int count, int *array_of_RequestIds, int outcounts,
+ int *outarray_of_RequestIds) ;
+ int Probe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype,
+ int &outcount) ;
+ int IProbe(int FromSource, int &source, int &MPITag, MPI_Datatype &datatype,
+ int &outcount, int &flag) ;
+ int Cancel( int RecvRequestId, int &flag ) ;
+ int Cancel( int source, int MPITag, MPI_Datatype datatype, int outcount,
+ int &flag ) ;
+ int CancelAll() ;
+ int Barrier() ;
+ int Error_String(int errorcode, char *string, int *resultlen) const ;
+ int Status(int RequestId, int &source, int &tag, int &error, int &outcount,
+ bool keepRequestStruct=false) ;
+ int Request_Free( MPI_Request *request ) ;
+
+ void Check() const ;
+
+ MPI_Datatype TimeType() const ;
+ bool IsTimeMessage( int MPITag ) const ;
+ MPI_Aint TimeExtent() const ;
+ MPI_Aint IntExtent() const ;
+ MPI_Aint DoubleExtent() const ;
+ MPI_Aint Extent( MPI_Datatype datatype ) const ;
+
+ int MPITag( int RequestId ) ;
+ int MPITarget( int RequestId ) ;
+ bool MPIIsRecv( int RequestId ) ;
+ bool MPIAsynchronous( int RequestId ) ;
+ bool MPICompleted( int RequestId ) ;
+ MPI_Datatype MPIDatatype( int RequestId ) ;
+ int MPIOutCount( int RequestId ) ;
+
+ private:
+ int NewRequest( MPI_Datatype datatype, int tag , int destsourcerank ,
+ bool fromsourcerank , bool asynchronous ) ;
+ int NewSendTag( MPI_Datatype datatype, int destrank , int method ,
+ bool asynchronous, int &RequestId ) ;
+ int NewRecvTag( MPI_Datatype datatype, int sourcerank , int method ,
+ bool asynchronous, int &RequestId ) ;
+ int IncrTag( int prevtag ) ;
+ int ValTag( int tag, int method ) ;
+
+ void DeleteSendRecvRequest( int RequestId ) ;
+
+ void DeleteStatus( int RequestId ) ;
+
+ MPI_Request *MPIRequest( int RequestId ) ;
+ MPI_Status *MPIStatus( int RequestId ) ;
+ void SetMPICompleted( int RequestId , bool completed ) ;
+ void SetMPIOutCount( int RequestId , int outcount ) ;
+ void ClearMPIStatus( int RequestId ) ;
+
+ _MessageIdent MethodId( MPI_Datatype datatype ) const ;
+ MPI_Datatype Datatype( _MessageIdent aMethodIdent ) const ;
+
+ const CommInterface &_CommInterface ;
+ const MPI_Comm* _IntraCommunicator ;
+ MPIProcessorGroup * _ProcessorGroup ;
+ int _ProcessorGroupSize ;
+ int _MyRank ;
+ bool _Trace ;
+
+ int _BaseRequest ;
+ int _MaxRequest ;
+ int _Request ;
+ int * _SendRequest ;
+ int * _RecvRequest ;
+ vector< list< int > > _SendRequests ;
+ vector< list< int > > _RecvRequests ;
+
+ int _BaseMPITag ;
+ int _MaxMPITag ;
+ int * _SendMPITag ;
+ int * _RecvMPITag ;
+
+ MPI_Datatype _MPI_TIME ;
+
+ struct RequestStruct { int MPITarget ;
+ bool MPIIsRecv ;
+ int MPITag ;
+ bool MPIAsynchronous ;
+ bool MPICompleted ;
+ MPI_Datatype MPIDatatype ;
+ MPI_Request MPIRequest ;
+ MPI_Status *MPIStatus ;
+ int MPIOutCount ; } ;
+ map< int , RequestStruct * > _MapOfRequestStruct ;
+
+};
+
+ inline void MPI_Access::Trace( bool trace ) {
+ _Trace = trace ;
+ }
+
+ inline void MPI_Access::DeleteRequest( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ if ( _Trace )
+ cout << "MPI_Access::DeleteRequest" << _MyRank << "( " << RequestId << " ) "
+ << aRequestStruct << " MPIRequest " << aRequestStruct->MPIRequest
+ << " MPIIsRecv " << aRequestStruct->MPIIsRecv << endl ;
+ if ( _MapOfRequestStruct[RequestId]->MPIRequest != MPI_REQUEST_NULL ) {
+ Request_Free( &_MapOfRequestStruct[RequestId]->MPIRequest ) ;
+ }
+ DeleteSendRecvRequest( RequestId ) ;
+ DeleteStatus( RequestId ) ;
+ _MapOfRequestStruct.erase( RequestId ) ;
+ delete aRequestStruct ;
+ }
+ else {
+ if ( _Trace )
+ cout << "MPI_Access::DeleteRequest" << _MyRank << "( " << RequestId
+ << " ) Request not found" << endl ;
+ }
+ }
+ inline void MPI_Access::DeleteRequests(int size , int *ArrayOfSendRequests ) {
+ int i ;
+ for ( i = 0 ; i < size ; i++ ) {
+ DeleteRequest( ArrayOfSendRequests[i] ) ;
+ }
+ }
+
+ inline int MPI_Access::SendMPITag(int destrank) {
+ return _SendMPITag[destrank] ;
+ }
+ inline int MPI_Access::RecvMPITag(int sourcerank) {
+ return _RecvMPITag[sourcerank] ;
+ }
+
+ inline int MPI_Access::SendRequestIdsSize(int destrank) {
+ return _SendRequests[destrank].size() ;
+ }
+ inline int MPI_Access::RecvRequestIdsSize(int sourcerank) {
+ return _RecvRequests[sourcerank].size() ;
+ }
+ inline MPI_Datatype MPI_Access::TimeType() const {
+ return _MPI_TIME ;
+ }
+  inline bool MPI_Access::IsTimeMessage( int MPITag ) const {
+    return ((MPITag%ModuloTag) == _MessageTime) ;
+  }
+ inline MPI_Aint MPI_Access::TimeExtent() const {
+ MPI_Aint extent ;
+ MPI_Type_extent( _MPI_TIME , &extent ) ;
+ return extent ;
+ }
+ inline MPI_Aint MPI_Access::IntExtent() const {
+ MPI_Aint extent ;
+ MPI_Type_extent( MPI_INT , &extent ) ;
+ return extent ;
+ }
+ inline MPI_Aint MPI_Access::DoubleExtent() const {
+ MPI_Aint extent ;
+ MPI_Type_extent( MPI_DOUBLE , &extent ) ;
+ return extent ;
+ }
+ inline MPI_Aint MPI_Access::Extent( MPI_Datatype datatype ) const {
+ if ( datatype == _MPI_TIME )
+ return TimeExtent() ;
+ if ( datatype == MPI_INT )
+ return IntExtent() ;
+ if ( datatype == MPI_DOUBLE )
+ return DoubleExtent() ;
+ return 0 ;
+ }
+
+ inline int MPI_Access::MPITag( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ return aRequestStruct->MPITag ;
+ }
+ return -1 ;
+ }
+ inline int MPI_Access::MPITarget( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ return aRequestStruct->MPITarget ;
+ }
+ return -1 ;
+ }
+ inline bool MPI_Access::MPIIsRecv( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ return aRequestStruct->MPIIsRecv ;
+ }
+ return false ;
+ }
+ inline bool MPI_Access::MPIAsynchronous( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ return aRequestStruct->MPIAsynchronous ;
+ }
+ return false ;
+ }
+ inline bool MPI_Access::MPICompleted( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ return aRequestStruct->MPICompleted ;
+ }
+ return true ;
+ }
+ inline MPI_Datatype MPI_Access::MPIDatatype( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+      return aRequestStruct->MPIDatatype ;
+ }
+ return (MPI_Datatype ) NULL ;
+ }
+ inline int MPI_Access::MPIOutCount( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ return aRequestStruct->MPIOutCount ;
+ }
+ return 0 ;
+ }
+
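+  // MPI tags are built from a counter part and a method part: the last decimal
+  // digit (tag % ModuloTag) encodes the _MessageIdent of the message, and the
+  // remaining digits form a counter. IncrTag advances that counter and wraps it
+  // back to _BaseMPITag when it exceeds _MaxMPITag; ValTag combines a counter
+  // value with a method identifier.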
+ inline int MPI_Access::IncrTag( int prevtag ) {
+ int tag = ((prevtag/ModuloTag + 1)*ModuloTag) ;
+ if ( tag > _MaxMPITag )
+ tag = _BaseMPITag ;
+ return tag ;
+ }
+ inline int MPI_Access::ValTag( int tag, int method ) {
+ return ((tag/ModuloTag)*ModuloTag) + method ;
+ }
+
+ inline void MPI_Access::DeleteSendRecvRequest( int RequestId ) {
+ if ( MPIIsRecv( RequestId ) ) {
+ _RecvRequests[ MPITarget( RequestId ) ].remove( RequestId ) ;
+ }
+ else {
+ _SendRequests[ MPITarget( RequestId ) ].remove( RequestId ) ;
+ }
+ }
+
+ inline void MPI_Access::DeleteStatus( int RequestId ) {
+ if ( _MapOfRequestStruct[RequestId]->MPIStatus != NULL ) {
+ delete _MapOfRequestStruct[RequestId]->MPIStatus ;
+ ClearMPIStatus( RequestId ) ;
+ }
+ }
+
+ inline MPI_Request * MPI_Access::MPIRequest( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ //cout << "MPIRequest" << _MyRank << "(" << RequestId
+ // << " ) aRequestStruct->MPIRequest " << aRequestStruct->MPIRequest
+ // << endl ;
+ if ( aRequestStruct ) {
+ return &aRequestStruct->MPIRequest ;
+ }
+ return &mpirequestnull ;
+ }
+ inline MPI_Status * MPI_Access::MPIStatus( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ //cout << "MPIStatus" << _MyRank << "(" << RequestId
+ // << " ) aRequestStruct->MPIStatus " << aRequestStruct->MPIStatus
+ // << endl ;
+ if ( aRequestStruct ) {
+      return aRequestStruct->MPIStatus ;
+ }
+ return NULL ;
+ }
+ inline void MPI_Access::SetMPICompleted( int RequestId , bool completed ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ aRequestStruct->MPICompleted = completed ;
+ //if ( completed ) {
+ // DeleteRequest( RequestId ) ;
+ //}
+ }
+ }
+ inline void MPI_Access::SetMPIOutCount( int RequestId , int outcount ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ aRequestStruct->MPIOutCount = outcount ;
+ }
+ }
+ inline void MPI_Access::ClearMPIStatus( int RequestId ) {
+ struct RequestStruct *aRequestStruct = _MapOfRequestStruct[ RequestId ] ;
+ if ( aRequestStruct ) {
+ aRequestStruct->MPIStatus = NULL ;
+ }
+ }
+
+ inline _MessageIdent MPI_Access::MethodId( MPI_Datatype datatype ) const {
+ _MessageIdent aMethodIdent ;
+ if ( datatype == _MPI_TIME ) {
+ aMethodIdent = _MessageTime ;
+ }
+ else if ( datatype == MPI_INT ) {
+ aMethodIdent = _MessageInt ;
+ }
+ else if ( datatype == MPI_DOUBLE ) {
+ aMethodIdent = _MessageDouble ;
+ }
+ else {
+ aMethodIdent = _MessageUnknown ;
+ }
+ return aMethodIdent ;
+ }
+ inline MPI_Datatype MPI_Access::Datatype( _MessageIdent aMethodIdent ) const {
+ MPI_Datatype aDataType ;
+ switch( aMethodIdent ) {
+ case _MessageTime :
+ aDataType = _MPI_TIME ;
+ break ;
+ case _MessageInt :
+ aDataType = MPI_INT ;
+ break ;
+ case _MessageDouble :
+ aDataType = MPI_DOUBLE ;
+ break ;
+ default :
+ aDataType = (MPI_Datatype) -1 ;
+ break ;
+ }
+ return aDataType ;
+ }
+
+ostream & operator<< (ostream &,const _MessageIdent &);
+
+ostream & operator<< (ostream &,const TimeMessage &);
+
+}
+
+#endif /*MPI_ACCESS_HXX_*/
--- /dev/null
+
+#include "MPI_AccessDEC.hxx"
+
+using namespace std;
+
+namespace ParaMEDMEM {
+
+/*!
+This constructor creates an MPI_AccessDEC which has \a local_group as a working side
+and \a distant_group as an idle side.
+The constructor must be called synchronously on all processors of both processor groups.
+
+\param local_group working side ProcessorGroup
+\param distant_group lazy side ProcessorGroup
+\param Asynchronous Communication mode (default asynchronous)
+\param nStepBefore Number of time steps needed for the interpolation before the current time
+\param nStepAfter Number of time steps needed for the interpolation after the current time
+
+*/
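+// A minimal construction sketch (the ProcessorGroup objects and the time
+// interpolation choice below are assumptions for illustration):
+//
+//   MPI_AccessDEC dec( source_group, target_group, /*Asynchronous=*/true ) ;
+//   dec.SetTimeInterpolator( LinearTimeInterp ) ;
+//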
+//MPI_AccessDEC::MPI_AccessDEC( ProcessorGroup& local_group,
+// ProcessorGroup& distant_group,
+// TimeInterpolator * aTimeInterpolator,
+// bool Asynchronous ) {
+MPI_AccessDEC::MPI_AccessDEC( const ProcessorGroup& local_group,
+ const ProcessorGroup& distant_group,
+ bool Asynchronous ) {
+ _local_group = &local_group ;
+ _distant_group = &distant_group ;
+ ProcessorGroup * union_group = _local_group->fuse( *_distant_group ) ;
+ int i ;
+ std::set<int> procs;
+ for ( i = 0 ; i < union_group->size() ; i++ ) {
+ procs.insert(i) ;
+ }
+ _MPI_union_group = new ParaMEDMEM::MPIProcessorGroup(
+ union_group->getCommInterface() , procs);
+ delete union_group ;
+ _MyRank = _MPI_union_group->myRank() ;
+ _GroupSize = _MPI_union_group->size() ;
+ _MPIAccess = new MPI_Access( _MPI_union_group ) ;
+ _Asynchronous = Asynchronous ;
+ _TimeMessages = new vector< vector< TimeMessage > > ;
+ _TimeMessages->resize( _GroupSize ) ;
+ _DataMessages = new vector< vector< void * > > ;
+ _DataMessages->resize( _GroupSize ) ;
+ _TimeInterpolator = NULL ;
+ _MapOfSendBuffers = new map< int , SendBuffStruct * > ;
+ cout << "MPI_AccessDEC" << _MyRank << " Asynchronous " << _Asynchronous << endl ;
+}
+
+MPI_AccessDEC::~MPI_AccessDEC() {
+ CheckFinalSent() ;
+ CheckFinalRecv() ;
+ delete _MPI_union_group ;
+ delete _MPIAccess ;
+ if ( _TimeInterpolator )
+ delete _TimeInterpolator ;
+ if ( _TimeMessages )
+ delete _TimeMessages ;
+ if ( _DataMessages )
+ delete _DataMessages ;
+ if ( _MapOfSendBuffers )
+ delete _MapOfSendBuffers ;
+}
+
+void MPI_AccessDEC::SetTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
+ double InterpPrecision, int nStepBefore,
+ int nStepAfter ) {
+ cout << "MPI_AccessDEC::SetTimeInterpolator" << _MyRank << " Asynchronous "
+ << _Asynchronous << " TimeInterpolationMethod " << aTimeInterp
+ << " InterpPrecision " << InterpPrecision << " nStepBefore " << nStepBefore
+ << " nStepAfter " << nStepAfter << endl ;
+ if ( _TimeInterpolator )
+ delete _TimeInterpolator ;
+ switch ( aTimeInterp ) {
+ case WithoutTimeInterp :
+ _TimeInterpolator = NULL ;
+ _nStepBefore = 0 ;
+ _nStepAfter = 0 ;
+ break ;
+ case LinearTimeInterp :
+ _TimeInterpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
+ nStepAfter ) ;
+ _nStepBefore = nStepBefore ;
+ _nStepAfter = nStepAfter ;
+ int i ;
+ for ( i = 0 ; i < _GroupSize ; i++ ) {
+ (*_TimeMessages)[ i ].resize( _nStepBefore + _nStepAfter ) ;
+ (*_DataMessages)[ i ].resize( _nStepBefore + _nStepAfter ) ;
+ int j ;
+ for ( j = 0 ; j < _nStepBefore + _nStepAfter ; j++ ) {
+ (*_TimeMessages)[ i ][ j ].time = -1 ;
+ (*_TimeMessages)[ i ][ j ].deltatime = -1 ;
+ (*_DataMessages)[ i ][ j ] = NULL ;
+ }
+ }
+ break ;
+ }
+}
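+
+// Illustrative sketch (commented out, not part of this change): assuming two
+// ProcessorGroup objects `sources` and `targets` already exist, a typical
+// setup could look like the following; the variable names are assumptions.
+// MPI_AccessDEC aDEC( sources , targets , true ) ; // asynchronous exchanges
+// aDEC.SetTimeInterpolator( LinearTimeInterp , 0. , 1 , 1 ) ;
+// aDEC.SetTime( 0. , 0.1 ) ; // current time and time step of the local side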
+
+/*!
+Sends sendcount values of type sendtype, starting at sendbuf[offset], to target in the IntraCommunicator
+(internal protected method).
+
+The request identifier is returned in SendRequestId.
+
+*/
+int MPI_AccessDEC::Send( void* sendbuf, int sendcount , int offset ,
+ MPI_Datatype sendtype , int target , int &SendRequestId ) {
+ int sts ;
+ if ( _Asynchronous ) {
+ if ( sendtype == MPI_INT ) {
+ sts = _MPIAccess->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ }
+ else {
+ if ( sendtype == MPI_INT ) {
+ sts = _MPIAccess->Send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->Send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
+ target , SendRequestId ) ;
+ }
+ }
+ return sts ;
+}
+
+/*!
+Receives recvcount values of type recvtype into recvbuf[offset] from target in the IntraCommunicator
+(internal protected method).
+
+The request identifier is returned in RecvRequestId.
+
+*/
+int MPI_AccessDEC::Recv( void* recvbuf, int recvcount , int offset ,
+ MPI_Datatype recvtype , int target , int &RecvRequestId ) {
+ int sts ;
+ if ( _Asynchronous ) {
+ if ( recvtype == MPI_INT ) {
+ sts = _MPIAccess->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ }
+ else {
+ if ( recvtype == MPI_INT ) {
+ sts = _MPIAccess->Recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->Recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
+ target , RecvRequestId ) ;
+ }
+ }
+ return sts ;
+}
+
+/*!
+Sends sendcount values of type sendtype, starting at sendbuf[sendoffset], to target in the IntraCommunicator
+and receives recvcount values of type recvtype into recvbuf[recvoffset] from the same target
+(internal protected method).
+
+The request identifiers are returned in SendRequestId and RecvRequestId.
+
+*/
+int MPI_AccessDEC::SendRecv( void* sendbuf, int sendcount , int sendoffset ,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , int recvoffset ,
+ MPI_Datatype recvtype , int target ,
+ int &SendRequestId , int &RecvRequestId ) {
+ int sts ;
+ if ( _Asynchronous ) {
+ if ( sendtype == MPI_INT ) {
+ if ( recvtype == MPI_INT ) {
+ sts = _MPIAccess->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] , recvcount ,
+ recvtype , target , RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ else {
+ if ( recvtype == MPI_INT ) {
+ sts = _MPIAccess->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ }
+ else {
+ if ( sendtype == MPI_INT ) {
+ if ( recvtype == MPI_INT ) {
+ sts = _MPIAccess->SendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] , recvcount ,
+ recvtype , target , RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->SendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ else {
+ if ( recvtype == MPI_INT ) {
+ sts = _MPIAccess->SendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((int *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ else {
+ cout << "SendRecv" << _MyRank << " target " << target << " sendbuf "
+ << &((double *) sendbuf)[sendoffset] << " sendcount " << sendcount
+ << " recvbuf " << &((double *) recvbuf)[recvoffset] << " recvcount "
+ << recvcount << endl ;
+ sts = _MPIAccess->SendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
+ sendtype , target , SendRequestId ,
+ &((double *) recvbuf)[recvoffset] ,
+ recvcount , recvtype , target , RecvRequestId ) ;
+ }
+ }
+ }
+ return sts ;
+}
+
+/*!
+Sends sendcount values of type sendtype to each target of the IntraCommunicator, taken from
+successive slices of sendbuf, and receives recvcount values of type recvtype from each target
+into successive slices of recvbuf.
+
+*/
+int MPI_AccessDEC::AllToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype ) {
+ if ( _TimeInterpolator ) {
+ return AllToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
+ }
+ int sts ;
+ int target ;
+ int sendoffset = 0 ;
+ int recvoffset = 0 ;
+ int SendRequestId ;
+ int RecvRequestId ;
+ SendBuffStruct * aSendDataStruct = NULL ;
+ cout << "AllToAll" << _MyRank << " sendbuf " << sendbuf << " recvbuf " << recvbuf << endl ;
+ if ( _Asynchronous && sendbuf ) {
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ for ( target = 0 ; target < _GroupSize ; target++ ) {
+ sts = SendRecv( sendbuf , sendcount , sendoffset , sendtype ,
+ recvbuf , recvcount , recvoffset , recvtype ,
+ target , SendRequestId , RecvRequestId ) ;
+ if ( _Asynchronous && sendbuf && sendcount ) {
+ aSendDataStruct->Counter += 1 ;
+ (*_MapOfSendBuffers)[ SendRequestId ] = aSendDataStruct ;
+ }
+ sendoffset += sendcount ;
+ recvoffset += recvcount ;
+ }
+ if ( !_Asynchronous && sendbuf ) {
+ cout << "AllToAll" << _MyRank << " free of sendbuf " << sendbuf << endl ;
+// free( sendbuf ) ;
+ if ( sendtype == MPI_INT ) {
+ delete [] (int *) sendbuf ;
+ }
+ else {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ return sts ;
+}
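+
+// Usage note (sketch, not part of this change): AllToAll takes ownership of
+// sendbuf, which is either deleted here (synchronous mode) or registered in
+// _MapOfSendBuffers and deleted later by CheckSent (asynchronous mode). The
+// caller must therefore allocate it with new[] and must not delete it; the
+// names aDEC, groupsize and the buffers below are assumptions.
+// double * sendbuf = new double[ sendcount * groupsize ] ;
+// double * recvbuf = new double[ recvcount * groupsize ] ;
+// // ... fill sendbuf ...
+// int sts = aDEC.AllToAll( sendbuf , sendcount , MPI_DOUBLE ,
+//                          recvbuf , recvcount , MPI_DOUBLE ) ;
+// // recvbuf remains owned by the caller.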
+
+/*!
+Sends sendcounts[target] values of type sendtype, starting at sendbuf[sdispls[target]], to each target
+of the IntraCommunicator and receives recvcounts[target] values of type recvtype into
+recvbuf[rdispls[target]] from each target.
+
+*/
+int MPI_AccessDEC::AllToAllv( void* sendbuf, int* sendcounts, int* sdispls,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls,
+ MPI_Datatype recvtype ) {
+ if ( _TimeInterpolator ) {
+ return AllToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
+ recvbuf, recvcounts, rdispls, recvtype ) ;
+ }
+ int sts = MPI_SUCCESS ;
+ int target ;
+ int SendRequestId ;
+ int RecvRequestId ;
+ SendBuffStruct * aSendDataStruct = NULL ;
+ cout << "AllToAllv" << _MyRank << " sendbuf " << sendbuf << " recvbuf " << recvbuf
+ << endl ;
+ if ( _Asynchronous && sendbuf ) {
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ for ( target = 0 ; target < _GroupSize ; target++ ) {
+ if ( sendcounts[target] || recvcounts[target] ) {
+ sts = SendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
+ recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
+ target , SendRequestId , RecvRequestId ) ;
+ if ( _Asynchronous && sendbuf && sendcounts[target]) {
+ aSendDataStruct->Counter += 1 ;
+ (*_MapOfSendBuffers)[ SendRequestId ] = aSendDataStruct ;
+ }
+ }
+ }
+ if ( !_Asynchronous && sendbuf ) {
+ cout << "AllToAllv" << _MyRank << " free of sendbuf " << sendbuf << endl ;
+// free( sendbuf ) ;
+ if ( sendtype == MPI_INT ) {
+ delete [] (int *) sendbuf ;
+ }
+ else {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ return sts ;
+}
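+
+// Illustrative sketch (commented out, not part of this change) of the per-target
+// arrays expected by AllToAllv; groupsize and the two count helpers are assumptions.
+// int * sendcounts = new int[ groupsize ] ;
+// int * sdispls = new int[ groupsize ] ;
+// int * recvcounts = new int[ groupsize ] ;
+// int * rdispls = new int[ groupsize ] ;
+// for ( int target = 0 ; target < groupsize ; target++ ) {
+//   sendcounts[ target ] = nbValuesForTarget( target ) ;  // hypothetical helper
+//   recvcounts[ target ] = nbValuesFromTarget( target ) ; // hypothetical helper
+//   sdispls[ target ] = (target == 0) ? 0 : sdispls[ target-1 ] + sendcounts[ target-1 ] ;
+//   rdispls[ target ] = (target == 0) ? 0 : rdispls[ target-1 ] + recvcounts[ target-1 ] ;
+// }
+// int sts = aDEC.AllToAllv( sendbuf , sendcounts , sdispls , MPI_DOUBLE ,
+//                           recvbuf , recvcounts , rdispls , MPI_DOUBLE ) ;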
+
+/*!
+Sends a TimeMessage to every target of the IntraCommunicator and receives the TimeMessages
+from the targets when necessary.
+
+Sends sendcount values of type sendtype to each target, taken from successive slices of sendbuf,
+and stores recvcount values of type recvtype into recvbuf after interpolating in time the
+values received from each target.
+
+*/
+int MPI_AccessDEC::AllToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype ) {
+ int sts = MPI_SUCCESS ;
+ int target ;
+ int sendoffset = 0 ;
+ int SendTimeRequestId ;
+ int SendRequestId ;
+
+ if ( _TimeInterpolator == NULL ) {
+ return MPI_ERR_OTHER ;
+ }
+
+//Free the SendBuffers of already-completed sends
+ CheckSent() ;
+
+//DoSend : Time + SendBuff
+ SendBuffStruct * aSendTimeStruct = NULL ;
+ SendBuffStruct * aSendDataStruct = NULL ;
+ cout << "AllToAllTime" << _MyRank << " sendbuf " << sendbuf << " recvbuf " << recvbuf
+ << endl ;
+ if ( sendbuf && sendcount ) {
+ TimeMessage * aSendTimeMessage = new TimeMessage ;
+ if ( _Asynchronous ) {
+ aSendTimeStruct = new SendBuffStruct ;
+ aSendTimeStruct->SendBuffer = aSendTimeMessage ;
+ aSendTimeStruct->Counter = 0 ;
+ aSendTimeStruct->DataType = _MPIAccess->TimeType() ;
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ aSendTimeMessage->time = _t ;
+ aSendTimeMessage->deltatime = _dt ;
+ for ( target = 0 ; target < _GroupSize ; target++ ) {
+ sts = Send( aSendTimeMessage , 1 , 0 , _MPIAccess->TimeType() , target ,
+ SendTimeRequestId ) ;
+// cout << "Send" << _MyRank << " --> target " << target << " _t " << _t
+// << " SendTimeRequestId " << SendTimeRequestId << endl ;
+// << " aSendTimeStruct/aSendTimeMessage " << aSendTimeStruct << "/"
+// << aSendTimeMessage << endl ;
+ sts = Send( sendbuf , sendcount , sendoffset , sendtype , target , SendRequestId ) ;
+// cout << "Send" << _MyRank << " --> target " << target << " SendRequestId "
+// << SendRequestId << " aSendDataStruct/sendbuf " << aSendDataStruct << "/"
+// << sendbuf << endl ;
+ if ( _Asynchronous ) {
+ aSendTimeStruct->Counter += 1 ;
+ (*_MapOfSendBuffers)[ SendTimeRequestId ] = aSendTimeStruct ;
+ aSendDataStruct->Counter += 1 ;
+ (*_MapOfSendBuffers)[ SendRequestId ] = aSendDataStruct ;
+ }
+ sendoffset += sendcount ;
+ }
+ if ( !_Asynchronous ) {
+ cout << "SynchronousAllToAllTime" << _MyRank << " free of SendTimeMessage & sendbuf "
+ << sendbuf << endl ;
+ delete aSendTimeMessage ;
+// free( sendbuf ) ;
+ if ( sendtype == MPI_INT ) {
+ delete [] (int *) sendbuf ;
+ }
+ else {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ }
+
+//CheckTime + DoRecv
+ if ( recvbuf && recvcount ) {
+ for ( target = 0 ; target < _GroupSize ; target++ ) {
+ int recvsize = recvcount*_MPIAccess->Extent( recvtype ) ;
+// bool OutOfTime ;
+// CheckTime( recvcount , recvsize , recvtype , target , OutOfTime) ;
+ CheckTime( recvcount , recvsize , recvtype , target ) ;
+ //cout << "AllToAllTime" << _MyRank << " memcpy to recvbuf " << recvbuf
+ // << "+target" << target << "*" << recvsize << " bytes" << endl ;
+//===========================================================================
+//TODO : currently it is assumed that we have only 1 timestep before and 1 after
+//===========================================================================
+ if ( _TimeInterpolator && (*_TimeMessages)[target][0].time != -1 ) {
+// if ( _TimeInterpolator && !OutOfTime ) {
+ if ( _OutOfTime ) {
+ cout << " =====================================================" << endl
+ << "Recv" << _MyRank << " <-- target " << target << " t0 "
+ << (*_TimeMessages)[target][0].time << " < t1 "
+ << (*_TimeMessages)[target][1].time << " < t* " << _t << endl
+ << " =====================================================" << endl ;
+ }
+ if ( recvtype == MPI_INT ) {
+ _TimeInterpolator->DoInterp( (*_TimeMessages)[target][0].time,
+ (*_TimeMessages)[target][1].time, _t, recvcount ,
+ _nStepBefore, _nStepAfter,
+ (int **) &(*_DataMessages)[target][0],
+ (int **) &(*_DataMessages)[target][1],
+ &((int *)recvbuf)[target*recvcount] ) ;
+ }
+ else {
+ _TimeInterpolator->DoInterp( (*_TimeMessages)[target][0].time,
+ (*_TimeMessages)[target][1].time, _t, recvcount ,
+ _nStepBefore, _nStepAfter,
+ (double **) &(*_DataMessages)[target][0],
+ (double **) &(*_DataMessages)[target][1],
+ &((double *)recvbuf)[target*recvcount] ) ;
+ }
+ }
+ else {
+ char * buffdest = (char *) recvbuf ;
+ char * buffsrc = (char *) (*_DataMessages)[target][1] ;
+ memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
+ }
+ }
+ }
+
+ return sts ;
+}
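+
+// Illustrative time-stepped exchange (sketch, not part of this change): with a
+// time interpolator set, each step declares the local time before exchanging.
+// Since AllToAllTime consumes sendbuf, a fresh buffer is allocated per step;
+// tmax, dt, groupsize and the buffer names are assumptions.
+// for ( double t = 0. ; t < tmax ; t += dt ) {
+//   aDEC.SetTime( t , dt ) ;
+//   double * sendbuf = new double[ sendcount * groupsize ] ;
+//   // ... fill sendbuf with the values at time t ...
+//   aDEC.AllToAllTime( sendbuf , sendcount , MPI_DOUBLE ,
+//                      recvbuf , recvcount , MPI_DOUBLE ) ;
+// }
+// aDEC.CheckFinalSent() ; // also done by the destructor
+// aDEC.CheckFinalRecv() ;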
+
+int MPI_AccessDEC::AllToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls,
+ MPI_Datatype recvtype ) {
+ int sts = MPI_SUCCESS ;
+ int target ;
+ int SendTimeRequestId ;
+ int SendRequestId ;
+
+ if ( _TimeInterpolator == NULL ) {
+ return MPI_ERR_OTHER ;
+ }
+
+//Free the SendBuffers of already-completed sends
+ CheckSent() ;
+
+//DoSend : Time + SendBuff
+ SendBuffStruct * aSendTimeStruct = NULL ;
+ SendBuffStruct * aSendDataStruct = NULL ;
+ cout << "AllToAllvTime" << _MyRank << " sendbuf " << sendbuf << " recvbuf " << recvbuf
+ << endl ;
+ if ( sendbuf ) {
+ TimeMessage * aSendTimeMessage = new TimeMessage ;
+ if ( _Asynchronous ) {
+ aSendTimeStruct = new SendBuffStruct ;
+ aSendTimeStruct->SendBuffer = aSendTimeMessage ;
+ aSendTimeStruct->Counter = 0 ;
+ aSendTimeStruct->DataType = _MPIAccess->TimeType() ;
+ aSendDataStruct = new SendBuffStruct ;
+ aSendDataStruct->SendBuffer = sendbuf ;
+ aSendDataStruct->Counter = 0 ;
+ aSendDataStruct->DataType = sendtype ;
+ }
+ aSendTimeMessage->time = _t ;
+ aSendTimeMessage->deltatime = _dt ;
+ for ( target = 0 ; target < _GroupSize ; target++ ) {
+ if ( sendcounts[target] ) {
+ sts = Send( aSendTimeMessage , 1 , 0 , _MPIAccess->TimeType() , target ,
+ SendTimeRequestId ) ;
+// cout << "Send" << _MyRank << " --> target " << target << " _t " << _t
+// << " SendTimeRequestId " << SendTimeRequestId << endl ;
+// << " aSendTimeStruct/aSendTimeMessage " << aSendTimeStruct << "/"
+// << aSendTimeMessage << endl ;
+ //int i ;
+ //cout << "Send" << _MyRank << " --> target " << target
+ // << "sendcount" << sendcounts[target] << endl ;
+ //for ( i = 0 ; i < sendcounts[target] ; i++ ) {
+ // cout << " " << ((double *) sendbuf)[sdispls[target]+i] ;
+ //}
+ //cout << endl ;
+ sts = Send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
+ SendRequestId ) ;
+ //cout << "Send" << _MyRank << " --> target " << target << " SendRequestId "
+ // << SendRequestId << " aSendDataStruct/sendbuf " << aSendDataStruct << "/"
+ // << sendbuf << " sendcount " << sendcounts[target] << " sdispls "
+ // << sdispls[target] << endl ;
+ //int i ;
+ //cout << "Send" << _MyRank << " --> target " << target << " count "
+ // << sendcounts[target] << endl ;
+ //for ( i = 0 ; i < sendcounts[target] ; i++ ) {
+ // cout << " " << ((int *)sendbuf)[sdispls[target]+i] ;
+ //}
+ //cout << endl ;
+ if ( _Asynchronous ) {
+ aSendTimeStruct->Counter += 1 ;
+ (*_MapOfSendBuffers)[ SendTimeRequestId ] = aSendTimeStruct ;
+ //cout << "TimeSent" << _MyRank << " Request " << SendTimeRequestId
+ // << " _MapOfSendBuffers->SendBuffer "
+ // << (*_MapOfSendBuffers)[ SendTimeRequestId ]->SendBuffer
+ // << " Counter " << (*_MapOfSendBuffers)[ SendTimeRequestId ]->Counter
+ // << endl ;
+ aSendDataStruct->Counter += 1 ;
+ (*_MapOfSendBuffers)[ SendRequestId ] = aSendDataStruct ;
+ //cout << "DataSent" << _MyRank << " Request " << SendRequestId
+ // << " _MapOfSendBuffers->SendBuffer "
+ // << (*_MapOfSendBuffers)[ SendRequestId ]->SendBuffer
+ // << " Counter " << (*_MapOfSendBuffers)[ SendRequestId ]->Counter
+ // << endl ;
+ }
+ }
+ }
+ if ( !_Asynchronous ) {
+ //cout << "SynchronousAllToAllv" << _MyRank << " free of SendTimeMessage & sendbuf "
+ // << sendbuf << endl ;
+ delete aSendTimeMessage ;
+// free( sendbuf ) ;
+ if ( sendtype == MPI_INT ) {
+ delete [] (int *) sendbuf ;
+ }
+ else {
+ delete [] (double *) sendbuf ;
+ }
+ }
+ }
+
+//CheckTime + DoRecv
+ if ( recvbuf ) {
+ for ( target = 0 ; target < _GroupSize ; target++ ) {
+ if ( recvcounts[target] ) {
+ int recvsize = recvcounts[target]*_MPIAccess->Extent( recvtype ) ;
+// bool OutOfTime ;
+// CheckTime( recvcounts[target] , recvsize , recvtype , target , OutOfTime ) ;
+ CheckTime( recvcounts[target] , recvsize , recvtype , target ) ;
+ //cout << "AllToAllvTime" << _MyRank << " memcpy to recvbuf " << recvbuf
+ // << "+target" << target << "*" << recvsize << " bytes" << endl ;
+//===========================================================================
+//TODO : currently it is assumed that we have only 1 timestep before and 1 after
+//===========================================================================
+ if ( _TimeInterpolator && (*_TimeMessages)[target][0].time != -1 ) {
+// if ( _TimeInterpolator && !OutOfTime ) {
+ if ( _OutOfTime ) {
+ cout << " =====================================================" << endl
+ << "Recv" << _MyRank << " <-- target " << target << " t0 "
+ << (*_TimeMessages)[target][0].time << " < t1 "
+ << (*_TimeMessages)[target][1].time << " < t* " << _t << endl
+ << " =====================================================" << endl ;
+ }
+ if ( recvtype == MPI_INT ) {
+ _TimeInterpolator->DoInterp( (*_TimeMessages)[target][0].time,
+ (*_TimeMessages)[target][1].time, _t,
+ recvcounts[target] , _nStepBefore, _nStepAfter,
+ (int **) &(*_DataMessages)[target][0],
+ (int **) &(*_DataMessages)[target][1],
+ &((int *)recvbuf)[rdispls[target]] ) ;
+ }
+ else {
+ _TimeInterpolator->DoInterp( (*_TimeMessages)[target][0].time,
+ (*_TimeMessages)[target][1].time, _t,
+ recvcounts[target] , _nStepBefore, _nStepAfter,
+ (double **) &(*_DataMessages)[target][0],
+ (double **) &(*_DataMessages)[target][1],
+ &((double *)recvbuf)[rdispls[target]] ) ;
+ }
+ }
+ else {
+ char * buffdest = (char *) recvbuf ;
+ char * buffsrc = (char *) (*_DataMessages)[target][1] ;
+ memcpy( &buffdest[rdispls[target]*_MPIAccess->Extent( recvtype )] , buffsrc ,
+ recvsize ) ;
+ }
+ //int i ;
+ //cout << "Recv" << _MyRank << " --> target " << target << " recvbuf " << recvbuf
+ // << " recvbuf " << recvbuf << " &recvbuf[rdispls[target]] "
+ // << (&((int *) recvbuf)[rdispls[target]]) << endl ;
+ //for ( i = 0 ; i < recvcounts[target] ; i++ ) {
+ // cout << " " << ((int *) (*_DataMessages)[target][1])[i] ;
+ //}
+ //cout << endl ;
+ //cout << "Recv" << _MyRank << " <-- target " << target << " count "
+ // << recvcounts[target] << endl ;
+ //for ( i = 0 ; i < recvcounts[target] ; i++ ) {
+ // cout << " " << (&((int *) recvbuf)[rdispls[target]])[i] ;
+ //}
+ //cout << endl ;
+ }
+ }
+ }
+
+ return sts ;
+}
+
+int MPI_AccessDEC::CheckTime( int recvcount , int recvsize , MPI_Datatype recvtype ,
+ int target ) {
+// int target , bool &OutOfTime ) {
+ //cout << "CheckTime" << _MyRank << " time " << _t << " deltatime " << _dt << endl ;
+
+ int sts ;
+ int RecvTimeRequestId ;
+ int RecvDataRequestId ;
+//For now we look for _TimeMessages[target][0] < _t <= _TimeMessages[target][1]
+//===========================================================================
+//TODO : currently it is assumed that we have only 1 timestep before and 1 after
+// instead of _nStepBefore and _nStepAfter ...
+//===========================================================================
+ _DataMessagesType = recvtype ;
+ _OutOfTime = false ;
+//Currently we need 1 timestep before and 1 after
+// if ( _steptime <= 1 ) {
+ if ( (*_TimeMessages)[target][1].time == -1 ) {
+ (*_TimeMessages)[target][0] = (*_TimeMessages)[target][1] ;
+ sts = Recv( &(*_TimeMessages)[target][1] , 1 , _MPIAccess->TimeType() ,
+ target , RecvTimeRequestId ) ;
+ (*_DataMessages)[target][0] = (*_DataMessages)[target][1] ;
+// (*_DataMessages)[target][1] = malloc( recvsize ) ;
+ if ( recvtype == MPI_INT ) {
+ (*_DataMessages)[target][1] = new int[recvcount] ;
+ }
+ else {
+ (*_DataMessages)[target][1] = new double[recvcount] ;
+ }
+ sts = Recv( (*_DataMessages)[target][1] , recvcount , recvtype , target ,
+ RecvDataRequestId ) ;
+// _OutOfTime = true ;
+ }
+// if ( _steptime > 0 ) {
+ else {
+ while ( _t > (*_TimeMessages)[target][1].time &&
+ (*_TimeMessages)[target][1].deltatime != 0 ) {
+ //cout << "CheckTime" << _MyRank << " TimeMessage target " << target << " _t "
+ // << _t << " > " << (*_TimeMessages)[target][1].time << " et "
+ // << (*_TimeMessages)[target][1].deltatime
+ // << " != 0 ==> Recv" << endl ;
+ (*_TimeMessages)[target][0] = (*_TimeMessages)[target][1] ;
+ sts = Recv( &(*_TimeMessages)[target][1] , 1 , _MPIAccess->TimeType() ,
+ target , RecvTimeRequestId ) ;
+// free( (*_DataMessages)[target][0] ) ;
+ if ( recvtype == MPI_INT ) {
+ delete [] (int *) (*_DataMessages)[target][0] ;
+ }
+ else {
+ delete [] (double *) (*_DataMessages)[target][0] ;
+ }
+ (*_DataMessages)[target][0] = (*_DataMessages)[target][1] ;
+// (*_DataMessages)[target][1] = malloc( recvsize ) ;
+ if ( recvtype == MPI_INT ) {
+ (*_DataMessages)[target][1] = new int[recvcount] ;
+ }
+ else {
+ (*_DataMessages)[target][1] = new double[recvcount] ;
+ }
+ sts = Recv( (*_DataMessages)[target][1] , recvcount , recvtype , target ,
+ RecvDataRequestId ) ;
+ }
+
+ if ( _t > (*_TimeMessages)[target][0].time &&
+ _t <= (*_TimeMessages)[target][1].time ) {
+//Ok
+ }
+ else {
+ _OutOfTime = true ;
+ }
+ }
+
+ //cout << "CheckTime" << _MyRank << " TimeMessage target " << target << " Time "
+ // << (*_TimeMessages)[target][1].time << " DataMessage" ;
+ //int i ;
+ //void * buff = (*_DataMessages)[target][1] ;
+ //for ( i = 0 ; i < recvcount ; i++ ) {
+ // cout << " " << ((int *) buff)[i] ;
+ //}
+ //cout << endl ;
+ return sts ;
+}
+
+int MPI_AccessDEC::CheckSent(bool WithWait) {
+ int sts = MPI_SUCCESS ;
+ int flag = WithWait ;
+ int size = _MPIAccess->SendRequestIdsSize() ;
+ int * ArrayOfSendRequests = new int[ size ] ;
+ int nSendRequest = _MPIAccess->SendRequestIds( size , ArrayOfSendRequests ) ;
+ //cout << "CheckSent" << _MyRank << " nSendRequest " << nSendRequest << " :" << endl ;
+ int i ;
+ for ( i = 0 ; i < nSendRequest ; i++ ) {
+ if ( WithWait ) {
+ sts = _MPIAccess->Wait( ArrayOfSendRequests[i] ) ;
+ }
+ else {
+ sts = _MPIAccess->Test( ArrayOfSendRequests[i] , flag ) ;
+ }
+ if ( flag ) {
+ _MPIAccess->DeleteRequest( ArrayOfSendRequests[i] ) ;
+// cout << "CheckSent" << _MyRank << " " << i << "./" << nSendRequest
+// << " SendRequestId " << ArrayOfSendRequests[i]
+// << " flag " << flag << " SendBuffStruct/SendBuffer "
+// << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ] << "/"
+// << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->SendBuffer
+// << " Counter " << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->Counter
+// << " TimeBuf " << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->TimeBuf
+// << endl ;
+ (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
+// if ( (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->TimeBuf ) {
+// cout << "CheckTimeSent" << _MyRank << " Request " ;
+// }
+// else {
+// cout << "CheckDataSent" << _MyRank << " Request " ;
+// }
+// cout << ArrayOfSendRequests[i]
+// << " _MapOfSendBuffers->SendBuffer "
+// << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->SendBuffer
+// << " Counter " << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->Counter
+// << endl ;
+ if ( (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->Counter == 0 ) {
+// cout << "CheckSent" << _MyRank << " SendRequestId " << ArrayOfSendRequests[i]
+// << " Counter " << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->Counter
+// << " flag " << flag << " SendBuffer "
+// << (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->SendBuffer
+// << " deleted. Erase in _MapOfSendBuffers :" << endl ;
+ if ( (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->DataType ==
+ _MPIAccess->TimeType() ) {
+ delete (TimeMessage * ) (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
+ }
+ else {
+// free( (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->SendBuffer ) ;
+ if ( (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT ) {
+ delete [] (int *) (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
+ }
+ else {
+ delete [] (double *) (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
+ }
+ }
+ delete (*_MapOfSendBuffers)[ ArrayOfSendRequests[i] ] ;
+ }
+// cout << "CheckSent" << _MyRank << " Erase in _MapOfSendBuffers SendRequestId "
+// << ArrayOfSendRequests[i] << endl ;
+ (*_MapOfSendBuffers).erase( ArrayOfSendRequests[i] ) ;
+ }
+ }
+ delete [] ArrayOfSendRequests ;
+ return sts ;
+}
+
+int MPI_AccessDEC::CheckFinalRecv() {
+ int target ;
+ for ( target = 0 ; target < _GroupSize ; target++ ) {
+ if ( (*_DataMessages)[target][0] != NULL ) {
+ if ( _DataMessagesType == MPI_INT ) {
+ delete [] (int *) (*_DataMessages)[target][0] ;
+ }
+ else {
+ delete [] (double *) (*_DataMessages)[target][0] ;
+ }
+ }
+ if ( (*_DataMessages)[target][1] != NULL ) {
+ if ( _DataMessagesType == MPI_INT ) {
+ delete [] (int *) (*_DataMessages)[target][1] ;
+ }
+ else {
+ delete [] (double *) (*_DataMessages)[target][1] ;
+ }
+ }
+ }
+ return _MPIAccess->CancelAll() ;
+}
+
+ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod ) {
+ switch (interpolationmethod) {
+ case WithoutTimeInterp :
+ f << " WithoutTimeInterpolation ";
+ break;
+ case LinearTimeInterp :
+ f << " LinearTimeInterpolation ";
+ break;
+ default :
+ f << " UnknownTimeInterpolation ";
+ break;
+ }
+
+ return f;
+}
+
+
+}
+
--- /dev/null
+#ifndef MPI_ACCESSDEC_HXX_
+#define MPI_ACCESSDEC_HXX_
+
+#include <map>
+#include <iostream>
+
+#include "MPI_Access.hxx"
+#include "DEC.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+namespace ParaMEDMEM {
+
+ typedef enum{WithoutTimeInterp,LinearTimeInterp} TimeInterpolationMethod;
+
+ class MPI_AccessDEC {
+
+ public:
+ MPI_AccessDEC( const ProcessorGroup& local_group, const ProcessorGroup& distant_group,
+ bool Asynchronous = true ) ;
+ //MPI_AccessDEC( ProcessorGroup& local_group, ProcessorGroup& distant_group,
+ // TimeInterpolator * aTimeInterpolator ,
+ // bool Asynchronous = true ) ;
+ virtual ~MPI_AccessDEC();
+ MPI_Access * MPIAccess() ;
+ const MPI_Comm* GetComm() ;
+
+ void Asynchronous( bool Asynchronous = true ) ;
+ void SetTimeInterpolator( TimeInterpolationMethod anInterp , double InterpPrecision=0 ,
+ int nStepBefore=1, int nStepAfter=1 ) ;
+
+ void SetTime( double t ) ;
+ void SetTime( double t , double dt ) ;
+ bool OutOfTime() ;
+
+ int Send( void* sendbuf, int sendcount , MPI_Datatype sendtype , int target ) ;
+ int Recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ) ;
+ int Recv( void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ,
+ int RecvRequestId , bool Asynchronous=false ) ;
+ int SendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype , int target ) ;
+
+ int AllToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype ) ;
+ int AllToAllv( void* sendbuf, int* sendcounts, int* sdispls, MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls, MPI_Datatype recvtype ) ;
+
+ int AllToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype ) ;
+ int AllToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int* recvcounts, int* rdispls,
+ MPI_Datatype recvtype ) ;
+// int CheckTime( int recvcount , int recvsize , MPI_Datatype recvtype , int target ,
+// bool &OutOfTime ) ;
+ int CheckTime( int recvcount , int recvsize , MPI_Datatype recvtype , int target ) ;
+ int CheckSent(bool WithWait=false) ;
+ int CheckFinalSent() {
+ return CheckSent( true ) ; } ;
+ int CheckFinalRecv() ;
+
+ protected:
+ int Send( void* sendbuf, int sendcount , int sendoffset , MPI_Datatype sendtype ,
+ int target ) ;
+ int Recv( void* recvbuf, int recvcount , int recvoffset , MPI_Datatype recvtype ,
+ int target ) ;
+
+ int Send( void* sendbuf, int sendcount , int sendoffset , MPI_Datatype sendtype ,
+ int target, int &SendRequestId ) ;
+ int Recv( void* recvbuf, int recvcount , int recvoffset , MPI_Datatype recvtype ,
+ int target, int &RecvRequestId ) ;
+ int SendRecv( void* sendbuf, int sendcount , int sendoffset ,
+ MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , int recvoffset ,
+ MPI_Datatype recvtype , int target ,
+ int &SendRequestId ,int &RecvRequestId ) ;
+
+ private :
+ bool _Asynchronous ;
+ const ProcessorGroup * _local_group ;
+ const ProcessorGroup * _distant_group ;
+ MPIProcessorGroup * _MPI_union_group ;
+
+ TimeInterpolator * _TimeInterpolator ;
+ int _nStepBefore ;
+ int _nStepAfter ;
+
+ int _MyRank ;
+ int _GroupSize ;
+ MPI_Access * _MPIAccess ;
+
+ double _t ;
+ double _dt ;
+ bool _OutOfTime ;
+
+// TimeMessages from each target _TimeMessages[target][Step] : TimeMessage
+ vector< vector< TimeMessage > > *_TimeMessages ;
+// Corresponding DataMessages from each target _DataMessages[target][~TimeStep]
+ MPI_Datatype _DataMessagesType ;
+ vector< vector< void * > > *_DataMessages ;
+
+ typedef struct { void * SendBuffer ;
+ int Counter ;
+ MPI_Datatype DataType ; } SendBuffStruct ;
+// int RequestId -> SendBuffStruct :
+ map< int , SendBuffStruct * > *_MapOfSendBuffers ;
+
+ };
+
+ inline MPI_Access * MPI_AccessDEC::MPIAccess() {
+ return _MPIAccess ; } ;
+ inline const MPI_Comm* MPI_AccessDEC::GetComm() {
+ return _MPI_union_group->getComm() ; } ;
+
+ inline void MPI_AccessDEC::Asynchronous( bool Asynchronous ) {
+ _Asynchronous = Asynchronous ; } ;
+
+ inline void MPI_AccessDEC::SetTime( double t ) {
+ _t = t ; _dt = -1 ; } ;
+ inline void MPI_AccessDEC::SetTime( double t , double dt ) {
+ _t = t ; _dt = dt ; } ;
+ inline bool MPI_AccessDEC::OutOfTime() {
+ return _OutOfTime ; } ;
+
+ inline int MPI_AccessDEC::Send( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ int target ) {
+ int SendRequestId ;
+ int sts ;
+ if ( _Asynchronous ) {
+ sts = _MPIAccess->ISend( sendbuf , sendcount , sendtype , target ,
+ SendRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->Send( sendbuf , sendcount , sendtype , target ,
+ SendRequestId ) ;
+ if ( sts == MPI_SUCCESS ) {
+ free( sendbuf ) ;
+ }
+ }
+ return sts ; } ;
+ inline int MPI_AccessDEC::Recv( void* recvbuf, int recvcount , MPI_Datatype recvtype ,
+ int target ) {
+ int RecvRequestId ;
+ int sts ;
+ if ( _Asynchronous ) {
+ sts = _MPIAccess->IRecv( recvbuf , recvcount , recvtype , target ,
+ RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->Recv( recvbuf , recvcount , recvtype , target ,
+ RecvRequestId ) ;
+ }
+ return sts ; } ;
+ inline int MPI_AccessDEC::Recv( void* recvbuf, int recvcount , MPI_Datatype recvtype ,
+ int target , int RecvRequestId , bool Asynchronous ) {
+ int sts ;
+ if ( Asynchronous ) {
+ sts = _MPIAccess->IRecv( recvbuf , recvcount , recvtype , target ,
+ RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->Recv( recvbuf , recvcount , recvtype , target ,
+ RecvRequestId ) ;
+ }
+ return sts ; } ;
+ inline int MPI_AccessDEC::SendRecv( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
+ void* recvbuf, int recvcount , MPI_Datatype recvtype ,
+ int target ) {
+ int SendRequestId ;
+ int RecvRequestId ;
+ int sts ;
+ if ( _Asynchronous ) {
+ sts = _MPIAccess->ISendRecv( sendbuf , sendcount , sendtype , target ,
+ SendRequestId ,
+ recvbuf , recvcount , recvtype , target ,
+ RecvRequestId ) ;
+ }
+ else {
+ sts = _MPIAccess->SendRecv( sendbuf , sendcount , sendtype , target ,
+ SendRequestId ,
+ recvbuf , recvcount , recvtype , target ,
+ RecvRequestId ) ;
+ }
+ return sts ; } ;
+
+ inline int MPI_AccessDEC::Send( void* sendbuf, int sendcount , int sendoffset ,
+ MPI_Datatype sendtype , int target ) {
+ int SendRequestId ;
+ return Send( sendbuf , sendcount , sendoffset , sendtype , target ,
+ SendRequestId ) ; } ;
+ inline int MPI_AccessDEC::Recv( void* recvbuf, int recvcount , int recvoffset ,
+ MPI_Datatype recvtype , int target ) {
+ int RecvRequestId ;
+ return Recv( recvbuf , recvcount , recvoffset , recvtype , target ,
+ RecvRequestId ) ; } ;
+
+
+ostream & operator<< (ostream &,const TimeInterpolationMethod &);
+
+}
+
+#endif /*MPI_ACCESSDEC_HXX_*/
ICoCoMEDField.hxx \
ICoCoTrioField.hxx \
BBTree.H\
+MPI_Access.hxx \
+MPI_AccessDEC.hxx \
+TimeInterpolator.hxx \
+LinearTimeInterpolator.hxx
# Libraries targets
ElementLocator.cxx\
ExplicitTopology.cxx\
-ICoCoMEDField.cxx
+ICoCoMEDField.cxx \
+MPI_Access.cxx \
+MPI_AccessDEC.cxx \
+TimeInterpolator.cxx \
+LinearTimeInterpolator.cxx
# Executables targets
BIN =
MxN_Mapping::MxN_Mapping(const ProcessorGroup& local_group, const ProcessorGroup& distant_group)
: _union_group(local_group.fuse(distant_group))
{
+ _accessDEC = new MPI_AccessDEC(local_group,distant_group);
+ _allToAllMethod = Native ; // default to the native MPI_Alltoallv path
_send_proc_offsets.resize(_union_group->size()+1,0);
_recv_proc_offsets.resize(_union_group->size()+1,0);
MxN_Mapping::~MxN_Mapping()
{
delete _union_group;
+ delete _accessDEC ;
}
delete[] recvdispls;
}
-
-/*! Exchanging field data between two groups of processes
- *
- * \param field MEDMEM field containing the values to be sent
- *
- * The ids that were defined by addElementFromSource method
- * are sent.
- */
-void MxN_Mapping::sendRecv(MEDMEM::FIELD<double>& field)
-{
- CommInterface comm_interface=_union_group->getCommInterface();
- const MPIProcessorGroup* group = static_cast<const MPIProcessorGroup*>(_union_group);
+// /*! Exchanging field data between two groups of processes
+// *
+// * \param field MEDMEM field containing the values to be sent
+// *
+// * The ids that were defined by addElementFromSource method
+// * are sent.
+// */
+// void MxN_Mapping::sendRecv(MEDMEM::FIELD<double>& field)
+// {
+// CommInterface comm_interface=_union_group->getCommInterface();
+// const MPIProcessorGroup* group = static_cast<const MPIProcessorGroup*>(_union_group);
- int nbcomp=field.getNumberOfComponents();
- double* sendbuf=0;
- double* recvbuf=0;
- if (_sending_ids.size() >0)
- sendbuf = new double[_sending_ids.size()*nbcomp];
- if (_recv_ids.size()>0)
- recvbuf = new double[_recv_ids.size()*nbcomp];
+// int nbcomp=field.getNumberOfComponents();
+// double* sendbuf=0;
+// double* recvbuf=0;
+// if (_sending_ids.size() >0)
+// sendbuf = new double[_sending_ids.size()*nbcomp];
+// if (_recv_ids.size()>0)
+// recvbuf = new double[_recv_ids.size()*nbcomp];
- int* sendcounts = new int[_union_group->size()];
- int* senddispls=new int[_union_group->size()];
- int* recvcounts=new int[_union_group->size()];
- int* recvdispls=new int[_union_group->size()];
+// int* sendcounts = new int[_union_group->size()];
+// int* senddispls=new int[_union_group->size()];
+// int* recvcounts=new int[_union_group->size()];
+// int* recvdispls=new int[_union_group->size()];
- for (int i=0; i< _union_group->size(); i++)
- {
- sendcounts[i]=nbcomp*(_send_proc_offsets[i+1]-_send_proc_offsets[i]);
- senddispls[i]=nbcomp*(_send_proc_offsets[i]);
- recvcounts[i]=nbcomp*(_recv_proc_offsets[i+1]-_recv_proc_offsets[i]);
- recvdispls[i]=nbcomp*(_recv_proc_offsets[i]);
- }
- //building the buffer of the elements to be sent
- vector<int> offsets = _send_proc_offsets;
- for (int i=0; i<_sending_ids.size();i++)
- {
- int iproc = _sending_ids[i].first;
- for (int icomp=0; icomp<nbcomp; icomp++)
- sendbuf[offsets[iproc]*nbcomp+icomp]=field.getValueIJ(i+1, icomp+1);
- offsets[iproc]++;
- }
+// for (int i=0; i< _union_group->size(); i++)
+// {
+// sendcounts[i]=nbcomp*(_send_proc_offsets[i+1]-_send_proc_offsets[i]);
+// senddispls[i]=nbcomp*(_send_proc_offsets[i]);
+// recvcounts[i]=nbcomp*(_recv_proc_offsets[i+1]-_recv_proc_offsets[i]);
+// recvdispls[i]=nbcomp*(_recv_proc_offsets[i]);
+// }
+// //building the buffer of the elements to be sent
+// vector<int> offsets = _send_proc_offsets;
+// for (int i=0; i<_sending_ids.size();i++)
+// {
+// int iproc = _sending_ids[i].first;
+// for (int icomp=0; icomp<nbcomp; icomp++)
+// sendbuf[offsets[iproc]*nbcomp+icomp]=field.getValueIJ(i+1, icomp+1);
+// offsets[iproc]++;
+// }
- //communication phase
- const MPI_Comm* comm = group->getComm();
- comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_INT,
- recvbuf, recvcounts, recvdispls, MPI_INT,
- *comm);
+// //communication phase
+// const MPI_Comm* comm = group->getComm();
+// comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_INT,
+// recvbuf, recvcounts, recvdispls, MPI_INT,
+// *comm);
- //setting the received values in the field
- double* recvptr;
- for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++)
- {
- for (int icomp=0; icomp<nbcomp; icomp++){
- field.setValueIJ(_recv_ids[i],icomp+1,*recvptr);
- recvptr++;
- }
- }
+// //setting the received values in the field
+// double* recvptr;
+// for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++)
+// {
+// for (int icomp=0; icomp<nbcomp; icomp++){
+// field.setValueIJ(_recv_ids[i],icomp+1,*recvptr);
+// recvptr++;
+// }
+// }
- if (sendbuf!=0) delete[] sendbuf;
- if (recvbuf!=0) delete[] recvbuf;
- delete[] sendcounts;
- delete[] recvcounts;
- delete[] senddispls;
- delete[] recvdispls;
+// if (sendbuf!=0) delete[] sendbuf;
+// if (recvbuf!=0) delete[] recvbuf;
+// delete[] sendcounts;
+// delete[] recvcounts;
+// delete[] senddispls;
+// delete[] recvdispls;
-}
+// }
/*! Exchanging field data between two groups of processes
*
* \param field MEDMEM field containing the values to be sent
}
//communication phase
- const MPI_Comm* comm = group->getComm();
- comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
- recvbuf, recvcounts, recvdispls, MPI_DOUBLE,
- *comm);
-
+ switch (_allToAllMethod) {
+ case Native:
+ {
+ const MPI_Comm* comm = group->getComm();
+ comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
+ recvbuf, recvcounts, recvdispls, MPI_DOUBLE,
+ *comm);
+ break;
+ }
+ case PointToPoint:
+ _accessDEC->AllToAllv(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
+ recvbuf, recvcounts, recvdispls, MPI_DOUBLE);
+ break;
+ }
//setting zero values in the field
// for (int i=0; i< field.getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);i++)
}
}
- if (sendbuf!=0) delete[] sendbuf;
+// if (sendbuf!=0) delete[] sendbuf;
+ if (sendbuf!=0 && _allToAllMethod == Native) delete[] sendbuf;
if (recvbuf!=0) delete[] recvbuf;
delete[] sendcounts;
delete[] recvcounts;
// }
//communication phase
- const MPI_Comm* comm = group->getComm();
- comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
- recvbuf, recvcounts, recvdispls, MPI_DOUBLE,
- *comm);
+ switch (_allToAllMethod) {
+ case Native:
+ {
+ const MPI_Comm* comm = group->getComm();
+ comm_interface.allToAllV(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
+ recvbuf, recvcounts, recvdispls, MPI_DOUBLE,
+ *comm);
+ break;
+ }
+ case PointToPoint:
+ _accessDEC->AllToAllv(sendbuf, sendcounts, senddispls, MPI_DOUBLE,
+ recvbuf, recvcounts, recvdispls, MPI_DOUBLE);
+ break;
+ }
//setting zero values in the field
}
- if (sendbuf!=0) delete[] sendbuf;
+// if (sendbuf!=0) delete[] sendbuf;
+ if (sendbuf!=0 && _allToAllMethod == Native) delete[] sendbuf;
if (recvbuf!=0) delete[] recvbuf;
delete[] sendcounts;
delete[] recvcounts;
}
+ostream & operator<< (ostream & f ,const AllToAllMethod & alltoallmethod ) {
+ switch (alltoallmethod) {
+ case Native :
+ f << " Native ";
+ break;
+ case PointToPoint :
+ f << " PointToPoint ";
+ break;
+ default :
+ f << " UnknownAllToAllMethod ";
+ break;
+ }
+
+ return f;
+}
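+
+// Illustrative sketch (commented out, not part of this change): the DEC side can
+// switch the communication phase to the MPI_AccessDEC point-to-point path; the
+// mapping variable name is an assumption.
+// MxN_Mapping mapping( local_group , distant_group ) ;
+// mapping.setAllToAllMethod( PointToPoint ) ;
+// mapping.getAccessDEC()->Asynchronous( true ) ;
+// mapping.getAccessDEC()->SetTimeInterpolator( LinearTimeInterp ) ;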
+
}
#include <vector>
#include "MEDMEM_Field.hxx"
+#include "MPI_AccessDEC.hxx"
namespace ParaMEDMEM
{
+typedef enum{Native,PointToPoint} AllToAllMethod;
+
class ProcessorGroup;
class MxN_Mapping
void sendRecv(MEDMEM::FIELD<double>& field);
void sendRecv(double* field, MEDMEM::FIELD<double>& field) const ;
void reverseSendRecv(double* field, MEDMEM::FIELD<double>& field) const ;
-
+ void setAllToAllMethod(const AllToAllMethod& method){_allToAllMethod = method ;};
+ MPI_AccessDEC* getAccessDEC(){return _accessDEC;}
private :
// ProcessorGroup& _local_group;
// ProcessorGroup& _distant_group;
ProcessorGroup* _union_group;
+ MPI_AccessDEC * _accessDEC;
+ AllToAllMethod _allToAllMethod ;
int _nb_comps;
std::vector<pair<int,int> > _sending_ids;
std::vector<int> _recv_ids;
std::vector<int> _send_proc_offsets;
std::vector<int> _recv_proc_offsets;
-
};
+ostream & operator<< (ostream &,const AllToAllMethod &);
+
}
#endif /*MXN_MAPPING_HXX_*/
CPPUNIT_TEST(testBlockTopology_constructor);
CPPUNIT_TEST(testBlockTopology_serialize);
CPPUNIT_TEST(testIntersectionDEC_2D);
+ CPPUNIT_TEST(testSynchronousEqualIntersectionWithoutInterpDEC_2D);
+ CPPUNIT_TEST(testSynchronousEqualIntersectionDEC_2D);
+ CPPUNIT_TEST(testSynchronousFasterSourceIntersectionDEC_2D);
+ CPPUNIT_TEST(testSynchronousSlowerSourceIntersectionDEC_2D);
+ CPPUNIT_TEST(testSynchronousSlowSourceIntersectionDEC_2D);
+ CPPUNIT_TEST(testSynchronousFastSourceIntersectionDEC_2D);
+ CPPUNIT_TEST(testAsynchronousEqualIntersectionDEC_2D);
+ CPPUNIT_TEST(testAsynchronousFasterSourceIntersectionDEC_2D);
+ CPPUNIT_TEST(testAsynchronousSlowerSourceIntersectionDEC_2D);
+ CPPUNIT_TEST(testAsynchronousSlowSourceIntersectionDEC_2D);
+ CPPUNIT_TEST(testAsynchronousFastSourceIntersectionDEC_2D);
//can be added again after FVM correction for 2D
// CPPUNIT_TEST(testNonCoincidentDEC_2D);
CPPUNIT_TEST(testNonCoincidentDEC_3D);
void testIntersectionDEC_2D();
void testNonCoincidentDEC_2D();
void testNonCoincidentDEC_3D();
+ void testSynchronousEqualIntersectionWithoutInterpDEC_2D();
+ void testSynchronousEqualIntersectionDEC_2D();
+ void testSynchronousFasterSourceIntersectionDEC_2D();
+ void testSynchronousSlowerSourceIntersectionDEC_2D();
+ void testSynchronousSlowSourceIntersectionDEC_2D();
+ void testSynchronousFastSourceIntersectionDEC_2D();
void testAsynchronousEqualIntersectionDEC_2D();
void testAsynchronousFasterSourceIntersectionDEC_2D();
void testAsynchronousSlowerSourceIntersectionDEC_2D();
const std::string& meshname2,
int nbprocsource);
void testAsynchronousIntersectionDEC_2D(double dtA, double tmaxA,
- double dtB, double tmaxB);
+ double dtB, double tmaxB, bool Asynchronous, bool WithInterp );
};
#include "MPIProcessorGroup.hxx"
#include "Topology.hxx"
#include "DEC.hxx"
+#include "MxN_Mapping.hxx"
#include "IntersectionDEC.hxx"
#include "ParaMESH.hxx"
#include "ParaFIELD.hxx"
cout << "end of IntersectionDEC_2D test"<<endl;
}
+//Synchronous tests without interpolation :
+void ParaMEDMEMTest::testSynchronousEqualIntersectionWithoutInterpDEC_2D()
+{
+ testAsynchronousIntersectionDEC_2D(0.1,1,0.1,1,false,false);
+}
+
+//Synchronous tests with interpolation :
+void ParaMEDMEMTest::testSynchronousEqualIntersectionDEC_2D()
+{
+ testAsynchronousIntersectionDEC_2D(0.1,1,0.1,1,false,true);
+}
+void ParaMEDMEMTest::testSynchronousFasterSourceIntersectionDEC_2D()
+{
+ testAsynchronousIntersectionDEC_2D(0.09,1,0.1,1,false,true);
+}
+void ParaMEDMEMTest::testSynchronousSlowerSourceIntersectionDEC_2D()
+{
+ testAsynchronousIntersectionDEC_2D(0.11,1,0.1,1,false,true);
+}
+void ParaMEDMEMTest::testSynchronousSlowSourceIntersectionDEC_2D()
+{
+ testAsynchronousIntersectionDEC_2D(0.11,1,0.01,1,false,true);
+}
+void ParaMEDMEMTest::testSynchronousFastSourceIntersectionDEC_2D()
+{
+ testAsynchronousIntersectionDEC_2D(0.01,1,0.11,1,false,true);
+}
+//Asynchronous tests with interpolation :
void ParaMEDMEMTest::testAsynchronousEqualIntersectionDEC_2D()
{
- testAsynchronousIntersectionDEC_2D(0.1,1,0.1,1);
+ testAsynchronousIntersectionDEC_2D(0.1,1,0.1,1,true,true);
}
void ParaMEDMEMTest::testAsynchronousFasterSourceIntersectionDEC_2D()
{
- testAsynchronousIntersectionDEC_2D(0.09,1,0.1,1);
+ testAsynchronousIntersectionDEC_2D(0.09,1,0.1,1,true,true);
}
void ParaMEDMEMTest::testAsynchronousSlowerSourceIntersectionDEC_2D()
{
- testAsynchronousIntersectionDEC_2D(0.11,1,0.1,1);
+ testAsynchronousIntersectionDEC_2D(0.11,1,0.1,1,true,true);
}
void ParaMEDMEMTest::testAsynchronousSlowSourceIntersectionDEC_2D()
{
- testAsynchronousIntersectionDEC_2D(0.11,1,0.01,1);
+ testAsynchronousIntersectionDEC_2D(0.11,1,0.01,1,true,true);
}
void ParaMEDMEMTest::testAsynchronousFastSourceIntersectionDEC_2D()
{
- testAsynchronousIntersectionDEC_2D(0.01,1,0.11,1);
+ testAsynchronousIntersectionDEC_2D(0.01,1,0.11,1,true,true);
}
/*!
* the other one receives with dtB as an interval, the max time being tmaxB
*/
void ParaMEDMEMTest::testAsynchronousIntersectionDEC_2D(double dtA, double tmaxA,
- double dtB, double tmaxB)
+ double dtB, double tmaxB, bool Asynchronous, bool WithInterp )
{
int size;
int rank;
MEDMEM::MESH* mesh;
MEDMEM::SUPPORT* support;
- MEDMEM::FIELD<double>* field;
+// MEDMEM::FIELD<double>* field;
ParaMEDMEM::ParaMESH* paramesh;
ParaMEDMEM::ParaFIELD* parafield;
+ ParaMEDMEM::ParaSUPPORT* parasupport ;
+ double * value ;
+ ICoCo::Field* icocofield ;
+
string data_dir = getenv("MED_ROOT_DIR");
string tmp_dir = getenv("TMP");
if (tmp_dir == "")
ParaMEDMEMTest_TmpFilesRemover aRemover;
MPI_Barrier(MPI_COMM_WORLD);
+
if (source_group->containsMyRank())
{
string master = filename_xml1;
paramesh=new ParaMESH (*mesh,*source_group,"source mesh");
- ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT( support,*source_group);
+// ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT( support,*source_group);
+ parasupport=new UnstructuredParaSUPPORT( support,*source_group);
ParaMEDMEM::ComponentTopology comptopo;
parafield = new ParaFIELD(parasupport, comptopo);
int nb_local=support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
- double * value= new double[nb_local];
+// double * value= new double[nb_local];
+ value = new double[nb_local];
for(int ielem=0; ielem<nb_local;ielem++)
value[ielem]=0.0;
parafield->getField()->setValue(value);
- ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
+// ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
+ icocofield=new ICoCo::MEDField(paramesh,parafield);
dec.attachLocalField(icocofield);
}
support=new MEDMEM::SUPPORT(mesh,"all elements",MED_EN::MED_CELL);
paramesh=new ParaMESH (*mesh,*target_group,"target mesh");
- ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group);
+// ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group);
+ parasupport=new UnstructuredParaSUPPORT(support,*target_group);
ParaMEDMEM::ComponentTopology comptopo;
parafield = new ParaFIELD(parasupport, comptopo);
int nb_local=support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
- double * value= new double[nb_local];
+// double * value= new double[nb_local];
+ value = new double[nb_local];
for(int ielem=0; ielem<nb_local;ielem++)
value[ielem]=0.0;
parafield->getField()->setValue(value);
- ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
+// ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
+ icocofield=new ICoCo::MEDField(paramesh,parafield);
dec.attachLocalField(icocofield);
}
if (source_group->containsMyRank())
{
- dec.synchronize();
cout<<"DEC usage"<<endl;
+ dec.setOption("Asynchronous",Asynchronous);
+ if ( WithInterp ) {
+ dec.setOption("TimeInterpolation",LinearTimeInterp);
+ }
+ dec.setOption("AllToAllMethod",PointToPoint);
+ dec.synchronize();
dec.setOption("ForcedRenormalization",false);
- dec.setOption("TimeInterpolation","Linear");
for (double time=0; time<tmaxA+1e-10; time+=dtA)
{
- dec.sendData(time, dtA);
- double* value = const_cast<double*> (parafield->getField()->getValue());
- int nb_local=parafield->getField()->getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
- for (int i=0; i<nb_local;i++)
- value[i]= time+dtA;
+ cout << "testAsynchronousIntersectionDEC_2D" << rank << " time " << time
+ << " dtA " << dtA << " tmaxA " << tmaxA << endl ;
+ if ( time+dtA < tmaxA+1e-10 ) {
+ dec.sendData( time , dtA );
+ }
+ else {
+ dec.sendData( time , 0 );
+ }
+ double* value = const_cast<double*> (parafield->getField()->getValue());
+ int nb_local=parafield->getField()->getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
+ for (int i=0; i<nb_local;i++)
+ value[i]= time+dtA;
}
}
//attaching a DEC to the target group
if (target_group->containsMyRank())
{
+ cout<<"DEC usage"<<endl;
+ dec.setOption("Asynchronous",Asynchronous);
+ if ( WithInterp ) {
+ dec.setOption("TimeInterpolation",LinearTimeInterp);
+ }
+ dec.setOption("AllToAllMethod",PointToPoint);
dec.synchronize();
dec.setOption("ForcedRenormalization",false);
- dec.setOption("TimeInterpolation","Linear");
vector<double> times;
for (double time=0; time<tmaxB+1e-10; time+=dtB)
{
- dec.recvData(time);
+ cout << "testAsynchronousIntersectionDEC_2D" << rank << " time " << time
+ << " dtB " << dtB << " tmaxB " << tmaxB << endl ;
+ dec.recvData( time );
+ cout << "testAsynchronousIntersectionDEC_2D" << rank << " time " << time
+ << " VolumeIntegral " << parafield->getVolumeIntegral(1)
+ << " time*10000 " << time*10000 << endl ;
CPPUNIT_ASSERT_DOUBLES_EQUAL(parafield->getVolumeIntegral(1),time*10000,0.001);
}
}
- delete source_group;
+ delete source_group;
delete target_group;
delete self_group;
+ delete mesh ;
+ delete support ;
+ delete paramesh ;
+ delete parafield ;
+ delete parasupport ;
+ delete [] value ;
+ delete icocofield ;
+ cout << "testAsynchronousIntersectionDEC_2D" << rank << " MPI_Barrier " << endl ;
MPI_Barrier(MPI_COMM_WORLD);
cout << "end of IntersectionDEC_2D test"<<endl;
--- /dev/null
+
+#include "TimeInterpolator.hxx"
+
+using namespace std;
+
+namespace ParaMEDMEM {
+
+TimeInterpolator::TimeInterpolator( double InterpPrecision, int nStepBefore,
+ int nStepAfter ){
+ _InterpPrecision = InterpPrecision ;
+ _nStepBefore = nStepBefore ;
+ _nStepAfter = nStepAfter ;
+}
+
+TimeInterpolator::~TimeInterpolator() {
+}
+
+}
--- /dev/null
+#ifndef TIMEINTERPOLATOR_HXX_
+#define TIMEINTERPOLATOR_HXX_
+
+#include <map>
+#include <iostream>
+
+#include "ProcessorGroup.hxx"
+
+namespace ParaMEDMEM {
+
+ class TimeInterpolator {
+
+ public:
+ TimeInterpolator( double InterpPrecision, int nStepBefore=1, int nStepAfter=1 ) ;
+ virtual ~TimeInterpolator();
+
+ void SetInterpParams( double InterpPrecision, int nStepBefore=1, int nStepAfter=1 ) {
+ _InterpPrecision = InterpPrecision ;
+ _nStepBefore = nStepBefore ;
+ _nStepAfter = nStepAfter ; } ;
+ void Steps( int &nStepBefore, int &nStepAfter ) {
+ nStepBefore = _nStepBefore ;
+ nStepAfter = _nStepAfter ; } ;
+ virtual void DoInterp( double time0, double time1, double time, int recvcount ,
+ int nbuff0, int nbuff1,
+ int **recvbuff0, int **recvbuff1, int *result )= 0 ;
+ virtual void DoInterp( double time0, double time1, double time, int recvcount ,
+ int nbuff0, int nbuff1,
+ double **recvbuff0, double **recvbuff1, double *result )= 0 ;
+
+ protected :
+ double _InterpPrecision ;
+ int _nStepBefore ;
+ int _nStepAfter ;
+ };
+}
+
+#endif
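+
+// Illustrative sketch (commented out, not part of this change): a concrete
+// TimeInterpolator only has to implement the two DoInterp overloads. The class
+// below is a hypothetical example of a linear interpolation between the two
+// buffered time steps, not the actual LinearTimeInterpolator of the library.
+// class ExampleLinearInterp : public ParaMEDMEM::TimeInterpolator {
+//   public:
+//     ExampleLinearInterp( double prec ) : TimeInterpolator( prec ) {}
+//     virtual void DoInterp( double time0, double time1, double time, int recvcount,
+//                            int nbuff0, int nbuff1,
+//                            double **recvbuff0, double **recvbuff1, double *result ) {
+//       double alpha = ( time1 == time0 ) ? 0. : ( time - time0 ) / ( time1 - time0 ) ;
+//       for ( int i = 0 ; i < recvcount ; i++ )
+//         result[i] = (1.-alpha)*(*recvbuff0)[i] + alpha*(*recvbuff1)[i] ;
+//     }
+//     virtual void DoInterp( double time0, double time1, double time, int recvcount,
+//                            int nbuff0, int nbuff1,
+//                            int **recvbuff0, int **recvbuff1, int *result ) {
+//       for ( int i = 0 ; i < recvcount ; i++ )
+//         result[i] = (*recvbuff1)[i] ; // keep the most recent step for integers
+//     }
+// };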
if (argc !=2 || size < 2)
{
cout << "usage :"<<endl;
- cout << "mpirun -n <nbprocs> test_NonCoincidentDEC <nbprocsource>"<<endl;
+ cout << "mpirun -np <nbprocs> test_IntersectionDEC <nbprocsource>"<<endl;
cout << " (nbprocs >=2)"<<endl;
return 1;
}