1 // Copyright (C) 2007-2024 CEA, EDF
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "MPIAccess/MPIAccessDEC.hxx"
30 This constructor creates an MPIAccessDEC which has \a source_group as a working side
31 and \a target_group as an idle side.
32 The constructor must be called synchronously on all processors of both processor groups.
34 \param source_group working side ProcessorGroup
35 \param target_group lazy side ProcessorGroup
36 \param Asynchronous Communication mode (default asynchronous)
// Constructor.  Fuses the working-side and lazy-side process groups into a
// union group, wraps it in an MPIProcessorGroup + MPIAccess, and allocates the
// per-target bookkeeping vectors used by the time-interpolated exchanges.
// NOTE(review): this view of the file is elided — braces and some statements
// are missing between the numbered lines below.
38 MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
39 const ProcessorGroup& target_group,
// Union of both sides: every rank of source_group and target_group.
43 ProcessorGroup * union_group = source_group.fuse(target_group) ;
46 for ( i = 0 ; i < union_group->size() ; i++ )
// const_cast: source_group is passed const but we need the (non-const)
// MPIProcessorGroup to reach its world communicator.
50 MPIProcessorGroup *mpilg = static_cast<MPIProcessorGroup *>(const_cast<ProcessorGroup *>(&source_group));
51 _MPI_union_group = new MEDCoupling::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
53 _my_rank = _MPI_union_group->myRank() ;
54 _group_size = _MPI_union_group->size() ;
55 _MPI_access = new MPIAccess( _MPI_union_group ) ;
56 _asynchronous = Asynchronous ;
// One slot per rank of the union group for the time/data message bookkeeping.
57 _time_messages = new vector< vector< TimeMessage > > ;
58 _time_messages->resize( _group_size ) ;
59 _out_of_time = new vector< bool > ;
60 _out_of_time->resize( _group_size ) ;
61 _data_messages_recv_count = new vector< int > ;
62 _data_messages_recv_count->resize( _group_size ) ;
63 for ( i = 0 ; i < _group_size ; i++ )
65 (*_out_of_time)[i] = false ;
66 (*_data_messages_recv_count)[i] = 0 ;
68 _data_messages_type = new vector< MPI_Datatype > ;
69 _data_messages_type->resize( _group_size ) ;
70 _data_messages = new vector< vector< void * > > ;
71 _data_messages->resize( _group_size ) ;
// No interpolator until setTimeInterpolator() is called.
72 _time_interpolator = NULL ;
73 _map_of_send_buffers = new map< int , SendBuffStruct * > ;
// Destructor: releases everything allocated in the constructor (union group,
// interpolator, and the per-target bookkeeping vectors).  The NULL checks
// before delete are redundant (delete on NULL is a no-op) but kept as-is.
// NOTE(review): elided view — some statements between the numbered lines
// (e.g. deletion of _MPI_access) are not visible here.
76 MPIAccessDEC::~MPIAccessDEC()
80 delete _MPI_union_group ;
82 if ( _time_interpolator )
83 delete _time_interpolator ;
85 delete _time_messages ;
88 if ( _data_messages_recv_count )
89 delete _data_messages_recv_count ;
90 if ( _data_messages_type )
91 delete _data_messages_type ;
93 delete _data_messages ;
94 if ( _map_of_send_buffers )
95 delete _map_of_send_buffers ;
// Installs (or removes) the time interpolator and sizes the per-target
// time/data message windows to _n_step_before + _n_step_after entries,
// initialised to the "no message yet" sentinel (time/deltatime = -1, NULL
// data).  Replaces any previously installed interpolator.
// NOTE(review): elided view — braces and case terminators are missing.
98 void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
99 double InterpPrecision, int nStepBefore,
102 if ( _time_interpolator )
103 delete _time_interpolator ;
104 switch ( aTimeInterp )
106 case WithoutTimeInterp :
// No interpolation: allToAll/allToAllv run their plain (non-Time) paths.
107 _time_interpolator = NULL ;
111 case LinearTimeInterp :
112 _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
114 _n_step_before = nStepBefore ;
115 _n_step_after = nStepAfter ;
117 for ( i = 0 ; i < _group_size ; i++ )
119 (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
120 (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
122 for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ )
// time == -1 marks an empty slot (checked by allToAll*Time / checkTime).
124 (*_time_messages)[ i ][ j ].time = -1 ;
125 (*_time_messages)[ i ][ j ].deltatime = -1 ;
126 (*_data_messages)[ i ][ j ] = NULL ;
134 Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
135 (Internal Protected method)
137 Returns the request identifier SendRequestId
// Internal protected send of sendcount items starting at sendbuf[offset] to
// `target`; the request id is returned through SendRequestId.  Dispatches on
// the MPI datatype (MPI_INT / MPI_LONG / default double) so the void* buffer
// is offset with the correct element size, and on the communication mode:
// ISend (non-blocking) vs send (blocking).  NOTE(review): elided view — the
// branch on _asynchronous selecting ISend vs send is not visible here.
140 int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset ,
141 MPI_Datatype sendtype , int target , int &SendRequestId )
146 if ( sendtype == MPI_INT )
148 sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
149 target , SendRequestId ) ;
151 else if ( sendtype == MPI_LONG )
153 sts = _MPI_access->ISend( &((long *) sendbuf)[offset] , sendcount , sendtype ,
154 target , SendRequestId ) ;
// Fallback: any other datatype is addressed as double.
158 sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
159 target , SendRequestId ) ;
164 if ( sendtype == MPI_INT )
166 sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
167 target , SendRequestId ) ;
169 else if ( sendtype == MPI_LONG )
171 sts = _MPI_access->send( &((long *) sendbuf)[offset] , sendcount , sendtype ,
172 target , SendRequestId ) ;
176 sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
177 target , SendRequestId ) ;
184 Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
185 (Internal Protected method)
187 Returns the request identifier RecvRequestId
// Internal protected receive of recvcount items into recvbuf[offset] from
// `target`; the request id is returned through RecvRequestId.  Mirrors send():
// datatype dispatch (MPI_INT / MPI_LONG / default double) for correct pointer
// arithmetic on the void* buffer, IRecv (non-blocking) vs recv (blocking).
// NOTE(review): elided view — the branch on _asynchronous is not visible.
190 int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset ,
191 MPI_Datatype recvtype , int target , int &RecvRequestId )
196 if ( recvtype == MPI_INT )
198 sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
199 target , RecvRequestId ) ;
201 else if ( recvtype == MPI_LONG )
203 sts = _MPI_access->IRecv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
204 target , RecvRequestId ) ;
// Fallback: any other datatype is addressed as double.
208 sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
209 target , RecvRequestId ) ;
214 if ( recvtype == MPI_INT )
216 sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
217 target , RecvRequestId ) ;
219 else if ( recvtype == MPI_LONG )
221 sts = _MPI_access->recv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
222 target , RecvRequestId ) ;
226 sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
227 target , RecvRequestId ) ;
234 Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
235 Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
236 (Internal Protected method)
238 Returns the request identifier SendRequestId
239 Returns the request identifier RecvRequestId
// Internal protected combined send/receive with the same `target`: sendcount
// items from sendbuf[sendoffset] out, recvcount items into recvbuf[recvoffset]
// in.  Both request ids are returned by reference.  The four visible branches
// enumerate the (sendtype, recvtype) combinations over MPI_INT / double so
// each void* buffer gets the right element size; ISendRecv is the
// non-blocking variant, sendRecv the blocking one.  NOTE(review): elided view
// — the _asynchronous branch and MPI_LONG combinations (if any) are not
// visible here.
242 int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset ,
243 MPI_Datatype sendtype ,
244 void* recvbuf, int recvcount , int recvoffset ,
245 MPI_Datatype recvtype , int target ,
246 int &SendRequestId , int &RecvRequestId )
251 if ( sendtype == MPI_INT )
253 if ( recvtype == MPI_INT )
255 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
256 sendtype , target , SendRequestId ,
257 &((int *) recvbuf)[recvoffset] , recvcount ,
258 recvtype , target , RecvRequestId ) ;
262 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
263 sendtype , target , SendRequestId ,
264 &((double *) recvbuf)[recvoffset] ,
265 recvcount , recvtype , target , RecvRequestId ) ;
270 if ( recvtype == MPI_INT )
272 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
273 sendtype , target , SendRequestId ,
274 &((int *) recvbuf)[recvoffset] ,
275 recvcount , recvtype , target , RecvRequestId ) ;
279 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
280 sendtype , target , SendRequestId ,
281 &((double *) recvbuf)[recvoffset] ,
282 recvcount , recvtype , target , RecvRequestId ) ;
// Blocking (synchronous) variants of the same four combinations.
288 if ( sendtype == MPI_INT )
290 if ( recvtype == MPI_INT )
292 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
293 sendtype , target , SendRequestId ,
294 &((int *) recvbuf)[recvoffset] , recvcount ,
295 recvtype , target , RecvRequestId ) ;
299 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
300 sendtype , target , SendRequestId ,
301 &((double *) recvbuf)[recvoffset] ,
302 recvcount , recvtype , target , RecvRequestId ) ;
307 if ( recvtype == MPI_INT )
309 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
310 sendtype , target , SendRequestId ,
311 &((int *) recvbuf)[recvoffset] ,
312 recvcount , recvtype , target , RecvRequestId ) ;
316 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
317 sendtype , target , SendRequestId ,
318 &((double *) recvbuf)[recvoffset] ,
319 recvcount , recvtype , target , RecvRequestId ) ;
327 Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
328 Receive recvcount datas to recvbuf[offset] with type recvtype from all targets of IntraCommunicator
// All-to-all exchange: sends sendcount items to each target (consecutive
// slices of sendbuf) and receives recvcount items from each (consecutive
// slices of recvbuf).  Delegates to allToAllTime() when a time interpolator
// is installed.  In asynchronous mode a single SendBuffStruct reference-counts
// sendbuf across all targets (freed later by checkSent via
// _map_of_send_buffers); in synchronous mode sendbuf is deleted here.
// NOTE(review): elided view — braces and some statements are missing.
331 int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
332 void* recvbuf, int recvcount, MPI_Datatype recvtype )
334 if ( _time_interpolator )
336 return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
345 //Free of SendBuffers
349 //DoSend + DoRecv : SendRecv
350 SendBuffStruct * aSendDataStruct = NULL ;
351 if ( _asynchronous && sendbuf )
// One shared struct for the whole buffer; Counter counts outstanding sends.
353 aSendDataStruct = new SendBuffStruct ;
354 aSendDataStruct->SendBuffer = sendbuf ;
355 aSendDataStruct->Counter = 0 ;
356 aSendDataStruct->DataType = sendtype ;
358 for ( target = 0 ; target < _group_size ; target++ )
360 sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype ,
361 recvbuf , recvcount , recvoffset , recvtype ,
362 target , SendRequestId , RecvRequestId ) ;
363 if ( _asynchronous && sendbuf && sendcount )
// Register this request so checkSent can decrement/free later.
365 aSendDataStruct->Counter += 1 ;
366 (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
368 sendoffset += sendcount ;
369 recvoffset += recvcount ;
// Synchronous mode: the sends are complete, so sendbuf can be freed now.
// The delete cast depends on the datatype (see the SendBuffers notes below).
371 if ( !_asynchronous && sendbuf )
373 if ( sendtype == MPI_INT )
375 delete [] (int *) sendbuf ;
379 delete [] (double *) sendbuf ;
386 Send sendcounts[target] datas from sendbuf[sdispls[target]] with type sendtype to all targets of IntraCommunicator
387 Receive recvcounts[target] datas to recvbuf[rdispls[target]] with type recvtype from all targets of IntraCommunicator
// Vector all-to-all: per-target counts and displacements (sendcounts/sdispls,
// recvcounts/rdispls), analogous to MPI_Alltoallv.  Delegates to
// allToAllvTime() when a time interpolator is installed.  Targets with both
// counts zero are skipped.  Asynchronous-mode buffer ownership is handled
// exactly as in allToAll() via a shared, reference-counted SendBuffStruct.
// NOTE(review): elided view — braces and some statements are missing.
390 int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls,
391 MPI_Datatype sendtype ,
392 void* recvbuf, int* recvcounts, int* rdispls,
393 MPI_Datatype recvtype )
395 if ( _time_interpolator )
397 return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
398 recvbuf, recvcounts, rdispls, recvtype ) ;
405 //Free of SendBuffers
411 //DoSend + DoRecv : SendRecv
412 SendBuffStruct * aSendDataStruct = NULL ;
413 if ( _asynchronous && sendbuf )
415 aSendDataStruct = new SendBuffStruct ;
416 aSendDataStruct->SendBuffer = sendbuf ;
417 aSendDataStruct->Counter = 0 ;
418 aSendDataStruct->DataType = sendtype ;
420 for ( target = 0 ; target < _group_size ; target++ )
// Only exchange with targets that actually send or receive something.
422 if ( sendcounts[target] || recvcounts[target] )
424 sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
425 recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
426 target , SendRequestId , RecvRequestId ) ;
427 if ( _asynchronous && sendbuf && sendcounts[target])
429 aSendDataStruct->Counter += 1 ;
430 (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
// Synchronous mode: all sends are complete, free sendbuf here.
434 if ( !_asynchronous && sendbuf )
436 if ( sendtype == MPI_INT )
438 delete [] (int *) sendbuf ;
442 delete [] (double *) sendbuf ;
449 MPIAccessDEC and the management of SendBuffers :
450 =================================================
452 . In the collective communications we send only parts of
453 the same buffer to each "target". So in asynchronous mode it is
454 necessary that all parts are free before to delete/free the
457 . We assume that buffers are allocated with a new double[]. so a
460 . The structure SendBuffStruct permits keeping the address of the buffer
461 and to manage a reference counter of that buffer. It contains
462 also MPI_Datatype for the delete [] (double *) ... when the counter
465 . The map _MapOfSendBuffers establish the correspondence between each
466 RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
467 for each "target" of a part of the buffer.
469 . All that concerns only asynchronous Send. In synchronous mode,
470 we delete sendbuf just after the Send.
474 MPIAccessDEC and the management of RecvBuffers :
475 =================================================
477 If there is no interpolation, no special action is done.
479 With interpolation for each target :
480 ------------------------------------
481 . We have _time_messages[target] which is a vector of TimesMessages.
482 We have 2 TimesMessages in our case with a linear interpolation.
483 They contain the previous time(t0)/deltatime and the last
486 . We have _data_messages[target] which is a vector of DatasMessages.
487 We have 2 DatasMessages in our case with a linear interpolation.
488 They contain the previous datas at time(t0)/deltatime and at last
491 . At time _t(t*) of current processus we do the interpolation of
492 the values of the 2 DatasMessages which are returned in the part of
493 recvbuf corresponding to the target with t0 < t* <= t1.
495 . Because of the difference of "deltatimes" between processes, we
496 may have t0 < t1 < t* and there is an extrapolation.
498 . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
499 contain for each target true if t* > last t1, recvcount and
500 MPI_Datatype for the finalize of messages at the end.
504 Send a TimeMessage to all targets of IntraCommunicator
505 Receive the TimeMessages from targets of IntraCommunicator if necessary.
507 Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
508 Returns recvcount datas to recvbuf[offset] with type recvtype after an interpolation
509 with datas received from all targets of IntraCommunicator.
// Time-stamped all-to-all: sends a TimeMessage (current _t/_dt) followed by
// the data slice to every target, then for each target receives/advances its
// time window (checkTime) and interpolates the two buffered data messages at
// the local time _t into recvbuf.  Requires a time interpolator (returns
// MPI_ERR_OTHER otherwise).  In asynchronous mode both the TimeMessage and
// sendbuf are reference-counted in _map_of_send_buffers and freed later by
// checkSent.  NOTE(review): elided view — braces/else lines are missing.
512 int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
513 void* recvbuf, int recvcount , MPI_Datatype recvtype )
518 int SendTimeRequestId ;
519 int SendDataRequestId ;
521 if ( _time_interpolator == NULL )
// Misuse: this path only makes sense with an interpolator installed.
523 return MPI_ERR_OTHER ;
526 //Free of SendBuffers
532 //DoSend : Time + SendBuff
533 SendBuffStruct * aSendTimeStruct = NULL ;
534 SendBuffStruct * aSendDataStruct = NULL ;
535 if ( sendbuf && sendcount )
537 TimeMessage * aSendTimeMessage = new TimeMessage ;
// Two ref-counted structs: one for the TimeMessage, one for the data buffer.
540 aSendTimeStruct = new SendBuffStruct ;
541 aSendTimeStruct->SendBuffer = aSendTimeMessage ;
542 aSendTimeStruct->Counter = 0 ;
543 aSendTimeStruct->DataType = _MPI_access->timeType() ;
544 aSendDataStruct = new SendBuffStruct ;
545 aSendDataStruct->SendBuffer = sendbuf ;
546 aSendDataStruct->Counter = 0 ;
547 aSendDataStruct->DataType = sendtype ;
// Stamp the message with the local current time and time step.
549 aSendTimeMessage->time = _t ;
550 aSendTimeMessage->deltatime = _dt ;
551 for ( target = 0 ; target < _group_size ; target++ )
// Time message first, then the data slice for this target.
553 sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
554 SendTimeRequestId ) ;
555 sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ;
558 aSendTimeStruct->Counter += 1 ;
559 (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
560 aSendDataStruct->Counter += 1 ;
561 (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
563 sendoffset += sendcount ;
// Synchronous mode: everything was sent, free the message and the buffer.
565 if ( !_asynchronous )
567 delete aSendTimeMessage ;
568 if ( sendtype == MPI_INT )
570 delete [] (int *) sendbuf ;
574 delete [] (double *) sendbuf ;
579 //CheckTime + DoRecv + DoInterp
580 if ( recvbuf && recvcount )
582 for ( target = 0 ; target < _group_size ; target++ )
584 int recvsize = (int)(recvcount*_MPI_access->extent( recvtype ));
585 checkTime( recvcount , recvtype , target , false ) ;
586 //===========================================================================
587 //TODO : it is assumed actually that we have only 1 timestep before and after
588 //===========================================================================
// time != -1 means we already have a previous message to interpolate from.
589 if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
591 if ( (*_out_of_time)[target] )
// Diagnostic trace: the local time is beyond the target's last message
// (t0 < t1 < t*), i.e. we are extrapolating.
593 cout << " =====================================================" << endl
594 << "Recv" << _my_rank << " <-- target " << target << " t0 "
595 << (*_time_messages)[target][0].time << " < t1 "
596 << (*_time_messages)[target][1].time << " < t* " << _t << endl
597 << " =====================================================" << endl ;
599 if ( recvtype == MPI_INT )
601 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
602 (*_time_messages)[target][1].time, _t, recvcount ,
603 _n_step_before, _n_step_after,
604 (int **) &(*_data_messages)[target][0],
605 (int **) &(*_data_messages)[target][1],
606 &((int *)recvbuf)[target*recvcount] ) ;
610 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
611 (*_time_messages)[target][1].time, _t, recvcount ,
612 _n_step_before, _n_step_after,
613 (double **) &(*_data_messages)[target][0],
614 (double **) &(*_data_messages)[target][1],
615 &((double *)recvbuf)[target*recvcount] ) ;
// First time step for this target: no interpolation possible, copy the
// latest received data message straight into the recv slice.
620 char * buffdest = (char *) recvbuf ;
621 char * buffsrc = (char *) (*_data_messages)[target][1] ;
622 memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
// Vector variant of allToAllTime(): per-target counts/displacements, same
// time-stamped protocol (TimeMessage + data per target, checkTime + interp or
// first-step copy on receive).  Requires a time interpolator (returns
// MPI_ERR_OTHER otherwise).  NOTE(review): elided view — braces/else lines
// are missing between the numbered statements.
630 int MPIAccessDEC::allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
631 MPI_Datatype sendtype ,
632 void* recvbuf, int* recvcounts, int* rdispls,
633 MPI_Datatype recvtype )
637 int SendTimeRequestId ;
638 int SendDataRequestId ;
640 if ( _time_interpolator == NULL )
642 return MPI_ERR_OTHER ;
645 //Free of SendBuffers
653 + We create a TimeMessage (look at that structure in MPI_Access).
654 + If we are in asynchronous mode, we create two structures SendBuffStruct
655 aSendTimeStruct and aSendDataStruct that we fill.
656 + We fill the structure aSendTimeMessage with time/deltatime of
657 the current process. "deltatime" must be nul if it is the last step of
659 + After that for each "target", we Send the TimeMessage and the part
660 of sendbuf corresponding to that target.
661 + If we are in asynchronous mode, we increment the counter and we add
662 aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the
663 identifiers SendTimeRequestId and SendDataRequestId returned by
664 MPI_Access->Send(...).
665 + And if we are in synchronous mode we delete the SendMessages.
667 //DoSend : Time + SendBuff
668 SendBuffStruct * aSendTimeStruct = NULL ;
669 SendBuffStruct * aSendDataStruct = NULL ;
672 TimeMessage * aSendTimeMessage = new TimeMessage ;
// Two ref-counted structs: one for the TimeMessage, one for the data buffer.
675 aSendTimeStruct = new SendBuffStruct ;
676 aSendTimeStruct->SendBuffer = aSendTimeMessage ;
677 aSendTimeStruct->Counter = 0 ;
678 aSendTimeStruct->DataType = _MPI_access->timeType() ;
679 aSendDataStruct = new SendBuffStruct ;
680 aSendDataStruct->SendBuffer = sendbuf ;
681 aSendDataStruct->Counter = 0 ;
682 aSendDataStruct->DataType = sendtype ;
684 aSendTimeMessage->time = _t ;
685 aSendTimeMessage->deltatime = _dt ;
686 for ( target = 0 ; target < _group_size ; target++ )
// Skip targets with nothing to send.
688 if ( sendcounts[target] )
690 sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
691 SendTimeRequestId ) ;
692 sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
693 SendDataRequestId ) ;
696 aSendTimeStruct->Counter += 1 ;
697 (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
698 aSendDataStruct->Counter += 1 ;
699 (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
// Synchronous mode: everything was sent, free the message and the buffer.
703 if ( !_asynchronous )
705 delete aSendTimeMessage ;
706 if ( sendtype == MPI_INT )
708 delete [] (int *) sendbuf ;
712 delete [] (double *) sendbuf ;
718 . CheckTime + DoRecv + DoInterp
719 + For each target we call CheckTime
720 + If there is a TimeInterpolator and if the TimeMessage of the target
721 is not the first, we call the interpolator which return its
722 results in the part of the recv buffer corresponding to the "target".
723 + If not, there is a copy of received datas for that first step of time
724 in the part of the recv buffer corresponding to the "target".
726 //CheckTime + DoRecv + DoInterp
729 for ( target = 0 ; target < _group_size ; target++ )
731 if ( recvcounts[target] )
733 int recvsize = (int)(recvcounts[target]*_MPI_access->extent( recvtype ));
734 checkTime( recvcounts[target] , recvtype , target , false ) ;
735 //===========================================================================
736 //TODO : it is assumed actually that we have only 1 timestep before and after
737 //===========================================================================
// time != -1 means a previous message exists: interpolate; else first-step copy.
738 if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
740 if ( (*_out_of_time)[target] )
// Diagnostic trace: local time is beyond the target's last message
// (extrapolation case).
742 cout << " =====================================================" << endl
743 << "Recv" << _my_rank << " <-- target " << target << " t0 "
744 << (*_time_messages)[target][0].time << " < t1 "
745 << (*_time_messages)[target][1].time << " < t* " << _t << endl
746 << " =====================================================" << endl ;
748 if ( recvtype == MPI_INT )
750 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
751 (*_time_messages)[target][1].time, _t,
752 recvcounts[target] , _n_step_before, _n_step_after,
753 (int **) &(*_data_messages)[target][0],
754 (int **) &(*_data_messages)[target][1],
755 &((int *)recvbuf)[rdispls[target]] ) ;
759 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
760 (*_time_messages)[target][1].time, _t,
761 recvcounts[target] , _n_step_before, _n_step_after,
762 (double **) &(*_data_messages)[target][0],
763 (double **) &(*_data_messages)[target][1],
764 &((double *)recvbuf)[rdispls[target]] ) ;
// First time step for this target: copy the latest received data message
// into the recv slice at its displacement.
769 char * buffdest = (char *) recvbuf ;
770 char * buffsrc = (char *) (*_data_messages)[target][1] ;
771 memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc ,
782 . CheckTime(recvcount , recvtype , target , UntilEnd)
783 + At the beginning, we read the first TimeMessage in
784 &(*_TimeMessages)[target][1] and the first DataMessage
785 in the allocated buffer (*_DataMessages)[target][1].
786 + deltatime of TimesMessages must be nul if it is the last one.
787 + While : _t(t*) is the current time of the processus.
788 "while _t(t*) is greater than the time of the "target"
789 (*_TimeMessages)[target][1].time and
790 (*_TimeMessages)[target][1].deltatime is not nul",
791 So at the end of the while we have :
792 _t(t*) <= (*_TimeMessages)[target][1].time with
793 _t(t*) > (*_TimeMessages)[target][0].time
794 or we have the last TimeMessage of the "target".
795 + If it is the finalization of the recv of TimeMessages and
796 DataMessages (UntilEnd value is true), we execute the while
797 until (*_TimeMessages)[target][1].deltatime is nul.
799 We copy the last TimeMessage in the previous TimeMessage and
800 we read a new TimeMessage
801 We delete the previous DataMessage.
802 We copy the last DataMessage pointer in the previous one.
803 We allocate a new last DataMessage buffer
804 (*_DataMessages)[target][1] and we read the corresponding
806 + If the current time of the current process is greater than the
807 last time (*_TimeMessages)[target][1].time du target, we give
808 a true value to (*_OutOfTime)[target].
809 (*_TimeMessages)[target][1].deltatime is nul.
// Advances the time/data message window for `target` until the local time _t
// falls inside (t0, t1], i.e. _time_messages[target][0].time < _t <=
// _time_messages[target][1].time, or until the target's last message
// (deltatime == 0) is reached.  With UntilEnd true (finalization) it drains
// all remaining messages.  Also records recvcount/recvtype for the later
// finalization (checkFinalRecv).  NOTE(review): elided view — braces and
// some trace-guard lines are missing.
811 int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target ,
814 int sts = MPI_SUCCESS ;
815 int RecvTimeRequestId ;
816 int RecvDataRequestId ;
817 //For now we look for _time_messages[target][0] < _t <= _time_messages[target][1]
818 //===========================================================================
819 //TODO : it is assumed actually that we have only 1 timestep before and after
820 // instead of _n_step_before and _n_step_after ...
821 //===========================================================================
// Remembered for checkFinalRecv, which must drain with the same count/type.
822 (*_data_messages_recv_count)[target] = recvcount ;
823 (*_data_messages_type)[target] = recvtype ;
// First call for this target (slot [1] still empty): bootstrap the window
// by receiving the first TimeMessage and the first data message.
824 if ( (*_time_messages)[target][1].time == -1 )
826 (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
827 sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
828 target , RecvTimeRequestId ) ;
829 (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
830 if ( recvtype == MPI_INT )
832 (*_data_messages)[target][1] = new int[recvcount] ;
836 (*_data_messages)[target][1] = new double[recvcount] ;
838 sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
839 RecvDataRequestId ) ;
// Slide the window forward: while _t is past the last received time (or we
// are draining) and the target has not sent its final message (deltatime 0).
843 while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) &&
844 (*_time_messages)[target][1].deltatime != 0 )
846 (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
847 sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
848 target , RecvTimeRequestId ) ;
851 cout << "CheckTime" << _my_rank << " TimeMessage target " << target
852 << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag "
853 << _MPI_access->recvMPITag(target) << endl ;
// Free the outdated previous data message, shift [1] -> [0], allocate a
// fresh buffer for the new [1] and receive into it.
855 if ( recvtype == MPI_INT )
857 delete [] (int *) (*_data_messages)[target][0] ;
861 delete [] (double *) (*_data_messages)[target][0] ;
863 (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
864 if ( recvtype == MPI_INT )
866 (*_data_messages)[target][1] = new int[recvcount] ;
870 (*_data_messages)[target][1] = new double[recvcount] ;
872 sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
873 RecvDataRequestId ) ;
876 cout << "CheckTime" << _my_rank << " DataMessage target " << target
877 << " RecvDataRequestId " << RecvDataRequestId << " MPITag "
878 << _MPI_access->recvMPITag(target) << endl ;
// If _t is not bracketed by (t0, t1], we ran past the target's last
// message: flag it as out of time (extrapolation will be needed).
882 if ( _t > (*_time_messages)[target][0].time &&
883 _t <= (*_time_messages)[target][1].time )
888 (*_out_of_time)[target] = true ;
896 + call SendRequestIds of MPI_Access in order to get all
897 RequestIds of SendMessages of all "targets".
898 + For each RequestId, CheckSent call "Test" of MPI_Access in order
899 to know if the buffer is "free" (flag = true). If it is the
900 FinalCheckSent (WithWait = true), we call Wait instead of Test.
901 + If the buffer is "free", the counter of the structure SendBuffStruct
902 (from _MapOfSendBuffers) is decremented.
903 + If that counter is nul we delete the TimeMessage or the
904 SendBuffer according to the DataType.
905 + And we delete the structure SendBuffStruct before the suppression
906 (erase) of that item of _MapOfSendBuffers
// Garbage-collects completed asynchronous sends.  For every outstanding send
// request: wait (WithWait=true, final flush) or test for completion; when a
// request is done, delete it, decrement the reference counter of its
// SendBuffStruct in _map_of_send_buffers, and when the counter reaches zero
// free the underlying TimeMessage or data buffer (cast chosen from DataType)
// plus the struct itself, and erase the map entry.  NOTE(review): elided view
// — braces, SendTrace guards and some statements are missing.
908 int MPIAccessDEC::checkSent(bool WithWait)
910 int sts = MPI_SUCCESS ;
// flag starts true when waiting: wait() guarantees completion.
911 int flag = WithWait ;
912 int size = _MPI_access->sendRequestIdsSize() ;
// Snapshot of all pending send request ids.
913 int * ArrayOfSendRequests = new int[ size ] ;
914 int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ;
915 bool SendTrace = false ;
917 for ( i = 0 ; i < nSendRequest ; i++ )
923 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
924 << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
925 << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
926 << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
928 sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
932 sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ;
// Request completed: release it and drop one reference on its buffer.
936 _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ;
939 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
940 << " SendRequestId " << ArrayOfSendRequests[i]
942 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
943 << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
946 (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
949 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
950 _MPI_access->timeType() )
952 cout << "CheckTimeSent" << _my_rank << " Request " ;
956 cout << "CheckDataSent" << _my_rank << " Request " ;
958 cout << ArrayOfSendRequests[i]
959 << " _map_of_send_buffers->SendBuffer "
960 << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
961 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
// Last reference gone: free the buffer with the delete form matching its
// DataType (TimeMessage / int[] / double[]), then the struct and map entry.
964 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter == 0 )
968 cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i]
969 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
970 << " flag " << flag << " SendBuffer "
971 << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
972 << " deleted. Erase in _map_of_send_buffers :" << endl ;
974 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
975 _MPI_access->timeType() )
977 delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
981 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT )
983 delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
987 delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
990 delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ;
994 cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId "
995 << ArrayOfSendRequests[i] << endl ;
997 (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ;
999 else if ( SendTrace )
1001 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
1002 << " SendRequestId " << ArrayOfSendRequests[i]
1004 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
1005 << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
1011 _MPI_access->check() ;
1013 delete [] ArrayOfSendRequests ;
// Finalization of receives when a time interpolator is active: for each
// target, drains the remaining time/data messages (checkTime with the stored
// recv count/type) and frees both buffered data messages, using the delete
// form matching the stored MPI datatype.  NOTE(review): elided view — braces
// and the UntilEnd argument of the checkTime call are not visible here.
1017 int MPIAccessDEC::checkFinalRecv()
1019 int sts = MPI_SUCCESS ;
1020 if ( _time_interpolator )
1023 for ( target = 0 ; target < _group_size ; target++ )
1025 if ( (*_data_messages)[target][0] != NULL )
// Drain with the count/type remembered by the last checkTime call.
1027 sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] ,
1029 if ( (*_data_messages_type)[target] == MPI_INT )
1031 delete [] (int *) (*_data_messages)[target][0] ;
1035 delete [] (double *) (*_data_messages)[target][0] ;
1037 (*_data_messages)[target][0] = NULL ;
1038 if ( (*_data_messages)[target][1] != NULL )
1040 if ( (*_data_messages_type)[target] == MPI_INT )
1042 delete [] (int *) (*_data_messages)[target][1] ;
1046 delete [] (double *) (*_data_messages)[target][1] ;
1048 (*_data_messages)[target][1] = NULL ;
1056 ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod )
1058 switch (interpolationmethod)
1060 case WithoutTimeInterp :
1061 f << " WithoutTimeInterpolation ";
1063 case LinearTimeInterp :
1064 f << " LinearTimeInterpolation ";
1067 f << " UnknownTimeInterpolation ";