1 // Copyright (C) 2007-2008 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "MPIAccessDEC.hxx"
28 This constructor creates an MPIAccessDEC which has \a source_group as a working side
29 and \a target_group as an idle side.
30 The constructor must be called synchronously on all processors of both processor groups.
32 \param source_group working side ProcessorGroup
33 \param target_group lazy side ProcessorGroup
34 \param Asynchronous Communication mode (default asynchronous)
35 \param nStepBefore Number of Time step needed for the interpolation before current time
36 \param nStepAfter Number of Time step needed for the interpolation after current time
40 MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
41 const ProcessorGroup& target_group,
45 ProcessorGroup * union_group = source_group.fuse(target_group) ;
48 for ( i = 0 ; i < union_group->size() ; i++ )
52 MPIProcessorGroup *mpilg = (MPIProcessorGroup *)&source_group;
53 _MPI_union_group = new ParaMEDMEM::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
55 _my_rank = _MPI_union_group->myRank() ;
56 _group_size = _MPI_union_group->size() ;
57 _MPI_access = new MPIAccess( _MPI_union_group ) ;
58 _asynchronous = Asynchronous ;
59 _time_messages = new vector< vector< TimeMessage > > ;
60 _time_messages->resize( _group_size ) ;
61 _out_of_time = new vector< bool > ;
62 _out_of_time->resize( _group_size ) ;
63 _data_messages_recv_count = new vector< int > ;
64 _data_messages_recv_count->resize( _group_size ) ;
65 for ( i = 0 ; i < _group_size ; i++ )
67 (*_out_of_time)[i] = false ;
68 (*_data_messages_recv_count)[i] = 0 ;
70 _data_messages_type = new vector< MPI_Datatype > ;
71 _data_messages_type->resize( _group_size ) ;
72 _data_messages = new vector< vector< void * > > ;
73 _data_messages->resize( _group_size ) ;
74 _time_interpolator = NULL ;
75 _map_of_send_buffers = new map< int , SendBuffStruct * > ;
76 cout << "MPIAccessDEC" << _my_rank << " Asynchronous " << _asynchronous << endl ;
79 MPIAccessDEC::~MPIAccessDEC()
83 delete _MPI_union_group ;
85 if ( _time_interpolator )
86 delete _time_interpolator ;
88 delete _time_messages ;
91 if ( _data_messages_recv_count )
92 delete _data_messages_recv_count ;
93 if ( _data_messages_type )
94 delete _data_messages_type ;
96 delete _data_messages ;
97 if ( _map_of_send_buffers )
98 delete _map_of_send_buffers ;
101 void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
102 double InterpPrecision, int nStepBefore,
105 cout << "MPIAccessDEC::SetTimeInterpolator" << _my_rank << " Asynchronous "
106 << _asynchronous << " TimeInterpolationMethod " << aTimeInterp
107 << " InterpPrecision " << InterpPrecision << " nStepBefore " << nStepBefore
108 << " nStepAfter " << nStepAfter << endl ;
109 if ( _time_interpolator )
110 delete _time_interpolator ;
111 switch ( aTimeInterp )
113 case WithoutTimeInterp :
114 _time_interpolator = NULL ;
118 case LinearTimeInterp :
119 _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
121 _n_step_before = nStepBefore ;
122 _n_step_after = nStepAfter ;
124 for ( i = 0 ; i < _group_size ; i++ )
126 (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
127 (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
129 for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ )
131 (*_time_messages)[ i ][ j ].time = -1 ;
132 (*_time_messages)[ i ][ j ].deltatime = -1 ;
133 (*_data_messages)[ i ][ j ] = NULL ;
141 Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
142 (Internal Protected method)
144 Returns the request identifier SendRequestId
147 int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset ,
148 MPI_Datatype sendtype , int target , int &SendRequestId )
153 if ( sendtype == MPI_INT )
155 sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
156 target , SendRequestId ) ;
160 sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
161 target , SendRequestId ) ;
166 if ( sendtype == MPI_INT )
168 sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
169 target , SendRequestId ) ;
173 sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
174 target , SendRequestId ) ;
181 Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
182 (Internal Protected method)
184 Returns the request identifier RecvRequestId
187 int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset ,
188 MPI_Datatype recvtype , int target , int &RecvRequestId )
193 if ( recvtype == MPI_INT )
195 sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
196 target , RecvRequestId ) ;
200 sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
201 target , RecvRequestId ) ;
206 if ( recvtype == MPI_INT )
208 sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
209 target , RecvRequestId ) ;
213 sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
214 target , RecvRequestId ) ;
221 Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
222 Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
223 (Internal Protected method)
225 Returns the request identifier SendRequestId
226 Returns the request identifier RecvRequestId
229 int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset ,
230 MPI_Datatype sendtype ,
231 void* recvbuf, int recvcount , int recvoffset ,
232 MPI_Datatype recvtype , int target ,
233 int &SendRequestId , int &RecvRequestId )
238 if ( sendtype == MPI_INT )
240 if ( recvtype == MPI_INT )
242 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
243 sendtype , target , SendRequestId ,
244 &((int *) recvbuf)[recvoffset] , recvcount ,
245 recvtype , target , RecvRequestId ) ;
249 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
250 sendtype , target , SendRequestId ,
251 &((double *) recvbuf)[recvoffset] ,
252 recvcount , recvtype , target , RecvRequestId ) ;
257 if ( recvtype == MPI_INT )
259 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
260 sendtype , target , SendRequestId ,
261 &((int *) recvbuf)[recvoffset] ,
262 recvcount , recvtype , target , RecvRequestId ) ;
266 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
267 sendtype , target , SendRequestId ,
268 &((double *) recvbuf)[recvoffset] ,
269 recvcount , recvtype , target , RecvRequestId ) ;
275 if ( sendtype == MPI_INT )
277 if ( recvtype == MPI_INT )
279 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
280 sendtype , target , SendRequestId ,
281 &((int *) recvbuf)[recvoffset] , recvcount ,
282 recvtype , target , RecvRequestId ) ;
286 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
287 sendtype , target , SendRequestId ,
288 &((double *) recvbuf)[recvoffset] ,
289 recvcount , recvtype , target , RecvRequestId ) ;
294 if ( recvtype == MPI_INT )
296 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
297 sendtype , target , SendRequestId ,
298 &((int *) recvbuf)[recvoffset] ,
299 recvcount , recvtype , target , RecvRequestId ) ;
303 cout << "SendRecv" << _my_rank << " target " << target << " sendbuf "
304 << &((double *) sendbuf)[sendoffset] << " sendcount " << sendcount
305 << " recvbuf " << &((double *) recvbuf)[recvoffset] << " recvcount "
306 << recvcount << endl ;
307 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
308 sendtype , target , SendRequestId ,
309 &((double *) recvbuf)[recvoffset] ,
310 recvcount , recvtype , target , RecvRequestId ) ;
318 Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
319 Receive recvcount datas to recvbuf[offset] with type recvtype from all targets of IntraCommunicator
322 int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
323 void* recvbuf, int recvcount, MPI_Datatype recvtype )
325 if ( _time_interpolator )
327 return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
336 //Free of SendBuffers
340 //DoSend + DoRecv : SendRecv
341 SendBuffStruct * aSendDataStruct = NULL ;
342 if ( _asynchronous && sendbuf )
344 aSendDataStruct = new SendBuffStruct ;
345 aSendDataStruct->SendBuffer = sendbuf ;
346 aSendDataStruct->Counter = 0 ;
347 aSendDataStruct->DataType = sendtype ;
349 for ( target = 0 ; target < _group_size ; target++ )
351 sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype ,
352 recvbuf , recvcount , recvoffset , recvtype ,
353 target , SendRequestId , RecvRequestId ) ;
354 if ( _asynchronous && sendbuf && sendcount )
356 aSendDataStruct->Counter += 1 ;
357 (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
359 sendoffset += sendcount ;
360 recvoffset += recvcount ;
362 if ( !_asynchronous && sendbuf )
364 if ( sendtype == MPI_INT )
366 delete [] (int *) sendbuf ;
370 delete [] (double *) sendbuf ;
377 Send sendcounts[target] datas from sendbuf[sdispls[target]] with type sendtype to all targets of IntraCommunicator
378 Receive recvcounts[target] datas to recvbuf[rdispls[target]] with type recvtype from all targets of IntraCommunicator
381 int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls,
382 MPI_Datatype sendtype ,
383 void* recvbuf, int* recvcounts, int* rdispls,
384 MPI_Datatype recvtype )
386 if ( _time_interpolator )
388 return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
389 recvbuf, recvcounts, rdispls, recvtype ) ;
396 //Free of SendBuffers
402 //DoSend + DoRecv : SendRecv
403 SendBuffStruct * aSendDataStruct = NULL ;
404 if ( _asynchronous && sendbuf )
406 aSendDataStruct = new SendBuffStruct ;
407 aSendDataStruct->SendBuffer = sendbuf ;
408 aSendDataStruct->Counter = 0 ;
409 aSendDataStruct->DataType = sendtype ;
411 for ( target = 0 ; target < _group_size ; target++ )
413 if ( sendcounts[target] || recvcounts[target] )
415 sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
416 recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
417 target , SendRequestId , RecvRequestId ) ;
418 if ( _asynchronous && sendbuf && sendcounts[target])
420 aSendDataStruct->Counter += 1 ;
421 (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
425 if ( !_asynchronous && sendbuf )
427 if ( sendtype == MPI_INT )
429 delete [] (int *) sendbuf ;
433 delete [] (double *) sendbuf ;
440 MPIAccessDEC and the management of SendBuffers :
441 =================================================
443 . In the collective communications we send only parts of
444 the same buffer to each "target". So in asynchronous mode it is
445 necessary that all parts are free before deleting/freeing the
448 . We assume that buffers are allocated with a new double[]. so a
451 . The structure SendBuffStruct permits to keep the address of the buffer
452 and to manage a reference counter of that buffer. It contains
453 also MPI_Datatype for the delete [] (double *) ... when the counter
456 . The map _MapOfSendBuffers establishes the correspondence between each
457 RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
458 for each "target" of a part of the buffer.
460 . All that concerns only asynchronous Send. In synchronous mode,
461 we delete sendbuf just after the Send.
465 MPIAccessDEC and the management of RecvBuffers :
466 =================================================
468 If there is no interpolation, no special action is done.
470 With interpolation for each target :
471 ------------------------------------
472 . We have _time_messages[target] which is a vector of TimesMessages.
473 We have 2 TimesMessages in our case with a linear interpolation.
474 They contain the previous time(t0)/deltatime and the last
477 . We have _data_messages[target] which is a vector of DatasMessages.
478 We have 2 DatasMessages in our case with a linear interpolation.
479 They contain the previous datas at time(t0)/deltatime and at last
482 . At time _t(t*) of the current process we do the interpolation of
483 the values of the 2 DatasMessages which are returned in the part of
484 recvbuf corresponding to the target with t0 < t* <= t1.
486 . Because of the difference of "deltatimes" between processes, we
487 may have t0 < t1 < t* and there is an extrapolation.
489 . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
490 contain for each target true if t* > last t1, recvcount and
491 MPI_Datatype for the finalize of messages at the end.
495 Send a TimeMessage to all targets of IntraCommunicator
496 Receive the TimeMessages from targets of IntraCommunicator if necessary.
498 Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
499 Returns recvcount datas to recvbuf[offset] with type recvtype after an interpolation
500 with datas received from all targets of IntraCommunicator.
503 int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
504 void* recvbuf, int recvcount , MPI_Datatype recvtype )
509 int SendTimeRequestId ;
510 int SendDataRequestId ;
512 if ( _time_interpolator == NULL )
514 return MPI_ERR_OTHER ;
517 //Free of SendBuffers
523 //DoSend : Time + SendBuff
524 SendBuffStruct * aSendTimeStruct = NULL ;
525 SendBuffStruct * aSendDataStruct = NULL ;
526 if ( sendbuf && sendcount )
528 TimeMessage * aSendTimeMessage = new TimeMessage ;
531 aSendTimeStruct = new SendBuffStruct ;
532 aSendTimeStruct->SendBuffer = aSendTimeMessage ;
533 aSendTimeStruct->Counter = 0 ;
534 aSendTimeStruct->DataType = _MPI_access->timeType() ;
535 aSendDataStruct = new SendBuffStruct ;
536 aSendDataStruct->SendBuffer = sendbuf ;
537 aSendDataStruct->Counter = 0 ;
538 aSendDataStruct->DataType = sendtype ;
540 aSendTimeMessage->time = _t ;
541 aSendTimeMessage->deltatime = _dt ;
542 for ( target = 0 ; target < _group_size ; target++ )
544 sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
545 SendTimeRequestId ) ;
546 sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ;
549 aSendTimeStruct->Counter += 1 ;
550 (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
551 aSendDataStruct->Counter += 1 ;
552 (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
554 sendoffset += sendcount ;
556 if ( !_asynchronous )
558 delete aSendTimeMessage ;
559 if ( sendtype == MPI_INT )
561 delete [] (int *) sendbuf ;
565 delete [] (double *) sendbuf ;
570 //CheckTime + DoRecv + DoInterp
571 if ( recvbuf && recvcount )
573 for ( target = 0 ; target < _group_size ; target++ )
575 int recvsize = recvcount*_MPI_access->extent( recvtype ) ;
576 checkTime( recvcount , recvtype , target , false ) ;
577 //===========================================================================
578 //TODO : it is assumed actually that we have only 1 timestep before nad after
579 //===========================================================================
580 if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
582 if ( (*_out_of_time)[target] )
584 cout << " =====================================================" << endl
585 << "Recv" << _my_rank << " <-- target " << target << " t0 "
586 << (*_time_messages)[target][0].time << " < t1 "
587 << (*_time_messages)[target][1].time << " < t* " << _t << endl
588 << " =====================================================" << endl ;
590 if ( recvtype == MPI_INT )
592 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
593 (*_time_messages)[target][1].time, _t, recvcount ,
594 _n_step_before, _n_step_after,
595 (int **) &(*_data_messages)[target][0],
596 (int **) &(*_data_messages)[target][1],
597 &((int *)recvbuf)[target*recvcount] ) ;
601 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
602 (*_time_messages)[target][1].time, _t, recvcount ,
603 _n_step_before, _n_step_after,
604 (double **) &(*_data_messages)[target][0],
605 (double **) &(*_data_messages)[target][1],
606 &((double *)recvbuf)[target*recvcount] ) ;
611 char * buffdest = (char *) recvbuf ;
612 char * buffsrc = (char *) (*_data_messages)[target][1] ;
613 memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
621 int MPIAccessDEC::allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
622 MPI_Datatype sendtype ,
623 void* recvbuf, int* recvcounts, int* rdispls,
624 MPI_Datatype recvtype )
628 int SendTimeRequestId ;
629 int SendDataRequestId ;
631 if ( _time_interpolator == NULL )
633 return MPI_ERR_OTHER ;
636 //Free of SendBuffers
644 + We create a TimeMessage (look at that structure in MPI_Access).
645 + If we are in asynchronous mode, we create two structures SendBuffStruct
646 aSendTimeStruct and aSendDataStruct that we fill.
647 + We fill the structure aSendTimeMessage with time/deltatime of
648 the current process. "deltatime" must be nul if it is the last step of
650 + After that for each "target", we Send the TimeMessage and the part
651 of sendbuf corresponding to that target.
652 + If we are in asynchronous mode, we increment the counter and we add
653 aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the
654 identifiers SendTimeRequestId and SendDataRequestId returned by
655 MPI_Access->Send(...).
656 + And if we are in synchronous mode we delete the SendMessages.
658 //DoSend : Time + SendBuff
659 SendBuffStruct * aSendTimeStruct = NULL ;
660 SendBuffStruct * aSendDataStruct = NULL ;
663 TimeMessage * aSendTimeMessage = new TimeMessage ;
666 aSendTimeStruct = new SendBuffStruct ;
667 aSendTimeStruct->SendBuffer = aSendTimeMessage ;
668 aSendTimeStruct->Counter = 0 ;
669 aSendTimeStruct->DataType = _MPI_access->timeType() ;
670 aSendDataStruct = new SendBuffStruct ;
671 aSendDataStruct->SendBuffer = sendbuf ;
672 aSendDataStruct->Counter = 0 ;
673 aSendDataStruct->DataType = sendtype ;
675 aSendTimeMessage->time = _t ;
676 aSendTimeMessage->deltatime = _dt ;
677 for ( target = 0 ; target < _group_size ; target++ )
679 if ( sendcounts[target] )
681 sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
682 SendTimeRequestId ) ;
683 sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
684 SendDataRequestId ) ;
687 aSendTimeStruct->Counter += 1 ;
688 (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
689 aSendDataStruct->Counter += 1 ;
690 (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
694 if ( !_asynchronous )
696 delete aSendTimeMessage ;
697 if ( sendtype == MPI_INT )
699 delete [] (int *) sendbuf ;
703 delete [] (double *) sendbuf ;
709 . CheckTime + DoRecv + DoInterp
710 + For each target we call CheckTime
711 + If there is a TimeInterpolator and if the TimeMessage of the target
712 is not the first, we call the interpolator which return its
713 results in the part of the recv buffer corresponding to the "target".
714 + If not, there is a copy of received datas for that first step of time
715 in the part of the recv buffer corresponding to the "target".
717 //CheckTime + DoRecv + DoInterp
720 for ( target = 0 ; target < _group_size ; target++ )
722 if ( recvcounts[target] )
724 int recvsize = recvcounts[target]*_MPI_access->extent( recvtype ) ;
725 checkTime( recvcounts[target] , recvtype , target , false ) ;
726 //===========================================================================
727 //TODO : it is assumed actually that we have only 1 timestep before nad after
728 //===========================================================================
729 if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
731 if ( (*_out_of_time)[target] )
733 cout << " =====================================================" << endl
734 << "Recv" << _my_rank << " <-- target " << target << " t0 "
735 << (*_time_messages)[target][0].time << " < t1 "
736 << (*_time_messages)[target][1].time << " < t* " << _t << endl
737 << " =====================================================" << endl ;
739 if ( recvtype == MPI_INT )
741 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
742 (*_time_messages)[target][1].time, _t,
743 recvcounts[target] , _n_step_before, _n_step_after,
744 (int **) &(*_data_messages)[target][0],
745 (int **) &(*_data_messages)[target][1],
746 &((int *)recvbuf)[rdispls[target]] ) ;
750 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
751 (*_time_messages)[target][1].time, _t,
752 recvcounts[target] , _n_step_before, _n_step_after,
753 (double **) &(*_data_messages)[target][0],
754 (double **) &(*_data_messages)[target][1],
755 &((double *)recvbuf)[rdispls[target]] ) ;
760 char * buffdest = (char *) recvbuf ;
761 char * buffsrc = (char *) (*_data_messages)[target][1] ;
762 memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc ,
773 . CheckTime(recvcount , recvtype , target , UntilEnd)
774 + At the beginning, we read the first TimeMessage in
775 &(*_TimeMessages)[target][1] and the first DataMessage
776 in the allocated buffer (*_DataMessages)[target][1].
777 + deltatime of TimesMessages must be nul if it is the last one.
778 + While : _t(t*) is the current time of the processus.
779 "while _t(t*) is greater than the time of the "target"
780 (*_TimeMessages)[target][1].time and
781 (*_TimeMessages)[target][1].deltatime is not nul",
782 So at the end of the while we have :
783 _t(t*) <= (*_TimeMessages)[target][1].time with
784 _t(t*) > (*_TimeMessages)[target][0].time
785 or we have the last TimeMessage of the "target".
786 + If it is the finalization of the recv of TimeMessages and
787 DataMessages (UntilEnd value is true), we execute the while
788 until (*_TimeMessages)[target][1].deltatime is nul.
790 We copy the last TimeMessage in the previous TimeMessage and
791 we read a new TimeMessage
792 We delete the previous DataMessage.
793 We copy the last DataMessage pointer in the previous one.
794 We allocate a new last DataMessage buffer
795 (*_DataMessages)[target][1] and we read the corresponding
797 + If the current time of the current process is greater than the
798 last time (*_TimeMessages)[target][1].time du target, we give
799 a true value to (*_OutOfTime)[target].
800 (*_TimeMessages)[target][1].deltatime is nul.
802 int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target ,
805 int sts = MPI_SUCCESS ;
806 int RecvTimeRequestId ;
807 int RecvDataRequestId ;
808 //Pour l'instant on cherche _time_messages[target][0] < _t <= _time_messages[target][1]
809 //===========================================================================
810 //TODO : it is assumed actually that we have only 1 timestep before and after
811 // instead of _n_step_before and _n_step_after ...
812 //===========================================================================
813 (*_data_messages_recv_count)[target] = recvcount ;
814 (*_data_messages_type)[target] = recvtype ;
815 if ( (*_time_messages)[target][1].time == -1 )
817 (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
818 sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
819 target , RecvTimeRequestId ) ;
820 (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
821 if ( recvtype == MPI_INT )
823 (*_data_messages)[target][1] = new int[recvcount] ;
827 (*_data_messages)[target][1] = new double[recvcount] ;
829 sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
830 RecvDataRequestId ) ;
834 while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) &&
835 (*_time_messages)[target][1].deltatime != 0 )
837 (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
838 sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
839 target , RecvTimeRequestId ) ;
842 cout << "CheckTime" << _my_rank << " TimeMessage target " << target
843 << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag "
844 << _MPI_access->recvMPITag(target) << endl ;
846 if ( recvtype == MPI_INT )
848 delete [] (int *) (*_data_messages)[target][0] ;
852 delete [] (double *) (*_data_messages)[target][0] ;
854 (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
855 if ( recvtype == MPI_INT )
857 (*_data_messages)[target][1] = new int[recvcount] ;
861 (*_data_messages)[target][1] = new double[recvcount] ;
863 sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
864 RecvDataRequestId ) ;
867 cout << "CheckTime" << _my_rank << " DataMessage target " << target
868 << " RecvDataRequestId " << RecvDataRequestId << " MPITag "
869 << _MPI_access->recvMPITag(target) << endl ;
873 if ( _t > (*_time_messages)[target][0].time &&
874 _t <= (*_time_messages)[target][1].time )
879 (*_out_of_time)[target] = true ;
887 + call SendRequestIds of MPI_Access in order to get all
888 RequestIds of SendMessages of all "targets".
889 + For each RequestId, CheckSent call "Test" of MPI_Access in order
890 to know if the buffer is "free" (flag = true). If it is the
891 FinalCheckSent (WithWait = true), we call Wait instead of Test.
892 + If the buffer is "free", the counter of the structure SendBuffStruct
893 (from _MapOfSendBuffers) is decremented.
894 + If that counter is nul we delete the TimeMessage or the
895 SendBuffer according to the DataType.
896 + And we delete the structure SendBuffStruct before the suppression
897 (erase) of that item of _MapOfSendBuffers
899 int MPIAccessDEC::checkSent(bool WithWait)
901 int sts = MPI_SUCCESS ;
902 int flag = WithWait ;
903 int size = _MPI_access->sendRequestIdsSize() ;
904 int * ArrayOfSendRequests = new int[ size ] ;
905 int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ;
906 bool SendTrace = false ;
908 for ( i = 0 ; i < nSendRequest ; i++ )
912 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
913 << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
914 << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
915 << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
916 sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
920 sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ;
924 _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ;
927 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
928 << " SendRequestId " << ArrayOfSendRequests[i]
930 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
931 << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
934 (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
937 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
938 _MPI_access->timeType() )
940 cout << "CheckTimeSent" << _my_rank << " Request " ;
944 cout << "CheckDataSent" << _my_rank << " Request " ;
946 cout << ArrayOfSendRequests[i]
947 << " _map_of_send_buffers->SendBuffer "
948 << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
949 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
952 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter == 0 )
956 cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i]
957 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
958 << " flag " << flag << " SendBuffer "
959 << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
960 << " deleted. Erase in _map_of_send_buffers :" << endl ;
962 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
963 _MPI_access->timeType() )
965 delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
969 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT )
971 delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
975 delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
978 delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ;
982 cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId "
983 << ArrayOfSendRequests[i] << endl ;
985 (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ;
987 else if ( SendTrace )
989 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
990 << " SendRequestId " << ArrayOfSendRequests[i]
992 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
993 << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
999 _MPI_access->check() ;
1001 delete [] ArrayOfSendRequests ;
1005 int MPIAccessDEC::checkFinalRecv()
1007 int sts = MPI_SUCCESS ;
1008 if ( _time_interpolator )
1011 for ( target = 0 ; target < _group_size ; target++ )
1013 if ( (*_data_messages)[target][0] != NULL )
1015 sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] ,
1017 if ( (*_data_messages_type)[target] == MPI_INT )
1019 delete [] (int *) (*_data_messages)[target][0] ;
1023 delete [] (double *) (*_data_messages)[target][0] ;
1025 (*_data_messages)[target][0] = NULL ;
1026 if ( (*_data_messages)[target][1] != NULL )
1028 if ( (*_data_messages_type)[target] == MPI_INT )
1030 delete [] (int *) (*_data_messages)[target][1] ;
1034 delete [] (double *) (*_data_messages)[target][1] ;
1036 (*_data_messages)[target][1] = NULL ;
1044 ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod )
1046 switch (interpolationmethod)
1048 case WithoutTimeInterp :
1049 f << " WithoutTimeInterpolation ";
1051 case LinearTimeInterp :
1052 f << " LinearTimeInterpolation ";
1055 f << " UnknownTimeInterpolation ";