1 // Copyright (C) 2007-2019 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "MPIAccessDEC.hxx"
30 This constructor creates an MPIAccessDEC which has \a source_group as a working side
31 and \a target_group as an idle side.
32 The constructor must be called synchronously on all processors of both processor groups.
34 \param source_group working side ProcessorGroup
35 \param target_group lazy side ProcessorGroup
36 \param Asynchronous Communication mode (default asynchronous)
37 \param nStepBefore Number of Time step needed for the interpolation before current time
38 \param nStepAfter Number of Time step needed for the interpolation after current time
// Constructor: fuses the source and target groups into one union MPI
// processor group, wraps it in an MPIAccess helper, and sizes every
// per-target bookkeeping vector to the union-group size.
// NOTE(review): some original lines (declarations of i/procs, braces)
// are not visible in this excerpt; comments only describe visible code.
42 MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
43 const ProcessorGroup& target_group,
// Fused group: all per-target arrays below are indexed by rank in it.
47 ProcessorGroup * union_group = source_group.fuse(target_group) ;
50 for ( i = 0 ; i < union_group->size() ; i++ )
// const_cast presumably works around a non-const-correct ProcessorGroup
// API; source_group looks to be only read here — TODO confirm.
54 MPIProcessorGroup *mpilg = static_cast<MPIProcessorGroup *>(const_cast<ProcessorGroup *>(&source_group));
55 _MPI_union_group = new MEDCoupling::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
57 _my_rank = _MPI_union_group->myRank() ;
58 _group_size = _MPI_union_group->size() ;
59 _MPI_access = new MPIAccess( _MPI_union_group ) ;
60 _asynchronous = Asynchronous ;
// Per-target receive bookkeeping used by the time-interpolated exchanges.
61 _time_messages = new vector< vector< TimeMessage > > ;
62 _time_messages->resize( _group_size ) ;
63 _out_of_time = new vector< bool > ;
64 _out_of_time->resize( _group_size ) ;
65 _data_messages_recv_count = new vector< int > ;
66 _data_messages_recv_count->resize( _group_size ) ;
67 for ( i = 0 ; i < _group_size ; i++ )
69 (*_out_of_time)[i] = false ;
70 (*_data_messages_recv_count)[i] = 0 ;
72 _data_messages_type = new vector< MPI_Datatype > ;
73 _data_messages_type->resize( _group_size ) ;
74 _data_messages = new vector< vector< void * > > ;
75 _data_messages->resize( _group_size ) ;
76 _time_interpolator = NULL ;
// Maps each asynchronous send RequestId to its reference-counted buffer
// (freed in checkSent once every part of the buffer has been sent).
77 _map_of_send_buffers = new map< int , SendBuffStruct * > ;
// Destructor: releases the union group, the MPIAccess helper (not visible
// in this excerpt) and all bookkeeping containers allocated in the ctor.
// The if-guards are redundant (delete of NULL is safe) but harmless.
80 MPIAccessDEC::~MPIAccessDEC()
84   delete _MPI_union_group ;
86   if ( _time_interpolator )
87     delete _time_interpolator ;
89   delete _time_messages ;
92   if ( _data_messages_recv_count )
93     delete _data_messages_recv_count ;
94   if ( _data_messages_type )
95     delete _data_messages_type ;
// NOTE(review): the void* buffers held inside *_data_messages appear to be
// freed by checkFinalRecv(), not here — leaks if it was never called; verify.
97   delete _data_messages ;
98   if ( _map_of_send_buffers )
99     delete _map_of_send_buffers ;
// Installs (or removes) the time interpolator and resizes the per-target
// time/data message windows to nStepBefore + nStepAfter entries.
// A time value of -1 marks "no message received yet" (tested by checkTime
// and the allToAll*Time interpolation paths).
102 void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
103                                         double InterpPrecision, int nStepBefore,
// Replacing any previously installed interpolator.
106   if ( _time_interpolator )
107     delete _time_interpolator ;
108   switch ( aTimeInterp )
110   case WithoutTimeInterp :
111     _time_interpolator = NULL ;
115   case LinearTimeInterp :
116     _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
118     _n_step_before = nStepBefore ;
119     _n_step_after = nStepAfter ;
121     for ( i = 0 ; i < _group_size ; i++ )
123       (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
124       (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
126       for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ )
// Sentinel initialisation: -1 time/deltatime and NULL data pointer mean
// "slot empty".
128         (*_time_messages)[ i ][ j ].time = -1 ;
129         (*_time_messages)[ i ][ j ].deltatime = -1 ;
130         (*_data_messages)[ i ][ j ] = NULL ;
138 Send sendcount data items from sendbuf[offset] with type sendtype to target of IntraCommunicator
139 (Internal Protected method)
141 Returns the request identifier SendRequestId
// Point-to-point send of sendcount elements starting at sendbuf[offset].
// Dispatches on the MPI datatype to cast the void* buffer correctly;
// supported types: MPI_INT, MPI_LONG, and (fallback branch) double.
// The first group of branches uses non-blocking ISend, the second blocking
// send — presumably selected by _asynchronous (the condition line is not
// visible in this excerpt; TODO confirm).
144 int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset ,
145                         MPI_Datatype sendtype , int target , int &SendRequestId )
150       if ( sendtype == MPI_INT )
152           sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
153                                     target , SendRequestId ) ;
155       else if ( sendtype == MPI_LONG )
157           sts = _MPI_access->ISend( &((long *) sendbuf)[offset] , sendcount , sendtype ,
158                                     target , SendRequestId ) ;
162           sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
163                                     target , SendRequestId ) ;
168       if ( sendtype == MPI_INT )
170           sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
171                                    target , SendRequestId ) ;
173       else if ( sendtype == MPI_LONG )
175           sts = _MPI_access->send( &((long *) sendbuf)[offset] , sendcount , sendtype ,
176                                    target , SendRequestId ) ;
180           sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
181                                    target , SendRequestId ) ;
188 Receive recvcount data items into recvbuf[offset] with type recvtype from target of IntraCommunicator
189 (Internal Protected method)
191 Returns the request identifier RecvRequestId
// Point-to-point receive of recvcount elements into recvbuf[offset];
// mirror of send(): type dispatch on MPI_INT / MPI_LONG / double fallback,
// non-blocking IRecv branches first, blocking recv branches second
// (selector condition not visible in this excerpt; TODO confirm it is
// _asynchronous).
194 int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset ,
195                         MPI_Datatype recvtype , int target , int &RecvRequestId )
200       if ( recvtype == MPI_INT )
202           sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
203                                     target , RecvRequestId ) ;
205       else if ( recvtype == MPI_LONG )
207           sts = _MPI_access->IRecv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
208                                     target , RecvRequestId ) ;
212           sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
213                                     target , RecvRequestId ) ;
218       if ( recvtype == MPI_INT )
220           sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
221                                    target , RecvRequestId ) ;
223       else if ( recvtype == MPI_LONG )
225           sts = _MPI_access->recv( &((long *) recvbuf)[offset] , recvcount , recvtype ,
226                                    target , RecvRequestId ) ;
230           sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
231                                    target , RecvRequestId ) ;
238 Send sendcount data items from sendbuf[offset] with type sendtype to target of IntraCommunicator
239 Receive recvcount data items into recvbuf[offset] with type recvtype from target of IntraCommunicator
240 (Internal Protected method)
242 Returns the request identifier SendRequestId
243 Returns the request identifier RecvRequestId
// Combined send+receive toward the same target; returns both request ids.
// Dispatches over the four (sendtype, recvtype) combinations of
// MPI_INT / double. NOTE(review): unlike send()/recv(), MPI_LONG is NOT
// handled here — a long buffer would be cast as double in the fallback
// branches; flag for the maintainers.
// First group: non-blocking ISendRecv; second group: blocking sendRecv
// (the selecting condition is not visible in this excerpt).
246 int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset ,
247                             MPI_Datatype sendtype ,
248                             void* recvbuf, int recvcount , int recvoffset ,
249                             MPI_Datatype recvtype , int target ,
250                             int &SendRequestId , int &RecvRequestId )
255       if ( sendtype == MPI_INT )
257           if ( recvtype == MPI_INT )
259               sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
260                                             sendtype , target , SendRequestId ,
261                                             &((int *) recvbuf)[recvoffset] , recvcount ,
262                                             recvtype , target , RecvRequestId ) ;
266               sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
267                                             sendtype , target , SendRequestId ,
268                                             &((double *) recvbuf)[recvoffset] ,
269                                             recvcount , recvtype , target , RecvRequestId ) ;
274           if ( recvtype == MPI_INT )
276               sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
277                                             sendtype , target , SendRequestId ,
278                                             &((int *) recvbuf)[recvoffset] ,
279                                             recvcount , recvtype , target , RecvRequestId ) ;
283               sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
284                                             sendtype , target , SendRequestId ,
285                                             &((double *) recvbuf)[recvoffset] ,
286                                             recvcount , recvtype , target , RecvRequestId ) ;
292       if ( sendtype == MPI_INT )
294           if ( recvtype == MPI_INT )
296               sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
297                                            sendtype , target , SendRequestId ,
298                                            &((int *) recvbuf)[recvoffset] , recvcount ,
299                                            recvtype , target , RecvRequestId ) ;
303               sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
304                                            sendtype , target , SendRequestId ,
305                                            &((double *) recvbuf)[recvoffset] ,
306                                            recvcount , recvtype , target , RecvRequestId ) ;
311           if ( recvtype == MPI_INT )
313               sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
314                                            sendtype , target , SendRequestId ,
315                                            &((int *) recvbuf)[recvoffset] ,
316                                            recvcount , recvtype , target , RecvRequestId ) ;
320               sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
321                                            sendtype , target , SendRequestId ,
322                                            &((double *) recvbuf)[recvoffset] ,
323                                            recvcount , recvtype , target , RecvRequestId ) ;
331 Send sendcount data items from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
332 Receive recvcount data items into recvbuf[offset] with type recvtype from all targets of IntraCommunicator
// All-to-all over the union group: sends a sendcount-sized slice of
// sendbuf to every target and receives into successive recvcount-sized
// slices of recvbuf. Delegates to allToAllTime() when an interpolator is
// installed. In asynchronous mode, a single ref-counted SendBuffStruct
// tracks sendbuf until every per-target part is confirmed sent (see
// checkSent); in synchronous mode sendbuf is deleted here.
335 int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
336                             void* recvbuf, int recvcount, MPI_Datatype recvtype )
338   if ( _time_interpolator )
340       return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
349   //Free of SendBuffers
353   //DoSend + DoRecv : SendRecv
354   SendBuffStruct * aSendDataStruct = NULL ;
355   if ( _asynchronous && sendbuf )
357       aSendDataStruct = new SendBuffStruct ;
358       aSendDataStruct->SendBuffer = sendbuf ;
359       aSendDataStruct->Counter = 0 ;
360       aSendDataStruct->DataType = sendtype ;
362   for ( target = 0 ; target < _group_size ; target++ )
364       sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype ,
365                       recvbuf , recvcount , recvoffset , recvtype ,
366                       target , SendRequestId , RecvRequestId ) ;
// One reference per outstanding per-target send request.
367       if ( _asynchronous && sendbuf && sendcount )
369           aSendDataStruct->Counter += 1 ;
370           (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
// Advance to the next per-target slice of each buffer.
372       sendoffset += sendcount ;
373       recvoffset += recvcount ;
// Synchronous mode: this class owns sendbuf and frees it immediately.
// Only int and double element types are expected here.
375   if ( !_asynchronous && sendbuf )
377       if ( sendtype == MPI_INT )
379           delete [] (int *) sendbuf ;
383           delete [] (double *) sendbuf ;
390 Send sendcounts[target] data items from sendbuf[sdispls[target]] with type sendtype to all targets of IntraCommunicator
391 Receive recvcounts[target] data items into recvbuf[rdispls[target]] with type recvtype from all targets of IntraCommunicator
// Vector variant of allToAll: per-target counts/displacements, same
// ownership rules — in asynchronous mode one ref-counted SendBuffStruct
// per call keeps sendbuf alive until checkSent releases it; in synchronous
// mode sendbuf is deleted here. Delegates to allToAllvTime() when a time
// interpolator is installed.
394 int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls,
395                              MPI_Datatype sendtype ,
396                              void* recvbuf, int* recvcounts, int* rdispls,
397                              MPI_Datatype recvtype )
399   if ( _time_interpolator )
401       return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
402                             recvbuf, recvcounts, rdispls, recvtype ) ;
409   //Free of SendBuffers
415   //DoSend + DoRecv : SendRecv
416   SendBuffStruct * aSendDataStruct = NULL ;
417   if ( _asynchronous && sendbuf )
419       aSendDataStruct = new SendBuffStruct ;
420       aSendDataStruct->SendBuffer = sendbuf ;
421       aSendDataStruct->Counter = 0 ;
422       aSendDataStruct->DataType = sendtype ;
424   for ( target = 0 ; target < _group_size ; target++ )
// Skip targets with nothing to exchange in either direction.
426       if ( sendcounts[target] || recvcounts[target] )
428           sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
429                           recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
430                           target , SendRequestId , RecvRequestId ) ;
// Count a reference only when something was actually sent to this target.
431           if ( _asynchronous && sendbuf && sendcounts[target])
433               aSendDataStruct->Counter += 1 ;
434               (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
// Synchronous mode: free the caller-transferred buffer right away.
438   if ( !_asynchronous && sendbuf )
440       if ( sendtype == MPI_INT )
442           delete [] (int *) sendbuf ;
446           delete [] (double *) sendbuf ;
453 MPIAccessDEC and the management of SendBuffers :
454 =================================================
456   . In the collective communications we send only parts of
457     the same buffer to each "target". So in asynchronous mode it is
458     necessary that all parts are free before deleting/freeing the
461 . We assume that buffers are allocated with a new double[]. so a
464 . The structure SendBuffStruct permit to keep the address of the buffer
465 and to manage a reference counter of that buffer. It contains
466 also MPI_Datatype for the delete [] (double *) ... when the counter
469 . The map _MapOfSendBuffers establish the correspondence between each
470 RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
471 for each "target" of a part of the buffer.
473   . All that concerns only asynchronous Send. In synchronous mode,
474     we delete sendbuf just after the Send.
478 MPIAccessDEC and the management of RecvBuffers :
479 =================================================
481 If there is no interpolation, no special action is done.
483 With interpolation for each target :
484 ------------------------------------
485 . We have _time_messages[target] which is a vector of TimesMessages.
486 We have 2 TimesMessages in our case with a linear interpolation.
487 They contain the previous time(t0)/deltatime and the last
490 . We have _data_messages[target] which is a vector of DatasMessages.
491 We have 2 DatasMessages in our case with a linear interpolation.
492 They contain the previous datas at time(t0)/deltatime and at last
495 . At time _t(t*) of current processus we do the interpolation of
496 the values of the 2 DatasMessages which are returned in the part of
497 recvbuf corresponding to the target with t0 < t* <= t1.
499 . Because of the difference of "deltatimes" between processes, we
500 may have t0 < t1 < t* and there is an extrapolation.
502 . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
503 contain for each target true if t* > last t1, recvcount and
504 MPI_Datatype for the finalize of messages at the end.
508 Send a TimeMessage to all targets of IntraCommunicator
509 Receive the TimeMessages from targets of IntraCommunicator if necessary.
511 Send sendcount data items from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
512 Returns recvcount data items in recvbuf[offset] with type recvtype after an interpolation
513 with data received from all targets of IntraCommunicator.
// Time-aware all-to-all: sends a TimeMessage (current _t/_dt) plus the
// data slice to every target, then for each target receives/advances its
// time window (checkTime) and either interpolates between the two buffered
// steps into recvbuf, or memcpy's the first received step when only one
// message has arrived yet. Requires an installed _time_interpolator.
516 int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
517                                 void* recvbuf, int recvcount , MPI_Datatype recvtype )
522   int SendTimeRequestId ;
523   int SendDataRequestId ;
// Guard: this entry point only makes sense with an interpolator.
525   if ( _time_interpolator == NULL )
527       return MPI_ERR_OTHER ;
530   //Free of SendBuffers
536   //DoSend : Time + SendBuff
537   SendBuffStruct * aSendTimeStruct = NULL ;
538   SendBuffStruct * aSendDataStruct = NULL ;
539   if ( sendbuf && sendcount )
// One TimeMessage shared by all targets; ref-counted like the data buffer.
541       TimeMessage * aSendTimeMessage = new TimeMessage ;
544       aSendTimeStruct = new SendBuffStruct ;
545       aSendTimeStruct->SendBuffer = aSendTimeMessage ;
546       aSendTimeStruct->Counter = 0 ;
// DataType tells checkSent whether to delete a TimeMessage or a data array.
547       aSendTimeStruct->DataType = _MPI_access->timeType() ;
548       aSendDataStruct = new SendBuffStruct ;
549       aSendDataStruct->SendBuffer = sendbuf ;
550       aSendDataStruct->Counter = 0 ;
551       aSendDataStruct->DataType = sendtype ;
552       aSendTimeMessage->time = _t ;
554       aSendTimeMessage->deltatime = _dt ;
555       for ( target = 0 ; target < _group_size ; target++ )
557           sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
558                       SendTimeRequestId ) ;
559           sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ;
// Register both outstanding requests so checkSent can release the buffers.
562           aSendTimeStruct->Counter += 1 ;
563           (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
564           aSendDataStruct->Counter += 1 ;
565           (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
567           sendoffset += sendcount ;
// Synchronous mode: sends completed, free the message and the buffer now.
569       if ( !_asynchronous )
571           delete aSendTimeMessage ;
572           if ( sendtype == MPI_INT )
574               delete [] (int *) sendbuf ;
578               delete [] (double *) sendbuf ;
583   //CheckTime + DoRecv + DoInterp
584   if ( recvbuf && recvcount )
586       for ( target = 0 ; target < _group_size ; target++ )
588           int recvsize = (int)(recvcount*_MPI_access->extent( recvtype ));
589           checkTime( recvcount , recvtype , target , false ) ;
590           //===========================================================================
591           //TODO : it is assumed actually that we have only 1 timestep before and after
592           //===========================================================================
// time != -1 means at least the previous (t0) message has been received.
593           if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
// Diagnostic: extrapolation case, current time is past the target's last t1.
595               if ( (*_out_of_time)[target] )
597                   cout << " =====================================================" << endl
598                        << "Recv" << _my_rank << " <-- target " << target << " t0 "
599                        << (*_time_messages)[target][0].time << " < t1 "
600                        << (*_time_messages)[target][1].time << " < t* " << _t << endl
601                        << " =====================================================" << endl ;
603               if ( recvtype == MPI_INT )
605                   _time_interpolator->doInterp( (*_time_messages)[target][0].time,
606                                                 (*_time_messages)[target][1].time, _t, recvcount ,
607                                                 _n_step_before, _n_step_after,
608                                                 (int **) &(*_data_messages)[target][0],
609                                                 (int **) &(*_data_messages)[target][1],
610                                                 &((int *)recvbuf)[target*recvcount] ) ;
614                   _time_interpolator->doInterp( (*_time_messages)[target][0].time,
615                                                 (*_time_messages)[target][1].time, _t, recvcount ,
616                                                 _n_step_before, _n_step_after,
617                                                 (double **) &(*_data_messages)[target][0],
618                                                 (double **) &(*_data_messages)[target][1],
619                                                 &((double *)recvbuf)[target*recvcount] ) ;
// First step: no t0 yet, copy the single buffered message verbatim.
624               char * buffdest = (char *) recvbuf ;
625               char * buffsrc = (char *) (*_data_messages)[target][1] ;
626               memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
// Vector variant of allToAllTime: per-target counts/displacements, same
// TimeMessage + interpolation protocol. Targets with a zero sendcount or
// recvcount are skipped on the corresponding side.
634 int MPIAccessDEC::allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
635                                  MPI_Datatype sendtype ,
636                                  void* recvbuf, int* recvcounts, int* rdispls,
637                                  MPI_Datatype recvtype )
641   int SendTimeRequestId ;
642   int SendDataRequestId ;
// Guard: this entry point only makes sense with an interpolator.
644   if ( _time_interpolator == NULL )
646       return MPI_ERR_OTHER ;
649   //Free of SendBuffers
657     + We create a TimeMessage (look at that structure in MPI_Access).
658     + If we are in asynchronous mode, we create two structures SendBuffStruct
659       aSendTimeStruct and aSendDataStruct that we fill.
660     + We fill the structure aSendTimeMessage with time/deltatime of
661       the current process. "deltatime" must be zero if it is the last step of
663     + After that for each "target", we Send the TimeMessage and the part
664       of sendbuf corresponding to that target.
665     + If we are in asynchronous mode, we increment the counter and we add
666       aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the
667       identifiers SendTimeRequestId and SendDataRequestId returned by
668       MPI_Access->Send(...).
669     + And if we are in synchronous mode we delete the SendMessages.
671   //DoSend : Time + SendBuff
672   SendBuffStruct * aSendTimeStruct = NULL ;
673   SendBuffStruct * aSendDataStruct = NULL ;
// One TimeMessage shared by all targets; ref-counted like the data buffer.
676       TimeMessage * aSendTimeMessage = new TimeMessage ;
679       aSendTimeStruct = new SendBuffStruct ;
680       aSendTimeStruct->SendBuffer = aSendTimeMessage ;
681       aSendTimeStruct->Counter = 0 ;
// DataType tells checkSent whether to delete a TimeMessage or a data array.
682       aSendTimeStruct->DataType = _MPI_access->timeType() ;
683       aSendDataStruct = new SendBuffStruct ;
684       aSendDataStruct->SendBuffer = sendbuf ;
685       aSendDataStruct->Counter = 0 ;
686       aSendDataStruct->DataType = sendtype ;
688       aSendTimeMessage->time = _t ;
689       aSendTimeMessage->deltatime = _dt ;
690       for ( target = 0 ; target < _group_size ; target++ )
// Nothing to send to this target: skip both messages.
692           if ( sendcounts[target] )
694               sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
695                           SendTimeRequestId ) ;
696               sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
697                           SendDataRequestId ) ;
// Register both outstanding requests so checkSent can release the buffers.
700               aSendTimeStruct->Counter += 1 ;
701               (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
702               aSendDataStruct->Counter += 1 ;
703               (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
// Synchronous mode: sends completed, free the message and the buffer now.
707       if ( !_asynchronous )
709           delete aSendTimeMessage ;
710           if ( sendtype == MPI_INT )
712               delete [] (int *) sendbuf ;
716               delete [] (double *) sendbuf ;
722     . CheckTime + DoRecv + DoInterp
723     + For each target we call CheckTime
724     + If there is a TimeInterpolator and if the TimeMessage of the target
725       is not the first, we call the interpolator which return its
726       results in the part of the recv buffer corresponding to the "target".
727     + If not, there is a copy of received datas for that first step of time
728       in the part of the recv buffer corresponding to the "target".
730   //CheckTime + DoRecv + DoInterp
733       for ( target = 0 ; target < _group_size ; target++ )
735           if ( recvcounts[target] )
737               int recvsize = (int)(recvcounts[target]*_MPI_access->extent( recvtype ));
738               checkTime( recvcounts[target] , recvtype , target , false ) ;
739               //===========================================================================
740               //TODO : it is assumed actually that we have only 1 timestep before and after
741               //===========================================================================
// time != -1 means at least the previous (t0) message has been received.
742               if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
// Diagnostic: extrapolation case, current time is past the target's last t1.
744                   if ( (*_out_of_time)[target] )
746                       cout << " =====================================================" << endl
747                            << "Recv" << _my_rank << " <-- target " << target << " t0 "
748                            << (*_time_messages)[target][0].time << " < t1 "
749                            << (*_time_messages)[target][1].time << " < t* " << _t << endl
750                            << " =====================================================" << endl ;
752                   if ( recvtype == MPI_INT )
754                       _time_interpolator->doInterp( (*_time_messages)[target][0].time,
755                                                     (*_time_messages)[target][1].time, _t,
756                                                     recvcounts[target] , _n_step_before, _n_step_after,
757                                                     (int **) &(*_data_messages)[target][0],
758                                                     (int **) &(*_data_messages)[target][1],
759                                                     &((int *)recvbuf)[rdispls[target]] ) ;
763                       _time_interpolator->doInterp( (*_time_messages)[target][0].time,
764                                                     (*_time_messages)[target][1].time, _t,
765                                                     recvcounts[target] , _n_step_before, _n_step_after,
766                                                     (double **) &(*_data_messages)[target][0],
767                                                     (double **) &(*_data_messages)[target][1],
768                                                     &((double *)recvbuf)[rdispls[target]] ) ;
// First step: no t0 yet, copy the single buffered message verbatim.
773                   char * buffdest = (char *) recvbuf ;
774                   char * buffsrc = (char *) (*_data_messages)[target][1] ;
775                   memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc ,
786 . CheckTime(recvcount , recvtype , target , UntilEnd)
787 + At the beginning, we read the first TimeMessage in
788 &(*_TimeMessages)[target][1] and the first DataMessage
789 in the allocated buffer (*_DataMessages)[target][1].
790 + deltatime of TimesMessages must be nul if it is the last one.
791 + While : _t(t*) is the current time of the processus.
792 "while _t(t*) is greater than the time of the "target"
793 (*_TimeMessages)[target][1].time and
794 (*_TimeMessages)[target][1].deltatime is not nul",
795 So at the end of the while we have :
796 _t(t*) <= (*_TimeMessages)[target][1].time with
797 _t(t*) > (*_TimeMessages)[target][0].time
798 or we have the last TimeMessage of the "target".
799 + If it is the finalization of the recv of TimeMessages and
800 DataMessages (UntilEnd value is true), we execute the while
801 until (*_TimeMessages)[target][1].deltatime is nul.
803       We copy the last TimeMessage into the previous TimeMessage and
804 we read a new TimeMessage
805 We delete the previous DataMessage.
806 We copy the last DataMessage pointer in the previous one.
807 We allocate a new last DataMessage buffer
808 (*_DataMessages)[target][1] and we read the corresponding
810 + If the current time of the current process is greater than the
811       last time (*_TimeMessages)[target][1].time of the target, we give
812 a true value to (*_OutOfTime)[target].
813 (*_TimeMessages)[target][1].deltatime is nul.
// Advances the two-slot receive window for `target` until the bracketing
// condition t0 < _t <= t1 holds (or, with UntilEnd true, until the last
// message, whose deltatime is 0, has been consumed). Records recvcount and
// recvtype for the final drain done by checkFinalRecv(). Sets
// (*_out_of_time)[target] when _t lies beyond the target's last time.
815 int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target ,
818   int sts = MPI_SUCCESS ;
819   int RecvTimeRequestId ;
820   int RecvDataRequestId ;
821   //For now we look for _time_messages[target][0] < _t <= _time_messages[target][1]
822   //===========================================================================
823   //TODO : it is assumed actually that we have only 1 timestep before and after
824   //       instead of _n_step_before and _n_step_after ...
825   //===========================================================================
// Remember count/type so checkFinalRecv() can drain remaining messages.
826   (*_data_messages_recv_count)[target] = recvcount ;
827   (*_data_messages_type)[target] = recvtype ;
// time == -1: nothing received yet from this target, prime the window with
// a first TimeMessage + data buffer.
828   if ( (*_time_messages)[target][1].time == -1 )
830       (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
831       sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
832                   target , RecvTimeRequestId ) ;
833       (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
834       if ( recvtype == MPI_INT )
836           (*_data_messages)[target][1] = new int[recvcount] ;
840           (*_data_messages)[target][1] = new double[recvcount] ;
842       sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
843                   RecvDataRequestId ) ;
// Slide the window forward: deltatime == 0 marks the sender's final step.
847   while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) &&
848           (*_time_messages)[target][1].deltatime != 0 )
850       (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
851       sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
852                   target , RecvTimeRequestId ) ;
855           cout << "CheckTime" << _my_rank << " TimeMessage target " << target
856                << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag "
857                << _MPI_access->recvMPITag(target) << endl ;
// Drop the superseded previous data buffer before shifting the window.
859       if ( recvtype == MPI_INT )
861           delete [] (int *) (*_data_messages)[target][0] ;
865           delete [] (double *) (*_data_messages)[target][0] ;
867       (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
868       if ( recvtype == MPI_INT )
870           (*_data_messages)[target][1] = new int[recvcount] ;
874           (*_data_messages)[target][1] = new double[recvcount] ;
876       sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
877                   RecvDataRequestId ) ;
880           cout << "CheckTime" << _my_rank << " DataMessage target " << target
881                << " RecvDataRequestId " << RecvDataRequestId << " MPITag "
882                << _MPI_access->recvMPITag(target) << endl ;
// Bracketing achieved: t0 < _t <= t1 (interpolation possible) ...
886   if ( _t > (*_time_messages)[target][0].time &&
887        _t <= (*_time_messages)[target][1].time )
// ... otherwise _t is past the target's last message: extrapolation case.
892       (*_out_of_time)[target] = true ;
900 + call SendRequestIds of MPI_Access in order to get all
901 RequestIds of SendMessages of all "targets".
902 + For each RequestId, CheckSent call "Test" of MPI_Access in order
903 to know if the buffer is "free" (flag = true). If it is the
904 FinalCheckSent (WithWait = true), we call Wait instead of Test.
905 + If the buffer is "free", the counter of the structure SendBuffStruct
906 (from _MapOfSendBuffers) is decremented.
907 + If that counter is nul we delete the TimeMessage or the
908 SendBuffer according to the DataType.
909 + And we delete the structure SendBuffStruct before the suppression
910 (erase) of that item of _MapOfSendBuffers
// Reaps completed send requests. For each known send RequestId: Wait (when
// WithWait, i.e. the final check) or Test for completion; on completion
// decrement the SendBuffStruct refcount, and when it reaches 0 free the
// underlying buffer (TimeMessage vs int/double array, chosen by DataType),
// delete the struct and erase the map entry.
912 int MPIAccessDEC::checkSent(bool WithWait)
914   int sts = MPI_SUCCESS ;
// flag starts true when WithWait so the completion branch runs after wait().
915   int flag = WithWait ;
916   int size = _MPI_access->sendRequestIdsSize() ;
// Snapshot of all outstanding send request ids.
917   int * ArrayOfSendRequests = new int[ size ] ;
918   int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ;
919   bool SendTrace = false ;
921   for ( i = 0 ; i < nSendRequest ; i++ )
927           cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
928                << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
929                << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
930                << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
// Final check blocks until completion; routine check just polls.
932           sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
936           sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ;
940           _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ;
943               cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
944                    << " SendRequestId " << ArrayOfSendRequests[i]
946                    << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
947                    << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
// One fewer outstanding send holding a reference to this buffer.
950           (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
953           if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
954                _MPI_access->timeType() )
956               cout << "CheckTimeSent" << _my_rank << " Request " ;
960               cout << "CheckDataSent" << _my_rank << " Request " ;
962           cout << ArrayOfSendRequests[i]
963                << " _map_of_send_buffers->SendBuffer "
964                << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
965                << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
// Last reference gone: free the buffer, its struct, and the map entry.
968           if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter == 0 )
972                   cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i]
973                        << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
974                        << " flag " << flag << " SendBuffer "
975                        << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
976                        << " deleted. Erase in _map_of_send_buffers :" << endl ;
// DataType selects the correct delete form for the stored void* buffer.
978               if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
979                    _MPI_access->timeType() )
981                   delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
985                   if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT )
987                       delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
991                       delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
994               delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ;
998                   cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId "
999                        << ArrayOfSendRequests[i] << endl ;
1001               (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ;
1003           else if ( SendTrace )
1005               cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
1006                    << " SendRequestId " << ArrayOfSendRequests[i]
1008                    << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
1009                    << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
1015       _MPI_access->check() ;
1017   delete [] ArrayOfSendRequests ;
// Final drain of the time-interpolated receive channels: for every target
// still holding buffered data, run checkTime in "UntilEnd" mode (arguments
// partially elided in this excerpt) to consume remaining messages, then
// free both window slots with the delete form matching the recorded
// MPI datatype and reset the pointers to NULL.
1021 int MPIAccessDEC::checkFinalRecv()
1023   int sts = MPI_SUCCESS ;
// Only meaningful when the time-interpolation protocol was in use.
1024   if ( _time_interpolator )
1027       for ( target = 0 ; target < _group_size ; target++ )
1029           if ( (*_data_messages)[target][0] != NULL )
1031               sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] ,
1033               if ( (*_data_messages_type)[target] == MPI_INT )
1035                   delete [] (int *) (*_data_messages)[target][0] ;
1039                   delete [] (double *) (*_data_messages)[target][0] ;
1041               (*_data_messages)[target][0] = NULL ;
1042               if ( (*_data_messages)[target][1] != NULL )
1044                   if ( (*_data_messages_type)[target] == MPI_INT )
1046                       delete [] (int *) (*_data_messages)[target][1] ;
1050                       delete [] (double *) (*_data_messages)[target][1] ;
1052                   (*_data_messages)[target][1] = NULL ;
1060 ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod )
1062 switch (interpolationmethod)
1064 case WithoutTimeInterp :
1065 f << " WithoutTimeInterpolation ";
1067 case LinearTimeInterp :
1068 f << " LinearTimeInterpolation ";
1071 f << " UnknownTimeInterpolation ";