1 // Copyright (C) 2007-2013 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "MPIAccessDEC.hxx"
30 This constructor creates an MPIAccessDEC which has \a source_group as a working side
31 and \a target_group as an idle side.
32 The constructor must be called synchronously on all processors of both processor groups.
34 \param source_group working side ProcessorGroup
35 \param target_group lazy side ProcessorGroup
36 \param Asynchronous Communication mode (default asynchronous)
37 \param nStepBefore Number of Time step needed for the interpolation before current time
38 \param nStepAfter Number of Time step needed for the interpolation after current time
// Constructor: fuses the source and target process groups into a union group,
// wraps it in an MPIProcessorGroup/MPIAccess pair, and allocates the
// per-target bookkeeping vectors used by the time-interpolated exchanges.
// Must be called synchronously on every process of both groups.
42 MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
43 const ProcessorGroup& target_group,
// Union of both sides: every rank of source_group and target_group.
47 ProcessorGroup * union_group = source_group.fuse(target_group) ;
50 for ( i = 0 ; i < union_group->size() ; i++ )
// const_cast only to reach MPIProcessorGroup::getWorldComm() through the
// const ProcessorGroup reference; source_group is not modified.
54 MPIProcessorGroup *mpilg = static_cast<MPIProcessorGroup *>(const_cast<ProcessorGroup *>(&source_group));
55 _MPI_union_group = new ParaMEDMEM::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
57 _my_rank = _MPI_union_group->myRank() ;
58 _group_size = _MPI_union_group->size() ;
59 _MPI_access = new MPIAccess( _MPI_union_group ) ;
60 _asynchronous = Asynchronous ;
// One vector of TimeMessages per target rank; the inner vectors are sized
// later in setTimeInterpolator().
61 _time_messages = new vector< vector< TimeMessage > > ;
62 _time_messages->resize( _group_size ) ;
63 _out_of_time = new vector< bool > ;
64 _out_of_time->resize( _group_size ) ;
65 _data_messages_recv_count = new vector< int > ;
66 _data_messages_recv_count->resize( _group_size ) ;
67 for ( i = 0 ; i < _group_size ; i++ )
69 (*_out_of_time)[i] = false ;
70 (*_data_messages_recv_count)[i] = 0 ;
72 _data_messages_type = new vector< MPI_Datatype > ;
73 _data_messages_type->resize( _group_size ) ;
74 _data_messages = new vector< vector< void * > > ;
75 _data_messages->resize( _group_size ) ;
76 _time_interpolator = NULL ;
// Maps each asynchronous send RequestId to its SendBuffStruct so that
// checkSent() can reference-count and free the shared send buffers.
77 _map_of_send_buffers = new map< int , SendBuffStruct * > ;
// Destructor: releases the objects allocated in the constructor and in
// setTimeInterpolator(). NOTE(review): the received data buffers referenced
// by *_data_messages appear to be freed in checkFinalRecv(), not here —
// confirm callers invoke checkFinalRecv() before destruction.
80 MPIAccessDEC::~MPIAccessDEC()
84 delete _MPI_union_group ;
86 if ( _time_interpolator )
87 delete _time_interpolator ;
89 delete _time_messages ;
92 if ( _data_messages_recv_count )
93 delete _data_messages_recv_count ;
94 if ( _data_messages_type )
95 delete _data_messages_type ;
97 delete _data_messages ;
98 if ( _map_of_send_buffers )
99 delete _map_of_send_buffers ;
// Installs (or removes) the time interpolator and sizes the per-target
// time/data message windows to nStepBefore + nStepAfter slots, each slot
// initialized to the "empty" sentinel (time = deltatime = -1, data = NULL).
102 void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
103 double InterpPrecision, int nStepBefore,
// Replacing any previously installed interpolator.
106 if ( _time_interpolator )
107 delete _time_interpolator ;
108 switch ( aTimeInterp )
110 case WithoutTimeInterp :
111 _time_interpolator = NULL ;
115 case LinearTimeInterp :
116 _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
118 _n_step_before = nStepBefore ;
119 _n_step_after = nStepAfter ;
121 for ( i = 0 ; i < _group_size ; i++ )
// One slot per interpolation step, before and after the current time.
123 (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
124 (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
126 for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ )
// time == -1 marks a slot that has not received any message yet
// (tested in allToAllTime/allToAllvTime and checkTime).
128 (*_time_messages)[ i ][ j ].time = -1 ;
129 (*_time_messages)[ i ][ j ].deltatime = -1 ;
130 (*_data_messages)[ i ][ j ] = NULL ;
138 Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
139 (Internal Protected method)
141 Returns the request identifier SendRequestId
// Internal send of sendcount items starting at sendbuf[offset] to `target`,
// returning the request identifier in SendRequestId.
144 int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset ,
145 MPI_Datatype sendtype , int target , int &SendRequestId )
// Asynchronous mode: non-blocking ISend — the buffer must stay alive until
// checkSent() observes completion (SendBuffStruct reference counting).
150 if ( sendtype == MPI_INT )
152 sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
153 target , SendRequestId ) ;
// Any non-MPI_INT datatype is indexed as double — assumes only
// MPI_INT/MPI_DOUBLE are ever passed here (TODO confirm).
157 sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
158 target , SendRequestId ) ;
// Synchronous mode: blocking send, no buffer bookkeeping required.
163 if ( sendtype == MPI_INT )
165 sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
166 target , SendRequestId ) ;
170 sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
171 target , SendRequestId ) ;
178 Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
179 (Internal Protected method)
181 Returns the request identifier RecvRequestId
// Internal receive of recvcount items into recvbuf[offset] from `target`,
// returning the request identifier in RecvRequestId. Mirrors send():
// non-blocking IRecv in asynchronous mode, blocking recv otherwise.
184 int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset ,
185 MPI_Datatype recvtype , int target , int &RecvRequestId )
190 if ( recvtype == MPI_INT )
192 sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
193 target , RecvRequestId ) ;
// Non-MPI_INT datatypes are indexed as double — same MPI_INT/MPI_DOUBLE
// assumption as in send() (TODO confirm).
197 sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
198 target , RecvRequestId ) ;
203 if ( recvtype == MPI_INT )
205 sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
206 target , RecvRequestId ) ;
210 sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
211 target , RecvRequestId ) ;
218 Send sendcount datas from sendbuf[offset] with type sendtype to target of IntraCommunicator
219 Receive recvcount datas to recvbuf[offset] with type recvtype from target of IntraCommunicator
220 (Internal Protected method)
222 Returns the request identifier SendRequestId
223 Returns the request identifier RecvRequestId
// Internal combined send/receive with one `target`: sends sendcount items
// from sendbuf[sendoffset] and receives recvcount items into
// recvbuf[recvoffset]. The body is a 2x2 dispatch on (sendtype, recvtype),
// with only MPI_INT distinguished explicitly and everything else treated
// as double (TODO confirm the MPI_INT/MPI_DOUBLE-only assumption).
226 int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset ,
227 MPI_Datatype sendtype ,
228 void* recvbuf, int recvcount , int recvoffset ,
229 MPI_Datatype recvtype , int target ,
230 int &SendRequestId , int &RecvRequestId )
// Asynchronous mode: non-blocking ISendRecv for each type combination.
235 if ( sendtype == MPI_INT )
237 if ( recvtype == MPI_INT )
239 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
240 sendtype , target , SendRequestId ,
241 &((int *) recvbuf)[recvoffset] , recvcount ,
242 recvtype , target , RecvRequestId ) ;
246 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
247 sendtype , target , SendRequestId ,
248 &((double *) recvbuf)[recvoffset] ,
249 recvcount , recvtype , target , RecvRequestId ) ;
254 if ( recvtype == MPI_INT )
256 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
257 sendtype , target , SendRequestId ,
258 &((int *) recvbuf)[recvoffset] ,
259 recvcount , recvtype , target , RecvRequestId ) ;
263 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
264 sendtype , target , SendRequestId ,
265 &((double *) recvbuf)[recvoffset] ,
266 recvcount , recvtype , target , RecvRequestId ) ;
// Synchronous mode: same dispatch, blocking sendRecv.
272 if ( sendtype == MPI_INT )
274 if ( recvtype == MPI_INT )
276 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
277 sendtype , target , SendRequestId ,
278 &((int *) recvbuf)[recvoffset] , recvcount ,
279 recvtype , target , RecvRequestId ) ;
283 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
284 sendtype , target , SendRequestId ,
285 &((double *) recvbuf)[recvoffset] ,
286 recvcount , recvtype , target , RecvRequestId ) ;
291 if ( recvtype == MPI_INT )
293 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
294 sendtype , target , SendRequestId ,
295 &((int *) recvbuf)[recvoffset] ,
296 recvcount , recvtype , target , RecvRequestId ) ;
300 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
301 sendtype , target , SendRequestId ,
302 &((double *) recvbuf)[recvoffset] ,
303 recvcount , recvtype , target , RecvRequestId ) ;
311 Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
312 Receive recvcount datas to recvbuf[offset] with type recvtype from all targets of IntraCommunicator
// All-to-all exchange: sends one sendcount-sized slice of sendbuf to each
// target and receives one recvcount-sized slice from each into recvbuf.
// Delegates to allToAllTime() when a time interpolator is installed.
315 int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
316 void* recvbuf, int recvcount, MPI_Datatype recvtype )
318 if ( _time_interpolator )
320 return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
329 //Free of SendBuffers
333 //DoSend + DoRecv : SendRecv
// In asynchronous mode one SendBuffStruct is shared by all targets; its
// counter tracks how many in-flight sends still reference sendbuf.
334 SendBuffStruct * aSendDataStruct = NULL ;
335 if ( _asynchronous && sendbuf )
337 aSendDataStruct = new SendBuffStruct ;
338 aSendDataStruct->SendBuffer = sendbuf ;
339 aSendDataStruct->Counter = 0 ;
340 aSendDataStruct->DataType = sendtype ;
342 for ( target = 0 ; target < _group_size ; target++ )
344 sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype ,
345 recvbuf , recvcount , recvoffset , recvtype ,
346 target , SendRequestId , RecvRequestId ) ;
// Register the request so checkSent() can decrement the counter and
// free sendbuf once every target's send has completed.
347 if ( _asynchronous && sendbuf && sendcount )
349 aSendDataStruct->Counter += 1 ;
350 (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
352 sendoffset += sendcount ;
353 recvoffset += recvcount ;
// Synchronous mode: sendbuf is fully consumed, free it now. This class
// takes ownership of sendbuf (it must have been allocated with new[]).
355 if ( !_asynchronous && sendbuf )
357 if ( sendtype == MPI_INT )
359 delete [] (int *) sendbuf ;
363 delete [] (double *) sendbuf ;
370 Send sendcounts[target] datas from sendbuf[sdispls[target]] with type sendtype to all targets of IntraCommunicator
371 Receive recvcounts[target] datas to recvbuf[rdispls[target]] with type recvtype from all targets of IntraCommunicator
// Vector all-to-all: sends sendcounts[target] items from
// sendbuf[sdispls[target]] to each target and receives recvcounts[target]
// items into recvbuf[rdispls[target]]. Delegates to allToAllvTime() when a
// time interpolator is installed.
374 int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls,
375 MPI_Datatype sendtype ,
376 void* recvbuf, int* recvcounts, int* rdispls,
377 MPI_Datatype recvtype )
379 if ( _time_interpolator )
381 return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
382 recvbuf, recvcounts, rdispls, recvtype ) ;
389 //Free of SendBuffers
395 //DoSend + DoRecv : SendRecv
// Shared reference-counted wrapper for sendbuf (see allToAll()).
396 SendBuffStruct * aSendDataStruct = NULL ;
397 if ( _asynchronous && sendbuf )
399 aSendDataStruct = new SendBuffStruct ;
400 aSendDataStruct->SendBuffer = sendbuf ;
401 aSendDataStruct->Counter = 0 ;
402 aSendDataStruct->DataType = sendtype ;
404 for ( target = 0 ; target < _group_size ; target++ )
// Skip targets with nothing to exchange in either direction.
406 if ( sendcounts[target] || recvcounts[target] )
408 sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
409 recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
410 target , SendRequestId , RecvRequestId ) ;
// Only count the request when something was actually sent.
411 if ( _asynchronous && sendbuf && sendcounts[target])
413 aSendDataStruct->Counter += 1 ;
414 (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
// Synchronous mode: this class owns sendbuf and frees it here.
418 if ( !_asynchronous && sendbuf )
420 if ( sendtype == MPI_INT )
422 delete [] (int *) sendbuf ;
426 delete [] (double *) sendbuf ;
433 MPIAccessDEC and the management of SendBuffers :
434 =================================================
436 . In the collective communications collectives we send only parts of
437 the same buffer to each "target". So in asynchronous mode it is
438 necessary that all parts are free before to delete/free the
441 . We assume that buffers are allocated with a new double[]. so a
444 . The structure SendBuffStruct permits to keep the address of the buffer
445 and to manage a reference counter of that buffer. It contains
446 also MPI_Datatype for the delete [] (double *) ... when the counter
449 . The map _MapOfSendBuffers establishes the correspondence between each
450 RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
451 for each "target" of a part of the buffer.
453 . All that concerns only asynchronous Send. In synchronous mode,
454 we delete sendbuf just after the Send.
458 MPIAccessDEC and the management of RecvBuffers :
459 =================================================
461 If there is no interpolation, no special action is done.
463 With interpolation for each target :
464 ------------------------------------
465 . We have _time_messages[target] which is a vector of TimesMessages.
466 We have 2 TimesMessages in our case with a linear interpolation.
467 They contain the previous time(t0)/deltatime and the last
470 . We have _data_messages[target] which is a vector of DatasMessages.
471 We have 2 DatasMessages in our case with a linear interpolation.
472 They contain the previous datas at time(t0)/deltatime and at last
475 . At time _t(t*) of current processus we do the interpolation of
476 the values of the 2 DatasMessages which are returned in the part of
477 recvbuf corresponding to the target with t0 < t* <= t1.
479 . Because of the difference of "deltatimes" between processes, we
480 may have t0 < t1 < t* and there is an extrapolation.
482 . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
483 contain for each target true if t* > last t1, recvcount and
484 MPI_Datatype for the finalize of messages at the end.
488 Send a TimeMessage to all targets of IntraCommunicator
489 Receive the TimeMessages from targets of IntraCommunicator if necessary.
491 Send sendcount datas from sendbuf[offset] with type sendtype to all targets of IntraCommunicator
492 Returns recvcount datas to recvbuf[offset] with type recvtype after an interpolation
493 with datas received from all targets of IntraCommunicator.
// Time-stamped all-to-all: sends a TimeMessage (current time _t and step _dt)
// plus a sendcount-sized slice of sendbuf to every target, then for each
// target advances the received time window (checkTime) and interpolates the
// two buffered data messages into recvbuf at the current time _t.
496 int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
497 void* recvbuf, int recvcount , MPI_Datatype recvtype )
502 int SendTimeRequestId ;
503 int SendDataRequestId ;
// A time interpolator is mandatory for this code path.
505 if ( _time_interpolator == NULL )
507 return MPI_ERR_OTHER ;
510 //Free of SendBuffers
516 //DoSend : Time + SendBuff
// Two reference-counted wrappers: one for the TimeMessage, one for sendbuf;
// both are shared across all targets (see checkSent()).
517 SendBuffStruct * aSendTimeStruct = NULL ;
518 SendBuffStruct * aSendDataStruct = NULL ;
519 if ( sendbuf && sendcount )
521 TimeMessage * aSendTimeMessage = new TimeMessage ;
524 aSendTimeStruct = new SendBuffStruct ;
525 aSendTimeStruct->SendBuffer = aSendTimeMessage ;
526 aSendTimeStruct->Counter = 0 ;
527 aSendTimeStruct->DataType = _MPI_access->timeType() ;
528 aSendDataStruct = new SendBuffStruct ;
529 aSendDataStruct->SendBuffer = sendbuf ;
530 aSendDataStruct->Counter = 0 ;
531 aSendDataStruct->DataType = sendtype ;
// The TimeMessage carries this process's current time and time step.
533 aSendTimeMessage->time = _t ;
534 aSendTimeMessage->deltatime = _dt ;
535 for ( target = 0 ; target < _group_size ; target++ )
537 sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
538 SendTimeRequestId ) ;
539 sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ;
// Register both requests for later buffer reclamation in checkSent().
542 aSendTimeStruct->Counter += 1 ;
543 (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
544 aSendDataStruct->Counter += 1 ;
545 (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
547 sendoffset += sendcount ;
// Synchronous mode: sends completed, free the message and the buffer now.
549 if ( !_asynchronous )
551 delete aSendTimeMessage ;
552 if ( sendtype == MPI_INT )
554 delete [] (int *) sendbuf ;
558 delete [] (double *) sendbuf ;
563 //CheckTime + DoRecv + DoInterp
564 if ( recvbuf && recvcount )
566 for ( target = 0 ; target < _group_size ; target++ )
568 int recvsize = recvcount*_MPI_access->extent( recvtype ) ;
569 checkTime( recvcount , recvtype , target , false ) ;
570 //===========================================================================
571 //TODO : it is assumed actually that we have only 1 timestep before and after
572 //===========================================================================
// time != -1 means at least one prior message exists, so slots [0]
// (previous) and [1] (last) bracket a usable interpolation interval.
573 if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
575 if ( (*_out_of_time)[target] )
// t* is beyond the target's last time: this is an extrapolation.
577 cout << " =====================================================" << endl
578 << "Recv" << _my_rank << " <-- target " << target << " t0 "
579 << (*_time_messages)[target][0].time << " < t1 "
580 << (*_time_messages)[target][1].time << " < t* " << _t << endl
581 << " =====================================================" << endl ;
583 if ( recvtype == MPI_INT )
585 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
586 (*_time_messages)[target][1].time, _t, recvcount ,
587 _n_step_before, _n_step_after,
588 (int **) &(*_data_messages)[target][0],
589 (int **) &(*_data_messages)[target][1],
590 &((int *)recvbuf)[target*recvcount] ) ;
594 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
595 (*_time_messages)[target][1].time, _t, recvcount ,
596 _n_step_before, _n_step_after,
597 (double **) &(*_data_messages)[target][0],
598 (double **) &(*_data_messages)[target][1],
599 &((double *)recvbuf)[target*recvcount] ) ;
// First time step for this target: no interval yet, copy the last
// received data message verbatim into recvbuf.
604 char * buffdest = (char *) recvbuf ;
605 char * buffsrc = (char *) (*_data_messages)[target][1] ;
606 memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
// Vector variant of allToAllTime(): sends a TimeMessage plus
// sendcounts[target] items from sendbuf[sdispls[target]] to each target, then
// interpolates each target's buffered data into recvbuf[rdispls[target]].
614 int MPIAccessDEC::allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
615 MPI_Datatype sendtype ,
616 void* recvbuf, int* recvcounts, int* rdispls,
617 MPI_Datatype recvtype )
621 int SendTimeRequestId ;
622 int SendDataRequestId ;
// A time interpolator is mandatory for this code path.
624 if ( _time_interpolator == NULL )
626 return MPI_ERR_OTHER ;
629 //Free of SendBuffers
637 + We create a TimeMessage (look at that structure in MPI_Access).
638 + If we are in asynchronous mode, we create two structures SendBuffStruct
639 aSendTimeStruct and aSendDataStruct that we fill.
640 + We fill the structure aSendTimeMessage with time/deltatime of
641 the current process. "deltatime" must be nul if it is the last step of
643 + After that for each "target", we Send the TimeMessage and the part
644 of sendbuf corresponding to that target.
645 + If we are in asynchronous mode, we increment the counter and we add
646 aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the
647 identifiers SendTimeRequestId and SendDataRequestId returned by
648 MPI_Access->Send(...).
649 + And if we are in synchronous mode we delete the SendMessages.
651 //DoSend : Time + SendBuff
652 SendBuffStruct * aSendTimeStruct = NULL ;
653 SendBuffStruct * aSendDataStruct = NULL ;
656 TimeMessage * aSendTimeMessage = new TimeMessage ;
659 aSendTimeStruct = new SendBuffStruct ;
660 aSendTimeStruct->SendBuffer = aSendTimeMessage ;
661 aSendTimeStruct->Counter = 0 ;
662 aSendTimeStruct->DataType = _MPI_access->timeType() ;
663 aSendDataStruct = new SendBuffStruct ;
664 aSendDataStruct->SendBuffer = sendbuf ;
665 aSendDataStruct->Counter = 0 ;
666 aSendDataStruct->DataType = sendtype ;
668 aSendTimeMessage->time = _t ;
669 aSendTimeMessage->deltatime = _dt ;
670 for ( target = 0 ; target < _group_size ; target++ )
// Only targets with data to send get a TimeMessage + data message pair.
672 if ( sendcounts[target] )
674 sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
675 SendTimeRequestId ) ;
676 sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
677 SendDataRequestId ) ;
// Register both requests for reclamation in checkSent().
680 aSendTimeStruct->Counter += 1 ;
681 (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
682 aSendDataStruct->Counter += 1 ;
683 (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
// Synchronous mode: sends completed, free the message and the buffer now.
687 if ( !_asynchronous )
689 delete aSendTimeMessage ;
690 if ( sendtype == MPI_INT )
692 delete [] (int *) sendbuf ;
696 delete [] (double *) sendbuf ;
702 . CheckTime + DoRecv + DoInterp
703 + For each target we call CheckTime
704 + If there is a TimeInterpolator and if the TimeMessage of the target
705 is not the first, we call the interpolator which return its
706 results in the part of the recv buffer corresponding to the "target".
707 + If not, there is a copy of received datas for that first step of time
708 in the part of the recv buffer corresponding to the "target".
710 //CheckTime + DoRecv + DoInterp
713 for ( target = 0 ; target < _group_size ; target++ )
715 if ( recvcounts[target] )
717 int recvsize = recvcounts[target]*_MPI_access->extent( recvtype ) ;
718 checkTime( recvcounts[target] , recvtype , target , false ) ;
719 //===========================================================================
720 //TODO : it is assumed actually that we have only 1 timestep before and after
721 //===========================================================================
// time != -1 means a previous message exists: slots [0]/[1] bracket a
// usable interpolation interval for this target.
722 if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
724 if ( (*_out_of_time)[target] )
// t* is beyond the target's last time: extrapolation case.
726 cout << " =====================================================" << endl
727 << "Recv" << _my_rank << " <-- target " << target << " t0 "
728 << (*_time_messages)[target][0].time << " < t1 "
729 << (*_time_messages)[target][1].time << " < t* " << _t << endl
730 << " =====================================================" << endl ;
732 if ( recvtype == MPI_INT )
734 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
735 (*_time_messages)[target][1].time, _t,
736 recvcounts[target] , _n_step_before, _n_step_after,
737 (int **) &(*_data_messages)[target][0],
738 (int **) &(*_data_messages)[target][1],
739 &((int *)recvbuf)[rdispls[target]] ) ;
743 _time_interpolator->doInterp( (*_time_messages)[target][0].time,
744 (*_time_messages)[target][1].time, _t,
745 recvcounts[target] , _n_step_before, _n_step_after,
746 (double **) &(*_data_messages)[target][0],
747 (double **) &(*_data_messages)[target][1],
748 &((double *)recvbuf)[rdispls[target]] ) ;
// First time step for this target: copy the last received data
// message verbatim into the target's slice of recvbuf.
753 char * buffdest = (char *) recvbuf ;
754 char * buffsrc = (char *) (*_data_messages)[target][1] ;
755 memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc ,
766 . CheckTime(recvcount , recvtype , target , UntilEnd)
767 + At the beginning, we read the first TimeMessage in
768 &(*_TimeMessages)[target][1] and the first DataMessage
769 in the allocated buffer (*_DataMessages)[target][1].
770 + deltatime of TimesMessages must be nul if it is the last one.
771 + While : _t(t*) is the current time of the processus.
772 "while _t(t*) is greater than the time of the "target"
773 (*_TimeMessages)[target][1].time and
774 (*_TimeMessages)[target][1].deltatime is not nul",
775 So at the end of the while we have :
776 _t(t*) <= (*_TimeMessages)[target][1].time with
777 _t(t*) > (*_TimeMessages)[target][0].time
778 or we have the last TimeMessage of the "target".
779 + If it is the finalization of the recv of TimeMessages and
780 DataMessages (UntilEnd value is true), we execute the while
781 until (*_TimeMessages)[target][1].deltatime is nul.
783 We copy the last TimeMessage in the previous TimeMessage and
784 we read a new TimeMessage
785 We delete the previous DataMessage.
786 We copy the last DataMessage pointer in the previous one.
787 We allocate a new last DataMessage buffer
788 (*_DataMessages)[target][1] and we read the corresponding
790 + If the current time of the current process is greater than the
791 last time (*_TimeMessages)[target][1].time du target, we give
792 a true value to (*_OutOfTime)[target].
793 (*_TimeMessages)[target][1].deltatime is nul.
// Advances the received time/data window for `target` until the local time _t
// falls inside ( (*_time_messages)[target][0].time,
// (*_time_messages)[target][1].time ], or the target's final message
// (deltatime == 0) is reached. With UntilEnd true, drains every remaining
// message (used by checkFinalRecv()). Sets (*_out_of_time)[target] when _t is
// beyond the target's last time.
795 int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target ,
798 int sts = MPI_SUCCESS ;
799 int RecvTimeRequestId ;
800 int RecvDataRequestId ;
801 //We search for _time_messages[target][0] < _t <= _time_messages[target][1]
802 //===========================================================================
803 //TODO : it is assumed actually that we have only 1 timestep before and after
804 // instead of _n_step_before and _n_step_after ...
805 //===========================================================================
// Remember the count/type so checkFinalRecv() can drain and free later.
806 (*_data_messages_recv_count)[target] = recvcount ;
807 (*_data_messages_type)[target] = recvtype ;
// First call for this target (slot [1] still at the -1 sentinel):
// receive the initial TimeMessage and allocate/receive the first data buffer.
808 if ( (*_time_messages)[target][1].time == -1 )
810 (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
811 sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
812 target , RecvTimeRequestId ) ;
813 (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
814 if ( recvtype == MPI_INT )
816 (*_data_messages)[target][1] = new int[recvcount] ;
820 (*_data_messages)[target][1] = new double[recvcount] ;
822 sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
823 RecvDataRequestId ) ;
// Slide the window forward: while _t is ahead of the target's last time
// (or we are draining with UntilEnd) and more messages are coming
// (deltatime != 0), shift [1] -> [0] and receive the next pair.
827 while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) &&
828 (*_time_messages)[target][1].deltatime != 0 )
830 (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
831 sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
832 target , RecvTimeRequestId ) ;
835 cout << "CheckTime" << _my_rank << " TimeMessage target " << target
836 << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag "
837 << _MPI_access->recvMPITag(target) << endl ;
// Free the superseded previous data buffer before reusing the slot.
839 if ( recvtype == MPI_INT )
841 delete [] (int *) (*_data_messages)[target][0] ;
845 delete [] (double *) (*_data_messages)[target][0] ;
847 (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
848 if ( recvtype == MPI_INT )
850 (*_data_messages)[target][1] = new int[recvcount] ;
854 (*_data_messages)[target][1] = new double[recvcount] ;
856 sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
857 RecvDataRequestId ) ;
860 cout << "CheckTime" << _my_rank << " DataMessage target " << target
861 << " RecvDataRequestId " << RecvDataRequestId << " MPITag "
862 << _MPI_access->recvMPITag(target) << endl ;
// _t inside (t0, t1]: the window now brackets the local time.
866 if ( _t > (*_time_messages)[target][0].time &&
867 _t <= (*_time_messages)[target][1].time )
// Otherwise _t is past the target's final message: mark extrapolation.
872 (*_out_of_time)[target] = true ;
880 + call SendRequestIds of MPI_Access in order to get all
881 RequestIds of SendMessages of all "targets".
882 + For each RequestId, CheckSent call "Test" of MPI_Access in order
883 to know if the buffer is "free" (flag = true). If it is the
884 FinalCheckSent (WithWait = true), we call Wait instead of Test.
885 + If the buffer is "free", the counter of the structure SendBuffStruct
886 (from _MapOfSendBuffers) is decremented.
887 + If that counter is nul we delete the TimeMessage or the
888 SendBuffer according to the DataType.
889 + And we delete the structure SendBuffStruct before the suppression
890 (erase) of that item of _MapOfSendBuffers
// Scans all outstanding send requests; completed ones decrement the reference
// counter of their SendBuffStruct, and when a counter reaches zero the shared
// send buffer (TimeMessage or data array, per DataType) and its wrapper are
// freed and erased from _map_of_send_buffers. With WithWait true (final
// check) each request is waited on instead of tested.
892 int MPIAccessDEC::checkSent(bool WithWait)
894 int sts = MPI_SUCCESS ;
895 int flag = WithWait ;
896 int size = _MPI_access->sendRequestIdsSize() ;
897 int * ArrayOfSendRequests = new int[ size ] ;
898 int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ;
899 bool SendTrace = false ;
901 for ( i = 0 ; i < nSendRequest ; i++ )
905 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
906 << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
907 << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
908 << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
// Final check: block until the request completes.
909 sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
// Normal check: non-blocking test; flag is set when complete.
913 sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ;
// Request completed: release it and drop one reference on its buffer.
917 _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ;
920 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
921 << " SendRequestId " << ArrayOfSendRequests[i]
923 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
924 << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
927 (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
930 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
931 _MPI_access->timeType() )
933 cout << "CheckTimeSent" << _my_rank << " Request " ;
937 cout << "CheckDataSent" << _my_rank << " Request " ;
939 cout << ArrayOfSendRequests[i]
940 << " _map_of_send_buffers->SendBuffer "
941 << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
942 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
// Last reference gone: free the buffer with the delete matching its type.
945 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter == 0 )
949 cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i]
950 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
951 << " flag " << flag << " SendBuffer "
952 << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
953 << " deleted. Erase in _map_of_send_buffers :" << endl ;
955 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
956 _MPI_access->timeType() )
958 delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
962 if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT )
964 delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
968 delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
// Free the wrapper itself, then remove the map entry.
971 delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ;
975 cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId "
976 << ArrayOfSendRequests[i] << endl ;
978 (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ;
980 else if ( SendTrace )
982 cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
983 << " SendRequestId " << ArrayOfSendRequests[i]
985 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
986 << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
992 _MPI_access->check() ;
994 delete [] ArrayOfSendRequests ;
// Finalization of receives: when a time interpolator is installed, drains the
// remaining time/data messages of every target (checkTime with UntilEnd) and
// frees both buffered data messages, using the per-target count/type recorded
// by checkTime().
998 int MPIAccessDEC::checkFinalRecv()
1000 int sts = MPI_SUCCESS ;
1001 if ( _time_interpolator )
1004 for ( target = 0 ; target < _group_size ; target++ )
1006 if ( (*_data_messages)[target][0] != NULL )
1008 sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] ,
// Free slot [0] with the delete matching its recorded datatype.
1010 if ( (*_data_messages_type)[target] == MPI_INT )
1012 delete [] (int *) (*_data_messages)[target][0] ;
1016 delete [] (double *) (*_data_messages)[target][0] ;
1018 (*_data_messages)[target][0] = NULL ;
// Free slot [1] the same way, then clear the pointer.
1019 if ( (*_data_messages)[target][1] != NULL )
1021 if ( (*_data_messages_type)[target] == MPI_INT )
1023 delete [] (int *) (*_data_messages)[target][1] ;
1027 delete [] (double *) (*_data_messages)[target][1] ;
1029 (*_data_messages)[target][1] = NULL ;
1037 ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod )
1039 switch (interpolationmethod)
1041 case WithoutTimeInterp :
1042 f << " WithoutTimeInterpolation ";
1044 case LinearTimeInterp :
1045 f << " LinearTimeInterpolation ";
1048 f << " UnknownTimeInterpolation ";