-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2019 CEA/DEN, EDF R&D
//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include "MPIAccessDEC.hxx"
+#include <cstring>
+
using namespace std;
-namespace ParaMEDMEM
+namespace MEDCoupling
{
/*!
{
procs.insert(i) ;
}
- MPIProcessorGroup *mpilg = (MPIProcessorGroup *)&source_group;
- _MPI_union_group = new ParaMEDMEM::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
+ MPIProcessorGroup *mpilg = static_cast<MPIProcessorGroup *>(const_cast<ProcessorGroup *>(&source_group));
+ _MPI_union_group = new MEDCoupling::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
delete union_group ;
_my_rank = _MPI_union_group->myRank() ;
_group_size = _MPI_union_group->size() ;
_data_messages->resize( _group_size ) ;
_time_interpolator = NULL ;
_map_of_send_buffers = new map< int , SendBuffStruct * > ;
- cout << "MPIAccessDEC" << _my_rank << " Asynchronous " << _asynchronous << endl ;
}
MPIAccessDEC::~MPIAccessDEC()
double InterpPrecision, int nStepBefore,
int nStepAfter )
{
- cout << "MPIAccessDEC::SetTimeInterpolator" << _my_rank << " Asynchronous "
- << _asynchronous << " TimeInterpolationMethod " << aTimeInterp
- << " InterpPrecision " << InterpPrecision << " nStepBefore " << nStepBefore
- << " nStepAfter " << nStepAfter << endl ;
if ( _time_interpolator )
delete _time_interpolator ;
switch ( aTimeInterp )
}
else
{
- cout << "SendRecv" << _my_rank << " target " << target << " sendbuf "
- << &((double *) sendbuf)[sendoffset] << " sendcount " << sendcount
- << " recvbuf " << &((double *) recvbuf)[recvoffset] << " recvcount "
- << recvcount << endl ;
sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
sendtype , target , SendRequestId ,
&((double *) recvbuf)[recvoffset] ,
. We assume that buffers are allocated with a new double[]. so a
delete [] is done.
- . The structure SendBuffStruct permit to keep the adress of the buffer
+  . The structure SendBuffStruct makes it possible to keep the address of the buffer
and to manage a reference counter of that buffer. It contains
also MPI_Datatype for the delete [] (double *) ... when the counter
is null.
- . The map _MapOfSendBuffers etablish the correspondance between each
+  . The map _MapOfSendBuffers establishes the correspondence between each
RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
for each "target" of a part of the buffer.
int recvsize = recvcount*_MPI_access->extent( recvtype ) ;
checkTime( recvcount , recvtype , target , false ) ;
//===========================================================================
- //TODO : it is assumed actually that we have only 1 timestep before nad after
+  //TODO : it is currently assumed that we have only 1 timestep before and after
//===========================================================================
if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
{
{
if ( WithWait )
{
- cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
- << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
- << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
- << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
+ if (SendTrace)
+ {
+ cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
+ << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
+ << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
+ << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
+ }
sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
}
else