-// Copyright (C) 2007-2014 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2020 CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
using namespace std;
-namespace ParaMEDMEM
+namespace MEDCoupling
{
- /*! \defgroup explicitcoincidentdec ExplicitCoincidentDEC
+
+ /*!
+ \anchor ExplicitCoincidentDEC-det
+ \class ExplicitCoincidentDEC
+
+
+ This class aims at \ref interpolation "remapping fields" that have identical
+ supports (=the same underlying mesh) but different parallel topologies
+ (=different sub-domains in the mesh). It can be used to couple
+ together multi-physics codes that operate on the same domain
+ with different partitioning.
+
+ It is very similar to what the \ref StructuredCoincidentDEC-det "StructuredCoincidentDEC"
+ does, except that it works with an arbitrary user-defined topology.
+
+ The remapping between the two supports is based on identity of global
+ ids, instead of geometrical considerations (as it is the case for
+ \ref InterpKernelDEC-det "InterpKernelDEC").
+ Therefore, beware that this \ref para-dec "DEC" cannot be used
+ for coincident meshes if they do *not* have the exact same numbering.
+
+ With this \ref para-dec "DEC", no projection and no interpolation of the field data are done, contrary
+ to what happens in \ref InterpKernelDEC-det "InterpKernelDEC". It is just
+ a matter of allocating the values from one side to the other, using directly the cell
+ identifiers.
+
+ As with all the other DECs, its usage requires two phases:
+ - a setup phase during which the topologies are exchanged so that
+ the target side knows from which processors it should expect
+ the data.
+ - a send/recv phase during which the field data is actually transferred.
+
+ This example illustrates the sending of a field with
+ the \c ExplicitCoincidentDEC :
+ \code
+ ...
+ ExplicitCoincidentDEC dec(groupA, groupB);
+ dec.attachLocalField(field);
+ dec.synchronize();
+ if (groupA.containsMyRank())
+ dec.recvData();
+ else if (groupB.containsMyRank())
+ dec.sendData();
+ ...
+ \endcode
+
+ Creating a ParaFIELD to be attached to the %DEC is done in exactly the same way as for
+ the other DECs; the only difference lies in the partitioning of the support mesh.
+ In the case where the
+ fields have also different *component* topologies, creating the ParaFIELD
+ requires some more effort. See the \ref para-over "parallelism" section for more details.
+ */
+
+
+ /*! Constructor
*/
- ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0)
+ ExplicitCoincidentDEC::ExplicitCoincidentDEC():
+ _toposource(0),_topotarget(0),
+ _targetgroup(0), _sourcegroup(0),
+ _sendcounts(0), _recvcounts(0),
+ _senddispls(0), _recvdispls(0),
+ _recvbuffer(0), _sendbuffer(0),
+ _distant_elems(), _explicit_mapping()
{
}
{
}
-
- /*!
- \addtogroup explicitcoincidentdec
- @{
- */
-
/*! Synchronization process for exchanging topologies
*/
void ExplicitCoincidentDEC::synchronize()
vector<int>* target_arrays=new vector<int>[target_size];
- int nb_local = _toposource-> getNbLocalElements();
+ mcIdType nb_local = _toposource-> getNbLocalElements();
- int union_size=group->size();
+ std::size_t union_size=group->size();
_sendcounts=new int[union_size];
_senddispls=new int[union_size];
_recvcounts=new int[union_size];
_recvdispls=new int[union_size];
- for (int i=0; i< union_size; i++)
+ for (std::size_t i=0; i< union_size; i++)
{
_sendcounts[i]=0;
_recvcounts[i]=0;
int* counter=new int [target_size];
counter[0]=0;
for (int i=1; i<target_size; i++)
- counter[i]=counter[i-1]+target_arrays[i-1].size();
+ counter[i]=counter[i-1]+(int)target_arrays[i-1].size();
const double* value = _local_field->getField()->getArray()->getPointer();
return;
MPIProcessorGroup* group=new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface());
- vector < vector <int> > source_arrays(_sourcegroup->size());
- int nb_local = _topotarget-> getNbLocalElements();
- for (int ielem=0; ielem< nb_local ; ielem++)
+ vector < vector <mcIdType> > source_arrays(_sourcegroup->size());
+ mcIdType nb_local = _topotarget-> getNbLocalElements();
+ for (mcIdType ielem=0; ielem< nb_local ; ielem++)
{
- //pair<int,int> source_local =_distant_elems[ielem];
- pair <int,int> source_local=_explicit_mapping.getDistantNumbering(ielem);
- source_arrays[source_local.first].push_back(source_local.second);
+ //pair<int,mcIdType> source_local =_distant_elems[ielem];
+ pair <int,mcIdType> source_local=_explicit_mapping.getDistantNumbering(ielem);
+ source_arrays[source_local.first].push_back(source_local.second);
}
- int union_size=group->size();
+ std::size_t union_size=group->size();
_recvcounts=new int[union_size];
_recvdispls=new int[union_size];
_sendcounts=new int[union_size];
_senddispls=new int[union_size];
- for (int i=0; i< union_size; i++)
+ for (std::size_t i=0; i< union_size; i++)
{
_sendcounts[i]=0;
_recvcounts[i]=0;
{
//converts the rank in target to the rank in union communicator
int unionrank=group->translateRank(_sourcegroup,iproc);
- _recvcounts[unionrank]=source_arrays[iproc].size()*_topotarget->getNbComponents();
+ _recvcounts[unionrank]=(int)(source_arrays[iproc].size()*_topotarget->getNbComponents());
}
- for (int i=1; i<union_size; i++)
+ for (std::size_t i=1; i<union_size; i++)
_recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];
_recvbuffer=new double[nb_local*_topotarget->getNbComponents()];
/*!
- * Synchronizing a topology so that all the
- * group possesses it.
+ * Synchronizing a topology so that all the groups get it.
*
* \param toposend Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
* \param toporecv Topology which is received.
{
MPI_Status status;
- int* serializer=0;
- int size;
+ mcIdType* serializer=0;
+ mcIdType size;
MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
int itarget=iproc;
if (!toposend->getProcGroup()->contains(itarget))
{
- _comm_interface->send(&size,1,MPI_INT, itarget,tag+itarget,*(group->getComm()));
- _comm_interface->send(serializer, size, MPI_INT, itarget, tag+itarget,*(group->getComm()));
+ _comm_interface->send(&size,1,MPI_ID_TYPE, itarget,tag+itarget,*(group->getComm()));
+ _comm_interface->send(serializer, (int)size, MPI_ID_TYPE, itarget, tag+itarget,*(group->getComm()));
}
}
}
else
{
- vector <int> size (group->size());
+ vector <int> size2(group->size());
int myworldrank=group->myRank();
for (int iproc=0; iproc<group->size();iproc++)
{
int isource = iproc;
if (!toporecv->getProcGroup()->contains(isource))
{
- int nbelem;
- _comm_interface->recv(&nbelem, 1, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status);
- int* buffer = new int[nbelem];
- _comm_interface->recv(buffer, nbelem, MPI_INT, isource,tag+myworldrank, *(group->getComm()), &status);
+ mcIdType nbelem;
+ _comm_interface->recv(&nbelem, 1, MPI_ID_TYPE, isource, tag+myworldrank, *(group->getComm()), &status);
+ mcIdType* buffer = new mcIdType[nbelem];
+ _comm_interface->recv(buffer, (int)nbelem, MPI_ID_TYPE, isource,tag+myworldrank, *(group->getComm()), &status);
ExplicitTopology* topotemp=new ExplicitTopology();
topotemp->unserialize(buffer, *_comm_interface);
delete[] buffer;
- for (int ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
+ for (mcIdType ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
{
- int global = toporecv->localToGlobal(ielem);
- int sendlocal=topotemp->globalToLocal(global);
+ mcIdType global = toporecv->localToGlobal(ielem);
+ mcIdType sendlocal=topotemp->globalToLocal(global);
if (sendlocal!=-1)
{
- size[iproc]++;
+ size2[iproc]++;
_explicit_mapping.pushBackElem(make_pair(iproc,sendlocal));
}
}
}
_comm_interface->allToAll(nb_transfer_union, 1, MPI_INT, dummy_recv, 1, MPI_INT, MPI_COMM_WORLD);
- int* sendbuffer= _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());
+ mcIdType* sendbuffer= _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());
int* sendcounts= new int [world_size];
int* senddispls = new int [world_size];
recvcounts[i]=0;
recvdispls[i]=0;
}
- _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_INT, dummyrecv, recvcounts, senddispls, MPI_INT, MPI_COMM_WORLD);
+ _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_ID_TYPE, dummyrecv, recvcounts, senddispls, MPI_ID_TYPE, MPI_COMM_WORLD);
}
//receiving in the source subdomains the mapping sent by targets
int* targetranks = new int[ nbtarget];
for (int i=0; i<nbtarget; i++)
targetranks[i]=group->translateRank(_targetgroup,i);
- int* mappingbuffer= new int [total_size*2];
+ mcIdType* mappingbuffer= new mcIdType [total_size*2];
int* sendcounts= new int [world_size];
int* senddispls = new int [world_size];
int* recvcounts=new int[world_size];
sendcounts[i]=0;
senddispls[i]=0;
}
- _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_INT, mappingbuffer, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
+ _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_ID_TYPE, mappingbuffer, recvcounts, recvdispls, MPI_ID_TYPE, MPI_COMM_WORLD);
_explicit_mapping.unserialize(world_size,nb_transfer_union,nbtarget, targetranks, mappingbuffer);
}
}
_comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
_recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
cout<<"end AllToAll"<<endl;
- int nb_local = _topotarget->getNbLocalElements();
+ mcIdType nb_local = _topotarget->getNbLocalElements();
double* value=new double[nb_local*_topotarget->getNbComponents()];
vector<int> counters(_sourcegroup->size());
value[ielem*ncomp+icomp]=_recvbuffer[counters[iproc]*ncomp+icomp];
counters[iproc]++;
}
- _local_field->getField()->getArray()->useArray(value,true,CPP_DEALLOC,nb_local,_topotarget->getNbComponents());
+ _local_field->getField()->getArray()->useArray(value,true,DeallocType::CPP_DEALLOC,nb_local,_topotarget->getNbComponents());
}
void ExplicitCoincidentDEC::sendData()
_comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
_recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
}
- /*!
- @}
- */
}