nbcomp=0;
return nbcomp;
}
+
+int ComponentTopology::firstLocalComponent() const{
+ if (_proc_group==0) return 0;
+
+ int icomp;
+ int myrank = _proc_group->myRank();
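+ // myRank() returns -1 when the local processor does not belong to the group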
+ if (myrank!=-1)
+ icomp = component_array[myrank];
+ else
+ icomp=-1;
+ return icomp;
+}
}
int nbComponents() const {return component_array[component_array.size()-1];}
//!returns the number of MED components on local processor
int nbLocalComponents() const ;
+ //!returns the number of the first MED component on local processor
+ int firstLocalComponent() const ;
//!returns the number of blocks in the topology
int nbBlocks()const {return component_array.size()-1;}
//!returns the block structure
return;
MPIProcessorGroup* group=new MPIProcessorGroup(_sourcegroup->getCommInterface());
- int myranksource = _sourcegroup->myRank();
-
// Warning : the size of the target side is implicitly deduced
//from the size of MPI_COMM_WORLD
int target_size = _toposource->getProcGroup()->getCommInterface().worldSize()- _toposource->getProcGroup()->size() ;
else
{
vector <int> size (group->size());
- int myrank=toporecv->getProcGroup()->myRank();
int myworldrank=group->myRank();
for (int iproc=0; iproc<group->size();iproc++)
{
cout<<"end AllToAll"<<endl;
int nb_local = _topotarget->getNbLocalElements();
double* value=new double[nb_local*_topotarget->getNbComponents()];
- int myranktarget=_topotarget->getProcGroup()->myRank();
+
vector<int> counters(_sourcegroup->size());
counters[0]=0;
for (int i=0; i<_sourcegroup->size()-1; i++)
0 & 0 & 0.92 & 0.05 \\
\end{tabular}
\f]
-*/
+
* \section intersectiondec_options Options
MEDMEM::MESH* mesh = _local_field->getField()->getSupport()->getMesh();
fvm_nodal_t* source_nodal = ParaMEDMEM::medmemMeshToFVMMesh(mesh);
- const ProcessorGroup* proc_group = _local_field->getTopology()->getProcGroup();
int target_size = _target_group->size() ;
int start_rank= _source_group->size();
//const MPI_Comm* comm = (dynamic_cast<const MPIProcessorGroup*> (_source_group))->getComm();
nbcells,
NULL,
coords);
- int nb_internal_points= fvm_locator_get_n_interior(_locator);
- int nb_exterior_points= fvm_locator_get_n_exterior(_locator);
}
}
using namespace MEDMEM;
+
namespace ParaMEDMEM
{
- /*! Constructing a \c ParaFIELD from a \c ParaSUPPORT and a \c ComponentTopology.
- This constructor creates an empty field based on the ParaSUPPORT description
+
+/*!
+\defgroup parafield ParaFIELD
+This class encapsulates parallel fields. It basically encapsulates
+a MEDMEM::FIELD<double> with extra information related to parallel
+topology.
+It is most conveniently created by giving a pointer to a MEDMEM::FIELD<double>
+object and a \c ProcessorGroup.
+By default, a ParaFIELD object will be constructed with all field components
+located on the same processors. In some specific cases, it might be
+necessary to scatter components over several processors. In this case,
+the constructor using a ComponentTopology is required.
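+
+A minimal creation sketch (here \c field is assumed to be an existing
+MEDMEM::FIELD<double>* and \c group an existing \c ProcessorGroup):
+\verbatim
+ParaFIELD parafield(field, group); // all components on the same processors
+\endverbatim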
+
+@{ */
+
+/*!
+
+\brief Constructing a \c ParaFIELD from a \c ParaSUPPORT and a \c ComponentTopology.
+
+This constructor creates an empty field based on the ParaSUPPORT description
and the partitioning of components described in \a component_topology.
It takes ownership over the \c _field object that it creates.
-
- */
+Here are the three ComponentTopology constructors:
+\verbatim
+ComponentTopology c; // one component in the field
+ComponentTopology c(6); //six components, all of them on the same processor
+ComponentTopology c(6, proc_group); // six components, evenly distributed over the processors of proc_group
+\endverbatim
+
+*/
ParaFIELD::ParaFIELD(const ParaSUPPORT* para_support, const ComponentTopology& component_topology)
:_support(para_support),
_component_topology(component_topology),
delete[] compunit;
}
-/*! Constructor creating the ParaFIELD
+/*! \brief Constructor creating the ParaFIELD
from a given FIELD and a processor group.
This constructor supposes that support underlying \a subdomain_field has no ParaSUPPORT
-attached and it therefore recreates one. It therefore takes ownership over _support.
+attached and therefore recreates one, taking ownership over \c _support. The component topology associated with the field is a basic one, with all components located on the same processor.
*/
+
ParaFIELD::ParaFIELD(MEDMEM::FIELD<double>* subdomain_field, const ProcessorGroup& proc_group):
_field(subdomain_field),
_support(new UnstructuredParaSUPPORT(subdomain_field->getSupport(), proc_group)),
}
+ /*! Writes the contents of the field into a MED v2.3 file \a filename:
+- \c filename is the name of the master file;
+- the files containing the subdomains are named \c filename1.med, \c filename2.med, etc.
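+
+A minimal call sketch (the file, mesh, and object names are illustrative;
+\c MED_DRIVER is assumed as the MEDMEM driver type):
+\verbatim
+parafield.write(MED_DRIVER, "fieldoutput.med", "Mesh");
+\endverbatim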
+ */
void ParaFIELD::write(MEDMEM::driverTypes driverType, const string& fileName, const string& meshName){
// Topology* topo = dynamic_cast<BlockTopology*> (_topology);
int myrank = _topology->getProcGroup()->myRank();
delete data_channel;
}
+ /*! This method retrieves the integral of component \a icomp
+ over the whole domain. */
double ParaFIELD::getVolumeIntegral(int icomp) const
{
CommInterface comm_interface = _topology->getProcGroup()->getCommInterface();
throw MEDMEM::MEDEXCEPTION("interpolation is not available for this dimension");
}
}
+
+/*! @} */
}
+
{
public:
- ParaFIELD(const ParaMEDMEM::ParaSUPPORT* support, const ParaMEDMEM::ComponentTopology& component_topology);
+ ParaFIELD(const ParaSUPPORT* support, const ComponentTopology& component_topology);
ParaFIELD(MEDMEM::driverTypes driver_type, const string& file_name,
const string& driver_name, const ComponentTopology& component_topology)
delete _mesh;
}
+ const int* ParaSUPPORT::getGlobalNumbering() const
+ {
+ if (! _support->isOnAllElements())
+ throw MEDMEM::MEDEXCEPTION("GlobalNumbering can only be retrieved on supports on all elements");
+ return _mesh->getGlobalNumbering(_support->getEntity());
+ }
+
}
virtual const Topology* getTopology() const =0;
virtual const MEDMEM::SUPPORT* getSupport() const {return _support;}
virtual const ParaMESH* getMesh() const {return _mesh;}
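+ //!returns the global numbering of the elements of the support (available only when the support lies on all elements)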
+ virtual const int* getGlobalNumbering() const;
+
protected :
const MEDMEM::SUPPORT* _support;
#include "MPIProcessorGroup.hxx"
#include "StructuredCoincidentDEC.hxx"
+
+/*! \defgroup structuredcoincidentdec StructuredCoincidentDEC
+
+This class is meant for remapping fields that have identical
+supports with different parallel topologies. It can be used to couple
+ together multiphysics codes that operate on the same domain
+ with different partitionings, which can be useful if one of
+ the computations is much faster than the other. It can also be used
+to couple together codes that share an interface that was generated
+ in the same manner (with identical global ids).
+Also, this DEC can be used for fields that have component topologies,
+i.e., components that are scattered over several processors.
+
+The remapping between the two supports is based on identity of global
+ ids, instead of geometrical considerations, as is the case for
+ NonCoincidentDEC and IntersectionDEC. Therefore, this DEC must not be used
+for coincident meshes that do not have the same numbering.
+
+As with all the other DECs, its use consists of two phases:
+- a setup phase during which the topologies are exchanged so that
+ the target side knows from which processors it should expect
+ the data.
+- a send/recv phase during which the field data is actually transferred.
+
+This example illustrates the sending of a field with
+the DEC:
+\code
+...
+StructuredCoincidentDEC dec(groupA, groupB);
+dec.attachLocalField(field);
+dec.synchronize();
+if (groupA.containsMyRank())
+ dec.recvData();
+else if (groupB.containsMyRank())
+ dec.sendData();
+...
+\endcode
+
+Creating a ParaFIELD to be attached to the DEC is exactly the same as for
+other DECs when the remapping concerns similar meshes
+that only have different partitionings. When the
+fields also have different component topologies, creating the ParaFIELD
+requires some more effort. See the \ref parafield section for more details.
+*/
namespace ParaMEDMEM
{
{
}
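+/*! Constructor taking the local and distant processor groups.
+The source and target topologies are initialized to null and are
+retrieved from the attached fields during the synchronization phase.
+*/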
+StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):DEC(local_group,distant_group),_toposource(0),_topotarget(0)
+{
+}
/*! Synchronization process for exchanging topologies
*/
-void StructuredCoincidentDEC::synchronize()
+void StructuredCoincidentDEC::synchronizeTopology()
{
if (_source_group->containsMyRank())
_toposource = dynamic_cast<BlockTopology*>(_local_field->getTopology());
broadcastTopology(_toposource,1000);
// Transmitting target topology to source code
broadcastTopology(_topotarget,2000);
- //checkCompatibility(_toposource,_topotarget);
+ if (_toposource->getNbElements() != _topotarget->getNbElements())
+ throw MEDEXCEPTION("Incompatible source and target topologies: numbers of elements differ");
+
}
/*! Creates the arrays necessary for the data transfer
cout <<endl;
cout<<"start AllToAll"<<endl;
+ MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
_comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
- _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
+ _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,comm);
cout<<"end AllToAll"<<endl;
int nb_local = _topotarget->getNbLocalElements();
for (int i=0; i< 4; i++)
cout << _senddispls[i]<<" ";
cout <<endl;
- //MPI_COMM_WORLD is used instead of group because there is no
- //mechanism for creating the union group yet
cout <<"start AllToAll"<<endl;
-
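+ // as above, the union group communicator replaces MPI_COMM_WORLD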
+ MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
_comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
- _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
+ _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,comm);
cout<<"end AllToAll"<<endl;
}
-
+/*! \brief Prepares the DEC for data exchange.
+
+This method broadcasts the topologies from the source to the target
+side so that the target side knows from which processors it
+should expect to receive data.
+*/
+
+void StructuredCoincidentDEC::synchronize()
+{
+ if (_source_group->containsMyRank())
+ {
+ synchronizeTopology();
+ prepareSourceDE();
+ }
+ else if (_target_group->containsMyRank())
+ {
+ synchronizeTopology();
+ prepareTargetDE();
+ }
+}
}
{
public:
StructuredCoincidentDEC();
+ StructuredCoincidentDEC( ProcessorGroup& source, ProcessorGroup& target);
virtual ~StructuredCoincidentDEC();
void synchronize();
- void broadcastTopology(BlockTopology*&, int tag);
- void prepareSourceDE();
- void prepareTargetDE();
void recvData();
void sendData();
+ void prepareSourceDE();
+ void prepareTargetDE();
+
private :
-
+ void synchronizeTopology();
+ void broadcastTopology(BlockTopology*&, int tag);
+
BlockTopology* _toposource;
BlockTopology* _topotarget;
int* _sendcounts;
virtual ~UnstructuredParaSUPPORT();
const Topology* UnstructuredParaSUPPORT::getTopology() const
{return _explicit_topology;}
+
private:
const Topology* _explicit_topology;
const MED_EN::medEntityMesh _entity;