From: ageay
Date: Wed, 16 Feb 2011 15:39:46 +0000 (+0000)
Subject: *** empty log message ***
X-Git-Tag: V6_main_FINAL~1071
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=e9a0c83e0eb761930560f2b1234325181e397299;p=tools%2Fmedcoupling.git

*** empty log message ***
---

diff --git a/src/MEDCoupling/MEDCouplingRemapper.cxx b/src/MEDCoupling/MEDCouplingRemapper.cxx
index 25e5331b4..0fc9fc3fb 100644
--- a/src/MEDCoupling/MEDCouplingRemapper.cxx
+++ b/src/MEDCoupling/MEDCouplingRemapper.cxx
@@ -28,7 +28,7 @@
 #include "Interpolation2DCurve.hxx"
 #include "Interpolation2D.txx"
 #include "Interpolation3D.txx"
-#include "Interpolation3DSurf.txx"
+#include "Interpolation3DSurf.hxx"

 using namespace ParaMEDMEM;

diff --git a/src/MEDCoupling/Test/MEDCouplingBasicsTestInterp.cxx b/src/MEDCoupling/Test/MEDCouplingBasicsTestInterp.cxx
index 240452510..817756d21 100644
--- a/src/MEDCoupling/Test/MEDCouplingBasicsTestInterp.cxx
+++ b/src/MEDCoupling/Test/MEDCouplingBasicsTestInterp.cxx
@@ -23,7 +23,7 @@
 #include "MEDCouplingFieldDouble.hxx"
 #include "MEDCouplingMemArray.hxx"
 #include "Interpolation2D.txx"
-#include "Interpolation3DSurf.txx"
+#include "Interpolation3DSurf.hxx"
 #include "Interpolation3D.txx"
 #include "InterpolationCC.txx"
 #include "InterpolationCU.txx"
diff --git a/src/ParaMEDMEM/DEC.cxx b/src/ParaMEDMEM/DEC.cxx
index 4003b65fc..799002254 100644
--- a/src/ParaMEDMEM/DEC.cxx
+++ b/src/ParaMEDMEM/DEC.cxx
@@ -31,334 +31,9 @@
 #include 
-/*! \defgroup dec DEC
- *
- * \section decintroduction Introduction
- *
- * Interface class for creation of a link between two
- * processor groups for exchanging mesh or field data.
- * The DEC is defined by attaching a field on the receiving or on the
- * sending side.
- * On top of attaching a ParaMEDMEM::FIELD, it is possible to
- * attach an ICoCo::Field. This class is an abstract class that enables
- * coupling of codes that respect the ICoCo interface \ref icoco. It has two implementations:
- * one for codes that express their fields as MEDCoupling fields (ICoCo::MEDField) and one
- * for codes that express their fields as Trio/U fields.
- *
- * \section dec_options DEC Options
- * Options supported by DEC objects are
- *
- * <table>
- * <tr><td>Option</td><td>Description</td><td>Default value</td></tr>
- * <tr><td>ForcedRenormalization</td><td>After receiving data, the target field is renormalized so that L2-norms of the source and target fields match.</td><td>false</td></tr>
- * </table>
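- *
- * When ForcedRenormalization is set, the target field is rescaled after each
- * reception so that the norms match; doing the same by hand on the target side
- * amounts to the following sketch (the dec object and its attached field are
- * assumed to be set up already):
- * \code
- * dec.recvData();
- * dec.renormalizeTargetField(true); // true = isWAbs
- * \endcode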
- - - The following code excerpt shows how to set options for an object that inherits from DEC : - - \code - InterpKernelDEC dec(source_group,target_group); - dec.setOptions("ForcedRenormalization",true); - dec.attachLocalField(field); - dec.synchronize(); - if (source_group.containsMyRank()) - dec.sendData(); - else - dec.recvData(); - \endcode -*/ - namespace ParaMEDMEM { - - - /*! \addtogroup dec - @{ - */ - DEC::DEC(ProcessorGroup& source_group, ProcessorGroup& target_group):_local_field(0), - _source_group(&source_group), - _target_group(&target_group), - _owns_field(false), - _owns_groups(false), - _icoco_field(0) - { - _union_group = source_group.fuse(target_group); - } - DEC::DEC(const std::set& source_ids, const std::set& target_ids, const MPI_Comm& world_comm):_local_field(0), - _owns_field(false), - _owns_groups(true), - _icoco_field(0) - { - ParaMEDMEM::CommInterface comm; - // Create the list of procs including source and target - std::set union_ids; // source and target ids in world_comm - union_ids.insert(source_ids.begin(),source_ids.end()); - union_ids.insert(target_ids.begin(),target_ids.end()); - int* union_ranks_world=new int[union_ids.size()]; // ranks of sources and targets in world_comm - std::copy::const_iterator,int*> (union_ids.begin(), union_ids.end(), union_ranks_world); - - // Create a communicator on these procs - MPI_Group union_group,world_group; - comm.commGroup(world_comm,&world_group); - comm.groupIncl(world_group,union_ids.size(),union_ranks_world,&union_group); - MPI_Comm union_comm; - comm.commCreate(world_comm,union_group,&union_comm); - delete[] union_ranks_world; - - if (union_comm==MPI_COMM_NULL) - { // This process is not in union - _source_group=0; - _target_group=0; - _union_group=0; - return; - } - - // Translate source_ids and target_ids from world_comm to union_comm - int* source_ranks_world=new int[source_ids.size()]; // ranks of sources in world_comm - std::copy::const_iterator,int*> (source_ids.begin(), source_ids.end(),source_ranks_world); - int* source_ranks_union=new int[source_ids.size()]; // ranks of sources in union_comm - int* target_ranks_world=new int[target_ids.size()]; // ranks of targets in world_comm - std::copy::const_iterator,int*> (target_ids.begin(), target_ids.end(),target_ranks_world); - int* target_ranks_union=new int[target_ids.size()]; // ranks of targets in union_comm - MPI_Group_translate_ranks(world_group,source_ids.size(),source_ranks_world,union_group,source_ranks_union); - MPI_Group_translate_ranks(world_group,target_ids.size(),target_ranks_world,union_group,target_ranks_union); - std::set source_ids_union; - for (int i=0;i<(int)source_ids.size();i++) - source_ids_union.insert(source_ranks_union[i]); - std::set target_ids_union; - for (int i=0;i<(int)target_ids.size();i++) - target_ids_union.insert(target_ranks_union[i]); - delete [] source_ranks_world; - delete [] source_ranks_union; - delete [] target_ranks_world; - delete [] target_ranks_union; - - // Create the MPIProcessorGroups - _source_group= new MPIProcessorGroup(comm,source_ids_union,union_comm); - _target_group = new MPIProcessorGroup(comm,target_ids_union,union_comm); - _union_group = _source_group->fuse(*_target_group); - - } - DEC::~DEC() { - if(_owns_field) - delete _local_field; - if(_owns_groups) - { - delete _source_group; - delete _target_group; - } - delete _icoco_field; - delete _union_group; - } - - void DEC::setNature(NatureOfField nature) - { - if(_local_field) - _local_field->getField()->setNature(nature); - } - - /*! 
Attaches a local field to a DEC. - If the processor is on the receiving end of the DEC, the field - will be updated by a recvData() call. - Reversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side. - */ - void DEC::attachLocalField(const ParaFIELD* field, bool ownPt) - { - if(!isInUnion()) - return ; - if(_owns_field) - delete _local_field; - _local_field=field; - _owns_field=ownPt; - _comm_interface=&(field->getTopology()->getProcGroup()->getCommInterface()); - compareFieldAndMethod(); - } - - /*! Attaches a local field to a DEC. The method will test whether the processor - is on the source or the target side and will associate the mesh underlying the - field to the local side. - - If the processor is on the receiving end of the DEC, the field - will be updated by a recvData() call. - Reversely, if the processor is on the sending end, the field will be read, possibly transformed, - and sent appropriately to the other side. - */ - - void DEC::attachLocalField(MEDCouplingFieldDouble* field) - { - if(!isInUnion()) - return ; - ProcessorGroup* local_group; - if (_source_group->containsMyRank()) - local_group=_source_group; - else if (_target_group->containsMyRank()) - local_group=_target_group; - else - throw INTERP_KERNEL::Exception("Invalid procgroup for field attachment to DEC"); - ParaMESH *paramesh=new ParaMESH((MEDCouplingPointSet *)field->getMesh(),*local_group,field->getMesh()->getName()); - ParaFIELD *tmp=new ParaFIELD(field, paramesh, *local_group); - tmp->setOwnSupport(true); - attachLocalField(tmp,true); - //_comm_interface=&(local_group->getCommInterface()); - } - - /*! - Attaches a local field to a DEC. - If the processor is on the receiving end of the DEC, the field - will be updated by a recvData() call. - Reversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side. - The field type is a generic ICoCo Field, so that the DEC can couple a number of different fields : - - a ICoCo::MEDField, that is created from a MEDCoupling structure - - a ICOCo::TrioField, that is created from tables extracted from a TRIO-U structure. - - */ - - void DEC::attachLocalField(const ICoCo::Field* field) - { - if(!isInUnion()) - return ; - const ICoCo::MEDField* medfield=dynamic_cast (field); - if(medfield !=0) - { - attachLocalField(medfield->getField()); - return; - } - const ICoCo::TrioField* triofield=dynamic_cast (field); - if (triofield !=0) - { - ProcessorGroup* localgroup; - if (_source_group->containsMyRank()) - localgroup=_source_group; - else - localgroup=_target_group; - delete _icoco_field; - - _icoco_field=new ICoCo::MEDField(*const_cast(triofield)); - attachLocalField(_icoco_field); - return; - } - throw INTERP_KERNEL::Exception("incompatible field type"); - } - - /*! - Computes the field norm over its support - on the source side and renormalizes the field on the target side - so that the norms match. - - \f[ - I_{source}=\sum_{i=1}^{n_{source}}V_{i}.|\Phi^{source}_{i}|^2, - \f] - - \f[ - I_{target}=\sum_{i=1}^{n_{target}}V_{i}.|\Phi^{target}_{i}|^2, - \f] - - \f[ - \Phi^{target}:=\Phi^{target}.\sqrt{I_{source}/I_{target}}. 
- \f] - - */ - void DEC::renormalizeTargetField(bool isWAbs) - { - if (_source_group->containsMyRank()) - for (int icomp=0; icomp<_local_field->getField()->getArray()->getNumberOfComponents(); icomp++) - { - double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs); - double source_norm = total_norm; - _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0,* dynamic_cast(_union_group)->getComm()); - - } - if (_target_group->containsMyRank()) - { - for (int icomp=0; icomp<_local_field->getField()->getArray()->getNumberOfComponents(); icomp++) - { - double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs); - double source_norm=total_norm; - _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0,* dynamic_cast(_union_group)->getComm()); - - if (fabs(total_norm)>1e-100) - _local_field->getField()->applyLin(source_norm/total_norm,0.0,icomp+1); - } - } - } - /*! @} */ - - bool DEC::isInSourceSide() const - { - if(!_source_group) - return false; - return _source_group->containsMyRank(); - } - - bool DEC::isInTargetSide() const - { - if(!_target_group) - return false; - return _target_group->containsMyRank(); - } - - bool DEC::isInUnion() const - { - if(!_union_group) - return false; - return _union_group->containsMyRank(); - } - - void DEC::compareFieldAndMethod() const throw(INTERP_KERNEL::Exception) - { - if (_local_field) - { - TypeOfField entity = _local_field->getField()->getTypeOfField(); - if ( getMethod() == "P0" ) - { - if ( entity != ON_CELLS ) - throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." - " For P0 interpolation, field must be on MED_CELL's"); - } - else if ( getMethod() == "P1" ) - { - if ( entity != ON_NODES ) - throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." - " For P1 interpolation, field must be on MED_NODE's"); - } - else if ( getMethod() == "P1d" ) - { - if ( entity != ON_CELLS ) - throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." - " For P1d interpolation, field must be on MED_CELL's"); - if ( _target_group->containsMyRank() ) - throw INTERP_KERNEL::Exception("Projection to P1d field not supported"); - } - else - { - throw INTERP_KERNEL::Exception("Unknown interpolation method. Possible methods: P0, P1, P1d"); - } - } - } - - /*! - If way==true, source procs call sendData() and target procs call recvData(). - if way==false, it's the other way round. 
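-
-    For instance, once the DEC is attached and synchronized, one exchange in each
-    direction reads as follows (a sketch; dec stands for any concrete DEC such as
-    InterpKernelDEC):
-    \code
-    dec.sendRecvData();      // way==true : sources send, targets receive
-    dec.sendRecvData(false); // way==false : targets send, sources receive
-    \endcode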
- */ - void DEC::sendRecvData(bool way) - { - if(!isInUnion()) - return; - if(isInSourceSide()) - { - if(way) - sendData(); - else - recvData(); - } - else if(isInTargetSide()) - { - if(way) - recvData(); - else - sendData(); - } } } diff --git a/src/ParaMEDMEM/DEC.hxx b/src/ParaMEDMEM/DEC.hxx index 3c5ddf8fb..10b8eec45 100644 --- a/src/ParaMEDMEM/DEC.hxx +++ b/src/ParaMEDMEM/DEC.hxx @@ -24,59 +24,17 @@ #include "NormalizedUnstructuredMesh.hxx" #include "DECOptions.hxx" -#include - -namespace ICoCo -{ - class Field; -} - namespace ParaMEDMEM { - class ProcessorGroup; - class ParaFIELD; class CommInterface; class DEC : public DECOptions { public: - DEC():_local_field(0) { } - DEC(ProcessorGroup& source_group, ProcessorGroup& target_group); - DEC(const std::set& src_ids, const std::set& trg_ids, - const MPI_Comm& world_comm=MPI_COMM_WORLD); - void setNature(NatureOfField nature); - void attachLocalField( MEDCouplingFieldDouble* field); - void attachLocalField(const ParaFIELD* field, bool ownPt=false); - void attachLocalField(const ICoCo::Field* field); - - virtual void prepareSourceDE() = 0; - virtual void prepareTargetDE() = 0; - virtual void recvData() = 0; - virtual void sendData() = 0; - void sendRecvData(bool way=true); virtual void synchronize() = 0; + virtual void sendRecvData(bool way=true) = 0; virtual ~DEC(); - virtual void computeProcGroup() { } - void renormalizeTargetField(bool isWAbs); - // - ProcessorGroup *getSourceGrp() const { return _source_group; } - ProcessorGroup *getTargetGrp() const { return _target_group; } - bool isInSourceSide() const; - bool isInTargetSide() const; - bool isInUnion() const; - protected: - void compareFieldAndMethod() const throw(INTERP_KERNEL::Exception); protected: - const ParaFIELD* _local_field; - //! Processor group representing the union of target and source processors - ProcessorGroup* _union_group; - ProcessorGroup* _source_group; - ProcessorGroup* _target_group; - const CommInterface* _comm_interface; - bool _owns_field; - bool _owns_groups; - private: - ICoCo::Field* _icoco_field; }; } diff --git a/src/ParaMEDMEM/DisjointDEC.cxx b/src/ParaMEDMEM/DisjointDEC.cxx new file mode 100644 index 000000000..c3a92b451 --- /dev/null +++ b/src/ParaMEDMEM/DisjointDEC.cxx @@ -0,0 +1,366 @@ +// Copyright (C) 2007-2010 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "DisjointDEC.hxx" +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" +#include "ICoCoField.hxx" +#include "ICoCoMEDField.hxx" +#include "ICoCoTrioField.hxx" +#include "MPIProcessorGroup.hxx" + +#include + +/*! 
\defgroup dec DEC
+ *
+ * \section decintroduction Introduction
+ *
+ * Interface class for creation of a link between two
+ * processor groups for exchanging mesh or field data.
+ * The DEC is defined by attaching a field on the receiving or on the
+ * sending side.
+ * On top of attaching a ParaMEDMEM::FIELD, it is possible to
+ * attach an ICoCo::Field. This class is an abstract class that enables
+ * coupling of codes that respect the ICoCo interface \ref icoco. It has two implementations:
+ * one for codes that express their fields as MEDCoupling fields (ICoCo::MEDField) and one
+ * for codes that express their fields as Trio/U fields.
+ *
+ * \section dec_options DEC Options
+ * Options supported by DEC objects are
+ *
+ * <table>
+ * <tr><td>Option</td><td>Description</td><td>Default value</td></tr>
+ * <tr><td>ForcedRenormalization</td><td>After receiving data, the target field is renormalized so that L2-norms of the source and target fields match.</td><td>false</td></tr>
+ * </table>
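+ *
+ * Besides a ParaFIELD, a plain MEDCoupling field can also be attached directly,
+ * in which case the DEC builds the ParaMESH/ParaFIELD wrapper itself; a sketch
+ * (the construction of the field f by the calling code is assumed):
+ * \code
+ * MEDCouplingFieldDouble *f=...; // assumed: field built elsewhere, supported by the local mesh
+ * dec.attachLocalField(f);
+ * \endcode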
+ + + The following code excerpt shows how to set options for an object that inherits from DEC : + + \code + InterpKernelDEC dec(source_group,target_group); + dec.setOptions("ForcedRenormalization",true); + dec.attachLocalField(field); + dec.synchronize(); + if (source_group.containsMyRank()) + dec.sendData(); + else + dec.recvData(); + \endcode +*/ + +namespace ParaMEDMEM +{ + + + /*! \addtogroup dec + @{ + */ + DisjointDEC::DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group):_local_field(0), + _source_group(&source_group), + _target_group(&target_group), + _owns_field(false), + _owns_groups(false), + _icoco_field(0) + { + _union_group = source_group.fuse(target_group); + } + + DisjointDEC::DisjointDEC(const std::set& source_ids, const std::set& target_ids, const MPI_Comm& world_comm):_local_field(0), + _owns_field(false), + _owns_groups(true), + _icoco_field(0) + { + ParaMEDMEM::CommInterface comm; + // Create the list of procs including source and target + std::set union_ids; // source and target ids in world_comm + union_ids.insert(source_ids.begin(),source_ids.end()); + union_ids.insert(target_ids.begin(),target_ids.end()); + if(union_ids.size()!=(source_ids.size()+target_ids.size())) + throw INTERP_KERNEL::Exception("DisjointDEC constructor : source_ids and target_ids overlap partially or fully. This type of DEC does not support it ! OverlapDEC class could be the solution !"); + int* union_ranks_world=new int[union_ids.size()]; // ranks of sources and targets in world_comm + std::copy(union_ids.begin(), union_ids.end(), union_ranks_world); + + // Create a communicator on these procs + MPI_Group union_group,world_group; + comm.commGroup(world_comm,&world_group); + comm.groupIncl(world_group,union_ids.size(),union_ranks_world,&union_group); + MPI_Comm union_comm; + comm.commCreate(world_comm,union_group,&union_comm); + delete[] union_ranks_world; + + if (union_comm==MPI_COMM_NULL) + { // This process is not in union + _source_group=0; + _target_group=0; + _union_group=0; + return; + } + + // Translate source_ids and target_ids from world_comm to union_comm + int* source_ranks_world=new int[source_ids.size()]; // ranks of sources in world_comm + std::copy(source_ids.begin(), source_ids.end(),source_ranks_world); + int* source_ranks_union=new int[source_ids.size()]; // ranks of sources in union_comm + int* target_ranks_world=new int[target_ids.size()]; // ranks of targets in world_comm + std::copy(target_ids.begin(), target_ids.end(),target_ranks_world); + int* target_ranks_union=new int[target_ids.size()]; // ranks of targets in union_comm + MPI_Group_translate_ranks(world_group,source_ids.size(),source_ranks_world,union_group,source_ranks_union); + MPI_Group_translate_ranks(world_group,target_ids.size(),target_ranks_world,union_group,target_ranks_union); + std::set source_ids_union; + for (int i=0;i<(int)source_ids.size();i++) + source_ids_union.insert(source_ranks_union[i]); + std::set target_ids_union; + for (int i=0;i<(int)target_ids.size();i++) + target_ids_union.insert(target_ranks_union[i]); + delete [] source_ranks_world; + delete [] source_ranks_union; + delete [] target_ranks_world; + delete [] target_ranks_union; + + // Create the MPIProcessorGroups + _source_group = new MPIProcessorGroup(comm,source_ids_union,union_comm); + _target_group = new MPIProcessorGroup(comm,target_ids_union,union_comm); + _union_group = _source_group->fuse(*_target_group); + + } + + DisjointDEC::~DisjointDEC() + { + if(_owns_field) + delete _local_field; + if(_owns_groups) + { + 
delete _source_group; + delete _target_group; + } + delete _icoco_field; + delete _union_group; + } + + void DisjointDEC::setNature(NatureOfField nature) + { + if(_local_field) + _local_field->getField()->setNature(nature); + } + + /*! Attaches a local field to a DEC. + If the processor is on the receiving end of the DEC, the field + will be updated by a recvData() call. + Reversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side. + */ + void DisjointDEC::attachLocalField(const ParaFIELD* field, bool ownPt) + { + if(!isInUnion()) + return ; + if(_owns_field) + delete _local_field; + _local_field=field; + _owns_field=ownPt; + _comm_interface=&(field->getTopology()->getProcGroup()->getCommInterface()); + compareFieldAndMethod(); + } + + /*! Attaches a local field to a DEC. The method will test whether the processor + is on the source or the target side and will associate the mesh underlying the + field to the local side. + + If the processor is on the receiving end of the DEC, the field + will be updated by a recvData() call. + Reversely, if the processor is on the sending end, the field will be read, possibly transformed, + and sent appropriately to the other side. + */ + + void DisjointDEC::attachLocalField(MEDCouplingFieldDouble* field) + { + if(!isInUnion()) + return ; + ProcessorGroup* local_group; + if (_source_group->containsMyRank()) + local_group=_source_group; + else if (_target_group->containsMyRank()) + local_group=_target_group; + else + throw INTERP_KERNEL::Exception("Invalid procgroup for field attachment to DEC"); + ParaMESH *paramesh=new ParaMESH((MEDCouplingPointSet *)field->getMesh(),*local_group,field->getMesh()->getName()); + ParaFIELD *tmp=new ParaFIELD(field, paramesh, *local_group); + tmp->setOwnSupport(true); + attachLocalField(tmp,true); + //_comm_interface=&(local_group->getCommInterface()); + } + + /*! + Attaches a local field to a DEC. + If the processor is on the receiving end of the DEC, the field + will be updated by a recvData() call. + Reversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side. + The field type is a generic ICoCo Field, so that the DEC can couple a number of different fields : + - a ICoCo::MEDField, that is created from a MEDCoupling structure + - a ICOCo::TrioField, that is created from tables extracted from a TRIO-U structure. + + */ + void DisjointDEC::attachLocalField(const ICoCo::Field* field) + { + if(!isInUnion()) + return ; + const ICoCo::MEDField* medfield=dynamic_cast (field); + if(medfield !=0) + { + attachLocalField(medfield->getField()); + return; + } + const ICoCo::TrioField* triofield=dynamic_cast (field); + if (triofield !=0) + { + ProcessorGroup* localgroup; + if (_source_group->containsMyRank()) + localgroup=_source_group; + else + localgroup=_target_group; + delete _icoco_field; + + _icoco_field=new ICoCo::MEDField(*const_cast(triofield)); + attachLocalField(_icoco_field); + return; + } + throw INTERP_KERNEL::Exception("incompatible field type"); + } + + /*! + Computes the field norm over its support + on the source side and renormalizes the field on the target side + so that the norms match. + + \f[ + I_{source}=\sum_{i=1}^{n_{source}}V_{i}.|\Phi^{source}_{i}|^2, + \f] + + \f[ + I_{target}=\sum_{i=1}^{n_{target}}V_{i}.|\Phi^{target}_{i}|^2, + \f] + + \f[ + \Phi^{target}:=\Phi^{target}.\sqrt{I_{source}/I_{target}}. 
+ \f] + + */ + void DisjointDEC::renormalizeTargetField(bool isWAbs) + { + if (_source_group->containsMyRank()) + for (int icomp=0; icomp<_local_field->getField()->getArray()->getNumberOfComponents(); icomp++) + { + double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs); + double source_norm = total_norm; + _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0,* dynamic_cast(_union_group)->getComm()); + + } + if (_target_group->containsMyRank()) + { + for (int icomp=0; icomp<_local_field->getField()->getArray()->getNumberOfComponents(); icomp++) + { + double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs); + double source_norm=total_norm; + _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0,* dynamic_cast(_union_group)->getComm()); + + if (fabs(total_norm)>1e-100) + _local_field->getField()->applyLin(source_norm/total_norm,0.0,icomp+1); + } + } + } + /*! @} */ + + bool DisjointDEC::isInSourceSide() const + { + if(!_source_group) + return false; + return _source_group->containsMyRank(); + } + + bool DisjointDEC::isInTargetSide() const + { + if(!_target_group) + return false; + return _target_group->containsMyRank(); + } + + bool DisjointDEC::isInUnion() const + { + if(!_union_group) + return false; + return _union_group->containsMyRank(); + } + + void DisjointDEC::compareFieldAndMethod() const throw(INTERP_KERNEL::Exception) + { + if (_local_field) + { + TypeOfField entity = _local_field->getField()->getTypeOfField(); + if ( getMethod() == "P0" ) + { + if ( entity != ON_CELLS ) + throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." + " For P0 interpolation, field must be on MED_CELL's"); + } + else if ( getMethod() == "P1" ) + { + if ( entity != ON_NODES ) + throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." + " For P1 interpolation, field must be on MED_NODE's"); + } + else if ( getMethod() == "P1d" ) + { + if ( entity != ON_CELLS ) + throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch." + " For P1d interpolation, field must be on MED_CELL's"); + if ( _target_group->containsMyRank() ) + throw INTERP_KERNEL::Exception("Projection to P1d field not supported"); + } + else + { + throw INTERP_KERNEL::Exception("Unknown interpolation method. Possible methods: P0, P1, P1d"); + } + } + } + + /*! + If way==true, source procs call sendData() and target procs call recvData(). + if way==false, it's the other way round. + */ + void DisjointDEC::sendRecvData(bool way) + { + if(!isInUnion()) + return; + if(isInSourceSide()) + { + if(way) + sendData(); + else + recvData(); + } + else if(isInTargetSide()) + { + if(way) + recvData(); + else + sendData(); + } + } +} diff --git a/src/ParaMEDMEM/DisjointDEC.hxx b/src/ParaMEDMEM/DisjointDEC.hxx new file mode 100644 index 000000000..a7fd97c37 --- /dev/null +++ b/src/ParaMEDMEM/DisjointDEC.hxx @@ -0,0 +1,84 @@ +// Copyright (C) 2007-2010 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __DISJOINTDEC_HXX__ +#define __DISJOINTDEC_HXX__ + +#include "MEDCouplingFieldDouble.hxx" +#include "NormalizedUnstructuredMesh.hxx" +#include "DEC.hxx" + +#include +#include + +namespace ICoCo +{ + class Field; +} + +namespace ParaMEDMEM +{ + class ProcessorGroup; + class ParaFIELD; + + class DisjointDEC : public DEC + { + public: + DisjointDEC():_local_field(0) { } + DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group); + DisjointDEC(const std::set& src_ids, const std::set& trg_ids, + const MPI_Comm& world_comm=MPI_COMM_WORLD); + void setNature(NatureOfField nature); + void attachLocalField( MEDCouplingFieldDouble* field); + void attachLocalField(const ParaFIELD* field, bool ownPt=false); + void attachLocalField(const ICoCo::Field* field); + + virtual void prepareSourceDE() = 0; + virtual void prepareTargetDE() = 0; + virtual void recvData() = 0; + virtual void sendData() = 0; + void sendRecvData(bool way=true); + virtual void synchronize() = 0; + virtual ~DisjointDEC(); + virtual void computeProcGroup() { } + void renormalizeTargetField(bool isWAbs); + // + ProcessorGroup *getSourceGrp() const { return _source_group; } + ProcessorGroup *getTargetGrp() const { return _target_group; } + bool isInSourceSide() const; + bool isInTargetSide() const; + bool isInUnion() const; + protected: + void compareFieldAndMethod() const throw(INTERP_KERNEL::Exception); + protected: + const ParaFIELD* _local_field; + //! Processor group representing the union of target and source processors + ProcessorGroup* _union_group; + ProcessorGroup* _source_group; + ProcessorGroup* _target_group; + + const CommInterface* _comm_interface; + bool _owns_field; + bool _owns_groups; + private: + ICoCo::Field* _icoco_field; + }; +} + +#endif diff --git a/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx b/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx index 75ffa101b..45f6ac92c 100644 --- a/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx +++ b/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx @@ -20,7 +20,7 @@ #ifndef __EXPLICITCOINCIDENTDEC_HXX__ #define __EXPLICITCOINCIDENTDEC_HXX__ -#include "DEC.hxx" +#include "DisjointDEC.hxx" #include "ExplicitMapping.hxx" #include "ExplicitTopology.hxx" @@ -30,7 +30,7 @@ namespace ParaMEDMEM { class BlockTopology; - class ExplicitCoincidentDEC : public DEC + class ExplicitCoincidentDEC : public DisjointDEC { public: ExplicitCoincidentDEC(); diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx index 6c4674f84..4beb32bb2 100644 --- a/src/ParaMEDMEM/InterpKernelDEC.cxx +++ b/src/ParaMEDMEM/InterpKernelDEC.cxx @@ -118,13 +118,13 @@ namespace ParaMEDMEM */ InterpKernelDEC::InterpKernelDEC(ProcessorGroup& source_group, ProcessorGroup& target_group): - DEC(source_group, target_group),_interpolation_matrix(0) + DisjointDEC(source_group, target_group),_interpolation_matrix(0) { } InterpKernelDEC::InterpKernelDEC(const std::set& src_ids, const std::set& trg_ids, - const MPI_Comm& world_comm):DEC(src_ids,trg_ids,world_comm), + const MPI_Comm& world_comm):DisjointDEC(src_ids,trg_ids,world_comm), _interpolation_matrix(0) { } diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx index 7930a044d..a4f09a083 100644 --- 
a/src/ParaMEDMEM/InterpKernelDEC.hxx +++ b/src/ParaMEDMEM/InterpKernelDEC.hxx @@ -20,7 +20,7 @@ #ifndef __INTERPKERNELDEC_HXX__ #define __INTERPKERNELDEC_HXX__ -#include "DEC.hxx" +#include "DisjointDEC.hxx" #include "MxN_Mapping.hxx" #include "InterpolationOptions.hxx" @@ -28,7 +28,7 @@ namespace ParaMEDMEM { class InterpolationMatrix; - class InterpKernelDEC : public DEC, public INTERP_KERNEL::InterpolationOptions + class InterpKernelDEC : public DisjointDEC, public INTERP_KERNEL::InterpolationOptions { public: InterpKernelDEC(); diff --git a/src/ParaMEDMEM/InterpolationMatrix.cxx b/src/ParaMEDMEM/InterpolationMatrix.cxx index 1022280b1..a45ad45b1 100644 --- a/src/ParaMEDMEM/InterpolationMatrix.cxx +++ b/src/ParaMEDMEM/InterpolationMatrix.cxx @@ -25,9 +25,9 @@ #include "TranslationRotationMatrix.hxx" #include "Interpolation.hxx" #include "Interpolation1D.txx" -#include "Interpolation2DCurve.txx" +#include "Interpolation2DCurve.hxx" #include "Interpolation2D.txx" -#include "Interpolation3DSurf.txx" +#include "Interpolation3DSurf.hxx" #include "Interpolation3D.txx" #include "MEDCouplingUMesh.hxx" #include "MEDCouplingNormalizedUnstructuredMesh.txx" diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx index c43a4476d..953973a57 100644 --- a/src/ParaMEDMEM/MPIProcessorGroup.hxx +++ b/src/ParaMEDMEM/MPIProcessorGroup.hxx @@ -20,12 +20,13 @@ #ifndef __MPIPROCESSORGROUP_HXX__ #define __MPIPROCESSORGROUP_HXX__ +#include "ProcessorGroup.hxx" + #include #include namespace ParaMEDMEM { - class ProcessorGroup; class CommInterface; class MPIProcessorGroup : public ProcessorGroup diff --git a/src/ParaMEDMEM/Makefile.am b/src/ParaMEDMEM/Makefile.am index 5bc571a2b..b0bd1a51f 100644 --- a/src/ParaMEDMEM/Makefile.am +++ b/src/ParaMEDMEM/Makefile.am @@ -33,13 +33,17 @@ ComponentTopology.hxx\ ExplicitTopology.hxx\ ParaFIELD.hxx\ DEC.hxx\ +DisjointDEC.hxx\ +OverlapDEC.hxx\ DECOptions.hxx\ MxN_Mapping.hxx\ StructuredCoincidentDEC.hxx\ InterpolationMatrix.hxx\ +OverlapInterpolationMatrix.hxx\ InterpKernelDEC.hxx\ ExplicitCoincidentDEC.hxx\ ElementLocator.hxx\ +OverlapElementLocator.hxx\ ExplicitMapping.hxx\ ICoCoField.hxx \ ICoCoMEDField.hxx \ @@ -56,14 +60,18 @@ ParaMESH.cxx\ ComponentTopology.cxx\ MPIAccess.cxx \ InterpolationMatrix.cxx\ +OverlapInterpolationMatrix.cxx\ StructuredCoincidentDEC.cxx\ ExplicitCoincidentDEC.cxx\ InterpKernelDEC.cxx\ ElementLocator.cxx\ +OverlapElementLocator.cxx\ MPIAccessDEC.cxx \ TimeInterpolator.cxx \ LinearTimeInterpolator.cxx\ DEC.cxx\ +DisjointDEC.cxx\ +OverlapDEC.cxx\ ExplicitTopology.cxx\ MxN_Mapping.cxx\ ICoCoMEDField.cxx\ diff --git a/src/ParaMEDMEM/MxN_Mapping.cxx b/src/ParaMEDMEM/MxN_Mapping.cxx index 179017a9a..74f83070f 100644 --- a/src/ParaMEDMEM/MxN_Mapping.cxx +++ b/src/ParaMEDMEM/MxN_Mapping.cxx @@ -27,6 +27,10 @@ using namespace std; namespace ParaMEDMEM { + MxN_Mapping::MxN_Mapping() + { + } + MxN_Mapping::MxN_Mapping(const ProcessorGroup& source_group, const ProcessorGroup& target_group,const DECOptions& dec_options) : DECOptions(dec_options),_union_group(source_group.fuse(target_group)) diff --git a/src/ParaMEDMEM/OverlapDEC.cxx b/src/ParaMEDMEM/OverlapDEC.cxx new file mode 100644 index 000000000..f5e0707a1 --- /dev/null +++ b/src/ParaMEDMEM/OverlapDEC.cxx @@ -0,0 +1,102 @@ +// Copyright (C) 2007-2010 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// 
version 2.1 of the License. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "OverlapDEC.hxx" +#include "CommInterface.hxx" +#include "ParaFIELD.hxx" +#include "MPIProcessorGroup.hxx" +#include "OverlapElementLocator.hxx" +#include "OverlapInterpolationMatrix.hxx" + +namespace ParaMEDMEM +{ + OverlapDEC::OverlapDEC(const std::set& procIds, const MPI_Comm& world_comm):_own_group(true),_interpolation_matrix(0), + _source_field(0),_own_source_field(false), + _target_field(0),_own_target_field(false) + { + ParaMEDMEM::CommInterface comm; + int *ranks_world=new int[procIds.size()]; // ranks of sources and targets in world_comm + std::copy(procIds.begin(),procIds.end(),ranks_world); + MPI_Group group,world_group; + comm.commGroup(world_comm,&world_group); + comm.groupIncl(world_group,procIds.size(),ranks_world,&group); + delete [] ranks_world; + MPI_Comm theComm; + comm.commCreate(world_comm,group,&theComm); + if(theComm==MPI_COMM_NULL) + { + _group=0; + return ; + } + std::set idsUnion; + for(std::size_t i=0;icontainsMyRank(); + } +} diff --git a/src/ParaMEDMEM/OverlapDEC.hxx b/src/ParaMEDMEM/OverlapDEC.hxx new file mode 100644 index 000000000..ef6f97fd1 --- /dev/null +++ b/src/ParaMEDMEM/OverlapDEC.hxx @@ -0,0 +1,57 @@ +// Copyright (C) 2007-2010 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#ifndef __OVERLAPDEC_HXX__ +#define __OVERLAPDEC_HXX__ + +#include "DEC.hxx" +#include "InterpolationOptions.hxx" + +#include + +namespace ParaMEDMEM +{ + class OverlapInterpolationMatrix; + class ProcessorGroup; + class ParaFIELD; + + class OverlapDEC : public DEC, public INTERP_KERNEL::InterpolationOptions + { + public: + OverlapDEC(const std::set& procIds,const MPI_Comm& world_comm=MPI_COMM_WORLD); + virtual ~OverlapDEC(); + void sendRecvData(bool way=true); + void synchronize(); + void attachSourceLocalField(ParaFIELD *field, bool ownPt=true); + void attachTargetLocalField(ParaFIELD *field, bool ownPt=true); + ProcessorGroup *getGrp() { return _group; } + bool isInGroup() const; + private: + bool _own_group; + OverlapInterpolationMatrix* _interpolation_matrix; + ProcessorGroup *_group; + private: + ParaFIELD *_source_field; + bool _own_source_field; + ParaFIELD *_target_field; + bool _own_target_field; + }; +} + +#endif diff --git a/src/ParaMEDMEM/OverlapElementLocator.cxx b/src/ParaMEDMEM/OverlapElementLocator.cxx new file mode 100644 index 000000000..bc9244f7b --- /dev/null +++ b/src/ParaMEDMEM/OverlapElementLocator.cxx @@ -0,0 +1,783 @@ +// Copyright (C) 2007-2010 CEA/DEN, EDF R&D +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +// + +#include "OverlapElementLocator.hxx" + +#include +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "MEDCouplingFieldDouble.hxx" +#include "DirectedBoundingBox.hxx" + +#include +#include +#include + +using namespace std; + +namespace ParaMEDMEM +{ + OverlapElementLocator::OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group) + : _local_source_field(sourceField), + _local_target_field(targetField), + _local_source_mesh(0), + _local_target_mesh(0), + _domain_bounding_boxes(0), + _group(group) + { + if(_local_source_field) + _local_source_mesh=_local_source_field->getSupport()->getCellMesh(); + if(_local_target_field) + _local_target_mesh=_local_target_field->getSupport()->getCellMesh(); + _comm=getCommunicator(); + _computeBoundingBoxes(); + } + + OverlapElementLocator::~OverlapElementLocator() + { + delete [] _domain_bounding_boxes; + } + + const MPI_Comm *OverlapElementLocator::getCommunicator() const + { + const MPIProcessorGroup* group=static_cast(&_group); + return group->getComm(); + } + + void OverlapElementLocator::_computeBoundingBoxes() + { + CommInterface comm_interface=_group.getCommInterface(); + const MPIProcessorGroup* group=static_cast (&_group); + _local_space_dim=0; + if(_local_source_mesh) + _local_space_dim=_local_source_mesh->getSpaceDimension(); + else + _local_space_dim=_local_target_mesh->getSpaceDimension(); + // + const MPI_Comm* comm = group->getComm(); + int bbSize=2*2*_local_space_dim;//2 (for source/target) 2 (min/max) + _domain_bounding_boxes=new double[bbSize*_group.size()]; + double * minmax=new double[bbSize]; + if(_local_source_mesh) + _local_source_mesh->getBoundingBox(minmax); + else + { + for(int i=0;i<_local_space_dim;i++) + { + minmax[i*2]=std::numeric_limits::max(); + minmax[i*2+1]=-std::numeric_limits::max(); + } + } + if(_local_target_mesh) + _local_target_mesh->getBoundingBox(minmax+2*_local_space_dim); + else + { + for(int i=0;i<_local_space_dim;i++) + { + minmax[i*2+2*_local_space_dim]=std::numeric_limits::max(); + minmax[i*2+1+2*_local_space_dim]=-std::numeric_limits::max(); + } + } + comm_interface.allGather(minmax, bbSize, MPI_DOUBLE, + _domain_bounding_boxes,bbSize, MPI_DOUBLE, + *comm); + + /*std::vector + for(int i=0;i<_group.size();i++) + for(int j=0;j<_group.size();j++) + if(_intersectsBoundingBox(i,j)) + { + _distant_proc_ids.push_back(rank); + }*/ + } + + bool OverlapElementLocator::_intersectsBoundingBox(int i, int j) const + { + // double* local_bb = _domain_bounding_boxes+_union_group->myRank()*2*_local_cell_mesh_space_dim; + // double* distant_bb = _domain_bounding_boxes+irank*2*_local_cell_mesh_space_dim; + + // for (int idim=0; idim < _local_cell_mesh_space_dim; idim++) + // { + // const double eps = 1e-12; + // bool intersects = (distant_bb[idim*2]getNature(); + } + + // ========================================================================== + // Procedure for exchanging mesh between a distant proc and a local processor + // param idistantrank proc id on distant group + // param distant_mesh on return , 
points to a local reconstruction of + // the distant mesh + // param distant_ids on return, contains a vector defining a correspondence + // between the distant ids and the ids of the local reconstruction + // ========================================================================== + void ElementLocator::exchangeMesh(int idistantrank, + MEDCouplingPointSet*& distant_mesh, + int*& distant_ids) + { + int rank = _union_group->translateRank(&_distant_group,idistantrank); + + if (find(_distant_proc_ids.begin(), _distant_proc_ids.end(),rank)==_distant_proc_ids.end()) + return; + + vector elems; +#ifdef USE_DIRECTED_BB + INTERP_KERNEL::DirectedBoundingBox dbb; + double* distant_bb = _domain_bounding_boxes+rank*dbb.dataSize(_local_cell_mesh_space_dim); + dbb.setData(distant_bb); + _local_cell_mesh->getCellsInBoundingBox(dbb,getBoundingBoxAdjustment(),elems); +#else + double* distant_bb = _domain_bounding_boxes+rank*2*_local_cell_mesh_space_dim; + _local_cell_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment(),elems); +#endif + + DataArrayInt *distant_ids_send; + MEDCouplingPointSet *send_mesh = (MEDCouplingPointSet *)_local_para_field.getField()->buildSubMeshData(&elems[0],&elems[elems.size()],distant_ids_send); + _exchangeMesh(send_mesh, distant_mesh, idistantrank, distant_ids_send, distant_ids); + distant_ids_send->decrRef(); + + if(send_mesh) + send_mesh->decrRef(); + } + + void ElementLocator::exchangeMethod(const std::string& sourceMeth, int idistantrank, std::string& targetMeth) + { + CommInterface comm_interface=_union_group->getCommInterface(); + MPIProcessorGroup* group=static_cast (_union_group); + const MPI_Comm* comm=(group->getComm()); + MPI_Status status; + // it must be converted to union numbering before communication + int idistRankInUnion = group->translateRank(&_distant_group,idistantrank); + char *recv_buffer=new char[4]; + std::vector send_buffer(4); + std::copy(sourceMeth.begin(),sourceMeth.end(),send_buffer.begin()); + comm_interface.sendRecv(&send_buffer[0], 4, MPI_CHAR,idistRankInUnion, 1112, + recv_buffer, 4, MPI_CHAR,idistRankInUnion, 1112, + *comm, &status); + targetMeth=recv_buffer; + delete [] recv_buffer; + } + + + // ====================== + // Compute bounding boxes + // ====================== + + void ElementLocator::_computeBoundingBoxes() + { + CommInterface comm_interface =_union_group->getCommInterface(); + MPIProcessorGroup* group=static_cast (_union_group); + const MPI_Comm* comm = group->getComm(); + _local_cell_mesh_space_dim = -1; + if(_local_cell_mesh->getMeshDimension() != -1) + _local_cell_mesh_space_dim=_local_cell_mesh->getSpaceDimension(); + int *spaceDimForAll=new int[_union_group->size()]; + comm_interface.allGather(&_local_cell_mesh_space_dim, 1, MPI_INT, + spaceDimForAll,1, MPI_INT, + *comm); + _local_cell_mesh_space_dim=*std::max_element(spaceDimForAll,spaceDimForAll+_union_group->size()); + _is_m1d_corr=((*std::min_element(spaceDimForAll,spaceDimForAll+_union_group->size()))==-1); + for(int i=0;i<_union_group->size();i++) + if(spaceDimForAll[i]!=_local_cell_mesh_space_dim && spaceDimForAll[i]!=-1) + throw INTERP_KERNEL::Exception("Spacedim not matches !"); + delete [] spaceDimForAll; +#ifdef USE_DIRECTED_BB + INTERP_KERNEL::DirectedBoundingBox dbb; + int bbSize = dbb.dataSize(_local_cell_mesh_space_dim); + _domain_bounding_boxes = new double[bbSize*_union_group->size()]; + if(_local_cell_mesh->getMeshDimension() != -1) + dbb = INTERP_KERNEL::DirectedBoundingBox(_local_cell_mesh->getCoords()->getPointer(), + 
_local_cell_mesh->getNumberOfNodes(), + _local_cell_mesh_space_dim); + std::vector dbbData = dbb.getData(); + if ( dbbData.size() < bbSize ) dbbData.resize(bbSize,0); + double * minmax= &dbbData[0]; +#else + int bbSize = 2*_local_cell_mesh_space_dim; + _domain_bounding_boxes = new double[bbSize*_union_group->size()]; + double * minmax=new double [bbSize]; + if(_local_cell_mesh->getMeshDimension() != -1) + _local_cell_mesh->getBoundingBox(minmax); + else + for(int i=0;i<_local_cell_mesh_space_dim;i++) + { + minmax[i*2]=-std::numeric_limits::max(); + minmax[i*2+1]=std::numeric_limits::max(); + } +#endif + + comm_interface.allGather(minmax, bbSize, MPI_DOUBLE, + _domain_bounding_boxes,bbSize, MPI_DOUBLE, + *comm); + + for (int i=0; i< _distant_group.size(); i++) + { + int rank=_union_group->translateRank(&_distant_group,i); + + if (_intersectsBoundingBox(rank)) + { + _distant_proc_ids.push_back(rank); + } + } +#ifdef USE_DIRECTED_BB +#else + delete [] minmax; +#endif + } + + + // ============================================= + // Intersect Bounding Box (with a given "irank") + // ============================================= + bool ElementLocator::_intersectsBoundingBox(int irank) + { +#ifdef USE_DIRECTED_BB + INTERP_KERNEL::DirectedBoundingBox local_dbb, distant_dbb; + local_dbb.setData( _domain_bounding_boxes+_union_group->myRank()*local_dbb.dataSize( _local_cell_mesh_space_dim )); + distant_dbb.setData( _domain_bounding_boxes+irank*distant_dbb.dataSize( _local_cell_mesh_space_dim )); + return !local_dbb.isDisjointWith( distant_dbb ); +#else + double* local_bb = _domain_bounding_boxes+_union_group->myRank()*2*_local_cell_mesh_space_dim; + double* distant_bb = _domain_bounding_boxes+irank*2*_local_cell_mesh_space_dim; + + for (int idim=0; idim < _local_cell_mesh_space_dim; idim++) + { + const double eps = 1e-12; + bool intersects = (distant_bb[idim*2]getCommInterface(); + + // First stage : exchanging sizes + // ------------------------------ + vector tinyInfoLocalD,tinyInfoDistantD(1);//not used for the moment + vector tinyInfoLocal,tinyInfoDistant; + vector tinyInfoLocalS; + //Getting tiny info of local mesh to allow the distant proc to initialize and allocate + //the transmitted mesh. + local_mesh->getTinySerializationInformation(tinyInfoLocalD,tinyInfoLocal,tinyInfoLocalS); + tinyInfoLocal.push_back(distant_ids_send->getNumberOfTuples()); + tinyInfoDistant.resize(tinyInfoLocal.size()); + std::fill(tinyInfoDistant.begin(),tinyInfoDistant.end(),0); + MPIProcessorGroup* group=static_cast (_union_group); + const MPI_Comm* comm=group->getComm(); + MPI_Status status; + + // iproc_distant is the number of proc in distant group + // it must be converted to union numbering before communication + int iprocdistant_in_union = group->translateRank(&_distant_group, + iproc_distant); + + comm_interface.sendRecv(&tinyInfoLocal[0], tinyInfoLocal.size(), MPI_INT, iprocdistant_in_union, 1112, + &tinyInfoDistant[0], tinyInfoDistant.size(), MPI_INT,iprocdistant_in_union,1112, + *comm, &status); + DataArrayInt *v1Local=0; + DataArrayDouble *v2Local=0; + DataArrayInt *v1Distant=DataArrayInt::New(); + DataArrayDouble *v2Distant=DataArrayDouble::New(); + //serialization of local mesh to send data to distant proc. + local_mesh->serialize(v1Local,v2Local); + //Building the right instance of copy of distant mesh. 
+ MEDCouplingPointSet *distant_mesh_tmp=MEDCouplingPointSet::BuildInstanceFromMeshType((MEDCouplingMeshType)tinyInfoDistant[0]); + std::vector unusedTinyDistantSts; + distant_mesh_tmp->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts); + int nbLocalElems=0; + int nbDistElem=0; + int *ptLocal=0; + int *ptDist=0; + if(v1Local) + { + nbLocalElems=v1Local->getNbOfElems(); + ptLocal=v1Local->getPointer(); + } + if(v1Distant) + { + nbDistElem=v1Distant->getNbOfElems(); + ptDist=v1Distant->getPointer(); + } + comm_interface.sendRecv(ptLocal, nbLocalElems, MPI_INT, + iprocdistant_in_union, 1111, + ptDist, nbDistElem, MPI_INT, + iprocdistant_in_union,1111, + *comm, &status); + nbLocalElems=0; + double *ptLocal2=0; + double *ptDist2=0; + if(v2Local) + { + nbLocalElems=v2Local->getNbOfElems(); + ptLocal2=v2Local->getPointer(); + } + nbDistElem=0; + if(v2Distant) + { + nbDistElem=v2Distant->getNbOfElems(); + ptDist2=v2Distant->getPointer(); + } + comm_interface.sendRecv(ptLocal2, nbLocalElems, MPI_DOUBLE, + iprocdistant_in_union, 1112, + ptDist2, nbDistElem, MPI_DOUBLE, + iprocdistant_in_union, 1112, + *comm, &status); + // + distant_mesh=distant_mesh_tmp; + //finish unserialization + distant_mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts); + // + distant_ids_recv=new int[tinyInfoDistant.back()]; + comm_interface.sendRecv((void *)distant_ids_send->getConstPointer(),tinyInfoLocal.back(), MPI_INT, + iprocdistant_in_union, 1113, + distant_ids_recv,tinyInfoDistant.back(), MPI_INT, + iprocdistant_in_union,1113, + *comm, &status); + if(v1Local) + v1Local->decrRef(); + if(v2Local) + v2Local->decrRef(); + if(v1Distant) + v1Distant->decrRef(); + if(v2Distant) + v2Distant->decrRef(); + } + + /*! + * connected with ElementLocator::sendPolicyToWorkingSideL + */ + void ElementLocator::recvPolicyFromLazySideW(std::vector& policy) + { + policy.resize(_distant_proc_ids.size()); + int procId=0; + CommInterface comm; + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int toRecv; + comm.recv((void *)&toRecv,1,MPI_INT,*iter,1120,*_comm,&status); + policy[procId]=toRecv; + } + } + + /*! + * connected with ElementLocator::recvFromWorkingSideL + */ + void ElementLocator::sendSumToLazySideW(const std::vector< std::vector >& distantLocEltIds, const std::vector< std::vector >& partialSumRelToDistantIds) + { + int procId=0; + CommInterface comm; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const vector& eltIds=distantLocEltIds[procId]; + const vector& valued=partialSumRelToDistantIds[procId]; + int lgth=eltIds.size(); + comm.send(&lgth,1,MPI_INT,*iter,1114,*_comm); + comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1115,*_comm); + comm.send((void *)&valued[0],lgth,MPI_DOUBLE,*iter,1116,*_comm); + } + } + + /*! + * connected with ElementLocator::sendToWorkingSideL + */ + void ElementLocator::recvSumFromLazySideW(std::vector< std::vector >& globalSumRelToDistantIds) + { + int procId=0; + CommInterface comm; + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + std::vector& vec=globalSumRelToDistantIds[procId]; + comm.recv(&vec[0],vec.size(),MPI_DOUBLE,*iter,1117,*_comm,&status); + } + } + + /*! 
+ * connected with ElementLocator::recvLocalIdsFromWorkingSideL + */ + void ElementLocator::sendLocalIdsToLazyProcsW(const std::vector< std::vector >& distantLocEltIds) + { + int procId=0; + CommInterface comm; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const vector& eltIds=distantLocEltIds[procId]; + int lgth=eltIds.size(); + comm.send(&lgth,1,MPI_INT,*iter,1121,*_comm); + comm.send((void *)&eltIds[0],lgth,MPI_INT,*iter,1122,*_comm); + } + } + + /*! + * connected with ElementLocator::sendGlobalIdsToWorkingSideL + */ + void ElementLocator::recvGlobalIdsFromLazyProcsW(const std::vector< std::vector >& distantLocEltIds, std::vector< std::vector >& globalIds) + { + int procId=0; + CommInterface comm; + MPI_Status status; + globalIds.resize(_distant_proc_ids.size()); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const std::vector& vec=distantLocEltIds[procId]; + std::vector& global=globalIds[procId]; + global.resize(vec.size()); + comm.recv(&global[0],vec.size(),MPI_INT,*iter,1123,*_comm,&status); + } + } + + /*! + * connected with ElementLocator::sendCandidatesGlobalIdsToWorkingSideL + */ + void ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW(std::vector< std::vector >& globalIds) + { + int procId=0; + CommInterface comm; + MPI_Status status; + globalIds.resize(_distant_proc_ids.size()); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + std::vector& global=globalIds[procId]; + int lgth; + comm.recv(&lgth,1,MPI_INT,*iter,1132,*_comm,&status); + global.resize(lgth); + comm.recv(&global[0],lgth,MPI_INT,*iter,1133,*_comm,&status); + } + } + + /*! + * connected with ElementLocator::recvSumFromWorkingSideL + */ + void ElementLocator::sendPartialSumToLazyProcsW(const std::vector& distantGlobIds, const std::vector& sum) + { + int procId=0; + CommInterface comm; + int lgth=distantGlobIds.size(); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + comm.send(&lgth,1,MPI_INT,*iter,1124,*_comm); + comm.send((void*)&distantGlobIds[0],lgth,MPI_INT,*iter,1125,*_comm); + comm.send((void*)&sum[0],lgth,MPI_DOUBLE,*iter,1126,*_comm); + } + } + + /*! + * connected with ElementLocator::recvCandidatesForAddElementsL + */ + void ElementLocator::sendCandidatesForAddElementsW(const std::vector& distantGlobIds) + { + int procId=0; + CommInterface comm; + int lgth=distantGlobIds.size(); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + comm.send(&lgth,1,MPI_INT,*iter,1128,*_comm); + comm.send((void*)&distantGlobIds[0],lgth,MPI_INT,*iter,1129,*_comm); + } + } + + /*! + * connected with ElementLocator::sendAddElementsToWorkingSideL + */ + void ElementLocator::recvAddElementsFromLazyProcsW(std::vector >& elementsToAdd) + { + int procId=0; + CommInterface comm; + MPI_Status status; + int lgth=_distant_proc_ids.size(); + elementsToAdd.resize(lgth); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int locLgth; + std::vector& eltToFeed=elementsToAdd[procId]; + comm.recv(&locLgth,1,MPI_INT,*iter,1130,*_comm,&status); + eltToFeed.resize(locLgth); + comm.recv(&eltToFeed[0],locLgth,MPI_INT,*iter,1131,*_comm,&status); + } + } + + /*! 
+ * connected with ElementLocator::recvPolicyFromLazySideW + */ + int ElementLocator::sendPolicyToWorkingSideL() + { + CommInterface comm; + int toSend; + DataArrayInt *isCumulative=_local_para_field.returnCumulativeGlobalNumbering(); + if(isCumulative) + { + toSend=CUMULATIVE_POLICY; + isCumulative->decrRef(); + } + else + toSend=NO_POST_TREATMENT_POLICY; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++) + comm.send(&toSend,1,MPI_INT,*iter,1120,*_comm); + return toSend; + } + + /*! + * connected with ElementLocator::sendSumToLazySideW + */ + void ElementLocator::recvFromWorkingSideL() + { + _values_added.resize(_local_para_field.getField()->getNumberOfTuples()); + int procId=0; + CommInterface comm; + _ids_per_working_proc.resize(_distant_proc_ids.size()); + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int lgth; + comm.recv(&lgth,1,MPI_INT,*iter,1114,*_comm,&status); + vector& ids=_ids_per_working_proc[procId]; + ids.resize(lgth); + vector values(lgth); + comm.recv(&ids[0],lgth,MPI_INT,*iter,1115,*_comm,&status); + comm.recv(&values[0],lgth,MPI_DOUBLE,*iter,1116,*_comm,&status); + for(int i=0;i::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + vector& ids=_ids_per_working_proc[procId]; + vector valsToSend(ids.size()); + vector::iterator iter3=valsToSend.begin(); + for(vector::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter3++) + *iter3=_values_added[*iter2]; + comm.send(&valsToSend[0],ids.size(),MPI_DOUBLE,*iter,1117,*_comm); + //ids.clear(); + } + //_ids_per_working_proc.clear(); + } + + /*! + * connected with ElementLocator::sendLocalIdsToLazyProcsW + */ + void ElementLocator::recvLocalIdsFromWorkingSideL() + { + int procId=0; + CommInterface comm; + _ids_per_working_proc.resize(_distant_proc_ids.size()); + MPI_Status status; + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + int lgth; + vector& ids=_ids_per_working_proc[procId]; + comm.recv(&lgth,1,MPI_INT,*iter,1121,*_comm,&status); + ids.resize(lgth); + comm.recv(&ids[0],lgth,MPI_INT,*iter,1122,*_comm,&status); + } + } + + /*! + * connected with ElementLocator::recvGlobalIdsFromLazyProcsW + */ + void ElementLocator::sendGlobalIdsToWorkingSideL() + { + int procId=0; + CommInterface comm; + DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering(); + const int *globalIdsC=globalIds->getConstPointer(); + for(vector::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++) + { + const vector& ids=_ids_per_working_proc[procId]; + vector valsToSend(ids.size()); + vector::iterator iter1=valsToSend.begin(); + for(vector::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++) + *iter1=globalIdsC[*iter2]; + comm.send(&valsToSend[0],ids.size(),MPI_INT,*iter,1123,*_comm); + } + if(globalIds) + globalIds->decrRef(); + } + + /*! 
+  /*!
+   * connected with ElementLocator::sendPartialSumToLazyProcsW
+   */
+  void ElementLocator::recvSumFromWorkingSideL()
+  {
+    int procId=0;
+    int wProcSize=_distant_proc_ids.size();
+    CommInterface comm;
+    _ids_per_working_proc.resize(wProcSize);
+    _values_per_working_proc.resize(wProcSize);
+    MPI_Status status;
+    std::map<int,double> sums;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1124,*_comm,&status);
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double>& vals=_values_per_working_proc[procId];
+        ids.resize(lgth);
+        vals.resize(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1125,*_comm,&status);
+        comm.recv(&vals[0],lgth,MPI_DOUBLE,*iter,1126,*_comm,&status);
+        vector<int>::const_iterator iter1=ids.begin();
+        vector<double>::const_iterator iter2=vals.begin();
+        for(;iter1!=ids.end();iter1++,iter2++)
+          sums[*iter1]+=*iter2;
+      }
+    //assign sum to prepare sending to working side
+    for(procId=0;procId<wProcSize;procId++)
+      {
+        vector<int>& ids=_ids_per_working_proc[procId];
+        vector<double>& vals=_values_per_working_proc[procId];
+        vector<int>::const_iterator iter1=ids.begin();
+        vector<double>::iterator iter2=vals.begin();
+        for(;iter1!=ids.end();iter1++,iter2++)
+          *iter2=sums[*iter1];
+        ids.clear();
+      }
+  }
+
+  /*!
+   * For each working proc Wi, this method computes and pushes into _ids_per_working_proc3,
+   * when they exist, the local ids of nodes that are in interaction with a lazy proc other
+   * than this one and that also exist on this proc \b without interacting with it.
+   * The computation is performed here; sendAddElementsToWorkingSideL is only in charge of
+   * sending the precomputed _ids_per_working_proc3 attribute.
+   * connected with ElementLocator::sendCandidatesForAddElementsW
+   */
+  void ElementLocator::recvCandidatesForAddElementsL()
+  {
+    int procId=0;
+    int wProcSize=_distant_proc_ids.size();
+    CommInterface comm;
+    _ids_per_working_proc3.resize(wProcSize);
+    MPI_Status status;
+    std::map<int,double> sums;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    int nbElts=globalIds->getNumberOfTuples();
+    std::set<int> globalIdsS(globalIdsC,globalIdsC+nbElts);
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& ids0=_ids_per_working_proc[procId];
+        int lgth0=ids0.size();
+        std::set<int> elts0;
+        for(int i=0;i<lgth0;i++)
+          elts0.insert(globalIdsC[ids0[i]]);
+        int lgth;
+        comm.recv(&lgth,1,MPI_INT,*iter,1128,*_comm,&status);
+        vector<int> ids(lgth);
+        comm.recv(&ids[0],lgth,MPI_INT,*iter,1129,*_comm,&status);
+        set<int> ids1(ids.begin(),ids.end());
+        ids.clear();
+        set<int> tmp5,tmp6;
+        set_intersection(globalIdsS.begin(),globalIdsS.end(),ids1.begin(),ids1.end(),inserter(tmp5,tmp5.begin()));
+        set_difference(tmp5.begin(),tmp5.end(),elts0.begin(),elts0.end(),inserter(tmp6,tmp6.begin()));
+        std::vector<int>& ids2=_ids_per_working_proc3[procId];
+        ids2.resize(tmp6.size());
+        std::copy(tmp6.begin(),tmp6.end(),ids2.begin());
+        //global->local
+        for(std::vector<int>::iterator iter2=ids2.begin();iter2!=ids2.end();iter2++)
+          *iter2=std::find(globalIdsC,globalIdsC+nbElts,*iter2)-globalIdsC;
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
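
The core of recvCandidatesForAddElementsL is plain std::set algebra: among the candidate global ids received from a working proc, keep those that exist locally (intersection with globalIdsS) but are not already interacting with that proc (difference with elts0). A self-contained sketch with a hypothetical name:

\code
#include <algorithm>
#include <iterator>
#include <set>

// (candidates intersect localGlobalIds) minus alreadyInteracting,
// i.e. the tmp5/tmp6 computation above.
std::set<int> newInteractions(const std::set<int>& localGlobalIds,
                              const std::set<int>& candidates,
                              const std::set<int>& alreadyInteracting)
{
  std::set<int> tmp5,tmp6;
  std::set_intersection(localGlobalIds.begin(),localGlobalIds.end(),
                        candidates.begin(),candidates.end(),
                        std::inserter(tmp5,tmp5.begin()));
  std::set_difference(tmp5.begin(),tmp5.end(),
                      alreadyInteracting.begin(),alreadyInteracting.end(),
                      std::inserter(tmp6,tmp6.begin()));
  return tmp6;
}
\endcode
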
+  /*!
+   * connected with ElementLocator::recvAddElementsFromLazyProcsW
+   */
+  void ElementLocator::sendAddElementsToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const std::vector<int>& vals=_ids_per_working_proc3[procId];
+        int size=vals.size();
+        comm.send((void *)&size,1,MPI_INT,*iter,1130,*_comm);
+        comm.send((void *)&vals[0],size,MPI_INT,*iter,1131,*_comm);
+      }
+  }
+
+  /*!
+   * This method sends to each working side proc Wi only the nodes that are in interaction
+   * with Wi \b and located on the boundary, in order to reduce the number of exchanged nodes.
+   * connected with ElementLocator::recvCandidatesGlobalIdsFromLazyProcsW
+   */
+  void ElementLocator::sendCandidatesGlobalIdsToWorkingSideL()
+  {
+    int procId=0;
+    CommInterface comm;
+    DataArrayInt *globalIds=_local_para_field.returnGlobalNumbering();
+    const int *globalIdsC=globalIds->getConstPointer();
+    std::vector<int> candidates;
+    _local_para_field.getSupport()->getCellMesh()->findBoundaryNodes(candidates);
+    for(std::vector<int>::iterator iter1=candidates.begin();iter1!=candidates.end();iter1++)
+      (*iter1)=globalIdsC[*iter1];
+    std::set<int> candidatesS(candidates.begin(),candidates.end());
+    for(vector<int>::const_iterator iter=_distant_proc_ids.begin();iter!=_distant_proc_ids.end();iter++,procId++)
+      {
+        const vector<int>& ids=_ids_per_working_proc[procId];
+        vector<int> valsToSend(ids.size());
+        vector<int>::iterator iter1=valsToSend.begin();
+        for(vector<int>::const_iterator iter2=ids.begin();iter2!=ids.end();iter2++,iter1++)
+          *iter1=globalIdsC[*iter2];
+        std::set<int> tmp2(valsToSend.begin(),valsToSend.end());
+        std::vector<int> tmp3;
+        set_intersection(candidatesS.begin(),candidatesS.end(),tmp2.begin(),tmp2.end(),std::back_insert_iterator< std::vector<int> >(tmp3));
+        int lgth=tmp3.size();
+        comm.send(&lgth,1,MPI_INT,*iter,1132,*_comm);
+        comm.send(&tmp3[0],lgth,MPI_INT,*iter,1133,*_comm);
+      }
+    if(globalIds)
+      globalIds->decrRef();
+  }
+#endif
+}
diff --git a/src/ParaMEDMEM/OverlapElementLocator.hxx b/src/ParaMEDMEM/OverlapElementLocator.hxx
new file mode 100644
index 000000000..d5446e9d3
--- /dev/null
+++ b/src/ParaMEDMEM/OverlapElementLocator.hxx
@@ -0,0 +1,69 @@
+// Copyright (C) 2007-2010  CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __OVERLAPELEMENTLOCATOR_HXX__
+#define __OVERLAPELEMENTLOCATOR_HXX__
+
+#include "InterpolationOptions.hxx"
+#include "MEDCouplingNatureOfField.hxx"
+
+#include <mpi.h>
+#include <vector>
+#include <set>
+
+namespace ParaMEDMEM
+{
+  class ParaFIELD;
+  class ProcessorGroup;
+  class ParaSUPPORT;
+  class InterpolationMatrix;
+  class MEDCouplingPointSet;
+  class DataArrayInt;
+
+  class OverlapElementLocator : public INTERP_KERNEL::InterpolationOptions
+  {
+  public:
+    OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group);
+    virtual ~OverlapElementLocator();
+    const MPI_Comm *getCommunicator() const;
+  private:
+    void _computeBoundingBoxes();
+    bool _intersectsBoundingBox(int i, int j) const;
+  private:
+    const ParaFIELD *_local_source_field;
+    const ParaFIELD *_local_target_field;
+    int _local_space_dim;
+    MEDCouplingPointSet *_local_source_mesh;
+    MEDCouplingPointSet *_local_target_mesh;
+    std::vector<MEDCouplingPointSet*> _distant_cell_meshes;
+    std::vector<MEDCouplingPointSet*> _distant_face_meshes;
+    double* _domain_bounding_boxes;
+    const ProcessorGroup& _group;
+    std::vector<int> _distant_proc_ids;
+    const MPI_Comm *_comm;
+    //Attributes only used by lazy side
+    //std::vector<double> _values_added;
+    //std::vector< std::vector<int> > _ids_per_working_proc;
+    //std::vector< std::vector<int> > _ids_per_working_proc3;
+    //std::vector< std::vector<double> > _values_per_working_proc;
+  };
+
+}
+
+#endif
diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx
new file mode 100644
index 000000000..3fa396c13
--- /dev/null
+++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.cxx
@@ -0,0 +1,67 @@
+// Copyright (C) 2007-2010  CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "OverlapInterpolationMatrix.hxx"
+#include "ParaMESH.hxx"
+#include "ParaFIELD.hxx"
+#include "ProcessorGroup.hxx"
+#include "MxN_Mapping.hxx"
+#include "TranslationRotationMatrix.hxx"
+#include "Interpolation.hxx"
+#include "Interpolation1D.txx"
+#include "Interpolation2DCurve.hxx"
+#include "Interpolation2D.txx"
+#include "Interpolation3DSurf.hxx"
+#include "Interpolation3D.txx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingNormalizedUnstructuredMesh.txx"
+#include "InterpolationOptions.hxx"
+#include "NormalizedUnstructuredMesh.hxx"
+#include "ElementLocator.hxx"
+
+#include <algorithm>
+
+using namespace std;
+
+namespace ParaMEDMEM
+{
+  OverlapInterpolationMatrix::OverlapInterpolationMatrix(const ParaFIELD *source_field,
+                                                         const ParaFIELD *target_field,
+                                                         const ProcessorGroup& group,
+                                                         const DECOptions& dec_options,
+                                                         const INTERP_KERNEL::InterpolationOptions& i_opt):
+    INTERP_KERNEL::InterpolationOptions(i_opt),
+    DECOptions(dec_options),
+    _source_field(source_field),
+    _target_field(target_field),
+    _source_support(source_field->getSupport()->getCellMesh()),
+    _target_support(target_field->getSupport()->getCellMesh()),
+    //_mapping(source_group, target_group, dec_options),
+    _group(group)
+  {
+    int nbelems = source_field->getField()->getNumberOfTuples();
+    _row_offsets.resize(nbelems+1);
+    _coeffs.resize(nbelems);
+    _target_volume.resize(nbelems);
+  }
+
+  OverlapInterpolationMatrix::~OverlapInterpolationMatrix()
+  {
+  }
+}
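
The constructor above only sizes the per-row storage: _row_offsets gets one slot per source tuple plus one, while _coeffs and _target_volume get one (initially empty) row each. One plausible reading of that layout, sketched with hypothetical names and no claim about the real fill logic, which is still disabled under #if 0 in the header below:

\code
#include <utility>
#include <vector>

// Row-compressed view: row i keeps its own (column,value) list, as suggested
// by std::vector<std::vector<std::pair<int,double> > > _coeffs.
struct RowStorageSketch
{
  std::vector<int> rowOffsets;                                 // nbRows+1 entries
  std::vector< std::vector< std::pair<int,double> > > coeffs;  // one list per row
  double applyRow(int i, const std::vector<double>& x) const
  {
    double s=0.;
    for(std::size_t k=0;k<coeffs[i].size();k++)
      s+=coeffs[i][k].second*x[coeffs[i][k].first];
    return s;
  }
};
\endcode
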
diff --git a/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx
new file mode 100644
index 000000000..e75564868
--- /dev/null
+++ b/src/ParaMEDMEM/OverlapInterpolationMatrix.hxx
@@ -0,0 +1,106 @@
+// Copyright (C) 2007-2010  CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __OVERLAPINTERPOLATIONMATRIX_HXX__
+#define __OVERLAPINTERPOLATIONMATRIX_HXX__
+
+#include "MPIAccessDEC.hxx"
+#include "MxN_Mapping.hxx"
+#include "InterpolationOptions.hxx"
+#include "DECOptions.hxx"
+
+namespace ParaMEDMEM
+{
+  class ParaFIELD;
+  class MEDCouplingPointSet;
+
+  class OverlapInterpolationMatrix : public INTERP_KERNEL::InterpolationOptions,
+                                     public DECOptions
+  {
+  public:
+
+    OverlapInterpolationMatrix(const ParaFIELD *source_field,
+                               const ParaFIELD *target_field,
+                               const ProcessorGroup& group,
+                               const DECOptions& dec_opt,
+                               const InterpolationOptions& i_opt);
+
+    virtual ~OverlapInterpolationMatrix();
+#if 0
+    void addContribution(MEDCouplingPointSet& distant_support, int iproc_distant,
+                         const int* distant_elems, const std::string& srcMeth, const std::string& targetMeth);
+    void finishContributionW(ElementLocator& elementLocator);
+    void finishContributionL(ElementLocator& elementLocator);
+    void multiply(MEDCouplingFieldDouble& field) const;
+    void transposeMultiply(MEDCouplingFieldDouble& field) const;
+    void prepare();
+    int getNbRows() const { return _row_offsets.size(); }
+    MPIAccessDEC* getAccessDEC() { return _mapping.getAccessDEC(); }
+  private:
+    void computeConservVolDenoW(ElementLocator& elementLocator);
+    void computeIntegralDenoW(ElementLocator& elementLocator);
+    void computeRevIntegralDenoW(ElementLocator& elementLocator);
+    void computeGlobConstraintDenoW(ElementLocator& elementLocator);
+    void computeConservVolDenoL(ElementLocator& elementLocator);
+    void computeIntegralDenoL(ElementLocator& elementLocator);
+    void computeRevIntegralDenoL(ElementLocator& elementLocator);
+
+    void computeLocalColSum(std::vector<double>& res) const;
+    void computeLocalRowSum(const std::vector<int>& distantProcs, std::vector<std::vector<int> >& resPerProcI,
+                            std::vector<std::vector<double> >& resPerProcD) const;
+    void computeGlobalRowSum(ElementLocator& elementLocator, std::vector<std::vector<double> >& denoStrorage, std::vector<std::vector<double> >& denoStrorageInv);
+    void computeGlobalColSum(std::vector<std::vector<double> >& denoStrorage);
+    void resizeGlobalColSum(std::vector<std::vector<double> >& denoStrorage);
+    void fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map<int,double> >& values, MEDCouplingFieldDouble *surf);
+    void serializeMe(std::vector< std::vector< std::map<int,double> > >& data1, std::vector<int>& data2) const;
+    void initialize();
+    void findAdditionnalElements(ElementLocator& elementLocator, std::vector<std::vector<int> >& elementsToAdd,
+                                 const std::vector<std::vector<int> >& resPerProcI, const std::vector<std::vector<int> >& globalIdsPartial);
+    void addGhostElements(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& elementsToAdd);
+    int mergePolicies(const std::vector<int>& policyPartial);
+    void mergeRowSum(const std::vector< std::vector<double> >& rowsPartialSumD, const std::vector< std::vector<int> >& globalIdsPartial,
+                     std::vector<int>& globalIdsLazySideInteraction, std::vector<double>& sumCorresponding);
+    void mergeRowSum2(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD,
+                      const std::vector<int>& globalIdsLazySideInteraction, const std::vector<double>& sumCorresponding);
+    void mergeRowSum3(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD);
+    void mergeCoeffs(const std::vector<int>& procsInInteraction, const std::vector< std::vector<int> >& rowsPartialSumI,
+                     const std::vector<std::vector<int> >& globalIdsPartial, std::vector<std::vector<double> >& denoStrorageInv);
+    void divideByGlobalRowSum(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& resPerProcI,
+                              const std::vector<std::vector<double> >& resPerProcD, std::vector<std::vector<double> >& deno);
+  private:
+    bool isSurfaceComputationNeeded(const std::string& method) const;
+#endif
+  private:
+    const ParaMEDMEM::ParaFIELD *_source_field;
+    const ParaMEDMEM::ParaFIELD *_target_field;
+    std::vector<int> _row_offsets;
+    std::map<std::pair<int,int>, int > _col_offsets;
+    MEDCouplingPointSet *_source_support;
+    MEDCouplingPointSet *_target_support;
+    MxN_Mapping _mapping;
+
+    const ProcessorGroup& _group;
+    std::vector< std::vector<double> > _target_volume;
+    std::vector<std::vector<std::pair<int,double> > > _coeffs;
+    std::vector<std::vector<double> > _deno_multiply;
+    std::vector<std::vector<double> > _deno_reverse_multiply;
+  };
+}
+
+#endif
diff --git a/src/ParaMEDMEM/ParaFIELD.cxx b/src/ParaMEDMEM/ParaFIELD.cxx
index e0f0ba085..854e824ea 100644
--- a/src/ParaMEDMEM/ParaFIELD.cxx
+++ b/src/ParaMEDMEM/ParaFIELD.cxx
@@ -128,7 +128,7 @@ namespace ParaMEDMEM
 
   void ParaFIELD::synchronizeTarget(ParaFIELD* source_field)
   {
-    DEC* data_channel;
+    DisjointDEC* data_channel;
     if (dynamic_cast<BlockTopology*>(_topology)!=0)
       {
         data_channel=new StructuredCoincidentDEC;
@@ -147,7 +147,7 @@ namespace ParaMEDMEM
 
   void ParaFIELD::synchronizeSource(ParaFIELD* target_field)
   {
-    DEC* data_channel;
+    DisjointDEC* data_channel;
     if (dynamic_cast<BlockTopology*>(_topology)!=0)
      {
        data_channel=new StructuredCoincidentDEC;
diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.cxx b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx
index 1f787b881..bd9368225 100644
--- a/src/ParaMEDMEM/StructuredCoincidentDEC.cxx
+++ b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx
@@ -105,7 +105,7 @@ namespace ParaMEDMEM
     \addtogroup structuredcoincidentdec
     @{
   */
-  StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):DEC(local_group,distant_group),
+  StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):DisjointDEC(local_group,distant_group),
     _topo_source(0),_topo_target(0),
     _send_counts(0),_recv_counts(0),
     _send_displs(0),_recv_displs(0),
diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.hxx b/src/ParaMEDMEM/StructuredCoincidentDEC.hxx
index f25227194..0a1073b35 100644
--- a/src/ParaMEDMEM/StructuredCoincidentDEC.hxx
+++ b/src/ParaMEDMEM/StructuredCoincidentDEC.hxx
@@ -20,7 +20,7 @@
 #ifndef __STRUCTUREDCOINCIDENTDEC_HXX__
 #define __STRUCTUREDCOINCIDENTDEC_HXX__
 
-#include "DEC.hxx"
+#include "DisjointDEC.hxx"
 #include "BlockTopology.hxx"
 
@@ -28,7 +28,7 @@ namespace ParaMEDMEM
 {
   class DEC;
   class BlockTopology;
-  class StructuredCoincidentDEC : public DEC
+  class StructuredCoincidentDEC : public DisjointDEC
   {
   public:
     StructuredCoincidentDEC();
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx
new file mode 100644
index 000000000..6f520d023
--- /dev/null
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx
@@ -0,0 +1,172 @@
+void ParaMEDMEMTest::testOverlapDEC()
+{
+  std::string srcM(srcMeth);
+  std::string targetM(targetMeth);
+  int size;
+  int rank;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+  if (size != 3) return ;
+
+  int nproc = 3;
+  set<int> procs;
+
+  for (int i=0; i<nproc; i++)
+    procs.insert(i);
+
+  OverlapDEC dec(procs);
+
+  MEDCouplingUMesh *meshS=0;
+  MEDCouplingUMesh *meshT=0;
+  ParaMESH *parameshS=0;
+  ParaMESH *parameshT=0;
+  ParaFIELD *parafieldS=0;
+  ParaFIELD *parafieldT=0;
+  //
+  if(rank==0)
+    {
+      const double coordsS[10]={0.,0.,0.5,0.,1.,0.,0.,0.5,0.5,0.5};
+      const double coordsT[6]={0.,0.,1.,0.,1.,1.};
+      meshS=MEDCouplingUMesh::New();
+      meshS->setMeshDimension(2);
+      DataArrayDouble *myCoords=DataArrayDouble::New();
+      myCoords->alloc(5,2);
+      std::copy(coordsS,coordsS+10,myCoords->getPointer());
+      meshS->setCoords(myCoords);
+      myCoords->decrRef();
+      int connS[7]={0,3,4,1, 1,4,2};
+      meshS->allocateCells(2);
+      meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS);
+      meshS->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connS+4);
+      meshS->finishInsertingCells();
+      ParaMEDMEM::ComponentTopology comptopo;
+      parameshS=new ParaMESH(meshS,*dec.getGrp(),"source mesh");
+      parafieldS=new ParaFIELD(ON_CELLS,NO_TIME,parameshS,comptopo);
+      parafieldS->getField()->setNature(ConservativeVolumic);
+      double *vals=parafieldS->getField()->getArray()->getPointer();
+      vals[0]=7.; vals[1]=8.;
+      //
+      meshT=MEDCouplingUMesh::New();
+      meshT->setMeshDimension(2);
+      myCoords=DataArrayDouble::New();
+      myCoords->alloc(3,2);
+      std::copy(coordsT,coordsT+6,myCoords->getPointer());
+      meshT->setCoords(myCoords);
+      myCoords->decrRef();
+      int connT[3]={0,2,1};
+      meshT->allocateCells(1);
+      meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT);
+      meshT->finishInsertingCells();
+      parameshT=new ParaMESH(meshT,*dec.getGrp(),"target mesh");
+      parafieldT=new ParaFIELD(ON_CELLS,NO_TIME,parameshT,comptopo);
+      parafieldT->getField()->setNature(ConservativeVolumic);
+    }
+  //
+  if(rank==1)
+    {
+      const double coordsS[10]={1.,0.,0.5,0.5,1.,0.5,0.5,1.,1.,1.};
+      const double coordsT[6]={0.,0.,0.5,0.5,0.,1.};
+      meshS=MEDCouplingUMesh::New();
+      meshS->setMeshDimension(2);
+      DataArrayDouble *myCoords=DataArrayDouble::New();
+      myCoords->alloc(5,2);
+      std::copy(coordsS,coordsS+10,myCoords->getPointer());
+      meshS->setCoords(myCoords);
+      myCoords->decrRef();
+      int connS[7]={0,1,2, 1,3,4,2};
+      meshS->allocateCells(2);
+      meshS->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connS);
+      meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS+3);
+      meshS->finishInsertingCells();
+      ParaMEDMEM::ComponentTopology comptopo;
+      parameshS=new ParaMESH(meshS,*dec.getGrp(),"source mesh");
+      parafieldS=new ParaFIELD(ON_CELLS,NO_TIME,parameshS,comptopo);
+      parafieldS->getField()->setNature(ConservativeVolumic);
+      double *vals=parafieldS->getField()->getArray()->getPointer();
+      vals[0]=9.; vals[1]=11.;
+      //
+      meshT=MEDCouplingUMesh::New();
+      meshT->setMeshDimension(2);
+      myCoords=DataArrayDouble::New();
+      myCoords->alloc(3,2);
+      std::copy(coordsT,coordsT+6,myCoords->getPointer());
+      meshT->setCoords(myCoords);
+      myCoords->decrRef();
+      int connT[3]={0,2,1};
+      meshT->allocateCells(1);
+      meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT);
+      meshT->finishInsertingCells();
+      parameshT=new ParaMESH(meshT,*dec.getGrp(),"target mesh");
+      parafieldT=new ParaFIELD(ON_CELLS,NO_TIME,parameshT,comptopo);
+      parafieldT->getField()->setNature(ConservativeVolumic);
+    }
+  //
+  if(rank==2)
+    {
+      const double coordsS[8]={0.,0.5, 0.5,0.5, 0.,1., 0.5,1.};
+      const double coordsT[6]={0.5,0.5,0.,1.,1.,1.};
+      meshS=MEDCouplingUMesh::New();
+      meshS->setMeshDimension(2);
+      DataArrayDouble *myCoords=DataArrayDouble::New();
+      myCoords->alloc(4,2);
+      std::copy(coordsS,coordsS+8,myCoords->getPointer());
+      meshS->setCoords(myCoords);
+      myCoords->decrRef();
+      int connS[4]={0,2,3,1};
+      meshS->allocateCells(1);
+      meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS);
+      meshS->finishInsertingCells();
+      ParaMEDMEM::ComponentTopology comptopo;
+      parameshS=new ParaMESH(meshS,*dec.getGrp(),"source mesh");
+      parafieldS=new ParaFIELD(ON_CELLS,NO_TIME,parameshS,comptopo);
+      parafieldS->getField()->setNature(ConservativeVolumic);
+      double *vals=parafieldS->getField()->getArray()->getPointer();
+      vals[0]=10.;
+      //
+      meshT=MEDCouplingUMesh::New();
+      meshT->setMeshDimension(2);
+      myCoords=DataArrayDouble::New();
+      myCoords->alloc(3,2);
+      std::copy(coordsT,coordsT+6,myCoords->getPointer());
+      meshT->setCoords(myCoords);
+      myCoords->decrRef();
+      int connT[3]={0,1,2};
+      meshT->allocateCells(1);
+      meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT);
+      meshT->finishInsertingCells();
+      parameshT=new ParaMESH(meshT,*dec.getGrp(),"target mesh");
+      parafieldT=new ParaFIELD(ON_CELLS,NO_TIME,parameshT,comptopo);
+      parafieldT->getField()->setNature(ConservativeVolumic);
+    }
+  dec.attachSourceLocalField(parafieldS);
+  dec.attachTargetLocalField(parafieldT);
+  dec.synchronize();
+  dec.sendRecvData(true);
+  //
+  if(rank==0)
+    {
+      CPPUNIT_ASSERT_DOUBLES_EQUAL(9.75,parafieldT->getField()->getArray()->getIJ(0,0),1e-12);
+    }
+  if(rank==1)
+    {
+      CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12);
+    }
+  if(rank==2)
+    {
+      CPPUNIT_ASSERT_DOUBLES_EQUAL(10.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12);
+    }
+
+  delete parafieldS;
+  delete parafieldT;
+  delete parameshS;
+  delete parameshT;
+  meshS->decrRef();
+  meshT->decrRef();
+
+  MPI_Barrier(MPI_COMM_WORLD);
+}
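
The test runs only on exactly 3 MPI processes; on any other communicator size it returns silently. A hypothetical standalone driver, assuming ParaMEDMEMTest.hxx declares the fixture; the real suite launches this method through CppUnit instead:

\code
#include <mpi.h>
#include "ParaMEDMEMTest.hxx"

// Illustrative driver only; run e.g. with: mpirun -np 3 ./overlap_dec_driver
int main(int argc, char *argv[])
{
  MPI_Init(&argc,&argv);
  ParaMEDMEMTest test;
  test.testOverlapDEC();
  MPI_Finalize();
  return 0;
}
\endcode
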