namespace ParaMEDMEM
{
+ DEC::DEC():_comm_interface(0)
+ {
+ }
+
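+  //! Copies the communication interface handle of \a other. The interface
+  //! appears to be shared rather than owned by the DEC, so a plain pointer
+  //! copy is sufficient here.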
+ void DEC::copyFrom(const DEC& other)
+ {
+ _comm_interface=other._comm_interface;
+ }
+
DEC::~DEC()
{
}
class DEC : public DECOptions
{
public:
+ DEC();
+ void copyFrom(const DEC& other);
virtual void synchronize() = 0;
virtual void sendRecvData(bool way=true) = 0;
virtual ~DEC();
#include "MPIProcessorGroup.hxx"
#include <cmath>
+#include <iostream>
/*! \defgroup dec DEC
*
_union_group = source_group.fuse(target_group);
}
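+
+  /*! Copy constructor: the members are first put in an empty state, then
+   *  copyInstance() deep-copies the groups of \a s. */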
+ DisjointDEC::DisjointDEC(const DisjointDEC& s):DEC(s),_local_field(0),_union_group(0),_source_group(0),_target_group(0),_owns_field(false),_owns_groups(false),_icoco_field(0)
+ {
+ copyInstance(s);
+ }
+
+  DisjointDEC & DisjointDEC::operator=(const DisjointDEC& s)
+  {
+    if(&s!=this)  // guard against self-assignment: cleanInstance() would otherwise release the members we are about to copy
+      {
+        cleanInstance();
+        copyInstance(s);
+      }
+    return *this;
+  }
+
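+  /*! Duplicates the state of \a other: the source and target groups are
+   *  deep-copied (and owned by this instance), and the union group is rebuilt
+   *  from those copies. */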
+ void DisjointDEC::copyInstance(const DisjointDEC& other)
+ {
+ DEC::copyFrom(other);
+ if(other._target_group)
+ {
+ _target_group=other._target_group->deepCpy();
+ _owns_groups=true;
+ }
+ if(other._source_group)
+ {
+ _source_group=other._source_group->deepCpy();
+ _owns_groups=true;
+ }
+ if (_source_group && _target_group)
+ _union_group = _source_group->fuse(*_target_group);
+ }
+
DisjointDEC::DisjointDEC(const std::set<int>& source_ids, const std::set<int>& target_ids, const MPI_Comm& world_comm):_local_field(0),
_owns_field(false),
_owns_groups(true),
}
DisjointDEC::~DisjointDEC()
+ {
+ cleanInstance();
+ }
+
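+  /*! Releases every member owned by this instance and resets all pointers and
+   *  ownership flags, so the object can safely be destroyed or reassigned. */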
+ void DisjointDEC::cleanInstance()
{
if(_owns_field)
- delete _local_field;
+ {
+ delete _local_field;
+ }
+ _local_field=0;
+ _owns_field=false;
if(_owns_groups)
{
delete _source_group;
delete _target_group;
}
+ _owns_groups=false;
+ _source_group=0;
+ _target_group=0;
delete _icoco_field;
+ _icoco_field=0;
delete _union_group;
+ _union_group=0;
}
void DisjointDEC::setNature(NatureOfField nature)
class DisjointDEC : public DEC
{
public:
- DisjointDEC():_local_field(0) { }
+ DisjointDEC():_local_field(0),_union_group(0),_source_group(0),_target_group(0),_owns_field(false),_owns_groups(false),_icoco_field(0) { }
DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
+ DisjointDEC(const DisjointDEC&);
+ DisjointDEC &operator=(const DisjointDEC& s);
DisjointDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
const MPI_Comm& world_comm=MPI_COMM_WORLD);
void setNature(NatureOfField nature);
bool isInUnion() const;
protected:
void compareFieldAndMethod() const throw(INTERP_KERNEL::Exception);
+ void cleanInstance();
+ void copyInstance(const DisjointDEC& other);
protected:
const ParaFIELD* _local_field;
//! Processor group representing the union of target and source processors
_has_field_ownership(false) { }
// Copy constructor
-TrioField::TrioField(const TrioField& OtherField) {
+TrioField::TrioField(const TrioField& OtherField):_connectivity(0),_coords(0),_field(0) {
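+  // Null-initialize the pointer members first: operator= presumably releases
+  // any existing buffers, so it must not see indeterminate pointers here.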
(*this)=OtherField;
}
else if (_type==1)
return _nbnodes;
throw 0;
- //exit(-1);
return -1;
}
@{
*/
- InterpKernelDEC::InterpKernelDEC()
+ InterpKernelDEC::InterpKernelDEC():_interpolation_matrix(0)
{
}
MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
ProcessorGroup(interface, proc_ids),_world_comm(world_comm)
+ {
+ updateMPISpecificAttributes();
+ }
+
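+  /*! (Re)builds the MPI group and communicator corresponding to _proc_ids;
+   *  shared by the regular constructors and the copy constructor. */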
+ void MPIProcessorGroup::updateMPISpecificAttributes()
{
//Creation of a communicator
MPI_Group group_world;
_comm_interface.commRank(_world_comm,&rank_world);
_comm_interface.commGroup(_world_comm, &group_world);
- int* ranks=new int[proc_ids.size()];
+ int* ranks=new int[_proc_ids.size()];
// copying proc_ids in ranks
- copy<set<int>::const_iterator,int*> (proc_ids.begin(), proc_ids.end(), ranks);
- for (int i=0; i< (int)proc_ids.size();i++)
+ copy<set<int>::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks);
+ for (int i=0; i< (int)_proc_ids.size();i++)
if (ranks[i]>size_world-1)
throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");
- _comm_interface.groupIncl(group_world, proc_ids.size(), ranks, &_group);
+ _comm_interface.groupIncl(group_world, _proc_ids.size(), ranks, &_group);
_comm_interface.commCreate(_world_comm, _group, &_comm);
delete[] ranks;
}
+
/*! Creates a processor group that is based on the processors between \a pstart and \a pend.
This routine must be called by all processors in MPI_COMM_WORLD.
exit(1);
}
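+
+  /*! Copy constructor: the process ids and the world communicator of \a other
+   *  are reused, but a fresh MPI group/communicator pair is created for the
+   *  new instance via updateMPISpecificAttributes(). */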
+ MPIProcessorGroup::MPIProcessorGroup(const MPIProcessorGroup& other):ProcessorGroup(other),_world_comm(other._world_comm)
+ {
+ updateMPISpecificAttributes();
+ }
+
MPIProcessorGroup::~MPIProcessorGroup()
{
_comm_interface.groupFree(&_group);
}
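+
+  /*! Polymorphic copy of this group; the caller (e.g. DisjointDEC::copyInstance())
+   *  becomes responsible for deleting the returned instance. */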
+ ProcessorGroup *MPIProcessorGroup::deepCpy() const
+ {
+ return new MPIProcessorGroup(*this);
+ }
+
/*!Adding processors of group \a group to local group.
\param group group that is to be fused with current group
\return new group formed by the fusion of local group and \a group.
MPIProcessorGroup(const CommInterface& interface, std::set<int> proc_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
MPIProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids);
MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+ MPIProcessorGroup(const MPIProcessorGroup& other);
virtual ~MPIProcessorGroup();
+ virtual ProcessorGroup *deepCpy() const;
virtual ProcessorGroup* fuse (const ProcessorGroup&) const;
void intersect (ProcessorGroup&) { }
int myRank() const;
ProcessorGroup* createComplementProcGroup() const;
ProcessorGroup* createProcGroup() const;
MPI_Comm getWorldComm() { return _world_comm; }
+ private:
+ void updateMPISpecificAttributes();
private:
const MPI_Comm _world_comm;
MPI_Group _group;
ProcessorGroup(const CommInterface& interface, std::set<int> proc_ids):
_comm_interface(interface),_proc_ids(proc_ids) { }
ProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids):
- _comm_interface(proc_group.getCommInterface()) { }
+ _comm_interface(proc_group.getCommInterface()),_proc_ids(proc_ids) { }
+ ProcessorGroup (const ProcessorGroup& other):
+ _comm_interface(other.getCommInterface()),_proc_ids(other._proc_ids) { }
ProcessorGroup (const CommInterface& interface, int start, int end);
virtual ~ProcessorGroup() { }
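+    //! Polymorphic clone of the group; the caller owns the returned object.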
+ virtual ProcessorGroup *deepCpy() const = 0;
virtual ProcessorGroup* fuse (const ProcessorGroup&) const = 0;
virtual void intersect (ProcessorGroup&) = 0;
bool contains(int rank) const { return _proc_ids.find(rank)!=_proc_ids.end(); }