SALOME platform Git repositories - tools/medcoupling.git/commitdiff
Correction of bugs reported by GF.

Adds copy construction/assignment to the DEC and ProcessorGroup hierarchies
(DEC::copyFrom, DisjointDEC copy ctor/operator=, ProcessorGroup::deepCpy),
fixes the ProcessorGroup delegating constructor dropping its proc ids, and
initializes pointer members that default and copy constructors previously
left uninitialized (DisjointDEC, TrioField, InterpKernelDEC).
author    ageay <ageay>
Mon, 26 Sep 2011 12:31:05 +0000 (12:31 +0000)
committer ageay <ageay>
Mon, 26 Sep 2011 12:31:05 +0000 (12:31 +0000)
src/ParaMEDMEM/DEC.cxx
src/ParaMEDMEM/DEC.hxx
src/ParaMEDMEM/DisjointDEC.cxx
src/ParaMEDMEM/DisjointDEC.hxx
src/ParaMEDMEM/ICoCoTrioField.cxx
src/ParaMEDMEM/InterpKernelDEC.cxx
src/ParaMEDMEM/MPIProcessorGroup.cxx
src/ParaMEDMEM/MPIProcessorGroup.hxx
src/ParaMEDMEM/ProcessorGroup.hxx

diff --git a/src/ParaMEDMEM/DEC.cxx b/src/ParaMEDMEM/DEC.cxx
index d3be38c22c863d917afab33e2ead4d79790e4e01..18a699bed561766c58a12837a28dc36bb6835af5 100644
 
 namespace ParaMEDMEM
 {
+  DEC::DEC():_comm_interface(0)
+  {
+  }
+
+  void DEC::copyFrom(const DEC& other)
+  {
+    _comm_interface=other._comm_interface;
+  }
+  
   DEC::~DEC()
   {
   }
diff --git a/src/ParaMEDMEM/DEC.hxx b/src/ParaMEDMEM/DEC.hxx
index 8550b90878876d2dda73bc0fdbf7951c9e1c7285..b6f292a9a3cf18861ab593b37713c2447b82de10 100644
@@ -30,6 +30,8 @@ namespace ParaMEDMEM
   class DEC : public DECOptions
   {
   public:
+    DEC();
+    void copyFrom(const DEC& other);
     virtual void synchronize() = 0;
     virtual void sendRecvData(bool way=true) = 0;
     virtual ~DEC();
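
A minimal sketch (not part of the commit) of how a subclass would reuse the
new DEC::copyFrom() in its own copy operations; the class FooDEC and its
_payload member are hypothetical:

    #include "DEC.hxx"

    class FooDEC : public ParaMEDMEM::DEC
    {
    public:
      FooDEC():_payload(0) { }
      FooDEC &operator=(const FooDEC& other)
      {
        DEC::copyFrom(other);     // copy the base state (_comm_interface)
        _payload=other._payload;  // then the derived state
        return *this;
      }
      void synchronize() { }                // stubs for the pure virtuals
      void sendRecvData(bool way=true) { }
    private:
      int _payload;
    };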
diff --git a/src/ParaMEDMEM/DisjointDEC.cxx b/src/ParaMEDMEM/DisjointDEC.cxx
index f4250036371a454c904294fbeb8897e164b9ebb8..81cea18e02d88939cab4e5ed7a71e5b70e893558 100644
@@ -30,6 +30,7 @@
 #include "MPIProcessorGroup.hxx"
 
 #include <cmath>
+#include <iostream>
 
 /*! \defgroup dec DEC
  *
@@ -85,6 +86,36 @@ namespace ParaMEDMEM
     _union_group = source_group.fuse(target_group);  
   }
   
+  DisjointDEC::DisjointDEC(const DisjointDEC& s):DEC(s),_local_field(0),_union_group(0),_source_group(0),_target_group(0),_owns_field(false),_owns_groups(false),_icoco_field(0)
+  {
+    copyInstance(s);
+  }
+
+  DisjointDEC & DisjointDEC::operator=(const DisjointDEC& s)
+  {
+    cleanInstance();
+    copyInstance(s);
+    return *this;
+
+  }
+
+  void DisjointDEC::copyInstance(const DisjointDEC& other)
+  {
+    DEC::copyFrom(other);
+    if(other._target_group)
+      {
+        _target_group=other._target_group->deepCpy();
+        _owns_groups=true;
+      }
+    if(other._source_group)
+      {
+        _source_group=other._source_group->deepCpy();
+        _owns_groups=true;
+      }
+    if (_source_group && _target_group)
+      _union_group = _source_group->fuse(*_target_group);
+  }
+
   DisjointDEC::DisjointDEC(const std::set<int>& source_ids, const std::set<int>& target_ids, const MPI_Comm& world_comm):_local_field(0), 
                                                                                                                          _owns_field(false),
                                                                                                                          _owns_groups(true),
@@ -144,16 +175,30 @@ namespace ParaMEDMEM
   }
 
   DisjointDEC::~DisjointDEC()
+  {
+    cleanInstance();
+  }
+
+  void DisjointDEC::cleanInstance()
   {
     if(_owns_field)
-      delete _local_field;
+      {
+        delete _local_field;
+      }
+    _local_field=0;
+    _owns_field=false;
     if(_owns_groups)
       {
         delete _source_group;
         delete _target_group;
       }
+    _owns_groups=false;
+    _source_group=0;
+    _target_group=0;
     delete _icoco_field;
+    _icoco_field=0;
     delete _union_group;
+    _union_group=0;
   }
 
   void DisjointDEC::setNature(NatureOfField nature)
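
One caveat on the clean-then-copy idiom above: cleanInstance() nulls the very
pointers that copyInstance() reads, so self-assignment ("d = d;") would
silently drop the groups. A possible guard, sketched here as a hardening
suggestion rather than as part of the commit:

    DisjointDEC & DisjointDEC::operator=(const DisjointDEC& s)
    {
      if(this!=&s)  // skip the destroy/copy cycle on self-assignment
        {
          cleanInstance();
          copyInstance(s);
        }
      return *this;
    }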
diff --git a/src/ParaMEDMEM/DisjointDEC.hxx b/src/ParaMEDMEM/DisjointDEC.hxx
index 441c7ea2b9f05cce3cad776fcb11440763edf177..9644a9e57e421f831e2697138e740b411ba9018e 100644
@@ -40,8 +40,10 @@ namespace ParaMEDMEM
   class DisjointDEC : public DEC
   {
   public:
-    DisjointDEC():_local_field(0) { }
+    DisjointDEC():_local_field(0),_union_group(0),_source_group(0),_target_group(0),_owns_field(false),_owns_groups(false),_icoco_field(0) { }
     DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group);
+    DisjointDEC(const DisjointDEC&);
+    DisjointDEC &operator=(const DisjointDEC& s);
     DisjointDEC(const std::set<int>& src_ids, const std::set<int>& trg_ids,
                 const MPI_Comm& world_comm=MPI_COMM_WORLD);
     void setNature(NatureOfField nature);
@@ -66,6 +68,8 @@ namespace ParaMEDMEM
     bool isInUnion() const;
   protected:
     void compareFieldAndMethod() const throw(INTERP_KERNEL::Exception);
+    void cleanInstance();
+    void copyInstance(const DisjointDEC& other);
   protected:
     const ParaFIELD* _local_field;
     //! Processor group representing the union of target and source processors
diff --git a/src/ParaMEDMEM/ICoCoTrioField.cxx b/src/ParaMEDMEM/ICoCoTrioField.cxx
index 02ac1fa0622cf8299df5c4e63b49eed6642e5eb9..a1b1a2a8d51a4dab61caa5f5a8241e83c9bca093 100644
@@ -47,7 +47,7 @@ TrioField::TrioField() :
   _has_field_ownership(false) { }
 
 // Copy constructor
-TrioField::TrioField(const TrioField& OtherField) {
+TrioField::TrioField(const TrioField& OtherField):_connectivity(0),_coords(0),_field(0) {
   (*this)=OtherField;
 }
 
@@ -79,7 +79,6 @@ int TrioField::nb_values() const {
   else if (_type==1)
     return _nbnodes;
   throw 0;
-  //exit(-1);
   return -1;
 }
 
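
The zero-initialization added to the TrioField copy constructor matters
because it delegates to operator=, which by the usual pattern frees the old
buffers before copying; deleting uninitialized pointers would be undefined
behaviour. A self-contained sketch of that pattern with a hypothetical Field
class (the real TrioField::operator= is not shown in this diff):

    #include <algorithm>

    struct Field
    {
      Field():_data(0),_n(0) { }
      Field(const Field& o):_data(0),_n(0) { *this=o; }  // same fix as TrioField
      Field& operator=(const Field& o)
      {
        if(this!=&o)
          {
            delete[] _data;                  // safe: _data is null or owned
            _n=o._n;
            _data = _n ? new double[_n] : 0;
            if(_n)
              std::copy(o._data,o._data+_n,_data);
          }
        return *this;
      }
      ~Field() { delete[] _data; }
      double *_data;
      int _n;
    };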
diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx
index 2157ab9b0a3ddb4b3fea35619bcecccd9c6c4e89..c360aa9ac4d9fedf96d039f3768a941f898d3c84 100644
@@ -104,7 +104,7 @@ namespace ParaMEDMEM
     @{
   */
   
-  InterpKernelDEC::InterpKernelDEC()
+  InterpKernelDEC::InterpKernelDEC():_interpolation_matrix(0)
   {  
   }
 
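
Same null-init pattern for InterpKernelDEC: a default-constructed DEC may be
destroyed without the matrix ever being built, and (assuming the destructor
deletes _interpolation_matrix, which this diff does not show) delete on a
null pointer is a no-op while delete on an uninitialized pointer is undefined
behaviour:

    int main()
    {
      double *p=0;
      delete[] p;  // well-defined: deleting a null pointer does nothing
      return 0;
    }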
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.cxx b/src/ParaMEDMEM/MPIProcessorGroup.cxx
index 5acd38614af4832b4181294b015af4773e719fa8..342e07413cf5e14f3e19d2b4dff7a354606b5b5f 100644
@@ -88,6 +88,12 @@ namespace ParaMEDMEM
 
   MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
     ProcessorGroup(interface, proc_ids),_world_comm(world_comm)
+  {
+    updateMPISpecificAttributes();
+  }
+
+
+  void MPIProcessorGroup::updateMPISpecificAttributes()
   {
     //Creation of a communicator 
     MPI_Group group_world;
@@ -98,19 +104,20 @@ namespace ParaMEDMEM
     _comm_interface.commRank(_world_comm,&rank_world);
     _comm_interface.commGroup(_world_comm, &group_world);
 
-    int* ranks=new int[proc_ids.size()];
+    int* ranks=new int[_proc_ids.size()];
    
     // copying proc_ids in ranks
-    copy<set<int>::const_iterator,int*> (proc_ids.begin(), proc_ids.end(), ranks);
-    for (int i=0; i< (int)proc_ids.size();i++)
+    copy<set<int>::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks);
+    for (int i=0; i< (int)_proc_ids.size();i++)
       if (ranks[i]>size_world-1)
         throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");
       
-    _comm_interface.groupIncl(group_world, proc_ids.size(), ranks, &_group);
+    _comm_interface.groupIncl(group_world, _proc_ids.size(), ranks, &_group);
   
     _comm_interface.commCreate(_world_comm, _group, &_comm);
     delete[] ranks;
   }
+
   /*! Creates a processor group that is based on the processors between \a pstart and \a pend.
     This routine must be called by all processors in MPI_COMM_WORLD.
 
@@ -156,6 +163,11 @@ namespace ParaMEDMEM
     exit(1);
   }
 
+  MPIProcessorGroup::MPIProcessorGroup(const MPIProcessorGroup& other):ProcessorGroup(other),_world_comm(other._world_comm)
+  {
+    updateMPISpecificAttributes();
+  }
+
   MPIProcessorGroup::~MPIProcessorGroup()
   {
     _comm_interface.groupFree(&_group);
@@ -199,6 +211,11 @@ namespace ParaMEDMEM
     
   }
 
+  ProcessorGroup *MPIProcessorGroup::deepCpy() const
+  {
+    return new MPIProcessorGroup(*this);
+  }
+
   /*!Adding processors of group \a group to local group.
     \param group group that is to be fused with current group
     \return new group formed by the fusion of local group and \a group.
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx
index c240edb93356654f971db31a025365a5b7ebbb6d..1783cdba9afd0a8c08256d7ee10e573bf7c5fe90 100644
@@ -36,7 +36,9 @@ namespace ParaMEDMEM
     MPIProcessorGroup(const CommInterface& interface, std::set<int> proc_ids, const MPI_Comm& world_comm=MPI_COMM_WORLD);
     MPIProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids);
     MPIProcessorGroup(const CommInterface& interface,int pstart, int pend, const MPI_Comm& world_comm=MPI_COMM_WORLD);
+    MPIProcessorGroup(const MPIProcessorGroup& other);
     virtual ~MPIProcessorGroup();
+    virtual ProcessorGroup *deepCpy() const;
     virtual ProcessorGroup* fuse (const ProcessorGroup&) const;
     void intersect (ProcessorGroup&) { }
     int myRank() const;
@@ -46,6 +48,8 @@ namespace ParaMEDMEM
     ProcessorGroup* createComplementProcGroup() const;
     ProcessorGroup* createProcGroup() const;
     MPI_Comm getWorldComm() { return _world_comm; }
+  private:
+    void updateMPISpecificAttributes();
   private:
     const MPI_Comm _world_comm;
     MPI_Group _group;
diff --git a/src/ParaMEDMEM/ProcessorGroup.hxx b/src/ParaMEDMEM/ProcessorGroup.hxx
index 9c157943a6aedd5aeac26dfe7c9e2dee8a8d5d42..3c233108940fb79b2a178f94b1b297f29b337ef6 100644
@@ -34,9 +34,12 @@ namespace ParaMEDMEM
     ProcessorGroup(const CommInterface& interface, std::set<int> proc_ids):
       _comm_interface(interface),_proc_ids(proc_ids) { }
     ProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids):
-      _comm_interface(proc_group.getCommInterface()) { }
+      _comm_interface(proc_group.getCommInterface()),_proc_ids(proc_ids) { }
+    ProcessorGroup (const ProcessorGroup& other):
+      _comm_interface(other.getCommInterface()),_proc_ids(other._proc_ids) { }
     ProcessorGroup (const CommInterface& interface, int start, int end);
     virtual ~ProcessorGroup() { }
+    virtual ProcessorGroup *deepCpy() const = 0;
     virtual ProcessorGroup* fuse (const ProcessorGroup&) const = 0;
     virtual void intersect (ProcessorGroup&) = 0;
     bool contains(int rank) const { return _proc_ids.find(rank)!=_proc_ids.end(); }
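
Note the quiet fix in the delegating constructor above: it previously ignored
proc_ids, leaving _proc_ids empty, so contains() always returned false on
groups built through it. A hypothetical illustration (assuming
MPIProcessorGroup(const ProcessorGroup&, std::set<int>) forwards here;
'interface' and the rank values are made up):

    std::set<int> ids;
    ids.insert(0);
    ids.insert(1);
    ParaMEDMEM::MPIProcessorGroup whole(interface, ids);
    ParaMEDMEM::MPIProcessorGroup sub(whole, ids);  // base ctor now keeps ids
    bool ok=sub.contains(0);                        // true after the fix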