SALOME platform Git repositories - tools/medcoupling.git / commitdiff
creating ParaMEDMEM
author    vbd <vbd>    Wed, 24 Jan 2007 11:07:54 +0000 (11:07 +0000)
committer vbd <vbd>    Wed, 24 Jan 2007 11:07:54 +0000 (11:07 +0000)
42 files changed:
src/ParaMEDMEM/BlockTopology.cxx [new file with mode: 0644]
src/ParaMEDMEM/BlockTopology.hxx [new file with mode: 0644]
src/ParaMEDMEM/CommInterface.cxx [new file with mode: 0644]
src/ParaMEDMEM/CommInterface.hxx [new file with mode: 0644]
src/ParaMEDMEM/ComponentTopology.cxx [new file with mode: 0644]
src/ParaMEDMEM/ComponentTopology.hxx [new file with mode: 0644]
src/ParaMEDMEM/DEC.cxx [new file with mode: 0644]
src/ParaMEDMEM/DEC.hxx [new file with mode: 0644]
src/ParaMEDMEM/ExplicitCoincidentDEC.cxx [new file with mode: 0644]
src/ParaMEDMEM/ExplicitCoincidentDEC.hxx [new file with mode: 0644]
src/ParaMEDMEM/ExplicitTopology.cxx [new file with mode: 0644]
src/ParaMEDMEM/ExplicitTopology.hxx [new file with mode: 0644]
src/ParaMEDMEM/MPIProcessorGroup.cxx [new file with mode: 0644]
src/ParaMEDMEM/MPIProcessorGroup.hxx [new file with mode: 0644]
src/ParaMEDMEM/Makefile.in [new file with mode: 0644]
src/ParaMEDMEM/ParaFIELD.cxx [new file with mode: 0644]
src/ParaMEDMEM/ParaFIELD.hxx [new file with mode: 0644]
src/ParaMEDMEM/ParaGRID.cxx [new file with mode: 0644]
src/ParaMEDMEM/ParaGRID.hxx [new file with mode: 0644]
src/ParaMEDMEM/ParaMESH.cxx [new file with mode: 0644]
src/ParaMEDMEM/ParaMESH.hxx [new file with mode: 0644]
src/ParaMEDMEM/ParaMESHCollection.cxx [new file with mode: 0644]
src/ParaMEDMEM/ParaMESHCollection.hxx [new file with mode: 0644]
src/ParaMEDMEM/ParaSUPPORT.cxx [new file with mode: 0644]
src/ParaMEDMEM/ParaSUPPORT.hxx [new file with mode: 0644]
src/ParaMEDMEM/ProcessorGroup.cxx [new file with mode: 0644]
src/ParaMEDMEM/ProcessorGroup.hxx [new file with mode: 0644]
src/ParaMEDMEM/StructuredCoincidentDEC.cxx [new file with mode: 0644]
src/ParaMEDMEM/StructuredCoincidentDEC.hxx [new file with mode: 0644]
src/ParaMEDMEM/StructuredParaSUPPORT.cxx [new file with mode: 0644]
src/ParaMEDMEM/StructuredParaSUPPORT.hxx [new file with mode: 0644]
src/ParaMEDMEM/Topology.cxx [new file with mode: 0644]
src/ParaMEDMEM/Topology.hxx [new file with mode: 0644]
src/ParaMEDMEM/UnstructuredParaSUPPORT.cxx [new file with mode: 0644]
src/ParaMEDMEM/UnstructuredParaSUPPORT.hxx [new file with mode: 0644]
src/ParaMEDMEM/test_BlockTopology.cxx [new file with mode: 0644]
src/ParaMEDMEM/test_DEC.cxx [new file with mode: 0644]
src/ParaMEDMEM/test_ExplicitDEC.cxx [new file with mode: 0644]
src/ParaMEDMEM/test_ParaField.cxx [new file with mode: 0644]
src/ParaMEDMEM/test_ParaStructuredSupport.cxx [new file with mode: 0644]
src/ParaMEDMEM/test_ProcessorGroup.cxx [new file with mode: 0644]
src/ParaMEDMEM/test_UnstructuredDEC.cxx [new file with mode: 0644]

diff --git a/src/ParaMEDMEM/BlockTopology.cxx b/src/ParaMEDMEM/BlockTopology.cxx
new file mode 100644 (file)
index 0000000..0469ed4
--- /dev/null
@@ -0,0 +1,236 @@
+#include "MEDMEM_Grid.hxx"
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+
+#include <vector>
+#include <algorithm>
+
+using namespace std;
+using namespace MEDMEM;
+namespace ParaMEDMEM
+{
+
+/*!
+ * Constructor of a block topology from a grid. 
+ * This preliminary version simply splits along the first axis
+ * instead of making the best choice with respect to the 
+ * values of the different axes. 
+ */
+BlockTopology::BlockTopology(const ProcessorGroup& group, const GRID& grid):
+_proc_group(&group), _dimension(grid.getSpaceDimension())  
+{
+       vector <int> axis_length(_dimension);
+       
+       // idim + 1, because MEDMEM numbering of the axes starts at one.
+       _nb_elems=1;
+       for (int idim=0; idim <_dimension; idim++)
+       {
+               axis_length[idim]=grid.getArrayLength(idim+1);
+               _nb_elems*=axis_length[idim];
+       }       
+       //default splitting along 1st dimension
+       _local_array_indices.resize(_dimension);
+       _nb_procs_per_dim.resize(_dimension);
+       
+       _local_array_indices[0].resize(_proc_group->size()+1);
+       _local_array_indices[0][0]=0;
+       _nb_procs_per_dim[0]=_proc_group->size();
+       
+       for (int i=1; i<=_proc_group->size(); i++)
+               {
+                       _local_array_indices[0][i]=_local_array_indices[0][i-1]+
+                                       axis_length[0]/_proc_group->size();
+                       if (i<= axis_length[0]%_proc_group->size())
+                               _local_array_indices[0][i]+=1;
+               }
+       for (int i=1; i<_dimension; i++)
+               {
+                       _local_array_indices[i].resize(2);
+                       _local_array_indices[i][0]=0;
+                       _local_array_indices[i][1]=axis_length[i];
+                       _nb_procs_per_dim[i]=1;
+               }
+       _cycle_type.resize(_dimension);
+       for (int i=0; i<_dimension; i++)
+               _cycle_type[i]=ParaMEDMEM::Block;
+               
+               
+}
+
+/*!
+ * Creation of a block topology by composing 
+ * a geometrical topology and a component topology.
+ * This constructor is intended for creating fields 
+ * for which the parallel distribution is made on the
+ * components of the field rather than on the geometrical 
+ * partitioning of the underlying mesh.
+ * 
+ */ 
+BlockTopology::BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo)
+{
+       // so far, the block topology can only be created if the proc group 
+       // is either on geom_topo or on comp_topo
+       if (geom_topo.getProcGroup()->size()>1 && comp_topo.nbBlocks()>1)
+               throw MEDEXCEPTION(LOCALIZED(
+               "BlockTopology cannot yet be constructed with both complex geo and components topology"));
+       
+       if (comp_topo.nbComponents()==1)
+       {
+               *this=geom_topo;
+               return;
+       }
+       else
+       {
+               _dimension = geom_topo.getDimension()+1;
+               if (comp_topo.nbBlocks()>1)
+                       _proc_group=comp_topo.getProcGroup();
+               else
+                       _proc_group=geom_topo.getProcGroup();
+               _local_array_indices=geom_topo._local_array_indices;
+               vector<int> comp_indices = *(comp_topo.getBlockIndices());
+               _local_array_indices.push_back(comp_indices);
+               _nb_procs_per_dim=geom_topo._nb_procs_per_dim;
+               _nb_procs_per_dim.push_back(comp_topo.nbBlocks());
+               _cycle_type=geom_topo._cycle_type;
+               _cycle_type.push_back(Block);
+               _nb_elems=geom_topo.getNbElements()*comp_topo.nbComponents();
+               cout << " Nb elems "<<_nb_elems<<" topo elems "<<geom_topo.getNbElements()
+                 <<" comp_topo "<<comp_topo.nbComponents()<<endl;
+       }       
+}
+
+/*! Constructor for creating a one-dimensional
+ * topology from a processor group and a local 
+ * number of elements on each processor
+ * 
+ * The function must be called only by the processors belonging
+ * to group \a group. Calling it from a processor not belonging
+ * to \a group will cause an MPI error, while calling from a subset
+ * of \a group will result in a deadlock. 
+ */
+
+BlockTopology::BlockTopology(const ProcessorGroup& group, int nb_elem):_proc_group(&group),_dimension(1)
+{
+       int* nbelems_per_proc = new int[group.size()];
+       const MPIProcessorGroup* mpi_group=dynamic_cast<const MPIProcessorGroup*>(_proc_group);
+       const MPI_Comm* comm=mpi_group->getComm();
+       //MPI_INT is the C binding for int (MPI_INTEGER is the Fortran type)
+       mpi_group->getCommInterface().allGather(&nb_elem, 1, MPI_INT, 
+                                                                               nbelems_per_proc, 1, MPI_INT, 
+                                                                               *comm);
+       _nb_elems=0;    
+       
+       //splitting along only dimension
+       _local_array_indices.resize(1);
+       _nb_procs_per_dim.resize(1);    
+                                       
+       _local_array_indices[0].resize(_proc_group->size()+1);
+       _local_array_indices[0][0]=0;
+       _nb_procs_per_dim[0]=_proc_group->size();
+       
+       for (int i=1; i<=_proc_group->size(); i++)
+               {
+                       _local_array_indices[0][i]=_local_array_indices[0][i-1]+
+                                       nbelems_per_proc[i-1];
+                       _nb_elems+=nbelems_per_proc[i-1];
+               }
+       delete[] nbelems_per_proc;
+       _cycle_type.resize(1);
+       _cycle_type[0]=ParaMEDMEM::Block;
+}
+
+BlockTopology::~BlockTopology()
+{
+}
+
+/*! Retrieves the min and max indices of the domain stored locally
+ * for each dimension. The output vector has the topology dimension
+ * as a size and each pair <int,int> contains min and max. Indices 
+ * range from min to max-1.
+ */
+
+std::vector<std::pair<int,int> > BlockTopology::getLocalArrayMinMax() const
+{
+       vector<pair<int,int> > local_indices (_dimension);
+       int myrank=_proc_group->myRank();
+       int increment=1;
+       for (int i=_dimension-1; i>=0; i--)
+               {       
+                       increment *=_nb_procs_per_dim[i];
+                       int idim=myrank%increment;
+                       local_indices[i].first=_local_array_indices[i][idim];
+                       local_indices[i].second=_local_array_indices[i][idim+1];
+                       cout << local_indices[i].first << " "<< local_indices[i].second<<endl;
+               }
+               return local_indices;
+}
+
+/*! Serializes the data contained in the Block Topology
+ * for communication purposes*/
+void BlockTopology::serialize(int* & serializer, int& size) const 
+{
+       vector <int> buffer;
+       
+       buffer.push_back(_dimension);
+       buffer.push_back(_nb_elems);
+       for (int i=0; i<_dimension; i++)
+       {
+               buffer.push_back(_nb_procs_per_dim[i]);
+               buffer.push_back(_cycle_type[i]);
+               buffer.push_back(_local_array_indices[i].size());
+               for (int j=0; j<_local_array_indices[i].size(); j++)
+                       buffer.push_back(_local_array_indices[i][j]);
+       }
+       
+       //serializing the comm group
+       int size_comm=_proc_group->size();
+       buffer.push_back(size_comm);
+       MPIProcessorGroup world_group(_proc_group->getCommInterface());
+       for (int i=0; i<size_comm;i++)
+               {
+                       int world_rank=world_group.translateRank(_proc_group, i);
+                       buffer.push_back(world_rank);
+               }
+       
+       serializer=new int[buffer.size()];
+       size=   buffer.size();
+       copy(buffer.begin(), buffer.end(), serializer);
+       
+}
+/*! Unserializes the data contained in the Block Topology
+ * after communication. Uses the same structure as the one used for serialize()
+ * 
+ * */
+void BlockTopology::unserialize(const int* serializer,const CommInterface& comm_interface)
+{
+       const int* ptr_serializer=serializer;
+       cout << "unserialize..."<<endl;
+       _dimension=*(ptr_serializer++);
+       cout << "dimension "<<_dimension<<endl;
+       _nb_elems=*(ptr_serializer++);
+       cout << "nbelems "<<_nb_elems<<endl;
+       _nb_procs_per_dim.resize(_dimension);
+       _cycle_type.resize(_dimension);
+       _local_array_indices.resize(_dimension);
+       for (int i=0; i<_dimension; i++)
+       {
+               _nb_procs_per_dim[i]=*(ptr_serializer++);
+               _cycle_type[i]=(CYCLE_TYPE)*(ptr_serializer++);
+               _local_array_indices[i].resize(*(ptr_serializer++));
+               for (int j=0; j<_local_array_indices[i].size(); j++)
+                       _local_array_indices[i][j]=*(ptr_serializer++);
+       }
+       set<int> procs;
+       int size_comm=*(ptr_serializer++);
+       for (int i=0; i<size_comm; i++)
+               procs.insert(*(ptr_serializer++));
+       cout << "unserialize..."<<procs.size()<<endl;
+       _proc_group=new MPIProcessorGroup(comm_interface,procs);
+       //TODO manage memory ownership of _proc_group   
+               
+}
+
+}
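As an illustration of the splitting rule used in the grid constructor above: n elements are distributed over p processors, with the first n%p processors receiving one extra element. A minimal standalone sketch (editorial, not part of the commit):

#include <iostream>
#include <vector>

int main()
{
  int n = 10; // elements along the first axis
  int p = 3;  // processors in the group
  std::vector<int> indices(p + 1, 0);
  for (int i = 1; i <= p; i++)
  {
    indices[i] = indices[i - 1] + n / p;
    if (i <= n % p)
      indices[i] += 1; // the first n%p processors get one extra element
  }
  // indices == {0, 4, 7, 10}: proc 0 owns [0,4), proc 1 [4,7), proc 2 [7,10)
  for (int i = 0; i < p; i++)
    std::cout << "proc " << i << " : [" << indices[i] << "," << indices[i + 1] << ")\n";
  return 0;
}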
diff --git a/src/ParaMEDMEM/BlockTopology.hxx b/src/ParaMEDMEM/BlockTopology.hxx
new file mode 100644 (file)
index 0000000..a054460
--- /dev/null
@@ -0,0 +1,165 @@
+#ifndef BLOCKTOPOLOGY_HXX_
+#define BLOCKTOPOLOGY_HXX_
+#include <vector>
+#include <utility>
+#include <iostream>
+#include "ProcessorGroup.hxx"
+
+using namespace std;
+namespace MEDMEM
+{
+       class GRID;
+}
+
+namespace ParaMEDMEM
+{
+class Topology;
+class ComponentTopology;
+class CommInterface;
+typedef enum{Block,Cycle} CYCLE_TYPE; 
+
+class BlockTopology: public Topology
+{
+public:
+       BlockTopology(){};
+       BlockTopology(const ProcessorGroup& group, const MEDMEM::GRID& grid); 
+       BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo);
+       BlockTopology(const ProcessorGroup& group, int nb_elem);
+       virtual ~BlockTopology();
+       inline int getNbElements()const;
+       inline int getNbLocalElements() const;
+       const ProcessorGroup* getProcGroup()const {return _proc_group;};
+       inline std::pair<int,int> globalToLocal (const int) const ;
+       inline int localToGlobal (const std::pair<int,int>) const;
+       std::vector<std::pair<int,int> > getLocalArrayMinMax() const ;
+       int getDimension() const {return _dimension;};
+       void serialize(int* & serializer, int& size) const ;
+       void unserialize(const int* serializer, const CommInterface& comm_interface);
+private:
+       //dimension : 2 or 3
+       int _dimension;
+       //proc array
+       std::vector<int> _nb_procs_per_dim;
+       //stores the offsets vector  
+       std::vector<std::vector<int> > _local_array_indices;
+       //stores the cycle type (block or cyclic)
+       std::vector<CYCLE_TYPE> _cycle_type;
+       //Processor group
+       const ProcessorGroup* _proc_group;
+       //nb of elements
+       int _nb_elems;
+};
+
+//!converts a global number to a pair <subdomainid,local> 
+inline std::pair<int,int> BlockTopology::globalToLocal(const int global) const {
+       int subdomain_id=0;
+       //int local=global;
+       int position=global;
+       int size=_nb_elems;
+       int size_procs=_proc_group->size();
+       int increment=size;
+       vector<int>axis_position(_dimension);
+       vector<int>axis_offset(_dimension);
+       for (int idim=0; idim<_dimension; idim++)
+       {
+               int axis_size=_local_array_indices[idim].size()-1;
+               int axis_nb_elem=_local_array_indices[idim][axis_size];
+               increment=increment/axis_nb_elem;
+               //cout << "increment "<<increment<<endl;
+               int proc_increment = size_procs/(axis_size);
+               int axis_pos=position/increment;
+               position=position%increment;
+//             if (_cycle_type[idim]==Block)
+       //      {       
+                       int iaxis=1;
+               //      cout << "local array "<<_local_array_indices[idim][iaxis]<<" "<<axis_pos<<endl;
+                       while (_local_array_indices[idim][iaxis]<=axis_pos)
+                       {
+                               subdomain_id+=proc_increment;
+                                       iaxis++;
+                       }
+                       axis_position[idim]=axis_pos-_local_array_indices[idim][iaxis-1];
+                       axis_offset[idim]=iaxis;
+                       
+//             }
+               
+       //      else
+               //{
+//                     int size = axis_nb_elem/axis_size;
+//                     if ((position%axis_size)<(axis_nb_elem%axis_size))
+//                             size+=1;        
+//                     subdomain_id+=proc_increment*(position%axis_size);
+//                     local -= (axis_nb_elem-size)*increment;
+               //}
+       }
+       int local=0;
+       int local_increment=1;
+       for (int idim=_dimension-1; idim>=0; idim--)
+       {
+               local+=axis_position[idim]*local_increment;
+               local_increment*=_local_array_indices[idim][axis_offset[idim]]-_local_array_indices[idim][axis_offset[idim]-1];
+       }       
+       
+       return make_pair(subdomain_id,local);
+}
+
+//!converts local number to a global number
+inline int BlockTopology::localToGlobal(const pair<int,int> local) const {
+       
+       int subdomain_id=local.first;
+       int global=0;
+       int loc=local.second;
+       int increment=_nb_elems;
+       int proc_increment=_proc_group->size();
+       int local_increment=getNbLocalElements();
+       for (int idim=0; idim < _dimension; idim++)
+       {
+               int axis_size=_local_array_indices[idim].size()-1;
+               int axis_nb_elem=_local_array_indices[idim][axis_size];
+               increment=increment/axis_nb_elem;
+               proc_increment = proc_increment/(axis_size);
+               int proc_axis=subdomain_id/proc_increment;
+               subdomain_id=subdomain_id%proc_increment;
+               int local_axis_nb_elem=_local_array_indices[idim][proc_axis+1]-_local_array_indices[idim][proc_axis];
+               local_increment = local_increment/local_axis_nb_elem;
+       //      if (_cycle_type[idim]==Block)
+               //{     
+                       int iaxis=loc/local_increment+_local_array_indices[idim][proc_axis];
+                       global+=increment*iaxis;
+                       loc = loc%local_increment;
+               //}
+               //else
+               //{
+                       //cout << "cyclic Not implemented yet"<<endl;
+                       //exit (2); 
+               //}
+       }
+       return global;
+       
+}
+
+//!Retrieves the number of elements for a given topology
+inline int BlockTopology::getNbElements()const {return _nb_elems;}
+
+//!Retrieves the local number of elements 
+inline int BlockTopology::getNbLocalElements()const 
+{
+       int position=_proc_group->myRank();
+       int nb_elem = 1;
+       int increment=1;
+       for (int i=_dimension-1; i>=0; i--)
+               {       
+                       increment *=_nb_procs_per_dim[i];
+                       int idim=position%increment;
+                       position=position/increment;
+                               //cout << "i idim dimension"<<i<<" "<<idim<<" "<<_dimension<<endl;
+                       int imin=_local_array_indices[i][idim];
+                       int imax=_local_array_indices[i][idim+1];
+//                     cout << "position imax imin "<<position<<" "<< imax <<" "<< imin<< " "<<idim<<endl;
+                       nb_elem*=(imax-imin);
+               }
+       return nb_elem;
+}
+}
+
+
+#endif /*BLOCKTOPOLOGY_HXX_*/
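To make the globalToLocal/localToGlobal conversions above concrete, here is a one-dimensional standalone sketch (editorial, not part of the commit), with assumed offsets {0,4,7,10}, i.e. 10 elements over 3 processors:

#include <cassert>
#include <utility>
#include <vector>

std::pair<int,int> globalToLocal1D(const std::vector<int>& offsets, int global)
{
  int sub = 0;
  while (offsets[sub + 1] <= global) // find the owning subdomain
    ++sub;
  return std::make_pair(sub, global - offsets[sub]);
}

int localToGlobal1D(const std::vector<int>& offsets, std::pair<int,int> local)
{
  return offsets[local.first] + local.second;
}

int main()
{
  std::vector<int> offsets;
  offsets.push_back(0); offsets.push_back(4);
  offsets.push_back(7); offsets.push_back(10);
  assert(globalToLocal1D(offsets, 5) == std::make_pair(1, 1)); // element 5 lives on proc 1
  assert(localToGlobal1D(offsets, std::make_pair(1, 1)) == 5); // and maps back
  return 0;
}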
diff --git a/src/ParaMEDMEM/CommInterface.cxx b/src/ParaMEDMEM/CommInterface.cxx
new file mode 100644 (file)
index 0000000..e41efac
--- /dev/null
@@ -0,0 +1,14 @@
+#include "CommInterface.hxx"
+
+namespace ParaMEDMEM
+{
+
+CommInterface::CommInterface()
+{
+}
+
+CommInterface::~CommInterface()
+{
+}
+
+}
diff --git a/src/ParaMEDMEM/CommInterface.hxx b/src/ParaMEDMEM/CommInterface.hxx
new file mode 100644 (file)
index 0000000..6972b54
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef COMMINTERFACE_HXX_
+#define COMMINTERFACE_HXX_
+
+#include <mpi.h>
+namespace ParaMEDMEM
+{
+
+class CommInterface
+{
+public:
+       CommInterface();
+       virtual ~CommInterface();
+       int commSize(MPI_Comm comm, int* size) const { return MPI_Comm_size(comm,size);}
+       int commRank(MPI_Comm comm, int* rank) const { return MPI_Comm_rank(comm,rank);}
+       int commGroup(MPI_Comm comm, MPI_Group* group) const  
+               {return MPI_Comm_group(comm, group);}
+       int groupIncl(MPI_Group group, int size, int* ranks, MPI_Group* group_output) const 
+               {return MPI_Group_incl(group, size, ranks, group_output);}
+       int commCreate(MPI_Comm comm, MPI_Group group, MPI_Comm* comm_output) const 
+               {return MPI_Comm_create(comm,group,comm_output);}
+       int groupFree(MPI_Group* group) const {return MPI_Group_free(group);}
+       int commFree(MPI_Comm* comm) const {return MPI_Comm_free(comm);}
+       int broadcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)const
+       {return MPI_Bcast(buffer, count,  datatype, root, comm);}
+       int send(void* buffer, int count, MPI_Datatype datatype, int target, int tag, MPI_Comm comm) const
+       {return MPI_Send(buffer,count, datatype, target, tag, comm);}
+       int recv(void* buffer, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status* status) const
+       {return MPI_Recv(buffer,count, datatype, source, tag, comm, status);}
+       int allToAllV(void* sendbuf, int* sendcounts, int* senddispls, MPI_Datatype sendtype,
+                                 void* recvbuf, int* recvcounts, int* recvdispls, MPI_Datatype recvtype, 
+                                 MPI_Comm comm) const
+       {return MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype,
+                                                 recvbuf, recvcounts, recvdispls, recvtype,
+                                                 comm);}
+       int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+                                 void* recvbuf, int recvcount, MPI_Datatype recvtype,
+                                       MPI_Comm comm) const
+       {return MPI_Allgather(sendbuf,sendcount, sendtype, recvbuf, recvcount, recvtype, comm);  
+       }
+       
+};
+
+}
+
+#endif /*COMMINTERFACE_HXX_*/
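A minimal usage sketch for this wrapper (editorial, not part of the commit): an allGather of one int per process, as done in the BlockTopology(group, nb_elem) constructor. Assumes a working MPI environment and linkage with the library; compile with mpicxx and run under mpirun:

#include <mpi.h>
#include <iostream>
#include "CommInterface.hxx"

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  ParaMEDMEM::CommInterface comm;
  int size, rank;
  comm.commSize(MPI_COMM_WORLD, &size);
  comm.commRank(MPI_COMM_WORLD, &rank);
  int nb_elem = 10 + rank;              // some per-process value
  int* nbelems = new int[size];
  comm.allGather(&nb_elem, 1, MPI_INT,  // every rank contributes one int
                 nbelems, 1, MPI_INT, MPI_COMM_WORLD);
  if (rank == 0)
    for (int i = 0; i < size; i++)
      std::cout << "rank " << i << " owns " << nbelems[i] << " elements\n";
  delete[] nbelems;
  MPI_Finalize();
  return 0;
}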
diff --git a/src/ParaMEDMEM/ComponentTopology.cxx b/src/ParaMEDMEM/ComponentTopology.cxx
new file mode 100644 (file)
index 0000000..a310500
--- /dev/null
@@ -0,0 +1,82 @@
+#include "ComponentTopology.hxx"
+#include "MEDMEM_Exception.hxx"
+
+namespace ParaMEDMEM
+{
+       
+/* Generic constructor for \a nb_comp components equally distributed
+ * over the blocks of \a group (one block per processor)
+ */
+
+ComponentTopology::ComponentTopology(int nb_comp, ProcessorGroup* group):_proc_group(group)
+{
+       int nb_blocks=group->size();
+       
+       if (nb_blocks>nb_comp) throw MEDMEM::MEDEXCEPTION(
+       LOCALIZED("ComponentTopology Number of components must be larger than number of blocks"));
+               
+       component_array.resize(nb_blocks+1);
+       component_array[0]=0;
+       for (int i=1; i<=nb_blocks; i++)
+       {
+               component_array[i]=component_array[i-1]+nb_comp/nb_blocks;
+               if (i<=nb_comp%nb_blocks)
+                       component_array[i]++;
+       }
+       
+}
+       
+/* Generic constructor for \a nb_comp components equally distributed
+ * over \a nb_blocks blocks
+ */
+
+ComponentTopology::ComponentTopology(int nb_comp, int nb_blocks):_proc_group(0)
+{
+       if (nb_blocks>nb_comp) throw MEDMEM::MEDEXCEPTION(
+       LOCALIZED("ComponentTopology Number of components must be larger than number of blocks"));
+               
+       component_array.resize(nb_blocks+1);
+       component_array[0]=0;
+       for (int i=1; i<=nb_blocks; i++)
+       {
+               component_array[i]=component_array[i-1]+nb_comp/nb_blocks;
+               if (i<=nb_comp%nb_blocks)
+                       component_array[i]++;
+       }
+       
+}
+//!Constructor for one block of \a nb_comp components
+ComponentTopology::ComponentTopology(int nb_comp):_proc_group(0)
+{
+               
+       component_array.resize(2);
+       component_array[0]=0;
+       component_array[1]=nb_comp;
+       
+}
+
+//! Constructor for one component
+ComponentTopology::ComponentTopology()
+{
+       component_array.resize(2);
+       component_array[0]=0;
+       component_array[1]=1;
+       
+}
+ComponentTopology::~ComponentTopology()
+{
+}
+
+int ComponentTopology::nbLocalComponents() const{
+       if (_proc_group==0) return nbComponents();
+       
+       int nbcomp;
+       int myrank = _proc_group->myRank();
+       if (myrank!=-1)
+               nbcomp = component_array[myrank+1]-component_array[myrank];
+       else 
+               nbcomp=0;
+       //debug trace placed after the rank check to avoid indexing with myrank==-1
+       cout << "nbLocalComp "<<myrank<<" "<<nbcomp<<endl;
+       return nbcomp;
+}
+}
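A worked example of the block distribution above (editorial, not part of the commit): 5 components over 2 blocks yields component_array == {0, 3, 5}, i.e. components 0-2 in block 0 and components 3-4 in block 1:

#include <cassert>
#include <vector>

int main()
{
  int nb_comp = 5, nb_blocks = 2;
  std::vector<int> component_array(nb_blocks + 1, 0);
  for (int i = 1; i <= nb_blocks; i++)
  {
    component_array[i] = component_array[i - 1] + nb_comp / nb_blocks;
    if (i <= nb_comp % nb_blocks)
      component_array[i]++; // first nb_comp%nb_blocks blocks get one extra
  }
  assert(component_array[1] == 3 && component_array[2] == 5);
  return 0;
}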
diff --git a/src/ParaMEDMEM/ComponentTopology.hxx b/src/ParaMEDMEM/ComponentTopology.hxx
new file mode 100644 (file)
index 0000000..e2c2463
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef COMPONENTTOPOLOGY_HXX_
+#define COMPONENTTOPOLOGY_HXX_
+
+#include <vector>
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+
+
+namespace ParaMEDMEM
+{
+class ComponentTopology
+{
+public:
+       ComponentTopology(int nb_comp, ProcessorGroup* group);
+       ComponentTopology(int nb_comp, int nb_blocks);
+       ComponentTopology(int nb_comp);
+       ComponentTopology();
+       virtual ~ComponentTopology();
+       //!returns the number of MED components in the topology
+       int nbComponents() const {return component_array[component_array.size()-1];}
+       //!returns the number of MED components on local processor
+       int nbLocalComponents() const ;
+       //!returns the number of blocks in the topology
+       int nbBlocks()const {return component_array.size()-1;}
+       //!returns the block structure
+       const vector <int> * getBlockIndices() const {return &component_array;}
+       const ProcessorGroup* getProcGroup()const {return _proc_group;} 
+private:
+       std::vector<int> component_array;
+       ProcessorGroup* _proc_group;
+};
+
+}
+
+#endif /*COMPONENTTOPOLOGY_HXX_*/
diff --git a/src/ParaMEDMEM/DEC.cxx b/src/ParaMEDMEM/DEC.cxx
new file mode 100644 (file)
index 0000000..773e261
--- /dev/null
@@ -0,0 +1,32 @@
+
+/*! Data Exchange Channel 
+ * Interface class for the creation of a link between two 
+ * MPI groups for exchanging mesh or field data*/
+
+#include "CommInterface.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+#include "DEC.hxx"
+
+namespace ParaMEDMEM
+{
+void DEC::attachTargetField(const ParaFIELD* field) 
+{
+       _target_field=field;
+       if (field!=0)
+       {
+               BlockTopology* topo=dynamic_cast<BlockTopology*>(field->getTopology());
+               _comm_interface=&(topo->getProcGroup()->getCommInterface());
+       }
+}
+void DEC::attachSourceField(const ParaFIELD* field) 
+{
+       _source_field=field;
+       if (field!=0)
+       {
+               BlockTopology* topo=dynamic_cast<BlockTopology*>(field->getTopology());
+               _comm_interface=&(topo->getProcGroup()->getCommInterface());
+       }
+}
+}
diff --git a/src/ParaMEDMEM/DEC.hxx b/src/ParaMEDMEM/DEC.hxx
new file mode 100644 (file)
index 0000000..eb7be98
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef DEC_HXX_
+#define DEC_HXX_
+
+namespace ParaMEDMEM
+{
+class ProcessorGroup;
+class ParaFIELD;
+class CommInterface;
+class DEC
+{
+public:
+       DEC():_source_field(0),_target_field(0),_group(0),_comm_interface(0){}
+       void attachTargetField(const ParaFIELD* field);
+       void attachSourceField(const ParaFIELD* field) ;
+       virtual void prepareSourceDE()=0;
+       virtual void prepareTargetDE()=0;
+       virtual void recvData()=0;
+       virtual void sendData()=0;
+       virtual void synchronize()=0;
+       virtual ~DEC(){}
+       virtual void computeProcGroup(){};
+protected:
+       const ParaFIELD* _source_field;
+       const ParaFIELD* _target_field;
+       //! Processor group representing the union of target and source processors
+       ProcessorGroup* _group;
+       const CommInterface* _comm_interface;
+};
+
+}
+
+#endif /*DEC_HXX_*/
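A hedged sketch of the intended call sequence on a DEC (editorial, not part of the commit), mirroring ParaFIELD::synchronizeSource/synchronizeTarget later in this commit. StructuredCoincidentDEC is declared in this commit (see the file list above) but not shown in this excerpt; it is assumed default-constructible, and the two ParaFIELD objects are assumed pre-built:

#include "DEC.hxx"
#include "StructuredCoincidentDEC.hxx"
#include "ParaFIELD.hxx"

void exchange(ParaMEDMEM::ParaFIELD* source_field,
              ParaMEDMEM::ParaFIELD* target_field,
              bool is_source_side)
{
  ParaMEDMEM::DEC* dec = new ParaMEDMEM::StructuredCoincidentDEC();
  if (is_source_side)
  {
    dec->attachSourceField(source_field);
    dec->synchronize();      // exchange topologies between the two sides
    dec->prepareSourceDE();  // build and fill the send buffers
    dec->sendData();
  }
  else
  {
    dec->attachTargetField(target_field);
    dec->synchronize();
    dec->prepareTargetDE();  // build the receive buffers
    dec->recvData();
  }
  delete dec;                // DEC has a virtual destructor
}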
diff --git a/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx b/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx
new file mode 100644 (file)
index 0000000..c1f2bb7
--- /dev/null
@@ -0,0 +1,284 @@
+#include <mpi.h>
+#include "CommInterface.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "ExplicitCoincidentDEC.hxx"
+
+namespace ParaMEDMEM
+{
+
+ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0)
+{      
+}
+
+ExplicitCoincidentDEC::~ExplicitCoincidentDEC()
+{
+}
+
+/*! Synchronization process for exchanging topologies
+ */
+void ExplicitCoincidentDEC::synchronize()
+{
+       if (_source_field!=0)
+               _toposource = dynamic_cast<ExplicitTopology*>(_source_field->getTopology());
+       if (_target_field!=0)
+               _topotarget = dynamic_cast<ExplicitTopology*>(_target_field->getTopology());
+       
+       // Transmitting source topology to target code 
+       broadcastTopology(*_toposource,*_topotarget,1000);
+       // Transmitting target topology to source code
+       //broadcastTopology(_topotarget,2000);
+       //checkCompatibility(_toposource,_topotarget);
+}
+
+/*! Creates the arrays necessary for the data transfer
+ * and fills the send array with the values of the 
+ * source field
+ *  */
+void ExplicitCoincidentDEC::prepareSourceDE()
+{
+       ////////////////////////////////////
+       //Step 1 : buffer array creation 
+       
+       if (!_toposource->getProcGroup()->containsMyRank())
+               return;
+       MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface());
+       
+       int myranksource = _toposource->getProcGroup()->myRank();
+       
+       vector <int>* target_arrays=new vector<int>[_topotarget->getProcGroup()->size()];
+       
+       //cout<<" topotarget size"<<    _topotarget->getProcGroup()->size()<<endl;
+       
+       int nb_local = _toposource-> getNbLocalElements();
+       for (int ielem=0; ielem< nb_local ; ielem++)
+       {
+               pair<int,int> target_local =_distant_elems[ielem];
+               target_arrays[target_local.first].push_back(target_local.second); 
+       }       
+       
+       int union_size=group->size();
+       
+       _sendcounts=new int[union_size];
+       _senddispls=new int[union_size];
+       _recvcounts=new int[union_size];
+       _recvdispls=new int[union_size];
+       
+       for (int i=0; i< union_size; i++)
+       {
+               _sendcounts[i]=0;
+               _recvcounts[i]=0;
+               _recvdispls[i]=0;
+       }
+       _senddispls[0]=0;
+       
+       for (int iproc=0; iproc < _topotarget->getProcGroup()->size(); iproc++)
+       {
+               //converts the rank in target to the rank in union communicator
+               int unionrank=group->translateRank(_topotarget->getProcGroup(),iproc);
+               _sendcounts[unionrank]=target_arrays[iproc].size();
+       }
+       
+       for (int iproc=1; iproc<group->size();iproc++)
+               _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];
+       
+       _sendbuffer = new double [nb_local ];
+
+       /////////////////////////////////////////////////////////////
+       //Step 2 : filling the buffers with the source field values 
+
+       int* counter=new int [_topotarget->getProcGroup()->size()];
+       counter[0]=0;   
+       for (int i=1; i<_topotarget->getProcGroup()->size(); i++)
+               counter[i]=counter[i-1]+target_arrays[i-1].size();
+               
+                       
+       const double* value = _source_field->getField()->getValue();
+       //cout << "Nb local " << nb_local<<endl;
+       for (int ielem=0; ielem<nb_local ; ielem++)
+       {
+         int global = _toposource->localToGlobal(make_pair(myranksource, ielem));
+         //int global=_toposource->localToGlobal(ielem);
+               int target_local =_topotarget->globalToLocal(global);
+               //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
+               //cout <<"counter[]"<<counter[target_local.first]<<endl;
+               _sendbuffer[counter[target_local]++]=value[ielem];
+               
+       }
+       delete[] target_arrays;
+       delete[] counter;
+}
+
+/*!
+ *  Creates the buffers for receiving the fields on the target side
+ */
+void ExplicitCoincidentDEC::prepareTargetDE()
+{
+       if (!_topotarget->getProcGroup()->containsMyRank())
+               return;
+       MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface());
+       
+       //int myranktarget = _topotarget->getProcGroup()->myRank();
+       
+       vector < vector <int> > source_arrays(_toposource->getProcGroup()->size());
+       int nb_local = _topotarget-> getNbLocalElements();
+       for (int ielem=0; ielem< nb_local ; ielem++)
+       {
+               pair<int,int> source_local =_distant_elems[ielem];
+               source_arrays[source_local.first].push_back(source_local.second); 
+       }       
+       int union_size=group->size();
+       _recvcounts=new int[union_size];
+       _recvdispls=new int[union_size];
+       _sendcounts=new int[union_size];
+       _senddispls=new int[union_size];
+       
+       for (int i=0; i< union_size; i++)
+               {
+                       _sendcounts[i]=0;
+                       _recvcounts[i]=0;
+                       _recvdispls[i]=0;
+               }
+       for (int iproc=0; iproc < _toposource->getProcGroup()->size(); iproc++)
+       {
+               //converts the rank in target to the rank in union communicator
+               int unionrank=group->translateRank(_toposource->getProcGroup(),iproc);
+               _recvcounts[unionrank]=source_arrays[iproc].size();
+       }
+       for (int i=1; i<union_size; i++)
+               _recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];
+       _recvbuffer=new double[nb_local];
+               
+}
+
+/*!
+ * Synchronizing a topology so that all the 
+ * group possesses it.
+ * 
+ * \param toposend Topology that is transmitted. It is read on the processes where it already exists.
+ * \param toporecv Topology that is created and filled on the receiving processes.
+ * \param tag Communication tag associated with this operation.
+ */
+void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology& toposend, ExplicitTopology& toporecv, int tag)
+{
+       MPI_Status status;
+       
+       int* serializer=0;
+       int size;
+       
+       MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
+       
+       // The send processors serialize the send topology
+       // and send the buffers to the recv procs
+       if (toposend.getProcGroup()->containsMyRank())
+       {
+               toposend.serialize(serializer, size);
+               for (int iproc=0; iproc< group->size(); iproc++)
+               {
+                       int itarget=(iproc+toposend.getProcGroup()->myRank())%group->size();
+                       if (!toposend.getProcGroup()->contains(itarget))
+                       {
+                               //send the size of the serialized buffer, then the buffer itself
+                               _comm_interface->send(&size,1,MPI_INT, itarget,tag+itarget,*(group->getComm()));
+                               _comm_interface->send(serializer, size, MPI_INT, itarget, tag+itarget,*(group->getComm()));
+                       }
+               }
+               delete[] serializer;
+       }
+       else
+       {
+               vector <int> size (group->size());
+               
+               for (int iproc=0; iproc<group->size();iproc++)
+               {
+                       int isource = iproc;
+                       if (!toporecv.getProcGroup()->contains(isource))
+                       {
+                               int nbelem;
+                               _comm_interface->recv(&nbelem, 1, MPI_INT, isource, tag+isource, *(group->getComm()), &status);
+                               int* buffer = new int[nbelem];
+                               _comm_interface->recv(buffer, nbelem, MPI_INT, isource,tag+isource, *(group->getComm()), &status);
+                       
+                               ExplicitTopology* topotemp=new ExplicitTopology();
+                               topotemp->unserialize(buffer, *_comm_interface);
+                               delete[] buffer;
+                               
+                               for (int ielem=0; ielem<toporecv.getNbLocalElements(); ielem++)
+                               {
+                                       int global=toporecv.localToGlobal(make_pair(iproc,ielem));
+                                       int sendlocal=topotemp->globalToLocal(global);
+                                       if (sendlocal!=-1)
+                                       {
+                                               size[iproc]++;
+                                               _distant_elems.insert(make_pair(ielem, make_pair(iproc,sendlocal)));
+                                       }
+                               }
+                               delete topotemp;
+                       }
+               }       
+       }       
+       MESSAGE (" rank "<<group->myRank()<< " broadcastTopology is over");
+}
+
+void ExplicitCoincidentDEC::recvData()
+{
+       //MPI_COMM_WORLD is used instead of group because there is no
+       //mechanism for creating the union group yet
+       MESSAGE("recvData");
+       for (int i=0; i< 4; i++)
+               cout << _recvcounts[i]<<" ";
+       cout <<endl;
+       for (int i=0; i< 4; i++)
+               cout << _recvdispls[i]<<" ";
+       cout <<endl;
+       
+       cout<<"start AllToAll"<<endl;
+       _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, 
+                       _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
+       cout<<"end AllToAll"<<endl;
+       int nb_local = _topotarget->getNbLocalElements();
+       double* value=new double[nb_local];
+       int myranktarget=_topotarget->getProcGroup()->myRank();
+       vector<int> counters(_toposource->getProcGroup()->size());
+       counters[0]=0;
+       for (int i=0; i<_toposource->getProcGroup()->size()-1; i++)
+               {
+                       MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
+                       int worldrank=group->translateRank(_toposource->getProcGroup(),i);
+                       counters[i+1]=counters[i]+_recvcounts[worldrank];
+               }
+       
+       for (int ielem=0; ielem<nb_local ; ielem++)
+       {
+               int global = _topotarget->localToGlobal(make_pair(myranktarget, ielem));
+               int source_local =_toposource->globalToLocal(global);
+               value[ielem]=_recvbuffer[counters[source_local]++];
+       }
+       
+       
+       _target_field->getField()->setValue(value);
+}
+
+void ExplicitCoincidentDEC::sendData()
+{
+       MESSAGE ("sendData");
+       for (int i=0; i< 4; i++)
+               cout << _sendcounts[i]<<" ";
+       cout <<endl;
+       for (int i=0; i< 4; i++)
+               cout << _senddispls[i]<<" ";
+       cout <<endl;
+       //MPI_COMM_WORLD is used instead of group because there is no
+       //mechanism for creating the union group yet
+       cout <<"start AllToAll"<<endl;
+       
+       _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, 
+                       _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
+       cout<<"end AllToAll"<<endl;
+}
+       
+
+}
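The counts/displacements bookkeeping used by prepareSourceDE and prepareTargetDE above follows the usual MPI_Alltoallv convention: each displacement is the running sum of the preceding counts. A standalone sketch (editorial, not part of the commit):

#include <cassert>

int main()
{
  const int union_size = 4;
  int sendcounts[union_size] = {2, 0, 3, 1}; // items destined to each rank
  int senddispls[union_size];
  senddispls[0] = 0;
  for (int i = 1; i < union_size; i++)
    senddispls[i] = senddispls[i - 1] + sendcounts[i - 1];
  // buffer layout: [rank 0: slots 0-1][rank 2: slots 2-4][rank 3: slot 5]
  assert(senddispls[1] == 2 && senddispls[2] == 2 && senddispls[3] == 5);
  return 0;
}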
+
diff --git a/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx b/src/ParaMEDMEM/ExplicitCoincidentDEC.hxx
new file mode 100644 (file)
index 0000000..58fc154
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef ExplicitCOINCIDENTDEC_HXX_
+#define ExplicitCOINCIDENTDEC_HXX_
+
+#include "DEC.hxx"
+#include "ExplicitTopology.hxx"
+#include <map>
+
+namespace ParaMEDMEM
+{
+class DEC;
+class BlockTopology;
+class ExplicitCoincidentDEC: public DEC
+{
+public:
+       ExplicitCoincidentDEC();
+       virtual ~ExplicitCoincidentDEC();
+       void synchronize();
+       void broadcastTopology(BlockTopology*&, int tag);
+       void broadcastTopology(const ExplicitTopology& toposend, ExplicitTopology& toporecv, int tag);
+       
+       void prepareSourceDE();
+       void prepareTargetDE();
+       void recvData();
+       void sendData();
+private :
+       
+       ExplicitTopology* _toposource;
+       ExplicitTopology* _topotarget;
+       int* _sendcounts;
+       int* _recvcounts;
+       int* _senddispls;
+       int* _recvdispls;
+       double* _recvbuffer;
+       double* _sendbuffer;
+       std::map<int,std::pair<int,int> > _distant_elems;
+};
+
+}
+
+#endif /*ExplicitCOINCIDENTDEC_HXX_*/
+       
diff --git a/src/ParaMEDMEM/ExplicitTopology.cxx b/src/ParaMEDMEM/ExplicitTopology.cxx
new file mode 100644 (file)
index 0000000..e953bfe
--- /dev/null
@@ -0,0 +1,91 @@
+#include "MEDMEM_Mesh.hxx"
+#include "MEDMEM_Support.hxx"
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "ParaSUPPORT.hxx"
+#include "ParaMESH.hxx"
+#include "Topology.hxx"
+#include "ExplicitTopology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+
+#include <vector>
+#include <algorithm>
+
+using namespace std;
+using namespace MEDMEM;
+namespace ParaMEDMEM
+{
+
+ExplicitTopology::ExplicitTopology(const ParaSUPPORT& parasupport ):
+_proc_group(parasupport.getMesh()->getBlockTopology()->getProcGroup()){
+       _nb_elems=parasupport.getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
+       MED_EN::medEntityMesh entity= parasupport.getSupport()->getEntity();
+       const int* global=parasupport.getMesh()->getGlobalNumbering(entity);
+       _loc2glob=new int[_nb_elems]; 
+       
+       if (parasupport.getSupport()->isOnAllElements())
+       {
+               for (int i=0; i<_nb_elems; i++)
+               {
+                       _loc2glob[i]=global[i];
+                       _glob2loc[global[i]]=i;
+               }
+       }
+       else
+       {
+               const int* number= parasupport.getSupport()->getNumber(MED_EN::MED_ALL_ELEMENTS);       
+               for (int i=0; i<_nb_elems; i++)
+               {
+                       int local=number[i];
+                       _loc2glob[i]=global[local];
+                       _glob2loc[global[local]]=i;
+               }
+       }
+}
+
+
+ExplicitTopology::~ExplicitTopology()
+{
+}
+
+
+/*! Serializes the data contained in the Explicit Topology
+ * for communication purposes*/
+void ExplicitTopology::serialize(int* & serializer, int& size) const 
+{
+       vector <int> buffer;
+       
+       buffer.push_back(_nb_elems);
+       for (int i=0; i<_nb_elems; i++)
+       {
+         buffer.push_back(_loc2glob[i]);
+       }
+               
+       serializer=new int[buffer.size()];
+       size=   buffer.size();
+       copy(buffer.begin(), buffer.end(), serializer);
+       
+}
+/*! Unserializes the data contained in the Explicit Topology
+ * after communication. Uses the same structure as the one used for serialize()
+ * 
+ * */
+void ExplicitTopology::unserialize(const int* serializer,const CommInterface& comm_interface)
+{
+       const int* ptr_serializer=serializer;
+       cout << "unserialize..."<<endl;
+       _nb_elems=*(ptr_serializer++);
+       cout << "nbelems "<<_nb_elems<<endl;
+       _loc2glob=new int[_nb_elems];
+       for (int i=0; i<_nb_elems; i++)
+       {
+         _loc2glob[i]=*(ptr_serializer++);
+         _glob2loc[_loc2glob[i]]=i;
+       }
+
+}
+
+}
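For reference, the buffer produced by ExplicitTopology::serialize() above is laid out as { _nb_elems, _loc2glob[0], ..., _loc2glob[_nb_elems-1] }: for instance, a topology with three local elements whose global numbers are 7, 2 and 9 serializes to {3, 7, 2, 9}. unserialize() walks the same layout to rebuild _loc2glob and the _glob2loc hash map.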
diff --git a/src/ParaMEDMEM/ExplicitTopology.hxx b/src/ParaMEDMEM/ExplicitTopology.hxx
new file mode 100644 (file)
index 0000000..47a497b
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef ExplicitTOPOLOGY_HXX_
+#define ExplicitTOPOLOGY_HXX_
+#include <vector>
+#include <utility>
+#include <iostream>
+#include "ProcessorGroup.hxx"
+#include <ext/hash_map>
+
+using namespace std;
+using namespace __gnu_cxx;
+
+namespace MEDMEM
+{
+       class GRID;
+}
+
+namespace ParaMEDMEM
+{
+class Topology;
+class ComponentTopology;
+class ParaSUPPORT;
+class CommInterface;
+
+class ExplicitTopology: public Topology
+{
+public:
+       ExplicitTopology(){};
+       ExplicitTopology(const ParaSUPPORT&);
+       virtual ~ExplicitTopology();
+       
+       inline int getNbElements()const;
+       inline int getNbLocalElements() const;
+       const ProcessorGroup* getProcGroup()const {return _proc_group;};
+//     inline std::pair<int,int> globalToLocal (const int global) const {
+//     pair <int,int>local;
+//     local.first=_proc_group->myRank();
+//     local.second=globalToLocal(global);}
+    int localToGlobal (const std::pair<int,int> local) const {return localToGlobal(local.second);}
+       inline int localToGlobal(int) const;
+       inline int globalToLocal(int) const;
+       void serialize(int* & serializer, int& size) const ;
+       void unserialize(const int* serializer, const CommInterface& comm_interface);
+private:
+       //Processor group
+       const ProcessorGroup* _proc_group;
+       //nb of elements
+       int _nb_elems;
+       //mapping local to global
+       int* _loc2glob;
+       //mapping global to local
+       hash_map<int,int> _glob2loc;
+};
+
+//!converts a global number to the corresponding local number
+inline int ExplicitTopology::globalToLocal(const int global) const {
+       return (_glob2loc.find(global))->second;
+       }
+
+//!converts local number to a global number
+inline int ExplicitTopology::localToGlobal(int local) const {
+       return _loc2glob[local];
+       }
+
+//!Retrieves the number of elements for a given topology
+inline int ExplicitTopology::getNbElements()const {return _nb_elems;}
+
+//!Retrieves the local number of elements 
+inline int ExplicitTopology::getNbLocalElements()const 
+{
+       return _glob2loc.size();
+}
+}
+
+
+#endif /*ExplicitTOPOLOGY_HXX_*/
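A standalone sketch (editorial, not part of the commit) of the two mappings kept by this class: an array for local-to-global and an associative container for the reverse direction. std::map is used here in place of the __gnu_cxx hash_map above, for portability:

#include <cassert>
#include <map>

int main()
{
  const int nb_elems = 3;
  int loc2glob[nb_elems] = {7, 2, 9}; // global number of each local element
  std::map<int,int> glob2loc;
  for (int i = 0; i < nb_elems; i++)
    glob2loc[loc2glob[i]] = i;
  assert(glob2loc[2] == 1);           // global element 2 is local element 1
  assert(loc2glob[glob2loc[9]] == 9); // round trip
  return 0;
}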
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.cxx b/src/ParaMEDMEM/MPIProcessorGroup.cxx
new file mode 100644 (file)
index 0000000..84a176f
--- /dev/null
@@ -0,0 +1,73 @@
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "CommInterface.hxx"
+
+#include <iostream>
+#include <set>
+#include <algorithm>
+#include <mpi.h>
+
+using namespace std;
+
+namespace ParaMEDMEM
+{
+MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
+ProcessorGroup(interface)
+{
+  _comm=MPI_COMM_WORLD;
+  _comm_interface.commGroup(MPI_COMM_WORLD, &_group);
+  int size;
+  _comm_interface.commSize(MPI_COMM_WORLD,&size);
+  for (int i=0; i<size; i++)
+       _proc_ids.insert(i);
+
+}
+MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids):
+ProcessorGroup(interface, proc_ids)
+{
+  //Creation of a communicator 
+  MPI_Group group_world;
+  
+  int size_world;
+  _comm_interface.commSize(MPI_COMM_WORLD,&size_world);
+  int rank_world;
+  _comm_interface.commRank(MPI_COMM_WORLD,&rank_world);
+  _comm_interface.commGroup(MPI_COMM_WORLD, &group_world);
+
+  int* ranks=new int[proc_ids.size()];
+   
+  // copying proc_ids in ranks
+  copy<set<int>::const_iterator,int*> (proc_ids.begin(), proc_ids.end(), ranks);
+  
+  _comm_interface.groupIncl(group_world, proc_ids.size(), ranks, &_group);
+  
+  _comm_interface.commCreate(MPI_COMM_WORLD, _group, &_comm);
+  
+}
+
+MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids) :
+ProcessorGroup(proc_group.getCommInterface())
+{
+       cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)" <<endl;
+       cout << "Not implemented yet !"<<endl;
+       exit(1);
+}
+
+MPIProcessorGroup::~MPIProcessorGroup()
+{
+       _comm_interface.groupFree(&_group);
+       if (_comm!=MPI_COMM_WORLD && _comm !=MPI_COMM_NULL)
+               _comm_interface.commFree(&_comm);
+}
+
+int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
+{
+       const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
+       int local_rank;
+       MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
+       return local_rank;
+}
+
+
+       
+}
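translateRank above delegates to MPI_Group_translate_ranks, which maps ranks between two MPI groups. A minimal standalone sketch (editorial, not part of the commit; assumes at least two processes, compile with mpicxx):

#include <mpi.h>
#include <iostream>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  MPI_Group world_group, sub_group;
  MPI_Comm_group(MPI_COMM_WORLD, &world_group);
  int members[2] = {0, 1};          // a subgroup made of world ranks 0 and 1
  MPI_Group_incl(world_group, 2, members, &sub_group);
  int sub_rank = 1, world_rank;
  MPI_Group_translate_ranks(sub_group, 1, &sub_rank, world_group, &world_rank);
  // rank 1 of the subgroup is world rank 1 here; with members = {3, 5}
  // it would be world rank 5
  std::cout << "subgroup rank 1 -> world rank " << world_rank << std::endl;
  MPI_Group_free(&sub_group);
  MPI_Group_free(&world_group);
  MPI_Finalize();
  return 0;
}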
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx
new file mode 100644 (file)
index 0000000..9a5e704
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef MPIPROCESSORGROUP_HXX_
+#define MPIPROCESSORGROUP_HXX_
+
+#include <set>
+#include <mpi.h>
+
+using namespace std;
+namespace ParaMEDMEM
+{
+class ProcessorGroup;
+class CommInterface;
+
+class MPIProcessorGroup:public ProcessorGroup
+{
+public:
+       MPIProcessorGroup(const CommInterface& interface);
+       MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids);
+       MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids);
+       virtual ~MPIProcessorGroup();
+       void fuse (ProcessorGroup&){};
+       void intersect (ProcessorGroup&){};
+       int myRank() const {int rank; MPI_Comm_rank(_comm,&rank); return rank;}
+       bool containsMyRank() const { int rank; MPI_Group_rank(_group, &rank); return (rank!=MPI_UNDEFINED);}
+       int translateRank(const ProcessorGroup* group, int rank) const;
+       const MPI_Comm* getComm() const {return &_comm;}
+private:
+       MPI_Group _group;
+       MPI_Comm _comm;
+};
+
+}
+
+#endif /*MPIPROCESSORGROUP_HXX_*/
diff --git a/src/ParaMEDMEM/Makefile.in b/src/ParaMEDMEM/Makefile.in
new file mode 100644 (file)
index 0000000..5daf5c4
--- /dev/null
@@ -0,0 +1,116 @@
+#  MED MEDMEM : MED files in memory
+#
+#  Copyright (C) 2003  OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
+#  CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS 
+# 
+#  This library is free software; you can redistribute it and/or 
+#  modify it under the terms of the GNU Lesser General Public 
+#  License as published by the Free Software Foundation; either 
+#  version 2.1 of the License. 
+# 
+#  This library is distributed in the hope that it will be useful, 
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of 
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
+#  Lesser General Public License for more details. 
+# 
+#  You should have received a copy of the GNU Lesser General Public 
+#  License along with this library; if not, write to the Free Software 
+#  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA 
+# 
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+#
+#
+#  File   : Makefile.in
+#  Author : Vincent BERGEAUD (CEA/DEN/DANS/DM2S/SFME/LGLS)
+#  Module : MED
+
+top_srcdir=@top_srcdir@
+top_builddir=../..
+srcdir=@srcdir@
+VPATH=.:$(srcdir):$(srcdir)/tests
+
+MACHINE=PCLINUX
+
+@COMMENCE@
+
+
+EXPORT_PYSCRIPTS = \
+
+
+EXPORT_HEADERS = \
+CommInterface.hxx\
+MPIProcessorGroup.hxx\
+ProcessorGroup.hxx\
+BlockTopology.hxx\
+Topology.hxx\
+ParaGRID.hxx\
+ParaMESH.hxx\
+ParaSUPPORT.hxx\
+StructuredParaSUPPORT.hxx\
+ComponentTopology.hxx\
+ExplicitTopology.hxx\
+ParaFIELD.hxx\
+DEC.hxx\
+StructuredCoincidentDEC.hxx\
+ExplicitCoincidentDEC.hxx
+
+# Libraries targets
+
+LIB=libparamed.la
+
+LIB_SRC = \
+MPIProcessorGroup.cxx\
+BlockTopology.cxx\
+ParaGRID.cxx\
+ParaMESH.cxx\
+ParaSUPPORT.cxx\
+StructuredParaSUPPORT.cxx\
+ComponentTopology.cxx\
+ParaFIELD.cxx\
+DEC.cxx\
+StructuredCoincidentDEC.cxx\
+ExplicitCoincidentDEC.cxx\
+ExplicitTopology.cxx
+
+
+
+# Executables targets
+BIN =
+BIN_SRC = 
+BIN_SERVER_IDL = 
+BIN_CLIENT_IDL = 
+
+TEST_PROGS = test_ProcessorGroup test_BlockTopology test_ParaStructuredSupport \
+test_ParaField test_DEC test_UnstructuredDEC
+
+LDFLAGS+= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome 
+LDFLAGSFORBIN+= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome
+
+CPPFLAGS+=$(MED2_INCLUDES) $(HDF5_INCLUDES) $(MPI_INCLUDES)
+
+CXXFLAGS+=@CXXTMPDPTHFLAGS@ $(MPI_INCLUDES)
+CPPFLAGS+=$(BOOST_CPPFLAGS)
+#LDFLAGS+=$(MED2_LIBS) $(HDF5_LIBS) 
+# change motivated by the bug KERNEL4778.
+LDFLAGS+=$(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 $(STDLIB) -lmedmem $(MPI_LIBS)
+
+#LDFLAGSFORBIN+=$(MED2_LIBS) $(HDF5_LIBS)
+# change motivated by the bug KERNEL4778.
+LDFLAGSFORBIN+=-lm $(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 -lmedmem $(MPI_LIBS)  $(BOOST_LIBS)
+
+ifeq ($(MED_WITH_KERNEL),yes)
+  CPPFLAGS+= ${KERNEL_CXXFLAGS}
+  CXXFLAGS+= ${KERNEL_CXXFLAGS}
+  LDFLAGS+= ${KERNEL_LDFLAGS} -lSALOMELocalTrace 
+  LDFLAGSFORBIN+= ${KERNEL_LDFLAGS} -lSALOMELocalTrace -lSALOMEBasics
+endif
+
+LIBSFORBIN=$(BOOST_LIBS) $(MPI_LIBS) 
+
+LIBS=
+
+# build create_mesh :
+bin: 
+
+@CONCLUDE@
diff --git a/src/ParaMEDMEM/ParaFIELD.cxx b/src/ParaMEDMEM/ParaFIELD.cxx
new file mode 100644 (file)
index 0000000..f003519
--- /dev/null
@@ -0,0 +1,109 @@
+#include "MEDMEM_Exception.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaSUPPORT.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ExplicitCoincidentDEC.hxx"
+#include "StructuredCoincidentDEC.hxx"
+#include "ParaFIELD.hxx"
+#include "ParaMESH.hxx"
+
+using namespace MEDMEM;
+
+namespace ParaMEDMEM
+{
+
+ParaFIELD::ParaFIELD(ParaSUPPORT* para_support, const ComponentTopology& component_topology)
+:_support(para_support),
+_component_topology(component_topology) 
+{
+       if (dynamic_cast<StructuredParaSUPPORT*>(para_support)!=0)
+       {
+               const BlockTopology* source_topo = dynamic_cast<const BlockTopology*>(para_support->getTopology());
+               _topology=new BlockTopology(*source_topo,component_topology);
+       }
+       else
+               throw MEDEXCEPTION(LOCALIZED(
+               "ParaFIELD constructor : Unstructured Support not taken into account with component topology yet"));
+               
+       
+//     int nb_components=0;
+//     if (component_topology.getProcGroup()!=0)
+       int nb_components = component_topology.nbLocalComponents();
+       if (nb_components!=0)
+               {
+                       _field=new FIELD<double> (para_support->getSupport(), nb_components);
+               }
+       else return;
+       
+       _field->setName("toto");
+       _field->setDescription("titi");
+       _field->setNumberOfValues(para_support->getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS));
+       string* compnames=new string[nb_components];
+       string* compdesc=new string[nb_components];
+       for (int i=0; i<nb_components; i++)
+       {
+               ostringstream stream;
+               ostringstream stream2;
+               stream<<"component "<<i;
+               stream2<<"component description "<<i;
+               compnames[i]=stream.str();
+               compdesc[i]=stream2.str();
+       }
+       _field->setComponentsNames(compnames);
+       _field->setComponentsDescriptions(compdesc);
+       _field->setIterationNumber(0);
+       _field->setOrderNumber(0);
+       _field->setTime(0.0);
+} 
+
+ParaFIELD::ParaFIELD(MEDMEM::driverTypes driver_type, const string& file_name, 
+       const string& driver_name, const ComponentTopology& component_topology) 
+       throw (MEDEXCEPTION):_component_topology(component_topology){}
+ParaFIELD::~ParaFIELD(){}
+
+void ParaFIELD::write(MEDMEM::driverTypes driverType, const string& fileName, const string& meshName){
+       BlockTopology* topo = dynamic_cast<BlockTopology*> (_topology);
+       int myrank = topo->getProcGroup()->myRank();
+       ostringstream name;
+       name <<fileName<<myrank+1<<".med";
+       cout << name.str() <<endl;
+       int driver = _field->addDriver(driverType, name.str().c_str(), meshName);
+       _field->write(driver);
+}
+
+void ParaFIELD::synchronizeTarget(ParaFIELD* source_field){
+       DEC* data_channel;
+       if (dynamic_cast<BlockTopology*>(_topology)!=0)
+       {
+               data_channel=new StructuredCoincidentDEC();
+       }
+       else
+       {
+               data_channel=new ExplicitCoincidentDEC();
+       }
+       data_channel->attachTargetField(this);
+       data_channel->synchronize();
+       data_channel->prepareTargetDE();
+       data_channel->recvData();
+       
+       delete data_channel;
+}
+
+void ParaFIELD::synchronizeSource(ParaFIELD* target_field){
+       DEC* data_channel;
+       if (dynamic_cast<BlockTopology*>(_topology)!=0)
+       {
+               data_channel=new StructuredCoincidentDEC();
+       }
+       else
+       {
+               data_channel=new ExplicitCoincidentDEC();
+       }
+       data_channel->attachSourceField(this);
+       data_channel->synchronize();
+       data_channel->prepareSourceDE();
+       data_channel->sendData();
+       
+       delete data_channel;
+}
+
+}
diff --git a/src/ParaMEDMEM/ParaFIELD.hxx b/src/ParaMEDMEM/ParaFIELD.hxx
new file mode 100644 (file)
index 0000000..dcb6c28
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef PARAFIELD_HXX_
+#define PARAFIELD_HXX_
+
+#include "MEDMEM_define.hxx"
+#include "MEDMEM_GenDriver.hxx"
+#include "MEDMEM_Field.hxx"
+
+namespace MEDMEM{
+       class MEDEXCEPTION;
+}
+
+
+namespace ParaMEDMEM
+{
+class ComponentTopology;
+class ParaSUPPORT;
+class Topology;
+
+class ParaFIELD
+{
+public:
+       ParaFIELD(ParaSUPPORT* support, const ComponentTopology& component_topology); 
+       ParaFIELD(ParaSUPPORT* support);
+       ParaFIELD(MEDMEM::driverTypes driver_type, const string& file_name, 
+               const string& driver_name, const ComponentTopology& component_topology) 
+               throw (MEDMEM::MEDEXCEPTION);
+       virtual ~ParaFIELD();
+       void write(MEDMEM::driverTypes driverType, const string& fileName="", const string& meshName="");
+       void synchronizeTarget(ParaFIELD* source_field);
+       void synchronizeSource(ParaFIELD* target_field);
+       MEDMEM::FIELD<double>* getField() const {return _field;}
+       Topology* getTopology() const {return _topology;}
+       int nbComponents() const {return _component_topology.nbComponents();}
+private:
+       const ComponentTopology& _component_topology;
+       Topology* _topology; 
+       MEDMEM::FIELD<double>* _field;
+       ParaSUPPORT* _support;
+};
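+// A minimal construction sketch, mirroring test_ParaField.cxx: the field is
+// allocated on a support, with its components spread over the processor
+// group according to the component topology.
+//
+//     ComponentTopology comp_topo(5, group);  // 5 components over the group
+//     ParaFIELD field(&support, comp_topo);   // allocates the FIELD<double>
+//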
+
+}
+
+#endif /*PARAFIELD_HXX_*/
diff --git a/src/ParaMEDMEM/ParaGRID.cxx b/src/ParaMEDMEM/ParaGRID.cxx
new file mode 100644 (file)
index 0000000..b60c63f
--- /dev/null
@@ -0,0 +1,109 @@
+#include <fstream>
+#include <vector>
+
+#include "ParaGRID.hxx"
+
+using namespace std;
+
+namespace ParaMEDMEM
+{
+       
+ParaGRID::ParaGRID(MEDMEM::GRID* global_grid, Topology* topology)throw (MEDMEM::MEDEXCEPTION):
+_name(global_grid->getName())
+{
+       
+       _block_topology = dynamic_cast<BlockTopology*>(topology);
+       if (_block_topology==0) throw MEDEXCEPTION(LOCALIZED(
+       "ParaGRID::ParaGRID topology must be block topology"));
+       
+       if (!_block_topology->getProcGroup()->containsMyRank()) return;
+       
+       int dimension=_block_topology->getDimension() ;
+       if (dimension != global_grid->getMeshDimension())
+               throw MEDEXCEPTION(LOCALIZED("ParaGrid::ParaGrid incompatible topology"));
+       
+       vector<vector<double> > xyz_array(dimension);
+       vector<pair<int,int> > local_indices = _block_topology->getLocalArrayMinMax();
+       int myrank=_block_topology->getProcGroup()->myRank();
+       vector <string> coordinates_names;
+       vector <string> coordinates_units;
+       for (int idim=0; idim<dimension ; idim++)
+       {
+               cout << " Indices "<< local_indices[idim].first <<" "<<local_indices[idim].second<<endl;
+               for (int i=(local_indices)[idim].first; i<(local_indices)[idim].second; i++)
+                       xyz_array[idim].push_back(global_grid->getArrayValue(idim+1,i));
+               coordinates_names.push_back(global_grid->getCoordinatesNames()[idim]);
+               coordinates_units.push_back(global_grid->getCoordinatesUnits()[idim]);
+       }
+       _grid=new MEDMEM::GRID(xyz_array,
+                                                  coordinates_names,
+                                              coordinates_units);
+       _grid->setName(global_grid->getName());
+       _grid->setDescription(global_grid->getDescription());
+       
+}
+
+ParaGRID::ParaGRID(MEDMEM::driverTypes driver_type, const string& file_name, 
+       const string& driver_name, int domain_id)
+throw (MEDMEM::MEDEXCEPTION){};
+ParaGRID::~ParaGRID(){if (_grid !=0) delete _grid;};
+
+
+/*! method for writing a distributed grid
+ * 
+ * \param driverType type of driver used (MED_DRIVER,VTK_DRIVER)
+ * \param master_filename name of the master file
+ */
+void ParaGRID::write(MEDMEM::driverTypes driverType, const string& master_filename)
+throw (MEDMEM::MEDEXCEPTION){
+       
+       BEGIN_OF("ParaMEDMEM::ParaGRID::write()");
+        
+       if (!_block_topology->getProcGroup()->containsMyRank()) return;
+        
+       int myrank=_block_topology->getProcGroup()->myRank();
+       
+       int nbdomains= _block_topology->getProcGroup()->size(); 
+       vector<string> filename(nbdomains);
+               
+               
+       //loop on the domains
+       for (int i=0; i<nbdomains;i++)
+       {
+               char distfilename[256];
+       
+               ostringstream suffix;
+               
+               suffix << master_filename<< i+1 <<".med";
+               
+               strcpy(distfilename,suffix.str().c_str());
+               filename[i]=string(distfilename);
+               MESSAGE("File name "<<string(distfilename));
+       }       
+       
+       //creation of the master file by proc 0 of the ProcGroup
+       if (myrank==0)
+       {
+               ofstream file(master_filename.c_str());
+               if (!file) throw (MEDEXCEPTION("Unable to create master file"));
+               file <<"#MED Fichier V 2.3"<<" "<<endl;
+               file <<"#"<<" "<<endl;
+               file<<nbdomains<<" "<<endl;
+               for (int i=0; i<nbdomains;i++)
+               {
+                       //updating the ascii description file
+                       file << _name <<" "<< i+1 << " "<< _name << " localhost " << filename[i] << " "<<endl;
+               }
+       }
+               
+       int id=_grid->addDriver(MEDMEM::MED_DRIVER,filename[myrank],_name);
+               
+       MESSAGE("Start writing");
+       _grid->write(id);
+                       
+       END_OF("ParaMEDMEM::ParaGRID::write()");
+};
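+// Example of the files produced by write() on a two-processor group with
+// master_filename=="/tmp/toto" and a grid named grid_10_10 (paths are
+// illustrative): the ASCII master file /tmp/toto below, plus the per-domain
+// files /tmp/toto1.med and /tmp/toto2.med.
+//
+//     #MED Fichier V 2.3 
+//     # 
+//     2 
+//     grid_10_10 1 grid_10_10 localhost /tmp/toto1.med 
+//     grid_10_10 2 grid_10_10 localhost /tmp/toto2.med 
+//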
+
+       
+       
+}
diff --git a/src/ParaMEDMEM/ParaGRID.hxx b/src/ParaMEDMEM/ParaGRID.hxx
new file mode 100644 (file)
index 0000000..6d977d5
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef PARAGRID_HXX_
+#define PARAGRID_HXX_
+
+#include <vector>
+
+#include "MEDMEM_Exception.hxx"
+#include "MEDMEM_define.hxx"
+#include "MEDMEM_GenDriver.hxx"
+#include "MEDMEM_Grid.hxx"
+#include "MEDMEM_ConnectZone.hxx"
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+
+using namespace MEDMEM;
+namespace ParaMEDMEM
+{
+
+
+const int MYRANK_ID=-100;
+class ParaGRID
+{
+public:
+       ParaGRID(MEDMEM::GRID* global_grid, Topology* topology)throw (MEDMEM::MEDEXCEPTION);
+       ParaGRID(MEDMEM::driverTypes driver_type, const string& file_name, 
+               const string& driver_name, int domain_id=MYRANK_ID)
+       throw (MEDMEM::MEDEXCEPTION);
+       
+       void write(MEDMEM::driverTypes driverType, const string& fileName="")
+       throw (MEDMEM::MEDEXCEPTION);
+       
+       ParaMEDMEM::BlockTopology * getBlockTopology() const {return _block_topology;}
+       virtual ~ParaGRID();
+       MEDMEM::GRID* getGrid() const {return _grid;} 
+       
+private:
+       MEDMEM::GRID* _grid;
+       //grid name 
+       const string _name;
+       // structured grid topology
+       ParaMEDMEM::BlockTopology* _block_topology;
+       // stores the x,y,z axes on the global grid
+       std::vector<std::vector<double> > _global_axis;
+       //id of the local grid
+       int _my_domain_id;
+
+};
+
+}
+
+#endif /*PARAGRID_HXX_*/
diff --git a/src/ParaMEDMEM/ParaMESH.cxx b/src/ParaMEDMEM/ParaMESH.cxx
new file mode 100644 (file)
index 0000000..c0914d0
--- /dev/null
@@ -0,0 +1,273 @@
+#include <fstream>
+#include <vector>
+
+#include "ParaMESH.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "MEDMEM_ConnectZone.hxx"
+
+//inclusion for the namespaces
+#include "MEDMEM_Field.hxx"
+using namespace std;
+
+namespace ParaMEDMEM
+{
+       
+
+ParaMESH::ParaMESH(MEDMEM::driverTypes driver_type, const string& filename, 
+       const ProcessorGroup& group)
+throw (MEDMEM::MEDEXCEPTION){
+
+  BEGIN_OF("ParaMEDMEM::ParaMESH::ParaMESH()")
+  
+  string meshstring;
+  char file[256];
+  char meshname[MED_TAILLE_NOM];
+  int domain_id=group.myRank();
+  //global numbering arrays are filled while reading; no edge numbering is read yet
+  _cellglobal=0; _nodeglobal=0; _faceglobal=0; _edgeglobal=0;
+  
+  // reading ascii master file
+  try{
+       MESSAGE("Start reading");
+    ifstream asciiinput(filename.c_str());
+    char charbuffer[512];
+    asciiinput.getline(charbuffer,512);
+    
+    while (charbuffer[0]=='#')
+      {
+       asciiinput.getline(charbuffer,512);
+     }
+
+    //reading number of domains
+    int nbdomain=atoi(charbuffer);
+    cout << "nb domain"<<nbdomain<<endl;
+    //    asciiinput>>nbdomain;
+   
+    string mesh;
+       int idomain;
+       string host;
+       
+    for (int i=0; i<=domain_id;i++)
+      {
+       //reading information about the domain
+               
+      
+               asciiinput >> mesh >> idomain >> meshstring >> host >> _medfilename;
+               
+               if (idomain!=i+1)
+                 {
+                   cerr<<"Error : domain must be written from 1 to N in asciifile descriptor"<<endl;
+                       throw (MEDEXCEPTION("Error : domain must be written from 1 to N in asciifile descriptor"));
+                 }
+               strcpy(meshname,meshstring.c_str());
+               strcpy(file,_medfilename.c_str());
+      }
+       ///////////////////////////////////////////
+       // treatment of the domain that corresponds
+       // to the local id
+       ///////////////////////////////////////////
+       _mesh=new MEDMEM::MESH(driver_type,file, meshname);
+               
+       //reading MEDSPLITTER::CONNECTZONEs NODE/NODE and CELL/CELL
+       med_2_2::med_idt fid = med_2_2::MEDouvrir(file,med_2_2::MED_LECTURE);
+       med_2_2::med_int njoint = med_2_2::MEDnJoint(fid, meshname);
+       for (int ijoint=1; ijoint<=njoint; ijoint++)
+       {
+               int distant;
+               char joint_description[MED_TAILLE_DESC];
+           char name[MED_TAILLE_NOM];
+           char name_distant[MED_TAILLE_NOM];
+           cout << "arguments"<< fid<<" "<<file<<" "<<ijoint<<" "<<name<<" "<<joint_description<<" "<<distant<<" "<<name_distant<<endl;
+           int ncorr = med_2_2::MEDjointInfo(fid,meshname, ijoint, name, 
+               joint_description,
+                      &distant, name_distant);
+               cout << "Found " << ncorr <<"correspondances in joint "<<ijoint<<endl;
+
+       for (int ic=1; ic<=ncorr; ic++)
+           {
+             med_2_2::med_entite_maillage cor_typent_local;
+             med_2_2::med_geometrie_element cor_typgeo_local;
+             med_2_2::med_entite_maillage cor_typent_dist;
+             med_2_2::med_geometrie_element cor_typgeo_dist;
+         
+          
+             int ncouples;
+             ncouples = med_2_2::MEDjointTypeCorres(fid, meshname, name, ic,
+                                           &cor_typent_local,  &cor_typgeo_local,
+                                           &cor_typent_dist, &cor_typgeo_dist
+                                           );
+             int* node_corresp=new int[ncouples];
+             if (cor_typent_local == med_2_2::MED_NOEUD && cor_typent_dist == med_2_2::MED_NOEUD)
+                       {
+         
+                         med_2_2::MEDjointLire(fid, meshname, name,
+                      node_corresp,ncouples,
+                      cor_typent_local,  cor_typgeo_local,
+                      cor_typent_dist, cor_typgeo_dist
+                      );
+                       }
+                       //constructing the connect zone and adding it to the connect zone list
+                       MEDMEM::CONNECTZONE* cz = new MEDMEM::CONNECTZONE();
+                       cz->setName(string(name));
+                       cz->setDescription(joint_description);
+                       cz->setLocalDomainNumber(domain_id);
+                       cz->setDistantDomainNumber(distant);
+                       //cz->setLocalMesh((m_collection->getMesh())[i]);
+                       //cz->setDistantMesh((m_collection->getMesh())[distant]);
+                       cz->setNodeCorresp(node_corresp,ncouples);
+                       _connect_zone.push_back(cz);
+                       
+           }//loop on correspondences
+       }//loop on joints       
+       
+       //
+       // Reading global numbering
+       // 
+       int ncell=_mesh->getNumberOfElements(MED_EN::MED_CELL,MED_EN::MED_ALL_ELEMENTS);
+       int * array=new int[ncell];
+       int offset=0;
+       MESSAGE("Reading cell global numbering for mesh "<< domain_id);
+       MED_EN::MESH_ENTITIES::const_iterator currentEntity;
+       list<MED_EN::medGeometryElement>::const_iterator iter;
+       currentEntity  = MED_EN::meshEntities.find(MED_EN::MED_CELL);
+       char meshchar[MED_TAILLE_NOM];
+       strcpy(meshchar,_mesh->getName().c_str());
+       for (iter = (*currentEntity).second.begin();iter != (*currentEntity).second.end(); iter++)
+       {
+               MED_EN::medGeometryElement type=*iter;
+               if (type/100 != _mesh->getMeshDimension()) continue;
+               int ntype = _mesh->getNumberOfElements(MED_EN::MED_CELL,type);
+               if (ntype==0) continue;
+               med_2_2::MEDglobalNumLire(fid,meshname, array+offset, ntype,
+               med_2_2::MED_MAILLE, (med_2_2::med_geometrie_element)type);
+        offset+=ntype;
+       }
+       _cellglobal=array;
+       
+       MESSAGE("Reading node global numbering");
+       int nnode= _mesh->getNumberOfNodes();
+       array=new int[nnode];
+       med_2_2::MEDglobalNumLire(fid,meshname, array, nnode,
+                       med_2_2::MED_NOEUD, med_2_2::MED_POINT1); 
+       _nodeglobal=array;
+       
+       MESSAGE("Reading face global numbering for mesh "<<domain_id);
+       int nbface=_mesh->getNumberOfElements(MED_EN::MED_FACE,MED_EN::MED_ALL_ELEMENTS);
+       array=new int[nbface];
+       currentEntity  = MED_EN::meshEntities.find(MED_EN::MED_FACE);
+       offset=0;
+       for (iter = (*currentEntity).second.begin();iter != (*currentEntity).second.end(); iter++)
+       {
+               MED_EN::medGeometryElement type=*iter;
+               if (type/100 != _mesh->getMeshDimension()-1) continue;
+               int ntype = _mesh->getNumberOfElements(MED_EN::MED_FACE,type);
+               if (ntype==0) continue;
+               med_2_2::MEDglobalNumLire(fid,meshname, array+offset, ntype,
+               med_2_2::MED_FACE, (med_2_2::med_geometrie_element)type);
+        offset+=ntype;
+       }
+       _faceglobal=array;
+//             faceglobal[i]=0;
+    med_2_2::MEDfermer(fid);
+       
+    _block_topology=new BlockTopology(group,ncell); 
+  
+     MESSAGE("end of read");
+      
+  }//of try
+  catch(...)
+    {
+      cerr << "I/O error reading parallel MED file"<<endl;
+      throw;
+    }
+   
+  
+  //creation of topology from mesh and connect zones
+  //m_collection->setTopology(
+  //   new ParallelTopology((m_collection->getMesh()),(m_collection->getCZ()),cellglobal,nodeglobal,faceglobal)
+   // );
+    
+    END_OF("ParaMEDMEM::ParaMESH::ParaMESH()")
+};
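+// Reading sketch: every processor parses the same ASCII master file up to the
+// line matching its own rank, opens only its own .med file, then loads the
+// NODE/NODE joints and the cell/node/face global numberings. The accepted
+// master file layout is exactly the one produced by ParaMESH::write() below.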
+
+
+ParaMESH::~ParaMESH(){
+       if (_mesh !=0) delete _mesh;
+       delete[] _cellglobal;
+       delete[] _nodeglobal;
+       delete[] _faceglobal;
+       delete[] _edgeglobal;
+};
+
+
+/*! method for writing a distributed MESH
+ * 
+ * \param driverType type of driver used (MED_DRIVER,VTK_DRIVER)
+ * \param master_filename name of the master file
+ */
+void ParaMESH::write(MEDMEM::driverTypes driverType, const string& master_filename)
+throw (MEDMEM::MEDEXCEPTION){
+       
+       BEGIN_OF("ParaMEDMEM::ParaMESH::write()");
+        
+       if (!_block_topology->getProcGroup()->containsMyRank()) return;
+        
+       int myrank=_block_topology->getProcGroup()->myRank();
+       cout << "Myrank in write " << myrank<<endl;
+       
+       int nbdomains= _block_topology->getProcGroup()->size(); 
+       vector<string> filename(nbdomains);
+               
+               
+       //loop on the domains
+       for (int i=0; i<nbdomains;i++)
+       {
+               char distfilename[256];
+       
+               ostringstream suffix;
+               
+               suffix << master_filename<< i+1 <<".med";
+               
+               strcpy(distfilename,suffix.str().c_str());
+               filename[i]=string(distfilename);
+               MESSAGE("File name "<<string(distfilename));
+       }       
+       
+       //creation of the master file by proc 0 of the ProcGroup
+       if (myrank==0)
+       {
+               MESSAGE("Master File Name "<<master_filename);
+               ofstream file(master_filename.c_str());
+               if (!file) throw (MEDEXCEPTION("Unable to create master file"));
+               file <<"#MED Fichier V 2.3"<<" "<<endl;
+               file <<"#"<<" "<<endl;
+               file<<nbdomains<<" "<<endl;
+               for (int i=0; i<nbdomains;i++)
+               {
+                       //updating the ascii description file
+                       file << _name <<" "<< i+1 << " "<< _name << " localhost " << filename[i] << " "<<endl;
+               }
+               
+       }
+               
+       int id=_mesh->addDriver(MEDMEM::MED_DRIVER,filename[myrank],_name);
+               
+       MESSAGE("Start writing");
+       _mesh->write(id);
+       _mesh->rmDriver(id);            
+       END_OF("ParaMEDMEM::ParaMESH::write()");
+};
+
+const int* ParaMESH::getGlobalNumbering(const MED_EN::medEntityMesh entity)const
+{
+       switch (entity)
+       {
+               case MED_CELL:
+                       return _cellglobal;
+               case MED_FACE :
+                       return _faceglobal;
+               case MED_EDGE :
+                       return _edgeglobal;
+               case MED_NODE:
+                       return _nodeglobal;
+               default :
+                       return 0;
+       }
+}      
+       
+}
diff --git a/src/ParaMEDMEM/ParaMESH.hxx b/src/ParaMEDMEM/ParaMESH.hxx
new file mode 100644 (file)
index 0000000..a99612a
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef PARAMESH_HXX_
+#define PARAMESH_HXX_
+#include <string>
+#include <vector>
+
+#include "MEDMEM_Exception.hxx"
+#include "MEDMEM_define.hxx"
+#include "MEDMEM_GenDriver.hxx"
+#include "MEDMEM_Mesh.hxx"
+#include "MEDMEM_ConnectZone.hxx"
+#include "ProcessorGroup.hxx"
+
+namespace ParaMEDMEM
+{
+class BlockTopology;
+
+class ParaMESH
+{
+public:
+       ParaMESH(MEDMEM::driverTypes driver_type, const std::string& file_name, 
+               const ProcessorGroup& group)
+       throw (MEDMEM::MEDEXCEPTION);
+       void write(MEDMEM::driverTypes driverType, const std::string& fileName="")
+       throw (MEDMEM::MEDEXCEPTION);
+       virtual ~ParaMESH();
+       MEDMEM::MESH* getMesh() const {return _mesh;}
+       ParaMEDMEM::BlockTopology* getBlockTopology()const {return _block_topology;}
+       const string& getFilename() const {return _medfilename;}
+       const int* getGlobalNumbering(MED_EN::medEntityMesh)const; 
+private:
+       //mesh object underlying the ParaMESH object
+       MEDMEM::MESH* _mesh;
+       //name of the mesh
+       const string _name;
+       //connect zone
+       std::vector<MEDMEM::CONNECTZONE*> _connect_zone;
+       //id of the local grid
+       int _my_domain_id;
+       //global topology of the cells
+       ParaMEDMEM::BlockTopology* _block_topology;
+       //name of the local filename (!= masterfilename)
+       string _medfilename;
+       // pointers to global numberings
+       int* _nodeglobal;
+       int* _edgeglobal;
+       int* _faceglobal;
+       int* _cellglobal;
+};
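+// A minimal construction sketch, mirroring test_ExplicitDEC.cxx (the master
+// file name is illustrative):
+//
+//     ParaMESH mesh(MED_DRIVER, "master_file", *proc_group);
+//     const int* cell_num = mesh.getGlobalNumbering(MED_EN::MED_CELL);
+//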
+
+}
+
+#endif /*PARAMESH_HXX_*/
diff --git a/src/ParaMEDMEM/ParaMESHCollection.cxx b/src/ParaMEDMEM/ParaMESHCollection.cxx
new file mode 100644 (file)
index 0000000..4d8e385
--- /dev/null
@@ -0,0 +1,9 @@
+#include "ParaMESHCollection.hxx"
+
+ParaMESHCollection::ParaMESHCollection()
+{
+}
+
+ParaMESHCollection::~ParaMESHCollection()
+{
+}
diff --git a/src/ParaMEDMEM/ParaMESHCollection.hxx b/src/ParaMEDMEM/ParaMESHCollection.hxx
new file mode 100644 (file)
index 0000000..1044b32
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef PARAMESHCOLLECTION_HXX_
+#define PARAMESHCOLLECTION_HXX_
+
+#include <vector>
+
+#include "MEDMEM_Exception.hxx"
+#include "MEDMEM_define.hxx"
+#include "MEDMEM_GenDriver.hxx"
+#include "MEDMEM_Mesh.hxx"
+#include "MEDMEM_ConnectZone.hxx"
+#include "ParaMEDMEM_ProcessorGroup.hxx"
+
+class ParaMESHCollection
+{
+public:
+       ParaMESHCollection();
+       ParaMESHCollection(MEDMEM::driverTypes driver_type, const std::string& file_name, 
+               const std::string& driver_name, const ParaMEDMEM::ProcessorGroup& proc_group,
+               const int* distribution=NULL)
+       throw (MEDMEM::MEDEXCEPTION);
+       virtual ~ParaMESHCollection();
+       
+private :
+       std::vector<ParaMEDMEM::ParaMESH*> _meshes;
+};
+
+#endif /*PARAMESHCOLLECTION_HXX_*/
diff --git a/src/ParaMEDMEM/ParaSUPPORT.cxx b/src/ParaMEDMEM/ParaSUPPORT.cxx
new file mode 100644 (file)
index 0000000..b03fd5d
--- /dev/null
@@ -0,0 +1,19 @@
+#include "ParaSUPPORT.hxx"
+#include "ParaMESH.hxx"
+#include "MEDMEM_Support.hxx"
+
+namespace ParaMEDMEM
+{
+
+  ParaSUPPORT::ParaSUPPORT():_support(0),_mesh(0)
+  {
+  }
+  
+  ParaSUPPORT::ParaSUPPORT(const MEDMEM::SUPPORT& support):_support(&support) {}
+  
+  ParaSUPPORT::~ParaSUPPORT()
+  {
+  }
+  
+}
+
diff --git a/src/ParaMEDMEM/ParaSUPPORT.hxx b/src/ParaMEDMEM/ParaSUPPORT.hxx
new file mode 100644 (file)
index 0000000..7ff301a
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef PARASUPPORT_HXX_
+#define PARASUPPORT_HXX_
+
+namespace MEDMEM
+{
+  class SUPPORT;
+}
+namespace ParaMEDMEM
+{
+  class Topology;
+  class ParaMESH;
+  class ParaSUPPORT
+  {
+  public:
+    ParaSUPPORT();
+    ParaSUPPORT(const MEDMEM::SUPPORT&);
+    virtual ~ParaSUPPORT();
+    virtual const Topology* getTopology() const {return 0;}
+    virtual const MEDMEM::SUPPORT* getSupport() const {return _support;}
+    virtual const ParaMESH* getMesh() const {return _mesh;}
+  private :
+    const MEDMEM::SUPPORT* _support;
+    const ParaMESH* _mesh;
+  };
+  
+}
+
+
+#endif /*PARASUPPORT_HXX_*/
diff --git a/src/ParaMEDMEM/ProcessorGroup.cxx b/src/ParaMEDMEM/ProcessorGroup.cxx
new file mode 100644 (file)
index 0000000..942d801
--- /dev/null
@@ -0,0 +1,14 @@
+#include "ProcessorGroup.hxx"
+
+namespace ParaMEDMEM
+{
+
+ProcessorGroup::ProcessorGroup()
+{
+}
+
+ProcessorGroup::~ProcessorGroup()
+{
+}
+
+}
diff --git a/src/ParaMEDMEM/ProcessorGroup.hxx b/src/ParaMEDMEM/ProcessorGroup.hxx
new file mode 100644 (file)
index 0000000..d58eae0
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef PROCESSORGROUP_HXX_
+#define PROCESSORGROUP_HXX_
+#include <set>
+
+
+
+namespace ParaMEDMEM
+{
+class CommInterface;
+
+class ProcessorGroup
+{
+public:
+
+       ProcessorGroup(const CommInterface& interface):_comm_interface(interface){}
+       ProcessorGroup(const CommInterface& interface, std::set<int> proc_ids):
+               _comm_interface(interface),_proc_ids(proc_ids){}
+       ProcessorGroup (const ProcessorGroup& proc_group, std::set<int> proc_ids):
+               _comm_interface(proc_group.getCommInterface()),_proc_ids(proc_ids){}
+       virtual ~ProcessorGroup(){}
+       virtual void fuse (ProcessorGroup&)=0;
+       virtual void intersect (ProcessorGroup&)=0;
+       bool contains(int rank) const {return _proc_ids.find(rank)!=_proc_ids.end();};
+       virtual bool containsMyRank() const=0;
+       int size() const  {return _proc_ids.size();}
+       const CommInterface& getCommInterface()const {return _comm_interface;};
+       virtual int myRank() const =0;
+       virtual int translateRank(const ProcessorGroup*, int) const =0;
+       
+protected:
+       const CommInterface& _comm_interface;
+       std::set<int> _proc_ids;
+};
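+// ProcessorGroup is abstract; this commit provides MPIProcessorGroup as the
+// MPI implementation. Typical construction, taken from the test programs:
+//
+//     CommInterface interface;
+//     set<int> procs;
+//     procs.insert(0);
+//     ProcessorGroup* group = new MPIProcessorGroup(interface, procs);
+//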
+
+}
+
+#endif /*PROCESSORGROUP_HXX_*/
diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.cxx b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx
new file mode 100644 (file)
index 0000000..881fca2
--- /dev/null
@@ -0,0 +1,294 @@
+#include <mpi.h>
+#include "CommInterface.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "StructuredCoincidentDEC.hxx"
+
+namespace ParaMEDMEM
+{
+
+StructuredCoincidentDEC::StructuredCoincidentDEC():_toposource(0),_topotarget(0)
+{      
+}
+
+
+StructuredCoincidentDEC::~StructuredCoincidentDEC()
+{
+}
+
+
+/*! Synchronization process for exchanging topologies
+ */
+void StructuredCoincidentDEC::synchronize()
+{
+       if (_source_field!=0)
+               _toposource = dynamic_cast<BlockTopology*>(_source_field->getTopology());
+       if (_target_field!=0)
+               _topotarget = dynamic_cast<BlockTopology*>(_target_field->getTopology());
+       
+       // Transmitting source topology to target code 
+       broadcastTopology(_toposource,1000);
+       // Transmitting target topology to source code
+       broadcastTopology(_topotarget,2000);
+       //checkCompatibility(_toposource,_topotarget);
+}
+
+/*! Creates the arrays necessary for the data transfer
+ * and fills the send array with the values of the 
+ * source field
+ *  */
+void StructuredCoincidentDEC::prepareSourceDE()
+{
+       ////////////////////////////////////
+       //Step 1 : buffer array creation 
+       
+       if (!_toposource->getProcGroup()->containsMyRank())
+               return;
+       MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface());
+       
+       int myranksource = _toposource->getProcGroup()->myRank();
+       
+       vector <int>* target_arrays=new vector<int>[_topotarget->getProcGroup()->size()];
+       
+       //cout<<" topotarget size"<<    _topotarget->getProcGroup()->size()<<endl;
+       
+       int nb_local = _toposource-> getNbLocalElements();
+       for (int ielem=0; ielem< nb_local ; ielem++)
+       {
+       //      cout <<"source local :"<<myranksource<<","<<ielem<<endl; 
+               int global = _toposource->localToGlobal(make_pair(myranksource, ielem));
+       //      cout << "global "<<global<<endl;
+               pair<int,int> target_local =_topotarget->globalToLocal(global);
+       //      cout << "target local : "<<target_local.first<<","<<target_local.second<<endl; 
+               target_arrays[target_local.first].push_back(target_local.second); 
+       }       
+       
+       int union_size=group->size();
+       
+       _sendcounts=new int[union_size];
+       _senddispls=new int[union_size];
+       _recvcounts=new int[union_size];
+       _recvdispls=new int[union_size];
+       
+       for (int i=0; i< union_size; i++)
+       {
+               _sendcounts[i]=0;
+               _recvcounts[i]=0;
+               _recvdispls[i]=0;
+       }
+       _senddispls[0]=0;
+       
+       for (int iproc=0; iproc < _topotarget->getProcGroup()->size(); iproc++)
+       {
+               //converts the rank in target to the rank in union communicator
+               int unionrank=group->translateRank(_topotarget->getProcGroup(),iproc);
+               _sendcounts[unionrank]=target_arrays[iproc].size();
+       }
+       
+       for (int iproc=1; iproc<group->size();iproc++)
+               _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];
+       
+       _sendbuffer = new double [nb_local ];
+
+       /////////////////////////////////////////////////////////////
+       //Step 2 : filling the buffers with the source field values 
+
+       int* counter=new int [_topotarget->getProcGroup()->size()];
+       counter[0]=0;   
+       for (int i=1; i<_topotarget->getProcGroup()->size(); i++)
+               counter[i]=counter[i-1]+target_arrays[i-1].size();
+               
+                       
+       const double* value = _source_field->getField()->getValue();
+       //cout << "Nb local " << nb_local<<endl;
+       for (int ielem=0; ielem<nb_local ; ielem++)
+       {
+               int global = _toposource->localToGlobal(make_pair(myranksource, ielem));
+               pair<int,int> target_local =_topotarget->globalToLocal(global);
+               //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
+               //cout <<"counter[]"<<counter[target_local.first]<<endl;
+               _sendbuffer[counter[target_local.first]++]=value[ielem];
+               
+       }
+       delete[] target_arrays;
+       delete[] counter;
+       delete group;
+}
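+// After prepareSourceDE, _sendbuffer holds the nb_local source values grouped
+// by target processor: the values bound for target rank p occupy the slice
+// [_senddispls[u], _senddispls[u]+_sendcounts[u]) where u is the rank of p in
+// the union communicator, which is the layout expected by allToAllV below.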
+
+/*!
+ *  Creates the buffers for receiving the fields on the target side
+ */
+void StructuredCoincidentDEC::prepareTargetDE()
+{
+       if (!_topotarget->getProcGroup()->containsMyRank())
+               return;
+       MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface());
+       
+       int myranktarget = _topotarget->getProcGroup()->myRank();
+       
+       vector < vector <int> > source_arrays(_toposource->getProcGroup()->size());
+       int nb_local = _topotarget-> getNbLocalElements();
+       for (int ielem=0; ielem< nb_local ; ielem++)
+       {
+       //      cout <<"TS target local :"<<myranktarget<<","<<ielem<<endl; 
+               int global = _topotarget->localToGlobal(make_pair(myranktarget, ielem));
+               //cout << "TS global "<<global<<endl;
+               pair<int,int> source_local =_toposource->globalToLocal(global);
+       //      cout << "TS source local : "<<source_local.first<<","<<source_local.second<<endl; 
+               source_arrays[source_local.first].push_back(source_local.second); 
+       }       
+       int union_size=group->size();
+       _recvcounts=new int[union_size];
+    _recvdispls=new int[union_size];
+    _sendcounts=new int[union_size];
+    _senddispls=new int[union_size];
+    
+       for (int i=0; i< union_size; i++)
+               {
+                       _sendcounts[i]=0;
+                       _recvcounts[i]=0;
+                       _recvdispls[i]=0;
+               }
+       for (int iproc=0; iproc < _toposource->getProcGroup()->size(); iproc++)
+       {
+               //converts the rank in target to the rank in union communicator
+               int unionrank=group->translateRank(_toposource->getProcGroup(),iproc);
+               _recvcounts[unionrank]=source_arrays[iproc].size();
+       }
+       for (int i=1; i<union_size; i++)
+               _recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];
+       _recvbuffer=new double[nb_local];
+       delete group;
+}
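+// prepareTargetDE mirrors prepareSourceDE: _recvcounts/_recvdispls are indexed
+// by union rank and sized so that incoming values arrive grouped by source
+// processor, ready to be reordered element by element in recvData().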
+
+/*!
+ * Synchronizes a topology so that all the 
+ * processors in the group possess it.
+ * 
+ * \param topo Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
+ * \param tag Communication tag associated with this operation.
+ */
+void StructuredCoincidentDEC::broadcastTopology(BlockTopology*& topo, int tag)
+{
+       MPI_Status status;
+       
+       int* serializer=0;
+       int size;
+       
+       MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
+       
+       // The master proc creates a send buffer containing
+       // a serialized topology
+       int rank_master;
+       
+       if (topo!=0 && topo->getProcGroup()->myRank()==0)
+       {
+               MESSAGE ("Master rank");
+               topo->serialize(serializer, size);
+               rank_master = group->translateRank(topo->getProcGroup(),0);
+               MESSAGE("Master rank world number is "<<rank_master);
+               MESSAGE("World Size is "<<group->size());
+               for (int i=0; i< group->size(); i++)
+               {
+                       if (i!= rank_master)
+                               _comm_interface->send(&rank_master,1,MPI_INT, i,tag+i,*(group->getComm()));
+               }
+       }
+       else
+       {
+               MESSAGE(" rank "<<group->myRank()<< " waiting ...");
+               _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
+               MESSAGE(" rank "<<group->myRank()<< "received master rank"<<rank_master);
+       }
+       // The topology is broadcast to all processors in the group
+       _comm_interface->broadcast(&size, 1,MPI_INT,rank_master,*(group->getComm()));
+       
+       int* buffer=new int[size];
+       if (topo!=0 && topo->getProcGroup()->myRank()==0)
+               copy(serializer, serializer+size, buffer); 
+       _comm_interface->broadcast(buffer,size,MPI_INT,rank_master,*(group->getComm()));
+       
+       // Processors which did not possess the source topology 
+       // unserialize it
+       
+       BlockTopology* topotemp=new BlockTopology();
+       topotemp->unserialize(buffer, *_comm_interface);
+       
+       if (topo==0) 
+               topo=topotemp;
+       else 
+               delete topotemp;
+       
+       // Memory cleaning
+       delete[] buffer;
+       if (serializer!=0)
+               delete[] serializer;
+       MESSAGE (" rank "<<group->myRank()<< " unserialize is over");
+}
+
+
+
+void StructuredCoincidentDEC::recvData()
+{
+       //MPI_COMM_WORLD is used instead of group because there is no
+       //mechanism for creating the union group yet
+       MESSAGE("recvData");
+       for (int i=0; i< 4; i++)
+               cout << _recvcounts[i]<<" ";
+       cout <<endl;
+       for (int i=0; i< 4; i++)
+               cout << _recvdispls[i]<<" ";
+       cout <<endl;
+       
+       cout<<"start AllToAll"<<endl;
+       _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, 
+                       _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
+       cout<<"end AllToAll"<<endl;
+
+       int nb_local = _topotarget->getNbLocalElements();
+       double* value=new double[nb_local];
+       int myranktarget=_topotarget->getProcGroup()->myRank();
+       vector<int> counters(_toposource->getProcGroup()->size());
+       counters[0]=0;
+       //the union group is built once, outside the loop, to avoid leaking it
+       MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
+       for (int i=0; i<_toposource->getProcGroup()->size()-1; i++)
+               {
+                       int worldrank=group->translateRank(_toposource->getProcGroup(),i);
+                       counters[i+1]=counters[i]+_recvcounts[worldrank];
+               }
+       delete group;
+       
+       for (int ielem=0; ielem<nb_local ; ielem++)
+       {
+               int global = _topotarget->localToGlobal(make_pair(myranktarget, ielem));
+               pair<int,int> source_local =_toposource->globalToLocal(global);
+               value[ielem]=_recvbuffer[counters[source_local.first]++];
+       }
+       
+       
+       _target_field->getField()->setValue(value);
+}
+
+void StructuredCoincidentDEC::sendData()
+{
+       MESSAGE ("sendData");
+       for (int i=0; i< 4; i++)
+               cout << _sendcounts[i]<<" ";
+       cout <<endl;
+       for (int i=0; i< 4; i++)
+               cout << _senddispls[i]<<" ";
+       cout <<endl;
+       //MPI_COMM_WORLD is used instead of group because there is no
+       //mechanism for creating the union group yet
+       cout <<"start AllToAll"<<endl;
+       
+       _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, 
+                       _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
+       cout<<"end AllToAll"<<endl;
+}
+       
+
+}
+
diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.hxx b/src/ParaMEDMEM/StructuredCoincidentDEC.hxx
new file mode 100644 (file)
index 0000000..4231124
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef STRUCTUREDCOINCIDENTDEC_HXX_
+#define STRUCTUREDCOINCIDENTDEC_HXX_
+
+#include "DEC.hxx"
+#include "BlockTopology.hxx"
+
+
+namespace ParaMEDMEM
+{
+class DEC;
+class BlockTopology;
+class StructuredCoincidentDEC: public DEC
+{
+public:
+       StructuredCoincidentDEC();
+       virtual ~StructuredCoincidentDEC();
+       void synchronize();
+       void broadcastTopology(BlockTopology*&, int tag);
+       void prepareSourceDE();
+       void prepareTargetDE();
+       void recvData();
+       void sendData();
+private :
+       
+       BlockTopology* _toposource;
+       BlockTopology* _topotarget;
+       int* _sendcounts;
+       int* _recvcounts;
+       int* _senddispls;
+       int* _recvdispls;
+       double* _recvbuffer;
+       double* _sendbuffer;
+};
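+// Call sequence sketch on the source side, as performed by
+// ParaFIELD::synchronizeSource (the target side uses attachTargetField and
+// prepareTargetDE, the all-to-all exchange being symmetric):
+//
+//     StructuredCoincidentDEC dec;
+//     dec.attachSourceField(para_field);  // para_field : ParaFIELD*
+//     dec.synchronize();                  // exchanges the block topologies
+//     dec.prepareSourceDE();              // sizes and fills the send buffers
+//     dec.sendData();                     // all-to-all over the union communicator
+//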
+
+}
+
+#endif /*STRUCTUREDCOINCIDENTDEC_HXX_*/
+       
diff --git a/src/ParaMEDMEM/StructuredParaSUPPORT.cxx b/src/ParaMEDMEM/StructuredParaSUPPORT.cxx
new file mode 100644 (file)
index 0000000..3af5bca
--- /dev/null
@@ -0,0 +1,36 @@
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ParaGRID.hxx"
+#include "ParaMESH.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "MEDMEM_Support.hxx"
+
+namespace ParaMEDMEM 
+{
+       
+/*! Constructor on all elements from a GRID */
+StructuredParaSUPPORT::StructuredParaSUPPORT(const ParaGRID* const grid, const MED_EN::medEntityMesh entity):
+_block_topology(grid->getBlockTopology()),
+_grid(grid), 
+_mesh(0),
+_entity(entity),
+_support(new SUPPORT(grid->getGrid(), "support on all entities", entity))
+{
+       
+}
+/*! Constructor on all elements from a MESH */
+StructuredParaSUPPORT::StructuredParaSUPPORT(const ParaMESH* const mesh, const MED_EN::medEntityMesh entity):
+_block_topology(mesh->getBlockTopology()),
+_grid(0),
+_mesh(mesh), 
+_entity(entity),
+_support(new SUPPORT(mesh->getMesh(), "support on all entities", entity))
+{
+       
+}
+StructuredParaSUPPORT::~StructuredParaSUPPORT()
+{
+       delete _support;
+}
+
+}//end of namespace ParaMEDMEM
diff --git a/src/ParaMEDMEM/StructuredParaSUPPORT.hxx b/src/ParaMEDMEM/StructuredParaSUPPORT.hxx
new file mode 100644 (file)
index 0000000..8072cc2
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef STRUCTUREDPARASUPPORT_HXX_
+#define STRUCTUREDPARASUPPORT_HXX_
+
+#include "ParaSUPPORT.hxx"
+#include "MEDMEM_define.hxx"
+
+using namespace MED_EN;
+namespace MEDMEM
+{
+       class SUPPORT;
+}
+
+namespace ParaMEDMEM
+{
+class BlockTopology;
+class ParaGRID;
+class ParaMESH;
+
+class StructuredParaSUPPORT:public ParaSUPPORT
+{
+public:
+       
+       StructuredParaSUPPORT(const ParaGRID* const grid, const MED_EN::medEntityMesh entity);
+       StructuredParaSUPPORT(const ParaMESH* const mesh, const MED_EN::medEntityMesh entity);
+       
+       virtual ~StructuredParaSUPPORT();
+       const Topology* getTopology() const {return _block_topology;}
+       const MEDMEM::SUPPORT* getSupport() const {return _support;}
+       const ParaMESH* getParaMesh()const {return _mesh;}
+       
+private:
+       const BlockTopology* const  _block_topology;
+       const ParaGRID* const _grid;
+       const ParaMESH* const _mesh;
+       const MED_EN::medEntityMesh _entity;
+       const MEDMEM::SUPPORT* _support;
+       
+};
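+// Construction sketch, taken from the test programs of this commit:
+//
+//     ParaGRID local_grid(&grid, topo);
+//     StructuredParaSUPPORT support(&local_grid, MED_EN::MED_CELL);
+//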
+
+}
+#endif /*STRUCTUREDPARASUPPORT_HXX_*/
diff --git a/src/ParaMEDMEM/Topology.cxx b/src/ParaMEDMEM/Topology.cxx
new file mode 100644 (file)
index 0000000..b29cdf1
--- /dev/null
@@ -0,0 +1,14 @@
+#include "Topology.hxx"
+
+namespace ParaMEDMEM
+{
+
+Topology::Topology()
+{
+}
+
+Topology::~Topology()
+{
+}
+
+}
diff --git a/src/ParaMEDMEM/Topology.hxx b/src/ParaMEDMEM/Topology.hxx
new file mode 100644 (file)
index 0000000..1ab9434
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef TOPOLOGY_HXX_
+#define TOPOLOGY_HXX_
+
+#include <utility>
+
+using namespace std;
+namespace ParaMEDMEM
+{
+
+class Topology
+{
+public:
+       Topology(){}
+       virtual ~Topology(){}
+//     virtual std::pair<int,int> globalToLocal (const int) const =0;
+//     virtual int localToGlobal (const std::pair<int,int>) const =0;
+       virtual int getNbElements() const=0;
+       virtual int getNbLocalElements() const =0;
+};
+
+}
+
+#endif /*TOPOLOGY_HXX_*/
diff --git a/src/ParaMEDMEM/UnstructuredParaSUPPORT.cxx b/src/ParaMEDMEM/UnstructuredParaSUPPORT.cxx
new file mode 100644 (file)
index 0000000..49c28ba
--- /dev/null
@@ -0,0 +1,24 @@
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ParaGRID.hxx"
+#include "UnstructuredParaSUPPORT.hxx"
+#include "MEDMEM_Support.hxx"
+
+namespace ParaMEDMEM 
+{
+       
+/*! Constructor on a given support of a MESH */
+UnstructuredParaSUPPORT::UnstructuredParaSUPPORT(const ParaMESH* const mesh, const SUPPORT* support):
+_mesh(mesh), 
+_entity(support->getEntity()),
+_support(support),
+_block_topology(new BlockTopology(*(mesh->getBlockTopology()->getProcGroup()), support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS)))
+{
+}
+
+UnstructuredParaSUPPORT::~UnstructuredParaSUPPORT()
+{
+       //the support is owned by the caller here; only the topology allocated
+       //in the constructor is deleted
+       delete _block_topology;
+}
+
+}//end of namespace ParaMEDMEM
diff --git a/src/ParaMEDMEM/UnstructuredParaSUPPORT.hxx b/src/ParaMEDMEM/UnstructuredParaSUPPORT.hxx
new file mode 100644 (file)
index 0000000..4f663f2
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef UNSTRUCTUREDPARASUPPORT_HXX_
+#define UNSTRUCTUREDPARASUPPORT_HXX_
+
+#include "ParaSUPPORT.hxx"
+#include "MEDMEM_define.hxx"
+
+using namespace MED_EN;
+namespace MEDMEM
+{
+       class SUPPORT;
+}
+
+namespace ParaMEDMEM
+{
+class BlockTopology;
+class ParaMESH;
+
+class UnstructuredParaSUPPORT:public ParaSUPPORT
+{
+public:
+       
+       UnstructuredParaSUPPORT(const ParaMESH* const mesh, const SUPPORT* support);
+       virtual ~UnstructuredParaSUPPORT();
+       const Topology* getTopology() const {return _block_topology;}
+       const MEDMEM::SUPPORT* getSupport() const {return _support;}
+private:
+       const BlockTopology* const  _block_topology;
+       const ParaMESH* const _mesh;
+       const MED_EN::medEntityMesh _entity;
+       const MEDMEM::SUPPORT* _support;
+       
+};
+
+}
+#endif /*UNSTRUCTUREDPARASUPPORT_HXX_*/
diff --git a/src/ParaMEDMEM/test_BlockTopology.cxx b/src/ParaMEDMEM/test_BlockTopology.cxx
new file mode 100644 (file)
index 0000000..eebfc5d
--- /dev/null
@@ -0,0 +1,70 @@
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Grid.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+int main(int argc, char** argv)
+{
+       string testname="ParaMEDMEM - test #1 -";
+       MPI_Init(&argc, &argv); 
+       int size;
+       int rank;
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       
+       set<int> procs;
+       for (int i=0; i< size-1; i++)
+               procs.insert(i);
+       
+       ParaMEDMEM::CommInterface interface;
+               
+       ParaMEDMEM::ProcessorGroup* group=new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+       double xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0;
+       int nx=10, ny=10;
+       std::vector<std::vector<double> > axes(2);
+       axes[0].resize(nx);
+       axes[1].resize(ny);
+       for (int i=0; i<nx; i++)
+               axes[0][i]=xmin+((xmax-xmin)*i)/(nx-1);
+       for (int i=0; i<ny; i++)
+               axes[1][i]=ymin+((ymax-ymin)*i)/(ny-1);
+       vector <string> coord_name;
+       vector <string> coord_unit;
+       coord_name.push_back("x");coord_name.push_back("y");
+       coord_unit.push_back("m");coord_unit.push_back("m");
+       
+       MEDMEM::GRID grid(axes, coord_name,coord_unit);
+       
+       BlockTopology* topo = new BlockTopology(*group, grid);
+       for (int i=0; i<size; i++)
+       {
+               MPI_Barrier(MPI_COMM_WORLD);
+               if (i==rank)
+               {
+                       cout << "Global to Local 10, 9, 45 :"<<endl;
+                       cout << "("<<topo->globalToLocal(10).first<<","<<topo->globalToLocal(10).second<<")";
+                       cout << "("<<topo->globalToLocal(9).first<<","<<topo->globalToLocal(9).second<<")";
+                       cout << "("<<topo->globalToLocal(45).first<<","<<topo->globalToLocal(45).second<<")";
+                       cout << endl;
+               }
+       } 
+       MPI_Finalize();
+       return 0;
+}
+
+
+
+
diff --git a/src/ParaMEDMEM/test_DEC.cxx b/src/ParaMEDMEM/test_DEC.cxx
new file mode 100644 (file)
index 0000000..1149d89
--- /dev/null
@@ -0,0 +1,113 @@
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Grid.hxx"
+#include "ParaGRID.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+int main(int argc, char** argv)
+{
+       string testname="ParaMEDMEM - test #1 -";
+       MPI_Init(&argc, &argv); 
+       int size;
+       int rank;
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       if (size<3) 
+       { 
+               cout << " test_DEC test program is not meant to run "<<endl;
+               cout << " with less than 3 processes"<<endl;
+               return 1;
+       }
+       set<int> self_procs;
+       set<int> procs_source;
+       set<int> procs_target;
+       for (int i=0; i<size-2; i++)
+               procs_source.insert(i);
+       for (int i=size-2; i<size; i++)
+               procs_target.insert(i);
+       self_procs.insert(rank);
+       
+       ParaMEDMEM::CommInterface interface;
+               
+       ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+       ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+       ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+       
+       
+       double xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0;
+       int nx=500, ny=500;
+       std::vector<std::vector<double> > axes(2);
+       axes[0].resize(nx);
+       axes[1].resize(ny);
+       for (int i=0; i<nx; i++)
+               axes[0][i]=xmin+((xmax-xmin)*i)/(nx-1);
+       for (int i=0; i<ny; i++)
+               axes[1][i]=ymin+((ymax-ymin)*i)/(ny-1);
+       vector <string> coord_name;
+       vector <string> coord_unit;
+       coord_name.push_back("x");coord_name.push_back("y");
+       coord_unit.push_back("m");coord_unit.push_back("m");
+       
+       MEDMEM::GRID grid(axes, coord_name,coord_unit);
+       
+       grid.setName("grid_5_5");
+       grid.setDescription("5 by 5 square grid");
+       Topology* topo_source = new BlockTopology(*self_group, grid);
+       Topology* topo_target = new BlockTopology(*target_group,grid);
+       ParaGRID* source_grid;
+       ParaGRID* target_grid;
+       StructuredParaSUPPORT* target_support;
+       StructuredParaSUPPORT* source_support;
+       ComponentTopology* target_comp;
+       ComponentTopology* source_comp;
+       ParaFIELD* target_field=0;
+       ParaFIELD* source_field=0;
+       
+       if (source_group->containsMyRank())
+       {
+               source_grid=new ParaGRID(&grid, topo_source);
+               source_support=new StructuredParaSUPPORT(source_grid,MED_EN::MED_CELL);
+               source_comp=new ComponentTopology (6, source_group);
+               source_field = new ParaFIELD(source_support, *source_comp);
+               cout << "Source field nb elems on rank : "<<rank<<" : "<<source_field->getTopology()->getNbLocalElements()<<endl;
+               double * value= new double[source_field->getTopology()->getNbLocalElements()];
+               for(int ielem=0; ielem<source_field->getTopology()->getNbLocalElements();ielem++)
+                       value[ielem]=(double)ielem;
+               source_field->getField()->setValue(value);
+               source_field->synchronizeSource(target_field);
+       }
+       if (target_group->containsMyRank())
+       {
+               target_grid=new ParaGRID(&grid, topo_target);
+               target_support=new StructuredParaSUPPORT(target_grid,MED_EN::MED_CELL); 
+               target_comp= new ComponentTopology (6);
+               target_field = new ParaFIELD(target_support, *target_comp);
+               target_field->synchronizeTarget(source_field);
+               //target_grid->write(MED_DRIVER, "/tmp/target");
+               //target_field->write(MED_DRIVER, "/tmp/target");
+       }
+       MPI_Barrier(MPI_COMM_WORLD);
+       MPI_Finalize();
+       return 0;
+}
+
+
+
+
diff --git a/src/ParaMEDMEM/test_ExplicitDEC.cxx b/src/ParaMEDMEM/test_ExplicitDEC.cxx
new file mode 100644 (file)
index 0000000..70fc5f8
--- /dev/null
@@ -0,0 +1,111 @@
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Mesh.hxx"
+#include "ParaMESH.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+int main(int argc, char** argv)
+{
+       string testname="ParaMEDMEM - test #1 -";
+       MPI_Init(&argc, &argv); 
+       int size;
+       int rank;
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       if (size<3) 
+       { 
+               cout << " test_DEC test program is not meant to run "<<endl;
+               cout << " with less than 3 processes"<<endl;
+               return 1;
+       }
+       set<int> self_procs;
+       set<int> procs_source;
+       set<int> procs_target;
+       for (int i=0; i<size-2; i++)
+               procs_source.insert(i);
+       for (int i=size-2; i<size; i++)
+               procs_target.insert(i);
+       self_procs.insert(rank);
+       
+       ParaMEDMEM::CommInterface interface;
+               
+       ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+       ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+       ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+       
+       ParaMEDMEM::ParaMESH* source_mesh=0;
+       ParaMEDMEM::ParaMESH* target_mesh=0;
+       
+       
+       Topology* topo_source;
+       Topology* topo_target;
+       if (source_group->containsMyRank())
+       {
+               source_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/pointe_nosplit",*self_group);
+               topo_source=source_mesh->getBlockTopology();
+       }
+       if (target_group->containsMyRank())
+       {
+               target_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/pointe_split",*target_group);
+               topo_target=target_mesh->getBlockTopology();
+       }
+               
+       StructuredParaSUPPORT* target_support;
+       StructuredParaSUPPORT* source_support;
+       ComponentTopology* target_comp;
+       ComponentTopology* source_comp;
+       ParaFIELD* target_field=0;
+       ParaFIELD* source_field=0;
+       
+       if (source_group->containsMyRank())
+       {
+               source_support=new StructuredParaSUPPORT(source_mesh,MED_EN::MED_CELL);
+               source_comp=new ComponentTopology (6, source_group);
+               source_field = new ParaFIELD(source_support, *source_comp);
+               int nb_local = source_field->getTopology()->getNbLocalElements();
+               cout << "Source field nb elems on rank : "<<rank<<" : "<<nb_local<<endl;
+               double * value= new double[nb_local];
+               for(int ielem=0; ielem<nb_local;ielem++)
+                       value[ielem]=(double)ielem;
+               source_field->getField()->setValue(value);
+               source_field->synchronizeSource(target_field);
+               if (source_group->myRank()==0)
+               {
+                       source_mesh->write(MED_DRIVER,"/home/vb144235/tmp/source");
+                       source_field->write(MED_DRIVER,"/home/vb144235/tmp/source","maa1");
+               }
+       }
+       if (target_group->containsMyRank())
+       {
+               target_support=new StructuredParaSUPPORT(target_mesh,MED_EN::MED_CELL); 
+               target_comp= new ComponentTopology (6);
+               target_field = new ParaFIELD(target_support, *target_comp);
+               target_field->synchronizeTarget(source_field);
+               target_mesh->write(MED_DRIVER, "/home/vb144235/tmp/target");
+               target_field->write(MED_DRIVER, "/home/vb144235/tmp/target", "maa1");
+       }
+       MPI_Barrier(MPI_COMM_WORLD);
+       MPI_Finalize();
+       return 0;
+}
+
+
+
+
diff --git a/src/ParaMEDMEM/test_ParaField.cxx b/src/ParaMEDMEM/test_ParaField.cxx
new file mode 100644 (file)
index 0000000..42f4b48
--- /dev/null
@@ -0,0 +1,78 @@
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Grid.hxx"
+#include "ParaGRID.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+int main(int argc, char** argv)
+{
+       string testname="ParaMEDMEM - test #1 -";
+       MPI_Init(&argc, &argv); 
+       int size;
+       int rank;
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       
+       set<int> self_procs;
+       set<int> procs;
+       for (int i=0; i<size-1; i++)
+               procs.insert(i);
+       self_procs.insert(rank);
+       
+       ParaMEDMEM::CommInterface interface;
+               
+       ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+       ParaMEDMEM::ProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+       
+       
+       double xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0;
+       int nx=10, ny=10;
+       std::vector<std::vector<double> > axes(2);
+       axes[0].resize(nx);
+       axes[1].resize(ny);
+       for (int i=0; i<nx; i++)
+               axes[0][i]=xmin+((xmax-xmin)*i)/(nx-1);
+       for (int i=0; i<ny; i++)
+               axes[1][i]=ymin+((ymax-ymin)*i)/(ny-1);
+       vector <string> coord_name;
+       vector <string> coord_unit;
+       coord_name.push_back("x");coord_name.push_back("y");
+       coord_unit.push_back("m");coord_unit.push_back("m");
+       
+       MEDMEM::GRID grid(axes, coord_name,coord_unit);
+       
+       grid.setName("grid_10_10");
+       grid.setDescription("10 by 10 square grid");
+       Topology* topo = new BlockTopology(*self_group, grid);
+
+       ParaGRID local_grid(&grid, topo);
+       StructuredParaSUPPORT support(&local_grid,MED_EN::MED_CELL);
+       local_grid.write (MED_DRIVER, "/tmp/toto");
+       
+       ComponentTopology comp_topo(5, group);
+       ParaFIELD field(&support, comp_topo);
+       
+       MPI_Finalize();
+       return 0;
+}
+
+
+
+
diff --git a/src/ParaMEDMEM/test_ParaStructuredSupport.cxx b/src/ParaMEDMEM/test_ParaStructuredSupport.cxx
new file mode 100644 (file)
index 0000000..0198a61
--- /dev/null
@@ -0,0 +1,67 @@
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Grid.hxx"
+#include "ParaGRID.hxx"
+#include "StructuredParaSUPPORT.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+int main(int argc, char** argv)
+{
+       string testname="ParaMEDMEM - test #1 -";
+       MPI_Init(&argc, &argv); 
+       int size;
+       int rank;
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       
+       set<int> procs;
+       for (int i=0; i< size-1; i++)
+               procs.insert(i);
+       
+       ParaMEDMEM::CommInterface interface;
+               
+       ParaMEDMEM::ProcessorGroup* group=new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+       double xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0;
+       int nx=10, ny=10;
+       std::vector<std::vector<double> > axes(2);
+       axes[0].resize(nx);
+       axes[1].resize(ny);
+       for (int i=0; i<nx; i++)
+               axes[0][i]=xmin+((xmax-xmin)*i)/(nx-1);
+       for (int i=0; i<ny; i++)
+               axes[1][i]=ymin+((ymax-ymin)*i)/(ny-1);
+       vector <string> coord_name;
+       vector <string> coord_unit;
+       coord_name.push_back("x");coord_name.push_back("y");
+       coord_unit.push_back("m");coord_unit.push_back("m");
+       
+       MEDMEM::GRID grid(axes, coord_name,coord_unit);
+       
+       grid.setName("grid_10_10");
+       grid.setDescription("10 by 10 square grid");
+       Topology* topo = new BlockTopology(*group, grid);
+
+       ParaGRID local_grid(&grid, topo);
+       StructuredParaSUPPORT support(&local_grid,MED_EN::MED_CELL);
+       local_grid.write (MED_DRIVER, "/tmp/toto");
+       
+       MPI_Finalize();
+       return 0;
+}
+
+
+
+
diff --git a/src/ParaMEDMEM/test_ProcessorGroup.cxx b/src/ParaMEDMEM/test_ProcessorGroup.cxx
new file mode 100644 (file)
index 0000000..8838a31
--- /dev/null
@@ -0,0 +1,46 @@
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+int main(int argc, char** argv)
+{
+       string testname="ParaMEDMEM - test #1 -";
+       MPI_Init(&argc, &argv); 
+       int size;
+       int rank;
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       
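+       // a group containing only rank 0 must report size 1 on every process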
+       set<int> procs;
+       procs.insert(0);
+       
+       ParaMEDMEM::CommInterface interface;
+               
+       ParaMEDMEM::ProcessorGroup* group=new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+       cout << "proc #" << rank << " size: " << group->size() << endl;
+       if (group->size() != 1) { MPI_Finalize(); return 1; }
+               
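+       // an empty processor group must report size 0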
+       set<int> empty_proc_group;
+       ParaMEDMEM::ProcessorGroup* group_empty=new ParaMEDMEM::MPIProcessorGroup(interface,empty_proc_group);
+       cout << "proc #" << rank << " size: " << group_empty->size() << endl;
+       if (group_empty->size() != 0) { MPI_Finalize(); return 1; }
+       
+       delete group;
+       delete group_empty;
+       
+       MPI_Finalize();
+       return 0;
+}
+
+
+
+
diff --git a/src/ParaMEDMEM/test_UnstructuredDEC.cxx b/src/ParaMEDMEM/test_UnstructuredDEC.cxx
new file mode 100644 (file)
index 0000000..126b6e2
--- /dev/null
@@ -0,0 +1,116 @@
+#include <string>
+#include <vector>
+#include <set>
+#include <iostream>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Mesh.hxx"
+#include "ParaMESH.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+int main(int argc, char** argv)
+{
+       string testname="ParaMEDMEM - test #1 -";
+       MPI_Init(&argc, &argv); 
+       int size;
+       int rank;
+       MPI_Comm_size(MPI_COMM_WORLD,&size);
+       MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+       if (size<3)
+       {
+               cout << "test_UnstructuredDEC is not meant to run" << endl;
+               cout << "with less than 3 processes" << endl;
+               MPI_Finalize();
+               return 1;
+       }
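+       // processes 0..size-3 form the source side; the last two processes form the target side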
+       set<int> self_procs;
+       set<int> procs_source;
+       set<int> procs_target;
+       for (int i=0; i<size-2; i++)
+               procs_source.insert(i);
+       for (int i=size-2; i<size; i++)
+               procs_target.insert(i);
+       self_procs.insert(rank);
+       
+       ParaMEDMEM::CommInterface interface;
+               
+       ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+       ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+       ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+       
+       ParaMEDMEM::ParaMESH* source_mesh=0;
+       ParaMEDMEM::ParaMESH* target_mesh=0;
+       
+       
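+       // each side reads its own MED mesh and keeps a handle on its block topology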
+       Topology* topo_source=0;
+       Topology* topo_target=0;
+       if (source_group->containsMyRank())
+       {
+               source_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_nosplit",*self_group);
+               topo_source=source_mesh->getBlockTopology();
+       }
+       if (target_group->containsMyRank())
+       {
+               target_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_split",*target_group);
+               topo_target=target_mesh->getBlockTopology();
+       }
+               
+       StructuredParaSUPPORT* target_support=0;
+       StructuredParaSUPPORT* source_support=0;
+       ComponentTopology* target_comp=0;
+       ComponentTopology* source_comp=0;
+       ParaFIELD* target_field=0;
+       ParaFIELD* source_field=0;
+       
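+       // source side: build a 6-component field, fill it with the local element number and send it to the target side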
+       if (source_group->containsMyRank())
+       {
+               source_support = new StructuredParaSUPPORT(source_mesh, MED_EN::MED_CELL);
+               source_comp = new ComponentTopology(6, source_group);
+               source_field = new ParaFIELD(source_support, *source_comp);
+               int nb_local = source_field->getTopology()->getNbLocalElements();
+               cout << "Source field nb elems on rank " << rank << " : " << nb_local << endl;
+               double* value = new double[nb_local];
+               for (int ielem=0; ielem<nb_local; ielem++)
+                       value[ielem]=(double)ielem;
+               source_field->getField()->setValue(value);
+               source_field->synchronizeSource(target_field);
+               if (source_group->myRank()==0)
+               {
+                       source_mesh->write(MED_DRIVER,"/home/vb144235/tmp/source");
+                       source_field->write(MED_DRIVER,"/home/vb144235/tmp/source","maa1");
+               }
+       }
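+       // target side: build the matching field and receive the values from the source side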
+       if (target_group->containsMyRank())
+       {
+               target_support = new StructuredParaSUPPORT(target_mesh, MED_EN::MED_CELL);
+               target_comp = new ComponentTopology(6);
+               target_field = new ParaFIELD(target_support, *target_comp);
+               target_field->synchronizeTarget(source_field);
+               target_mesh->write(MED_DRIVER, "/home/vb144235/tmp/target");
+               target_field->write(MED_DRIVER, "/home/vb144235/tmp/target", "maa1");
+       }
+       MPI_Barrier(MPI_COMM_WORLD);
+       MPI_Finalize();
+       return 0;
+}
+
+
+
+