From: vbd
Date: Wed, 24 Jan 2007 11:07:54 +0000 (+0000)
Subject: creating ParaMEDMEM
X-Git-Tag: trio_trio_coupling~109
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=ae6da848e0cbd9b3e27b3a500c07444f1ebededa;p=tools%2Fmedcoupling.git

creating ParaMEDMEM
---

diff --git a/src/ParaMEDMEM/BlockTopology.cxx b/src/ParaMEDMEM/BlockTopology.cxx
new file mode 100644
index 000000000..0469ed468
--- /dev/null
+++ b/src/ParaMEDMEM/BlockTopology.cxx
@@ -0,0 +1,236 @@
+#include "MEDMEM_Grid.hxx"
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ComponentTopology.hxx"
+
+#include <vector>
+#include <algorithm>
+
+using namespace std;
+using namespace MEDMEM;
+namespace ParaMEDMEM
+{
+
+/*!
+ * Constructor of a block topology from a grid.
+ * This preliminary version simply splits along the first axis
+ * instead of choosing the best axis with respect to the
+ * lengths of the different axes.
+ */
+BlockTopology::BlockTopology(const ProcessorGroup& group, const GRID& grid):
+_proc_group(&group), _dimension(grid.getSpaceDimension())
+{
+  vector<int> axis_length(_dimension);
+
+  // idim+1 because MEDMEM axis numbering starts at one
+  _nb_elems=1;
+  for (int idim=0; idim<_dimension; idim++)
+    {
+      axis_length[idim]=grid.getArrayLength(idim+1);
+      _nb_elems*=axis_length[idim];
+    }
+  //default splitting along 1st dimension
+  _local_array_indices.resize(_dimension);
+  _nb_procs_per_dim.resize(_dimension);
+
+  _local_array_indices[0].resize(_proc_group->size()+1);
+  _local_array_indices[0][0]=0;
+  _nb_procs_per_dim[0]=_proc_group->size();
+
+  for (int i=1; i<=_proc_group->size(); i++)
+    {
+      _local_array_indices[0][i]=_local_array_indices[0][i-1]+
+        axis_length[0]/_proc_group->size();
+      if (i<=axis_length[0]%_proc_group->size())
+        _local_array_indices[0][i]+=1;
+    }
+  for (int i=1; i<_dimension; i++)
+    {
+      _local_array_indices[i].resize(2);
+      _local_array_indices[i][0]=0;
+      _local_array_indices[i][1]=axis_length[i];
+      _nb_procs_per_dim[i]=1;
+    }
+  _cycle_type.resize(_dimension);
+  for (int i=0; i<_dimension; i++)
+    _cycle_type[i]=ParaMEDMEM::Block;
+}
+
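+// Worked example of the default splitting above: with axis_length[0]=10
+// and a four-processor group, _local_array_indices[0] becomes
+// {0,3,6,8,10}: each processor receives 10/4=2 slices, and the
+// remainder 10%4=2 gives one extra slice to each of the first two
+// processors.
+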
+/*!
+ * Creation of a block topology by composing
+ * a geometrical topology and a component topology.
+ * This constructor is intended for creating fields
+ * for which the parallel distribution is made on the
+ * components of the field rather than on the geometrical
+ * partitioning of the underlying mesh.
+ */
+BlockTopology::BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo)
+{
+  // so far, the block topology can only be created if the proc group
+  // is either on geom_topo or on comp_topo
+  if (geom_topo.getProcGroup()->size()>1 && comp_topo.nbBlocks()>1)
+    throw MEDEXCEPTION(LOCALIZED(
+      "BlockTopology cannot yet be constructed with both complex geo and components topology"));
+
+  if (comp_topo.nbComponents()==1)
+    {
+      *this=geom_topo;
+      return;
+    }
+  else
+    {
+      _dimension = geom_topo.getDimension()+1;
+      if (comp_topo.nbBlocks()>1)
+        _proc_group=comp_topo.getProcGroup();
+      else
+        _proc_group=geom_topo.getProcGroup();
+      _local_array_indices=geom_topo._local_array_indices;
+      vector<int> comp_indices = *(comp_topo.getBlockIndices());
+      _local_array_indices.push_back(comp_indices);
+      _nb_procs_per_dim=geom_topo._nb_procs_per_dim;
+      _nb_procs_per_dim.push_back(comp_topo.nbBlocks());
+      _cycle_type=geom_topo._cycle_type;
+      _cycle_type.push_back(Block);
+      _nb_elems=geom_topo.getNbElements()*comp_topo.nbComponents();
+      cout << " Nb elems "<<_nb_elems<<" topo elems "<<geom_topo.getNbElements()<<endl;
+    }
+}
+
+/*! Constructor of a one-dimensional block topology from a processor
+ * group and the local number of elements on this processor; the local
+ * counts are gathered over the group to build the global offsets.
+ */
+BlockTopology::BlockTopology(const ProcessorGroup& group, int nb_elem):
+_proc_group(&group), _dimension(1)
+{
+  int* nbelems_per_proc=new int[group.size()];
+  const MPIProcessorGroup* mpi_group=dynamic_cast<const MPIProcessorGroup*>(_proc_group);
+  const MPI_Comm* comm=mpi_group->getComm();
+  mpi_group->getCommInterface().allGather(&nb_elem, 1, MPI_INTEGER,
+                                          nbelems_per_proc, 1, MPI_INTEGER,
+                                          *comm);
+  _nb_elems=0;
+
+  //splitting along only dimension
+  _local_array_indices.resize(1);
+  _nb_procs_per_dim.resize(1);
+
+  _local_array_indices[0].resize(_proc_group->size()+1);
+  _local_array_indices[0][0]=0;
+  _nb_procs_per_dim[0]=_proc_group->size();
+
+  for (int i=1; i<=_proc_group->size(); i++)
+    {
+      _local_array_indices[0][i]=_local_array_indices[0][i-1]+
+        nbelems_per_proc[i-1];
+      _nb_elems+=nbelems_per_proc[i-1];
+    }
+  _cycle_type.resize(1);
+  _cycle_type[0]=ParaMEDMEM::Block;
+  delete[] nbelems_per_proc;
+}
+
+BlockTopology::~BlockTopology()
+{
+}
+
+/*! Retrieves the min and max indices of the domain stored locally
+ * for each dimension. The output vector has the topology dimension
+ * as its size, and each pair contains min and max. Indices
+ * range from min to max-1.
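+ * For example, the processor owning slices 4 to 6 of the first axis
+ * is returned the pair (4,7) for that dimension.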
+ */ + +std::vector > BlockTopology::getLocalArrayMinMax() const +{ + vector > local_indices (_dimension); + int myrank=_proc_group->myRank(); + int increment=1; + for (int i=_dimension-1; i>=0; i--) + { + increment *=_nb_procs_per_dim[i]; + int idim=myrank%increment; + local_indices[i].first=_local_array_indices[i][idim]; + local_indices[i].second=_local_array_indices[i][idim+1]; + cout << local_indices[i].first << " "<< local_indices[i].second< buffer; + + buffer.push_back(_dimension); + buffer.push_back(_nb_elems); + for (int i=0; i<_dimension; i++) + { + buffer.push_back(_nb_procs_per_dim[i]); + buffer.push_back(_cycle_type[i]); + buffer.push_back(_local_array_indices[i].size()); + for (int j=0; j<_local_array_indices[i].size(); j++) + buffer.push_back(_local_array_indices[i][j]); + } + + //serializing the comm group + int size_comm=_proc_group->size(); + buffer.push_back(size_comm); + MPIProcessorGroup world_group(_proc_group->getCommInterface()); + for (int i=0; i procs; + int size_comm=*(ptr_serializer++); + for (int i=0; i +#include +#include +#include "ProcessorGroup.hxx" + +using namespace std; +namespace MEDMEM +{ + class GRID; +} + +namespace ParaMEDMEM +{ +class Topology; +class ComponentTopology; +typedef enum{Block,Cycle} CYCLE_TYPE; + +class BlockTopology: public Topology +{ +public: + BlockTopology(){}; + BlockTopology(const ProcessorGroup& group, const MEDMEM::GRID& grid); + BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo); + BlockTopology(const ProcessorGroup& group, int nb_elem); + virtual ~BlockTopology(); + inline int getNbElements()const; + inline int getNbLocalElements() const; + const ProcessorGroup* getProcGroup()const {return _proc_group;}; + inline std::pair globalToLocal (const int) const ; + inline int localToGlobal (const std::pair) const; + std::vector > getLocalArrayMinMax() const ; + int getDimension() const {return _dimension;}; + void serialize(int* & serializer, int& size) const ; + void unserialize(const int* serializer, const CommInterface& comm_interface); +private: + //dimension : 2 or 3 + int _dimension; + //proc array + std::vector _nb_procs_per_dim; + //stores the offsets vector + std::vector > _local_array_indices; + //stores the cycle type (block or cyclic) + std::vector _cycle_type; + //Processor group + const ProcessorGroup* _proc_group; + //nb of elements + int _nb_elems; +}; + +//!converts a pair to a global number +inline std::pair BlockTopology::globalToLocal(const int global) const { + int subdomain_id=0; + //int local=global; + int position=global; + int size=_nb_elems; + int size_procs=_proc_group->size(); + int increment=size; + vectoraxis_position(_dimension); + vectoraxis_offset(_dimension); + for (int idim=0; idim<_dimension; idim++) + { + int axis_size=_local_array_indices[idim].size()-1; + int axis_nb_elem=_local_array_indices[idim][axis_size]; + increment=increment/axis_nb_elem; + //cout << "increment "<=0; idim--) + { + local+=axis_position[idim]*local_increment; + local_increment*=_local_array_indices[idim][axis_offset[idim]]-_local_array_indices[idim][axis_offset[idim]-1]; + } + + return make_pair(subdomain_id,local); +} + +//!converts local number to a global number +inline int BlockTopology::localToGlobal(const pair local) const { + + int subdomain_id=local.first; + int global=0; + int loc=local.second; + int increment=_nb_elems; + int proc_increment=_proc_group->size(); + int local_increment=getNbLocalElements(); + for (int idim=0; idim < _dimension; idim++) + { + int 
axis_size=_local_array_indices[idim].size()-1; + int axis_nb_elem=_local_array_indices[idim][axis_size]; + increment=increment/axis_nb_elem; + proc_increment = proc_increment/(axis_size); + int proc_axis=subdomain_id/proc_increment; + subdomain_id=subdomain_id%proc_increment; + int local_axis_nb_elem=_local_array_indices[idim][proc_axis+1]-_local_array_indices[idim][proc_axis]; + local_increment = local_increment/local_axis_nb_elem; + // if (_cycle_type[idim]==Block) + //{ + int iaxis=loc/local_increment+_local_array_indices[idim][proc_axis]; + global+=increment*iaxis; + loc = loc%local_increment; + //} + //else + //{ + //cout << "cyclic Not implemented yet"<myRank(); + int nb_elem = 1; + int increment=1; + for (int i=_dimension-1; i>=0; i--) + { + increment *=_nb_procs_per_dim[i]; + int idim=position%increment; + position=position/increment; + //cout << "i idim dimension"< +namespace ParaMEDMEM +{ + +class CommInterface +{ +public: + CommInterface(){} + virtual ~CommInterface(){} + int commSize(MPI_Comm comm, int* size) const { return MPI_Comm_size(comm,size);} + int commRank(MPI_Comm comm, int* rank) const { return MPI_Comm_rank(comm,rank);} + int commGroup(MPI_Comm comm, MPI_Group* group) const + {return MPI_Comm_group(comm, group);} + int groupIncl(MPI_Group group, int size, int* ranks, MPI_Group* group_output) const + {return MPI_Group_incl(group, size, ranks, group_output);} + int commCreate(MPI_Comm comm, MPI_Group group, MPI_Comm* comm_output) const + {return MPI_Comm_create(comm,group,comm_output);} + int groupFree(MPI_Group* group) const {return MPI_Group_free(group);} + int commFree(MPI_Comm* comm) const {return MPI_Comm_free(comm);} + int broadcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)const + {return MPI_Bcast(buffer, count, datatype, root, comm);} + int send(void* buffer, int count, MPI_Datatype datatype, int target, int tag, MPI_Comm comm) const + {return MPI_Send(buffer,count, datatype, target, tag, comm);} + int recv(void* buffer, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status* status) const + {return MPI_Recv(buffer,count, datatype, source, tag, comm, status);} + int allToAllV(void* sendbuf, int* sendcounts, int* senddispls, MPI_Datatype sendtype, + void* recvbuf, int* recvcounts, int* recvdispls, MPI_Datatype recvtype, + MPI_Comm comm) const + {return MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype, + recvbuf, recvcounts, recvdispls, recvtype, + comm);} + int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype, + void* recvbuf, int recvcount, MPI_Datatype recvtype, + MPI_Comm comm) const + {return MPI_Allgather(sendbuf,sendcount, sendtype, recvbuf, recvcount, recvtype, comm); + } + +}; + +} + +#endif /*COMMINTERFACE_HXX_*/ diff --git a/src/ParaMEDMEM/ComponentTopology.cxx b/src/ParaMEDMEM/ComponentTopology.cxx new file mode 100644 index 000000000..a310500d6 --- /dev/null +++ b/src/ParaMEDMEM/ComponentTopology.cxx @@ -0,0 +1,82 @@ +#include "ComponentTopology.hxx" +#include "MEDMEM_Exception.hxx" + +namespace ParaMEDMEM +{ + +/* Generic constructor for \a nb_comp components equally parted + * in \a nb_blocks blocks + */ + +ComponentTopology::ComponentTopology(int nb_comp, ProcessorGroup* group):_proc_group(group) +{ + int nb_blocks=group->size(); + + if (nb_blocks>nb_comp) throw MEDMEM::MEDEXCEPTION( + LOCALIZED("ComponentTopology Number of components must be larger than number of blocks")); + + component_array.resize(nb_blocks+1); + component_array[0]=0; + for (int i=1; i<=nb_blocks; 
i++) + { + component_array[i]=component_array[i-1]+nb_comp/nb_blocks; + if (i<=nb_comp%nb_blocks) + component_array[i]++; + } + +} + +/* Generic constructor for \a nb_comp components equally parted + * in \a nb_blocks blocks + */ + +ComponentTopology::ComponentTopology(int nb_comp, int nb_blocks):_proc_group(0) +{ + if (nb_blocks>nb_comp) throw MEDMEM::MEDEXCEPTION( + LOCALIZED("ComponentTopology Number of components must be larger than number of blocks")); + + component_array.resize(nb_blocks+1); + component_array[0]=0; + for (int i=1; i<=nb_blocks; i++) + { + component_array[i]=component_array[i-1]+nb_comp/nb_blocks; + if (i<=nb_comp%nb_blocks) + component_array[i]++; + } + +} +//!Constructor for one block of \a nb_comp components +ComponentTopology::ComponentTopology(int nb_comp):_proc_group(0) +{ + + component_array.resize(2); + component_array[0]=0; + component_array[1]=nb_comp; + +} + +//! Constructor for one component +ComponentTopology::ComponentTopology() +{ + component_array.resize(2); + component_array[0]=0; + component_array[1]=1; + +} +ComponentTopology::~ComponentTopology() +{ +} + +int ComponentTopology::nbLocalComponents() const{ + if (_proc_group==0) return nbComponents(); + + int nbcomp; + int myrank = _proc_group->myRank(); + cout << "nbLocalComp "< +#include "ProcessorGroup.hxx" +#include "Topology.hxx" + + +namespace ParaMEDMEM +{ +class ComponentTopology +{ +public: + ComponentTopology(int nb_comp, ProcessorGroup* group); + ComponentTopology(int nb_comp, int nb_blocks); + ComponentTopology(int nb_comp); + ComponentTopology(); + virtual ~ComponentTopology(); + //!returns the number of MED components in the topology + int nbComponents() const {return component_array[component_array.size()-1];} + //!returns the number of MED components on local processor + int nbLocalComponents() const ; + //!returns the number of blocks in the topology + int nbBlocks()const {return component_array.size()-1;} + //!returns the block structure + const vector * getBlockIndices() const {return &component_array;} + const ProcessorGroup* getProcGroup()const {return _proc_group;} +private: + std::vector component_array; + ProcessorGroup* _proc_group; +}; + +} + +#endif /*COMPONENTTOPOLOGY_HXX_*/ diff --git a/src/ParaMEDMEM/DEC.cxx b/src/ParaMEDMEM/DEC.cxx new file mode 100644 index 000000000..773e2618c --- /dev/null +++ b/src/ParaMEDMEM/DEC.cxx @@ -0,0 +1,32 @@ + +/*! 
Data Exchange Channel + * Interface class for creation of a link between two + * MPI groups for exhanging mesh or field data*/ + +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "DEC.hxx" + +namespace ParaMEDMEM +{ +void DEC::attachTargetField(const ParaFIELD* field) +{ + _target_field=field; + if (field!=0) + { + BlockTopology* topo=dynamic_cast(field->getTopology()); + _comm_interface=&(topo->getProcGroup()->getCommInterface()); + } +} +void DEC::attachSourceField(const ParaFIELD* field) +{_source_field=field; + if (field!=0) + { + BlockTopology* topo=dynamic_cast(field->getTopology()); + _comm_interface=&(topo->getProcGroup()->getCommInterface()); + } +} +} diff --git a/src/ParaMEDMEM/DEC.hxx b/src/ParaMEDMEM/DEC.hxx new file mode 100644 index 000000000..eb7be98a9 --- /dev/null +++ b/src/ParaMEDMEM/DEC.hxx @@ -0,0 +1,32 @@ +#ifndef DEC_HXX_ +#define DEC_HXX_ + +namespace ParaMEDMEM +{ +class ProcessorGroup; +class ParaFIELD; +class CommInterface; +class DEC +{ +public: + DEC():_source_field(0),_target_field(0){} + void attachTargetField(const ParaFIELD* field); + void attachSourceField(const ParaFIELD* field) ; + virtual void prepareSourceDE()=0; + virtual void prepareTargetDE()=0; + virtual void recvData()=0; + virtual void sendData()=0; + virtual void synchronize()=0; + virtual ~DEC(){} + virtual void computeProcGroup(){}; +protected: + const ParaFIELD* _source_field; + const ParaFIELD* _target_field; + //! Processor group representing the union of target and source processors + ProcessorGroup* _group; + const CommInterface* _comm_interface; +}; + +} + +#endif /*DEC_HXX_*/ diff --git a/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx b/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx new file mode 100644 index 000000000..c1f2bb7cc --- /dev/null +++ b/src/ParaMEDMEM/ExplicitCoincidentDEC.cxx @@ -0,0 +1,284 @@ +#include +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "MPIProcessorGroup.hxx" +#include "ExplicitCoincidentDEC.hxx" + +namespace ParaMEDMEM +{ + +ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0) +{ +} + +ExplicitCoincidentDEC::~ExplicitCoincidentDEC() +{ +} + +/*! Synchronization process for exchanging topologies + */ +void ExplicitCoincidentDEC::synchronize() +{ + if (_source_field!=0) + _toposource = dynamic_cast(_source_field->getTopology()); + if (_target_field!=0) + _topotarget = dynamic_cast(_target_field->getTopology()); + + // Transmitting source topology to target code + broadcastTopology(*_toposource,*_topotarget,1000); + // Transmitting target topology to source code + //broadcastTopology(_topotarget,2000); + //checkCompatibility(_toposource,_topotarget); +} + +/*! 
Creates the arrays necessary for the data transfer + * and fills the send array with the values of the + * source field + * */ +void ExplicitCoincidentDEC::prepareSourceDE() +{ + //////////////////////////////////// + //Step 1 : buffer array creation + + if (!_toposource->getProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface()); + + int myranksource = _toposource->getProcGroup()->myRank(); + + vector * target_arrays=new vector[_topotarget->getProcGroup()->size()]; + + //cout<<" topotarget size"<< _topotarget->getProcGroup()->size()< getNbLocalElements(); + for (int ielem=0; ielem< nb_local ; ielem++) + { + pair target_local =_distant_elems[ielem]; + target_arrays[target_local.first].push_back(target_local.second); + } + + int union_size=group->size(); + + _sendcounts=new int[union_size]; + _senddispls=new int[union_size]; + _recvcounts=new int[union_size]; + _recvdispls=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _sendcounts[i]=0; + _recvcounts[i]=0; + _recvdispls[i]=0; + } + _senddispls[0]=0; + + for (int iproc=0; iproc < _topotarget->getProcGroup()->size(); iproc++) + { + //converts the rank in target to the rank in union communicator + int unionrank=group->translateRank(_topotarget->getProcGroup(),iproc); + _sendcounts[unionrank]=target_arrays[iproc].size(); + } + + for (int iproc=1; iprocsize();iproc++) + _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1]; + + _sendbuffer = new double [nb_local ]; + + ///////////////////////////////////////////////////////////// + //Step 2 : filling the buffers with the source field values + + int* counter=new int [_topotarget->getProcGroup()->size()]; + counter[0]=0; + for (int i=1; i<_topotarget->getProcGroup()->size(); i++) + counter[i]=counter[i-1]+target_arrays[i-1].size(); + + + const double* value = _source_field->getField()->getValue(); + //cout << "Nb local " << nb_local<localToGlobal(make_pair(myranksource, ielem)); + //int global=_toposource->localToGlobal(ielem); + int target_local =_topotarget->globalToLocal(global); + //cout <<"global : "<< global<<" local :"<getProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface()); + + //int myranktarget = _topotarget->getProcGroup()->myRank(); + + vector < vector > source_arrays(_toposource->getProcGroup()->size()); + int nb_local = _topotarget-> getNbLocalElements(); + for (int ielem=0; ielem< nb_local ; ielem++) + { + pair source_local =_distant_elems[ielem]; + source_arrays[source_local.first].push_back(source_local.second); + } + int union_size=group->size(); + _recvcounts=new int[union_size]; + _recvdispls=new int[union_size]; + _sendcounts=new int[union_size]; + _senddispls=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _sendcounts[i]=0; + _recvcounts[i]=0; + _recvdispls[i]=0; + } + for (int iproc=0; iproc < _toposource->getProcGroup()->size(); iproc++) + { + //converts the rank in target to the rank in union communicator + int unionrank=group->translateRank(_toposource->getProcGroup(),iproc); + _recvcounts[unionrank]=source_arrays[iproc].size(); + } + for (int i=1; icontainsMyRank()) + { + toposend.serialize(serializer, size); + for (int iproc=0; iproc< group->size(); iproc++) + { + int itarget=(iproc+toposend.getProcGroup()->myRank())%group->size(); + if (!toposend.getProcGroup()->contains(itarget)) + { + int nbelem = toposend.getNbLocalElements(); + 
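// Two-message handshake: the element count is sent first so that the
+ // receiver can allocate its buffer before the serialized topology
+ // itself arrives.
+ 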
_comm_interface->send(&nbelem,1,MPI_INTEGER, itarget,tag+itarget,*(group->getComm())); + _comm_interface->send(&serializer, size, MPI_INTEGER, itarget, tag+itarget,*(group->getComm())); + } + } + } + else + { + vector size (group->size()); + + for (int iproc=0; iprocsize();iproc++) + { + int isource = iproc; + if (!toporecv.getProcGroup()->contains(isource)) + { + int nbelem; + _comm_interface->recv(&nbelem, 1, MPI_INTEGER, isource, tag+isource, *(group->getComm()), &status); + int* buffer = new int[nbelem]; + _comm_interface->recv(buffer, nbelem, MPI_INTEGER, isource,tag+isource, *(group->getComm()), &status); + + ExplicitTopology* topotemp=new ExplicitTopology(); + topotemp->unserialize(buffer, *_comm_interface); + delete[] buffer; + + for (int ielem=0; ielemglobalToLocal(global); + if (sendlocal!=-1) + { + size[iproc]++; + _distant_elems.insert(make_pair(ielem, make_pair(iproc,sendlocal))); + } + } + delete topotemp; + } + } + } + MESSAGE (" rank "<myRank()<< " broadcastTopology is over"); +} + +void ExplicitCoincidentDEC::recvData() +{ + //MPI_COMM_WORLD is used instead of group because there is no + //mechanism for creating the union group yet + MESSAGE("recvData"); + for (int i=0; i< 4; i++) + cout << _recvcounts[i]<<" "; + cout <allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, + _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD); + cout<<"end AllToAll"<getNbLocalElements(); + double* value=new double[nb_local]; + int myranktarget=_topotarget->getProcGroup()->myRank(); + vector counters(_toposource->getProcGroup()->size()); + counters[0]=0; + for (int i=0; i<_toposource->getProcGroup()->size()-1; i++) + { + MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface); + int worldrank=group->translateRank(_toposource->getProcGroup(),i); + counters[i+1]=counters[i]+_recvcounts[worldrank]; + } + + for (int ielem=0; ielemlocalToGlobal(make_pair(myranktarget, ielem)); + int source_local =_toposource->globalToLocal(global); + value[ielem]=_recvbuffer[counters[source_local]++]; + } + + + _target_field->getField()->setValue(value); +} + +void ExplicitCoincidentDEC::sendData() +{ + MESSAGE ("sendData"); + for (int i=0; i< 4; i++) + cout << _sendcounts[i]<<" "; + cout <allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, + _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD); + cout<<"end AllToAll"< + +namespace ParaMEDMEM +{ +class DEC; +class BlockTopology; +class ExplicitCoincidentDEC: public DEC +{ +public: + ExplicitCoincidentDEC(); + virtual ~ExplicitCoincidentDEC(); + void synchronize(); + void broadcastTopology(BlockTopology*&, int tag); + void broadcastTopology(const ExplicitTopology& toposend, ExplicitTopology& toporecv, int tag); + + void prepareSourceDE(); + void prepareTargetDE(); + void recvData(); + void sendData(); +private : + + ExplicitTopology* _toposource; + ExplicitTopology* _topotarget; + int* _sendcounts; + int* _recvcounts; + int* _senddispls; + int* _recvdispls; + double* _recvbuffer; + double* _sendbuffer; + std::map > _distant_elems; +}; + +} + +#endif /*ExplicitCOINCIDENTDEC_HXX_*/ + diff --git a/src/ParaMEDMEM/ExplicitTopology.cxx b/src/ParaMEDMEM/ExplicitTopology.cxx new file mode 100644 index 000000000..e953bfe94 --- /dev/null +++ b/src/ParaMEDMEM/ExplicitTopology.cxx @@ -0,0 +1,91 @@ +#include "MEDMEM_Mesh.hxx" +#include "MEDMEM_Support.hxx" +#include "CommInterface.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "ParaSUPPORT.hxx" +#include "ParaMESH.hxx" +#include "Topology.hxx" 
+#include "ExplicitTopology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" + +#include +#include + +using namespace std; +using namespace MEDMEM; +namespace ParaMEDMEM +{ + +ExplicitTopology::ExplicitTopology(const ParaSUPPORT& parasupport ): +_proc_group(parasupport.getMesh()->getBlockTopology()->getProcGroup()){ + _nb_elems=parasupport.getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS); + MED_EN::medEntityMesh entity= parasupport.getSupport()->getEntity(); + const int* global=parasupport.getMesh()->getGlobalNumbering(entity); + _loc2glob=new int[_nb_elems]; + + if (parasupport.getSupport()->isOnAllElements()) + { + for (int i=0; i<_nb_elems; i++) + { + _loc2glob[i]=global[i]; + _glob2loc[global[i]]=i; + } + } + else + { + const int* number= parasupport.getSupport()->getNumber(MED_EN::MED_ALL_ELEMENTS); + for (int i=0; i<_nb_elems; i++) + { + int local=number[i]; + _loc2glob[i]=global[local]; + _glob2loc[global[local]]=i; + } + } +} + + +ExplicitTopology::~ExplicitTopology() +{ +} + + +/*! Serializes the data contained in the Explicit Topology + * for communication purposes*/ +void ExplicitTopology::serialize(int* & serializer, int& size) const +{ + vector buffer; + + buffer.push_back(_nb_elems); + for (int i=0; i<_nb_elems; i++) + { + buffer.push_back(_loc2glob[i]); + } + + serializer=new int[buffer.size()]; + size= buffer.size(); + copy(buffer.begin(), buffer.end(), serializer); + +} +/*! Unserializes the data contained in the Explicit Topology + * after communication. Uses the same structure as the one used for serialize() + * + * */ +void ExplicitTopology::unserialize(const int* serializer,const CommInterface& comm_interface) +{ + const int* ptr_serializer=serializer; + cout << "unserialize..."< +#include +#include +#include "ProcessorGroup.hxx" +#include + +using namespace std; +using namespace __gnu_cxx; + +namespace MEDMEM +{ + class GRID; +} + +namespace ParaMEDMEM +{ +class Topology; +class ComponentTopology; + +class ExplicitTopology: public Topology +{ +public: + ExplicitTopology(){}; + ExplicitTopology(const ParaSUPPORT&); + virtual ~ExplicitTopology(); + + inline int getNbElements()const; + inline int getNbLocalElements() const; + const ProcessorGroup* getProcGroup()const {return _proc_group;}; +// inline std::pair globalToLocal (const int global) const { +// pair local; +// local.first=_proc_group->myRank(); +// local.second=globalToLocal(global);} + int localToGlobal (const std::pair local) const {return localToGlobal(local.second);} + inline int localToGlobal(int) const; + inline int globalToLocal(int) const; + void serialize(int* & serializer, int& size) const ; + void unserialize(const int* serializer, const CommInterface& comm_interface); +private: + //Processor group + const ProcessorGroup* _proc_group; + //nb of elements + int _nb_elems; + //mapping local to global + int* _loc2glob; + //mapping global to local + hash_map _glob2loc; +}; + +//!converts a pair to a global number +inline int ExplicitTopology::globalToLocal(const int global) const { + return (_glob2loc.find(global))->second;; + } + +//!converts local number to a global number +int ExplicitTopology::localToGlobal(int local) const { + return _loc2glob[local]; + } + +//!Retrieves the number of elements for a given topology +inline int ExplicitTopology::getNbElements()const {return _nb_elems;} + +//Retrieves the local number of elements +inline int ExplicitTopology::getNbLocalElements()const +{ + return _glob2loc.size(); +} +} + + +#endif /*ExplicitTOPOLOGY_HXX_*/ diff --git 
a/src/ParaMEDMEM/MPIProcessorGroup.cxx b/src/ParaMEDMEM/MPIProcessorGroup.cxx
new file mode 100644
index 000000000..84a176fa4
--- /dev/null
+++ b/src/ParaMEDMEM/MPIProcessorGroup.cxx
@@ -0,0 +1,73 @@
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "CommInterface.hxx"
+
+#include <iostream>
+#include <set>
+#include <algorithm>
+#include <mpi.h>
+
+using namespace std;
+
+namespace ParaMEDMEM
+{
+MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
+ProcessorGroup(interface)
+{
+  _comm=MPI_COMM_WORLD;
+  _comm_interface.commGroup(MPI_COMM_WORLD, &_group);
+  int size;
+  _comm_interface.commSize(MPI_COMM_WORLD, &size);
+  for (int i=0; i<size; i++)
+    _proc_ids.insert(i);
+}
+
+MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids):
+ProcessorGroup(interface, proc_ids)
+{
+  //Creation of a communicator
+  MPI_Group group_world;
+
+  int size_world;
+  _comm_interface.commSize(MPI_COMM_WORLD, &size_world);
+  int rank_world;
+  _comm_interface.commRank(MPI_COMM_WORLD, &rank_world);
+  _comm_interface.commGroup(MPI_COMM_WORLD, &group_world);
+
+  int* ranks=new int[proc_ids.size()];
+
+  // copying proc_ids in ranks
+  copy<set<int>::const_iterator,int*> (proc_ids.begin(), proc_ids.end(), ranks);
+
+  _comm_interface.groupIncl(group_world, proc_ids.size(), ranks, &_group);
+
+  _comm_interface.commCreate(MPI_COMM_WORLD, _group, &_comm);
+  delete[] ranks;
+}
+
+MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids) :
+ProcessorGroup(proc_group.getCommInterface())
+{
+  cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)"<<endl;
+  // construction from a subset of an existing group is not implemented yet
+}
+
+MPIProcessorGroup::~MPIProcessorGroup()
+{
+  _comm_interface.groupFree(&_group);
+  if (_comm!=MPI_COMM_WORLD && _comm!=MPI_COMM_NULL)
+    _comm_interface.commFree(&_comm);
+}
+
+/*! Translates the rank of a processor belonging to \a group
+ * into its rank in this group.
+ */
+int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
+{
+  const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
+  int local_rank;
+  MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
+  return local_rank;
+}
+
+}
diff --git a/src/ParaMEDMEM/MPIProcessorGroup.hxx b/src/ParaMEDMEM/MPIProcessorGroup.hxx
new file mode 100644
index 000000000..9a5e7043b
--- /dev/null
+++ b/src/ParaMEDMEM/MPIProcessorGroup.hxx
@@ -0,0 +1,33 @@
+#ifndef MPIPROCESSORGROUP_HXX_
+#define MPIPROCESSORGROUP_HXX_
+
+#include <set>
+#include <mpi.h>
+
+using namespace std;
+namespace ParaMEDMEM
+{
+class ProcessorGroup;
+class CommInterface;
+
+class MPIProcessorGroup:public ProcessorGroup
+{
+public:
+  MPIProcessorGroup(const CommInterface& interface);
+  MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids);
+  MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids);
+  virtual ~MPIProcessorGroup();
+  void fuse (ProcessorGroup&){};
+  void intersect (ProcessorGroup&){};
+  int myRank() const {int rank; MPI_Comm_rank(_comm,&rank); return rank;}
+  bool containsMyRank() const { int rank; MPI_Group_rank(_group, &rank); return (rank!=MPI_UNDEFINED);}
+  int translateRank(const ProcessorGroup* group, int rank) const;
+  const MPI_Comm* getComm() const {return &_comm;}
+private:
+  MPI_Group _group;
+  MPI_Comm _comm;
+};
+
+}
+
+#endif /*MPIPROCESSORGROUP_HXX_*/
diff --git a/src/ParaMEDMEM/Makefile.in b/src/ParaMEDMEM/Makefile.in
new file mode 100644
index 000000000..5daf5c465
--- /dev/null
+++ b/src/ParaMEDMEM/Makefile.in
@@ -0,0 +1,116 @@
+# MED MEDMEM : MED files in memory
+#
+# Copyright (C) 2003 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
+# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# +# +# File : Makefile.in +# Author : Vincent BERGEAUD (CEA/DEN/DANS/DM2S/SFME/LGLS) +# Module : MED + +top_srcdir=@top_srcdir@ +top_builddir=../.. +srcdir=@srcdir@ +VPATH=.:$(srcdir):$(srcdir)/tests + +MACHINE=PCLINUX + +@COMMENCE@ + + +EXPORT_PYSCRIPTS = \ + + +EXPORT_HEADERS = \ +CommInterface.hxx\ +MPIProcessorGroup.hxx\ +ProcessorGroup.hxx\ +BlockTopology.hxx\ +Topology.hxx\ +ParaGRID.hxx\ +ParaMESH.hxx\ +ParaSUPPORT.hxx\ +StructuredParaSUPPORT.hxx\ +ComponentTopology.hxx\ +ExplicitTopology.hxx\ +ParaFIELD.hxx\ +DEC.hxx\ +StructuredCoincidentDEC.hxx\ +ExplicitCoincidentDEC.hxx + +# Libraries targets + +LIB=libparamed.la + +LIB_SRC = \ +MPIProcessorGroup.cxx\ +BlockTopology.cxx\ +ParaGRID.cxx\ +ParaMESH.cxx\ +ParaSUPPORT.cxx\ +StructuredParaSUPPORT.cxx\ +ComponentTopology.cxx\ +ParaFIELD.cxx\ +DEC.cxx\ +StructuredCoincidentDEC.cxx\ +ExplicitCoincidentDEC.cxx\ +ExplicitTopology.cxx + + + +# Executables targets +BIN = +BIN_SRC = +BIN_SERVER_IDL = +BIN_CLIENT_IDL = + +TEST_PROGS = test_ProcessorGroup test_BlockTopology test_ParaStructuredSupport \ +test_ParaField test_DEC test_UnstructuredDEC + +LDFLAGS+= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome +LDFLAGSFORBIN+= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome + +CPPFLAGS+=$(MED2_INCLUDES) $(HDF5_INCLUDES) $(MPI_INCLUDES) + +CXXFLAGS+=@CXXTMPDPTHFLAGS@ $(MPI_INCLUDES) +CPPFLAGS+=$(BOOST_CPPFLAGS) +#LDFLAGS+=$(MED2_LIBS) $(HDF5_LIBS) +# change motivated by the bug KERNEL4778. +LDFLAGS+=$(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 $(STDLIB) -lmedmem $(MPI_LIBS) + +#LDFLAGSFORBIN+=$(MED2_LIBS) $(HDF5_LIBS) +# change motivated by the bug KERNEL4778. 
+LDFLAGSFORBIN+=-lm $(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 -lmedmem $(MPI_LIBS) $(BOOST_LIBS) + +ifeq ($(MED_WITH_KERNEL),yes) + CPPFLAGS+= ${KERNEL_CXXFLAGS} + CXXFLAGS+= ${KERNEL_CXXFLAGS} + LDFLAGS+= ${KERNEL_LDFLAGS} -lSALOMELocalTrace + LDFLAGSFORBIN+= ${KERNEL_LDFLAGS} -lSALOMELocalTrace -lSALOMEBasics +endif + +LIBSFORBIN=$(BOOSTLIBS) $(MPI_LIBS) + +LIBS= + +# build create_mesh : +bin: + +@CONCLUDE@ diff --git a/src/ParaMEDMEM/ParaFIELD.cxx b/src/ParaMEDMEM/ParaFIELD.cxx new file mode 100644 index 000000000..f003519a6 --- /dev/null +++ b/src/ParaMEDMEM/ParaFIELD.cxx @@ -0,0 +1,109 @@ +#include "MEDMEM_Exception.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaSUPPORT.hxx" +#include "StructuredParaSUPPORT.hxx" +#include "ExplicitCoincidentDEC.hxx"; +#include "StructuredCoincidentDEC.hxx" +#include "ParaFIELD.hxx" +#include "ParaMESH.hxx" + +using namespace MEDMEM; + +namespace ParaMEDMEM +{ + +ParaFIELD::ParaFIELD(ParaSUPPORT* para_support, const ComponentTopology& component_topology) +:_support(para_support), +_component_topology(component_topology) +{ + if (dynamic_cast(para_support)!=0) + {const BlockTopology* source_topo = dynamic_cast(para_support->getTopology()); + _topology=new BlockTopology(*source_topo,component_topology); + } + else + throw MEDEXCEPTION(LOCALIZED( + "ParaFIELD constructor : Unstructured Support not taken into account with component topology yet")); + + +// int nb_components=0; +// if (component_topology.getProcGroup()!=0) + int nb_components = component_topology.nbLocalComponents(); + if (nb_components!=0) + { + _field=new FIELD (para_support->getSupport(), nb_components); + } + else return; + + _field->setName("toto"); + _field->setDescription("titi"); + _field->setNumberOfValues(para_support->getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS)); + string* compnames=new string[nb_components]; + string* compdesc=new string[nb_components]; + for (int i=0; isetComponentsNames(compnames); + _field->setComponentsDescriptions(compdesc); + _field->setIterationNumber(0); + _field->setOrderNumber(0); + _field->setTime(0.0); +} + +ParaFIELD::ParaFIELD(MEDMEM::driverTypes driver_type, const string& file_name, + const string& driver_name, const ComponentTopology& component_topology) + throw (MEDEXCEPTION):_component_topology(component_topology){} +ParaFIELD::~ParaFIELD(){} + +void ParaFIELD::write(MEDMEM::driverTypes driverType, const string& fileName, const string& meshName){ + BlockTopology* topo = dynamic_cast (_topology); + int myrank = topo->getProcGroup()->myRank(); + ostringstream name; + name <addDriver(driverType, name.str().c_str(), meshName); + _field->write(driver); +} + +void ParaFIELD::synchronizeTarget(ParaFIELD* source_field){ + DEC* data_channel; + if (dynamic_cast(_topology)!=0) + { + data_channel=new StructuredCoincidentDEC(); + } + else + { + data_channel=new ExplicitCoincidentDEC(); + } + data_channel->attachTargetField(this); + data_channel->synchronize(); + data_channel->prepareTargetDE(); + data_channel->sendData(); + + delete data_channel; +} + +void ParaFIELD::synchronizeSource(ParaFIELD* target_field){ + DEC* data_channel; + if (dynamic_cast(_topology)!=0) + { + data_channel=new StructuredCoincidentDEC(); + } + else + { + data_channel=new ExplicitCoincidentDEC(); + } + data_channel->attachSourceField(this); + data_channel->synchronize(); + data_channel->prepareSourceDE(); + data_channel->sendData(); + + delete data_channel; +} + +} diff --git a/src/ParaMEDMEM/ParaFIELD.hxx 
b/src/ParaMEDMEM/ParaFIELD.hxx new file mode 100644 index 000000000..dcb6c288e --- /dev/null +++ b/src/ParaMEDMEM/ParaFIELD.hxx @@ -0,0 +1,42 @@ +#ifndef PARAFIELD_HXX_ +#define PARAFIELD_HXX_ + +#include "MEDMEM_define.hxx" +#include "MEDMEM_GenDriver.hxx" +#include "MEDMEM_Field.hxx" + +namespace MEDMEM{ + class MEDEXCEPTION; +} + + +namespace ParaMEDMEM +{ +class ComponentTopology; +class ParaSUPPORT; + +class ParaFIELD +{ +public: + ParaFIELD(ParaSUPPORT* support, const ComponentTopology& component_topology); + ParaFIELD(ParaSUPPORT* support); + ParaFIELD(MEDMEM::driverTypes driver_type, const string& file_name, + const string& driver_name, const ComponentTopology& component_topology) + throw (MEDMEM::MEDEXCEPTION); + virtual ~ParaFIELD(); + void write(MEDMEM::driverTypes driverType, const string& fileName="", const string& meshName=""); + void synchronizeTarget(ParaFIELD* source_field); + void synchronizeSource(ParaFIELD* target_field); + MEDMEM::FIELD* getField() const {return _field;} + Topology* getTopology() const {return _topology;} + int nbComponents() const {return _component_topology.nbComponents();} +private: + const ComponentTopology& _component_topology; + Topology* _topology; + MEDMEM::FIELD* _field; + ParaSUPPORT* _support; +}; + +} + +#endif /*PARAFIELD_HXX_*/ diff --git a/src/ParaMEDMEM/ParaGRID.cxx b/src/ParaMEDMEM/ParaGRID.cxx new file mode 100644 index 000000000..b60c63f47 --- /dev/null +++ b/src/ParaMEDMEM/ParaGRID.cxx @@ -0,0 +1,109 @@ +#include +#include + +#include "ParaGRID.hxx" + +using namespace std; + +namespace ParaMEDMEM +{ + +ParaGRID::ParaGRID(MEDMEM::GRID* global_grid, Topology* topology)throw (MEDMEM::MEDEXCEPTION): +_name(global_grid->getName()) +{ + + _block_topology = dynamic_cast(topology); + if (_block_topology==0) throw MEDEXCEPTION(LOCALIZED( + "ParaGRID::ParaGRID topology must be block topology")); + + if (!_block_topology->getProcGroup()->containsMyRank()) return; + + int dimension=_block_topology->getDimension() ; + if (dimension != global_grid->getMeshDimension()) + throw MEDEXCEPTION(LOCALIZED("ParaGrid::ParaGrid incompatible topology")); + + vector > xyz_array(dimension); + vector > local_indices = _block_topology->getLocalArrayMinMax(); + int myrank=_block_topology->getProcGroup()->myRank(); + vector coordinates_names; + vector coordinates_units; + for (int idim=0; idimgetArrayValue(idim+1,i)); + coordinates_names.push_back(global_grid->getCoordinatesNames()[idim]); + coordinates_units.push_back(global_grid->getCoordinatesUnits()[idim]); + } + _grid=new MEDMEM::GRID(xyz_array, + coordinates_names, + coordinates_units); + _grid->setName(global_grid->getName()); + _grid->setDescription(global_grid->getDescription()); + +} + +ParaGRID::ParaGRID(MEDMEM::driverTypes driver_type, const string& file_name, + const string& driver_name, int domain_id) +throw (MEDMEM::MEDEXCEPTION){}; +ParaGRID::~ParaGRID(){if (_grid !=0) delete _grid;}; + + +/*! 
method for writing a distributed grid + * + * \param driverType type of driver used (MED_DRIVER,VTK_DRIVER) + * \param master_filename name of the master file + */ + +void ParaGRID::write(MEDMEM::driverTypes driverType, const string& master_filename) +throw (MEDMEM::MEDEXCEPTION){ + + BEGIN_OF("ParaMEDMEM::ParaGRID::write()"); + + if (!_block_topology->getProcGroup()->containsMyRank()) return; + + int myrank=_block_topology->getProcGroup()->myRank(); + + ofstream file(master_filename.c_str()); + int nbdomains= _block_topology->getProcGroup()->size(); + vector filename(nbdomains); + + + //loop on the domains + for (int i=0; iaddDriver(MEDMEM::MED_DRIVER,filename[myrank],_name); + + MESSAGE("Start writing"); + _grid->write(id); + + END_OF("ParaMEDMEM::ParaGRID::write()"); +}; + + + +} diff --git a/src/ParaMEDMEM/ParaGRID.hxx b/src/ParaMEDMEM/ParaGRID.hxx new file mode 100644 index 000000000..6d977d585 --- /dev/null +++ b/src/ParaMEDMEM/ParaGRID.hxx @@ -0,0 +1,51 @@ +#ifndef PARAGRID_HXX_ +#define PARAGRID_HXX_ + +#include + +#include "MEDMEM_Exception.hxx" +#include "MEDMEM_define.hxx" +#include "MEDMEM_GenDriver.hxx" +#include "MEDMEM_Grid.hxx" +#include "MEDMEM_ConnectZone.hxx" +#include "ProcessorGroup.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" + +using namespace MEDMEM; +namespace ParaMEDMEM +{ + + +const int MYRANK_ID=-100; +class ParaGRID +{ +public: + ParaGRID(MEDMEM::GRID* global_grid, Topology* topology)throw (MEDMEM::MEDEXCEPTION); + ParaGRID(MEDMEM::driverTypes driver_type, const string& file_name, + const string& driver_name, int domain_id=MYRANK_ID) + throw (MEDMEM::MEDEXCEPTION); + + void write(MEDMEM::driverTypes driverType, const string& fileName="") + throw (MEDMEM::MEDEXCEPTION); + + ParaMEDMEM::BlockTopology * getBlockTopology() const {return _block_topology;} + virtual ~ParaGRID(); + MEDMEM::GRID* getGrid() const {return _grid;} + +private: + MEDMEM::GRID* _grid; + //grid name + const string _name; + // structured grid topology + ParaMEDMEM::BlockTopology* _block_topology; + // stores the x,y,z axes on the global grid + std::vector > _global_axis; + //id of the local grid + int _my_domain_id; + +}; + +} + +#endif /*PARAGRID_H_*/ diff --git a/src/ParaMEDMEM/ParaMESH.cxx b/src/ParaMEDMEM/ParaMESH.cxx new file mode 100644 index 000000000..c0914d098 --- /dev/null +++ b/src/ParaMEDMEM/ParaMESH.cxx @@ -0,0 +1,273 @@ +#include +#include + +#include "ParaMESH.hxx" +#include "ProcessorGroup.hxx" +#include "MPIProcessorGroup.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "MEDMEM_ConnectZone.hxx" + +//inclusion for the namespaces +#include "MEDMEM_Field.hxx" +using namespace std; + +namespace ParaMEDMEM +{ + + +ParaMESH::ParaMESH(MEDMEM::driverTypes driver_type, const string& filename, + const ProcessorGroup& group) +throw (MEDMEM::MEDEXCEPTION){ + + BEGIN_OF("MEDSPLITTER::MESHCollectionDriver::read()") + + + string meshstring; + char file[256]; + char meshname[MED_TAILLE_NOM]; + int domain_id=group.myRank(); + + // reading ascii master file + try{ + MESSAGE("Start reading"); + ifstream asciiinput(filename.c_str()); + char charbuffer[512]; + asciiinput.getline(charbuffer,512); + + while (charbuffer[0]=='#') + { + asciiinput.getline(charbuffer,512); + } + + //reading number of domains + int nbdomain=atoi(charbuffer); + cout << "nb domain"<>nbdomain; + + string mesh; + int idomain; + string host; + + for (int i=0; i<=domain_id;i++) + { + //reading information about the domain + + + asciiinput >> mesh >> idomain >> meshstring >> host >> 
_medfilename; + + if (idomain!=i+1) + { + cerr<<"Error : domain must be written from 1 to N in asciifile descriptor"<setName(string(name)); + cz->setDescription(joint_description); + cz->setLocalDomainNumber(domain_id); + cz->setDistantDomainNumber(distant); + //cz->setLocalMesh((m_collection->getMesh())[i]); + //cz->setDistantMesh((m_collection->getMesh())[distant]); + cz->setNodeCorresp(node_corresp,ncouples); + _connect_zone.push_back(cz); + + }//loop on correspondances + }//loop on joints + + // + // Reading global numbering + // + int ncell=_mesh->getNumberOfElements(MED_EN::MED_CELL,MED_EN::MED_ALL_ELEMENTS); + int * array=new int[ncell]; + int offset=0; + MESSAGE("Reading cell global numbering for mesh "<< domain_id); + MED_EN::MESH_ENTITIES::const_iterator currentEntity; + list::const_iterator iter; + currentEntity = MED_EN::meshEntities.find(MED_EN::MED_CELL); + char meshchar[MED_TAILLE_NOM]; + strcpy(meshchar,_mesh->getName().c_str()); + for (iter = (*currentEntity).second.begin();iter != (*currentEntity).second.end(); iter++) + { + MED_EN::medGeometryElement type=*iter; + if (type/100 != _mesh->getMeshDimension()) continue; + int ntype = _mesh->getNumberOfElements(MED_EN::MED_CELL,type); + if (ntype==0) continue; + med_2_2::MEDglobalNumLire(fid,meshname, array+offset, ntype, + med_2_2::MED_MAILLE, (med_2_2::med_geometrie_element)type); + offset+=ntype; + } + _cellglobal=array; + + MESSAGE("Reading node global numbering"); + int nnode= _mesh->getNumberOfNodes(); + array=new int[nnode]; + med_2_2::MEDglobalNumLire(fid,meshname, array, nnode, + med_2_2::MED_NOEUD, med_2_2::MED_POINT1); + _nodeglobal=array; + + MESSAGE("Reading face global numbering for mesh "<getNumberOfElements(MED_EN::MED_FACE,MED_EN::MED_ALL_ELEMENTS); + array=new int[nbface]; + currentEntity = MED_EN::meshEntities.find(MED_EN::MED_FACE); + offset=0; + for (iter = (*currentEntity).second.begin();iter != (*currentEntity).second.end(); iter++) + { + MED_EN::medGeometryElement type=*iter; + if (type/100 != _mesh->getMeshDimension()-1) continue; + int ntype = _mesh->getNumberOfElements(MED_EN::MED_FACE,type); + if (ntype==0) continue; + med_2_2::MEDglobalNumLire(fid,meshname, array+offset, ntype, + med_2_2::MED_FACE, (med_2_2::med_geometrie_element)type); + offset+=ntype; + } + _faceglobal=array; +// faceglobal[i]=0; + med_2_2::MEDfermer(fid); + + _block_topology=new BlockTopology(group,ncell); + + MESSAGE("end of read"); + + }//of try + catch(...) + { + cerr << "I/O error reading parallel MED file"<setTopology( + // new ParallelTopology((m_collection->getMesh()),(m_collection->getCZ()),cellglobal,nodeglobal,faceglobal) + // ); + + END_OF("MEDSPLITTER::MESHCollectionDriver::read()") +}; + + +ParaMESH::~ParaMESH(){if (_mesh !=0) delete _mesh;}; + + +/*! 
method for writing a distributed MESH + * + * \param driverType type of driver used (MED_DRIVER,VTK_DRIVER) + * \param master_filename name of the master file + */ + +void ParaMESH::write(MEDMEM::driverTypes driverType, const string& master_filename) +throw (MEDMEM::MEDEXCEPTION){ + + BEGIN_OF("ParaMEDMEM::ParaMESH::write()"); + + if (!_block_topology->getProcGroup()->containsMyRank()) return; + + int myrank=_block_topology->getProcGroup()->myRank(); + cout << "Myrank in write " << myrank<getProcGroup()->size(); + vector filename(nbdomains); + + + //loop on the domains + for (int i=0; iaddDriver(MEDMEM::MED_DRIVER,filename[myrank],_name); + + MESSAGE("Start writing"); + _mesh->write(id); + _mesh->rmDriver(id); + END_OF("ParaMEDMEM::ParaMESH::write()"); +}; + +const int* ParaMESH::getGlobalNumbering(const MED_EN::medEntityMesh entity)const +{ + switch (entity) + { + case MED_CELL: + return _cellglobal; + case MED_FACE : + return _faceglobal; + case MED_EDGE : + return _edgeglobal; + case MED_NODE: + return _nodeglobal; + } +} + +} diff --git a/src/ParaMEDMEM/ParaMESH.hxx b/src/ParaMEDMEM/ParaMESH.hxx new file mode 100644 index 000000000..a99612ab1 --- /dev/null +++ b/src/ParaMEDMEM/ParaMESH.hxx @@ -0,0 +1,52 @@ +#ifndef PARAMESH_HXX_ +#define PARAMESH_HXX_ +#include +#include + +#include "MEDMEM_Exception.hxx" +#include "MEDMEM_define.hxx" +#include "MEDMEM_GenDriver.hxx" +#include "MEDMEM_Mesh.hxx" +#include "MEDMEM_ConnectZone.hxx" +#include "ProcessorGroup.hxx" + +namespace ParaMEDMEM +{ +class BlockTopology; + +class ParaMESH +{ +public: + ParaMESH(MEDMEM::driverTypes driver_type, const std::string& file_name, + const ProcessorGroup& group) + throw (MEDMEM::MEDEXCEPTION); + void write(MEDMEM::driverTypes driverType, const std::string& fileName="") + throw (MEDMEM::MEDEXCEPTION); + virtual ~ParaMESH(); + MEDMEM::MESH* getMesh() const {return _mesh;} + ParaMEDMEM::BlockTopology* getBlockTopology()const {return _block_topology;} + const string& getFilename() const {return _medfilename;} + const int* getGlobalNumbering(MED_EN::medEntityMesh)const; +private: + //mesh object underlying the ParaMESH object + MEDMEM::MESH* _mesh; + //name of the mesh + const string _name; + //connect zone + std::vector _connect_zone; + //id of the local grid + int _my_domain_id; + //global topology of the cells + ParaMEDMEM::BlockTopology* _block_topology; + //name of the local filename (!= masterfilename) + string _medfilename; + // pointers to global numberings + int* _nodeglobal; + int* _edgeglobal; + int* _faceglobal; + int* _cellglobal; +}; + +} + +#endif /*PARAMESH_H_*/ diff --git a/src/ParaMEDMEM/ParaMESHCollection.cxx b/src/ParaMEDMEM/ParaMESHCollection.cxx new file mode 100644 index 000000000..4d8e38582 --- /dev/null +++ b/src/ParaMEDMEM/ParaMESHCollection.cxx @@ -0,0 +1,9 @@ +#include "ParaMESHCollection.hxx" + +ParaMESHCollection::ParaMESHCollection() +{ +} + +ParaMESHCollection::~ParaMESHCollection() +{ +} diff --git a/src/ParaMEDMEM/ParaMESHCollection.hxx b/src/ParaMEDMEM/ParaMESHCollection.hxx new file mode 100644 index 000000000..1044b32cb --- /dev/null +++ b/src/ParaMEDMEM/ParaMESHCollection.hxx @@ -0,0 +1,27 @@ +#ifndef PARAMESHCOLLECTION_HXX_ +#define PARAMESHCOLLECTION_HXX_ + +#include + +#include "MEDMEM_Exception.hxx" +#include "MEDMEM_define.hxx" +#include "MEDMEM_GenDriver.hxx" +#include "MEDMEM_Mesh.hxx" +#include "MEDMEM_ConnectZone.hxx" +#include "ParaMEDMEM_ProcessorGroup.hxx" + +class ParaMESHCollection +{ +public: + ParaMESHCollection(); + ParaMESHCollection(MEDMEM::driverTypes 
driver_type, const string& file_name, + const string& driver_name, const ProcessorGroup& proc_group, + const int* distribution=NULL) + throw (MEDMEM::MEDEXCEPTION); + virtual ~ParaMESHCollection(); + +private : + vector _meshes; +}; + +#endif /*PARAMESHCOLLECTION_HXX_*/ diff --git a/src/ParaMEDMEM/ParaSUPPORT.cxx b/src/ParaMEDMEM/ParaSUPPORT.cxx new file mode 100644 index 000000000..b03fd5d21 --- /dev/null +++ b/src/ParaMEDMEM/ParaSUPPORT.cxx @@ -0,0 +1,19 @@ +#include "ParaSUPPORT.hxx" +#include "ParaMESH.hxx" +#include "MEDMEM_Support.hxx" + +namespace ParaMEDMEM +{ + + ParaSUPPORT::ParaSUPPORT() + { + } + + ParaSUPPORT::ParaSUPPORT(const MEDMEM::SUPPORT& support):_support(&support) {} + + ParaSUPPORT::~ParaSUPPORT() + { + } + +} + diff --git a/src/ParaMEDMEM/ParaSUPPORT.hxx b/src/ParaMEDMEM/ParaSUPPORT.hxx new file mode 100644 index 000000000..7ff301a6f --- /dev/null +++ b/src/ParaMEDMEM/ParaSUPPORT.hxx @@ -0,0 +1,29 @@ +#ifndef PARASUPPORT_HXX_ +#define PARASUPPORT_HXX_ + +namespace MEDMEM +{ + class SUPPORT; +} +namespace ParaMEDMEM +{ + class Topology; + class ParaMESH; + class ParaSUPPORT + { + public: + ParaSUPPORT(); + ParaSUPPORT(const MEDMEM::SUPPORT&); + virtual ~ParaSUPPORT(); + virtual const Topology* getTopology() const {}; + virtual const MEDMEM::SUPPORT* getSupport() const {return _support;} + virtual const ParaMESH* getMesh() const {return _mesh;} + private : + const MEDMEM::SUPPORT* _support; + const ParaMESH* _mesh; + }; + +} + + +#endif /*PARASUPPORT_HXX_*/ diff --git a/src/ParaMEDMEM/ProcessorGroup.cxx b/src/ParaMEDMEM/ProcessorGroup.cxx new file mode 100644 index 000000000..942d80165 --- /dev/null +++ b/src/ParaMEDMEM/ProcessorGroup.cxx @@ -0,0 +1,14 @@ +#include "ProcessorGroup.hxx" + +namespace ParaMEDMEM +{ + +ProcessorGroup::ProcessorGroup() +{ +} + +ProcessorGroup::~ProcessorGroup() +{ +} + +} diff --git a/src/ParaMEDMEM/ProcessorGroup.hxx b/src/ParaMEDMEM/ProcessorGroup.hxx new file mode 100644 index 000000000..d58eae0ee --- /dev/null +++ b/src/ParaMEDMEM/ProcessorGroup.hxx @@ -0,0 +1,37 @@ +#ifndef PROCESSORGROUP_HXX_ +#define PROCESSORGROUP_HXX_ +#include + + + +namespace ParaMEDMEM +{ +class CommInterface; + +class ProcessorGroup +{ +public: + + ProcessorGroup(const CommInterface& interface):_comm_interface(interface){} + ProcessorGroup(const CommInterface& interface, std::set proc_ids): + _comm_interface(interface),_proc_ids(proc_ids){} + ProcessorGroup (const ProcessorGroup& proc_group, std::set proc_ids): + _comm_interface(proc_group.getCommInterface()){} + virtual ~ProcessorGroup(){} + virtual void fuse (ProcessorGroup&)=0; + virtual void intersect (ProcessorGroup&)=0; + bool contains(int rank) const {return _proc_ids.find(rank)!=_proc_ids.end();}; + virtual bool containsMyRank() const=0; + int size() const {return _proc_ids.size();} + const CommInterface& getCommInterface()const {return _comm_interface;}; + virtual int myRank() const =0; + virtual int translateRank(const ProcessorGroup*, int) const =0; + +protected: + const CommInterface& _comm_interface; + std::set _proc_ids; +}; + +} + +#endif /*PROCESSORGROUP_HXX_*/ diff --git a/src/ParaMEDMEM/StructuredCoincidentDEC.cxx b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx new file mode 100644 index 000000000..881fca22a --- /dev/null +++ b/src/ParaMEDMEM/StructuredCoincidentDEC.cxx @@ -0,0 +1,294 @@ +#include +#include "CommInterface.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" +#include "MPIProcessorGroup.hxx" +#include 
"StructuredCoincidentDEC.hxx" + +namespace ParaMEDMEM +{ + +StructuredCoincidentDEC::StructuredCoincidentDEC():_toposource(0),_topotarget(0) +{ +} + + +StructuredCoincidentDEC::~StructuredCoincidentDEC() +{ +} + + +/*! Synchronization process for exchanging topologies + */ +void StructuredCoincidentDEC::synchronize() +{ + if (_source_field!=0) + _toposource = dynamic_cast(_source_field->getTopology()); + if (_target_field!=0) + _topotarget = dynamic_cast(_target_field->getTopology()); + + // Transmitting source topology to target code + broadcastTopology(_toposource,1000); + // Transmitting target topology to source code + broadcastTopology(_topotarget,2000); + //checkCompatibility(_toposource,_topotarget); +} + +/*! Creates the arrays necessary for the data transfer + * and fills the send array with the values of the + * source field + * */ +void StructuredCoincidentDEC::prepareSourceDE() +{ + //////////////////////////////////// + //Step 1 : buffer array creation + + if (!_toposource->getProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface()); + + int myranksource = _toposource->getProcGroup()->myRank(); + + vector * target_arrays=new vector[_topotarget->getProcGroup()->size()]; + + //cout<<" topotarget size"<< _topotarget->getProcGroup()->size()< getNbLocalElements(); + for (int ielem=0; ielem< nb_local ; ielem++) + { + // cout <<"source local :"<localToGlobal(make_pair(myranksource, ielem)); + // cout << "global "< target_local =_topotarget->globalToLocal(global); + // cout << "target local : "<size(); + + _sendcounts=new int[union_size]; + _senddispls=new int[union_size]; + _recvcounts=new int[union_size]; + _recvdispls=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _sendcounts[i]=0; + _recvcounts[i]=0; + _recvdispls[i]=0; + } + _senddispls[0]=0; + + for (int iproc=0; iproc < _topotarget->getProcGroup()->size(); iproc++) + { + //converts the rank in target to the rank in union communicator + int unionrank=group->translateRank(_topotarget->getProcGroup(),iproc); + _sendcounts[unionrank]=target_arrays[iproc].size(); + } + + for (int iproc=1; iprocsize();iproc++) + _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1]; + + _sendbuffer = new double [nb_local ]; + + ///////////////////////////////////////////////////////////// + //Step 2 : filling the buffers with the source field values + + int* counter=new int [_topotarget->getProcGroup()->size()]; + counter[0]=0; + for (int i=1; i<_topotarget->getProcGroup()->size(); i++) + counter[i]=counter[i-1]+target_arrays[i-1].size(); + + + const double* value = _source_field->getField()->getValue(); + //cout << "Nb local " << nb_local<localToGlobal(make_pair(myranksource, ielem)); + pair target_local =_topotarget->globalToLocal(global); + //cout <<"global : "<< global<<" local :"<getProcGroup()->containsMyRank()) + return; + MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface()); + + int myranktarget = _topotarget->getProcGroup()->myRank(); + + vector < vector > source_arrays(_toposource->getProcGroup()->size()); + int nb_local = _topotarget-> getNbLocalElements(); + for (int ielem=0; ielem< nb_local ; ielem++) + { + // cout <<"TS target local :"<localToGlobal(make_pair(myranktarget, ielem)); + //cout << "TS global "< source_local =_toposource->globalToLocal(global); + // cout << "TS source local : "<size(); + _recvcounts=new int[union_size]; + _recvdispls=new int[union_size]; + _sendcounts=new 
int[union_size]; + _senddispls=new int[union_size]; + + for (int i=0; i< union_size; i++) + { + _sendcounts[i]=0; + _recvcounts[i]=0; + _recvdispls[i]=0; + } + for (int iproc=0; iproc < _toposource->getProcGroup()->size(); iproc++) + { + //converts the rank in target to the rank in union communicator + int unionrank=group->translateRank(_toposource->getProcGroup(),iproc); + _recvcounts[unionrank]=source_arrays[iproc].size(); + } + for (int i=1; igetProcGroup()->myRank()==0) + { + MESSAGE ("Master rank"); + topo->serialize(serializer, size); + rank_master = group->translateRank(topo->getProcGroup(),0); + MESSAGE("Master rank world number is "<size()); + for (int i=0; i< group->size(); i++) + { + if (i!= rank_master) + _comm_interface->send(&rank_master,1,MPI_INTEGER, i,tag+i,*(group->getComm())); + } + } + else + { + MESSAGE(" rank "<myRank()<< " waiting ..."); + _comm_interface->recv(&rank_master, 1,MPI_INTEGER, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status); + MESSAGE(" rank "<myRank()<< "received master rank"<broadcast(&size, 1,MPI_INTEGER,rank_master,*(group->getComm())); + + int* buffer=new int[size]; + if (topo!=0 && topo->getProcGroup()->myRank()==0) + copy(serializer, serializer+size, buffer); + _comm_interface->broadcast(buffer,size,MPI_INTEGER,rank_master,*(group->getComm())); + + // Processors which did not possess the source topology + // unserialize it + + BlockTopology* topotemp=new BlockTopology(); + topotemp->unserialize(buffer, *_comm_interface); + + if (topo==0) + topo=topotemp; + else + delete topotemp; + + // Memory cleaning + delete[] buffer; + if (serializer!=0) + delete[] serializer; + MESSAGE (" rank "<myRank()<< " unserialize is over"); +} + + + +void StructuredCoincidentDEC::recvData() +{ + //MPI_COMM_WORLD is used instead of group because there is no + //mechanism for creating the union group yet + MESSAGE("recvData"); + for (int i=0; i< 4; i++) + cout << _recvcounts[i]<<" "; + cout <allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, + _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD); + cout<<"end AllToAll"<getNbLocalElements(); + double* value=new double[nb_local]; + int myranktarget=_topotarget->getProcGroup()->myRank(); + vector counters(_toposource->getProcGroup()->size()); + counters[0]=0; + for (int i=0; i<_toposource->getProcGroup()->size()-1; i++) + { + MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface); + int worldrank=group->translateRank(_toposource->getProcGroup(),i); + counters[i+1]=counters[i]+_recvcounts[worldrank]; + } + + for (int ielem=0; ielemlocalToGlobal(make_pair(myranktarget, ielem)); + pair source_local =_toposource->globalToLocal(global); + value[ielem]=_recvbuffer[counters[source_local.first]++]; + } + + + _target_field->getField()->setValue(value); +} + +void StructuredCoincidentDEC::sendData() +{ + MESSAGE ("sendData"); + for (int i=0; i< 4; i++) + cout << _sendcounts[i]<<" "; + cout <allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, + _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD); + cout<<"end AllToAll"<getBlockTopology()), +_grid(grid), +_mesh(0), +_entity(entity), +_support(new SUPPORT(grid->getGrid(), "support on all entities", entity)) +{ + +} +/*! 
diff --git a/src/ParaMEDMEM/StructuredParaSUPPORT.cxx b/src/ParaMEDMEM/StructuredParaSUPPORT.cxx
new file mode 100644
--- /dev/null
+++ b/src/ParaMEDMEM/StructuredParaSUPPORT.cxx
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ParaGRID.hxx"
+#include "ParaMESH.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "MEDMEM_Support.hxx"
+
+namespace ParaMEDMEM
+{
+
+/*! Constructor on all elements from a GRID */
+StructuredParaSUPPORT::StructuredParaSUPPORT(const ParaGRID* const grid, const MED_EN::medEntityMesh entity):
+_block_topology(grid->getBlockTopology()),
+_grid(grid),
+_mesh(0),
+_entity(entity),
+_support(new SUPPORT(grid->getGrid(), "support on all entities", entity))
+{
+}
+
+/*! Constructor on all elements from a MESH */
+StructuredParaSUPPORT::StructuredParaSUPPORT(const ParaMESH* const mesh, const MED_EN::medEntityMesh entity):
+_block_topology(mesh->getBlockTopology()),
+_grid(0),
+_mesh(mesh),
+_entity(entity),
+_support(new SUPPORT(mesh->getMesh(), "support on all entities", entity))
+{
+}
+
+StructuredParaSUPPORT::~StructuredParaSUPPORT()
+{
+  delete _support;
+}
+
+}//end of namespace ParaMEDMEM
diff --git a/src/ParaMEDMEM/StructuredParaSUPPORT.hxx b/src/ParaMEDMEM/StructuredParaSUPPORT.hxx
new file mode 100644
index 000000000..8072cc2b6
--- /dev/null
+++ b/src/ParaMEDMEM/StructuredParaSUPPORT.hxx
@@ -0,0 +1,41 @@
+#ifndef STRUCTUREDPARASUPPORT_HXX_
+#define STRUCTUREDPARASUPPORT_HXX_
+
+#include "ParaSUPPORT.hxx"
+#include "MEDMEM_define.hxx"
+
+using namespace MED_EN;
+
+namespace MEDMEM
+{
+  class SUPPORT;
+}
+
+namespace ParaMEDMEM
+{
+class BlockTopology;
+class ParaGRID;
+class ParaMESH;
+
+class StructuredParaSUPPORT:public ParaSUPPORT
+{
+public:
+
+  StructuredParaSUPPORT(const ParaGRID* const grid, const MED_EN::medEntityMesh entity);
+  StructuredParaSUPPORT(const ParaMESH* const mesh, const MED_EN::medEntityMesh entity);
+
+  virtual ~StructuredParaSUPPORT();
+  const Topology* getTopology() const {return _block_topology;}
+  const MEDMEM::SUPPORT* getSupport() {return _support;}
+  const ParaMESH* getParaMesh() const {return _mesh;}
+
+private:
+  const BlockTopology* const _block_topology;
+  const ParaGRID* const _grid;
+  const ParaMESH* const _mesh;
+  const MED_EN::medEntityMesh _entity;
+  const MEDMEM::SUPPORT* _support;
+
+};
+
+}
+#endif /*STRUCTUREDPARASUPPORT_HXX_*/
diff --git a/src/ParaMEDMEM/Topology.cxx b/src/ParaMEDMEM/Topology.cxx
new file mode 100644
index 000000000..b29cdf150
--- /dev/null
+++ b/src/ParaMEDMEM/Topology.cxx
@@ -0,0 +1,14 @@
+#include "Topology.hxx"
+
+namespace ParaMEDMEM
+{
+
+Topology::Topology()
+{
+}
+
+Topology::~Topology()
+{
+}
+
+}
diff --git a/src/ParaMEDMEM/Topology.hxx b/src/ParaMEDMEM/Topology.hxx
new file mode 100644
index 000000000..1ab9434da
--- /dev/null
+++ b/src/ParaMEDMEM/Topology.hxx
@@ -0,0 +1,23 @@
+#ifndef TOPOLOGY_HXX_
+#define TOPOLOGY_HXX_
+
+#include <utility>
+
+using namespace std;
+
+namespace ParaMEDMEM
+{
+
+class Topology
+{
+public:
+  Topology();
+  virtual ~Topology();
+//  virtual std::pair<int,int> globalToLocal (const int) const =0;
+//  virtual int localToGlobal (const std::pair<int,int>) const =0;
+  virtual int getNbElements() const =0;
+  virtual int getNbLocalElements() const =0;
+};
+
+}
+
+#endif /*TOPOLOGY_HXX_*/
diff --git a/src/ParaMEDMEM/UnstructuredParaSUPPORT.cxx b/src/ParaMEDMEM/UnstructuredParaSUPPORT.cxx
new file mode 100644
index 000000000..49c28ba75
--- /dev/null
+++ b/src/ParaMEDMEM/UnstructuredParaSUPPORT.cxx
@@ -0,0 +1,24 @@
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "ParaGRID.hxx"
+#include "ParaMESH.hxx"
+#include "UnstructuredParaSUPPORT.hxx"
+#include "MEDMEM_Support.hxx"
+
+namespace ParaMEDMEM
+{
+
+/*! Constructor on all elements from a MESH */
+UnstructuredParaSUPPORT::UnstructuredParaSUPPORT(const ParaMESH* const mesh, const SUPPORT* support):
+_mesh(mesh),
+_entity(support->getEntity()),
+_support(support),
+_block_topology(new BlockTopology(*(mesh->getBlockTopology()->getProcGroup()), support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS)))
+{
+}
+
+UnstructuredParaSUPPORT::~UnstructuredParaSUPPORT()
+{
+  delete _support;
+}
+
+}//end of namespace ParaMEDMEM
diff --git a/src/ParaMEDMEM/UnstructuredParaSUPPORT.hxx b/src/ParaMEDMEM/UnstructuredParaSUPPORT.hxx
new file mode 100644
index 000000000..4f663f255
--- /dev/null
+++ b/src/ParaMEDMEM/UnstructuredParaSUPPORT.hxx
@@ -0,0 +1,35 @@
+#ifndef UNSTRUCTUREDPARASUPPORT_HXX_
+#define UNSTRUCTUREDPARASUPPORT_HXX_
+
+#include "ParaSUPPORT.hxx"
+#include "MEDMEM_define.hxx"
+
+using namespace MED_EN;
+
+namespace MEDMEM
+{
+  class SUPPORT;
+}
+
+namespace ParaMEDMEM
+{
+class BlockTopology;
+class ParaMESH;
+
+class UnstructuredParaSUPPORT:public ParaSUPPORT
+{
+public:
+
+  UnstructuredParaSUPPORT(const ParaMESH* const mesh, const SUPPORT* support);
+  virtual ~UnstructuredParaSUPPORT();
+  const Topology* getTopology() const {return _block_topology;}
+  const MEDMEM::SUPPORT* getSupport() {return _support;}
+
+private:
+  const BlockTopology* const _block_topology;
+  const ParaMESH* const _mesh;
+  const MED_EN::medEntityMesh _entity;
+  const MEDMEM::SUPPORT* _support;
+
+};
+
+}
+#endif /*UNSTRUCTUREDPARASUPPORT_HXX_*/
diff --git a/src/ParaMEDMEM/test_BlockTopology.cxx b/src/ParaMEDMEM/test_BlockTopology.cxx
new file mode 100644
index 000000000..eebfc5d9d
--- /dev/null
+++ b/src/ParaMEDMEM/test_BlockTopology.cxx
@@ -0,0 +1,70 @@
+#include <iostream>
+#include <string>
+#include <set>
+#include <vector>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Grid.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+
+int main(int argc, char** argv)
+{
+  string testname="ParaMEDMEM - test #1 -";
+  MPI_Init(&argc, &argv);
+  int size;
+  int rank;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+  set<int> procs;
+  for (int i=0; i< size-1; i++)
+    procs.insert(i);
+
+  ParaMEDMEM::CommInterface interface;
+
+  ParaMEDMEM::ProcessorGroup* group=new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+  double xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0;
+  int nx=10, ny=10;
+  std::vector<std::vector<double> > axes(2);
+  axes[0].resize(nx);
+  axes[1].resize(ny);
+  for (int i=0; i<nx; i++)
+    axes[0][i]=xmin+i*(xmax-xmin)/(nx-1);
+  for (int i=0; i<ny; i++)
+    axes[1][i]=ymin+i*(ymax-ymin)/(ny-1);
+  vector<string> coord_name;
+  vector<string> coord_unit;
+  coord_name.push_back("x");coord_name.push_back("y");
+  coord_unit.push_back("m");coord_unit.push_back("m");
+
+  MEDMEM::GRID grid(axes, coord_name,coord_unit);
+
+  BlockTopology* topo = new BlockTopology(*group, grid);
+  for (int i=0; i<size; i++)
+  {
+    if (rank==i)
+    {
+      cout << "("<<topo->globalToLocal(10).first<<","<<topo->globalToLocal(10).second<<")";
+      cout << "("<<topo->globalToLocal(9).first<<","<<topo->globalToLocal(9).second<<")";
+      cout << "("<<topo->globalToLocal(45).first<<","<<topo->globalToLocal(45).second<<")";
+      cout << endl;
+    }
+  }
+  MPI_Finalize();
+  return 0;
+}
"StructuredParaSUPPORT.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" + + +using namespace std; +using namespace ParaMEDMEM; +using namespace MEDMEM; +int main(int argc, char** argv) +{ + string testname="ParaMEDMEM - test #1 -"; + MPI_Init(&argc, &argv); + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + if (size<3) + { + cout << " test_DEC test program is not meant to run "< self_procs; + set procs_source; + set procs_target; + for (int i=0; i > axes(2); + axes[0].resize(nx); + axes[1].resize(ny); + for (int i=0; i coord_name; + vector coord_unit; + coord_name.push_back("x");coord_name.push_back("y"); + coord_unit.push_back("m");coord_unit.push_back("m"); + + MEDMEM::GRID grid(axes, coord_name,coord_unit); + + grid.setName("grid_5_5"); + grid.setDescription("5 by 5 square grid"); + Topology* topo_source = new BlockTopology(*self_group, grid); + Topology* topo_target = new BlockTopology(*target_group,grid); + ParaGRID* source_grid; + ParaGRID* target_grid; + StructuredParaSUPPORT* target_support; + StructuredParaSUPPORT* source_support; + ComponentTopology* target_comp; + ComponentTopology* source_comp; + ParaFIELD* target_field=0; + ParaFIELD* source_field=0; + + if (source_group->containsMyRank()) + { + source_grid=new ParaGRID(&grid, topo_source); + source_support=new StructuredParaSUPPORT(source_grid,MED_EN::MED_CELL); + source_comp=new ComponentTopology (6, source_group); + source_field = new ParaFIELD(source_support, *source_comp); + cout << "Source field nb elems on rank : "<getTopology()->getNbLocalElements()<getTopology()->getNbLocalElements()]; + for(int ielem=0; ielemgetTopology()->getNbLocalElements();ielem++) + value[ielem]=(double)ielem; + source_field->getField()->setValue(value); + source_field->synchronizeSource(target_field); + } + if (target_group->containsMyRank()) + { + target_grid=new ParaGRID(&grid, topo_target); + target_support=new StructuredParaSUPPORT(target_grid,MED_EN::MED_CELL); + target_comp= new ComponentTopology (6); + target_field = new ParaFIELD(target_support, *target_comp); + target_field->synchronizeTarget(source_field); + //target_grid->write(MED_DRIVER, "/tmp/target"); + //target_field->write(MED_DRIVER, "/tmp/target"); + } + MPI_Barrier(MPI_COMM_WORLD); + MPI_Finalize(); + return 0; +} + + + + diff --git a/src/ParaMEDMEM/test_ExplicitDEC.cxx b/src/ParaMEDMEM/test_ExplicitDEC.cxx new file mode 100644 index 000000000..70fc5f823 --- /dev/null +++ b/src/ParaMEDMEM/test_ExplicitDEC.cxx @@ -0,0 +1,111 @@ +#include +#include +#include +#include +#include + +#include "ProcessorGroup.hxx" +#include "Topology.hxx" +#include "BlockTopology.hxx" +#include "CommInterface.hxx" + + +#include "MPIProcessorGroup.hxx" +#include "MEDMEM_Mesh.hxx" +#include "ParaMESH.hxx" +#include "StructuredParaSUPPORT.hxx" +#include "ComponentTopology.hxx" +#include "ParaFIELD.hxx" + + +using namespace std; +using namespace ParaMEDMEM; +using namespace MEDMEM; +int main(int argc, char** argv) +{ + string testname="ParaMEDMEM - test #1 -"; + MPI_Init(&argc, &argv); + int size; + int rank; + MPI_Comm_size(MPI_COMM_WORLD,&size); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + if (size<3) + { + cout << " test_DEC test program is not meant to run "< self_procs; + set procs_source; + set procs_target; + for (int i=0; icontainsMyRank()) + { + source_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/pointe_nosplit",*self_group); + topo_source=source_mesh->getBlockTopology(); + } + if (target_group->containsMyRank()) + 
diff --git a/src/ParaMEDMEM/test_ExplicitDEC.cxx b/src/ParaMEDMEM/test_ExplicitDEC.cxx
new file mode 100644
index 000000000..70fc5f823
--- /dev/null
+++ b/src/ParaMEDMEM/test_ExplicitDEC.cxx
@@ -0,0 +1,111 @@
+#include <iostream>
+#include <string>
+#include <set>
+#include <vector>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Mesh.hxx"
+#include "ParaMESH.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+
+int main(int argc, char** argv)
+{
+  string testname="ParaMEDMEM - test #1 -";
+  MPI_Init(&argc, &argv);
+  int size;
+  int rank;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+  if (size<3)
+  {
+    cout << " test_DEC test program is not meant to run "<<endl;
+    cout << " on less than three processors"<<endl;
+    return 1;
+  }
+
+  set<int> self_procs;
+  set<int> procs_source;
+  set<int> procs_target;
+
+  for (int i=0; i<size-1; i++)
+    procs_source.insert(i);
+  procs_target.insert(size-1);
+  self_procs.insert(rank);
+
+  ParaMEDMEM::CommInterface interface;
+
+  ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+  ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+  ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+
+  ParaMESH* source_mesh=0;
+  ParaMESH* target_mesh=0;
+  Topology* topo_source=0;
+  Topology* topo_target=0;
+
+  if (source_group->containsMyRank())
+  {
+    source_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/pointe_nosplit",*self_group);
+    topo_source=source_mesh->getBlockTopology();
+  }
+  if (target_group->containsMyRank())
+  {
+    target_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/pointe_split",*target_group);
+    topo_target=target_mesh->getBlockTopology();
+  }
+
+  StructuredParaSUPPORT* target_support;
+  StructuredParaSUPPORT* source_support;
+  ComponentTopology* target_comp;
+  ComponentTopology* source_comp;
+  ParaFIELD* target_field=0;
+  ParaFIELD* source_field=0;
+
+  if (source_group->containsMyRank())
+  {
+    source_support=new StructuredParaSUPPORT(source_mesh,MED_EN::MED_CELL);
+    source_comp=new ComponentTopology (6, source_group);
+    source_field = new ParaFIELD(source_support, *source_comp);
+    int nb_local = source_field->getTopology()->getNbLocalElements();
+    cout << "Source field nb elems on rank : "<<nb_local<<endl;
+    double * value= new double[nb_local];
+    for(int ielem=0; ielem<nb_local; ielem++)
+      value[ielem]=(double)ielem;
+    source_field->getField()->setValue(value);
+    source_field->synchronizeSource(target_field);
+    if (source_group->myRank()==0)
+    {
+      source_mesh->write(MED_DRIVER,"/home/vb144235/tmp/source");
+      source_field->write(MED_DRIVER,"/home/vb144235/tmp/source","maa1");
+    }
+  }
+  if (target_group->containsMyRank())
+  {
+    target_support=new StructuredParaSUPPORT(target_mesh,MED_EN::MED_CELL);
+    target_comp= new ComponentTopology (6);
+    target_field = new ParaFIELD(target_support, *target_comp);
+    target_field->synchronizeTarget(source_field);
+    target_mesh->write(MED_DRIVER, "/home/vb144235/tmp/target");
+    target_field->write(MED_DRIVER, "/home/vb144235/tmp/target", "maa1");
+  }
+  MPI_Barrier(MPI_COMM_WORLD);
+  MPI_Finalize();
+  return 0;
+}
diff --git a/src/ParaMEDMEM/test_ParaField.cxx b/src/ParaMEDMEM/test_ParaField.cxx
new file mode 100644
index 000000000..42f4b482b
--- /dev/null
+++ b/src/ParaMEDMEM/test_ParaField.cxx
@@ -0,0 +1,78 @@
+#include <iostream>
+#include <string>
+#include <set>
+#include <vector>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Grid.hxx"
+#include "ParaGRID.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+
+int main(int argc, char** argv)
+{
+  string testname="ParaMEDMEM - test #1 -";
+  MPI_Init(&argc, &argv);
+  int size;
+  int rank;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+  set<int> self_procs;
+  set<int> procs;
+
+  for (int i=0; i<size; i++)
+    procs.insert(i);
+  self_procs.insert(rank);
+
+  ParaMEDMEM::CommInterface interface;
+
+  ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+  ParaMEDMEM::ProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+
+  double xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0;
+  int nx=10, ny=10;
+  std::vector<std::vector<double> > axes(2);
+  axes[0].resize(nx);
+  axes[1].resize(ny);
+  for (int i=0; i<nx; i++)
+    axes[0][i]=xmin+i*(xmax-xmin)/(nx-1);
+  for (int i=0; i<ny; i++)
+    axes[1][i]=ymin+i*(ymax-ymin)/(ny-1);
+  vector<string> coord_name;
+  vector<string> coord_unit;
+  coord_name.push_back("x");coord_name.push_back("y");
+  coord_unit.push_back("m");coord_unit.push_back("m");
+
+  MEDMEM::GRID grid(axes, coord_name,coord_unit);
+
+  grid.setName("grid_10_10");
+  grid.setDescription("10 by 10 square grid");
+  Topology* topo = new BlockTopology(*self_group, grid);
+
+  ParaGRID local_grid(&grid, topo);
+  StructuredParaSUPPORT support(&local_grid,MED_EN::MED_CELL);
+  local_grid.write (MED_DRIVER, "/tmp/toto");
+
+  ComponentTopology comp_topo(5, group);
+  ParaFIELD field(&support, comp_topo);
+
+  MPI_Finalize();
+  return 0;
+}
diff --git a/src/ParaMEDMEM/test_ParaStructuredSupport.cxx b/src/ParaMEDMEM/test_ParaStructuredSupport.cxx
new file mode 100644
index 000000000..0198a615b
--- /dev/null
+++ b/src/ParaMEDMEM/test_ParaStructuredSupport.cxx
@@ -0,0 +1,67 @@
+#include <iostream>
+#include <string>
+#include <set>
+#include <vector>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Grid.hxx"
+#include "ParaGRID.hxx"
+#include "StructuredParaSUPPORT.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+
+int main(int argc, char** argv)
+{
+  string testname="ParaMEDMEM - test #1 -";
+  MPI_Init(&argc, &argv);
+  int size;
+  int rank;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+  set<int> procs;
+  for (int i=0; i< size-1; i++)
+    procs.insert(i);
+
+  ParaMEDMEM::CommInterface interface;
+
+  ParaMEDMEM::ProcessorGroup* group=new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+  double xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0;
+  int nx=10, ny=10;
+  std::vector<std::vector<double> > axes(2);
+  axes[0].resize(nx);
+  axes[1].resize(ny);
+  for (int i=0; i<nx; i++)
+    axes[0][i]=xmin+i*(xmax-xmin)/(nx-1);
+  for (int i=0; i<ny; i++)
+    axes[1][i]=ymin+i*(ymax-ymin)/(ny-1);
+  vector<string> coord_name;
+  vector<string> coord_unit;
+  coord_name.push_back("x");coord_name.push_back("y");
+  coord_unit.push_back("m");coord_unit.push_back("m");
+
+  MEDMEM::GRID grid(axes, coord_name,coord_unit);
+
+  grid.setName("grid_10_10");
+  grid.setDescription("10 by 10 square grid");
+  Topology* topo = new BlockTopology(*group, grid);
+
+  ParaGRID local_grid(&grid, topo);
+  StructuredParaSUPPORT support(&local_grid,MED_EN::MED_CELL);
+  local_grid.write (MED_DRIVER, "/tmp/toto");
+
+  MPI_Finalize();
+  return 0;
+}
diff --git a/src/ParaMEDMEM/test_ProcessorGroup.cxx b/src/ParaMEDMEM/test_ProcessorGroup.cxx
new file mode 100644
index 000000000..8838a319f
--- /dev/null
+++ b/src/ParaMEDMEM/test_ProcessorGroup.cxx
@@ -0,0 +1,46 @@
+#include <iostream>
+#include <string>
+#include <set>
+#include <vector>
+#include <mpi.h>
+
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+int main(int argc, char** argv)
+{
+  string testname="ParaMEDMEM - test #1 -";
+  MPI_Init(&argc, &argv);
+  int size;
+  int rank;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+  set<int> procs;
+  procs.insert(0);
+
+  ParaMEDMEM::CommInterface interface;
+
+  ParaMEDMEM::ProcessorGroup* group=new ParaMEDMEM::MPIProcessorGroup(interface,procs);
+  cout << "proc #"<<rank<<" - group size : "<<group->size()<<endl;
+  if (group->size() !=1) return 1;
+
+  set<int> empty_proc_group;
+  ParaMEDMEM::ProcessorGroup* group_empty=new ParaMEDMEM::MPIProcessorGroup(interface,empty_proc_group);
+  cout << "proc #"<<rank<<" - empty group size : "<<group_empty->size()<<endl;
+  if (group_empty->size() !=0) return 1;
+
+  delete group;
+  delete group_empty;
+
+  MPI_Finalize();
+  return 0;
+}
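Aside (not part of the commit): translateRank() is the bridge between a sub-group's rank numbering and the world group's numbering; broadcastTopology() and the prepare*DE() methods above rely on it. The standalone sketch below exercises it, assuming only the API visible in this commit, namely the whole-world MPIProcessorGroup(CommInterface&) constructor used in broadcastTopology(), translateRank(), and containsMyRank().

// translate_rank_demo.cxx -- standalone illustration, NOT part of this commit.
#include <iostream>
#include <set>
#include <mpi.h>
#include "CommInterface.hxx"
#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"

using namespace ParaMEDMEM;

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int size, rank;
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  // sub-group holding every rank except the last one
  std::set<int> procs;
  for (int i=0; i<size-1; i++)
    procs.insert(i);

  CommInterface interface;
  MPIProcessorGroup* world=new MPIProcessorGroup(interface);      // whole MPI_COMM_WORLD
  MPIProcessorGroup* sub=new MPIProcessorGroup(interface,procs);

  // rank 0 of the sub-group expressed in world numbering,
  // the same computation broadcastTopology() uses for rank_master
  if (sub->containsMyRank())
    std::cout << "world rank of sub-group root : "
              << world->translateRank(sub,0) << std::endl;

  delete sub;
  delete world;
  MPI_Finalize();
  return 0;
}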
diff --git a/src/ParaMEDMEM/test_UnstructuredDEC.cxx b/src/ParaMEDMEM/test_UnstructuredDEC.cxx
new file mode 100644
index 000000000..126b6e270
--- /dev/null
+++ b/src/ParaMEDMEM/test_UnstructuredDEC.cxx
@@ -0,0 +1,111 @@
+#include <iostream>
+#include <string>
+#include <set>
+#include <vector>
+#include <mpi.h>
+
+#include "ProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+
+#include "MPIProcessorGroup.hxx"
+#include "MEDMEM_Mesh.hxx"
+#include "ParaMESH.hxx"
+#include "StructuredParaSUPPORT.hxx"
+#include "ComponentTopology.hxx"
+#include "ParaFIELD.hxx"
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+
+int main(int argc, char** argv)
+{
+  string testname="ParaMEDMEM - test #1 -";
+  MPI_Init(&argc, &argv);
+  int size;
+  int rank;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+  if (size<3)
+  {
+    cout << " test_DEC test program is not meant to run "<<endl;
+    cout << " on less than three processors"<<endl;
+    return 1;
+  }
+
+  set<int> self_procs;
+  set<int> procs_source;
+  set<int> procs_target;
+
+  for (int i=0; i<size-1; i++)
+    procs_source.insert(i);
+  procs_target.insert(size-1);
+  self_procs.insert(rank);
+
+  ParaMEDMEM::CommInterface interface;
+
+  ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+  ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+  ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+
+  ParaMESH* source_mesh=0;
+  ParaMESH* target_mesh=0;
+  Topology* topo_source=0;
+  Topology* topo_target=0;
+
+  if (source_group->containsMyRank())
+  {
+    source_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_nosplit",*self_group);
+    topo_source=source_mesh->getBlockTopology();
+  }
+  if (target_group->containsMyRank())
+  {
+    target_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_split",*target_group);
+    topo_target=target_mesh->getBlockTopology();
+  }
+
+  StructuredParaSUPPORT* target_support;
+  StructuredParaSUPPORT* source_support;
+  ComponentTopology* target_comp;
+  ComponentTopology* source_comp;
+  ParaFIELD* target_field=0;
+  ParaFIELD* source_field=0;
+
+  if (source_group->containsMyRank())
+  {
+    source_support=new StructuredParaSUPPORT(source_mesh,MED_EN::MED_CELL);
+    source_comp=new ComponentTopology (6, source_group);
+    source_field = new ParaFIELD(source_support, *source_comp);
+    int nb_local = source_field->getTopology()->getNbLocalElements();
+    cout << "Source field nb elems on rank : "<<nb_local<<endl;
+    double * value= new double[nb_local];
+    for(int ielem=0; ielem<nb_local; ielem++)
+      value[ielem]=(double)ielem;
+    source_field->getField()->setValue(value);
+    source_field->synchronizeSource(target_field);
+    if (source_group->myRank()==0)
+    {
+      source_mesh->write(MED_DRIVER,"/home/vb144235/tmp/source");
+      source_field->write(MED_DRIVER,"/home/vb144235/tmp/source","maa1");
+    }
+  }
+  if (target_group->containsMyRank())
+  {
+    target_support=new StructuredParaSUPPORT(target_mesh,MED_EN::MED_CELL);
+    target_comp= new ComponentTopology (6);
+    target_field = new ParaFIELD(target_support, *target_comp);
+    target_field->synchronizeTarget(source_field);
+    target_mesh->write(MED_DRIVER, "/home/vb144235/tmp/target");
+    target_field->write(MED_DRIVER, "/home/vb144235/tmp/target", "maa1");
+  }
+  MPI_Barrier(MPI_COMM_WORLD);
+  MPI_Finalize();
+  return 0;
+}
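Aside (not part of the commit): distilled from test_ExplicitDEC.cxx and test_UnstructuredDEC.cxx above, the coupling protocol reduces to the sketch below. The constructors, method names, and resource paths are exactly those used in the tests; the group layout (every rank but the last as source, the last rank as target) mirrors the tests but is only one possible choice, and error handling and cleanup are omitted.

// coupling_sequence_sketch.cxx -- minimal sketch of the exchange driven by the tests.
#include <set>
#include <mpi.h>
#include "CommInterface.hxx"
#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"
#include "ParaMESH.hxx"
#include "StructuredParaSUPPORT.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"

using namespace ParaMEDMEM;
using namespace MEDMEM;

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int size, rank;
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  // assumed layout: every rank but the last is a source, the last rank is the target
  std::set<int> self_procs, procs_source, procs_target;
  for (int i=0; i<size-1; i++) procs_source.insert(i);
  procs_target.insert(size-1);
  self_procs.insert(rank);

  CommInterface interface;
  ProcessorGroup* self_group  =new MPIProcessorGroup(interface,self_procs);
  ProcessorGroup* source_group=new MPIProcessorGroup(interface,procs_source);
  ProcessorGroup* target_group=new MPIProcessorGroup(interface,procs_target);

  ParaFIELD* source_field=0;
  ParaFIELD* target_field=0;

  if (source_group->containsMyRank())
  {
    // each source proc reads the unsplit mesh on its own group, as in the tests
    ParaMESH* mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_nosplit",*self_group);
    StructuredParaSUPPORT* support=new StructuredParaSUPPORT(mesh,MED_EN::MED_CELL);
    ComponentTopology* comp=new ComponentTopology(6, source_group);
    source_field=new ParaFIELD(support, *comp);
    // ... fill source_field->getField() with values here ...
    source_field->synchronizeSource(target_field);   // pushes the values to the target side
  }
  if (target_group->containsMyRank())
  {
    ParaMESH* mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_split",*target_group);
    StructuredParaSUPPORT* support=new StructuredParaSUPPORT(mesh,MED_EN::MED_CELL);
    ComponentTopology* comp=new ComponentTopology(6);
    target_field=new ParaFIELD(support, *comp);
    target_field->synchronizeTarget(source_field);   // receives them in the target layout
  }

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}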