1 // Copyright (C) 2007-2008 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
19 #include "ProcessorGroup.hxx"
20 #include "MPIProcessorGroup.hxx"
21 #include "CommInterface.hxx"
22 #include "InterpolationUtils.hxx"
31 /*! \defgroup processor_group Processor Groups
33 * \section processor_group_overview Overview
34 * The MPIProcessorGroup class is used to set up processor groups that help to define
35 * the MPI topology of the couplings. They can be set up in various ways, the most common being
36 * the use of the \c MPIProcessorGroup(CommInterface, int pfirst, int plast)
39 * The following code excerpt creates two processor groups on respectively 3 and 2 processors.
43 MPI_Init(&argc,&argv);
44 CommInterface comm_interface;
45 MPIProcessorGroup codeA_group(comm_interface, 0, 2);
46 MPIProcessorGroup codeB_group(comm_interface, 3, 4);
57 \addtogroup processor_group
62 * Creates a processor group that is based on all the
63 MPI_COMM_WORLD processors. This routine must be called by all processors in MPI_COMM_WORLD.
64 \param interface CommInterface object giving access to the MPI
67 MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
68 ProcessorGroup(interface),_world_comm(MPI_COMM_WORLD)
71 _comm_interface.commGroup(_world_comm, &_group);
73 _comm_interface.commSize(_world_comm,&size);
74 for (int i=0; i<size; i++)
79 /*! Creates a processor group that is based on the processors included in \a proc_ids.
80 This routine must be called by all processors in MPI_COMM_WORLD.
82 \param interface CommInterface object giving access to the MPI
84 \param proc_ids set of ids that are to be integrated in the group. The ids number are
85 to be understood in terms of MPI_COMM_WORLD ranks.
88 MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
89 ProcessorGroup(interface, proc_ids),_world_comm(world_comm)
91 //Creation of a communicator
92 MPI_Group group_world;
95 _comm_interface.commSize(_world_comm,&size_world);
97 _comm_interface.commRank(_world_comm,&rank_world);
98 _comm_interface.commGroup(_world_comm, &group_world);
100 int* ranks=new int[proc_ids.size()];
102 // copying proc_ids in ranks
103 copy<set<int>::const_iterator,int*> (proc_ids.begin(), proc_ids.end(), ranks);
104 for (int i=0; i< proc_ids.size();i++)
105 if (ranks[i]>size_world-1)
106 throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");
108 _comm_interface.groupIncl(group_world, proc_ids.size(), ranks, &_group);
110 _comm_interface.commCreate(_world_comm, _group, &_comm);
113 /*! Creates a processor group that is based on the processors between \a pstart and \a pend.
114 This routine must be called by all processors in MPI_COMM_WORLD.
116 \param comm_interface CommInterface object giving access to the MPI
118 \param pstart id in MPI_COMM_WORLD of the first processor in the group
119 \param pend id in MPI_COMM_WORLD of the last processor in the group
121 MPIProcessorGroup::MPIProcessorGroup (const CommInterface& comm_interface, int pstart, int pend): ProcessorGroup(comm_interface,pstart,pend),_world_comm(MPI_COMM_WORLD)
123 //Creation of a communicator
124 MPI_Group group_world;
127 _comm_interface.commSize(_world_comm,&size_world);
129 _comm_interface.commRank(_world_comm,&rank_world);
130 _comm_interface.commGroup(_world_comm, &group_world);
132 if (pend>size_world-1 || pend <pstart || pstart<0)
133 throw INTERP_KERNEL::Exception("invalid argument in MPIProcessorGroup constructor (comm,pfirst,plast)");
134 int nprocs=pend-pstart+1;
135 int* ranks=new int[nprocs];
136 for (int i=pstart; i<=pend;i++)
141 _comm_interface.groupIncl(group_world, nprocs, ranks, &_group);
143 _comm_interface.commCreate(_world_comm, _group, &_comm);
150 MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids) :
151 ProcessorGroup(proc_group.getCommInterface()),_world_comm(MPI_COMM_WORLD)
153 cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)" <<endl;
154 cout << "Not implemented yet !"<<endl;
158 MPIProcessorGroup::~MPIProcessorGroup()
160 _comm_interface.groupFree(&_group);
161 if (_comm!=_world_comm && _comm !=MPI_COMM_NULL)
162 _comm_interface.commFree(&_comm);
166 \addtogroup processor_group
170 /*! Translation of the rank id between two processor groups. This method translates rank \a rank
171 on the group pointed by \a group to the corresponding rank on the local group.
172 \param group group from which the rank is expected
173 \param rank rank on group \a group of the processor which is to be translated
174 \return rank on local group
176 int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
178 const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
180 MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
184 /*!Creates a processor group that is the complement of the current group
185 inside MPI_COMM_WORLD
186 \return pointer to the new ProcessorGroup structure.
188 ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const
191 int world_size=_comm_interface.worldSize();
192 for (int i=0; i<world_size; i++)
194 for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
197 return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
201 /*!Adding processors of group \a group to local group.
202 \param group group that is to be fused with current group
203 \return new group formed by the fusion of local group and \a group.
205 ProcessorGroup* MPIProcessorGroup::fuse (const ProcessorGroup& group) const
207 set <int> procs = _proc_ids;
208 const set<int>& distant_proc_ids = group.getProcIDs();
209 for (set<int>::const_iterator iter=distant_proc_ids.begin(); iter!=distant_proc_ids.end(); iter++)
213 return new MPIProcessorGroup(_comm_interface,procs);
218 ProcessorGroup* MPIProcessorGroup::createProcGroup() const
221 for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
224 return new MPIProcessorGroup(_comm_interface, procs);