// Copyright (C) 2007-2024  CEA, EDF
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//

#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"
#include "CommInterface.hxx"
#include "InterpolationUtils.hxx"

#include <iostream>
#include <set>
#include <algorithm>
#include "mpi.h"

using namespace std;

namespace MEDCoupling
{
  /*!
    \anchor MPIProcessorGroup-det
    \class MPIProcessorGroup

    The MPIProcessorGroup class represents a set of distinct "processors" (computation nodes)
    in an MPI code. It is used to define the MPI topology of code couplings.

    Groups can be set up in various ways, the most common being
    the use of the \c MPIProcessorGroup(CommInterface, int pfirst, int plast)
    constructor.

    The following code excerpt creates two processor groups on respectively 3 and 2 processors.
    \verbatim
    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      CommInterface comm_interface;
      MPIProcessorGroup codeA_group(comm_interface, 0, 2); // groups processors 0, 1 and 2
      MPIProcessorGroup codeB_group(comm_interface, 3, 4); // groups processors 3 and 4
      ...
      MPI_Finalize();
    }
    \endverbatim
  */
  /*! Creates a processor group that is based on all the
    processors of MPI_COMM_WORLD. This routine must be called by all processors in MPI_COMM_WORLD.
    \param interface CommInterface object giving access to the MPI
    communication layer
  */
  MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
    ProcessorGroup(interface), _world_comm(MPI_COMM_WORLD)
  {
    _comm=_world_comm;
    _comm_interface.commGroup(_world_comm, &_group);
    int size;
    _comm_interface.commSize(_world_comm, &size);
    // every MPI_COMM_WORLD rank belongs to this group
    for (int i=0; i<size; i++)
      _proc_ids.insert(i);
  }
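
  /* Usage sketch (illustrative, not from the library's own docs): built this way
     on every rank, the group spans all of MPI_COMM_WORLD and its communicator is
     the world communicator itself, so myRank() coincides with the world rank.

       MPI_Init(&argc, &argv);
       CommInterface comm_interface;
       MPIProcessorGroup everybody(comm_interface);  // ranks 0 .. size-1
       int r = everybody.myRank();                   // same as MPI_Comm_rank on MPI_COMM_WORLD
  */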
  /*! Creates a processor group that is based on the processors included in \a proc_ids.
    This routine must be called by all processors in MPI_COMM_WORLD.

    \param interface CommInterface object giving access to the MPI
    communication layer
    \param proc_ids set of ids that are to be included in the group. The ids are
    to be understood in terms of MPI_COMM_WORLD ranks.
  */
  MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
    ProcessorGroup(interface, proc_ids), _world_comm(world_comm)
  {
    updateMPISpecificAttributes();
  }
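
  /* Minimal sketch of the set-based constructor (variable names are illustrative).
     Every rank of world_comm must execute it, since the communicator creation
     performed below is collective over world_comm.

       CommInterface comm_interface;
       set<int> ids;
       ids.insert(0); ids.insert(2); ids.insert(5);  // MPI_COMM_WORLD ranks
       MPIProcessorGroup trio(comm_interface, ids, MPI_COMM_WORLD);
  */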
  void MPIProcessorGroup::updateMPISpecificAttributes()
  {
    //Creation of a communicator
    MPI_Group group_world;

    int size_world;
    _comm_interface.commSize(_world_comm, &size_world);
    int rank_world;
    _comm_interface.commRank(_world_comm, &rank_world);
    _comm_interface.commGroup(_world_comm, &group_world);

    int* ranks=new int[_proc_ids.size()];

    // copying proc_ids in ranks
    copy<set<int>::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks);
    for (int i=0; i<(int)_proc_ids.size(); i++)
      if (ranks[i]>size_world-1)
        {
          delete[] ranks;
          _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically
          throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");
        }

    _comm_interface.groupIncl(group_world, (int)_proc_ids.size(), ranks, &_group);

    _comm_interface.commCreate(_world_comm, _group, &_comm);

    // clean-up
    delete[] ranks;
    _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically
  }
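
  /* For reference, the raw-MPI pattern wrapped above (a sketch under standard
     MPI semantics): extract the world group, include the selected ranks, then
     derive a communicator. Ranks outside the subgroup obtain MPI_COMM_NULL.

       MPI_Group world_group, sub_group;
       MPI_Comm  sub_comm;
       int       members[3] = {0, 1, 2};
       MPI_Comm_group(MPI_COMM_WORLD, &world_group);
       MPI_Group_incl(world_group, 3, members, &sub_group);
       MPI_Comm_create(MPI_COMM_WORLD, sub_group, &sub_comm);
       MPI_Group_free(&world_group);
  */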
  /*! Creates a processor group that is based on the processors between \a pstart and \a pend.
    This routine must be called by all processors in MPI_COMM_WORLD.

    \param comm_interface CommInterface object giving access to the MPI
    communication layer
    \param pstart id in MPI_COMM_WORLD of the first processor in the group
    \param pend id in MPI_COMM_WORLD of the last processor in the group
  */
  MPIProcessorGroup::MPIProcessorGroup (const CommInterface& comm_interface, int pstart, int pend, const MPI_Comm& world_comm):
    ProcessorGroup(comm_interface, pstart, pend), _world_comm(world_comm)
  {
    //Creation of a communicator
    MPI_Group group_world;

    int size_world;
    _comm_interface.commSize(_world_comm, &size_world);
    int rank_world;
    _comm_interface.commRank(_world_comm, &rank_world);
    _comm_interface.commGroup(_world_comm, &group_world);

    if (pend>size_world-1 || pend<pstart || pstart<0)
      {
        _comm_interface.groupFree(&group_world);
        throw INTERP_KERNEL::Exception("invalid argument in MPIProcessorGroup constructor (comm,pfirst,plast)");
      }
    int nprocs=pend-pstart+1;
    int* ranks=new int[nprocs];
    for (int i=pstart; i<=pend; i++)
      ranks[i-pstart]=i;

    _comm_interface.groupIncl(group_world, nprocs, ranks, &_group);

    _comm_interface.commCreate(_world_comm, _group, &_comm);

    // clean-up
    delete[] ranks;
    _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically
  }
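
  /* Note on the bounds check above (illustrative figures): on a 4-process run,
     MPIProcessorGroup(comm_interface, 0, 4) throws because pend exceeds the last
     world rank (3), while MPIProcessorGroup(comm_interface, 0, 3) spans all four
     ranks. */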
  MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids):
    ProcessorGroup(proc_group.getCommInterface()),
    _world_comm(MPI_COMM_WORLD), _group(MPI_GROUP_NULL), _comm(MPI_COMM_NULL)
  {
    cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)" << endl;
    cout << "Not implemented yet !" << endl;
  }
  MPIProcessorGroup::MPIProcessorGroup(const MPIProcessorGroup& other):
    ProcessorGroup(other), _world_comm(other._world_comm)
  {
    updateMPISpecificAttributes();
  }

  MPIProcessorGroup::~MPIProcessorGroup()
  {
    release();
  }
  /** Destructor involves MPI operations: make sure this is accessible from a proper
   * method for Python wrapping.
   */
  void MPIProcessorGroup::release()
  {
    if (_group != MPI_GROUP_EMPTY)
      _comm_interface.groupFree(&_group);
    _group = MPI_GROUP_EMPTY;
    if (_comm != _world_comm && _comm != MPI_COMM_NULL)
      _comm_interface.commFree(&_comm);
    _comm = MPI_COMM_NULL;
  }
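
  /* Intended call order from wrapped code (a sketch based on the comment above):
     free the MPI resources explicitly while MPI is still initialized, rather than
     relying on a garbage collector that may run after MPI_Finalize.

       MPIProcessorGroup group(comm_interface, 0, 2);
       // ... use the group ...
       group.release();   // frees _group and _comm; safe to call more than once
       MPI_Finalize();
  */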
  /*! Translation of the rank id between two processor groups. This method translates rank \a rank
    on the processor group pointed by \a group to the corresponding rank on the current group.
    \param group group from which the rank is expected
    \param rank rank on group \a group of the processor which is to be translated
    \return rank on local group
  */
  int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
  {
    const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
    int local_rank;
    MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
    return local_rank;
  }
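
  /* Worked example (hypothetical groups): in a 5-process world with
     groupA = {0,1,2} and groupC = {1,2,3,4},
     groupC.translateRank(&groupA, 1) returns 0: rank 1 of groupA is world
     rank 1, which is the first (rank-0) member of groupC. If the processor does
     not belong to the local group, MPI reports MPI_UNDEFINED. */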
  /*! Creates a processor group that is the complement of the current group
    inside MPI_COMM_WORLD
    \return pointer to the new ProcessorGroup structure.
  */
  ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const
  {
    set<int> procs;
    int world_size=_comm_interface.worldSize();
    // start from all world ranks, then remove the members of this group
    for (int i=0; i<world_size; i++)
      procs.insert(i);
    for (set<int>::const_iterator iter=_proc_ids.begin(); iter!=_proc_ids.end(); iter++)
      procs.erase(*iter);

    return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
  }
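
  /* Example (hypothetical): in a 5-process world, the complement of a group
     holding ranks {0,1,2} is {3,4}:

       ProcessorGroup* others = codeA_group.createComplementProcGroup();  // ranks 3 and 4
  */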
  MPIProcessorGroup *MPIProcessorGroup::deepCopy() const
  {
    return new MPIProcessorGroup(*this);
  }
  /*! Adds the processors of group \a group to the local group.
    \param group group that is to be fused with the current group
    \return new group formed by the fusion of the local group and \a group.
  */
  ProcessorGroup* MPIProcessorGroup::fuse (const ProcessorGroup& group) const
  {
    set<int> procs = _proc_ids;
    const set<int>& distant_proc_ids = group.getProcIDs();
    for (set<int>::const_iterator iter=distant_proc_ids.begin(); iter!=distant_proc_ids.end(); iter++)
      procs.insert(*iter);
    return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
  }
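
  /* Example (hypothetical groups): fusing {0,1,2} with {3,4} yields {0,1,2,3,4}.
     Because the ids are kept in a set, overlapping groups fuse without
     duplicates: {0,1,2} fused with {2,3} gives {0,1,2,3}.

       ProcessorGroup* all_procs = codeA_group.fuse(codeB_group);
  */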
  int MPIProcessorGroup::myRank() const
  {
    int rank;
    MPI_Comm_rank(_comm, &rank);
    return rank;
  }
  ProcessorGroup* MPIProcessorGroup::createProcGroup() const
  {
    set<int> procs;
    for (set<int>::const_iterator iter=_proc_ids.begin(); iter!=_proc_ids.end(); iter++)
      procs.insert(*iter);

    return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
  }
}