1 // Copyright (C) 2007-2024 CEA, EDF
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "ProcessorGroup.hxx"
21 #include "MPIProcessorGroup.hxx"
22 #include "CommInterface.hxx"
23 #include "InterpolationUtils.hxx"
36 \anchor MPIProcessorGroup-det
37 \class MPIProcessorGroup
39 The MPIProcessorGroup class represents a set of distinct "processors" (computation nodes)
40 in an MPI code. It is used to define the MPI topology of code couplings.
42 Groups can be set up in various ways, the most common being
43 the use of the \c MPIProcessorGroup(CommInterface, int pfirst, int plast)
46 The following code excerpt creates two processor groups on respectively 3 and 2 processors.
50 MPI_Init(&argc,&argv);
51 CommInterface comm_interface;
52 MPIProcessorGroup codeA_group(comm_interface, 0, 2); // groups processors 0, 1 and 2
53 MPIProcessorGroup codeB_group(comm_interface, 3, 4); // groups processors 3 and 4
62 * Creates a processor group that is based on all the
63 processors of MPI_COMM_WORLD .This routine must be called by all processors in MPI_COMM_WORLD.
64 \param interface CommInterface object giving access to the MPI
// Builds the group over every processor of MPI_COMM_WORLD: the MPI group of
// the world communicator becomes _group, and each world rank is registered.
// NOTE(review): this excerpt is incomplete — braces, the declaration of
// `size`, and the loop body (presumably inserting i into _proc_ids — confirm
// against the full file) are not visible here.
67 MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
68   ProcessorGroup(interface),_world_comm(MPI_COMM_WORLD)
71   _comm_interface.commGroup(_world_comm, &_group);
73   _comm_interface.commSize(_world_comm,&size);
74   for (int i=0; i<size; i++)
79 /*! Creates a processor group that is based on the processors included in \a proc_ids.
80 This routine must be called by all processors in MPI_COMM_WORLD.
82 \param interface CommInterface object giving access to the MPI
84 \param proc_ids set of ids that are to be integrated in the group. The ids number are
85 to be understood in terms of MPI_COMM_WORLD ranks.
// Builds the group from an explicit set of MPI_COMM_WORLD ranks; the actual
// MPI group/communicator creation is delegated to updateMPISpecificAttributes().
// NOTE(review): surrounding braces are not visible in this excerpt.
88 MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
89   ProcessorGroup(interface, proc_ids), _world_comm(world_comm)
91   updateMPISpecificAttributes();
95 /*! Creates a processor group that is based on the processors included in \a proc_ids_by_name[name].
96 This routine must be called by all processors in MPI_COMM_WORLD.
98 \param interface CommInterface object giving access to the MPI
100 \param proc_ids_by_name a map defining a relation between a name and a set of ids that are to be integrated in the group.
101 The ids number are to be understood in terms of MPI_COMM_WORLD ranks.
102 \param simCodeTag identifier of the group
// Builds the group from a name->ranks map plus the tag selecting this code's
// entry; MPI group/communicator creation is delegated to
// updateMPISpecificAttributes(). NOTE(review): braces not visible in excerpt.
105 MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, std::map<std::string,std::set<int>> proc_ids_by_name, const std::string& simCodeTag, const MPI_Comm& world_comm):
106   ProcessorGroup(interface, proc_ids_by_name, simCodeTag), _world_comm(world_comm)
108   updateMPISpecificAttributes();
// Derives the MPI-specific members from _proc_ids: builds _group as the
// subset of _world_comm's group restricted to _proc_ids, then creates _comm
// over that group. Throws if any requested rank is outside _world_comm.
// NOTE(review): this excerpt is incomplete — braces, the declarations of
// `size_world`/`rank_world`, and the `delete[] ranks` cleanup are not visible.
112 void MPIProcessorGroup::updateMPISpecificAttributes()
114   //Creation of a communicator
115   MPI_Group group_world;
118   _comm_interface.commSize(_world_comm,&size_world);
120   _comm_interface.commRank(_world_comm,&rank_world);
121   _comm_interface.commGroup(_world_comm, &group_world);
// Temporary contiguous array of ranks, as required by MPI_Group_incl.
123   int* ranks=new int[_proc_ids.size()];
125   // copying proc_ids in ranks
126   copy<set<int>::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks);
// Validate every requested rank against the world size before building the group.
127   for (int i=0; i< (int)_proc_ids.size();i++)
128     if (ranks[i]>size_world-1)
// Free the temporary world group before throwing: MPI_Group handles are C
// resources and are not released automatically.
131         _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically?
132         throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");
135   _comm_interface.groupIncl(group_world, (int)_proc_ids.size(), ranks, &_group);
// Collective over _world_comm: every process of _world_comm must call this.
137   _comm_interface.commCreate(_world_comm, _group, &_comm);
141   _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically?
144 /*! Creates a processor group that is based on the processors between \a pstart and \a pend.
145 This routine must be called by all processors in MPI_COMM_WORLD.
147 \param comm_interface CommInterface object giving access to the MPI
149 \param pstart id in MPI_COMM_WORLD of the first processor in the group
150 \param pend id in MPI_COMM_WORLD of the last processor in the group
// Builds the group over the contiguous rank range [pstart, pend] of
// world_comm, validating the bounds before creating the MPI group and
// communicator. NOTE(review): this excerpt is incomplete — braces, the
// declarations of `size_world`/`rank_world`, the loop body filling `ranks`,
// and the `delete[] ranks` cleanup are not visible here.
152 MPIProcessorGroup::MPIProcessorGroup (const CommInterface& comm_interface, int pstart, int pend, const MPI_Comm& world_comm): ProcessorGroup(comm_interface,pstart,pend),_world_comm(world_comm)
154   //Creation of a communicator
155   MPI_Group group_world;
158   _comm_interface.commSize(_world_comm,&size_world);
160   _comm_interface.commRank(_world_comm,&rank_world);
161   _comm_interface.commGroup(_world_comm, &group_world);
// Reject an empty/inverted range or ranks outside the world communicator;
// the temporary world group must be freed before throwing.
163   if (pend>size_world-1 || pend <pstart || pstart<0)
165     _comm_interface.groupFree(&group_world);
166     throw INTERP_KERNEL::Exception("invalid argument in MPIProcessorGroup constructor (comm,pfirst,plast)");
168   int nprocs=pend-pstart+1;
169   int* ranks=new int[nprocs];
170   for (int i=pstart; i<=pend;i++)
175   _comm_interface.groupIncl(group_world, nprocs, ranks, &_group);
// Collective over _world_comm: every process of _world_comm must call this.
177   _comm_interface.commCreate(_world_comm, _group, &_comm);
181   _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically?
// Stub constructor: intended to build a sub-group of an existing
// ProcessorGroup, but not implemented — it only prints a warning and leaves
// _group/_comm as their null sentinels.
184 MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids) :
185   ProcessorGroup(proc_group.getCommInterface()),
186   _world_comm(MPI_COMM_WORLD), _group(MPI_GROUP_NULL), _comm(MPI_COMM_NULL)
188   cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)" <<endl;
189   cout << "Not implemented yet !"<<endl;
// Copy constructor: copies the base-class state (including _proc_ids) and the
// world communicator, then rebuilds fresh _group/_comm handles so the copy
// owns its own MPI resources rather than aliasing the original's.
193 MPIProcessorGroup::MPIProcessorGroup(const MPIProcessorGroup& other):
194   ProcessorGroup(other),_world_comm(other._world_comm)
196   updateMPISpecificAttributes();
// Destructor. NOTE(review): the body is not visible in this excerpt —
// presumably it delegates to release() to free _group/_comm; confirm against
// the full file.
199 MPIProcessorGroup::~MPIProcessorGroup()
204 /** Destructor involves MPI operations: make sure this is accessible from a proper
205 * method for Python wrapping.
// Frees the owned MPI handles and resets them to safe sentinels
// (MPI_GROUP_EMPTY / MPI_COMM_NULL) so release() is safe to call more than
// once. Exposed as a method (not only via the destructor) so Python wrappers
// can trigger the MPI cleanup explicitly.
207 void MPIProcessorGroup::release()
209   if (_group != MPI_GROUP_EMPTY)
210     _comm_interface.groupFree(&_group);
211   _group = MPI_GROUP_EMPTY;
// Never free the world communicator itself, and skip the null sentinel.
212   if (_comm!=_world_comm && _comm !=MPI_COMM_NULL)
213     _comm_interface.commFree(&_comm);
214   _comm = MPI_COMM_NULL;
217 /*! Translation of the rank id between two processor groups. This method translates rank \a rank
218 on the group pointed by \a group to the rank on the current processor group.
219 \param group group from which the rank is expected
220 \param rank rank on group \a group of the processor which is to be translated
221 \return rank on local group
// Translates \a rank, expressed in the group pointed by \a group, into the
// corresponding rank in this group via MPI_Group_translate_ranks.
// NOTE(review): excerpt is incomplete — the declaration of `local_rank` and
// the return statement are not visible here.
223 int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
// dynamic_cast: the MPI group handle is only available on the MPI-backed
// implementation of ProcessorGroup.
225   const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
227   MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
231 /*!Creates a processor group that is the complement of the current group
232 inside MPI_COMM_WORLD
233 \return pointer to the new ProcessorGroup structure.
// Builds a new group over the world ranks NOT contained in this group.
// Caller owns the returned pointer. NOTE(review): excerpt is incomplete —
// the declaration of `procs` and the loop bodies (presumably inserting every
// world rank then erasing this group's ids — confirm) are not visible here.
235 ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const
238   int world_size=_comm_interface.worldSize();
239   for (int i=0; i<world_size; i++)
241   for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
244   return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
// Polymorphic deep copy: delegates to the copy constructor, which rebuilds
// independent MPI group/communicator handles. Caller owns the result.
248 MPIProcessorGroup *MPIProcessorGroup::deepCopy() const
250   return new MPIProcessorGroup(*this);
253 /*!Adding processors of group \a group to local group.
254 \param group group that is to be fused with current group
255 \return new group formed by the fusion of local group and \a group.
// Returns a new group over the union of this group's ranks and \a group's
// ranks. Caller owns the result. NOTE(review): the loop body (presumably
// `procs.insert(*iter)` — confirm) is not visible in this excerpt.
257 ProcessorGroup* MPIProcessorGroup::fuse (const ProcessorGroup& group) const
259   set <int> procs = _proc_ids;
260   const set<int>& distant_proc_ids = group.getProcIDs();
261   for (set<int>::const_iterator iter=distant_proc_ids.begin(); iter!=distant_proc_ids.end(); iter++)
265   return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
// Rank of the calling process within this group's communicator _comm.
// NOTE(review): the declaration of `rank` and the return statement are not
// visible in this excerpt.
268 int MPIProcessorGroup::myRank() const
271   MPI_Comm_rank(_comm,&rank);
// Creates a new group over the same rank set as this one. Caller owns the
// result. NOTE(review): excerpt is incomplete — the declaration of `procs`
// and the loop body (presumably copying _proc_ids into it — confirm) are not
// visible here.
275 ProcessorGroup* MPIProcessorGroup::createProcGroup() const
278   for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
281   return new MPIProcessorGroup(_comm_interface, procs, _world_comm);