1 // Copyright (C) 2007-2015 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #include "ProcessorGroup.hxx"
21 #include "MPIProcessorGroup.hxx"
22 #include "CommInterface.hxx"
23 #include "InterpolationUtils.hxx"
36 * \anchor MPIProcessorGroup-det
37 * \class MPIProcessorGroup
39 * \section processor_group_overview Overview
40 * The MPIProcessorGroup class is used to set up processor groups that help to define
41 * the MPI topology of the couplings. They can be set up in various ways, the most common being
 * the use of the \c MPIProcessorGroup(CommInterface, int pfirst, int plast)
45 * The following code excerpt creates two processor groups on respectively 3 and 2 processors.
49 MPI_Init(&argc,&argv);
50 CommInterface comm_interface;
51 MPIProcessorGroup codeA_group(comm_interface, 0, 2); // groups processors 0, 1 and 2
52 MPIProcessorGroup codeB_group(comm_interface, 3, 4); // groups processors 3 and 4
 * Creates a processor group based on all the processors of
 MPI_COMM_WORLD. This routine must be called by all processors in MPI_COMM_WORLD.
63 \param interface CommInterface object giving access to the MPI
// World-spanning constructor: the group covers every rank of MPI_COMM_WORLD.
// Collective: must be called by all processes of MPI_COMM_WORLD.
MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
  ProcessorGroup(interface),_world_comm(MPI_COMM_WORLD)
  // Extract the MPI group underlying the world communicator into _group.
  _comm_interface.commGroup(_world_comm, &_group);
  _comm_interface.commSize(_world_comm,&size);
  // Register every world rank as a member of this group.
  for (int i=0; i<size; i++)
78 /*! Creates a processor group that is based on the processors included in \a proc_ids.
79 This routine must be called by all processors in MPI_COMM_WORLD.
81 \param interface CommInterface object giving access to the MPI
83 \param proc_ids set of ids that are to be integrated in the group. The ids number are
84 to be understood in terms of MPI_COMM_WORLD ranks.
// Constructor from an explicit set of world ranks.
// Collective over \a world_comm: builds _group/_comm for exactly the ids
// in \a proc_ids via updateMPISpecificAttributes().
MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
  ProcessorGroup(interface, proc_ids), _world_comm(world_comm)
  // Derive the MPI group and communicator from _proc_ids.
  updateMPISpecificAttributes();
// (Re)builds the MPI-specific members _group and _comm from _proc_ids.
// Collective over _world_comm: every process of _world_comm must call it.
// Throws INTERP_KERNEL::Exception if any id in _proc_ids is not a valid
// rank of _world_comm.
void MPIProcessorGroup::updateMPISpecificAttributes()
  //Creation of a communicator
  MPI_Group group_world;
  _comm_interface.commSize(_world_comm,&size_world);
  _comm_interface.commRank(_world_comm,&rank_world);
  _comm_interface.commGroup(_world_comm, &group_world);
  // MPI_Group_incl needs a plain int array of ranks.
  int* ranks=new int[_proc_ids.size()];
  // copying proc_ids in ranks
  copy<set<int>::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks);
  // Validate that every requested id is a legal rank of _world_comm.
  for (int i=0; i< (int)_proc_ids.size();i++)
  if (ranks[i]>size_world-1)
  _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically?
  // NOTE(review): `ranks` is a raw new[] buffer and appears to leak on this
  // error path — confirm a delete[] ranks precedes the throw.
  throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");
  // Restrict the world group to the requested ranks.
  _comm_interface.groupIncl(group_world, _proc_ids.size(), ranks, &_group);
  // Collective call: creates _comm spanning the ranks of _group
  // (per MPI, processes outside the group receive MPI_COMM_NULL).
  _comm_interface.commCreate(_world_comm, _group, &_comm);
  _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically?
126 /*! Creates a processor group that is based on the processors between \a pstart and \a pend.
127 This routine must be called by all processors in MPI_COMM_WORLD.
129 \param comm_interface CommInterface object giving access to the MPI
131 \param pstart id in MPI_COMM_WORLD of the first processor in the group
132 \param pend id in MPI_COMM_WORLD of the last processor in the group
// Constructor from a contiguous rank range [pstart, pend] of \a world_comm.
// Collective over \a world_comm. Throws INTERP_KERNEL::Exception when the
// range is empty, negative, or exceeds the size of \a world_comm.
MPIProcessorGroup::MPIProcessorGroup (const CommInterface& comm_interface, int pstart, int pend, const MPI_Comm& world_comm): ProcessorGroup(comm_interface,pstart,pend),_world_comm(world_comm)
  //Creation of a communicator
  MPI_Group group_world;
  _comm_interface.commSize(_world_comm,&size_world);
  _comm_interface.commRank(_world_comm,&rank_world);
  _comm_interface.commGroup(_world_comm, &group_world);
  // Reject an invalid range before allocating anything.
  if (pend>size_world-1 || pend <pstart || pstart<0)
  _comm_interface.groupFree(&group_world);
  throw INTERP_KERNEL::Exception("invalid argument in MPIProcessorGroup constructor (comm,pfirst,plast)");
  int nprocs=pend-pstart+1;
  // Fill a plain int array with the ranks pstart..pend for MPI_Group_incl.
  int* ranks=new int[nprocs];
  for (int i=pstart; i<=pend;i++)
  _comm_interface.groupIncl(group_world, nprocs, ranks, &_group);
  // Collective call: creates _comm spanning the ranks of _group.
  _comm_interface.commCreate(_world_comm, _group, &_comm);
  _comm_interface.groupFree(&group_world); // MPI_Group is a C structure and won't get de-allocated automatically?
// Sub-group constructor — NOT implemented: only reports its invocation on
// stdout and leaves the group in the base-class default state.
MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids) :
  ProcessorGroup(proc_group.getCommInterface()),_world_comm(MPI_COMM_WORLD)
  cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)" <<endl;
  cout << "Not implemented yet !"<<endl;
// Copy constructor: copies the id set via the base class, then rebuilds
// its own _group/_comm (collective over _world_comm) rather than sharing
// the source's MPI handles.
MPIProcessorGroup::MPIProcessorGroup(const MPIProcessorGroup& other):ProcessorGroup(other),_world_comm(other._world_comm)
  updateMPISpecificAttributes();
// Destructor: releases the MPI group and, when this object created it,
// the communicator.
MPIProcessorGroup::~MPIProcessorGroup()
  _comm_interface.groupFree(&_group);
  // Only free a communicator we created ourselves — never the world
  // communicator and never the null communicator.
  if (_comm!=_world_comm && _comm !=MPI_COMM_NULL)
  _comm_interface.commFree(&_comm);
/*! Translation of the rank id between two processor groups. This method translates rank \a rank
defined on the group pointed by \a group into the corresponding rank on the local group.
189 \param group group from which the rank is expected
190 \param rank rank on group \a group of the processor which is to be translated
191 \return rank on local group
// Maps a rank expressed in \a group's MPI group to the corresponding rank
// in this group's MPI group (see MPI_Group_translate_ranks).
int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
  const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
  // NOTE(review): targetgroup is dereferenced without a null check — confirm
  // callers never pass a non-MPI ProcessorGroup here.
  MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
201 /*!Creates a processor group that is the complement of the current group
202 inside MPI_COMM_WORLD
203 \return pointer to the new ProcessorGroup structure.
// Builds the set of world ranks NOT contained in this group and wraps it
// in a new MPIProcessorGroup. The caller owns the returned pointer.
ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const
  int world_size=_comm_interface.worldSize();
  // Start from all world ranks, then remove this group's own ids.
  for (int i=0; i<world_size; i++)
  for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
  return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
// Deep copy (delegates to the copy constructor, which rebuilds the MPI
// handles). The caller owns the returned pointer.
ProcessorGroup *MPIProcessorGroup::deepCpy() const
  return new MPIProcessorGroup(*this);
223 /*!Adding processors of group \a group to local group.
224 \param group group that is to be fused with current group
225 \return new group formed by the fusion of local group and \a group.
// Union of this group's ids with those of \a group, returned as a new
// MPIProcessorGroup. The caller owns the returned pointer.
ProcessorGroup* MPIProcessorGroup::fuse (const ProcessorGroup& group) const
  // Start from a copy of our own ids, then merge in the distant ones.
  set <int> procs = _proc_ids;
  const set<int>& distant_proc_ids = group.getProcIDs();
  for (set<int>::const_iterator iter=distant_proc_ids.begin(); iter!=distant_proc_ids.end(); iter++)
  return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
// Rank of the calling process inside this group's own communicator _comm
// (not its MPI_COMM_WORLD rank).
int MPIProcessorGroup::myRank() const
  MPI_Comm_rank(_comm,&rank);
// Creates a fresh MPIProcessorGroup holding the same processor ids as
// this one. The caller owns the returned pointer.
ProcessorGroup* MPIProcessorGroup::createProcGroup() const
  // Copy our ids into a new set and hand it to the set-based constructor.
  for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
  return new MPIProcessorGroup(_comm_interface, procs, _world_comm);