// Copyright (C) 2007-2015  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"
#include "CommInterface.hxx"
#include "InterpolationUtils.hxx"

#include <iostream>
#include <set>
#include <algorithm>
#include "mpi.h"

using namespace std;

namespace ParaMEDMEM
{
/*! \defgroup processor_group Processor Groups
 *
 * \section processor_group_overview Overview
 * The MPIProcessorGroup class is used to set up processor groups that help to define
 * the MPI topology of the couplings. They can be set up in various ways, the most common
 * being the use of the \c MPIProcessorGroup(CommInterface, int pfirst, int plast) constructor.
 *
 * The following code excerpt creates two processor groups on respectively 3 and 2 processors.
 \verbatim
 int main(int argc, char** argv)
 {
   MPI_Init(&argc, &argv);
   CommInterface comm_interface;
   MPIProcessorGroup codeA_group(comm_interface, 0, 2);  // world ranks 0,1,2
   MPIProcessorGroup codeB_group(comm_interface, 3, 4);  // world ranks 3,4
   ...
   MPI_Finalize();
 }
 \endverbatim
*/
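/* For comparison, the same two groups can be built from explicit rank sets with the
   set<int> based constructor. A minimal sketch (not from the original file), reusing
   the comm_interface object above and assuming the world_comm argument defaults to
   MPI_COMM_WORLD:

 \verbatim
 std::set<int> ids_A, ids_B;
 for (int i=0; i<3; i++) ids_A.insert(i);   // world ranks 0,1,2
 for (int i=3; i<5; i++) ids_B.insert(i);   // world ranks 3,4
 MPIProcessorGroup codeA_group(comm_interface, ids_A);
 MPIProcessorGroup codeB_group(comm_interface, ids_B);
 \endverbatim
*/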
/*!
  \addtogroup processor_group
  @{
*/
/*! Creates a processor group that is based on all the
  processors of MPI_COMM_WORLD. This routine must be called by all processors in MPI_COMM_WORLD.
  \param interface CommInterface object giving access to the MPI
  communication layer
*/
MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
  ProcessorGroup(interface),_world_comm(MPI_COMM_WORLD)
{
  _comm=_world_comm;
  _comm_interface.commGroup(_world_comm, &_group);
  int size;
  _comm_interface.commSize(_world_comm,&size);
  for (int i=0; i<size; i++)
    _proc_ids.insert(i);
}
/*! Creates a processor group that is based on the processors included in \a proc_ids.
  This routine must be called by all processors in MPI_COMM_WORLD.
  \param interface CommInterface object giving access to the MPI
  communication layer
  \param proc_ids set of ids that are to be integrated in the group. The ids are
  to be understood as MPI_COMM_WORLD ranks.
*/
MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
  ProcessorGroup(interface, proc_ids),_world_comm(world_comm)
{
  updateMPISpecificAttributes();
}
void MPIProcessorGroup::updateMPISpecificAttributes()
{
  //Creation of a communicator restricted to the processors of the group
  MPI_Group group_world;

  int size_world;
  _comm_interface.commSize(_world_comm,&size_world);
  int rank_world;
  _comm_interface.commRank(_world_comm,&rank_world);
  _comm_interface.commGroup(_world_comm, &group_world);

  int* ranks=new int[_proc_ids.size()];

  // copying proc_ids in ranks
  copy<set<int>::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks);
  for (int i=0; i< (int)_proc_ids.size();i++)
    if (ranks[i]>size_world-1)
      {
        delete[] ranks;
        _comm_interface.groupFree(&group_world); // MPI_Group is a C handle and does not get de-allocated automatically
        throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");
      }

  _comm_interface.groupIncl(group_world, _proc_ids.size(), ranks, &_group);

  _comm_interface.commCreate(_world_comm, _group, &_comm);

  // clean-up
  delete[] ranks;
  _comm_interface.groupFree(&group_world); // MPI_Group is a C handle and does not get de-allocated automatically
}
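/* Note that MPI_Comm_create (called through commCreate) is collective over _world_comm
   and returns MPI_COMM_NULL on every process that is not part of the new group; this is
   why the destructor frees _comm only when it differs from both _world_comm and
   MPI_COMM_NULL. A minimal caller-side sketch, assuming a getComm() accessor exposing
   the internal communicator:

 \verbatim
 MPIProcessorGroup codeA_group(comm_interface, 0, 2);
 if (*codeA_group.getComm() != MPI_COMM_NULL)
   {
     // this process belongs to codeA_group and may use its communicator
   }
 \endverbatim
*/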
/*! Creates a processor group that is based on the processors between \a pstart and \a pend.
  This routine must be called by all processors in MPI_COMM_WORLD.
  \param comm_interface CommInterface object giving access to the MPI
  communication layer
  \param pstart id in MPI_COMM_WORLD of the first processor in the group
  \param pend id in MPI_COMM_WORLD of the last processor in the group
*/
MPIProcessorGroup::MPIProcessorGroup (const CommInterface& comm_interface, int pstart, int pend, const MPI_Comm& world_comm): ProcessorGroup(comm_interface,pstart,pend),_world_comm(world_comm)
{
  //Creation of a communicator restricted to the processors of the group
  MPI_Group group_world;

  int size_world;
  _comm_interface.commSize(_world_comm,&size_world);
  int rank_world;
  _comm_interface.commRank(_world_comm,&rank_world);
  _comm_interface.commGroup(_world_comm, &group_world);

  if (pend>size_world-1 || pend <pstart || pstart<0)
    {
      _comm_interface.groupFree(&group_world);
      throw INTERP_KERNEL::Exception("invalid argument in MPIProcessorGroup constructor (comm,pfirst,plast)");
    }
  int nprocs=pend-pstart+1;
  int* ranks=new int[nprocs];
  for (int i=pstart; i<=pend;i++)
    ranks[i-pstart]=i;

  _comm_interface.groupIncl(group_world, nprocs, ranks, &_group);

  _comm_interface.commCreate(_world_comm, _group, &_comm);

  // clean-up
  delete[] ranks;
  _comm_interface.groupFree(&group_world); // MPI_Group is a C handle and does not get de-allocated automatically
}
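/* Since invalid bounds raise an INTERP_KERNEL::Exception instead of aborting, a caller
   that derives the bounds from user input can guard the construction. A minimal sketch,
   where nb_procs is a hypothetical variable holding the MPI_COMM_WORLD size:

 \verbatim
 try
   {
     MPIProcessorGroup bad_group(comm_interface, 0, nb_procs);  // pend is one past the last valid rank
   }
 catch (INTERP_KERNEL::Exception& e)
   {
     std::cerr << e.what() << std::endl;
   }
 \endverbatim
*/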
MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids) :
  ProcessorGroup(proc_group.getCommInterface()),_world_comm(MPI_COMM_WORLD)
{
  cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)" <<endl;
  cout << "Not implemented yet !"<<endl;
}
MPIProcessorGroup::MPIProcessorGroup(const MPIProcessorGroup& other):ProcessorGroup(other),_world_comm(other._world_comm)
{
  updateMPISpecificAttributes();
}
MPIProcessorGroup::~MPIProcessorGroup()
{
  _comm_interface.groupFree(&_group);
  if (_comm!=_world_comm && _comm !=MPI_COMM_NULL)
    _comm_interface.commFree(&_comm);
}
/*! @} */

/*!
  \addtogroup processor_group
  @{
*/
/*! Translation of the rank id between two processor groups. This method translates rank \a rank
  of a processor in group \a group into the corresponding rank in the local group.
  \param group group from which the rank is expected
  \param rank rank on group \a group of the processor which is to be translated
  \return rank on local group
*/
int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
{
  const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
  int local_rank;
  MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
  return local_rank;
}
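/* A minimal usage sketch (not from the original file): with the two groups of the
   overview plus a group spanning all of MPI_COMM_WORLD, rank 4 of the world group
   corresponds to rank 1 inside codeB_group:

 \verbatim
 MPIProcessorGroup world_group(comm_interface);           // all MPI_COMM_WORLD ranks
 int local = codeB_group.translateRank(&world_group, 4);  // yields 1
 \endverbatim

   MPI_Group_translate_ranks reports MPI_UNDEFINED for a process that does not belong
   to the target group.
*/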
/*! Creates a processor group that is the complement of the current group
  inside MPI_COMM_WORLD.
  \return pointer to the new ProcessorGroup structure.
*/
ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const
{
  set <int> procs;
  int world_size=_comm_interface.worldSize();
  for (int i=0; i<world_size; i++)
    procs.insert(i);
  for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
    procs.erase(*iter);

  return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
}
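/* A minimal usage sketch: in the 5-process example of the overview, the complement of
   codeA_group (world ranks 0,1,2) is a group over world ranks 3 and 4. The caller owns
   the returned object:

 \verbatim
 ProcessorGroup* complement = codeA_group.createComplementProcGroup();
 ...
 delete complement;
 \endverbatim
*/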
ProcessorGroup *MPIProcessorGroup::deepCpy() const
{
  return new MPIProcessorGroup(*this);
}
/*! Adds the processors of group \a group to the local group.
  \param group group that is to be fused with the current group
  \return new group formed by the fusion of the local group and \a group.
*/
ProcessorGroup* MPIProcessorGroup::fuse (const ProcessorGroup& group) const
{
  set <int> procs = _proc_ids;
  const set<int>& distant_proc_ids = group.getProcIDs();
  for (set<int>::const_iterator iter=distant_proc_ids.begin(); iter!=distant_proc_ids.end(); iter++)
    procs.insert(*iter);

  return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
}
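/* A minimal usage sketch: fusing the two disjoint groups of the overview yields a group
   over world ranks 0 to 4; as with createComplementProcGroup(), the caller must delete
   the returned object:

 \verbatim
 ProcessorGroup* fused = codeA_group.fuse(codeB_group);
 ...
 delete fused;
 \endverbatim
*/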
int MPIProcessorGroup::myRank() const
{
  int rank;
  MPI_Comm_rank(_comm,&rank);
  return rank;
}
ProcessorGroup* MPIProcessorGroup::createProcGroup() const
{
  set <int> procs;
  for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
    procs.insert(*iter);

  return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
}

/*! @} */

} // namespace ParaMEDMEM