// Copyright (C) 2007-2020  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include "DisjointDEC.hxx"
#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "ParaMESH.hxx"
#include "ICoCoField.hxx"
#include "ICoCoMEDField.hxx"
#include "MPIProcessorGroup.hxx"

#include <algorithm> // std::copy
#include <cmath>     // fabs

namespace MEDCoupling
{
  DisjointDEC::DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group):
    _local_field(0),
    _source_group(&source_group),
    _target_group(&target_group),
    _comm_interface(0),
    _owns_field(false),
    _owns_groups(false),
    _union_comm(MPI_COMM_NULL)
  {
    checkPartitionGroup();
    _union_group = source_group.fuse(target_group);
  }
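
  // Usage sketch (illustration only, not part of the library): DisjointDEC is
  // abstract, so in practice a concrete subclass such as InterpKernelDEC is
  // instantiated; the rank choices below are assumptions.
  //
  //   std::set<int> src_ids = {0, 1};   // ranks 0-1 of the world comm: source side
  //   std::set<int> tgt_ids = {2, 3};   // ranks 2-3 of the world comm: target side
  //   CommInterface comm_interface;
  //   MPIProcessorGroup source_group(comm_interface, src_ids);
  //   MPIProcessorGroup target_group(comm_interface, tgt_ids);
  //   InterpKernelDEC dec(source_group, target_group);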
  DisjointDEC::DisjointDEC(const DisjointDEC& s):
    DEC(s),
    _local_field(0),
    _union_group(0),
    _source_group(0),
    _target_group(0),
    _comm_interface(0),
    _owns_field(false),
    _owns_groups(false),
    _union_comm(MPI_COMM_NULL)
  {
    copyInstance(s);
  }

  DisjointDEC & DisjointDEC::operator=(const DisjointDEC& s)
  {
    cleanInstance();
    copyInstance(s);
    return *this;
  }
  void DisjointDEC::copyInstance(const DisjointDEC& other)
  {
    DEC::copyFrom(other);
    if (other._union_comm != MPI_COMM_NULL)
      {
        // Tricky: the DEC is responsible for the management of _union_comm. And this comm is referenced by
        // the MPIProcGroups (source/targets). In the case where _union_comm is not NULL we must take care of rebuilding the
        // MPIProcGroups with a communicator that will survive the destruction of 'other'.
        _owns_groups = true;
        MPI_Comm_dup(other._union_comm, &_union_comm);
        // std::cout << "DUP union comm - new is "<< _union_comm << "\n";
        _target_group = new MPIProcessorGroup(*_comm_interface, other._target_group->getProcIDs(), _union_comm);
        _source_group = new MPIProcessorGroup(*_comm_interface, other._source_group->getProcIDs(), _union_comm);
      }
    else
      {
        if(other._target_group)
          {
            _target_group = other._target_group->deepCopy();
            _owns_groups = true;
          }
        if(other._source_group)
          {
            _source_group = other._source_group->deepCopy();
            _owns_groups = true;
          }
      }
    if (_source_group && _target_group)
      _union_group = _source_group->fuse(*_target_group);
  }
  DisjointDEC::DisjointDEC(const std::set<int>& source_ids,
                           const std::set<int>& target_ids,
                           const MPI_Comm& world_comm):
    _local_field(0),
    _union_group(0),
    _source_group(0),
    _target_group(0),
    _comm_interface(0),
    _owns_field(false),
    _owns_groups(true),
    _union_comm(MPI_COMM_NULL)
  {
    MEDCoupling::CommInterface comm;
    // Create the list of procs including source and target
    std::set<int> union_ids; // source and target ids in world_comm
    union_ids.insert(source_ids.begin(),source_ids.end());
    union_ids.insert(target_ids.begin(),target_ids.end());
    if(union_ids.size()!=(source_ids.size()+target_ids.size()))
      throw INTERP_KERNEL::Exception("DisjointDEC constructor : source_ids and target_ids overlap partially or fully. This type of DEC does not support it! OverlapDEC class could be the solution!");
    int* union_ranks_world=new int[union_ids.size()]; // ranks of sources and targets in world_comm
    std::copy(union_ids.begin(), union_ids.end(), union_ranks_world);

    // Create a communicator on these procs
    MPI_Group union_group,world_group;
    comm.commGroup(world_comm,&world_group);
    comm.groupIncl(world_group,(int)union_ids.size(),union_ranks_world,&union_group);
    comm.commCreate(world_comm,union_group,&_union_comm);
    delete[] union_ranks_world;
    if (_union_comm==MPI_COMM_NULL)
      { // This process is not in the union: free the groups and bail out.
        comm.groupFree(&union_group);
        comm.groupFree(&world_group);
        return;
      }

    // Translate source_ids and target_ids from world_comm to union_comm
    int* source_ranks_world=new int[source_ids.size()]; // ranks of sources in world_comm
    std::copy(source_ids.begin(), source_ids.end(),source_ranks_world);
    int* source_ranks_union=new int[source_ids.size()]; // ranks of sources in union_comm
    int* target_ranks_world=new int[target_ids.size()]; // ranks of targets in world_comm
    std::copy(target_ids.begin(), target_ids.end(),target_ranks_world);
    int* target_ranks_union=new int[target_ids.size()]; // ranks of targets in union_comm
    MPI_Group_translate_ranks(world_group,(int)source_ids.size(),source_ranks_world,union_group,source_ranks_union);
    MPI_Group_translate_ranks(world_group,(int)target_ids.size(),target_ranks_world,union_group,target_ranks_union);
    std::set<int> source_ids_union;
    for (int i=0;i<(int)source_ids.size();i++)
      source_ids_union.insert(source_ranks_union[i]);
    std::set<int> target_ids_union;
    for (int i=0;i<(int)target_ids.size();i++)
      target_ids_union.insert(target_ranks_union[i]);
    delete [] source_ranks_world;
    delete [] source_ranks_union;
    delete [] target_ranks_world;
    delete [] target_ranks_union;

    // Create the MPIProcessorGroups
    _source_group = new MPIProcessorGroup(comm,source_ids_union,_union_comm);
    _target_group = new MPIProcessorGroup(comm,target_ids_union,_union_comm);
    _union_group = _source_group->fuse(*_target_group);
    comm.groupFree(&union_group);
    comm.groupFree(&world_group);
  }
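
  // Usage sketch (illustration only): the same split driven directly by world
  // ranks, through a concrete subclass such as InterpKernelDEC; rank choices
  // are assumptions.
  //
  //   std::set<int> src_ids = {0, 1};
  //   std::set<int> tgt_ids = {2, 3, 4};
  //   InterpKernelDEC dec(src_ids, tgt_ids);  // world_comm defaults to MPI_COMM_WORLD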
  DisjointDEC::~DisjointDEC()
  {
    cleanInstance();
  }
  void DisjointDEC::cleanInstance()
  {
    if(_owns_field)
      delete _local_field;
    _local_field = 0;
    _owns_field = false;
    if(_owns_groups)
      {
        delete _source_group;
        delete _target_group;
      }
    _owns_groups = false;
    _source_group = 0;
    _target_group = 0;
    delete _union_group;
    _union_group = 0;
    if (_union_comm != MPI_COMM_NULL)
      _comm_interface->commFree(&_union_comm);
  }
  /*!
   * Check that the source and target procs form a partition of the world communicator referenced in the groups.
   * This world communicator is not necessarily MPI_COMM_WORLD, but it has to be covered completely for the DECs to work.
   */
  void DisjointDEC::checkPartitionGroup() const
  {
    int size;
    MPIProcessorGroup * tgt = static_cast<MPIProcessorGroup *>(_target_group);
    MPIProcessorGroup * src = static_cast<MPIProcessorGroup *>(_source_group);
    MPI_Comm comm_t = tgt->getWorldComm();
    MPI_Comm comm_s = src->getWorldComm();
    if (comm_t != comm_s)
      throw INTERP_KERNEL::Exception("DisjointDEC constructor: Inconsistent world communicator when building DisjointDEC");
    MPI_Comm_size(comm_t, &size);

    std::set<int> union_ids; // source and target ids in world_comm
    union_ids.insert(src->getProcIDs().begin(),src->getProcIDs().end());
    union_ids.insert(tgt->getProcIDs().begin(),tgt->getProcIDs().end());
    if((int)union_ids.size()!=size)
      throw INTERP_KERNEL::Exception("DisjointDEC constructor: source_ids and target_ids do not form a partition of the communicator! Restrict the world communicator passed to the MPIProcessorGroup ctor.");
  }
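
  // Illustration (assumption, not library code): with a 5-process world
  // communicator, sources {0,1,2} and targets {3,4} form a partition and pass
  // this check; sources {0,1} and targets {3,4} leave rank 2 uncovered and
  // would trigger the exception above.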
  void DisjointDEC::setNature(NatureOfField nature)
  {
    if(_local_field)
      _local_field->getField()->setNature(nature);
  }
  /*! Attaches a local field to a DEC.
    If the processor is on the receiving end of the DEC, the field
    will be updated by a recvData() call.
    Conversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side.
  */
  void DisjointDEC::attachLocalField(const ParaFIELD *field, bool ownPt)
  {
    if(!isInUnion())
      return;
    if(_owns_field)
      delete _local_field;
    _local_field = field;
    _owns_field = ownPt;
    _comm_interface = &(field->getTopology()->getProcGroup()->getCommInterface());
    compareFieldAndMethod();
  }
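
  // Usage sketch (illustration only; 'paramesh' and 'comp_topo' are assumed to
  // exist): attaching a ParaFIELD whose ownership is transferred to the DEC.
  //
  //   ParaFIELD *pfield = new ParaFIELD(ON_CELLS, NO_TIME, paramesh, comp_topo);
  //   dec.attachLocalField(pfield, /*ownPt=*/true);  // the DEC will delete pfield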
  /*! Attaches a local field to a DEC. The method will test whether the processor
    is on the source or the target side and will associate the mesh underlying the
    field to the local side.

    If the processor is on the receiving end of the DEC, the field
    will be updated by a recvData() call.
    Conversely, if the processor is on the sending end, the field will be read, possibly transformed,
    and sent appropriately to the other side.
  */
  void DisjointDEC::attachLocalField(MEDCouplingFieldDouble *field)
  {
    if(!isInUnion())
      return;
    ProcessorGroup* local_group;
    if (_source_group->containsMyRank())
      local_group=_source_group;
    else if (_target_group->containsMyRank())
      local_group=_target_group;
    else
      throw INTERP_KERNEL::Exception("Invalid procgroup for field attachment to DEC");
    ParaMESH *paramesh=new ParaMESH(static_cast<MEDCouplingPointSet *>(const_cast<MEDCouplingMesh *>(field->getMesh())),*local_group,field->getMesh()->getName());
    ParaFIELD *tmp=new ParaFIELD(field, paramesh, *local_group);
    tmp->setOwnSupport(true);
    attachLocalField(tmp,true);
    //_comm_interface=&(local_group->getCommInterface());
  }
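
  // Usage sketch (illustration only; 'mesh' and 'values' are assumed to
  // exist): wrapping an existing MEDCouplingFieldDouble.
  //
  //   MEDCouplingFieldDouble *f = MEDCouplingFieldDouble::New(ON_CELLS, ONE_TIME);
  //   f->setMesh(mesh);
  //   f->setArray(values);
  //   dec.attachLocalField(f);  // a ParaFIELD wrapper is created internally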
  /*!
    Attaches a local field to a DEC.
    If the processor is on the receiving end of the DEC, the field
    will be updated by a recvData() call.
    Conversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side.
    The field type is a generic ICoCo Field, so that the DEC can couple a number of different fields:
    - an ICoCo::MEDField, created from a MEDCoupling structure
  */
  void DisjointDEC::attachLocalField(const ICoCo::MEDField *field)
  {
    if(!isInUnion())
      return;
    if(!field)
      throw INTERP_KERNEL::Exception("DisjointDEC::attachLocalField : ICoCo::MEDField pointer is NULL !");
    attachLocalField(field->getField());
  }
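
  // Usage sketch (illustration only): an ICoCo::MEDField wraps a
  // MEDCouplingFieldDouble, so this overload simply forwards to the one above.
  //
  //   ICoCo::MEDField icoco_field(f);  // f: MEDCouplingFieldDouble*
  //   dec.attachLocalField(&icoco_field);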
  /*!
    Computes the field norm over its support
    on the source side and renormalizes the field on the target side
    so that the norms match.

    \f[
    I_{source}=\sum_{i=1}^{n_{source}}V_{i}.|\Phi^{source}_{i}|^2,
    \f]
    and
    \f[
    I_{target}=\sum_{i=1}^{n_{target}}V_{i}.|\Phi^{target}_{i}|^2,
    \f]

    The field on the target side is then renormalized:
    \f[
    \Phi^{target}:=\Phi^{target}.\sqrt{I_{source}/I_{target}}.
    \f]
  */
  void DisjointDEC::renormalizeTargetField(bool isWAbs)
  {
    if (_source_group->containsMyRank())
      for (int icomp=0; icomp<(int)_local_field->getField()->getArray()->getNumberOfComponents(); icomp++)
        {
          double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs);
          double source_norm = total_norm;
          _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0, *dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
        }
    if (_target_group->containsMyRank())
      {
        for (int icomp=0; icomp<(int)_local_field->getField()->getArray()->getNumberOfComponents(); icomp++)
          {
            double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs);
            double source_norm = total_norm;
            _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0, *dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
            if (fabs(total_norm)>1e-100)
              _local_field->getField()->applyLin(source_norm/total_norm,0.0,icomp+1);
          }
      }
  }
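
  // Worked example (numbers are assumptions): if the broadcast source integral
  // is source_norm = 4.0 and the local target integral is total_norm = 1.0,
  // applyLin scales each target value of the component by
  // source_norm/total_norm = 4.0, so both sides integrate to the same value.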
  bool DisjointDEC::isInSourceSide() const
  {
    if(!_source_group)
      return false;
    return _source_group->containsMyRank();
  }

  bool DisjointDEC::isInTargetSide() const
  {
    if(!_target_group)
      return false;
    return _target_group->containsMyRank();
  }

  bool DisjointDEC::isInUnion() const
  {
    if(!_union_group)
      return false;
    return _union_group->containsMyRank();
  }
  void DisjointDEC::compareFieldAndMethod() const
  {
    if (_local_field)
      {
        TypeOfField entity = _local_field->getField()->getTypeOfField();
        if ( getMethod() == "P0" )
          {
            if ( entity != ON_CELLS )
              throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch."
                                             " For P0 interpolation, field must be on MED_CELL's");
          }
        else if ( getMethod() == "P1" )
          {
            if ( entity != ON_NODES )
              throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch."
                                             " For P1 interpolation, field must be on MED_NODE's");
          }
        else if ( getMethod() == "P1d" )
          {
            if ( entity != ON_CELLS )
              throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch."
                                             " For P1d interpolation, field must be on MED_CELL's");
            if ( _target_group->containsMyRank() )
              throw INTERP_KERNEL::Exception("Projection to P1d field not supported");
          }
        else
          throw INTERP_KERNEL::Exception("Unknown interpolation method. Possible methods: P0, P1, P1d");
      }
  }
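
  // Summary of the method/support pairs enforced above (illustration):
  //   "P0"  -> ON_CELLS (cell-centered field)
  //   "P1"  -> ON_NODES (node-based field)
  //   "P1d" -> ON_CELLS, source side only (projection to P1d is unsupported)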
  /*!
    If way==true, source procs call sendData() and target procs call recvData().
    If way==false, it's the other way round.
  */
  void DisjointDEC::sendRecvData(bool way)
  {
    if(!isInUnion())
      return;
    if(isInSourceSide())
      {
        if(way)
          sendData();
        else
          recvData();
      }
    else if(isInTargetSide())
      {
        if(way)
          recvData();
        else
          sendData();
      }
  }
}
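
// Typical coupling sequence (illustration only; synchronize(), sendData() and
// recvData() are pure virtual here and implemented by subclasses such as
// InterpKernelDEC):
//
//   dec.attachLocalField(field);
//   dec.synchronize();   // build the interpolation/communication pattern
//   dec.sendRecvData();  // way==true by default: source sends, target receives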