// Copyright (C) 2007-2020  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
20 #include "DisjointDEC.hxx"
21 #include "CommInterface.hxx"
22 #include "Topology.hxx"
23 #include "BlockTopology.hxx"
24 #include "ComponentTopology.hxx"
25 #include "ParaFIELD.hxx"
26 #include "ParaMESH.hxx"
27 #include "ICoCoField.hxx"
28 #include "ICoCoMEDField.hxx"
29 #include "MPIProcessorGroup.hxx"
37 DisjointDEC::DisjointDEC(ProcessorGroup& source_group, ProcessorGroup& target_group):
39 _source_group(&source_group),
40 _target_group(&target_group),
44 _union_comm(MPI_COMM_NULL)
46 checkPartitionGroup();
47 _union_group = source_group.fuse(target_group);
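
  // Usage sketch (illustrative only: DisjointDEC is abstract, so a concrete
  // subclass such as InterpKernelDEC is assumed here, and the rank ranges are
  // hypothetical). On a 5-proc MPI_COMM_WORLD:
  //
  //   CommInterface interface;
  //   MPIProcessorGroup source_group(interface, 0, 2);  // world ranks 0..2
  //   MPIProcessorGroup target_group(interface, 3, 4);  // world ranks 3..4
  //   InterpKernelDEC dec(source_group, target_group);  // fuses the two groups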
  DisjointDEC::DisjointDEC(const DisjointDEC& s):
    _local_field(nullptr),
    _source_group(nullptr),
    _target_group(nullptr),
    _union_group(nullptr),
    _owns_field(false),
    _owns_groups(false),
    _union_comm(MPI_COMM_NULL)
  {
    copyInstance(s);
  }
64 DisjointDEC & DisjointDEC::operator=(const DisjointDEC& s)
  void DisjointDEC::copyInstance(const DisjointDEC& other)
  {
    DEC::copyFrom(other);
    if (other._union_comm != MPI_COMM_NULL)
      {
        // Tricky: the DEC is responsible for the management of _union_comm, and this comm is referenced by
        // the MPIProcessorGroups (source/target). When _union_comm is not null we must take care of rebuilding
        // the MPIProcessorGroups with a communicator that will survive the destruction of 'other'.
        _owns_groups = true;
        MPI_Comm_dup(other._union_comm, &_union_comm);
        // std::cout << "DUP union comm - new is "<< _union_comm << "\n";
        _target_group = new MPIProcessorGroup(*_comm_interface, other._target_group->getProcIDs(), _union_comm);
        _source_group = new MPIProcessorGroup(*_comm_interface, other._source_group->getProcIDs(), _union_comm);
      }
    else
      {
        if(other._target_group)
          {
            _target_group = other._target_group->deepCopy();
            _owns_groups = true;
          }
        if(other._source_group)
          {
            _source_group = other._source_group->deepCopy();
            _owns_groups = true;
          }
      }
    if (_source_group && _target_group)
      _union_group = _source_group->fuse(*_target_group);
  }
  DisjointDEC::DisjointDEC(const std::set<int>& source_ids,
                           const std::set<int>& target_ids,
                           const MPI_Comm& world_comm):
    _local_field(nullptr),
    _source_group(nullptr),
    _target_group(nullptr),
    _union_group(nullptr),
    _owns_field(false),
    _owns_groups(true),    // this constructor allocates its own groups below
    _union_comm(MPI_COMM_NULL)
  {
    MEDCoupling::CommInterface comm;
    // Create the list of procs including source and target
    std::set<int> union_ids; // source and target ids in world_comm
    union_ids.insert(source_ids.begin(),source_ids.end());
    union_ids.insert(target_ids.begin(),target_ids.end());
    if(union_ids.size()!=(source_ids.size()+target_ids.size()))
      throw INTERP_KERNEL::Exception("DisjointDEC constructor : source_ids and target_ids overlap partially or fully. This type of DEC does not support it! OverlapDEC class could be the solution!");
    int* union_ranks_world=new int[union_ids.size()]; // ranks of sources and targets in world_comm
    std::copy(union_ids.begin(), union_ids.end(), union_ranks_world);
    // Create a communicator on these procs
    MPI_Group union_group,world_group;
    comm.commGroup(world_comm,&world_group);
    comm.groupIncl(world_group,(int)union_ids.size(),union_ranks_world,&union_group);
    comm.commCreate(world_comm,union_group,&_union_comm);
    delete[] union_ranks_world;
    if (_union_comm==MPI_COMM_NULL)
      { // This process is not in the union: free the MPI groups and bail out
        comm.groupFree(&union_group);
        comm.groupFree(&world_group);
        return;
      }
137 // Translate source_ids and target_ids from world_comm to union_comm
138 int* source_ranks_world=new int[source_ids.size()]; // ranks of sources in world_comm
139 std::copy(source_ids.begin(), source_ids.end(),source_ranks_world);
140 int* source_ranks_union=new int[source_ids.size()]; // ranks of sources in union_comm
141 int* target_ranks_world=new int[target_ids.size()]; // ranks of targets in world_comm
142 std::copy(target_ids.begin(), target_ids.end(),target_ranks_world);
143 int* target_ranks_union=new int[target_ids.size()]; // ranks of targets in union_comm
144 MPI_Group_translate_ranks(world_group,(int)source_ids.size(),source_ranks_world,union_group,source_ranks_union);
145 MPI_Group_translate_ranks(world_group,(int)target_ids.size(),target_ranks_world,union_group,target_ranks_union);
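    // Illustration (hypothetical ranks): if world ranks {2,5,7} make up the union
    // communicator, MPI_Group_translate_ranks maps world rank 2 to union rank 0,
    // rank 5 to 1 and rank 7 to 2; the sets below are rebuilt in that numbering.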
146 std::set<int> source_ids_union;
147 for (int i=0;i<(int)source_ids.size();i++)
148 source_ids_union.insert(source_ranks_union[i]);
149 std::set<int> target_ids_union;
150 for (int i=0;i<(int)target_ids.size();i++)
151 target_ids_union.insert(target_ranks_union[i]);
152 delete [] source_ranks_world;
153 delete [] source_ranks_union;
154 delete [] target_ranks_world;
155 delete [] target_ranks_union;
    // Create the MPIProcessorGroups
    _source_group = new MPIProcessorGroup(comm,source_ids_union,_union_comm);
    _target_group = new MPIProcessorGroup(comm,target_ids_union,_union_comm);
    _union_group = _source_group->fuse(*_target_group);
    comm.groupFree(&union_group);
    comm.groupFree(&world_group);
  }
  DisjointDEC::~DisjointDEC()
  {
    cleanInstance();
  }
  void DisjointDEC::cleanInstance()
  {
    if(_owns_field)
      {
        delete _local_field;
      }
    _local_field=nullptr;
    _owns_field=false;
    if(_owns_groups)
      {
        delete _source_group;
        delete _target_group;
      }
    _owns_groups=false;
    _source_group=nullptr;
    _target_group=nullptr;
    delete _union_group;
    _union_group=nullptr;
    if (_union_comm != MPI_COMM_NULL)
      _comm_interface->commFree(&_union_comm);
    _union_comm = MPI_COMM_NULL;
  }
  /*!
   * Checks that the source and target procs form a partition of the world communicator referenced in the groups.
   * This world communicator is not necessarily MPI_COMM_WORLD, but it has to be covered completely for the DECs to work.
   */
  void DisjointDEC::checkPartitionGroup() const
  {
    int size = -1;
    MPIProcessorGroup * tgt = static_cast<MPIProcessorGroup *>(_target_group);
    MPIProcessorGroup * src = static_cast<MPIProcessorGroup *>(_source_group);
    MPI_Comm comm_t = tgt->getWorldComm();
    MPI_Comm comm_s = src->getWorldComm();
    if (comm_t != comm_s)
      throw INTERP_KERNEL::Exception("DisjointDEC constructor: Inconsistent world communicator when building DisjointDEC");
    MPI_Comm_size(comm_t, &size);

    std::set<int> union_ids; // source and target ids in world_comm
    union_ids.insert(src->getProcIDs().begin(),src->getProcIDs().end());
    union_ids.insert(tgt->getProcIDs().begin(),tgt->getProcIDs().end());
    if((int)union_ids.size()!=size)
      throw INTERP_KERNEL::Exception("DisjointDEC constructor: source_ids and target_ids do not form a partition of the communicator! Restrict the world communicator passed to the MPIProcessorGroup constructor.");
  }
  void DisjointDEC::setNature(NatureOfField nature)
  {
    if(_local_field)
      _local_field->getField()->setNature(nature);
  }
  /*! Attaches a local field to a DEC.
    If the processor is on the receiving end of the DEC, the field
    will be updated by a recvData() call.
    Conversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side.
  */
  void DisjointDEC::attachLocalField(const ParaFIELD *field, bool ownPt)
  {
    if(!isInUnion())
      return;
    if(_owns_field)
      delete _local_field;
    _local_field = field;
    _owns_field = ownPt;
    _comm_interface = &(field->getTopology()->getProcGroup()->getCommInterface());
    compareFieldAndMethod();
  }
  /*! Attaches a local field to a DEC. The method will test whether the processor
    is on the source or the target side and will associate the mesh underlying the
    field to the local side.

    If the processor is on the receiving end of the DEC, the field
    will be updated by a recvData() call.
    Conversely, if the processor is on the sending end, the field will be read, possibly transformed,
    and sent appropriately to the other side.
  */
  void DisjointDEC::attachLocalField(MEDCouplingFieldDouble *field)
  {
    if(!isInUnion())
      return;
    ProcessorGroup* local_group;
    if (_source_group->containsMyRank())
      local_group=_source_group;
    else if (_target_group->containsMyRank())
      local_group=_target_group;
    else
      throw INTERP_KERNEL::Exception("Invalid procgroup for field attachment to DEC");
    ParaMESH *paramesh=new ParaMESH(static_cast<MEDCouplingPointSet *>(const_cast<MEDCouplingMesh *>(field->getMesh())),*local_group,field->getMesh()->getName());
    ParaFIELD *tmp=new ParaFIELD(field, paramesh, *local_group);
    tmp->setOwnSupport(true);
    attachLocalField(tmp,true);
  }
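
  // Usage sketch (hypothetical names: 'mesh', 'values' and 'dec' are assumed to
  // exist on the local side):
  //
  //   MEDCouplingFieldDouble *f = MEDCouplingFieldDouble::New(ON_CELLS, ONE_TIME);
  //   f->setMesh(mesh);
  //   f->setArray(values);
  //   dec.attachLocalField(f);   // wraps f in a ParaFIELD owned by the DEC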
  /*!
    Attaches a local field to a DEC.
    If the processor is on the receiving end of the DEC, the field
    will be updated by a recvData() call.
    Conversely, if the processor is on the sending end, the field will be read, possibly transformed, and sent appropriately to the other side.
    The field type is a generic ICoCo Field, so that the DEC can couple a number of different fields:
    - an ICoCo::MEDField, created from a MEDCoupling structure
  */
  void DisjointDEC::attachLocalField(const ICoCo::MEDField *field)
  {
    if(!isInUnion())
      return;
    if(!field)
      throw INTERP_KERNEL::Exception("DisjointDEC::attachLocalField : ICoCo::MEDField pointer is NULL !");
    attachLocalField(field->getField());
  }
  /*!
    Computes the field norm over its support
    on the source side and renormalizes the field on the target side
    so that the norms match.

    \f[
    I_{source}=\sum_{i=1}^{n_{source}}V_{i}.|\Phi^{source}_{i}|^2,
    \f]

    \f[
    I_{target}=\sum_{i=1}^{n_{target}}V_{i}.|\Phi^{target}_{i}|^2,
    \f]

    \f[
    \Phi^{target}:=\Phi^{target}.\sqrt{I_{source}/I_{target}}.
    \f]
  */
  void DisjointDEC::renormalizeTargetField(bool isWAbs)
  {
    if (_source_group->containsMyRank())
      for (int icomp=0; icomp<(int)_local_field->getField()->getArray()->getNumberOfComponents(); icomp++)
        {
          double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs);
          double source_norm = total_norm;
          // The norm computed on rank 0 of the union communicator (assumed to be
          // on the source side) is broadcast to every proc of the union.
          _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0, *dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
        }
    if (_target_group->containsMyRank())
      {
        for (int icomp=0; icomp<(int)_local_field->getField()->getArray()->getNumberOfComponents(); icomp++)
          {
            double total_norm = _local_field->getVolumeIntegral(icomp+1,isWAbs);
            double source_norm = total_norm;
            _comm_interface->broadcast(&source_norm, 1, MPI_DOUBLE, 0, *dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
            if (std::fabs(total_norm)>1e-100)
              _local_field->getField()->applyLin(source_norm/total_norm,0.0,icomp+1);
          }
      }
  }
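
  // Numerical illustration (values are made up): if the source-side volume
  // integral of a component is 4.0 and the target-side integral is 2.0, every
  // target value of that component is scaled by 4.0/2.0 = 2 through applyLin(),
  // so that both sides integrate to the same value afterwards.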
  bool DisjointDEC::isInSourceSide() const
  {
    if(!_source_group)
      return false;
    return _source_group->containsMyRank();
  }

  bool DisjointDEC::isInTargetSide() const
  {
    if(!_target_group)
      return false;
    return _target_group->containsMyRank();
  }

  bool DisjointDEC::isInUnion() const
  {
    if(!_union_group)
      return false;
    return _union_group->containsMyRank();
  }
  void DisjointDEC::compareFieldAndMethod() const
  {
    if (_local_field)
      {
        TypeOfField entity = _local_field->getField()->getTypeOfField();
        if ( getMethod() == "P0" )
          {
            if ( entity != ON_CELLS )
              throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch."
                                             " For P0 interpolation, field must be on MED_CELL's");
          }
        else if ( getMethod() == "P1" )
          {
            if ( entity != ON_NODES )
              throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch."
                                             " For P1 interpolation, field must be on MED_NODE's");
          }
        else if ( getMethod() == "P1d" )
          {
            if ( entity != ON_CELLS )
              throw INTERP_KERNEL::Exception("Field support and interpolation method mismatch."
                                             " For P1d interpolation, field must be on MED_CELL's");
            if ( _target_group->containsMyRank() )
              throw INTERP_KERNEL::Exception("Projection to P1d field not supported");
          }
        else
          throw INTERP_KERNEL::Exception("Unknown interpolation method. Possible methods: P0, P1, P1d");
      }
  }
  /*!
    If way==true, source procs call sendData() and target procs call recvData().
    If way==false, it is the other way round.
  */
  void DisjointDEC::sendRecvData(bool way)
  {
    if(isInSourceSide())
      {
        if(way)
          sendData();
        else
          recvData();
      }
    else if(isInTargetSide())
      {
        if(way)
          recvData();
        else
          sendData();
      }
  }
}
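
// Typical calling sequence (sketch; 'dec' is an instance of a concrete DEC
// subclass, already built with a field attached on each side):
//
//   dec.synchronize();     // prepares the exchange (assumed DEC API)
//   dec.sendRecvData();    // way==true: sources send, targets receive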