1 // Copyright (C) 2007-2013 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
19 // File : MEDSPLITTER_ParaDomainSelector.cxx
20 // Created : Wed Jun 24 12:39:59 2009
21 // Author : Edward AGAPOV (eap)
23 #include "MEDSPLITTER_ParaDomainSelector.hxx"
25 #include "MEDSPLITTER_UserGraph.hxx"
26 #include "MEDSPLITTER_JointExchangeData.hxx"
28 #include <MEDMEM_Meshing.hxx>
29 #include <MEDMEM_DriversDef.hxx>
39 #include <sys/sysinfo.h>
42 using namespace MEDSPLITTER;
43 using namespace MED_EN;
46 //================================================================================
48 * \brief Constructor. Find out my rank and world size
50 //================================================================================
// Constructor: initialize rank/size from MPI_COMM_WORLD and record the
// construction wall-clock time so getPassedTime() can report elapsed time.
// NOTE(review): this view of the file is gapped — braces and possibly an
// evaluateMemory() call are on lines not visible here; confirm in full source.
52 ParaDomainSelector::ParaDomainSelector(bool mesure_memory)
53 :_rank(0),_world_size(1), _nb_result_domains(-1), _mesure_memory(mesure_memory),
54 _init_time(0.0), _init_memory(0), _max_memory(0)
// Ask MPI for the communicator size and this process' rank.
57 MPI_Comm_size(MPI_COMM_WORLD,&_world_size) ;
58 MPI_Comm_rank(MPI_COMM_WORLD,&_rank) ;
// Reference point for getPassedTime().
59 _init_time = MPI_Wtime();
64 //================================================================================
68 //================================================================================
// Destructor. Its body is not visible in this excerpt — presumably empty;
// TODO(review): confirm against the complete source.
70 ParaDomainSelector::~ParaDomainSelector()
74 //================================================================================
76 * \brief Return true if the application is running on different hosts
78 //================================================================================
// Detect whether the MPI processes run on distinct hosts by exchanging
// processor names around a ring: each proc sends its name to the next proc
// and receives the name of the previous one; any mismatch means at least
// two different hosts. With a single process the question is moot.
80 bool ParaDomainSelector::isOnDifferentHosts() const
// Trivially false when running on one process (early return on a hidden line).
83 if ( _world_size < 2 )
87 char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
// NOTE(review): 'size' (and later 'tag'/'status') are declared on lines
// missing from this excerpt.
89 MPI_Get_processor_name( name_here, &size);
// Ring neighbours: send to rank+1, receive from rank-1 (both modulo nbProcs()).
91 int next_proc = (rank() + 1) % nbProcs();
92 int prev_proc = (rank() - 1 + nbProcs()) % nbProcs();
96 MPI_Sendrecv((void*)&name_here[0], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, next_proc, tag,
97 (void*)&name_there[0], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, prev_proc, tag,
98 MPI_COMM_WORLD, &status);
// Differing neighbour name => processes are spread over several hosts.
99 return string(name_here) != string(name_there);
105 //================================================================================
107 * \brief Return true if the domain with domainIndex is to be loaded on this proc
108 * \param domainIndex - index of mesh domain
109 * \retval bool - to load or not
111 //================================================================================
// True when the domain with the given index is assigned to this process,
// i.e. when our MPI rank matches the round-robin owner of the domain.
113 bool ParaDomainSelector::isMyDomain(int domainIndex) const
116 return (_rank == getProccessorID( domainIndex ));
119 //================================================================================
121 * \brief Return processor id where the domain with domainIndex resides
122 * \param domainIndex - index of mesh domain
123 * \retval int - processor id
125 //================================================================================
// Map a domain index to the rank that owns it: plain round-robin
// distribution of domains over the _world_size processes.
127 int ParaDomainSelector::getProccessorID(int domainIndex) const
130 return ( domainIndex % _world_size );
133 //================================================================================
135 * \brief Gather info on nb of entities on each processor and return total nb.
138 * 1) for MED_CELL to know global id shift for domains at graph construction;
139 * 2) for sub-entity to know total nb of sub-entities before creating those of joints
141 //================================================================================
// Gather, over all processes, the number of elements of 'entity' in every
// domain mesh, and build the per-domain global-id shift table
// (_cell_shift_by_domain or _face_shift_by_domain). For MED_CELL it also
// fills _nb_vert_of_procs, the 1-based CSR-style vertex distribution used
// later as the vtxdist argument of the graph partitioner.
// NOTE(review): the final 'return total_nb;' is on a line missing from this
// excerpt — confirm in full source.
143 int ParaDomainSelector::gatherNbOf(MED_EN::medEntityMesh entity,
144 const vector<MEDMEM::MESH*>& domain_meshes)
148 // get nb of elems of each domain mesh
// Only locally-loaded domains have a mesh pointer; others contribute 0.
149 int nb_domains = domain_meshes.size();
150 vector<int> nb_elems( nb_domains, 0 );
151 for ( int i = 0; i < nb_domains; ++i )
152 if ( domain_meshes[i] )
153 nb_elems[i] = domain_meshes[i]->getNumberOfElements(entity, MED_ALL_ELEMENTS);
155 // receive nb of elems from other procs
// Sum over ranks: each domain is counted by exactly one process, so the
// SUM reduction yields every domain's size on every process.
156 vector<int> all_nb_elems( nb_domains );
158 MPI_Allreduce((void*)&nb_elems[0], (void*)&all_nb_elems[0], nb_domains,
159 MPI_INT, MPI_SUM, MPI_COMM_WORLD);
161 int total_nb = std::accumulate( all_nb_elems.begin(), all_nb_elems.end(), 0 );
// Pick the shift table to fill depending on the entity kind.
163 vector<int>& elem_shift_by_domain
164 = (entity == MED_CELL) ? _cell_shift_by_domain : _face_shift_by_domain;
166 // fill elem_shift_by_domain
// Domains are ordered proc-by-proc (all domains of proc 0 first, then
// proc 1, ...); ordered_nbs accumulates element counts in that order so
// each domain's shift is the number of elements in all preceding domains.
168 vector< int > ordered_nbs, domain_order( nb_domains );
169 ordered_nbs.push_back(0);
170 for ( int iproc = 0; iproc < nbProcs(); ++iproc )
171 for ( int idomain = 0; idomain < nb_domains; ++idomain )
172 if ( getProccessorID( idomain ) == iproc )
174 domain_order[ idomain ] = ordered_nbs.size() - 1;
175 ordered_nbs.push_back( ordered_nbs.back() + all_nb_elems[idomain] );
177 elem_shift_by_domain.resize( nb_domains+1, 0 );
178 for ( int idomain = 0; idomain < nb_domains; ++idomain )
179 elem_shift_by_domain[ idomain ] = ordered_nbs[ domain_order[ idomain ]];
181 elem_shift_by_domain.back() = ordered_nbs.back(); // to know total nb of elements
183 if ( entity == MED_CELL )
185 // fill _nb_vert_of_procs
// First accumulate per-proc cell counts into slots [1..world_size] ...
186 _nb_vert_of_procs.resize( _world_size+1, 0 );
187 for ( int i = 0; i < nb_domains; ++i )
189 int rank = getProccessorID( i );
190 _nb_vert_of_procs[ rank+1 ] += all_nb_elems[ i ];
// ... then prefix-sum starting from 1 so the array becomes a 1-based
// CSR-style displacement table (see getNbVertOfProcs()/getProcShift()).
192 _nb_vert_of_procs[0] = 1; // base = 1
193 for ( int i = 1; i < _nb_vert_of_procs.size(); ++i )
194 _nb_vert_of_procs[ i ] += _nb_vert_of_procs[ i-1 ]; // to CSR format
198 // to compute global ids of faces in joints
199 //_total_nb_faces = total_nb;
// Debug dumps kept for reference (disabled):
203 // MEDMEM::STRING out("_nb_vert_of_procs :");
204 // for ( int i = 0; i < _nb_vert_of_procs.size(); ++i )
205 // out << _nb_vert_of_procs[i] << " ";
206 // std::cout << out << std::endl;
209 // MEDMEM::STRING out("elem_shift_by_domain :");
210 // for ( int i = 0; i < elem_shift_by_domain.size(); ++i )
211 // out << elem_shift_by_domain[i] << " ";
212 // std::cout << out << std::endl;
220 //================================================================================
222 * \brief Return distribution of the graph vertices among the processors
223 * \retval int* - array containing nb of vertices on all processors
225 * gatherNbOf( MED_CELL ) must be called before.
226 * The result array is to be used as the first arg of ParMETIS_V3_PartKway() and
227 * is freed by ParaDomainSelector.
229 //================================================================================
// Helper macro: throw a MED_EXCEPTION stating that gatherNbOf( MED_CELL )
// is a precondition of method 'meth' (the method name is stringized into
// the message).
231 #define gatherNbOf_NOT_CALLED(meth) throw MED_EXCEPTION \
232 ("ParaDomainSelector::" #meth "(): gatherNbOf( MED_CELL ) must be called before")
// Return a pointer to the internal vertex-distribution array filled by
// gatherNbOf( MED_CELL ) (throws via the macro above if not yet filled).
// The const_cast-style C cast exposes the vector's buffer as mutable int*
// because the partitioner API takes a non-const pointer; the array remains
// owned by this object — callers must not free it.
234 int* ParaDomainSelector::getNbVertOfProcs() const
237 if ( _nb_vert_of_procs.empty() )
238 gatherNbOf_NOT_CALLED(getNbVertOfProcs);
240 return (int*) & _nb_vert_of_procs[0];
242 //================================================================================
244 * \brief Return nb of cells in domains with lower index.
246 * gatherNbOf( MED_CELL ) must be called before.
247 * Result added to local id on given domain gives id in the whole distributed mesh
249 //================================================================================
// Number of cells in all domains preceding domainIndex (in proc order),
// i.e. the offset that converts a local cell id of that domain into a
// global id. Requires gatherNbOf( MED_CELL ) to have been called.
251 int ParaDomainSelector::getDomainShift(int domainIndex) const
254 if ( _cell_shift_by_domain.empty() )
255 gatherNbOf_NOT_CALLED(getDomainShift);
257 return _cell_shift_by_domain[ domainIndex ];
260 //================================================================================
262 * \brief Return nb of cells on processors with lower rank.
264 * gatherNbOf( MED_CELL ) must be called before.
265 * Result added to global id on this processor gives id in the whole distributed mesh
267 //================================================================================
// Number of cells on all processes with a lower rank than this one.
// _nb_vert_of_procs is 1-based (see gatherNbOf), hence the "-1".
// Requires gatherNbOf( MED_CELL ) to have been called.
269 int ParaDomainSelector::getProcShift() const
272 if ( _nb_vert_of_procs.empty() )
273 gatherNbOf_NOT_CALLED(getProcShift);
275 return _nb_vert_of_procs[_rank]-1;
278 //================================================================================
280 * \brief Gather graphs from all processors into one
282 //================================================================================
// Assemble the complete distributed graph (CSR index + values + partition
// array) on every process via three MPI_Allgatherv calls, and wrap the
// result in a UserGraph. Ownership of the gathered buffers passes to the
// MEDSKYLINEARRAY ('true' flag) / UserGraph; the caller owns the returned
// auto_ptr<Graph>.
// NOTE(review): several lines (braces, the MPI datatype arguments of the
// Allgatherv calls, an 'else' around the proc loop) are missing from this
// excerpt — confirm details in the full source.
284 auto_ptr<Graph> ParaDomainSelector::gatherGraph(const Graph* graph) const
286 Graph* glob_graph = 0;
// Per-proc index lengths derived from the 1-based _nb_vert_of_procs table.
295 vector<int> index_size_of_proc( nbProcs() ); // index sizes - 1
296 for ( int i = 1; i < _nb_vert_of_procs.size(); ++i )
297 index_size_of_proc[i-1] = _nb_vert_of_procs[ i ] - _nb_vert_of_procs[ i-1 ];
// Global CSR index has one entry per cell plus the leading 0/1 slot.
299 int index_size = 1 + _cell_shift_by_domain.back();
300 int* graph_index = new int[ index_size ];
301 const int* index = graph->getGraph()->getIndex();
// Reuse _nb_vert_of_procs directly as receive displacements (cast away
// const; the MPI call only reads it).
302 int* proc_index_displacement = (int*) & _nb_vert_of_procs[0];
304 MPI_Allgatherv((void*) (index+1), // send local index except first 0 (or 1)
305 index_size_of_proc[_rank], // index size on this proc
307 (void*) graph_index, // receive indices
308 & index_size_of_proc[0], // index size on each proc
309 proc_index_displacement, // displacement of each proc index
312 graph_index[0] = index[0]; // it is not overwritten thanks to proc_index_displacement[0]==1
// Derive each proc's value-array length from the indices just gathered.
314 // get sizes of graph values on each proc by the gathered graph indices
315 vector< int > value_size_of_proc( nbProcs() ), proc_value_displacement(1,0);
316 for ( int i = 0; i < nbProcs(); ++i )
318 if ( index_size_of_proc[i] > 0 )
319 value_size_of_proc[i] = graph_index[ proc_index_displacement[ i+1 ]-1 ] - graph_index[0];
321 value_size_of_proc[i] = 0;
322 proc_value_displacement.push_back( proc_value_displacement.back() + value_size_of_proc[i] );
// Each proc's index entries are relative to its own value array; shift
// them so the concatenated index addresses the concatenated value array.
325 // update graph_index
326 for ( int i = 1; i < nbProcs(); ++i )
328 int shift = graph_index[ proc_index_displacement[i]-1 ]-graph_index[0];
329 for ( int j = proc_index_displacement[i]; j < proc_index_displacement[i+1]; ++j )
330 graph_index[ j ] += shift;
// Gather the adjacency values themselves.
337 int value_size = graph_index[ index_size-1 ] - graph_index[ 0 ];
338 int* graph_value = new int[ value_size ];
339 const int* value = graph->getGraph()->getValue();
341 MPI_Allgatherv((void*) value, // send local value
342 value_size_of_proc[_rank], // value size on this proc
344 (void*) graph_value, // receive values
345 & value_size_of_proc[0], // value size on each proc
346 & proc_value_displacement[0], // displacement of each proc value
// Gather the partition array (one entry per cell).
354 int * partition = new int[ _cell_shift_by_domain.back() ];
355 const int* part = graph->getPart();
357 MPI_Allgatherv((void*) part, // send local partition
358 index_size_of_proc[_rank], // index size on this proc
360 (void*)(partition-1), // -1 compensates proc_index_displacement[0]==1
361 & index_size_of_proc[0], // index size on each proc
362 proc_index_displacement, // displacement of each proc index
// 'true' => MEDSKYLINEARRAY takes ownership of graph_index/graph_value.
370 MEDMEM::MEDSKYLINEARRAY* array =
371 new MEDMEM::MEDSKYLINEARRAY( index_size-1, value_size, graph_index, graph_value, true );
373 glob_graph = new UserGraph( array, partition, index_size-1 );
381 return auto_ptr<Graph>( glob_graph );
384 //================================================================================
386 * \brief Sets global numbering for the entity.
388 * This method must be once called for MED_CELL before exchangeJoint() calls
390 //================================================================================
// Make every process know, for each domain, how many elements of each
// geometric type of 'entity' it has: locally-owned counts are spread via
// MPI_Allreduce(SUM), then type/count info (and the common space dimension)
// is pushed into the MESHING objects that stand for remote domains.
// NOTE(review): loop/if braces are on lines missing from this excerpt.
392 void ParaDomainSelector::gatherEntityTypesInfo(vector<MEDMEM::MESH*>& domain_meshes,
393 MED_EN::medEntityMesh entity)
// All geometric types possible for this entity kind (global MED_EN table).
395 const list<medGeometryElement> & all_types = meshEntities[ entity ];
399 // Put nb of elements of the entity of all domains in one vector
400 // and by the way find out mesh dimension
// Flat layout: nb_of_type[idomain * nTypes + t]; zeros for remote domains.
402 vector<int> nb_of_type( domain_meshes.size() * all_types.size(), 0 );
403 int mesh_dim = -1, space_dim = -1;
405 for ( int idomain = 0; idomain < domain_meshes.size(); ++idomain )
407 if ( !isMyDomain(idomain)) continue;
409 int* domain_nbs = & nb_of_type[ idomain * all_types.size()];
411 list<medGeometryElement>::const_iterator type = all_types.begin();
412 for ( int t = 0; type != all_types.end(); ++t,++type )
413 domain_nbs[t] = domain_meshes[idomain]->getNumberOfElements(entity,*type);
// Track the largest sensible (<=3) mesh/space dimension over our domains.
415 int i_mesh_dim = domain_meshes[idomain]->getMeshDimension();
416 int i_space_dim = domain_meshes[idomain]->getSpaceDimension();
417 if ( mesh_dim < i_mesh_dim && i_mesh_dim <= 3 )
418 mesh_dim = i_mesh_dim;
419 if ( space_dim < i_space_dim && i_space_dim <= 3 )
420 space_dim = i_space_dim;
423 // Receive nbs from other procs
// SUM works because each domain slot is non-zero on exactly one process.
425 vector< int > nb_recv( nb_of_type.size() );
427 MPI_Allreduce((void*)&nb_of_type[0], (void*)&nb_recv[0], nb_of_type.size(),
428 MPI_INT, MPI_SUM, MPI_COMM_WORLD);
431 // Set info to meshes of distant domains
433 for ( int idomain = 0; idomain < domain_meshes.size(); ++idomain )
435 if ( isMyDomain(idomain)) continue;
// Remote placeholders are MESHING instances; downcast to set meta-data.
437 MEDMEM::MESHING* meshing = (MEDMEM::MESHING*) domain_meshes[idomain];
438 if ( meshing->getMeshDimension() < mesh_dim )
440 //meshing->setSpaceDimension( space_dim );
// Set dimension via empty coordinates (0 nodes) rather than real data.
441 meshing->setCoordinates( space_dim, /*NumberOfNodes=*/0, 0, "", 0);
// Keep only the types actually present in this remote domain.
444 vector< medGeometryElement > types;
445 vector< int > nb_elems;
447 int* domain_nbs = & nb_recv[ idomain * all_types.size()];
449 list<medGeometryElement>::const_iterator type = all_types.begin();
450 for ( int t = 0; type != all_types.end(); ++t,++type )
452 if ( domain_nbs[t]==0 ) continue;
453 types.push_back( *type );
454 nb_elems.push_back( domain_nbs[t] );
456 meshing->setNumberOfTypes( types.size(), entity );
457 if ( !types.empty() )
459 meshing->setTypes ( & types[0], entity );
460 meshing->setNumberOfElements( & nb_elems[0], entity );
466 //================================================================================
468 * \brief Set nb of cell/cell pairs in a joint between domains
470 //================================================================================
// Record the number of cell/cell pairs of the joint between loc_domain and
// dist_domain, lazily allocating the flat per-joint table. The guard makes
// each joint stored by one side only (the owner of dist_domain, or the
// lower-index side when both domains live on this proc), which
// gatherNbCellPairs() later verifies.
472 void ParaDomainSelector::setNbCellPairs( int nb_cell_pairs, int dist_domain, int loc_domain )
474 // This method is needed for further computing global numbers of faces in joint.
475 // Store if both domains are on this proc else on one of procs only
476 if ( isMyDomain( dist_domain ) || dist_domain < loc_domain )
478 if ( _nb_cell_pairs_by_joint.empty() )
479 _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);
481 int joint_id = jointId( loc_domain, dist_domain );
482 _nb_cell_pairs_by_joint[ joint_id ] = nb_cell_pairs;
487 //================================================================================
489 * \brief Return nb of cell/cell pairs in a joint between domains on different procs
491 //================================================================================
// Look up the stored number of cell/cell pairs for the joint between the
// two domains (symmetric thanks to jointId()'s ordering of its arguments).
493 int ParaDomainSelector::getNbCellPairs( int dist_domain, int loc_domain ) const
497 int joint_id = jointId( loc_domain, dist_domain );
498 return _nb_cell_pairs_by_joint[ joint_id ];
501 //================================================================================
503 * \brief Gather size of each joint
505 //================================================================================
// Combine the per-joint cell-pair counts of all processes with an
// all-reduce SUM, then verify the invariant that every joint was counted
// on exactly one process (otherwise the summed value differs from the
// locally stored one and an exception is raised).
507 void ParaDomainSelector::gatherNbCellPairs()
509 const char* LOC = "MEDSPLITTER::ParaDomainSelector::gatherNbCellPairs(): ";
// Procs that never called setNbCellPairs() still need a zero-filled buffer.
510 if ( _nb_cell_pairs_by_joint.empty() )
511 _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);
// Keep a copy of our contribution to validate the reduction afterwards.
514 vector< int > send_buf = _nb_cell_pairs_by_joint;
516 MPI_Allreduce((void*)&send_buf[0],
517 (void*)&_nb_cell_pairs_by_joint[0],
518 _nb_cell_pairs_by_joint.size(),
519 MPI_INT, MPI_SUM, MPI_COMM_WORLD);
521 // check that the set nbs of cell pairs are correct,
522 // namely that each joint is treated on one proc only
523 for ( int j = 0; j < _nb_cell_pairs_by_joint.size(); ++j )
524 if ( _nb_cell_pairs_by_joint[j] != send_buf[j] && send_buf[j]>0 )
525 throw MED_EXCEPTION(MEDMEM::STRING(LOC) << "invalid nb of cell pairs");
528 //================================================================================
530 * \brief Send-receive joint data
532 //================================================================================
// Swap serialized joint data with the process owning the distant domain:
// serialize() fills send_data and returns the expected receive size, a
// joint-specific tag keeps concurrent exchanges apart, and the received
// buffer is deserialized back into the joint.
// NOTE(review): the MPI_Status declaration is on a line missing from this
// excerpt.
534 void ParaDomainSelector::exchangeJoint( JointExchangeData* joint ) const
// recv_data is sized by serialize()'s return value.
537 vector<int> send_data, recv_data( joint->serialize( send_data ));
539 int dest = getProccessorID( joint->distantDomain() );
// Tag base 1001 + unique joint id => distinct tag per joint.
540 int tag = 1001 + jointId( joint->localDomain(), joint->distantDomain() );
543 MPI_Sendrecv((void*)&send_data[0], send_data.size(), MPI_INT, dest, tag,
544 (void*)&recv_data[0], recv_data.size(), MPI_INT, dest, tag,
545 MPI_COMM_WORLD, &status);
547 joint->deserialize( recv_data );
551 //================================================================================
553 * \brief Return the first global id of sub-entity for the joint
555 //================================================================================
// First global id available for the sub-entities (faces) created for the
// given joint: 1 + total pre-existing faces + the pair counts of all
// joints with a smaller joint id. Requires gatherNbCellPairs().
// (Note: "Fisrt" typo is part of the public name and must stay.)
// NOTE(review): the trailing 'return id;' is on a line missing from this
// excerpt — confirm in full source.
557 int ParaDomainSelector::getFisrtGlobalIdOfSubentity( int loc_domain, int dist_domain ) const
559 // total_nb_faces includes faces existing before creation of joint faces
560 // (got in gatherNbOf( MED_FACE )).
563 int total_nb_faces = _face_shift_by_domain.empty() ? 0 : _face_shift_by_domain.back();
564 int id = total_nb_faces + 1;
566 if ( _nb_cell_pairs_by_joint.empty() )
567 throw MED_EXCEPTION("MEDSPLITTER::ParaDomainSelector::getFisrtGlobalIdOfSubentity(), "
568 "gatherNbCellPairs() must be called before");
// Skip over the id ranges reserved for all preceding joints.
569 int joint_id = jointId( loc_domain, dist_domain );
570 for ( int j = 0; j < joint_id; ++j )
571 id += _nb_cell_pairs_by_joint[ j ];
576 //================================================================================
578 * \brief Send-receive local ids of joint faces
580 //================================================================================
// Swap local sub-entity (joint face) ids with the proc owning dist_domain;
// both sides are expected to exchange arrays of equal length. The returned
// buffer is heap-allocated and owned by the caller (delete[]).
// NOTE(review): the MPI_Status declaration and the 'return loc_ids_dist;'
// are on lines missing from this excerpt.
582 int* ParaDomainSelector::exchangeSubentityIds( int loc_domain, int dist_domain,
583 const vector<int>& loc_ids_here ) const
585 int* loc_ids_dist = new int[ loc_ids_here.size()];
587 int dest = getProccessorID( dist_domain );
// Tag base 2002 + unique joint id, distinct from exchangeJoint()'s 1001 base.
588 int tag = 2002 + jointId( loc_domain, dist_domain );
590 MPI_Sendrecv((void*)&loc_ids_here[0], loc_ids_here.size(), MPI_INT, dest, tag,
591 (void*) loc_ids_dist, loc_ids_here.size(), MPI_INT, dest, tag,
592 MPI_COMM_WORLD, &status);
599 //================================================================================
601 * \brief Return identifier for a joint
603 //================================================================================
// Symmetric identifier of the joint between two domains: the pair is
// ordered (larger index first) so jointId(a,b) == jointId(b,a), then
// flattened as row*_nb_result_domains+col. Requires setNbDomains() to have
// set _nb_result_domains.
605 int ParaDomainSelector::jointId( int local_domain, int distant_domain ) const
608 if (_nb_result_domains < 0)
609 throw MED_EXCEPTION("ParaDomainSelector::jointId(): setNbDomains() must be called before()");
611 if ( local_domain < distant_domain )
612 swap( local_domain, distant_domain );
613 return local_domain * _nb_result_domains + distant_domain;
616 //================================================================================
618 * \brief Return domain order so that first go domains on proc 0 and so on
620 //================================================================================
622 // int ParaDomainSelector::getDomianOrder(int idomain, int nb_domains) const
624 // return nb_domains / nbProcs() * getProccessorID( idomain ) + idomain / nbProcs();
627 //================================================================================
629 * \brief Return time passed from construction in seconds
631 //================================================================================
// Seconds of wall-clock time elapsed since this object's construction
// (MPI_Wtime() now minus the stamp taken in the constructor).
633 double ParaDomainSelector::getPassedTime() const
636 return MPI_Wtime() - _init_time;
642 //================================================================================
644 * \brief Evaluate current memory usage and return the maximal one in KB
646 //================================================================================
// If memory measurement is enabled, sample current system memory usage via
// sysinfo(2) (RAM + swap in use, in KB), update the running maximum, and
// return the growth since the first sample. The casts away from const let
// this logically-const probe update the cached _max_memory/_init_memory.
// NOTE(review): several lines (braces, the 'struct sysinfo si;' and
// 'used_memory' declarations, the first-call condition guarding line 664)
// are missing from this excerpt — confirm in full source.
648 int ParaDomainSelector::evaluateMemory() const
650 if ( _mesure_memory )
655 int err = sysinfo( &si );
// (totalram - freeram) + (totalswap - freeswap), scaled by mem_unit, in KB.
658 (( si.totalram - si.freeram + si.totalswap - si.freeswap ) * si.mem_unit ) / 1024;
660 if ( used_memory > _max_memory )
661 ((ParaDomainSelector*) this)->_max_memory = used_memory;
664 ((ParaDomainSelector*) this)->_init_memory = used_memory;
666 return _max_memory - _init_memory;