// Copyright (C) 2007-2021  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
#include "MEDPARTITIONER_ParaDomainSelector.hxx"
#include "MEDPARTITIONER_UserGraph.hxx"
#include "MEDPARTITIONER_Utils.hxx"

#include "MEDCouplingUMesh.hxx"
#include "MEDCouplingSkyLineArray.hxx"
#include "MCIdType.hxx"

#include <algorithm>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#ifdef HAVE_MPI
#include <mpi.h>
#endif

// needed for the unqualified MCAuto / DataArrayInt / ToIdType uses below
using namespace MEDCoupling;
#ifndef MEDCOUPLING_USE_64BIT_IDS
#define MPI_ID_TYPE MPI_INT
#else
#define MPI_ID_TYPE MPI_LONG
#endif
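// MPI_ID_TYPE is the MPI datatype used whenever an mcIdType buffer is exchanged:
// 32-bit id builds map mcIdType to int (MPI_INT), 64-bit builds to long (MPI_LONG),
// so the counts and displacements stay consistent in all collective calls below.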
/*!
 * \brief Constructor. Find out my rank and world size
 */
MEDPARTITIONER::ParaDomainSelector::ParaDomainSelector(bool mesure_memory)
  :_rank(0),_world_size(1), _nb_result_domains(-1), _init_time(0.0),
   _mesure_memory(mesure_memory), _init_memory(0), _max_memory(0)
{
#ifdef HAVE_MPI
  if (MyGlobals::_Rank==-1)
    {
      MPI_Init(0,0); //do once only
      MPI_Comm_size(MPI_COMM_WORLD,&_world_size);
      MPI_Comm_rank(MPI_COMM_WORLD,&_rank);
    }
  else
    {
      _world_size=MyGlobals::_World_Size;
      _rank=MyGlobals::_Rank;
    }
  _init_time = MPI_Wtime();
#else
  if (MyGlobals::_Verbose>10)
    std::cout << "WARNING : ParaDomainSelector constructor without parallel_mode World_Size=1 by default" << std::endl;
#endif

  MyGlobals::_World_Size=_world_size;
  MyGlobals::_Rank=_rank;

  if (MyGlobals::_Verbose>200) std::cout << "proc " << MyGlobals::_Rank << " of " << MyGlobals::_World_Size << std::endl;
}
MEDPARTITIONER::ParaDomainSelector::~ParaDomainSelector()
{
}
/*!
 * \brief Return true if the processes are running on different hosts
 */
bool MEDPARTITIONER::ParaDomainSelector::isOnDifferentHosts() const
{
  if ( _world_size < 2 ) return false;
#ifdef HAVE_MPI
  char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
  int size;
  MPI_Get_processor_name( name_here, &size);

  int next_proc = (rank() + 1) % nbProcs();
  int prev_proc = (rank() - 1 + nbProcs()) % nbProcs();
  int tag = 1111111; //any value works, it only has to match on sender and receiver side
  MPI_Status status;
  MPI_Sendrecv((void*)&name_here[0],  MPI_MAX_PROCESSOR_NAME, MPI_CHAR, next_proc, tag,
               (void*)&name_there[0], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, prev_proc, tag,
               MPI_COMM_WORLD, &status);

  //bug: (isOnDifferentHosts here and there) is not (isOnDifferentHosts somewhere)
  //return string(name_here) != string(name_there);

  int same = 1, sum_same = 0;
  if (std::string(name_here) != std::string(name_there))
    same = 0;
  MPI_Allreduce( &same, &sum_same, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
  return (sum_same != nbProcs());
#else
  return false;
#endif
}
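// The check above is collective: every proc compares its own host name with the one
// received from its ring predecessor, and the MPI_Allreduce counts how many procs saw
// an identical name. Only if all nbProcs() comparisons match can the job be assumed to
// run on a single host; a mismatch anywhere makes the function return true on every proc.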
/*!
 * \brief Return true if the domain with domainIndex is to be loaded on this proc
 * \param domainIndex - index of mesh domain
 * \retval bool - to load or not
 */
bool MEDPARTITIONER::ParaDomainSelector::isMyDomain(int domainIndex) const
{
  return (_rank == getProcessorID( domainIndex ));
}
/*!
 * \brief Return processor id where the domain with domainIndex resides
 * \param domainIndex - index of mesh domain
 * \retval int - processor id
 */
int MEDPARTITIONER::ParaDomainSelector::getProcessorID(int domainIndex) const
{
  return ( domainIndex % _world_size );
}
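// Domains are thus distributed cyclically over the procs. Illustrative example (numbers
// invented): with _world_size==3, domains 0..6 land on procs 0,1,2,0,1,2,0.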
/*!
 * \brief Gather info on nb of cell entities on each processor and return total nb.
 *
 * Is called
 * 1) for MED_CELL to know global id shift for domains at graph construction;
 * 2) for sub-entity to know total nb of sub-entities before creating those of joints
 */
void MEDPARTITIONER::ParaDomainSelector::gatherNbOf(const std::vector<MEDCoupling::MEDCouplingUMesh*>& domain_meshes)
{
  // get nb of elems of each domain mesh
  int nb_domains=(int)domain_meshes.size();
  std::vector<mcIdType> nb_elems(nb_domains*2, 0); //NumberOfCells & NumberOfNodes
  for (int i=0; i<nb_domains; ++i)
    if ( domain_meshes[i] )
      {
        nb_elems[i*2] = domain_meshes[i]->getNumberOfCells();
        nb_elems[i*2+1] = domain_meshes[i]->getNumberOfNodes();
      }
  // receive nb of elems from other procs
  std::vector<mcIdType> all_nb_elems;
  if (MyGlobals::_World_Size==1)
    {
      all_nb_elems=nb_elems;
    }
  else
    {
#ifdef HAVE_MPI
      all_nb_elems.resize( nb_domains*2 );
      MPI_Allreduce((void*)&nb_elems[0], (void*)&all_nb_elems[0], nb_domains*2, MPI_ID_TYPE, MPI_SUM, MPI_COMM_WORLD);
#else
      throw INTERP_KERNEL::Exception("not(HAVE_MPI) incompatible with MPI_World_Size>1");
#endif
    }
  mcIdType total_nb_cells=0, total_nb_nodes=0;
  for (int i=0; i<nb_domains; ++i)
    {
      total_nb_cells+=all_nb_elems[i*2];
      total_nb_nodes+=all_nb_elems[i*2+1];
    }

  if (MyGlobals::_Is0verbose>10)
    std::cout << "totalNbCells " << total_nb_cells << " totalNbNodes " << total_nb_nodes << std::endl;

  std::vector<mcIdType>& cell_shift_by_domain=_cell_shift_by_domain;
  std::vector<mcIdType>& node_shift_by_domain=_node_shift_by_domain;
  std::vector<mcIdType>& face_shift_by_domain=_face_shift_by_domain;

  std::vector< mcIdType > ordered_nbs_cell, ordered_nbs_node, domain_order(nb_domains);
  ordered_nbs_cell.push_back(0);
  ordered_nbs_node.push_back(0);
  for (int iproc=0; iproc<nbProcs(); ++iproc)
    for (int idomain=0; idomain<nb_domains; ++idomain)
      if (getProcessorID( idomain )==iproc)
        {
          domain_order[idomain] = ToIdType( ordered_nbs_cell.size() - 1 );
          ordered_nbs_cell.push_back( ordered_nbs_cell.back() + all_nb_elems[idomain*2] );
          ordered_nbs_node.push_back( ordered_nbs_node.back() + all_nb_elems[idomain*2+1] );
        }
  cell_shift_by_domain.resize( nb_domains+1, 0 );
  node_shift_by_domain.resize( nb_domains+1, 0 );
  face_shift_by_domain.resize( nb_domains+1, 0 );
  for (int idomain=0; idomain<nb_domains; ++idomain)
    {
      cell_shift_by_domain[ idomain ] = ordered_nbs_cell[ domain_order[ idomain ]];
      node_shift_by_domain[ idomain ] = ordered_nbs_node[ domain_order[ idomain ]];
    }
  cell_shift_by_domain.back() = ordered_nbs_cell.back(); // to know total nb of elements
  node_shift_by_domain.back() = ordered_nbs_node.back(); // to know total nb of elements

  if (MyGlobals::_Is0verbose>300)
    {
      std::cout << "proc " << MyGlobals::_Rank << " : cellShiftByDomain ";
      for (int i=0; i<=nb_domains; ++i)
        std::cout << cell_shift_by_domain[i] << "|";
      std::cout << std::endl;
      std::cout << "proc " << MyGlobals::_Rank << " : nodeShiftByDomain ";
      for (int i=0; i<=nb_domains; ++i)
        std::cout << node_shift_by_domain[i] << "|";
      std::cout << std::endl;
    }
  // fill _nb_vert_of_procs (is Vtxdist)
  _nb_vert_of_procs.resize(_world_size+1);
  _nb_vert_of_procs[0] = 0; // base = 0
  for (int i=0; i<nb_domains; ++i)
    {
      int rankk = getProcessorID(i);
      _nb_vert_of_procs[rankk+1] += all_nb_elems[i*2];
    }
  for (std::size_t i=1; i<_nb_vert_of_procs.size(); ++i)
    _nb_vert_of_procs[i] += _nb_vert_of_procs[i-1]; // to CSR format : cumulated

  if (MyGlobals::_Is0verbose>200)
    {
      std::cout << "proc " << MyGlobals::_Rank << " : gatherNbOf : vtxdist is ";
      for (int i = 0; i <= _world_size; ++i)
        std::cout << _nb_vert_of_procs[i] << " ";
      std::cout << std::endl;
    }
}
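// Illustrative example (numbers invented, not from the original sources):
// 4 domains on 2 procs, getProcessorID(i)==i%2, cells per domain = 10,20,30,40.
// Proc 0 owns domains 0 and 2, proc 1 owns domains 1 and 3, so the proc-ordered
// cell counts are 10,30,20,40 and
//   _cell_shift_by_domain = [0, 40, 10, 60, 100]   (last entry = total nb of cells)
//   _nb_vert_of_procs     = [0, 40, 100]           (vtxdist: 40 cells on proc 0, 60 on proc 1)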
/*!
 * \brief Return distribution of the graph vertices among the processors
 * \retval mcIdType* - array containing nb of vertices (=cells) on all processors
 *
 * gatherNbOf() must be called before.
 * The result array is to be used as the first arg of ParMETIS_V3_PartKway() and
 * is freed by ParaDomainSelector.
 */
mcIdType *MEDPARTITIONER::ParaDomainSelector::getProcVtxdist() const
{
  if (_nb_vert_of_procs.empty())
    throw INTERP_KERNEL::Exception("_nb_vert_of_procs not set");
  return const_cast<mcIdType*>(& _nb_vert_of_procs[0]);
}
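// In the ParMETIS convention, entry i of vtxdist is the global id of the first graph
// vertex owned by proc i, i.e. proc i owns the vertices [vtxdist[i], vtxdist[i+1]).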
/*!
 * \brief Return nb of cells in domains with lower index.
 *
 * gatherNbOf() must be called before.
 * Result added to local id on given domain gives id in the whole distributed mesh
 */
mcIdType MEDPARTITIONER::ParaDomainSelector::getDomainCellShift(int domainIndex) const
{
  if (_cell_shift_by_domain.empty())
    throw INTERP_KERNEL::Exception("_cell_shift_by_domain not set");
  return _cell_shift_by_domain[domainIndex];
}
//! Return nb of nodes in domains with lower index; gatherNbOf() must be called before.
mcIdType MEDPARTITIONER::ParaDomainSelector::getDomainNodeShift(int domainIndex) const
{
  if (_node_shift_by_domain.empty())
    throw INTERP_KERNEL::Exception("_node_shift_by_domain not set");
  return _node_shift_by_domain[domainIndex];
}
/*!
 * \brief Return nb of nodes on processors with lower rank.
 *
 * gatherNbOf() must be called before.
 * Result added to global id on this processor gives id in the whole distributed mesh
 */
mcIdType MEDPARTITIONER::ParaDomainSelector::getProcNodeShift() const
{
  if (_nb_vert_of_procs.empty())
    throw INTERP_KERNEL::Exception("_nb_vert_of_procs not set");
  return _nb_vert_of_procs[_rank];
}
/*!
 * \brief Gather graphs from all processors into one
 */
std::unique_ptr<MEDPARTITIONER::Graph> MEDPARTITIONER::ParaDomainSelector::gatherGraph(const Graph* graph) const
{
  Graph* glob_graph = 0;
#ifdef HAVE_MPI
  // ---------------
  // Gather indices
  // ---------------
  std::vector<int> index_size_of_proc( nbProcs() ); // index sizes - 1
  for ( std::size_t i = 1; i < _nb_vert_of_procs.size(); ++i )
    index_size_of_proc[i-1] = FromIdType<int>(_nb_vert_of_procs[ i ] - _nb_vert_of_procs[ i-1 ]);

  mcIdType index_size = 1 + _cell_shift_by_domain.back();
  mcIdType *graph_index = new mcIdType[ index_size ];
  const mcIdType *index = graph->getGraph()->getIndex();
  MCAuto< DataArrayInt > nb_vert_of_procs = FromIdTypeVec( _nb_vert_of_procs );
  int *proc_index_displacement = nb_vert_of_procs->getPointer();

  MPI_Allgatherv((void*) (index+1),         // send local index except first 0 (or 1)
                 index_size_of_proc[_rank], // index size on this proc
                 MPI_ID_TYPE,
                 (void*) graph_index,       // receive indices
                 & index_size_of_proc[0],   // index size on each proc
                 proc_index_displacement,   // displacement of each proc index
                 MPI_ID_TYPE,
                 MPI_COMM_WORLD);
  graph_index[0] = index[0]; // it is not overwritten thanks to proc_index_displacement[0]==1

  // get sizes of graph values on each proc by the got indices of graphs
  std::vector< int > value_size_of_proc( nbProcs() ), proc_value_displacement(1,0);
  for ( int i = 0; i < nbProcs(); ++i )
    {
      if ( index_size_of_proc[i] > 0 )
        value_size_of_proc[i] = (int)(graph_index[ proc_index_displacement[ i+1 ]-1 ] - graph_index[0]);
      else
        value_size_of_proc[i] = 0;
      proc_value_displacement.push_back( proc_value_displacement.back() + value_size_of_proc[i] );
    }

  // update graph_index
  for ( int i = 1; i < nbProcs(); ++i )
    {
      mcIdType shift = graph_index[ proc_index_displacement[i]-1 ]-graph_index[0];
      for ( int j = proc_index_displacement[i]; j < proc_index_displacement[i+1]; ++j )
        graph_index[ j ] += shift;
    }

  // --------------
  // Gather values
  // --------------
  mcIdType value_size = graph_index[ index_size-1 ] - graph_index[ 0 ];
  mcIdType *graph_value = new mcIdType[ value_size ];
  const mcIdType *value = graph->getGraph()->getValues();

  MPI_Allgatherv((void*) value,                // send local value
                 value_size_of_proc[_rank],    // value size on this proc
                 MPI_ID_TYPE,
                 (void*) graph_value,          // receive values
                 & value_size_of_proc[0],      // value size on each proc
                 & proc_value_displacement[0], // displacement of each proc value
                 MPI_ID_TYPE,
                 MPI_COMM_WORLD);

  // -----------------
  // Gather partition
  // -----------------
  mcIdType * partition = new mcIdType[ _cell_shift_by_domain.back() ];
  const mcIdType* part = graph->getPart();

  MPI_Allgatherv((void*) part,              // send local partition
                 index_size_of_proc[_rank], // index size on this proc
                 MPI_ID_TYPE,
                 (void*)(partition-1),      // -1 compensates proc_index_displacement[0]==1
                 & index_size_of_proc[0],   // index size on each proc
                 proc_index_displacement,   // displacement of each proc index
                 MPI_ID_TYPE,
                 MPI_COMM_WORLD);

  // -----------
  // Make graph
  // -----------
  //   MEDCouplingSkyLineArray* array =
  //     new MEDCouplingSkyLineArray( index_size-1, value_size, graph_index, graph_value, true );
  //   glob_graph = new UserGraph( array, partition, index_size-1 );

  // free gathered buffers (the graph assembly above is currently disabled)
  delete [] partition;
  delete [] graph_value;
  delete [] graph_index;
#endif // HAVE_MPI
  return std::unique_ptr<Graph>( glob_graph );
}
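// Note: the same vtxdist-based displacements serve both the index and the partition
// gathers, while the value displacements are recomputed from the gathered indices.
// Since the final UserGraph assembly is commented out, the returned pointer is
// currently null; callers needing the gathered graph must re-enable that block.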
/*!
 * \brief Set nb of cell/cell pairs in a joint between domains
 */
void MEDPARTITIONER::ParaDomainSelector::setNbCellPairs( mcIdType nb_cell_pairs, int dist_domain, int loc_domain )
{
  // This method is needed for further computing global numbers of faces in joint.
  // Store if both domains are on this proc else on one of procs only
  if ( isMyDomain( dist_domain ) || dist_domain < loc_domain )
    {
      if ( _nb_cell_pairs_by_joint.empty() )
        _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);

      int joint_id = jointId( loc_domain, dist_domain );
      _nb_cell_pairs_by_joint[ joint_id ] = nb_cell_pairs;
    }
}
//================================================================================
/*!
 * \brief Return nb of cell/cell pairs in a joint between domains on different procs
 */
//================================================================================

mcIdType MEDPARTITIONER::ParaDomainSelector::getNbCellPairs( int dist_domain, int loc_domain ) const
{
  int joint_id = jointId( loc_domain, dist_domain );
  return _nb_cell_pairs_by_joint[ joint_id ];
}
//================================================================================
/*!
 * \brief Gather size of each joint
 */
//================================================================================

void MEDPARTITIONER::ParaDomainSelector::gatherNbCellPairs()
{
  if ( _nb_cell_pairs_by_joint.empty() )
    _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);

  std::vector< mcIdType > send_buf = _nb_cell_pairs_by_joint;
#ifdef HAVE_MPI
  MPI_Allreduce((void*)&send_buf[0],
                (void*)&_nb_cell_pairs_by_joint[0],
                (int)_nb_cell_pairs_by_joint.size(),
                MPI_ID_TYPE, MPI_SUM, MPI_COMM_WORLD);
#endif
  // check that the set nbs of cell pairs are correct,
  // namely that each joint is treated on one proc only
  for ( std::size_t j = 0; j < _nb_cell_pairs_by_joint.size(); ++j )
    if ( _nb_cell_pairs_by_joint[j] != send_buf[j] && send_buf[j]>0 )
      throw INTERP_KERNEL::Exception("invalid nb of cell pairs");
}
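// The MPI_SUM acts as a gather here because each joint size is set on exactly one proc
// (see setNbCellPairs) and stays zero everywhere else; the loop above then verifies that
// no entry set locally was changed by the reduction.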
//================================================================================
/*!
 * \brief Return the first global id of sub-entity for the joint
 */
//================================================================================

mcIdType MEDPARTITIONER::ParaDomainSelector::getFisrtGlobalIdOfSubentity( int loc_domain, int dist_domain ) const
{
  // total_nb_faces includes faces existing before creation of joint faces
  // (got in gatherNbOf( MED_FACE )).
  mcIdType total_nb_faces = _face_shift_by_domain.empty() ? 0 : _face_shift_by_domain.back();
  mcIdType id = total_nb_faces + 1;

  if ( _nb_cell_pairs_by_joint.empty() )
    throw INTERP_KERNEL::Exception("gatherNbCellPairs() must be called before");
  int joint_id = jointId( loc_domain, dist_domain );
  for ( int j = 0; j < joint_id; ++j )
    id += _nb_cell_pairs_by_joint[ j ];

  return id;
}
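// Illustrative example (numbers invented): with 100 pre-existing faces and joint_id==2
// whose predecessor joints hold 5 and 7 cell pairs, the first global id handed out is
// 100 + 1 + 5 + 7 = 113.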
//================================================================================
/*!
 * \brief Send-receive local ids of joint faces
 */
//================================================================================

int *MEDPARTITIONER::ParaDomainSelector::exchangeSubentityIds( int loc_domain, int dist_domain,
                                                               const std::vector<int>& loc_ids_here ) const
{
  int* loc_ids_dist = new int[ loc_ids_here.size()];
#ifdef HAVE_MPI
  int dest = getProcessorID( dist_domain );
  int tag = 2002 + jointId( loc_domain, dist_domain );
  MPI_Status status;
  MPI_Sendrecv((void*)&loc_ids_here[0], (int)loc_ids_here.size(), MPI_INT, dest, tag,
               (void*) loc_ids_dist,    (int)loc_ids_here.size(), MPI_INT, dest, tag,
               MPI_COMM_WORLD, &status);
#endif
  return loc_ids_dist;
}
//================================================================================
/*!
 * \brief Return identifier for a joint
 */
//================================================================================

int MEDPARTITIONER::ParaDomainSelector::jointId( int local_domain, int distant_domain ) const
{
  if (_nb_result_domains < 0)
    throw INTERP_KERNEL::Exception("setNbDomains() must be called before");

  if ( local_domain < distant_domain )
    std::swap( local_domain, distant_domain );
  return local_domain * _nb_result_domains + distant_domain;
}
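// The swap makes the id symmetric. Illustrative example: with _nb_result_domains==4,
// jointId(1,3) and jointId(3,1) both give 3*4+1 = 13.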
//================================================================================
/*! \brief Return time passed from construction in seconds */
//================================================================================
double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
{
#ifdef HAVE_MPI
  return MPI_Wtime() - _init_time;
#else
  return 0.0;
#endif
}
/*!
  Sends content of \a mesh to processor \a target. To be used with \a recvMesh method.
  \param mesh mesh to be sent
  \param target processor id of the target
*/
void MEDPARTITIONER::ParaDomainSelector::sendMesh(const MEDCoupling::MEDCouplingUMesh& mesh, int target) const
{
#ifndef HAVE_MPI
  throw INTERP_KERNEL::Exception("ParaDomainSelector::sendMesh : incoherent call in non_MPI mode");
#else
  if (MyGlobals::_Verbose>600)
    std::cout << "proc " << _rank << " : sendMesh '" << mesh.getName() << "' size " << mesh.getNumberOfCells() << " to " << target << std::endl;
  // First stage : sending sizes
  // ------------------------------
  std::vector<mcIdType> tinyInfoLocal;
  std::vector<std::string> tinyInfoLocalS;
  std::vector<double> tinyInfoLocalD;
  //Getting tiny info of local mesh to allow the distant proc to initialize and allocate
  //the transmitted mesh.
  mesh.getTinySerializationInformation(tinyInfoLocalD,tinyInfoLocal,tinyInfoLocalS);
  tinyInfoLocal.push_back(mesh.getNumberOfCells());
  int tinySize=(int)tinyInfoLocal.size();
  MPI_Send(&tinySize, 1, MPI_INT, target, 1113, MPI_COMM_WORLD);
  MPI_Send(&tinyInfoLocal[0], (int)tinyInfoLocal.size(), MPI_ID_TYPE, target, 1112, MPI_COMM_WORLD);

  if (mesh.getNumberOfCells()>0) //no sends if empty
    {
      MEDCoupling::DataArrayIdType *v1Local=0;
      MEDCoupling::DataArrayDouble *v2Local=0;
      //serialization of local mesh to send data to distant proc.
      mesh.serialize(v1Local,v2Local);
      int nbLocalElems=0;
      mcIdType *ptLocal=0;
      if(v1Local) //if empty getNbOfElems() is 1!
        {
          nbLocalElems=FromIdType<int>(v1Local->getNbOfElems()); // if empty be 1!
          ptLocal=v1Local->getPointer();
        }
      MPI_Send(ptLocal, nbLocalElems, MPI_ID_TYPE, target, 1111, MPI_COMM_WORLD);
      int nbLocalElems2=0;
      double *ptLocal2=0;
      if(v2Local) //if empty be 0!
        {
          nbLocalElems2=FromIdType<int>(v2Local->getNbOfElems());
          ptLocal2=v2Local->getPointer();
        }
      MPI_Send(ptLocal2, nbLocalElems2, MPI_DOUBLE, target, 1110, MPI_COMM_WORLD);
      if(v1Local) v1Local->decrRef();
      if(v2Local) v2Local->decrRef();
    }
#endif
}
/*! Receives messages from proc \a source to fill mesh \a mesh.
  To be used with \a sendMesh method.
  \param mesh pointer to mesh that is filled
  \param source processor id of the incoming messages
*/
void MEDPARTITIONER::ParaDomainSelector::recvMesh(MEDCoupling::MEDCouplingUMesh*& mesh, int source) const
{
#ifndef HAVE_MPI
  throw INTERP_KERNEL::Exception("ParaDomainSelector::recvMesh : incoherent call in non_MPI mode");
#else
  // First stage : exchanging sizes
  // ------------------------------
  std::vector<mcIdType> tinyInfoDistant;
  std::vector<std::string> tinyInfoLocalS;
  std::vector<double> tinyInfoDistantD(1);
  //Getting tiny info of the distant mesh to allow this proc to initialize and allocate
  //the transmitted mesh.
  MPI_Status status;
  int tinyVecSize=0;
  MPI_Recv(&tinyVecSize, 1, MPI_INT, source, 1113, MPI_COMM_WORLD, &status);
  tinyInfoDistant.resize(tinyVecSize);
  std::fill(tinyInfoDistant.begin(),tinyInfoDistant.end(),0);

  MPI_Recv(&tinyInfoDistant[0], tinyVecSize, MPI_ID_TYPE, source, 1112, MPI_COMM_WORLD, &status);
  //there was tinyInfoLocal.push_back(mesh.getNumberOfCells());
  mcIdType NumberOfCells=tinyInfoDistant[tinyVecSize-1];
  if (NumberOfCells>0)
    {
      MEDCoupling::DataArrayIdType *v1Distant=MEDCoupling::DataArrayIdType::New();
      MEDCoupling::DataArrayDouble *v2Distant=MEDCoupling::DataArrayDouble::New();
      //Building the right instance of copy of distant mesh.
      MEDCoupling::MEDCouplingPointSet *distant_mesh_tmp=
        MEDCoupling::MEDCouplingPointSet::BuildInstanceFromMeshType(
          (MEDCoupling::MEDCouplingMeshType) tinyInfoDistant[0]);
      std::vector<std::string> unusedTinyDistantSts;
      mesh=dynamic_cast<MEDCoupling::MEDCouplingUMesh*> (distant_mesh_tmp);

      mesh->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);

      int nbDistElem=0;
      mcIdType *ptDist=0;
      if(v1Distant->getNbOfElems()>0)
        {
          nbDistElem=FromIdType<int>(v1Distant->getNbOfElems());
          ptDist=v1Distant->getPointer();
        }
      MPI_Recv(ptDist, nbDistElem, MPI_ID_TYPE, source, 1111, MPI_COMM_WORLD, &status);
      double *ptDist2=0;
      nbDistElem=0;
      if(v2Distant->getNbOfElems()>0)
        {
          nbDistElem=FromIdType<int>(v2Distant->getNbOfElems());
          ptDist2=v2Distant->getPointer();
        }
      MPI_Recv(ptDist2, nbDistElem, MPI_DOUBLE, source, 1110, MPI_COMM_WORLD, &status);
      //finish unserialization
      mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
      if(v1Distant) v1Distant->decrRef();
      if(v2Distant) v2Distant->decrRef();
    }
  else
    {
      mesh=CreateEmptyMEDCouplingUMesh();
    }
  if (MyGlobals::_Verbose>600)
    std::cout << "proc " << _rank << " : recvMesh '" << mesh->getName() << "' size " << mesh->getNumberOfCells() << " from " << source << std::endl;
#endif
}
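// Message protocol shared by sendMesh()/recvMesh() (the two must be called in matching pairs):
//   tag 1113 : number of entries of the "tiny info" vector (MPI_INT)
//   tag 1112 : tiny serialization info, with the cell count appended last (MPI_ID_TYPE)
//   tag 1111 : id/connectivity array of the serialized mesh (MPI_ID_TYPE)
//   tag 1110 : coordinate array of the serialized mesh (MPI_DOUBLE)
// The last two messages are exchanged only when the mesh has at least one cell.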
#if !defined WIN32 && !defined __APPLE__
#include <sys/sysinfo.h>
#endif

/*! \brief Evaluate current memory usage and return the maximal one in KB */
int MEDPARTITIONER::ParaDomainSelector::evaluateMemory() const
{
  if ( _mesure_memory )
    {
      int used_memory = 0;
#if !defined WIN32 && !defined __APPLE__
      struct sysinfo si;
      int err = sysinfo( &si );
      if ( !err )
        used_memory = (int)(( si.totalram - si.freeram + si.totalswap - si.freeswap ) * si.mem_unit ) / 1024;
#endif
      if ( used_memory > _max_memory )
        _max_memory = used_memory;
      if ( !_init_memory )
        _init_memory = used_memory; // baseline captured at first call
    }
  return _max_memory - _init_memory;
}