diff --git a/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx b/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
index e6beacf19..7658b72f7 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
@@ -1,4 +1,4 @@
-// Copyright (C) 2007-2014 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2015 CEA/DEN, EDF R&D
 //
 // This library is free software; you can redistribute it and/or
 // modify it under the terms of the GNU Lesser General Public
@@ -22,11 +22,12 @@
 #include "MEDPARTITIONER_Utils.hxx"
 
 #include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingSkyLineArray.hxx"
 
 #include <iostream>
 #include <numeric>
 
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include <mpi.h>
 #endif
 
@@ -37,7 +38,7 @@ MEDPARTITIONER::ParaDomainSelector::ParaDomainSelector(bool mesure_memory)
   :_rank(0),_world_size(1), _nb_result_domains(-1), _init_time(0.0),
    _mesure_memory(mesure_memory), _init_memory(0), _max_memory(0)
 {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Rank==-1)
     {
       MPI_Init(0,0); //do once only
@@ -76,7 +77,7 @@ bool MEDPARTITIONER::ParaDomainSelector::isOnDifferentHosts() const
   evaluateMemory();
   if ( _world_size < 2 )
     return false;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
   int size;
   MPI_Get_processor_name( name_here, &size);
@@ -152,11 +153,11 @@ void MEDPARTITIONER::ParaDomainSelector::gatherNbOf(const std::vector<ParaMEDMEM::MEDCouplingUMesh*>& domain_meshes)
     }
   else
     {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
       all_nb_elems.resize( nb_domains*2 );
       MPI_Allreduce((void*)&nb_elems[0], (void*)&all_nb_elems[0], nb_domains*2, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 #else
-      throw INTERP_KERNEL::Exception("not(HAVE_MPI2) incompatible with MPI_World_Size>1");
+      throw INTERP_KERNEL::Exception("not(HAVE_MPI) incompatible with MPI_World_Size>1");
 #endif
     }
   int total_nb_cells=0, total_nb_nodes=0;
@@ -289,7 +290,7 @@ std::auto_ptr<Graph> MEDPARTITIONER::ParaDomainSelector::gatherGraph(const Graph* graph) const
   Graph* glob_graph = 0;
 
   evaluateMemory();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 
   // ---------------
   // Gather indices
@@ -370,8 +371,8 @@ std::auto_ptr<Graph> MEDPARTITIONER::ParaDomainSelector::gatherGraph(const Graph* graph) const
   // Make graph
   // -----------
 
-  //   MEDPARTITIONER::SkyLineArray* array =
-  //     new MEDPARTITIONER::SkyLineArray( index_size-1, value_size, graph_index, graph_value, true );
+  //   MEDCouplingSkyLineArray* array =
+  //     new MEDCouplingSkyLineArray( index_size-1, value_size, graph_index, graph_value, true );
 
   //   glob_graph = new UserGraph( array, partition, index_size-1 );
 
@@ -379,7 +380,7 @@ std::auto_ptr<Graph> MEDPARTITIONER::ParaDomainSelector::gatherGraph(const Graph* graph) const
 
   delete [] partition;
 
-#endif // HAVE_MPI2
+#endif // HAVE_MPI
 
   return std::auto_ptr<Graph>( glob_graph );
 }
@@ -430,7 +431,7 @@ void MEDPARTITIONER::ParaDomainSelector::gatherNbCellPairs()
 
   evaluateMemory();
   std::vector< int > send_buf = _nb_cell_pairs_by_joint;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Allreduce((void*)&send_buf[0],
                 (void*)&_nb_cell_pairs_by_joint[0],
                 _nb_cell_pairs_by_joint.size(),
@@ -477,7 +478,7 @@ int *MEDPARTITIONER::ParaDomainSelector::exchangeSubentityIds( int loc_domain, int dist_domain,
                                                                const std::vector<int>& loc_ids_here ) const
 {
   int* loc_ids_dist = new int[ loc_ids_here.size()];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   int dest = getProcessorID( dist_domain );
   int tag = 2002 + jointId( loc_domain, dist_domain );
   MPI_Status status;
@@ -516,7 +517,7 @@ int MEDPARTITIONER::ParaDomainSelector::jointId( int local_domain, int distant_domain ) const
 double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
 {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   return MPI_Wtime() - _init_time;
 #else
   return 0.0;
 #endif
@@ -531,7 +532,7 @@ double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
 
 void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingUMesh& mesh, int target) const
 {
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
   throw INTERP_KERNEL::Exception("ParaDomainSelector::sendMesh : incoherent call in non_MPI mode");
 #else
   if (MyGlobals::_Verbose>600)
@@ -584,7 +585,7 @@ void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingUMesh& mesh, int target) const
  */
 void MEDPARTITIONER::ParaDomainSelector::recvMesh(ParaMEDMEM::MEDCouplingUMesh*& mesh, int source)const
 {
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
   throw INTERP_KERNEL::Exception("ParaDomainSelector::recvMesh : incoherent call in non_MPI mode");
 #else
   // First stage : exchanging sizes
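
Note on the pattern every hunk above touches: the patch renames the conditional-compilation guard from HAVE_MPI2 to HAVE_MPI, so each MPI call stays behind a single macro and the library still builds without MPI. Below is a minimal, self-contained sketch of that guard pattern, assuming only that the build defines HAVE_MPI (e.g. via -DHAVE_MPI) and links an MPI implementation when present; passedTime() mirrors getPassedTime() from the diff, while main() is purely illustrative and not part of MEDPartitioner:

#ifdef HAVE_MPI
#include <mpi.h>
#endif

#include <iostream>

// Wall-clock seconds elapsed since init_time; falls back to 0.0 in a
// non-MPI build, exactly as getPassedTime() does in the patch above.
double passedTime(double init_time)
{
#ifdef HAVE_MPI
  return MPI_Wtime() - init_time;
#else
  (void)init_time; // silence the unused-parameter warning in serial builds
  return 0.0;
#endif
}

int main(int argc, char* argv[])
{
  double t0 = 0.0;
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv); // do once only, as in the ParaDomainSelector ctor
  t0 = MPI_Wtime();
#endif
  std::cout << "elapsed: " << passedTime(t0) << " s" << std::endl;
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return 0;
}

Keeping the fallback branch compilable (returning 0.0 rather than failing) is what lets the sequential build share this code path unchanged; only the collective operations, as in gatherNbOf(), have to throw when a multi-process run is requested without MPI.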
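The sendMesh()/recvMesh() pair whose guards change at the end of the diff follows a two-stage point-to-point protocol, visible in the last context line ("First stage : exchanging sizes"): the size of the serialized mesh travels first so the receiver can allocate, then the payload follows. A hedged sketch of that protocol under the same HAVE_MPI guard; sendBuffer()/recvBuffer(), the tags 1110/1120, and the plain double payload are illustrative stand-ins, not the actual MEDPartitioner serialization of a MEDCouplingUMesh:

#ifdef HAVE_MPI
#include <mpi.h>
#include <vector>

// Stage 1: send the element count so the peer can size its buffer;
// stage 2: send the payload itself. Tag values are arbitrary here.
void sendBuffer(const std::vector<double>& buf, int target)
{
  int size = static_cast<int>(buf.size());
  MPI_Send(&size, 1, MPI_INT, target, 1110, MPI_COMM_WORLD);
  if (size > 0)
    MPI_Send(const_cast<double*>(&buf[0]), size, MPI_DOUBLE, target, 1120, MPI_COMM_WORLD);
}

// Mirror image: receive the size, allocate exactly once, receive the payload.
std::vector<double> recvBuffer(int source)
{
  int size = 0;
  MPI_Status status;
  MPI_Recv(&size, 1, MPI_INT, source, 1110, MPI_COMM_WORLD, &status);
  std::vector<double> buf(size);
  if (size > 0)
    MPI_Recv(&buf[0], size, MPI_DOUBLE, source, 1120, MPI_COMM_WORLD, &status);
  return buf;
}
#endif // HAVE_MPI

Exchanging the size first costs one extra small message per transfer, but it spares the receiver an MPI_Probe/MPI_Get_count round and guarantees a single allocation of the right size.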