X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FMEDPartitioner%2FMEDPARTITIONER_ParaDomainSelector.cxx;h=7658b72f7d6864803797d8ef094e095ad61468c0;hb=0b187729ac99d3e9e9bb9d2be8cb8600a783be6c;hp=48f7a6ca66eb84dc488d4ce393cffb5c95ec9dbf;hpb=10f37bf6f33a762626d7f1093b2f5450c1688667;p=tools%2Fmedcoupling.git

diff --git a/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx b/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
index 48f7a6ca6..7658b72f7 100644
--- a/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
+++ b/src/MEDPartitioner/MEDPARTITIONER_ParaDomainSelector.cxx
@@ -1,9 +1,9 @@
-// Copyright (C) 2007-2012 CEA/DEN, EDF R&D
+// Copyright (C) 2007-2015 CEA/DEN, EDF R&D
 //
 // This library is free software; you can redistribute it and/or
 // modify it under the terms of the GNU Lesser General Public
 // License as published by the Free Software Foundation; either
-// version 2.1 of the License.
+// version 2.1 of the License, or (at your option) any later version.
 //
 // This library is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,11 +22,12 @@
 #include "MEDPARTITIONER_Utils.hxx"

 #include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingSkyLineArray.hxx"

 #include
 #include

-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
 #include <mpi.h>
 #endif

@@ -37,7 +38,7 @@ MEDPARTITIONER::ParaDomainSelector::ParaDomainSelector(bool mesure_memory)
   :_rank(0),_world_size(1), _nb_result_domains(-1), _init_time(0.0),
    _mesure_memory(mesure_memory), _init_memory(0), _max_memory(0)
 {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   if (MyGlobals::_Rank==-1)
     {
       MPI_Init(0,0); //do once only
@@ -76,7 +77,7 @@ bool MEDPARTITIONER::ParaDomainSelector::isOnDifferentHosts() const
   evaluateMemory();
   if ( _world_size < 2 )
     return false;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
   int size;
   MPI_Get_processor_name( name_here, &size);
@@ -100,6 +101,7 @@ bool MEDPARTITIONER::ParaDomainSelector::isOnDifferentHosts() const
   MPI_Allreduce( &same, &sum_same, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
   return (sum_same != nbProcs());
 #endif
+  return false;
 }

 /*!
@@ -151,11 +153,11 @@ void MEDPARTITIONER::ParaDomainSelector::gatherNbOf(const std::vector
-      throw INTERP_KERNEL::Exception("not(HAVE_MPI2) incompatible with MPI_World_Size>1");
+      throw INTERP_KERNEL::Exception("not(HAVE_MPI) incompatible with MPI_World_Size>1");
 #endif
     }
   int total_nb_cells=0, total_nb_nodes=0;
@@ -288,7 +290,7 @@ std::auto_ptr<Graph> MEDPARTITIONER::ParaDomainSelector::gatherG
   Graph* glob_graph = 0;

   evaluateMemory();
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI

   // ---------------
   // Gather indices
@@ -369,8 +371,8 @@ std::auto_ptr<Graph> MEDPARTITIONER::ParaDomainSelector::gatherG
   // Make graph
   // -----------

-  //   MEDPARTITIONER::SkyLineArray* array =
-  //     new MEDPARTITIONER::SkyLineArray( index_size-1, value_size, graph_index, graph_value, true );
+  //   MEDCouplingSkyLineArray* array =
+  //     new MEDCouplingSkyLineArray( index_size-1, value_size, graph_index, graph_value, true );

   //   glob_graph = new UserGraph( array, partition, index_size-1 );
@@ -378,7 +380,7 @@ std::auto_ptr<Graph> MEDPARTITIONER::ParaDomainSelector::gatherG

   delete [] partition;

-#endif // HAVE_MPI2
+#endif // HAVE_MPI

   return std::auto_ptr<Graph>( glob_graph );
 }
@@ -429,7 +431,7 @@ void MEDPARTITIONER::ParaDomainSelector::gatherNbCellPairs()
   evaluateMemory();

   std::vector< int > send_buf = _nb_cell_pairs_by_joint;
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   MPI_Allreduce((void*)&send_buf[0],
                 (void*)&_nb_cell_pairs_by_joint[0],
                 _nb_cell_pairs_by_joint.size(),
@@ -476,7 +478,7 @@ int *MEDPARTITIONER::ParaDomainSelector::exchangeSubentityIds( int loc_domain, i
                                const std::vector<int>& loc_ids_here ) const
 {
   int* loc_ids_dist = new int[ loc_ids_here.size()];
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   int dest = getProcessorID( dist_domain );
   int tag = 2002 + jointId( loc_domain, dist_domain );
   MPI_Status status;
@@ -515,7 +517,7 @@ int MEDPARTITIONER::ParaDomainSelector::jointId( int local_domain, int distant_d
 double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
 {
-#ifdef HAVE_MPI2
+#ifdef HAVE_MPI
   return MPI_Wtime() - _init_time;
 #else
   return 0.0;
 #endif
@@ -530,7 +532,7 @@ double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
 void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingUMesh& mesh, int target) const
 {
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
   throw INTERP_KERNEL::Exception("ParaDomainSelector::sendMesh : incoherent call in non_MPI mode");
 #else
   if (MyGlobals::_Verbose>600)
@@ -583,7 +585,7 @@ void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingU
  */
 void MEDPARTITIONER::ParaDomainSelector::recvMesh(ParaMEDMEM::MEDCouplingUMesh*& mesh, int source)const
 {
-#ifndef HAVE_MPI2
+#ifndef HAVE_MPI
   throw INTERP_KERNEL::Exception("ParaDomainSelector::recvMesh : incoherent call in non_MPI mode");
 #else
   // First stage : exchanging sizes
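
The recurring pattern in this patch is an #ifdef HAVE_MPI block with a sequential fallback after the #endif (for example the "return false;" added to isOnDifferentHosts) together with element-wise MPI_Allreduce sums as in gatherNbCellPairs. The sketch below is not the library code; it is a minimal standalone illustration of those two patterns under the same HAVE_MPI convention, with hypothetical function names (onDifferentHostsSketch, allreduceSumSketch):

#ifdef HAVE_MPI
#include <mpi.h>
#endif
#include <vector>

// Guard pattern: the MPI branch computes the result, and the unconditional
// return after the #endif keeps the non-MPI build well-formed.
bool onDifferentHostsSketch()
{
#ifdef HAVE_MPI
  char name_here[MPI_MAX_PROCESSOR_NAME+1] = {};
  int len = 0, nb_procs = 0, sum_same = 0;
  MPI_Get_processor_name(name_here, &len);
  MPI_Comm_size(MPI_COMM_WORLD, &nb_procs);
  int same = 1; // placeholder: the real code compares name_here with a neighbour rank's name
  MPI_Allreduce(&same, &sum_same, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  return (sum_same != nb_procs);
#endif
  return false; // sequential build: everything runs on one host
}

// Allreduce pattern as in gatherNbCellPairs: every rank contributes a local
// vector and every rank receives the element-wise sum over all ranks.
std::vector<int> allreduceSumSketch(const std::vector<int>& local)
{
#ifdef HAVE_MPI
  std::vector<int> global(local.size(), 0);
  MPI_Allreduce((void*)local.data(), (void*)global.data(),
                (int)local.size(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  return global;
#else
  return local; // one process: the local values already are the global sums
#endif
}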