From: abn
Date: Wed, 4 May 2016 14:08:42 +0000 (+0200)
Subject: ParaMEDMEM: DisjointDEC
X-Git-Tag: V8_1_0a1~27
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=a5a009e0d290b13f262ba4f251dd20d1c7acb20d;p=tools%2Fmedcoupling.git

ParaMEDMEM: DisjointDEC
+ bug fix in copy ctor
+ new tests: both IDs and proc groups tested in constructor
+ added method to check group consistency.
---

diff --git a/src/ParaMEDMEM/DisjointDEC.cxx b/src/ParaMEDMEM/DisjointDEC.cxx
index 5554b15bc..4db440a6b 100644
--- a/src/ParaMEDMEM/DisjointDEC.cxx
+++ b/src/ParaMEDMEM/DisjointDEC.cxx
@@ -73,6 +73,7 @@ namespace MEDCoupling
     _owns_groups(false),
     _union_comm(MPI_COMM_NULL)
   {
+    checkPartitionGroup();
     _union_group = source_group.fuse(target_group);
   }
 
@@ -101,16 +102,29 @@ namespace MEDCoupling
   void DisjointDEC::copyInstance(const DisjointDEC& other)
   {
     DEC::copyFrom(other);
-    if(other._target_group)
+    if (other._union_comm != MPI_COMM_NULL)
       {
-        _target_group=other._target_group->deepCopy();
-        _owns_groups=true;
-      }
-    if(other._source_group)
-      {
-        _source_group=other._source_group->deepCopy();
-        _owns_groups=true;
+        // Tricky: the DEC is responsible for the management of _union_comm. And this comm is referenced by
+        // the MPIProcGroups (source/targets). In the case where _union_comm is not NULL we must take care of rebuilding the
+        // MPIProcGroups with a communicator that will survive the destruction of 'other'.
+        _owns_groups = true;
+        MPI_Comm_dup(other._union_comm, &_union_comm);
+//        std::cout << "DUP union comm - new is " << _union_comm << "\n";
+        _target_group = new MPIProcessorGroup(*_comm_interface, other._target_group->getProcIDs(), _union_comm);
+        _source_group = new MPIProcessorGroup(*_comm_interface, other._source_group->getProcIDs(), _union_comm);
       }
+      else{
+          if(other._target_group)
+            {
+              _target_group=other._target_group->deepCopy();
+              _owns_groups=true;
+            }
+          if(other._source_group)
+            {
+              _source_group=other._source_group->deepCopy();
+              _owns_groups=true;
+            }
+        }
     if (_source_group && _target_group)
       _union_group = _source_group->fuse(*_target_group);
   }
@@ -205,6 +219,28 @@ namespace MEDCoupling
       _comm_interface->commFree(&_union_comm);
   }
 
+  /**
+   * Check that the source and target procs form a partition of the world communicator referenced in the groups.
+   * This world communicator is not necessarily MPI_COMM_WORLD, but it has to be covered completely for the DECs to work.
+   */
+  void DisjointDEC::checkPartitionGroup() const
+  {
+    int size = -1;
+    MPIProcessorGroup * tgt = static_cast<MPIProcessorGroup *>(_target_group);
+    MPIProcessorGroup * src = static_cast<MPIProcessorGroup *>(_source_group);
+    MPI_Comm comm_t = tgt->getWorldComm();
+    MPI_Comm comm_s = src->getWorldComm();
+    if (comm_t != comm_s)
+      throw INTERP_KERNEL::Exception("DisjointDEC constructor: Inconsistent world communicator when building DisjointDEC");
+    MPI_Comm_size(comm_t, &size);
+
+    std::set<int> union_ids; // source and target ids in world_comm
+    union_ids.insert(src->getProcIDs().begin(),src->getProcIDs().end());
+    union_ids.insert(tgt->getProcIDs().begin(),tgt->getProcIDs().end());
+    if(union_ids.size()!=(std::size_t)size)
+      throw INTERP_KERNEL::Exception("DisjointDEC constructor: source_ids and target_ids do not form a partition of the communicator! Restrict the world communicator passed to the MPIProcessorGroup ctor.");
+  }
+
   void DisjointDEC::setNature(NatureOfField nature)
   {
     if(_local_field)
diff --git a/src/ParaMEDMEM/DisjointDEC.hxx b/src/ParaMEDMEM/DisjointDEC.hxx
index 4de75ebaf..8cf754248 100644
--- a/src/ParaMEDMEM/DisjointDEC.hxx
+++ b/src/ParaMEDMEM/DisjointDEC.hxx
@@ -74,6 +74,7 @@ namespace MEDCoupling
     void compareFieldAndMethod() const throw(INTERP_KERNEL::Exception);
     void cleanInstance();
     void copyInstance(const DisjointDEC& other);
+    void checkPartitionGroup() const;
   protected:
     const ParaFIELD* _local_field;
     //! Processor group representing the union of target and source processors
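For illustration, a minimal sketch of the contract the new checkPartitionGroup() enforces, assuming a 4-proc run under MPI_COMM_WORLD, the usual ParaMEDMEM headers, and rank sets mirroring the tests further below (names are illustrative, not part of the commit):

```cpp
#include <set>
#include <mpi.h>
#include "CommInterface.hxx"
#include "MPIProcessorGroup.hxx"
#include "InterpKernelDEC.hxx"

using namespace MEDCoupling;

// To be called synchronously by every rank of a 4-proc MPI_COMM_WORLD.
void buildFromGroups()
{
  CommInterface comm;
  std::set<int> src_ids, trg_ids;
  src_ids.insert(0); src_ids.insert(3);  // working side
  trg_ids.insert(1); trg_ids.insert(2);  // lazy side
  // {0,3} and {1,2} together cover ranks 0..3: a valid partition of
  // MPI_COMM_WORLD, so checkPartitionGroup() passes.
  MPIProcessorGroup src_group(comm, src_ids);
  MPIProcessorGroup trg_group(comm, trg_ids);
  InterpKernelDEC dec(src_group, trg_group);
  // With trg_ids = {1} only, rank 2 would be covered by neither side and
  // the ctor would throw INTERP_KERNEL::Exception (cf. testGauthier3_4 below).
}
```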
+ */ InterpKernelDEC::InterpKernelDEC(const std::set& src_ids, const std::set& trg_ids, const MPI_Comm& world_comm): DisjointDEC(src_ids,trg_ids,world_comm), diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx index baf4dfe78..0f535bb42 100644 --- a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx @@ -77,7 +77,10 @@ class ParaMEDMEMTest : public CppUnit::TestFixture CPPUNIT_TEST(testICoco1); // 2 procs CPPUNIT_TEST(testGauthier1); // 4 procs CPPUNIT_TEST(testGauthier2); // >= 2 procs - CPPUNIT_TEST(testGauthier3); // 4 procs + CPPUNIT_TEST(testGauthier3_1); // 4 procs + CPPUNIT_TEST(testGauthier3_2); // 4 procs + CPPUNIT_TEST(testGauthier3_3); // 5 procs + CPPUNIT_TEST(testGauthier3_4); // 5 procs CPPUNIT_TEST(testGauthier4); // 3 procs CPPUNIT_TEST(testFabienAPI1); // 3 procs CPPUNIT_TEST(testFabienAPI2); // 3 procs @@ -137,7 +140,10 @@ public: void testICoco1(); void testGauthier1(); void testGauthier2(); - void testGauthier3(); + void testGauthier3_1(); + void testGauthier3_2(); + void testGauthier3_3(); + void testGauthier3_4(); void testGauthier4(); void testFabienAPI1(); void testFabienAPI2(); @@ -159,7 +165,7 @@ private: void testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth); void testInterpKernelDEC2_2D_(const char *srcMeth, const char *targetMeth); void testInterpKernelDEC_3D_(const char *srcMeth, const char *targetMeth); - + void testGauthier3_GEN(bool, int); }; // to automatically remove temporary files from disk diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx index 26247e278..080640e17 100644 --- a/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx +++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx @@ -356,32 +356,61 @@ void ParaMEDMEMTest::testGauthier2() } } +void ParaMEDMEMTest::testGauthier3_1() +{ + testGauthier3_GEN(true,4); +} + +void ParaMEDMEMTest::testGauthier3_2() +{ + testGauthier3_GEN(false,4); +} + +void ParaMEDMEMTest::testGauthier3_3() +{ + testGauthier3_GEN(true,5); +} + +void ParaMEDMEMTest::testGauthier3_4() +{ + int size; + MPI_Comm_size(MPI_COMM_WORLD,&size); + + if(size!=5) + return; + + // Should throw since the two groups (source/target) do not form a partition of + // all the procs. + CPPUNIT_ASSERT_THROW(testGauthier3_GEN(false,5), INTERP_KERNEL::Exception); +} + + /*! * Non regression test testing copy constructor of InterpKernelDEC. 
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx
index 26247e278..080640e17 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx
@@ -356,32 +356,61 @@ void ParaMEDMEMTest::testGauthier2()
     }
 }
 
+void ParaMEDMEMTest::testGauthier3_1()
+{
+  testGauthier3_GEN(true,4);
+}
+
+void ParaMEDMEMTest::testGauthier3_2()
+{
+  testGauthier3_GEN(false,4);
+}
+
+void ParaMEDMEMTest::testGauthier3_3()
+{
+  testGauthier3_GEN(true,5);
+}
+
+void ParaMEDMEMTest::testGauthier3_4()
+{
+  int size;
+  MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+  if(size!=5)
+    return;
+
+  // Should throw since the two groups (source/target) do not form a partition of
+  // all the procs.
+  CPPUNIT_ASSERT_THROW(testGauthier3_GEN(false,5), INTERP_KERNEL::Exception);
+}
+
+
 /*!
  * Non-regression test for the copy constructor of InterpKernelDEC.
  */
-void ParaMEDMEMTest::testGauthier3()
+void ParaMEDMEMTest::testGauthier3_GEN(bool withIDs, int nprocs)
 {
   int num_cas=0;
   int rank, size;
   MPI_Comm_rank(MPI_COMM_WORLD,&rank);
   MPI_Comm_size(MPI_COMM_WORLD,&size);
-
+
   int is_master=0;
 
   CommInterface comm;
   set<int> emetteur_ids;
   set<int> recepteur_ids;
   emetteur_ids.insert(0);
-  if(size!=4)
+  if(size!=nprocs)
     return;
   recepteur_ids.insert(1);
-  if (size >2)
-    recepteur_ids.insert(2);
-  if (size >2)
-    emetteur_ids.insert(3);
-  if ((rank==0)||(rank==1))
+
+  recepteur_ids.insert(size-2);
+
+  emetteur_ids.insert(size-1);
+  if ((rank==0)||(rank==1))
     is_master=1;
-
+
   MPIProcessorGroup recepteur_group(comm,recepteur_ids);
   MPIProcessorGroup emetteur_group(comm,emetteur_ids);
@@ -394,10 +423,14 @@
     }
   else
     {
-      cas="emetteur";
+      if (emetteur_group.containsMyRank())
+        cas="emetteur";
+      else
+        cas="vide";
       // freopen("emetteur.out","w",stdout);
       //freopen("emetteur.err","w",stderr);
     }
+
   double expected[8][4]={
     {1.,1.,1.,1.},
     {40., 40., 1., 1.},
     {20.5,1.,1e200,1e200}
   };
   int expectedLgth[8]={4,4,2,2,4,4,2,2};
-
+
   for (int send=0;send<2;send++)
     for (int rec=0;rec<2;rec++)
       {
        std::vector<InterpKernelDEC> decu(1);
-        decu[0]=InterpKernelDEC(emetteur_group,recepteur_group);
+        if (withIDs)
+          decu[0] = InterpKernelDEC(emetteur_ids,recepteur_ids);
+        else
+          decu[0] = InterpKernelDEC(emetteur_group,recepteur_group);
         InterpKernelDEC& dec_emetteur=decu[0];
         MEDCoupling::ParaFIELD *champ_emetteur(0),*champ_recepteur(0);
         MEDCoupling::ParaMESH *paramesh(0);
         if (send==0)
           {
             mesh=init_quadGauthier1(is_master);
           }
         else
           {
             mesh=init_triangleGauthier1(is_master);
           }
-      paramesh=new MEDCoupling::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"emetteur mesh");
-      MEDCoupling::ComponentTopology comptopo;
-      champ_emetteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
-      champ_emetteur->getField()->setNature(IntensiveMaximum);
-      champ_emetteur->setOwnSupport(true);
-      if (rec==0)
-        {
-          mesh=init_triangleGauthier1(is_master);
-        }
-      else
+      if (cas!="vide")
         {
-          mesh=init_quadGauthier1(is_master);
-        }
-      paramesh=new MEDCoupling::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"recepteur mesh");
-      champ_recepteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
-      champ_recepteur->getField()->setNature(IntensiveMaximum);
-      champ_recepteur->setOwnSupport(true);
-      if (cas=="emetteur")
-        {
-          champ_emetteur->getField()->getArray()->fillWithValue(1.);
+          paramesh=new MEDCoupling::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"emetteur mesh");
+          MEDCoupling::ComponentTopology comptopo;
+          champ_emetteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
+          champ_emetteur->getField()->setNature(IntensiveMaximum);
+          champ_emetteur->setOwnSupport(true);
+          if (rec==0)
+            {
+              mesh=init_triangleGauthier1(is_master);
+            }
+          else
+            {
+              mesh=init_quadGauthier1(is_master);
+            }
+          paramesh=new MEDCoupling::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"recepteur mesh");
+          champ_recepteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
+          champ_recepteur->getField()->setNature(IntensiveMaximum);
+          champ_recepteur->setOwnSupport(true);
+          if (cas=="emetteur")
+            {
+              champ_emetteur->getField()->getArray()->fillWithValue(1.);
+            }
         }
-
-
+
       MPI_Barrier(MPI_COMM_WORLD);
 
       //clock_t clock0= clock ();
@@ -460,50 +498,50 @@
       bool stop=false;
       // loop over the time steps
       while (!stop) {
-
-          compti++;
-          //clock_t clocki= clock ();
-          //cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl;
-          for (int non_unif=0;non_unif<2;non_unif++)
-            {
-              if (cas=="emetteur")
-                {
-                  if (non_unif)
-                    if(rank!=3)
-                      champ_emetteur->getField()->getArray()->setIJ(0,0,40);
-                }
-              //bool ok=false; // Is the time interval successfully solved ?
-
-              // Loop on the time interval tries
-              if(1) {
-
-                if (cas=="emetteur")
-                  dec_emetteur.attachLocalField(champ_emetteur);
-                else
-                  dec_emetteur.attachLocalField(champ_recepteur);
-
-                if(init) dec_emetteur.synchronize();
-                init=false;
-
-                if (cas=="emetteur") {
-                  // affiche(champ_emetteur);
-                  dec_emetteur.sendData();
-                }
-                else if (cas=="recepteur") {
-                  dec_emetteur.recvData();
-                  if (is_master)
-                    afficheGauthier1(*champ_recepteur,expected[num_cas],expectedLgth[num_cas]);
-                }
-                else
-                  throw 0;
-                MPI_Barrier(MPI_COMM_WORLD);
-              }
-              stop=true;
-              num_cas++;
-            }
+        compti++;
+        //clock_t clocki= clock ();
+        //cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl;
+        for (int non_unif=0;non_unif<2;non_unif++)
+          {
+            if (cas=="emetteur")
+              {
+                if (non_unif)
+                  if(rank!=3)
+                    champ_emetteur->getField()->getArray()->setIJ(0,0,40);
+              }
+            //bool ok=false; // Is the time interval successfully solved ?
+
+            // Loop on the time interval tries
+            if(1) {
+
+              if (cas=="emetteur")
+                dec_emetteur.attachLocalField(champ_emetteur);
+              else
+                dec_emetteur.attachLocalField(champ_recepteur);
+
+              if(init) dec_emetteur.synchronize();
+              init=false;
+
+              if (cas=="emetteur") {
+                // affiche(champ_emetteur);
+                dec_emetteur.sendData();
+              }
+              else if (cas=="recepteur")
+                {
+                  dec_emetteur.recvData();
+                  if (is_master)
+                    afficheGauthier1(*champ_recepteur,expected[num_cas],expectedLgth[num_cas]);
+                }
+              // else
+              //   throw 0;
+              MPI_Barrier(MPI_COMM_WORLD);
+            }
+            stop=true;
+            num_cas++;
+          }
       }
   delete champ_emetteur;
   delete champ_recepteur;
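To close the loop on the copy-ctor fix, a sketch of the usage pattern the test above exercises and that copyInstance() must survive (function and parameter names are illustrative):

```cpp
#include <set>
#include <vector>
#include "InterpKernelDEC.hxx"

using namespace MEDCoupling;

// Called collectively; src_ids/trg_ids as in the examples above.
void copyScenario(const std::set<int>& src_ids, const std::set<int>& trg_ids)
{
  std::vector<InterpKernelDEC> decs(1);
  // The temporary on the right-hand side dies at the end of this statement.
  // The assignment goes through copyInstance(), which now duplicates the
  // union communicator (MPI_Comm_dup) so the rebuilt MPIProcessorGroups
  // keep a valid communicator after 'other' (the temporary) is destroyed.
  decs[0] = InterpKernelDEC(src_ids, trg_ids);
  InterpKernelDEC& dec = decs[0];  // safe to attach fields and synchronize()
  (void)dec;
}
```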