SALOME platform Git repositories - tools/medcoupling.git/commitdiff
Salome HOME
Test of ParaMEDMEM using MPI2 capabilities.
author: ageay <ageay>
Thu, 15 Jul 2010 15:36:26 +0000 (15:36 +0000)
committer: ageay <ageay>
Thu, 15 Jul 2010 15:36:26 +0000 (15:36 +0000)
src/ParaMEDMEMTest/MPI2Connector.cxx [new file with mode: 0644]
src/ParaMEDMEMTest/MPI2Connector.hxx [new file with mode: 0644]
src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx [new file with mode: 0644]
src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx [new file with mode: 0644]

diff --git a/src/ParaMEDMEMTest/MPI2Connector.cxx b/src/ParaMEDMEMTest/MPI2Connector.cxx
new file mode 100644 (file)
index 0000000..8daa235
--- /dev/null
@@ -0,0 +1,143 @@
+//  Copyright (C) 2007-2010  CEA/DEN, EDF R&D
+//
+//  This library is free software; you can redistribute it and/or
+//  modify it under the terms of the GNU Lesser General Public
+//  License as published by the Free Software Foundation; either
+//  version 2.1 of the License.
+//
+//  This library is distributed in the hope that it will be useful,
+//  but WITHOUT ANY WARRANTY; without even the implied warranty of
+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+//  Lesser General Public License for more details.
+//
+//  You should have received a copy of the GNU Lesser General Public
+//  License along with this library; if not, write to the Free Software
+//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+//  See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include "MPI2Connector.hxx"
+
+#include <iostream>
+
+// Constructor: caches the size and rank of MPI_COMM_WORLD for later use.
+MPI2Connector::MPI2Connector()
+{
+  MPI_Comm_size( MPI_COMM_WORLD, &_nb_proc );
+  MPI_Comm_rank( MPI_COMM_WORLD, &_num_proc );
+}
+
+// Destructor: intentionally empty — communicator/port teardown is done
+// explicitly via remoteMPI2Disconnect(), not here.
+MPI2Connector::~MPI2Connector()
+{
+}
+
+// Establishes an MPI2 dynamic connection between two independently started
+// MPI programs through the named service `service`.
+//
+// Rank 0 first tries to publish the service (becoming the server); if the
+// name is already published by the other program, it falls back to client
+// mode. The other ranks poll MPI_Lookup_name for up to TIMEOUT seconds.
+// All processes then collectively call MPI_Comm_accept (server side) or
+// MPI_Comm_connect (client side), and the resulting intercommunicator is
+// merged into the intracommunicator _gcom, which is returned. Server
+// processes get the low ranks in _gcom.
+//
+// Throws std::exception if `service` is empty or the connection fails.
+MPI_Comm MPI2Connector::remoteMPI2Connect(const std::string& service)
+{
+  int i;
+  char port_name[MPI_MAX_PORT_NAME];
+  char port_name_clt[MPI_MAX_PORT_NAME];
+  std::ostringstream msg;
+  MPI_Comm icom;
+
+  if( service.size() == 0 )
+    {
+      msg << "[" << _num_proc << "] You have to give a service name !";
+      std::cerr << msg.str().c_str() << std::endl;
+      throw std::exception();
+    }
+
+  _srv = false;
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  // Errors must be returned (not fatal) so the publish/lookup probing below
+  // can fail gracefully; the handler is restored once the role is decided.
+  MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+  if( _num_proc == 0 )
+    { 
+      /* rank 0 tries to be a server. If the service is already published, it tries to be a client */
+      MPI_Open_port(MPI_INFO_NULL, port_name); 
+      if ( MPI_Publish_name((char*)service.c_str(), MPI_INFO_NULL, port_name) == MPI_SUCCESS )
+        {
+          _srv = true;
+          // Keep the port name so remoteMPI2Disconnect() can unpublish/close it.
+          _port_name = port_name;
+          std::cerr << "[" << _num_proc << "] service " << service << " available at " << port_name << std::endl;
+        }      
+      else if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS )
+        {
+          std::cerr << "[" << _num_proc << "] I get the connection with " << service << " at " << port_name_clt << std::endl;
+          // Client mode: the locally opened port is not needed.
+          MPI_Close_port( port_name );
+        }
+      else
+        {
+          msg << "[" << _num_proc << "] Error on connection with " << service << " at " << port_name_clt;
+          std::cerr << msg.str().c_str() << std::endl;
+          throw std::exception();
+        }
+    }
+  else
+    {
+      i=0;
+      /* Wait for rank 0 to publish the name, and try to be a client */
+      while ( i != TIMEOUT  ) 
+        {
+          // NOTE(review): sleep() requires <unistd.h>; this file relies on a
+          // transitive include — confirm on all supported platforms.
+          sleep(1);
+          if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS )
+            {
+              std::cerr << "[" << _num_proc << "] I get the connection with " << service << " at " << port_name_clt << std::endl;
+              break;
+            }
+          i++;
+        }
+      if(i==TIMEOUT)
+        {
+          msg << "[" << _num_proc << "] Error on connection with " << service << " at " << port_name_clt;
+          std::cerr << msg.str().c_str() << std::endl;
+          throw std::exception();
+        }
+    }
+  MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
+  
+  /* If rank 0 is server, all processes call MPI_Comm_accept */
+  /* If rank 0 is not server, all processes call MPI_Comm_connect */
+  // Broadcast rank 0's server/client decision to the whole local group.
+  int srv = (int)_srv;
+  MPI_Bcast(&srv,1,MPI_INT,0,MPI_COMM_WORLD);
+  _srv = (bool)srv;
+  if ( _srv )
+    MPI_Comm_accept( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom );
+  else
+    MPI_Comm_connect(port_name_clt, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom );
+
+  /* create global communicator: servers have low index in global communicator*/
+  // `high` argument is !_srv, so the server group is ordered first in _gcom.
+  MPI_Intercomm_merge(icom,!_srv,&_gcom);
+
+  /* only rank 0 can be server for unpublish name */
+  if(_num_proc != 0) _srv = false;
+
+  return _gcom;
+
+}
+
+// Tears down the connection created by remoteMPI2Connect(): disconnects the
+// merged communicator and, on the server side (rank 0 that published the
+// name), unpublishes `service` and closes the saved port.
+//
+// Throws std::exception if `service` is empty.
+void MPI2Connector::remoteMPI2Disconnect(const std::string& service)
+{
+  std::ostringstream msg;
+
+  if( service.size() == 0 )
+    {
+      msg << "[" << _num_proc << "] You have to give a service name !";
+      std::cerr << msg.str().c_str() << std::endl;
+      throw std::exception();
+    }
+
+  MPI_Comm_disconnect( &_gcom ); 
+  if ( _srv )
+    {
+
+      char port_name[MPI_MAX_PORT_NAME];
+      // NOTE(review): strcpy requires <cstring>, which this file does not
+      // include directly — relies on a transitive include; confirm.
+      strcpy(port_name,_port_name.c_str());
+
+      MPI_Unpublish_name((char*)service.c_str(), MPI_INFO_NULL, port_name); 
+      std::cerr << "[" << _num_proc << "] " << service << ": close port " << _port_name << std::endl;
+      MPI_Close_port( port_name ); 
+    }
+  
+}
+
diff --git a/src/ParaMEDMEMTest/MPI2Connector.hxx b/src/ParaMEDMEMTest/MPI2Connector.hxx
new file mode 100644 (file)
index 0000000..905de10
--- /dev/null
@@ -0,0 +1,48 @@
+//  Copyright (C) 2007-2010  CEA/DEN, EDF R&D
+//
+//  This library is free software; you can redistribute it and/or
+//  modify it under the terms of the GNU Lesser General Public
+//  License as published by the Free Software Foundation; either
+//  version 2.1 of the License.
+//
+//  This library is distributed in the hope that it will be useful,
+//  but WITHOUT ANY WARRANTY; without even the implied warranty of
+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+//  Lesser General Public License for more details.
+//
+//  You should have received a copy of the GNU Lesser General Public
+//  License along with this library; if not, write to the Free Software
+//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+//  See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#ifndef __MPI2CONNECTOR_HXX__
+#define __MPI2CONNECTOR_HXX__
+
+#include <mpi.h>
+#include <string>
+#include <sstream>
+
+// Helper that connects/disconnects two independently launched MPI programs
+// through MPI2 dynamic process management (publish/lookup of a named port).
+class MPI2Connector
+{
+public:
+  MPI2Connector();
+  ~MPI2Connector();
+  // MPI2 connection: rendezvous on `service` and return the merged
+  // intracommunicator (server processes get the low ranks).
+  MPI_Comm remoteMPI2Connect(const std::string& service);
+  // MPI2 disconnection: disconnect the merged communicator and, on the
+  // server side, unpublish the service and close its port.
+  void remoteMPI2Disconnect(const std::string& service);
+private:
+  // Rank of this process in MPI_COMM_WORLD
+  int _num_proc;
+  // Number of processes in MPI_COMM_WORLD
+  int _nb_proc;
+  // Merged intracommunicator returned by remoteMPI2Connect()
+  MPI_Comm _gcom;
+  // True only on the rank-0 process that successfully published the service
+  bool _srv;
+  // Name of the opened MPI port (server side only)
+  std::string _port_name;
+private:
+  // Number of 1-second lookup attempts before giving up on the client side
+  static const int TIMEOUT=5;
+};
+
+#endif
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx
new file mode 100644 (file)
index 0000000..da5498a
--- /dev/null
@@ -0,0 +1,126 @@
+//  Copyright (C) 2007-2010  CEA/DEN, EDF R&D
+//
+//  This library is free software; you can redistribute it and/or
+//  modify it under the terms of the GNU Lesser General Public
+//  License as published by the Free Software Foundation; either
+//  version 2.1 of the License.
+//
+//  This library is distributed in the hope that it will be useful,
+//  but WITHOUT ANY WARRANTY; without even the implied warranty of
+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+//  Lesser General Public License for more details.
+//
+//  You should have received a copy of the GNU Lesser General Public
+//  License along with this library; if not, write to the Free Software
+//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+//  See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <cppunit/extensions/HelperMacros.h>
+
+#include "MPI2Connector.hxx"
+#include "ParaMESH.hxx"
+#include "ParaFIELD.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "InterpKernelDEC.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "CommInterface.hxx"
+
+#include <mpi.h>
+#include <iostream>
+#include <stdlib.h>
+
+// CppUnit fixture for the sending half of the two-program MPI2 coupling test
+// (run against ParaMEDMEMTestMPI2_2, which receives).
+class MPI2ParaMEDMEMTest : public CppUnit::TestFixture
+{
+  CPPUNIT_TEST_SUITE( MPI2ParaMEDMEMTest );
+  CPPUNIT_TEST( testBasicMPI2_1 );
+  CPPUNIT_TEST_SUITE_END();
+public:
+  // Connects to the remote program and sends a P0 field via InterpKernelDEC.
+  void testBasicMPI2_1();
+};
+
+using namespace ParaMEDMEM;
+
+// Source side of the coupling test: 2 local processes join the remote group
+// through the "SERVICE" MPI2 port (5 processes after the merge), each builds
+// a one-quad 2D source mesh, and a P0 cell field is sent through an
+// InterpKernelDEC to the 3 target processes.
+void MPI2ParaMEDMEMTest::testBasicMPI2_1()
+{
+  int lsize, lrank, gsize, grank;
+  MPI_Comm gcom;
+  std::string service = "SERVICE";
+  std::ostringstream meshfilename, meshname;
+  ParaMEDMEM::ParaMESH *paramesh=0;
+  ParaMEDMEM::MEDCouplingUMesh *mesh;
+  ParaMEDMEM::ParaFIELD *parafield=0;
+  // NOTE(review): `field` is declared but never used in this test.
+  ParaMEDMEM::MEDCouplingFieldDouble *field;
+  ParaMEDMEM::CommInterface *interface;
+  ParaMEDMEM::MPIProcessorGroup *source, *target;
+
+  MPI_Comm_size( MPI_COMM_WORLD, &lsize );
+  MPI_Comm_rank( MPI_COMM_WORLD, &lrank );
+  // This program must be launched on exactly 2 processes.
+  if(lsize!=2)
+    {
+      CPPUNIT_ASSERT(false);
+      return;
+    }
+
+  /* Connection to remote program */
+  MPI2Connector *mpio = new MPI2Connector;
+  gcom = mpio->remoteMPI2Connect(service);
+  MPI_Comm_size( gcom, &gsize );
+  MPI_Comm_rank( gcom, &grank );
+  // Merged communicator must hold 2 local + 3 remote = 5 processes.
+  if(gsize!=5)
+    {
+      CPPUNIT_ASSERT(false);
+      return;
+    }
+  interface = new ParaMEDMEM::CommInterface;
+  // Source group occupies ranks [0, lsize-1] of gcom (servers merge low).
+  source = new ParaMEDMEM::MPIProcessorGroup(*interface,0,lsize-1,gcom);
+  target = new ParaMEDMEM::MPIProcessorGroup(*interface,lsize,gsize-1,gcom);
+
+  // One quad per source process; coordinates are selected by global rank.
+  const double sourceCoordsAll[2][8]={{0.4,0.5,0.4,1.5,1.6,1.5,1.6,0.5},
+                                      {0.3,-0.5,1.6,-0.5,1.6,-1.5,0.3,-1.5}};
+  
+  int conn4All[8]={0,1,2,3,4,5,6,7};
+  
+  std::ostringstream stream; stream << "sourcemesh2D proc " << grank;
+  mesh=MEDCouplingUMesh::New(stream.str().c_str(),2);
+  // NOTE(review): allocateCells(2) but only one cell is inserted — confirm intent.
+  mesh->allocateCells(2);
+  mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All);
+  mesh->finishInsertingCells();
+  DataArrayDouble *myCoords=DataArrayDouble::New();
+  myCoords->alloc(4,2);
+  const double *sourceCoords=sourceCoordsAll[grank];
+  std::copy(sourceCoords,sourceCoords+8,myCoords->getPointer());
+  mesh->setCoords(myCoords);
+  myCoords->decrRef();
+  paramesh=new ParaMESH(mesh,*source,"source mesh");
+  ParaMEDMEM::ComponentTopology comptopo;
+  parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+  // Cell value is 34 on rank 0, 47 on rank 1 (34 + 13*rank).
+  double *value=parafield->getField()->getArray()->getPointer();
+  value[0]=34+13*((double)grank);
+
+  ParaMEDMEM::InterpKernelDEC dec(*source,*target);
+  parafield->getField()->setNature(ConservativeVolumic);
+
+
+  dec.setMethod("P0");
+  dec.attachLocalField(parafield);
+  dec.synchronize();
+  dec.setForcedRenormalization(false);
+  dec.sendData();
+  /* Disconnection from remote program */
+  mpio->remoteMPI2Disconnect(service);
+  /* clean-up */
+  delete mpio;
+  delete parafield;
+  mesh->decrRef();
+  delete paramesh;
+  delete source;
+  delete target;
+  delete interface;
+}
+
+CPPUNIT_TEST_SUITE_REGISTRATION( MPI2ParaMEDMEMTest );
+
+#include "MPIMainTest.hxx"
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx
new file mode 100644 (file)
index 0000000..0ca9d76
--- /dev/null
@@ -0,0 +1,132 @@
+//  Copyright (C) 2007-2010  CEA/DEN, EDF R&D
+//
+//  This library is free software; you can redistribute it and/or
+//  modify it under the terms of the GNU Lesser General Public
+//  License as published by the Free Software Foundation; either
+//  version 2.1 of the License.
+//
+//  This library is distributed in the hope that it will be useful,
+//  but WITHOUT ANY WARRANTY; without even the implied warranty of
+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+//  Lesser General Public License for more details.
+//
+//  You should have received a copy of the GNU Lesser General Public
+//  License along with this library; if not, write to the Free Software
+//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+//
+//  See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+#include <cppunit/extensions/HelperMacros.h>
+
+#include "MPI2Connector.hxx"
+#include "ParaMESH.hxx"
+#include "ParaFIELD.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "InterpKernelDEC.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "CommInterface.hxx"
+
+#include <mpi.h>
+#include <iostream>
+#include <stdlib.h>
+
+// CppUnit fixture for the receiving half of the two-program MPI2 coupling
+// test (run against ParaMEDMEMTestMPI2_1, which sends).
+class MPI2ParaMEDMEMTest : public CppUnit::TestFixture
+{
+  CPPUNIT_TEST_SUITE( MPI2ParaMEDMEMTest );
+  CPPUNIT_TEST( testBasicMPI2_1 );
+  CPPUNIT_TEST_SUITE_END();
+public:
+  // Connects to the remote program, receives a P0 field and checks values.
+  void testBasicMPI2_1();
+};
+
+using namespace ParaMEDMEM;
+
+// Target side of the coupling test: 3 local processes join the remote group
+// through the "SERVICE" MPI2 port (5 processes after the merge), each builds
+// a two-quad 2D target mesh, receives the P0 field through an
+// InterpKernelDEC, and checks the interpolated cell values.
+void MPI2ParaMEDMEMTest::testBasicMPI2_1()
+{
+  int lsize, lrank, gsize, grank;
+  MPI_Comm gcom;
+  // NOTE(review): `status` (and `field` below) are declared but never used.
+  MPI_Status status; 
+  std::string service = "SERVICE";
+  std::ostringstream meshfilename, meshname;
+  ParaMEDMEM::ParaMESH *paramesh=0;
+  ParaMEDMEM::MEDCouplingUMesh* mesh;
+  ParaMEDMEM::ParaFIELD *parafield=0;
+  ParaMEDMEM::MEDCouplingFieldDouble* field;
+  ParaMEDMEM::CommInterface* interface;
+  ParaMEDMEM::MPIProcessorGroup* source, *target;
+  
+  MPI_Comm_size( MPI_COMM_WORLD, &lsize );
+  MPI_Comm_rank( MPI_COMM_WORLD, &lrank );
+  // This program must be launched on exactly 3 processes.
+  if(lsize!=3)
+    {
+      CPPUNIT_ASSERT(false);
+      return;
+    }
+
+  /* Connection to remote program */
+  MPI2Connector *mpio = new MPI2Connector;
+  gcom = mpio->remoteMPI2Connect(service);
+  
+  MPI_Comm_size( gcom, &gsize );
+  MPI_Comm_rank( gcom, &grank );
+  // Merged communicator must hold 2 remote + 3 local = 5 processes.
+  if(gsize!=5)
+    {
+      CPPUNIT_ASSERT(false);
+      return;
+    }
+
+  interface = new ParaMEDMEM::CommInterface;
+  // Remote (source) processes occupy the low ranks of gcom; this group is
+  // the high-rank tail [gsize-lsize, gsize-1].
+  source = new ParaMEDMEM::MPIProcessorGroup(*interface,0,gsize-lsize-1,gcom);
+  target = new ParaMEDMEM::MPIProcessorGroup(*interface,gsize-lsize,gsize-1,gcom);
+
+  // Two quads per target process; coordinates selected by local target index.
+  const double targetCoordsAll[3][16]={{0.7,1.45,0.7,1.65,0.9,1.65,0.9,1.45,  1.1,1.4,1.1,1.6,1.3,1.6,1.3,1.4},
+                                       {0.7,-0.6,0.7,0.7,0.9,0.7,0.9,-0.6,  1.1,-0.7,1.1,0.6,1.3,0.6,1.3,-0.7},
+                                       {0.7,-1.55,0.7,-1.35,0.9,-1.35,0.9,-1.55,  1.1,-1.65,1.1,-1.45,1.3,-1.45,1.3,-1.65}};
+  int conn4All[8]={0,1,2,3,4,5,6,7};
+  // Expected interpolated values per target process after recvData().
+  double targetResults[3][2]={{34.,34.},{38.333333333333336,42.666666666666664},{47.,47.}};
+
+  // grank-(gsize-lsize) maps the global rank to the local target index 0..2.
+  std::ostringstream stream; stream << "targetmesh2D proc " << grank-(gsize-lsize);
+  mesh=MEDCouplingUMesh::New(stream.str().c_str(),2);
+  mesh->allocateCells(2);
+  mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All);
+  mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All+4);
+  mesh->finishInsertingCells();
+  DataArrayDouble *myCoords=DataArrayDouble::New();
+  myCoords->alloc(8,2);
+  const double *targetCoords=targetCoordsAll[grank-(gsize-lsize)];
+  std::copy(targetCoords,targetCoords+16,myCoords->getPointer());
+  mesh->setCoords(myCoords);
+  myCoords->decrRef();
+  paramesh=new ParaMESH (mesh,*target,"target mesh");
+  ParaMEDMEM::ComponentTopology comptopo;
+  parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+
+  ParaMEDMEM::InterpKernelDEC dec(*source,*target);
+  parafield->getField()->setNature(ConservativeVolumic);
+
+  dec.setMethod("P0");
+  dec.attachLocalField(parafield);
+  dec.synchronize();
+  dec.setForcedRenormalization(false);
+  dec.recvData();
+  // Compare both received cell values against the precomputed expectations.
+  const double *res=parafield->getField()->getArray()->getConstPointer();
+  const double *expected=targetResults[grank-(gsize-lsize)];
+  CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
+  CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);
+  /* Disconnection from remote program */
+  mpio->remoteMPI2Disconnect(service);
+  /* clean-up */
+  delete mpio;
+  delete parafield;
+  mesh->decrRef();
+  delete paramesh;
+  delete source;
+  delete target;
+  delete interface;
+}
+
+CPPUNIT_TEST_SUITE_REGISTRATION( MPI2ParaMEDMEMTest );
+
+#include "MPIMainTest.hxx"