From: SONOLET Aymeric
Date: Mon, 15 Apr 2024 08:58:12 +0000 (+0200)
Subject: fix: restore ParaMEDMEMTest to master and fix ParaMEDMEM link
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=b1d70040d84e6c75698ccbe41798375180223beb;p=tools%2Fmedcoupling.git

fix: restore ParaMEDMEMTest to master and fix ParaMEDMEM link
---

diff --git a/src/ParaMEDMEM/ParaDataArray.cxx b/src/ParaMEDMEM/ParaDataArray.cxx
index 9142a3bc3..7bd2249c0 100644
--- a/src/ParaMEDMEM/ParaDataArray.cxx
+++ b/src/ParaMEDMEM/ParaDataArray.cxx
@@ -18,7 +18,7 @@
 //
 // Author : Anthony Geay (EDF R&D)
 
-#include "ParaDataArray.hxx"
+#include "ParaDataArray.txx"
 
 #include "MCType.hxx"
 #include "MEDCouplingMemArray.hxx"
diff --git a/src/ParaMEDMEMTest/MPI2Connector.cxx b/src/ParaMEDMEMTest/MPI2Connector.cxx
index 3807e6615..c36aa063c 100644
--- a/src/ParaMEDMEMTest/MPI2Connector.cxx
+++ b/src/ParaMEDMEMTest/MPI2Connector.cxx
@@ -21,8 +21,6 @@
 #include
 #include
-#include
-#include
 
 #ifndef WIN32
 #include
@@ -46,7 +44,8 @@ MPI2Connector::MPI2Connector()
 }
 
 MPI2Connector::~MPI2Connector()
-= default;
+{
+}
 
 MPI_Comm MPI2Connector::remoteMPI2Connect(const std::string& service)
 {
@@ -119,7 +118,7 @@ MPI_Comm MPI2Connector::remoteMPI2Connect(const std::string& service)
 
   /* If rank 0 is server, all processes call MPI_Comm_accept */
   /* If rank 0 is not server, all processes call MPI_Comm_connect */
-  int const srv = (int)_srv;
+  int srv = (int)_srv;
   MPI_Bcast(&srv,1,MPI_INT,0,MPI_COMM_WORLD);
   _srv = (bool)srv;
   if ( _srv )
diff --git a/src/ParaMEDMEMTest/MPI2Connector.hxx b/src/ParaMEDMEMTest/MPI2Connector.hxx
index 6f2e83597..2d2dc6df7 100644
--- a/src/ParaMEDMEMTest/MPI2Connector.hxx
+++ b/src/ParaMEDMEMTest/MPI2Connector.hxx
@@ -22,6 +22,7 @@
 
 #include
 #include
+#include
 
 class MPI2Connector
 {
diff --git a/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.cxx b/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.cxx
index 28f51ab1d..133025665 100644
--- a/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.cxx
@@ -18,9 +18,10 @@
 //
 
 #include "MPIAccessDECTest.hxx"
+#include
+#include
 #include
-#include
 
 #ifndef WIN32
 #include
diff --git a/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.hxx b/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.hxx
index e8e0a20e4..9be1c2453 100644
--- a/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.hxx
+++ b/src/ParaMEDMEMTest/MPIAccess/MPIAccessDECTest.hxx
@@ -20,9 +20,9 @@
 #ifndef _MPIACCESSDECTEST_HXX_
 #define _MPIACCESSDECTEST_HXX_
 
-#include
 #include
+#include
 #include
 #include
 #include "mpi.h"
@@ -54,9 +54,9 @@ class MPIAccessDECTest : public CppUnit::TestFixture
 public:
 
   MPIAccessDECTest():CppUnit::TestFixture(){}
-  ~MPIAccessDECTest() override= default;
-  void setUp() override{}
-  void tearDown() override{}
+  ~MPIAccessDECTest(){}
+  void setUp(){}
+  void tearDown(){}
   void test_AllToAllDECSynchronousPointToPoint() ;
   void test_AllToAllDECAsynchronousPointToPoint() ;
   void test_AllToAllvDECSynchronousPointToPoint() ;
@@ -81,7 +81,7 @@ private:
 class MPIAccessDECTest_TmpFilesRemover
 {
 public:
-  MPIAccessDECTest_TmpFilesRemover() = default;
+  MPIAccessDECTest_TmpFilesRemover() {}
   ~MPIAccessDECTest_TmpFilesRemover();
 
   bool Register(const std::string theTmpFile);
diff --git a/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.cxx b/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.cxx
index 22bfb4b3a..7d0988762 100644
--- a/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.cxx
@@ -18,9 +18,10 @@
 //
 
 #include "MPIAccessTest.hxx"
+#include
+#include
 #include
-#include
 
 #ifndef WIN32
 #include
diff --git a/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.hxx b/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.hxx
index bff9e951c..88136278b 100644
--- a/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.hxx
+++ b/src/ParaMEDMEMTest/MPIAccess/MPIAccessTest.hxx
@@ -20,9 +20,9 @@
 #ifndef _MPIACCESSTEST_HXX_
 #define _MPIACCESSTEST_HXX_
 
-#include
 #include
+#include
 #include
 #include
 #include "mpi.h"
@@ -58,9 +58,9 @@ class MPIAccessTest : public CppUnit::TestFixture
 public:
 
   MPIAccessTest():CppUnit::TestFixture(){}
-  ~MPIAccessTest() override= default;
-  void setUp() override{}
-  void tearDown() override{}
+  ~MPIAccessTest(){}
+  void setUp(){}
+  void tearDown(){}
   void test_MPI_Access_Send_Recv() ;
   void test_MPI_Access_Cyclic_Send_Recv() ;
   void test_MPI_Access_SendRecv() ;
@@ -84,7 +84,7 @@ private:
 class MPIAccessTest_TmpFilesRemover
 {
 public:
-  MPIAccessTest_TmpFilesRemover() = default;
+  MPIAccessTest_TmpFilesRemover() {}
   ~MPIAccessTest_TmpFilesRemover();
 
   bool Register(const std::string theTmpFile);
diff --git a/src/ParaMEDMEMTest/MPIAccess/TestMPIAccess.cxx b/src/ParaMEDMEMTest/MPIAccess/TestMPIAccess.cxx
index 5470cf5e9..9a0941e19 100644
--- a/src/ParaMEDMEMTest/MPIAccess/TestMPIAccess.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/TestMPIAccess.cxx
@@ -20,7 +20,6 @@
 // --- include all MPIAccess Test
 //
 #include "MPIAccessTest.hxx"
-#include
 
 // --- Registers the fixture into the 'registry'
 
@@ -28,3 +27,4 @@ CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessTest );
 
 // --- generic Main program from KERNEL_SRC/src/Basics/Test
 
+#include "MPIMainTest.hxx"
diff --git a/src/ParaMEDMEMTest/MPIAccess/TestMPIAccessDEC.cxx b/src/ParaMEDMEMTest/MPIAccess/TestMPIAccessDEC.cxx
index 6b3bcd5ca..5fa083af9 100644
--- a/src/ParaMEDMEMTest/MPIAccess/TestMPIAccessDEC.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/TestMPIAccessDEC.cxx
@@ -20,7 +20,6 @@
 // --- include all MPIAccessDEC Test
 //
 #include "MPIAccessDECTest.hxx"
-#include
 
 // --- Registers the fixture into the 'registry'
 
@@ -28,3 +27,4 @@ CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessDECTest );
 
 // --- generic Main program from KERNEL_SRC/src/Basics/Test
 
+#include "MPIMainTest.hxx"
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllDEC.cxx b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllDEC.cxx
index 50344f2f6..31254c265 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllDEC.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllDEC.cxx
@@ -17,17 +17,16 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "MPIAccess.hxx"
-#include "CommInterface.hxx"
 #include "MPIAccessDECTest.hxx"
 #include
 
 #include "../ParaMEDMEM/MPIAccess/MPIAccessDEC.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -98,10 +97,10 @@ void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) {
     targetprocs.insert(i);
   }
 
-  auto* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
-  auto* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+  MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+  MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
 
-  auto * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+  MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
                                             Asynchronous ) ;
 
   MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
@@ -110,8 +109,8 @@ void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) {
 #define datamsglength 10
 
 //  int sts ;
-  int const sendcount = datamsglength ;
-  int const recvcount = datamsglength ;
+  int sendcount = datamsglength ;
+  int recvcount = datamsglength ;
   int * recvbuf = new int[datamsglength*size] ;
 
   int ireq ;
@@ -126,25 +125,25 @@ void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) {
     MyMPIAccessDEC->allToAll( sendbuf, sendcount , MPI_INT ,
                               recvbuf, recvcount , MPI_INT ) ;
 
-    int const nRecvReq = mpi_access->recvRequestIdsSize() ;
+    int nRecvReq = mpi_access->recvRequestIdsSize() ;
     int *ArrayOfRecvRequests = new int[nRecvReq] ;
-    int const nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+    int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
     mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
     mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
     delete [] ArrayOfRecvRequests ;
   }
 
-  int const nSendReq = mpi_access->sendRequestIdsSize() ;
+  int nSendReq = mpi_access->sendRequestIdsSize() ;
   debugStream << "test_AllToAllDEC" << myrank << " final SendRequestIds " << nSendReq
        << " SendRequests" << endl ;
   if ( nSendReq ) {
     int *ArrayOfSendRequests = new int[nSendReq] ;
-    int const nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+    int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
     mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
     delete [] ArrayOfSendRequests ;
   }
 
-  int const nRecvReq = mpi_access->recvRequestIdsSize() ;
+  int nRecvReq = mpi_access->recvRequestIdsSize() ;
   if ( nRecvReq ) {
     ostringstream strstream ;
     strstream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllTimeDEC.cxx b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllTimeDEC.cxx
index 763889335..25cb6e587 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllTimeDEC.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllTimeDEC.cxx
@@ -17,13 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "MPIAccess.hxx"
-#include "CommInterface.hxx"
-#include "DECOptions.hxx"
 #include "MPIAccessDECTest.hxx"
 #include
 
@@ -31,7 +30,7 @@
 //#include "CommInterface.hxx"
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
+#include "LinearTimeInterpolator.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -104,11 +103,11 @@ void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
     targetprocs.insert(i);
   }
 
-  auto* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
-  auto* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+  MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+  MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
 
 //  LinearTimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
-  auto * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+  MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
                                             Asynchronous ) ;
 //                                          Asynchronous , LinearInterp , 0.5 ) ;
   MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
@@ -123,13 +122,13 @@ void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
 #define datamsglength 10
 
   int sts ;
-  int const sendcount = datamsglength ;
-  int const recvcount = datamsglength ;
+  int sendcount = datamsglength ;
+  int recvcount = datamsglength ;
 
   double time = 0 ;
 //  double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
-  double const deltatime[maxproc] = {1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.} ;
-  double const maxtime = maxreq ;
+  double deltatime[maxproc] = {1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.} ;
+  double maxtime = maxreq ;
   double nextdeltatime = deltatime[myrank] ;
 //  MyMPIAccessDEC->InitTime( time , deltatime[myrank] , maxtime ) ;
 //  for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
@@ -174,7 +173,7 @@ void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
            << endl << "============================================================="
            << endl ;
       int *ArrayOfRecvRequests = new int[nRecvReq] ;
-      int const nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+      int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
       mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
       delete [] ArrayOfRecvRequests ;
       debugStream << strstream.str() << endl ;
@@ -221,12 +220,12 @@ void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
     CPPUNIT_FAIL( strstream.str() ) ;
   }
 
-  int const nSendReq = mpi_access->sendRequestIdsSize() ;
+  int nSendReq = mpi_access->sendRequestIdsSize() ;
   debugStream << "test_AllToAllTimeDEC" << myrank << " final SendRequestIds " << nSendReq
        << " SendRequests" << endl ;
   if ( nSendReq ) {
     int *ArrayOfSendRequests = new int[nSendReq] ;
-    int const nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+    int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
     mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
     delete [] ArrayOfSendRequests ;
   }
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvDEC.cxx b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvDEC.cxx
index 04979b117..713623df1 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvDEC.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvDEC.cxx
@@ -17,12 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "MPIAccess.hxx"
-#include "CommInterface.hxx"
 #include "MPIAccessDECTest.hxx"
 #include
 
@@ -30,7 +29,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "../ParaMEDMEM/MPIAccess/MPIAccessDEC.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -103,10 +102,10 @@ void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) {
     targetprocs.insert(i);
   }
 
-  auto* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
-  auto* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+  MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+  MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
 
-  auto * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+  MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
                                             Asynchronous ) ;
 
   MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
@@ -149,10 +148,10 @@ void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) {
 //    debugStream << "test_AllToAllvDEC" << myrank << " sendbuf " << sendbuf << endl ;
 //    MyMPIAccessDEC->CheckSent() ;
 
-    int const nRecvReq = mpi_access->recvRequestIdsSize() ;
+    int nRecvReq = mpi_access->recvRequestIdsSize() ;
 //    debugStream << "test_AllToAllvDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests" << endl ;
     int *ArrayOfRecvRequests = new int[nRecvReq] ;
-    int const nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+    int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
     mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
     mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
     delete [] ArrayOfRecvRequests ;
   }
@@ -167,17 +166,17 @@ void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) {
 //  debugStream << "test_AllToAllvDEC" << myrank << " final CheckSent" << endl ;
 //  MyMPIAccessDEC->CheckSent() ;
 
-  int const nSendReq = mpi_access->sendRequestIdsSize() ;
+  int nSendReq = mpi_access->sendRequestIdsSize() ;
   debugStream << "test_AllToAllvDEC" << myrank << " final SendRequestIds " << nSendReq
        << " SendRequests" << endl ;
   if ( nSendReq ) {
     int *ArrayOfSendRequests = new int[nSendReq] ;
-    int const nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+    int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
     mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
     delete [] ArrayOfSendRequests ;
   }
 
-  int const nRecvReq = mpi_access->recvRequestIdsSize() ;
+  int nRecvReq = mpi_access->recvRequestIdsSize() ;
   if ( nRecvReq ) {
     ostringstream strstream ;
     strstream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDEC.cxx b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDEC.cxx
index 69bbeee37..3a9473420 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDEC.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDEC.cxx
@@ -17,14 +17,13 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 #include
 
-#include "MPIAccess.hxx"
-#include "CommInterface.hxx"
-#include "DECOptions.hxx"
 #include "MPIAccessDECTest.hxx"
 #include
 
@@ -32,7 +31,7 @@
 //#include "CommInterface.hxx"
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
+#include "LinearTimeInterpolator.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -94,7 +93,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
   }
 
 //  int Asynchronous = atoi(argv[1]) ;
-  int const UseMPI_Alltoallv = UseMPINative ;
+  int UseMPI_Alltoallv = UseMPINative ;
 //  if ( argc == 3 ) {
 //    UseMPI_Alltoallv = atoi(argv[2]) ;
 //  }
@@ -113,11 +112,11 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
     targetprocs.insert(i);
   }
 
-  auto* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
-  auto* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+  MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+  MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
 
 //  TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
-  auto * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+  MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
                                             Asynchronous ) ;
 //                                          Asynchronous , LinearInterp , 0.5 ) ;
   MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp , 0.5 ) ;
@@ -153,7 +152,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
   }
 
   double timeLoc = 0 ;
-  double const deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+  double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
   double maxtime ;
   double nextdeltatime = deltatime[myrank] ;
   if ( UseMPI_Alltoallv ) {
@@ -163,7 +162,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
     maxtime = maxreq ;
 //    MyMPIAccessDEC->InitTime( time , nextdeltatime , maxtime ) ;
   }
-  time_t const begintime = time(nullptr) ;
+  time_t begintime = time(NULL) ;
 //  for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
   for ( timeLoc = 0 ; timeLoc <= maxtime && nextdeltatime != 0 ; timeLoc+=nextdeltatime ) {
     nextdeltatime = deltatime[myrank] ;
@@ -188,13 +187,13 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
     if ( UseMPI_Alltoallv ) {
       const MPI_Comm* comm = MyMPIAccessDEC->getComm();
-      auto * aSendTimeMessage = new TimeMessage ;
+      TimeMessage * aSendTimeMessage = new TimeMessage ;
       aSendTimeMessage->time = timeLoc ;
 //      aSendTimeMessage->deltatime = deltatime[myrank] ;
       aSendTimeMessage->deltatime = nextdeltatime ;
 //      aSendTimeMessage->maxtime = maxtime ;
       aSendTimeMessage->tag = (int ) (timeLoc/deltatime[myrank]) ;
-      auto * aRecvTimeMessage = new TimeMessage[size] ;
+      TimeMessage * aRecvTimeMessage = new TimeMessage[size] ;
       interface.allToAllV(aSendTimeMessage, sendtimecounts , stimedispls ,
                           mpi_access->timeType() ,
                           aRecvTimeMessage, recvtimecounts , rtimedispls ,
@@ -233,7 +232,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
            << endl << "============================================================="
            << endl ;
       int *ArrayOfRecvRequests = new int[nRecvReq] ;
-      int const nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+      int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
       mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
       delete [] ArrayOfRecvRequests ;
       debugStream << strstream.str() << endl ;
@@ -244,7 +243,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
     bool badrecvbuf = false ;
     for ( i = 0 ; i < size ; i++ ) {
       for ( int jj = 0 ; jj < datamsglength ; jj++ ) {
-        int const index = i*datamsglength+jj ;
+        int index = i*datamsglength+jj ;
         if ( jj < recvcounts[i] ) {
           if ( recvbuf[index] != (index/datamsglength)*1000000 +
                myrank*1000 + myrank*datamsglength+(index%datamsglength) ) {
@@ -324,7 +323,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
          << " RecvRequests = 0 OK" << endl ;
   }
 
-  time_t endtime = time(nullptr) ;
+  time_t endtime = time(NULL) ;
   debugStream << "test_AllToAllvTimeDEC" << myrank << " begintime " << begintime << " endtime " << endtime
        << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
        << " calls to AllToAll" << endl ;
@@ -349,7 +348,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINat
 
 //  MPI_Finalize();
 
-  endtime = time(nullptr) ;
+  endtime = time(NULL) ;
   debugStream << "test_AllToAllvTimeDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
        << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDoubleDEC.cxx b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDoubleDEC.cxx
index 3b568a1cb..e3ae828bc 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDoubleDEC.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_AllToAllvTimeDoubleDEC.cxx
@@ -18,14 +18,13 @@
 //
 
 #include
-#include
+#include
+#include
+#include
 #include
 #include
 #include
 
-#include "MPIAccess.hxx"
-#include "CommInterface.hxx"
-#include "DECOptions.hxx"
 #include "MPIAccessDECTest.hxx"
 #include
 
@@ -33,7 +32,7 @@
 //#include "CommInterface.hxx"
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
+#include "LinearTimeInterpolator.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -106,11 +105,11 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
     targetprocs.insert(i);
   }
 
-  auto* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
-  auto* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
+  MEDCoupling::MPIProcessorGroup* sourcegroup = new MEDCoupling::MPIProcessorGroup(interface,sourceprocs) ;
+  MEDCoupling::MPIProcessorGroup* targetgroup = new MEDCoupling::MPIProcessorGroup(interface,targetprocs) ;
 
 //  TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0 ) ;
-  auto * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+  MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
                                             Asynchronous ) ;
 //                                          Asynchronous , LinearInterp , 0.5 ) ;
   MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
@@ -144,7 +143,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
   }
 
   double timeLoc[maxproc] ;
-  double const deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+  double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
   double maxtime[maxproc] ;
   double nextdeltatime[maxproc] ;
   for ( i = 0 ; i < size ; i++ ) {
@@ -152,7 +151,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
     maxtime[i] = maxreq ;
     nextdeltatime[i] = deltatime[i] ;
   }
-  time_t const begintime = time(nullptr) ;
+  time_t begintime = time(NULL) ;
   for ( timeLoc[myrank] = 0 ; timeLoc[myrank] <= maxtime[myrank] && nextdeltatime[myrank] != 0 ;
         timeLoc[myrank]+=nextdeltatime[myrank] ) {
 //local and target times
@@ -174,9 +173,9 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
     debugStream << "test" << myrank << "=====TIME " << timeLoc[myrank] << "=====DELTATIME "
          << nextdeltatime[myrank] << "=====MAXTIME " << maxtime[myrank] << " ======" << endl ;
-    auto * sendbuf = new double[datamsglength*size] ;
+    double * sendbuf = new double[datamsglength*size] ;
 //    double * sendbuf = (double *) malloc(sizeof(double)*datamsglength*size) ;
-    auto * recvbuf = new double[datamsglength*size] ;
+    double * recvbuf = new double[datamsglength*size] ;
     int j ;
 //debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " sendbuf" ;
     for ( target = 0 ; target < size ; target++ ) {
@@ -209,7 +208,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
            << endl << "============================================================"
            << endl ;
       int *ArrayOfRecvRequests = new int[nRecvReq] ;
-      int const nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+      int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
       mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
       delete [] ArrayOfRecvRequests ;
       debugStream << strstream.str() << endl ;
@@ -220,7 +219,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
     bool badrecvbuf = false ;
     for ( target = 0 ; target < size ; target++ ) {
       for ( int jj = 0 ; jj < datamsglength ; jj++ ) {
-        int const index = target*datamsglength+jj ;
+        int index = target*datamsglength+jj ;
         if ( jj < recvcounts[target] ) {
           if ( fabs(recvbuf[index] - (target*1000000 + myrank*10000 +
                                       (timeLoc[target]/deltatime[target])*100 + jj)) > 101) {
@@ -299,7 +298,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
          << " RecvRequests = 0 OK" << endl ;
   }
 
-  time_t endtime = time(nullptr) ;
+  time_t endtime = time(NULL) ;
   debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " begintime " << begintime << " endtime " << endtime
        << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
        << " calls to AllToAll" << endl ;
@@ -323,7 +322,7 @@ void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
 
 //  MPI_Finalize();
 
-  endtime = time(nullptr) ;
+  endtime = time(NULL) ;
   debugStream << "test_AllToAllvTimeDoubleDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
        << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cancel.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cancel.cxx
index 4f7471f17..54cff9995 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cancel.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cancel.cxx
@@ -17,9 +17,10 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
-#include "CommInterface.hxx"
-#include "MPIProcessorGroup.hxx"
+#include
+#include
+#include
+#include
 
 #include
 #include
@@ -65,9 +66,9 @@ void MPIAccessTest::test_MPI_Access_Cancel() {
 
   debugStream << "test_MPI_Access_Cancel" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_ISend_IRecv.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_ISend_IRecv.cxx
index 1be07e3c9..0d70afd7a 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_ISend_IRecv.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_ISend_IRecv.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -59,9 +59,9 @@ void MPIAccessTest::test_MPI_Access_Cyclic_ISend_IRecv() {
 
   debugStream << "test_MPI_Access_Cyclic_ISend_IRecv" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -73,8 +73,8 @@ void MPIAccessTest::test_MPI_Access_Cyclic_ISend_IRecv() {
     return ;
   }
 
-  int const alltarget[3] = {1 , 2 , 0 } ;
-  int const allsource[3] = {2 , 0 , 1 } ;
+  int alltarget[3] = {1 , 2 , 0 } ;
+  int allsource[3] = {2 , 0 , 1 } ;
   int SendRequestId[maxsend] ;
   int RecvRequestId[maxsend] ;
   int sendbuf[maxsend] ;
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_Send_Recv.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_Send_Recv.cxx
index 0d02dd12b..d1e6922cc 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_Send_Recv.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Cyclic_Send_Recv.cxx
@@ -17,10 +17,11 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -28,7 +29,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -58,9 +58,9 @@ void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
 
   debugStream << "test_MPI_Access_Cyclic_Send_Recv" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -70,8 +70,8 @@ void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
     return ;
   }
 
-  int const alltarget[3] = {1 , 2 , 0 } ;
-  int const allsource[3] = {2 , 0 , 1 } ;
+  int alltarget[3] = {1 , 2 , 0 } ;
+  int allsource[3] = {2 , 0 , 1 } ;
   int RequestId[10] ;
   int sts ;
   int i = 0 ;
@@ -109,7 +109,7 @@ void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
   }
   if ( myrank == 0 ) {
     if ( i != 9 ) {
-      int const ii = i + 1 ;
+      int ii = i + 1 ;
       sts = mpi_access.send(&ii,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
       debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
            << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_IProbe.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_IProbe.cxx
index 057c62c00..38dbdfc51 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_IProbe.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_IProbe.cxx
@@ -17,9 +17,10 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
-#include "CommInterface.hxx"
-#include "MPIProcessorGroup.hxx"
+#include
+#include
+#include
+#include
 
 #include
 #include
@@ -65,9 +66,9 @@ void MPIAccessTest::test_MPI_Access_IProbe() {
 
   debugStream << "test_MPI_Access_IProbe" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISendRecv.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISendRecv.cxx
index 57c03bdff..9c6f236a4 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISendRecv.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISendRecv.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -59,9 +59,9 @@ void MPIAccessTest::test_MPI_Access_ISendRecv() {
 
   debugStream << "test_MPI_Access_ISendRecv" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv.cxx
index 549c507b6..f9a8abce6 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -59,9 +59,9 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv() {
 
   debugStream << "test_MPI_Access_ISend_IRecv" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_BottleNeck.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_BottleNeck.cxx
index 6aace3574..9db74c589 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_BottleNeck.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_BottleNeck.cxx
@@ -17,11 +17,13 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +31,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -62,9 +63,9 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {
 
   debugStream << "test_MPI_Access_ISend_IRecv_BottleNeck" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -76,7 +77,7 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {
     return ;
   }
 
-  int const target = 1 - myrank ;
+  int target = 1 - myrank ;
   int SendRequestId[maxreq] ;
   int RecvRequestId[maxreq] ;
   int sts ;
@@ -146,7 +147,7 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {
     size2 = mpi_access.sendRequestIdsSize() ;
     debugStream << "test" << myrank << " after WaitAll sendreqsize " << size2 << endl ;
     int * ArrayOfSendRequests = new int[ size2 ] ;
-    int const nSendRequest = mpi_access.sendRequestIds( size2 , ArrayOfSendRequests ) ;
+    int nSendRequest = mpi_access.sendRequestIds( size2 , ArrayOfSendRequests ) ;
    for ( i = 0 ; i < nSendRequest ; i++ ) {
       mpi_access.deleteRequest( ArrayOfSendRequests[i] ) ;
     }
@@ -159,7 +160,7 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {
     size2 = mpi_access.recvRequestIdsSize() ;
     debugStream << "test" << myrank << " after WaitAll recvreqsize " << size2 << endl ;
     int * ArrayOfRecvRequests = new int[ size2 ] ;
-    int const nRecvRequest = mpi_access.recvRequestIds( size2 , ArrayOfRecvRequests ) ;
+    int nRecvRequest = mpi_access.recvRequestIds( size2 , ArrayOfRecvRequests ) ;
     for ( i = 0 ; i < nRecvRequest ; i++ ) {
       mpi_access.deleteRequest( ArrayOfRecvRequests[i] ) ;
     }
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length.cxx
index 0a6a1c38c..0e02ef979 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -61,9 +61,9 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length() {
 
   debugStream << "test_MPI_Access_ISend_IRecv_Length" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -75,7 +75,7 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length() {
     return ;
   }
 
-  int const target = 1 - myrank ;
+  int target = 1 - myrank ;
   int SendRequestId[maxreq] ;
   int RecvRequestId[maxreq] ;
   int sts ;
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length_1.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length_1.cxx
index d7155f82b..38815c30d 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length_1.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_ISend_IRecv_Length_1.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -59,9 +59,9 @@ void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length_1() {
 
   debugStream << "test_MPI_Access_ISend_IRecv_Length_1" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Probe.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Probe.cxx
index c41e730d9..ba027e24e 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Probe.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Probe.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -59,9 +59,9 @@ void MPIAccessTest::test_MPI_Access_Probe() {
 
   debugStream << "test_MPI_Access_Probe" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_SendRecv.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_SendRecv.cxx
index 01655f99d..3842d3650 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_SendRecv.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_SendRecv.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -59,9 +59,9 @@ void MPIAccessTest::test_MPI_Access_SendRecv() {
 
   debugStream << "MPIAccessTest::test_MPI_Access_SendRecv" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -71,7 +71,7 @@ void MPIAccessTest::test_MPI_Access_SendRecv() {
     return ;
   }
 
-  int const target = 1 - myrank ;
+  int target = 1 - myrank ;
   int sendRequestId[10] ;
   int recvRequestId[10] ;
   int sts ;
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv.cxx
index aac016f0d..6fcf69473 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -57,9 +57,9 @@ void MPIAccessTest::test_MPI_Access_Send_Recv() {
 
   debugStream << "test_MPI_Access_Send_Recv" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -69,7 +69,7 @@ void MPIAccessTest::test_MPI_Access_Send_Recv() {
     return ;
   }
 
-  int const target = 1 - myrank ;
+  int target = 1 - myrank ;
   int RequestId[10] ;
   int sts ;
   int i ;
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv_Length.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv_Length.cxx
index beba51d59..9d4d3c913 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv_Length.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Send_Recv_Length.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -61,9 +61,9 @@ void MPIAccessTest::test_MPI_Access_Send_Recv_Length() {
 
   debugStream << "test_MPI_Access_Send_Recv_Length" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -73,7 +73,7 @@ void MPIAccessTest::test_MPI_Access_Send_Recv_Length() {
     return ;
   }
 
-  int const target = 1 - myrank ;
+  int target = 1 - myrank ;
   int RequestId[10] ;
   int sendbuf[9000] ;
   int recvbuf[9000] ;
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time.cxx
index 3d3bad0cd..912bce91d 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -61,9 +61,9 @@ void MPIAccessTest::test_MPI_Access_Time() {
 
   debugStream << "test_MPI_Access_Time" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
   MEDCoupling::MPIAccess mpi_access( group ) ;
 
@@ -90,8 +90,8 @@ void MPIAccessTest::test_MPI_Access_Time() {
   MEDCoupling::TimeMessage aSendTimeMsg[maxreq] ;
   MEDCoupling::TimeMessage aRecvTimeMsg[maxreq] ;
   double t ;
-  double const dt = 1. ;
-  double const maxt = 10. ;
+  double dt = 1. ;
+  double maxt = 10. ;
   for ( t = 0 ; t < maxt ; t = t+dt ) {
     if ( myrank == 0 ) {
       aSendTimeMsg[i].time = t ;
diff --git a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time_0.cxx b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time_0.cxx
index 0a3aae2d0..9eb214d4b 100644
--- a/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time_0.cxx
+++ b/src/ParaMEDMEMTest/MPIAccess/test_MPI_Access_Time_0.cxx
@@ -17,11 +17,12 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
+#include
+#include
+#include
 #include
 #include
 
-#include "CommInterface.hxx"
 #include "MPIAccessTest.hxx"
 #include
 
@@ -29,7 +30,6 @@
 //#include "ProcessorGroup.hxx"
 //#include "MPIProcessorGroup.hxx"
 #include "MPIAccess.hxx"
-#include "MPIProcessorGroup.hxx"
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -83,16 +83,16 @@ void MPIAccessTest::test_MPI_Access_Time_0() {
 #define maxreq 100
 
   double t ;
-  double const dt[2] = {2., 1.} ;
-  double const maxt = maxreq/dt[myrank] ;
+  double dt[2] = {2., 1.} ;
+  double maxt = maxreq/dt[myrank] ;
 
   debugStream << "test_MPI_Access_Time_0 rank" << myrank << endl ;
 
-  MEDCoupling::CommInterface const interface ;
+  MEDCoupling::CommInterface interface ;
 
-  auto* group = new MEDCoupling::MPIProcessorGroup(interface) ;
+  MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
 
-  auto * mpi_access = new MEDCoupling::MPIAccess( group ) ;
+  MEDCoupling::MPIAccess * mpi_access = new MEDCoupling::MPIAccess( group ) ;
 
   if ( myrank >= 2 ) {
     debugStream << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
@@ -145,7 +145,7 @@ void MPIAccessTest::test_MPI_Access_Time_0() {
       //CheckSent
       //=========
       int sendrequests[2*maxreq] ;
-      int const sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq ,
+      int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq ,
                                                           sendrequests ) ;
      int j , flag ;
      for ( j = 0 ; j < sendreqsize ; j++ ) {
@@ -229,7 +229,7 @@ void MPIAccessTest::test_MPI_Access_Time_0() {
       }
       mpi_access->deleteRequest( RecvTimeRequestId[lasttime] ) ;
 
-      double const deltatime = aRecvTimeMsg[lasttime].deltatime ;
+      double deltatime = aRecvTimeMsg[lasttime].deltatime ;
       //double maxtime = aRecvTimeMsg[lasttime].maxtime ;
       double nexttime = aRecvTimeMsg[lasttime].time + deltatime ;
       debugStream << "test" << myrank << " t " << t << " lasttime " << lasttime
@@ -367,7 +367,7 @@ void MPIAccessTest::test_MPI_Access_Time_0() {
     //==============
     debugStream << "test" << myrank << " CheckFinalSent :" << endl ;
     int sendrequests[2*maxreq] ;
-    int const sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
+    int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
     int j ;
     for ( j = 0 ; j < sendreqsize ; j++ ) {
       sts = mpi_access->wait( sendrequests[j] ) ;
@@ -380,7 +380,7 @@ void MPIAccessTest::test_MPI_Access_Time_0() {
   else {
     debugStream << "test" << myrank << " CheckFinalRecv :" << endl ;
     int recvrequests[2*maxreq] ;
-    int const recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
+    int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
     int cancelflag ;
     int j ;
     for ( j = 0 ; j < recvreqsize ; j++ ) {
diff --git a/src/ParaMEDMEMTest/MPIMainTest.hxx b/src/ParaMEDMEMTest/MPIMainTest.hxx
index 4a78c0f7d..a75cf72c9 100644
--- a/src/ParaMEDMEMTest/MPIMainTest.hxx
+++ b/src/ParaMEDMEMTest/MPIMainTest.hxx
@@ -21,13 +21,13 @@
 #define _MPIMAINTEST_HXX_
 
 #include
-#include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
+#include
 
 #include
 
@@ -92,7 +92,7 @@ int main(int argc, char* argv[])
 
   // ---  Run the tests.
 
-  bool const wasSucessful = result.wasSuccessful();
+  bool wasSucessful = result.wasSuccessful();
   testFile.close();
 
   // ---  Return error code 1 if the one of test failed.
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.cxx
index 19d149ad2..10286c332 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest.cxx
@@ -18,12 +18,14 @@
 //
 
 #include "ParaMEDMEMTest.hxx"
+#include "TestInterpKernelUtils.hxx"
+#include
+#include
 #include
+#include
 #include
 #include
-#include
-#include
 
 #ifndef WIN32
 #include
@@ -40,7 +42,7 @@
 
 std::string ParaMEDMEMTest::getTmpDirectory()
 {
-  std::string const path;
+  std::string path;
 
   std::list<std::string> dirs;
   if ( getenv("TMP") )
    dirs.push_back( getenv("TMP" ));
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
index 8d44f185b..fb2b339ec 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest.hxx
@@ -20,9 +20,9 @@
 #ifndef _ParaMEDMEMTEST_HXX_
 #define _ParaMEDMEMTEST_HXX_
 
-#include
 #include
+#include
 #include
 #include
 #include "mpi.h"
@@ -96,9 +96,9 @@ class ParaMEDMEMTest : public CppUnit::TestFixture
 public:
 
   ParaMEDMEMTest():CppUnit::TestFixture(){}
-  ~ParaMEDMEMTest() override= default;
-  void setUp() override{}
-  void tearDown() override{}
+  ~ParaMEDMEMTest(){}
+  void setUp(){}
+  void tearDown(){}
   void testMPIProcessorGroup_constructor();
   void testMPIProcessorGroup_boolean();
   void testMPIProcessorGroup_rank();
@@ -188,7 +188,7 @@ private:
 class ParaMEDMEMTest_TmpFilesRemover
 {
 public:
-  ParaMEDMEMTest_TmpFilesRemover() = default;
+  ParaMEDMEMTest_TmpFilesRemover() {}
   ~ParaMEDMEMTest_TmpFilesRemover();
 
   bool Register(const std::string theTmpFile);
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx
index 4811eccb5..a9ea76378 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_1.cxx
@@ -17,28 +17,20 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
-#include
 #include
 
-#include "MCIdType.hxx"
-#include "MEDCouplingMemArray.hxx"
-#include "MEDCouplingRefCountObject.hxx"
-#include "MEDCouplingNatureOfFieldEnum"
 #include "MPI2Connector.hxx"
-#include "NormalizedGeometricTypes"
 #include "ParaMESH.hxx"
 #include "ParaFIELD.hxx"
 #include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingFieldDouble.hxx"
 #include "InterpKernelDEC.hxx"
 #include "MPIProcessorGroup.hxx"
 #include "CommInterface.hxx"
 
 #include
 #include
-#include
 #include
-#include
 
 class MPI2ParaMEDMEMTest : public CppUnit::TestFixture
 {
@@ -57,9 +49,9 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
   MPI_Comm gcom;
   std::string service = "SERVICE";
   std::ostringstream meshfilename, meshname;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
+  MEDCoupling::ParaMESH *paramesh=0;
   MEDCoupling::MEDCouplingUMesh *mesh;
-  MEDCoupling::ParaFIELD *parafield=nullptr;
+  MEDCoupling::ParaFIELD *parafield=0;
   MEDCoupling::CommInterface *interface;
   MEDCoupling::MPIProcessorGroup *source, *target;
 
@@ -72,7 +64,7 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
   }
 
   /* Connection to remote program */
-  auto *mpio = new MPI2Connector;
+  MPI2Connector *mpio = new MPI2Connector;
   gcom = mpio->remoteMPI2Connect(service);
   MPI_Comm_size( gcom, &gsize );
   MPI_Comm_rank( gcom, &grank );
@@ -102,7 +94,7 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
   mesh->setCoords(myCoords);
   myCoords->decrRef();
   paramesh=new ParaMESH(mesh,*source,"source mesh");
-  MEDCoupling::ComponentTopology const comptopo;
+  MEDCoupling::ComponentTopology comptopo;
   parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
   double *value=parafield->getField()->getArray()->getPointer();
   value[0]=34+13*((double)grank);
@@ -130,3 +122,4 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
 
 CPPUNIT_TEST_SUITE_REGISTRATION( MPI2ParaMEDMEMTest );
 
+#include "MPIMainTest.hxx"
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx
index aa6f04b4c..e5c279d07 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTestMPI2_2.cxx
@@ -17,28 +17,20 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
-#include
 #include
 
-#include "MCIdType.hxx"
-#include "MEDCouplingMemArray.hxx"
-#include "MEDCouplingRefCountObject.hxx"
-#include "MEDCouplingNatureOfFieldEnum"
 #include "MPI2Connector.hxx"
-#include "NormalizedGeometricTypes"
 #include "ParaMESH.hxx"
 #include "ParaFIELD.hxx"
 #include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingFieldDouble.hxx"
 #include "InterpKernelDEC.hxx"
 #include "MPIProcessorGroup.hxx"
 #include "CommInterface.hxx"
 
 #include
 #include
-#include
 #include
-#include
 
 class MPI2ParaMEDMEMTest : public CppUnit::TestFixture
 {
@@ -57,9 +49,9 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
   MPI_Comm gcom;
   std::string service = "SERVICE";
   std::ostringstream meshfilename, meshname;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
+  MEDCoupling::ParaMESH *paramesh=0;
   MEDCoupling::MEDCouplingUMesh* mesh;
-  MEDCoupling::ParaFIELD *parafield=nullptr;
+  MEDCoupling::ParaFIELD *parafield=0;
   MEDCoupling::CommInterface* interface;
   MEDCoupling::MPIProcessorGroup* source, *target;
 
@@ -72,7 +64,7 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
   }
 
   /* Connection to remote program */
-  auto *mpio = new MPI2Connector;
+  MPI2Connector *mpio = new MPI2Connector;
   gcom = mpio->remoteMPI2Connect(service);
 
   MPI_Comm_size( gcom, &gsize );
@@ -106,7 +98,7 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
   mesh->setCoords(myCoords);
   myCoords->decrRef();
   paramesh=new ParaMESH (mesh,*target,"target mesh");
-  MEDCoupling::ComponentTopology const comptopo;
+  MEDCoupling::ComponentTopology comptopo;
   parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
 
   MEDCoupling::InterpKernelDEC dec(*source,*target);
@@ -135,3 +127,4 @@ void MPI2ParaMEDMEMTest::testBasicMPI2_1()
 
 CPPUNIT_TEST_SUITE_REGISTRATION( MPI2ParaMEDMEMTest );
 
+#include "MPIMainTest.hxx"
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx
index a78b0f1c1..bf33f7ede 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_BlockTopology.cxx
@@ -17,15 +17,17 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "MCIdType.hxx"
 #include "ParaMEDMEMTest.hxx"
 #include
 
+#include "InterpolationUtils.hxx"
 #include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
 #include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
 #include "BlockTopology.hxx"
 
-#include
+#include
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -64,8 +66,8 @@ void ParaMEDMEMTest::testBlockTopology_constructor()
   MPI_Comm_size(MPI_COMM_WORLD,&size);
   int rank;
   MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-  CommInterface const interface;
-  MPIProcessorGroup const group(interface);
+  CommInterface interface;
+  MPIProcessorGroup group(interface);
   BlockTopology blocktopo(group,1);
   CPPUNIT_ASSERT_EQUAL(ToIdType(1),blocktopo.getNbLocalElements());
   CPPUNIT_ASSERT_EQUAL(ToIdType(size),blocktopo.getNbElements());
@@ -105,8 +107,8 @@ void ParaMEDMEMTest::testBlockTopology_serialize()
   MPI_Comm_size(MPI_COMM_WORLD,&size);
   int rank;
   MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-  CommInterface const interface;
-  MPIProcessorGroup const group(interface);
+  CommInterface interface;
+  MPIProcessorGroup group(interface);
   BlockTopology blocktopo(group,3);
 
   //testing the serialization process that is used to transfer a
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
index 1301d5b72..8207031d5 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_ByStringMPIProcessorGroup.cxx
@@ -20,6 +20,7 @@
 #include "ParaMEDMEMTest.hxx"
 #include
 #include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
 #include "ByStringMPIProcessorGroup.hxx"
 
 #include
@@ -44,8 +45,8 @@ using namespace MEDCoupling;
 
 void ParaMEDMEMTest::testByStringMPIProcessorGroup_constructor()
 {
-  CommInterface const comm_interface;
-  auto* group = new ByStringMPIProcessorGroup(comm_interface);
+  CommInterface comm_interface;
+  ByStringMPIProcessorGroup* group = new ByStringMPIProcessorGroup(comm_interface);
   int size;
   MPI_Comm_size(MPI_COMM_WORLD, &size);
   CPPUNIT_ASSERT_EQUAL(size,group->size());
@@ -71,9 +72,9 @@ void ParaMEDMEMTest::testByStringMPIProcessorGroup_stringconstructor()
   else
     myTag = "gr1";
 
-  CommInterface const comm_interface;
+  CommInterface comm_interface;
   ByStringMPIProcessorGroup * group = new ByStringMPIProcessorGroup(comm_interface,myTag,MPI_COMM_WORLD);
-  auto * copygroup = new ByStringMPIProcessorGroup(*group);
+  ByStringMPIProcessorGroup * copygroup = new ByStringMPIProcessorGroup(*group);
   CPPUNIT_ASSERT(group);
   CPPUNIT_ASSERT(copygroup);
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx
index 28d163896..e976fe1c6 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_FabienAPI.cxx
@@ -17,13 +17,9 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "MEDCouplingMemArray.hxx"
-#include "MCIdType.hxx"
-#include "NormalizedGeometricTypes"
-#include "MEDCouplingRefCountObject.hxx"
-#include "MEDCouplingNatureOfFieldEnum"
 #include "ParaMEDMEMTest.hxx"
 #include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
 #include "MPIProcessorGroup.hxx"
 #include "InterpKernelDEC.hxx"
 #include "MEDCouplingUMesh.hxx"
@@ -31,7 +27,7 @@
 #include "ParaFIELD.hxx"
 #include "ComponentTopology.hxx"
 
-#include
+#include
 
 using namespace MEDCoupling;
 
@@ -44,22 +40,22 @@ void ParaMEDMEMTest::testFabienAPI1()
 //
   if(size!=3)
     return ;
-  int const procs_source_c[1]={0};
+  int procs_source_c[1]={0};
   std::set<int> procs_source(procs_source_c,procs_source_c+1);
-  int const procs_target_c[1]={1};
+  int procs_target_c[1]={1};
   std::set<int> procs_target(procs_target_c,procs_target_c+1);
 //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD *parafield=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD *parafield=0;
 //
-  MEDCoupling::CommInterface const interface;
+  MEDCoupling::CommInterface interface;
 //
   MPI_Barrier(MPI_COMM_WORLD);
   double targetCoords[8]={ 0.,0., 1., 0., 0., 1., 1., 1. };
-  CommInterface const comm;
+  CommInterface comm;
 //
-  auto *dec=new MEDCoupling::InterpKernelDEC(procs_source,procs_target);
+  MEDCoupling::InterpKernelDEC *dec=new MEDCoupling::InterpKernelDEC(procs_source,procs_target);
   if(dec->isInSourceSide())
     {
      mesh=MEDCouplingUMesh::New();
@@ -73,7 +69,7 @@ void ParaMEDMEMTest::testFabienAPI1()
       mesh->allocateCells(1);
       mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn);
       mesh->finishInsertingCells();
-      MEDCoupling::ComponentTopology const comptopo;
+      MEDCoupling::ComponentTopology comptopo;
      paramesh=new ParaMESH(mesh,*dec->getSourceGrp(),"source mesh");
      parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafield->getField()->setNature(IntensiveMaximum);
@@ -94,7 +90,7 @@ void ParaMEDMEMTest::testFabienAPI1()
       mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn);
       mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+3);
       mesh->finishInsertingCells();
-      MEDCoupling::ComponentTopology const comptopo;
+      MEDCoupling::ComponentTopology comptopo;
      paramesh=new ParaMESH(mesh,*dec->getTargetGrp(),"target mesh");
      parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafield->getField()->setNature(IntensiveMaximum);
@@ -129,22 +125,22 @@ void ParaMEDMEMTest::testFabienAPI2()
 //
   if(size!=3)
     return ;
-  int const procs_source_c[1]={2};//difference with testFabienAPI1
+  int procs_source_c[1]={2};//difference with testFabienAPI1
   std::set<int> procs_source(procs_source_c,procs_source_c+1);
-  int const procs_target_c[1]={1};
+  int procs_target_c[1]={1};
   std::set<int> procs_target(procs_target_c,procs_target_c+1);
 //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD *parafield=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD *parafield=0;
 //
-  MEDCoupling::CommInterface const interface;
+  MEDCoupling::CommInterface interface;
 //
   MPI_Barrier(MPI_COMM_WORLD);
   double targetCoords[8]={ 0.,0., 1., 0., 0., 1., 1., 1. };
-  CommInterface const comm;
+  CommInterface comm;
 //
-  auto *dec=new MEDCoupling::InterpKernelDEC(procs_source,procs_target);
+  MEDCoupling::InterpKernelDEC *dec=new MEDCoupling::InterpKernelDEC(procs_source,procs_target);
   if(dec->isInSourceSide())
     {
      mesh=MEDCouplingUMesh::New();
@@ -158,7 +154,7 @@ void ParaMEDMEMTest::testFabienAPI2()
       mesh->allocateCells(1);
       mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn);
       mesh->finishInsertingCells();
-      MEDCoupling::ComponentTopology const comptopo;
+      MEDCoupling::ComponentTopology comptopo;
      paramesh=new ParaMESH(mesh,*dec->getSourceGrp(),"source mesh");
      parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafield->getField()->setNature(IntensiveMaximum);
@@ -179,7 +175,7 @@ void ParaMEDMEMTest::testFabienAPI2()
       mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn);
       mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+3);
       mesh->finishInsertingCells();
-      MEDCoupling::ComponentTopology const comptopo;
+      MEDCoupling::ComponentTopology comptopo;
      paramesh=new ParaMESH(mesh,*dec->getTargetGrp(),"target mesh");
      parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafield->getField()->setNature(IntensiveMaximum);
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx
index 11d7706a8..51d1895e5 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_Gauthier1.cxx
@@ -17,14 +17,6 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "MEDCouplingMemArray.hxx"
-#include "MCAuto.hxx"
-#include "MCIdType.hxx"
-#include "NormalizedGeometricTypes"
-#include "MEDCouplingRefCountObject.hxx"
-#include "MEDCouplingNatureOfFieldEnum"
-#include "InterpKernelException.hxx"
-#include "InterpolationOptions.hxx"
 #include "ParaMEDMEMTest.hxx"
 #include
 
@@ -40,8 +32,8 @@
 #include "ComponentTopology.hxx"
 #include "BlockTopology.hxx"
 
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -155,14 +147,14 @@ void ParaMEDMEMTest::testGauthier1()
     {1.,1.,1e200,1e200},
     {20.5,1.,1e200,1e200}
   };
-  int const expectedLgth[8]={4,4,2,2,4,4,2,2};
+  int expectedLgth[8]={4,4,2,2,4,4,2,2};
 
   for (int send=0;send<2;send++)
     for (int rec=0;rec<2;rec++)
       {
        InterpKernelDEC dec_emetteur(emetteur_group, recepteur_group);
-       MEDCoupling::ParaFIELD *champ_emetteur(nullptr),*champ_recepteur(nullptr);
-       MEDCoupling::ParaMESH *paramesh(nullptr);
+       MEDCoupling::ParaFIELD *champ_emetteur(0),*champ_recepteur(0);
+       MEDCoupling::ParaMESH *paramesh(0);
        MCAuto<MEDCouplingUMesh> mesh;
        dec_emetteur.setOrientation(2);
        if (send==0)
@@ -174,7 +166,7 @@ void ParaMEDMEMTest::testGauthier1()
            mesh=init_triangleGauthier1(is_master);
          }
        paramesh=new MEDCoupling::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"emetteur mesh");
-       MEDCoupling::ComponentTopology const comptopo;
+       MEDCoupling::ComponentTopology comptopo;
        champ_emetteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
        champ_emetteur->getField()->setNature(IntensiveMaximum);
        champ_emetteur->setOwnSupport(true);
@@ -220,7 +212,7 @@ void ParaMEDMEMTest::testGauthier1()
 
       //bool ok=false; // Is the time interval successfully solved ?
            // Loop on the time interval tries
-           if(true) {
+           if(1) {
 
            if (cas=="emetteur")
@@ -258,8 +250,8 @@ void ParaMEDMEMTest::testGauthier1()
 void ParaMEDMEMTest::testGauthier2()
 {
   std::cout << "testGauthier2\n";
-  double const valuesExpected1[2]={0.,0.};
-  double const valuesExpected2[2]={0.95,0.970625};
+  double valuesExpected1[2]={0.,0.};
+  double valuesExpected2[2]={0.95,0.970625};
 
   double valuesExpected30[]={0., 0., 0.05, 0., 0., 0.15, 0., 0., 0.25, 0., 0., 0.35, 0., 0., 0.45, 0., 0., 0.55, 0., 0., 0.65, 0., 0., 0.75, 0., 0., 0.85, 0., 0., 0.95};
   double valuesExpected31[]={0., 0., 0.029375, 0., 0., 0.029375, 0., 0., 0.1, 0., 0., 0.1, 0., 0., 0.2, 0., 0., 0.2, 0., 0., 0.3, 0., 0., 0.3, 0., 0., 0.4, 0., 0., 0.4, 0., 0., 0.5, 0., 0., 0.5, 0., 0., 0.6, 0., 0., 0.6, 0., 0., 0.7, 0., 0., 0.7, 0., 0., 0.8, 0., 0., 0.8, 0., 0., 0.9, 0., 0., 0.9, 0., 0., 0.970625, 0., 0., 0.970625 };
@@ -282,7 +274,7 @@ void ParaMEDMEMTest::testGauthier2()
   MPIProcessorGroup entree_chaude_group(comm,entree_chaude_ids);
   MPIProcessorGroup Genepi_group(comm,Genepi_ids);
 
-  MEDCoupling::ParaFIELD *vitesse(nullptr);
+  MEDCoupling::ParaFIELD *vitesse(0);
   InterpKernelDEC dec_vit_in_chaude(entree_chaude_group, Genepi_group);
 
   if ( entree_chaude_group.containsMyRank())
@@ -302,7 +294,7 @@ void ParaMEDMEMTest::testGauthier2()
      arr=DataArrayDouble::New(); arr->alloc(63,3);
      std::copy(valsOfField,valsOfField+189,arr->getPointer());
      f->setArray(arr); f->setNature(IntensiveMaximum);
-     auto *paramesh(new MEDCoupling::ParaMESH(mesh,entree_chaude_group,"emetteur mesh"));
+     MEDCoupling::ParaMESH *paramesh(new MEDCoupling::ParaMESH(mesh,entree_chaude_group,"emetteur mesh"));
      vitesse=new MEDCoupling::ParaFIELD(f,paramesh,entree_chaude_group);
      vitesse->setOwnSupport(true);
      dec_vit_in_chaude.setMethod("P1");
@@ -322,7 +314,7 @@ void ParaMEDMEMTest::testGauthier2()
      f->setMesh(mesh); f->setName("vitesse_in_chaude");
      arr=DataArrayDouble::New(); arr->alloc(f->getNumberOfTuplesExpected()*3); arr->fillWithZero(); arr->rearrange(3);
      f->setArray(arr); f->setNature(IntensiveMaximum);
-     auto *paramesh(new MEDCoupling::ParaMESH(mesh,Genepi_group,"recepteur mesh"));
+     MEDCoupling::ParaMESH *paramesh(new MEDCoupling::ParaMESH(mesh,Genepi_group,"recepteur mesh"));
      vitesse=new MEDCoupling::ParaFIELD(f,paramesh,Genepi_group);
      vitesse->setOwnSupport(true);
      dec_vit_in_chaude.setMethod(f->getDiscretization()->getRepr());
@@ -354,7 +346,7 @@ void ParaMEDMEMTest::testGauthier2()
   CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected1[type],pmin,1e-12);
   CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected2[type],pmax,1e-12);
 
-  std::size_t const nbCompo(vitesse->getField()->getNumberOfComponents());
+  std::size_t nbCompo(vitesse->getField()->getNumberOfComponents());
   p=vitesse->getField()->getArray()->begin();
   for(int i=0;igetField()->getNumberOfTuples();i++)
     for(std::size_t c=0;c mesh;
        dec_emetteur.setOrientation(2);
        if (send==0)
@@ -475,7 +467,7 @@ void ParaMEDMEMTest::testGauthier3_GEN(bool withIDs, int nprocs)
        if (cas!="vide")
          {
           paramesh=new MEDCoupling::ParaMESH(mesh,recepteur_group.containsMyRank()?recepteur_group:emetteur_group,"emetteur mesh");
-          MEDCoupling::ComponentTopology const comptopo;
+          MEDCoupling::ComponentTopology comptopo;
           champ_emetteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
           champ_emetteur->getField()->setNature(IntensiveMaximum);
           champ_emetteur->setOwnSupport(true);
@@ -521,7 +513,7 @@ void ParaMEDMEMTest::testGauthier3_GEN(bool withIDs, int nprocs)
            //bool ok=false; // Is the time interval successfully solved ?
 
            // Loop on the time interval tries
-           if(true) {
+           if(1) {
 
            if (cas=="emetteur")
@@ -577,7 +569,7 @@ void ParaMEDMEMTest::testGauthier4()
   //
   if(size!=3)
     return ;
-  int const nproc_source = 1;
+  int nproc_source = 1;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -588,9 +580,9 @@ void ParaMEDMEMTest::testGauthier4()
     procs_target.insert(i);
   self_procs.insert(rank);
   //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD* parafield=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD* parafield=0;
   //
   MEDCoupling::CommInterface interface;
   //
@@ -613,7 +605,7 @@ void ParaMEDMEMTest::testGauthier4()
      mesh->setCoords(myCoords);
      myCoords->decrRef();
      paramesh=new ParaMESH(mesh,*source_group,"source mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh,comptopo);
      double *value=parafield->getField()->getArray()->getPointer();
      std::copy(sourceVals,sourceVals+19,value);
@@ -634,7 +626,7 @@ void ParaMEDMEMTest::testGauthier4()
          mesh->setCoords(myCoords);
          myCoords->decrRef();
          paramesh=new ParaMESH (mesh,*target_group,"target mesh");
-         MEDCoupling::ComponentTopology const comptopo;
+         MEDCoupling::ComponentTopology comptopo;
          parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
        }
      else if(rank==2)
@@ -651,7 +643,7 @@ void ParaMEDMEMTest::testGauthier4()
          mesh->setCoords(myCoords);
          myCoords->decrRef();
          paramesh=new ParaMESH (mesh,*target_group,"target mesh");
-         MEDCoupling::ComponentTopology const comptopo;
+         MEDCoupling::ComponentTopology comptopo;
          parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
        }
     }
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx
index b94f4e5bb..a0bd0bd6a 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_ICoco.cxx
@@ -17,14 +17,9 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "MCAuto.hxx"
-#include "MEDCouplingMemArray.hxx"
-#include "MCIdType.hxx"
-#include "NormalizedGeometricTypes"
-#include "MEDCouplingRefCountObject.hxx"
-#include "MEDCouplingNatureOfFieldEnum"
 #include "ParaMEDMEMTest.hxx"
 #include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
 #include "MPIProcessorGroup.hxx"
 #include "ComponentTopology.hxx"
 #include "ParaMESH.hxx"
@@ -33,6 +28,7 @@
 #include "MEDCouplingUMesh.hxx"
 
+#include
 #include
 #include
 #include
@@ -42,11 +38,11 @@ using namespace std;
 using namespace MEDCoupling;
 using namespace ICoCo;
 
-using synctype = enum {sync_and,sync_or};
+typedef enum {sync_and,sync_or} synctype;
 void synchronize_bool(bool& stop, synctype s)
 {
   int my_stop;
-  int const my_stop_temp = stop?1:0;
+  int my_stop_temp = stop?1:0;
 
   if (s==sync_and)
     MPI_Allreduce(&my_stop_temp,&my_stop,1,MPI_INTEGER,MPI_MIN,MPI_COMM_WORLD);
   else // if (s==sync_or)
@@ -56,7 +52,7 @@ void synchronize_bool(bool& stop, synctype s)
 
 void synchronize_dt(double& dt)
 {
-  double const dttemp=dt;
+  double dttemp=dt;
   MPI_Allreduce(&dttemp,&dt,1,MPI_DOUBLE,MPI_MIN,MPI_COMM_WORLD);
 }
 
@@ -126,13 +122,13 @@ void ParaMEDMEMTest::testICoco1()
   InterpKernelDEC dec_emetteur(emetteur_group,recepteur_group);
   dec_emetteur.setOrientation(2);
-  MEDCoupling::ParaFIELD *champ_emetteur(nullptr),*champ_recepteur(nullptr);
-  MEDCoupling::ParaMESH *paramesh(nullptr);
+  MEDCoupling::ParaFIELD *champ_emetteur(0),*champ_recepteur(0);
+  MEDCoupling::ParaMESH *paramesh(0);
   if (cas=="emetteur")
     {
      MCAuto mesh_emetteur(init_triangle());
      paramesh=new MEDCoupling::ParaMESH(mesh_emetteur,emetteur_group,"emetteur mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      champ_emetteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
      champ_emetteur->getField()->setNature(IntensiveMaximum);
      champ_emetteur->setOwnSupport(true);
@@ -142,7 +138,7 @@ void ParaMEDMEMTest::testICoco1()
    {
      MCAuto mesh_recepteur(init_quad());
      paramesh=new MEDCoupling::ParaMESH(mesh_recepteur,recepteur_group,"recepteur mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      champ_recepteur=new MEDCoupling::ParaFIELD(ON_CELLS,ONE_TIME,paramesh,comptopo);
      champ_recepteur->getField()->setNature(IntensiveMaximum);
      champ_recepteur->setOwnSupport(true);
@@ -150,7 +146,7 @@ void ParaMEDMEMTest::testICoco1()
 
   MPI_Barrier(MPI_COMM_WORLD);
 
-  clock_t const clock0(clock());
+  clock_t clock0(clock());
   int compti=0;
 
   bool init(true),stop(false);
@@ -158,7 +154,7 @@ void ParaMEDMEMTest::testICoco1()
   while(!stop)
    {
      compti++;
-     clock_t const clocki= clock ();
+     clock_t clocki= clock ();
      cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl;
      for (int non_unif=0;non_unif<2;non_unif++)
        {
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx
index 6f0ab00a7..781c4ed84 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_InterpKernelDEC.cxx
@@ -17,14 +17,6 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "MCIdType.hxx"
-#include "NormalizedGeometricTypes"
-#include "MEDCouplingMemArray.hxx"
-#include "MEDCouplingRefCountObject.hxx"
-#include "MEDCouplingNatureOfFieldEnum"
-#include "MCAuto.hxx"
-#include "MCType.hxx"
-#include "DECOptions.hxx"
 #include "ParaMEDMEMTest.hxx"
 #include
 
@@ -45,10 +37,8 @@
 #include "TestInterpKernelUtils.hxx"
 
-#include
-#include
-#include
 #include
+#include
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -89,7 +79,7 @@ void ParaMEDMEMTest::testInterpKernelDEC_1D()
   //
   if(size!=5)
     return ;
-  int const nproc_source = 3;
+  int nproc_source = 3;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -100,9 +90,9 @@ void ParaMEDMEMTest::testInterpKernelDEC_1D()
     procs_target.insert(i);
   self_procs.insert(rank);
   //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD *parafieldP0=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD *parafieldP0=0;
   //
   MEDCoupling::CommInterface interface;
   //
@@ -157,7 +147,7 @@ void ParaMEDMEMTest::testInterpKernelDEC_1D()
          myCoords->decrRef();
        }
      paramesh=new ParaMESH(mesh,*source_group,"source mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      double *valueP0=parafieldP0->getField()->getArray()->getPointer();
      parafieldP0->getField()->setNature(IntensiveMaximum);
@@ -207,7 +197,7 @@ void ParaMEDMEMTest::testInterpKernelDEC_1D()
          myCoords->decrRef();
          paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
        }
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafieldP0->getField()->setNature(IntensiveMaximum);
    }
@@ -278,7 +268,7 @@ void ParaMEDMEMTest::testInterpKernelDEC_2DCurve()
   //
   if(size!=5)
     return ;
-  int const nproc_source = 3;
+  int nproc_source = 3;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -289,9 +279,9 @@ void ParaMEDMEMTest::testInterpKernelDEC_2DCurve()
     procs_target.insert(i);
   self_procs.insert(rank);
   //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD *parafieldP0=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD *parafieldP0=0;
   //
   MEDCoupling::CommInterface interface;
   //
@@ -346,7 +336,7 @@ void ParaMEDMEMTest::testInterpKernelDEC_2DCurve()
          myCoords->decrRef();
        }
      paramesh=new ParaMESH(mesh,*source_group,"source mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      double *valueP0=parafieldP0->getField()->getArray()->getPointer();
      parafieldP0->getField()->setNature(IntensiveMaximum);
@@ -396,7 +386,7 @@ void ParaMEDMEMTest::testInterpKernelDEC_2DCurve()
          myCoords->decrRef();
          paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
        }
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafieldP0->getField()->setNature(IntensiveMaximum);
    }
@@ -472,8 +462,8 @@ void ParaMEDMEMTest::testInterpKernelDEC_2DCurve()
 void ParaMEDMEMTest::testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth)
 {
-  std::string const srcM(srcMeth);
-  std::string const targetM(targetMeth);
+  std::string srcM(srcMeth);
+  std::string targetM(targetMeth);
   int size;
   int rank;
   MPI_Comm_size(MPI_COMM_WORLD,&size);
@@ -482,7 +472,7 @@ void ParaMEDMEMTest::testInterpKernelDEC_2D_(const char *srcMeth, const char *ta
   //the test is meant to run on five processors
   if (size !=5) return ;
 
-  int const nproc_source = 3;
+  int nproc_source = 3;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -508,8 +498,8 @@ void ParaMEDMEMTest::testInterpKernelDEC_2D_(const char *srcMeth, const char *ta
   MEDCoupling::ParaFIELD* parafield = nullptr;
   ICoCo::MEDDoubleField* icocofield = nullptr;
 
-  string const filename_xml1 = "square1_split";
-  string const filename_xml2 = "square2_split";
+  string filename_xml1 = "square1_split";
+  string filename_xml2 = "square2_split";
   //string filename_seq_wr  = makeTmpFile("");
   //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
 
@@ -519,11 +509,11 @@ void ParaMEDMEMTest::testInterpKernelDEC_2D_(const char *srcMeth, const char *ta
   MPI_Barrier(MPI_COMM_WORLD);
   if (source_group->containsMyRank())
    {
-     string const master = filename_xml1;
+     string master = filename_xml1;
 
      ostringstream strstream;
      strstream <containsMyRank()) {
-     string const master= filename_xml2;
+     string master= filename_xml2;
      ostringstream strstream;
      strstream << master<<(rank-nproc_source+1)<<".med";
-     string const fName = INTERP_TEST::getResourceFile(strstream.str());
+     string fName = INTERP_TEST::getResourceFile(strstream.str());
      ostringstream meshname ;
      meshname<< "Mesh_3_"< self_procs;
   set procs_source;
   set procs_target;
@@ -714,25 +704,25 @@ void ParaMEDMEMTest::testInterpKernelDEC2_2D_(const char *srcMeth, const char *t
   MEDCoupling::MEDCouplingUMesh* mesh = nullptr;
   MEDCoupling::MEDCouplingFieldDouble* mcfield = nullptr;
 
-  string const filename_xml1 = "square1_split";
-  string const filename_xml2 = "square2_split";
+  string filename_xml1 = "square1_split";
+  string filename_xml2 = "square2_split";
 
   // To remove tmp files from disk
-  ParaMEDMEMTest_TmpFilesRemover const aRemover;
+  ParaMEDMEMTest_TmpFilesRemover aRemover;
 
   MPI_Barrier(MPI_COMM_WORLD);
   if (source_group->containsMyRank())
    {
-     string const master = filename_xml1;
+     string master = filename_xml1;
      ostringstream strstream;
      strstream <containsMyRank()) {
-     string const master= filename_xml2;
+     string master= filename_xml2;
      ostringstream strstream;
      strstream << master<<(rank-nproc_source+1)<<".med";
-     string const fName = INTERP_TEST::getResourceFile(strstream.str());
+     string fName = INTERP_TEST::getResourceFile(strstream.str());
      ostringstream meshname ;
      meshname<< "Mesh_3_"< self_procs;
   set procs_source;
   set procs_target;
@@ -877,12 +867,12 @@ void ParaMEDMEMTest::testInterpKernelDEC_3D_(const char *srcMeth, const char *ta
   char * tmp_dir_c = getenv("TMP");
   string tmp_dir;
-  if (tmp_dir_c != nullptr)
+  if (tmp_dir_c != NULL)
    tmp_dir = string(tmp_dir_c);
   else
    tmp_dir = "/tmp";
-  string const filename_xml1 = "Mesh3D_10_2d";
-  string const filename_xml2 = "Mesh3D_11";
+  string filename_xml1 = "Mesh3D_10_2d";
+  string filename_xml2 = "Mesh3D_11";
   //string filename_seq_wr  = makeTmpFile("");
   //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
 
@@ -892,11 +882,11 @@ void ParaMEDMEMTest::testInterpKernelDEC_3D_(const char *srcMeth, const char *ta
   MPI_Barrier(MPI_COMM_WORLD);
   if (source_group->containsMyRank())
    {
-     string const master = filename_xml1;
+     string master = filename_xml1;
 
      ostringstream strstream;
      strstream <containsMyRank()) {
-     string const master= filename_xml2;
+     string master= filename_xml2;
      ostringstream strstream;
      strstream << master << ".med";
-     std::string const fName = INTERP_TEST::getResourceFile(strstream.str());
+     std::string fName = INTERP_TEST::getResourceFile(strstream.str());
      ostringstream meshname ;
      meshname<< "Mesh_6";
      mesh = ReadUMeshFromFile(fName.c_str(),meshname.str().c_str(),0);
 
      paramesh=new ParaMESH (mesh,*target_group,"target mesh");
      // MEDCoupling::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group);
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      if(targetM=="P0")
        {
          parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
@@ -1115,7 +1105,7 @@ void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P0()
   //
   if(size!=5)
     return ;
-  int const nproc_source = 2;
+  int nproc_source = 2;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -1126,9 +1116,9 @@ void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P0()
     procs_target.insert(i);
   self_procs.insert(rank);
   //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD* parafield=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD* parafield=0;
   //
   MEDCoupling::CommInterface interface;
   //
@@ -1151,7 +1141,7 @@ void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P0()
      mesh->setCoords(myCoords);
      myCoords->decrRef();
      paramesh=new ParaMESH(mesh,*source_group,"source mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      double *value=parafield->getField()->getArray()->getPointer();
      value[0]=34+13*((double)rank);
@@ -1171,7 +1161,7 @@
      mesh->setCoords(myCoords);
      myCoords->decrRef();
      paramesh=new ParaMESH (mesh,*target_group,"target mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
    }
   //test 1 - Conservative volumic
@@ -1394,7 +1384,7 @@ void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P1P1P0()
   //
   if(size!=5)
     return ;
-  int const nproc_source = 2;
+  int nproc_source = 2;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -1405,9 +1395,9 @@ void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P1P1P0()
     procs_target.insert(i);
   self_procs.insert(rank);
   //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD *parafieldP0=nullptr,*parafieldP1=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD *parafieldP0=0,*parafieldP1=0;
   //
   MEDCoupling::CommInterface interface;
   //
@@ -1449,7 +1439,7 @@ void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P1P1P0()
          myCoords->decrRef();
        }
      paramesh=new ParaMESH(mesh,*source_group,"source mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
      double *valueP0=parafieldP0->getField()->getArray()->getPointer();
@@ -1532,7 +1522,7 @@ void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P1P1P0()
          std::copy(globalNumberingP4.begin(), globalNumberingP4.end(), da->rwBegin());
          paramesh->setNodeGlobal(da);
        }
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
      parafieldP0->getField()->setNature(IntensiveMaximum);
@@ -1613,7 +1603,7 @@ void ParaMEDMEMTest::testInterpKernelDEC2DM1D_P0P0()
   //
   if(size!=3)
     return ;
-  int const nproc_source=2;
+  int nproc_source=2;
   set procs_source;
   set procs_target;
   //
@@ -1622,9 +1612,9 @@ void ParaMEDMEMTest::testInterpKernelDEC2DM1D_P0P0()
   for (int i=nproc_source;iinsertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn+7);
          mesh->finishInsertingCells();
        }
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      paramesh=new ParaMESH(mesh,*source_group,"source mesh");
      parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafield->getField()->setNature(IntensiveMaximum);
@@ -1672,7 +1662,7 @@ void ParaMEDMEMTest::testInterpKernelDEC2DM1D_P0P0()
   else
    {
      mesh=MEDCouplingUMesh::New("an example of -1 D mesh",-1);
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      paramesh=new ParaMESH(mesh,*target_group,"target mesh");
      parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafield->getField()->setNature(IntensiveMaximum);
@@ -1844,15 +1834,15 @@ void ParaMEDMEMTest::testInterpKernelDECPartialProcs()
   procs_source.insert(0);
   procs_target.insert(1);
   //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD *parafield=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD *parafield=0;
   //
   MEDCoupling::CommInterface interface;
   //
   MPI_Barrier(MPI_COMM_WORLD);
   double targetCoords[8]={ 0.,0., 1., 0., 0., 1., 1., 1. };
-  CommInterface const comm;
+  CommInterface comm;
   int grpIds[2]={0,1};
   MPI_Group grp,group_world;
   comm.commGroup(MPI_COMM_WORLD,&group_world);
@@ -1860,10 +1850,10 @@ void ParaMEDMEMTest::testInterpKernelDECPartialProcs()
   MPI_Comm partialComm;
   comm.commCreate(MPI_COMM_WORLD,grp,&partialComm);
   //
-  ProcessorGroup* target_group=nullptr;
-  ProcessorGroup* source_group=nullptr;
+  ProcessorGroup* target_group=0;
+  ProcessorGroup* source_group=0;
   //
-  MEDCoupling::InterpKernelDEC *dec=nullptr;
+  MEDCoupling::InterpKernelDEC *dec=0;
   if(rank==0 || rank==1)
    {
      target_group = new MEDCoupling::MPIProcessorGroup(interface,procs_target,partialComm);
@@ -1881,7 +1871,7 @@ void ParaMEDMEMTest::testInterpKernelDECPartialProcs()
          mesh->allocateCells(1);
          mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn);
          mesh->finishInsertingCells();
-         MEDCoupling::ComponentTopology const comptopo;
+         MEDCoupling::ComponentTopology comptopo;
          paramesh=new ParaMESH(mesh,*source_group,"source mesh");
          parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
          parafield->getField()->setNature(IntensiveMaximum);
@@ -1907,7 +1897,7 @@ void ParaMEDMEMTest::testInterpKernelDECPartialProcs()
          mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn);
          mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+3);
          mesh->finishInsertingCells();
-         MEDCoupling::ComponentTopology const comptopo;
+         MEDCoupling::ComponentTopology comptopo;
          paramesh=new ParaMESH(mesh,*target_group,"target mesh");
          parafield=new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
          parafield->getField()->setNature(IntensiveMaximum);
@@ -1945,7 +1935,7 @@ void ParaMEDMEMTest::testInterpKernelDEC3DSurfEmptyBBox()
   //
   if(size!=3)
     return ;
-  int const nproc_source = 1;
+  int nproc_source = 1;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -1956,9 +1946,9 @@ void ParaMEDMEMTest::testInterpKernelDEC3DSurfEmptyBBox()
     procs_target.insert(i);
   self_procs.insert(rank);
   //
-  MEDCoupling::MEDCouplingUMesh *mesh=nullptr;
-  MEDCoupling::ParaMESH *paramesh=nullptr;
-  MEDCoupling::ParaFIELD *parafieldP0=nullptr;
+  MEDCoupling::MEDCouplingUMesh *mesh=0;
+  MEDCoupling::ParaMESH *paramesh=0;
+  MEDCoupling::ParaFIELD *parafieldP0=0;
   //
   MEDCoupling::CommInterface interface;
   //
@@ -1983,7 +1973,7 @@ void ParaMEDMEMTest::testInterpKernelDEC3DSurfEmptyBBox()
      myCoords->decrRef();
      //
      paramesh=new ParaMESH(mesh,*source_group,"source mesh");
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      double *valueP0=parafieldP0->getField()->getArray()->getPointer();
      parafieldP0->getField()->setNature(IntensiveMaximum);
@@ -2022,7 +2012,7 @@ void ParaMEDMEMTest::testInterpKernelDEC3DSurfEmptyBBox()
          myCoords->decrRef();
          paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
        }
-     MEDCoupling::ComponentTopology const comptopo;
+     MEDCoupling::ComponentTopology comptopo;
      parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
      parafieldP0->getField()->setNature(IntensiveMaximum);
    }
@@ -2093,8 +2083,8 @@ void ParaMEDMEMTest::testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA
                                                         double dtB, double tmaxB,
                                                         bool WithPointToPoint, bool Asynchronous, bool WithInterp, const char *srcMeth, const char *targetMeth)
 {
-  std::string const srcM(srcMeth);
-  std::string const targetM(targetMeth);
+  std::string srcM(srcMeth);
+  std::string targetM(targetMeth);
   int size;
   int rank;
   MPI_Comm_size(MPI_COMM_WORLD,&size);
@@ -2103,7 +2093,7 @@ void ParaMEDMEMTest::testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA
   //the test is meant to run on five processors
   if (size !=5) return ;
 
-  int const nproc_source = 3;
+  int nproc_source = 3;
   set self_procs;
   set procs_source;
   set procs_target;
@@ -2131,27 +2121,27 @@ void ParaMEDMEMTest::testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA
   char * tmp_dir_c = getenv("TMP");
   string tmp_dir;
-  if (tmp_dir_c != nullptr)
+  if (tmp_dir_c != NULL)
    tmp_dir = string(tmp_dir_c);
   else
    tmp_dir = "/tmp";
-  string const filename_xml1 = "square1_split";
-  string const filename_xml2 = "square2_split";
+  string filename_xml1 = "square1_split";
+  string filename_xml2 = "square2_split";
   //string filename_seq_wr  = makeTmpFile("");
   //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
 
   // To remove tmp files from disk
-  ParaMEDMEMTest_TmpFilesRemover const aRemover;
+  ParaMEDMEMTest_TmpFilesRemover aRemover;
 
   MPI_Barrier(MPI_COMM_WORLD);
 
   if (source_group->containsMyRank())
    {
-     string const master = filename_xml1;
+     string master = filename_xml1;
 
      ostringstream strstream;
      strstream <containsMyRank()) {
-     string const master= filename_xml2;
+     string master= filename_xml2;
      ostringstream strstream;
      strstream << master<<(rank-nproc_source+1)<<".med";
-     string const fName = INTERP_TEST::getResourceFile(strstream.str());
+     string fName = INTERP_TEST::getResourceFile(strstream.str());
      ostringstream meshname ;
      meshname<< "Mesh_3_"<getVolumeIntegral(0,true);
+      double vi = parafield->getVolumeIntegral(0,true);
      cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time << " VolumeIntegral " << vi << " time*10000 " << time*10000 << endl ;
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_MEDLoader.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_MEDLoader.cxx
index 31ad920c2..c992562d0 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_MEDLoader.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_MEDLoader.cxx
@@ -17,11 +17,6 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "MCAuto.hxx"
-#include "MEDCouplingCMesh.hxx"
-#include "MEDCouplingMemArray.hxx"
-#include "MCIdType.hxx"
-#include "NormalizedGeometricTypes"
 #include "ParaMEDMEMTest.hxx"
 
 #include "MEDLoader.hxx"
@@ -34,7 +29,9 @@
 #include
 #include
 
-#include
+#include
+#include
+#include
 
 using namespace MEDCoupling;
 
@@ -199,7 +196,7 @@ void ParaMEDMEMTest::testParallelLoad1()
   else
    distrib = { {INTERP_KERNEL::NORM_QUAD4,{2,3,6,7}} };
 
-  std::string const filename=INTERP_TEST::getResourceFile("SimpleTest2D.med");
+  std::string filename=INTERP_TEST::getResourceFile("SimpleTest2D.med");
   MCAuto mu = ParaMEDFileUMesh::ParaNew(distrib, MPI_COMM_WORLD, MPI_INFO_NULL, filename, "mesh");
   MCAuto mesh = mu->getMeshAtLevel(0);
   MCAuto meshRef = genLocMesh2D(rank);
@@ -230,7 +227,7 @@ void ParaMEDMEMTest::testParallelLoad2()
   else
    distrib= { {INTERP_KERNEL::NORM_TRI3,{0,1}} , {INTERP_KERNEL::NORM_QUAD4,{1,3}} };
 
-  std::string const filename=INTERP_TEST::getResourceFile("Test2DMultiGeoType.med");
+  std::string filename=INTERP_TEST::getResourceFile("Test2DMultiGeoType.med");
   MCAuto mu = ParaMEDFileUMesh::ParaNew(distrib, MPI_COMM_WORLD, MPI_INFO_NULL, filename, "mesh");
   MCAuto mesh = mu->getMeshAtLevel(0);
   MEDCouplingUMesh *meshRef;
@@ -279,7 +276,7 @@ void ParaMEDMEMTest::testParallelLoad3()
      distrib = { {INTERP_KERNEL::NORM_TETRA4,distribCells} };
    }
 
-  std::string const filename=INTERP_TEST::getResourceFile("SimpleTest3D.med");
+  std::string filename=INTERP_TEST::getResourceFile("SimpleTest3D.med");
   MCAuto mu = ParaMEDFileUMesh::ParaNew(distrib, MPI_COMM_WORLD, MPI_INFO_NULL, filename, "mesh");
   MCAuto mesh = mu->getMeshAtLevel(0);
   CPPUNIT_ASSERT_EQUAL(96,(int)mesh->getNumberOfCells());
@@ -349,7 +346,7 @@ void ParaMEDMEMTest::testParallelLoad4()
   else
    distrib = {2,3,6,7};
 
-  std::string const filename=INTERP_TEST::getResourceFile("SimpleTest2D.med");
+  std::string filename=INTERP_TEST::getResourceFile("SimpleTest2D.med");
   MCAuto f1TS = ParaMEDFileField1TS::ParaNew(MPI_COMM_WORLD, MPI_INFO_NULL,filename,"fieldOnCells","mesh",distrib,ON_CELLS,INTERP_KERNEL::NORM_QUAD4);
   MCAuto fieldRef = genLocFieldCells(rank);
   CPPUNIT_ASSERT(f1TS->getUndergroundDataArray()->isEqual(*fieldRef->getArray(),1e-12));
@@ -375,7 +372,7 @@ void ParaMEDMEMTest::testParallelLoad5()
   else
    distrib = {2,3,4,7,8,9,12,13,14};
 
-  std::string const filename=INTERP_TEST::getResourceFile("SimpleTest2D.med");
+  std::string filename=INTERP_TEST::getResourceFile("SimpleTest2D.med");
   // for fields on nodes, geometrical type is not needed
   MCAuto f1TS = ParaMEDFileField1TS::ParaNew(MPI_COMM_WORLD, MPI_INFO_NULL,filename,"fieldOnNodes","mesh",distrib,ON_NODES,INTERP_KERNEL::NORM_ERROR);
   MCAuto fieldRef = genLocFieldNodes(rank);
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx
index caf429cf0..2883b4055 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_MPIProcessorGroup.cxx
@@ -17,13 +17,14 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "InterpKernelException.hxx"
 #include "ParaMEDMEMTest.hxx"
 #include
 
 #include "CommInterface.hxx"
 #include "ProcessorGroup.hxx"
 #include "MPIProcessorGroup.hxx"
+#include "InterpolationUtils.hxx"
 
+#include
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
 #define ENABLE_FAULTS
@@ -57,7 +58,7 @@ using namespace MEDCoupling;
 void ParaMEDMEMTest::testMPIProcessorGroup_constructor()
 {
   CommInterface comm_interface;
-  auto* group = new MPIProcessorGroup(comm_interface);;
+  MPIProcessorGroup* group = new MPIProcessorGroup(comm_interface);;
   int size;
   MPI_Comm_size(MPI_COMM_WORLD, &size);
   CPPUNIT_ASSERT_EQUAL(size,group->size());
@@ -98,11 +99,11 @@ void ParaMEDMEMTest::testMPIProcessorGroup_boolean()
   int size;
   MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-  CommInterface const comm_interface;
+  CommInterface comm_interface;
   MPIProcessorGroup group(comm_interface,0,0);
   MPIProcessorGroup group2(comm_interface,size-1,size-1);
   ProcessorGroup* group_fuse=group.fuse(group2);
-  int const group_fuse_size=(size==1)?1:2;
+  int group_fuse_size=(size==1)?1:2;
   CPPUNIT_ASSERT_EQUAL(group_fuse_size,group_fuse->size());
 
   ProcessorGroup* group_complement=((MPIProcessorGroup*)group_fuse)->createComplementProcGroup();
@@ -128,7 +129,7 @@ void ParaMEDMEMTest::testMPIProcessorGroup_rank()
   int rank;
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 
-  CommInterface const comm_interface;
+  CommInterface comm_interface;
   MPIProcessorGroup group(comm_interface,0,0);
   MPIProcessorGroup group2(comm_interface,size-1,size-1);
   ProcessorGroup* group_fuse=group2.fuse(group);
diff --git a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx
index afd83c0b9..d8b0f3580 100644
--- a/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx
+++ b/src/ParaMEDMEMTest/ParaMEDMEMTest_OverlapDEC.cxx
@@ -17,10 +17,6 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include "MEDCouplingNatureOfFieldEnum"
-#include "MCIdType.hxx"
-#include "NormalizedGeometricTypes"
-#include "MEDCouplingRefCountObject.hxx"
 #include "ParaMEDMEMTest.hxx"
 #include
 
@@ -35,20 +31,22 @@
 #include "MEDCouplingUMesh.hxx"
 
+#include
 
 using namespace std;
 
 #include "MCAuto.hxx"
 #include "MEDLoader.hxx"
+#include "MEDLoaderBase.hxx"
 #include "MEDCouplingFieldDouble.hxx"
 #include "MEDCouplingMemArray.hxx"
 #include "MEDCouplingRemapper.hxx"
 
 using namespace MEDCoupling;
 
-using MUMesh = MCAuto;
-using MFDouble = MCAuto;
-using DADouble = MCAuto;
+typedef MCAuto MUMesh;
+typedef MCAuto MFDouble;
+typedef MCAuto DADouble;
 
 //void ParaMEDMEMTest::testOverlapDEC_LMEC_seq()
 //{
@@ -356,16 +354,16 @@ void prepareData2(int rank, ProcessorGroup * grp, NatureOfField nature,
                   bool stripPartOfSource=false,
                   int fieldCompoNum=1)
 {
-  MEDCouplingUMesh *meshS_0 = nullptr, *meshT_0 = nullptr;
+  MEDCouplingUMesh *meshS_0 = 0, *meshT_0 = 0;
   prepareData2_buildOneSquare(meshS_0, meshT_0);
 
   if(rank==0)
    {
      const double tr1[] = {1.5, 0.0};
-     auto *meshS_1 = static_cast(meshS_0->deepCopy());
+     MEDCouplingUMesh *meshS_1 = static_cast(meshS_0->deepCopy());
      meshS_1->translate(tr1);
      const double tr2[] = {3.0, 0.0};
-     auto *meshS_2 = static_cast(meshS_0->deepCopy());
+     MEDCouplingUMesh *meshS_2 = static_cast(meshS_0->deepCopy());
      meshS_2->translate(tr2);
 
      std::vector vec;
@@ -375,7 +373,7 @@ void prepareData2(int rank, ProcessorGroup * grp, NatureOfField nature,
      meshS = MEDCouplingUMesh::MergeUMeshes(vec);
      meshS_1->decrRef(); meshS_2->decrRef();
-     ComponentTopology const comptopo(fieldCompoNum);
+     ComponentTopology comptopo(fieldCompoNum);
      parameshS=new ParaMESH(meshS, *grp,"source mesh");
      parafieldS=new ParaFIELD(ON_CELLS,ONE_TIME,parameshS,comptopo);
      parafieldS->getField()->setNature(nature);
@@ -392,7 +390,7 @@ void prepareData2(int rank, ProcessorGroup * grp, NatureOfField nature,
      //
      const double tr3[] = {0.0, -1.5};
-     auto *meshT_3 = static_cast(meshT_0->deepCopy());
+     MEDCouplingUMesh *meshT_3 = static_cast(meshT_0->deepCopy());
      meshT_3->translate(tr3);
      vec.clear();
      vec.push_back(meshT_0);vec.push_back(meshT_3);
@@ -407,10 +405,10 @@ void prepareData2(int rank, ProcessorGroup * grp, NatureOfField nature,
   if(rank==1)
    {
      const double tr3[] = {0.0, -1.5};
-     auto *meshS_3 = static_cast(meshS_0->deepCopy());
+     MEDCouplingUMesh *meshS_3 = static_cast(meshS_0->deepCopy());
      meshS_3->translate(tr3);
      const double tr4[] = {1.5, -1.5};
-     auto *meshS_4 = static_cast(meshS_0->deepCopy());
+     MEDCouplingUMesh *meshS_4 = static_cast(meshS_0->deepCopy());
      meshS_4->translate(tr4);
 
      std::vector vec;
@@ -418,7 +416,7 @@ void prepareData2(int rank, ProcessorGroup * grp, NatureOfField nature,
      meshS = MEDCouplingUMesh::MergeUMeshes(vec);
      meshS_3->decrRef(); meshS_4->decrRef();
-     ComponentTopology const comptopo(fieldCompoNum);
+     ComponentTopology comptopo(fieldCompoNum);
      parameshS=new ParaMESH(meshS, *grp,"source mesh");
      parafieldS=new ParaFIELD(ON_CELLS,ONE_TIME,parameshS,comptopo);
      parafieldS->getField()->setNature(nature);
@@ -431,13 +429,13 @@ void prepareData2(int rank, ProcessorGroup * grp, NatureOfField nature,
      //
      const double tr5[] = {1.5, 0.0};
-     auto *meshT_1 = static_cast(meshT_0->deepCopy());
+     MEDCouplingUMesh *meshT_1 = static_cast(meshT_0->deepCopy());
      meshT_1->translate(tr5);
      const double tr6[] = {3.0, 0.0};
-     auto *meshT_2 = static_cast(meshT_0->deepCopy());
+     MEDCouplingUMesh *meshT_2 = static_cast(meshT_0->deepCopy());
      meshT_2->translate(tr6);
      const double tr7[] = {1.5, -1.5};
-     auto *meshT_4 = static_cast(meshT_0->deepCopy());
+     MEDCouplingUMesh *meshT_4 = static_cast(meshT_0->deepCopy());
      meshT_4->translate(tr7);
 
      vec.clear();
@@ -474,14 +472,14 @@ void testOverlapDEC_generic(int workSharingAlgo, double bbAdj)
   //  }
   if (size != 3) return ;
 
-  int const nproc = 3;
+  int nproc = 3;
   std::set procs;
   for (int i=0; i procs;
   for (int i=0; i procs;
   for (int i=0; i
 #include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
 #include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
 #include "DEC.hxx"
 #include "StructuredCoincidentDEC.hxx"
 #include "ParaMESH.hxx"
 #include "ParaFIELD.hxx"
 #include "ComponentTopology.hxx"
+#include "ICoCoMEDDoubleField.hxx"
 #include "MEDLoader.hxx"
 #include "MEDCouplingUMesh.hxx"
 #include "TestInterpKernelUtils.hxx"
 
-#include
 #include
 
 // use this define to enable lines, execution of which leads to Segmentation Fault
@@ -57,7 +57,7 @@ using namespace MEDCoupling;
  */
 void ParaMEDMEMTest::testStructuredCoincidentDEC() {
-  string const testname="ParaMEDMEM - testStructured CoincidentDEC";
+  string testname="ParaMEDMEM - testStructured CoincidentDEC";
   //  MPI_Init(&argc, &argv);
   int size;
   int rank;
@@ -67,7 +67,7 @@ void ParaMEDMEMTest::testStructuredCoincidentDEC() {
     return;
   }
-  MEDCoupling::CommInterface const interface;
+  MEDCoupling::CommInterface interface;
 
   MEDCoupling::MPIProcessorGroup self_group (interface,rank,rank);
   MEDCoupling::MPIProcessorGroup target_group(interface,3,size-1);
@@ -77,13 +77,13 @@ void ParaMEDMEMTest::testStructuredCoincidentDEC() {
   MEDCoupling::ParaMESH* paramesh = nullptr;
   MEDCoupling::ParaFIELD* parafield = nullptr;
 
-  string const filename_xml1 = INTERP_TEST::getResourceFile("square1_split");
-  string const filename_2 = INTERP_TEST::getResourceFile("square1.med");
+  string filename_xml1 = INTERP_TEST::getResourceFile("square1_split");
+  string filename_2 = INTERP_TEST::getResourceFile("square1.med");
   //string filename_seq_wr  = makeTmpFile("");
   //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
 
   // To remove tmp files from disk
-  ParaMEDMEMTest_TmpFilesRemover const aRemover;
+  ParaMEDMEMTest_TmpFilesRemover aRemover;
 
   //loading the geometry for the source group
 
  MPI_Barrier(MPI_COMM_WORLD);
   if (source_group.containsMyRank()) {
-    string const master = filename_xml1;
+    string master = filename_xml1;
 
     ostringstream strstream;
     strstream <getNumberOfCells();
+    int nb_local=mesh->getNumberOfCells();
     const mcIdType* global_numbering = paramesh->getGlobalNumberingCell();
 
     double *value=parafield->getField()->getArray()->getPointer();
@@ -125,7 +125,7 @@ void ParaMEDMEMTest::testStructuredCoincidentDEC() {
   //loading the geometry for the target group
   if (target_group.containsMyRank()) {
-    string const meshname2("Mesh_2");
+    string meshname2("Mesh_2");
     mesh = ReadUMeshFromFile(filename_2.c_str(),meshname2.c_str(),0);
 
     paramesh=new ParaMESH (mesh,self_group,"target mesh");
@@ -133,7 +133,7 @@ void ParaMEDMEMTest::testStructuredCoincidentDEC() {
     parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
 
-    int const nb_local=mesh->getNumberOfCells();
+    int nb_local=mesh->getNumberOfCells();
     double *value=parafield->getField()->getArray()->getPointer();
     for (int ielem=0; ielemgetField()->getArray()->getPointer();
     for (int i=0; i< nb_local; i++) {
-      int const first = comptopo.firstLocalComponent();
+      int first = comptopo.firstLocalComponent();
       for (int icomp = 0; icomp < comptopo.nbLocalComponents(); icomp++)
        CPPUNIT_ASSERT_DOUBLES_EQUAL(recv_value[i*comptopo.nbLocalComponents()+icomp],(double)(i*6+icomp+first),1e-12);
   }
diff --git a/src/ParaMEDMEMTest/TestParaMEDMEM.cxx b/src/ParaMEDMEMTest/TestParaMEDMEM.cxx
index e5710db42..340e0c091 100644
--- a/src/ParaMEDMEMTest/TestParaMEDMEM.cxx
+++ b/src/ParaMEDMEMTest/TestParaMEDMEM.cxx
@@ -20,7 +20,6 @@
 // --- include all MEDMEM Test
 //
 #include "ParaMEDMEMTest.hxx"
-#include
 
 // --- Registers the fixture into the 'registry'
 
diff --git a/src/ParaMEDMEMTest/test_perf.cxx b/src/ParaMEDMEMTest/test_perf.cxx
index 910a6570a..76dbaaa67 100644
--- a/src/ParaMEDMEMTest/test_perf.cxx
+++ b/src/ParaMEDMEMTest/test_perf.cxx
@@ -17,17 +17,11 @@
 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 //
 
-#include
-#include
-#include
-#include
 #include
 #include
 #include
-#include "InterpolationOptions.hxx"
-#include "MEDCouplingRefCountObject.hxx"
-#include "DECOptions.hxx"
 #include "ParaMEDMEMTest.hxx"
+#include
 
 #include "CommInterface.hxx"
 #include "ProcessorGroup.hxx"
@@ -122,7 +116,7 @@ int main(int argc, char *argv[])
 void testInterpKernelDEC_2D(const string& filename_xml1, const string& meshname1,
                             const string& filename_xml2, const string& meshname2,
-                            int nproc_source, double /*epsilon*/, bool tri, bool all)
+                            int nproc_source, double epsilon, bool tri, bool all)
 {
   float tcpu, tcpu_u, tcpu_s, telps;
   int size;
@@ -160,19 +154,19 @@ void testInterpKernelDEC_2D(const string& filename_xml1, const string& meshname1
   ICoCo::MEDDoubleField* icocofield = nullptr;
 
   // To remove tmp files from disk
-  ParaMEDMEMTest_TmpFilesRemover const aRemover;
+  ParaMEDMEMTest_TmpFilesRemover aRemover;
 
   MPI_Barrier(MPI_COMM_WORLD);
   if (source_group->containsMyRank()){
-    string const master = filename_xml1;
-    ostringstream const strstream;
+    string master = filename_xml1;
+    ostringstream strstream;
     if( nproc_source == 1 )
      strstream <getNumberOfCells();
+    int nb_local=mesh->getNumberOfCells();
     double *value=parafield->getField()->getArray()->getPointer();
     for(int ielem=0; ielemcontainsMyRank()){
-    string const master= filename_xml2;
-    ostringstream const strstream;
+    string master= filename_xml2;
+    ostringstream strstream;
     if( (size-nproc_source) == 1 )
      strstream << master<<".med";
     else
      strstream << master<<(rank-nproc_source+1)<<".med";
-    ostringstream const meshname ;
+    ostringstream meshname ;
     if( (size-nproc_source) == 1 )
      meshname<< meshname2;
     else
@@ -220,10 +214,10 @@ void testInterpKernelDEC_2D(const string& filename_xml1, const string& meshname1
     mesh->incrRef();
 
     paramesh=new ParaMESH (mesh,*target_group,"target mesh");
-    MEDCoupling::ComponentTopology const comptopo;
+    MEDCoupling::ComponentTopology comptopo;
     parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
 
-    int const nb_local=mesh->getNumberOfCells();
+    int nb_local=mesh->getNumberOfCells();
     double *value=parafield->getField()->getArray()->getPointer();
     for(int ielem=0; ielem