1 // Copyright (C) 2007-2016 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
26 #include "MPIAccessTest.hxx"
27 #include <cppunit/TestAssert.h>
29 //#include "CommInterface.hxx"
30 //#include "ProcessorGroup.hxx"
31 //#include "MPIProcessorGroup.hxx"
32 #include "MPIAccess.hxx"
34 // use this define to enable lines whose execution leads to a Segmentation Fault
37 // use this define to enable CPPUNIT asserts and failures that expose known bugs
38 #define ENABLE_FORCED_FAILURES
41 using namespace MEDCoupling;
43 void MPIAccessTest::test_MPI_Access_Probe() {
45 debugStream << "test_MPI_Access_Probe" << endl ;
47 // MPI_Init(&argc, &argv) ;
51 MPI_Comm_size(MPI_COMM_WORLD,&size) ;
52 MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
55 cerr << "test_MPI_Access_Probe must be runned with 2 procs" << endl ;
56 //CPPUNIT_FAIL("test_MPI_Access_Probe must be runned with 2 procs") ;
60 debugStream << "test_MPI_Access_Probe" << myrank << endl ;
62 MEDCoupling::CommInterface interface ;
64 MEDCoupling::MPIProcessorGroup* group = new MEDCoupling::MPIProcessorGroup(interface) ;
66 MEDCoupling::MPIAccess mpi_access( group ) ;
69 mpi_access.barrier() ;
74 int target = 1 - myrank ;
78 for ( i = 0 ; i < 10 ; i++ ) {
80 sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
81 debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
85 int source, tag, outcount ;
86 MPI_Datatype datatype ;
87 sts = mpi_access.probe(target, source, tag, datatype, outcount ) ;
88 debugStream << "test" << myrank << " Probe target " << target << " source " << source
89 << " tag " << tag << " outcount " << outcount << endl ;
91 sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
93 if ( (outcount != 1) | (recvbuf != i) ) {
94 ostringstream strstream ;
95 strstream << "==========================================================="
96 << "test" << myrank << " outcount " << outcount
97 << " recvbuf " << recvbuf << " KO"
98 << "==========================================================="
100 debugStream << strstream.str() << endl ;
101 CPPUNIT_FAIL( strstream.str() ) ;
104 char msgerr[MPI_MAX_ERROR_STRING] ;
106 mpi_access.errorString(sts, msgerr, &lenerr) ;
107 debugStream << "test" << myrank << " lenerr " << lenerr << " "
110 if ( sts != MPI_SUCCESS ) {
111 ostringstream strstream ;
112 strstream << "==========================================================="
113 << "test" << myrank << " KO"
114 << "==========================================================="
116 debugStream << strstream.str() << endl ;
117 CPPUNIT_FAIL( strstream.str() ) ;
119 if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
122 mpi_access.testAll(10,RequestId,flag) ;
124 ostringstream strstream ;
125 strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
126 debugStream << strstream.str() << endl ;
127 CPPUNIT_FAIL( strstream.str() ) ;
129 mpi_access.waitAll(10,RequestId) ;
130 if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
132 mpi_access.barrier() ;
138 debugStream << "test" << myrank << " OK" << endl ;