1 // Copyright (C) 2007-2015 CEA/DEN, EDF R&D
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
25 #include "MPIAccessTest.hxx"
26 #include <cppunit/TestAssert.h>
28 //#include "CommInterface.hxx"
29 //#include "ProcessorGroup.hxx"
30 //#include "MPIProcessorGroup.hxx"
31 #include "MPIAccess.hxx"
33 // use this define to enable lines whose execution leads to a Segmentation Fault
36 // use this define to enable CPPUNIT asserts and fails, showing bugs
37 #define ENABLE_FORCED_FAILURES
40 using namespace ParaMEDMEM;
// Cyclic send/recv test over a 3-process ring: rank r sends to alltarget[r]
// and receives from allsource[r] (0 -> 1 -> 2 -> 0), then verifies payloads,
// MPI return codes, and that no send/recv request is left pending.
42 void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
44 debugStream << "test_MPI_Access_Cyclic_Send_Recv" << endl ;
46 // MPI_Init(&argc, &argv) ;
50 MPI_Comm_size(MPI_COMM_WORLD,&size) ;
51 MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
// Fixed message: this test (not test_MPI_Access_Send_Recv) requires exactly 3 procs.
54 cerr << "test_MPI_Access_Cyclic_Send_Recv must be run with 3 procs" << endl ;
55 //CPPUNIT_FAIL("test_MPI_Access_Cyclic_Send_Recv must be run with 3 procs") ;
59 debugStream << "test_MPI_Access_Cyclic_Send_Recv" << myrank << endl ;
61 ParaMEDMEM::CommInterface interface ;
// NOTE(review): group is heap-allocated; confirm it is deleted after the final barrier.
63 ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
65 ParaMEDMEM::MPIAccess mpi_access( group ) ;
68 mpi_access.barrier() ;
// Ring topology: send target and receive source for each of the 3 ranks.
73 int alltarget[3] = {1 , 2 , 0 } ;
74 int allsource[3] = {2 , 0 , 1 } ;
79 sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
80 debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
81 << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
83 for ( i = 0 ; i < 10 ; i++ ) {
88 sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i],
92 sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i]) ;
95 //int source, tag, error, outcount ;
96 //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
97 debugStream << "test" << myrank << " Recv RequestId " << RequestId[i]
98 << " tag " << mpi_access.recvMPITag(allsource[myrank])
99 << " outcount " << outcount << endl ;
// Fixed: logical || (short-circuit) instead of bitwise | between bool operands.
100 if ( (outcount != 1) || (recvbuf != i) ) {
101 ostringstream strstream ;
102 strstream << "==========================================================="
103 << "test" << myrank << " outcount "
104 << outcount << " recvbuf " << recvbuf << " KO"
105 << "==========================================================="
107 debugStream << strstream.str() << endl ;
108 CPPUNIT_FAIL( strstream.str() ) ;
113 sts = mpi_access.send(&ii,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
114 debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
115 << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
119 sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
120 debugStream << "test" << myrank << " Send RequestId " << RequestId[i]
121 << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
// Translate the last MPI status code into a readable string for the log.
123 char msgerr[MPI_MAX_ERROR_STRING] ;
125 mpi_access.errorString(sts, msgerr, &lenerr) ;
126 debugStream << "test" << myrank << " lenerr " << lenerr
127 << " " << msgerr << endl ;
129 if ( sts != MPI_SUCCESS ) {
130 ostringstream strstream ;
131 strstream << "==========================================================="
132 << "test" << myrank << " KO"
133 << "==========================================================="
135 debugStream << strstream.str() << endl ;
136 CPPUNIT_FAIL( strstream.str() ) ;
138 if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
142 mpi_access.testAll(10,RequestId,flag) ;
144 ostringstream strstream ;
145 strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
146 debugStream << strstream.str() << endl ;
147 CPPUNIT_FAIL( strstream.str() ) ;
// Wait on all 10 requests, then verify no request remains queued on either side.
149 mpi_access.waitAll(10,RequestId) ;
150 if(MPI_ACCESS_VERBOSE) mpi_access.check() ;
152 int sendrequests[10] ;
153 int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , 10 ,
155 if ( sendreqsize != 0 ) {
156 ostringstream strstream ;
157 strstream << "=========================================================" << endl
158 << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
159 << "=========================================================" << endl ;
160 debugStream << strstream.str() << endl ;
161 CPPUNIT_FAIL( strstream.str() ) ;
163 int recvrequests[10] ;
// Fixed copy-paste: query *recv* request ids from the source rank
// (mirrors the sendRequestIds/alltarget check above).
164 int recvreqsize = mpi_access.recvRequestIds( allsource[myrank] , 10 ,
166 if ( recvreqsize != 0 ) {
167 ostringstream strstream ;
168 strstream << "=========================================================" << endl
169 << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
170 << "=========================================================" << endl ;
171 debugStream << strstream.str() << endl ;
172 CPPUNIT_FAIL( strstream.str() ) ;
175 mpi_access.barrier() ;
181 debugStream << "test" << myrank << " OK" << endl ;