// NOTE(review): this chunk is an extraction with the original file's line
// numbers fused into each source line, and many lines are missing (variable
// declarations for srv/indg/icom/com/status, the TIMEOUT macro, most braces,
// and the end of main). Comments below describe only what the visible
// statements do; gaps are flagged explicitly.

// Relative tolerance for the final norm comparison (used near the end).
6 #define EPSILON 0.00000001

// MPI client/server rendezvous test: processes race to publish/look up a
// named service, connect the two groups, distribute a vector, and cross-check
// the squared norms computed on each side.
12 int main(int argc, char**argv)
15 double *vector, sum=0., norm, etalon;
16 int rank, size, grank, gsize, rsize;
// vsize: global vector length (default 20, overridable via -vsize).
17 int vsize=20, lvsize, rlvsize;
18 int i, k1, k2, imin, imax, nb;
// Port names for MPI dynamic process management (server side and client side).
22 char port_name [MPI_MAX_PORT_NAME];
23 char port_name_clt [MPI_MAX_PORT_NAME];
// Well-known service name used with MPI_Publish_name / MPI_Lookup_name.
24 std::string service = "SERVICE";
// Name publishing behavior differs across MPI implementations; this test
// relies on Open MPI's semantics.
28 std::cout << "This test only works with openmpi implementation" << std::endl;
// Command-line parsing: -debug (flag) and -vsize <n> (vector length).
// NOTE(review): the surrounding for-loop over argv is not visible here.
33 std::string sargv = argv[i];
34 if(sargv.find("-debug")!=std::string::npos)
36 else if(sargv.find("-vsize")!=std::string::npos)
37 vsize = atoi(argv[++i]);
40 MPI_Init( &argc, &argv );
42 MPI_Comm_size( MPI_COMM_WORLD, &size );
43 MPI_Comm_rank( MPI_COMM_WORLD, &rank );
45 MPI_Barrier(MPI_COMM_WORLD);
// Switch to ERRORS_RETURN so a failed Lookup/Publish can be tested with an
// if() instead of aborting the job; restored to ERRORS_ARE_FATAL below.
47 MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
// Role negotiation: every process opens a port, then
//   1) if the service already resolves -> act as client (close own port);
//   2) else try to publish it -> act as server;
//   3) else (lost the publish race) re-lookup -> act as client;
//   4) else give up with an error.
// NOTE(review): the branches presumably also set a srv flag declared in a
// missing line — confirm against the full file.
49 MPI_Open_port(MPI_INFO_NULL, port_name);
50 if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) {
52 std::cout << "[" << rank << "] I am client: I get the service " << service << " !" << std::endl;
53 MPI_Close_port( port_name );
55 else if ( MPI_Publish_name((char*)service.c_str(), MPI_INFO_NULL, port_name) == MPI_SUCCESS ) {
57 std::cout << "[" << rank << "] I am server: I've managed to publish the service " << service << " !" << std::endl;
60 else if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) {
62 std::cout << "[" << rank << "] I am client: I get the service " << service << " !" << std::endl;;
63 MPI_Close_port( port_name );
67 std::cout << "[" << rank << "] ERROR!!!" << std::endl;
// Client-side retry: poll the name service until it appears or TIMEOUT
// iterations elapse. NOTE(review): TIMEOUT and the loop increment/sleep are
// in missing lines.
74 while ( i != TIMEOUT ) {
76 if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) {
78 std::cout << "[" << rank << "] I am client: I get the service " << service << " !" << std::endl;
85 std::cout << "[" << rank << "] Waiting too long exiting !" << std::endl;
// From here on, MPI errors abort the job again.
90 MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
// Rank 0's role decision (srv) is shared with the whole local group.
91 MPI_Bcast(&srv,1,MPI_INT,0,MPI_COMM_WORLD);
// Server group accepts the connection; client group connects to the
// published port. NOTE(review): the if(srv)/else guards are in missing lines.
93 MPI_Comm_accept( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom );
95 MPI_Comm_connect(port_name_clt, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom );
// Merge the intercommunicator into one intracommunicator; the !srv "high"
// flag orders server ranks before client ranks in com.
97 MPI_Intercomm_merge(icom,!srv,&com);
99 MPI_Comm_rank( com, &grank );
100 MPI_Comm_size( com, &gsize );
// Block-distribute the vsize-long vector over the local group:
// lvsize is this rank's slice length, indg[] its global indices,
// rsize the number of processes in the remote group.
103 lvsize = ((rank+1)*vsize) / size - (rank*vsize) / size;
104 vector = (double*)malloc(lvsize*sizeof(double));
105 indg = (int*)malloc(lvsize*sizeof(int));
106 rsize = gsize - size;
108 for(i=0;i<lvsize;i++){
109 indg[i] = (rank*vsize)/size + i;
// Fill the local slice with 2*sin(global index) and accumulate the local
// sum of squares.
114 vector[i] = 2. * sin( (rank*vsize)/size + i );
115 sum += vector[i]*vector[i];
// First squared norm, reduced within the local group to rank 0.
120 MPI_Reduce(&sum,&norm,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
124 std::cout << "[" << grank << "] norm=" << norm << std::endl;
// Redistribute the vector between the two groups: for each remote process i,
// compute its slice [k1,k2] under the remote group's block distribution and
// intersect it with the local slice [indg[0], indg[lvsize-1]].
128 for(i=0;i<rsize;i++){
129 rlvsize = ((i+1)*vsize) / rsize - (i*vsize) / rsize;
130 k1 = (i*vsize)/rsize;
131 k2 = ((i+1)*vsize)/rsize -1;
// Overlap test; imin/imax are clamped to the intersection.
// NOTE(review): the lines initializing imin=k1 / imax=k2 appear to be
// missing — confirm against the full file.
133 if( (k1 <= indg[lvsize-1]) && (k2 >= indg[0]) ){
135 if( indg[0] > imin ) imin = indg[0];
137 if( indg[lvsize-1] < imax) imax = indg[lvsize-1];
139 nb = imax - imin + 1;
// Send path (presumably server side, guarded by a missing if(srv)):
// remote group ranks are offset by size in the merged communicator.
140 MPI_Send( &nb, 1, MPI_INT, i+size, 100, com );
141 MPI_Send( vector+imin-indg[0], nb, MPI_DOUBLE, i+size, 200, com );
// Receive path (presumably client side): counts on tag 100, data on tag 200.
144 MPI_Recv( &nb, 1, MPI_INT, i, 100, com, &status );
145 MPI_Recv( vector+imin-indg[0], nb, MPI_DOUBLE, i, 200, com, &status );
// Recompute the squared norm on the received data (optionally printing each
// element, presumably under the -debug flag) and reduce again.
154 for(i=0;i<lvsize;i++){
156 std::cout << "[" << rank << "] vector[" << i << "]=" << vector[i] << std::endl;
157 sum += vector[i]*vector[i];
159 MPI_Reduce(&sum,&norm,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
163 std::cout << "[" << grank << "] norm=" << norm << std::endl;
// Cross-check between the two groups' rank 0: exchange norm/etalon over com.
// Server-side order (recv 400, send 300) mirrors client-side (send 400,
// recv 300) to avoid deadlock.
170 MPI_Recv(&etalon, 1, MPI_DOUBLE,size,400,com, &status);
171 MPI_Send(&norm,1,MPI_DOUBLE, size, 300, com);
175 MPI_Send(&norm,1,MPI_DOUBLE, 0, 400, com);
176 MPI_Recv(&etalon, 1, MPI_DOUBLE,0,300,com, &status);
// Teardown: drop the merged communicator, withdraw the published name and
// close the server port.
182 MPI_Comm_disconnect( &com );
184 MPI_Unpublish_name((char*)service.c_str(), MPI_INFO_NULL, port_name);
185 MPI_Close_port( port_name );
// Final verdict: both sides' norms must agree to relative EPSILON.
193 if(fabs(norm-etalon)/norm < EPSILON ){
195 std::cout << "OK" << std::endl;
200 std::cout << "KO" << std::endl;