1 // Copyright (C) 2011-2024 CEA, EDF, OPEN CASCADE
3 // This library is free software; you can redistribute it and/or
4 // modify it under the terms of the GNU Lesser General Public
5 // License as published by the Free Software Foundation; either
6 // version 2.1 of the License, or (at your option) any later version.
8 // This library is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 // Lesser General Public License for more details.
13 // You should have received a copy of the GNU Lesser General Public
14 // License along with this library; if not, write to the Free Software
15 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
25 #define EPSILON 0.00000001
31 #include "Basics_MpiUtils.hxx"
// ---------------------------------------------------------------------------
// NOTE(review): this chunk is a sampled listing — each line keeps its original
// file line number as a prefix, and many intermediate lines (declarations of
// srv/vsize/lvsize/indg/info/icom/com/status/TIMEOUT, loop headers, closing
// braces) are not visible here.  The code is therefore not compilable as
// shown.  From what is visible, this is an MPI-2 dynamic-process self-test:
// two independently launched MPI jobs rendezvous through the MPI name
// service, merge into one intracommunicator, exchange slices of a distributed
// vector, and cross-check each other's norm.  All statements below are kept
// byte-identical; only comments were added.
// ---------------------------------------------------------------------------
33 int main(int argc, char**argv)
// Local vector slice plus partial/global norms; "etalon" holds the reference
// norm received from the peer group for the final comparison.
36 double *vector, sum=0., norm=1., etalon=0.;
// rank/size: this job's MPI_COMM_WORLD; grank/gsize: the merged communicator;
// rsize: number of ranks in the remote (peer) group (gsize - size, see below).
37 int rank, size, grank, gsize, rsize;
38 int i, k1, k2, imin, imax, nb;
// Port names for the MPI-2 name-service rendezvous: port_name is the port we
// open/publish, port_name_clt is the port looked up from the peer job.
45 char port_name [MPI_MAX_PORT_NAME];
46 char port_name_clt [MPI_MAX_PORT_NAME];
47 std::string service = "SERVICE";
// Command-line parsing (the enclosing loop header is not visible in this
// excerpt): "-debug" toggles tracing, "-vsize <n>" sets the global vector
// length stored in vsize (declared outside this excerpt).
51 std::string sargv = argv[i];
52 if(sargv.find("-debug")!=std::string::npos)
54 else if(sargv.find("-vsize")!=std::string::npos)
55 vsize = atoi(argv[++i]);
58 MPI_Init( &argc, &argv );
60 MPI_Comm_size( MPI_COMM_WORLD, &size );
61 MPI_Comm_rank( MPI_COMM_WORLD, &rank );
63 MPI_Barrier(MPI_COMM_WORLD);
// Errors must be *returned* (not fatal) during the rendezvous so that a
// failed Lookup/Publish simply falls through the if/else chain below.
64 MPI_ERROR_HANDLER(MPI_ERRORS_RETURN);
// "ompi_unique" is an Open MPI-specific info key — presumably it makes
// MPI_Publish_name fail if the name is already published, which is what lets
// the publish/lookup race below resolve into exactly one server; TODO confirm
// against the Open MPI docs.
67 MPI_Info_create(&info);
68 MPI_Info_set(info, "ompi_unique", "true");
70 MPI_Open_port(MPI_INFO_NULL, port_name);
// Rendezvous, case 1: the service is already published -> we are the client;
// our own port is unused, so close it.
71 if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) {
73 std::cout << "[" << rank << "] I am client: I get the service " << service << " !" << std::endl;
74 MPI_Close_port( port_name );
// Case 2: nobody published yet and our publish succeeded -> we are the server.
76 else if ( MPI_Publish_name((char*)service.c_str(), info, port_name) == MPI_SUCCESS ) {
78 std::cout << "[" << rank << "] I am server: I've managed to publish the service " << service << " !" << std::endl;
// Case 3: our publish lost the race; look the name up again -> client.
81 else if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) {
// NOTE(review): stray double semicolon ";;" at the end of the next line —
// harmless (empty statement) but should be cleaned up in the real file.
83 std::cout << "[" << rank << "] I am client: I get the service " << service << " !" << std::endl;;
84 MPI_Close_port( port_name );
// Fall-through: neither lookup nor publish succeeded.
88 std::cout << "[" << rank << "] ERROR!!!" << std::endl;
// Non-root ranks (context not fully visible): poll the name service until the
// server's publication becomes visible, bounded by TIMEOUT iterations.
95 while ( i != TIMEOUT ) {
97 if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) {
99 std::cout << "[" << rank << "] I am client: I get the service " << service << " !" << std::endl;
// Give up if the service never appeared within TIMEOUT polls.
104 if ( i == TIMEOUT ) {
106 std::cout << "[" << rank << "] Waiting too long exiting !" << std::endl;
// Rendezvous done: from here on any MPI error is a genuine test failure.
111 MPI_ERROR_HANDLER(MPI_ERRORS_ARE_FATAL);
// Rank 0 decided the server/client role (srv); share it with all local ranks.
112 MPI_Bcast(&srv,1,MPI_INT,0,MPI_COMM_WORLD);
// Server side accepts, client side connects; both yield the same
// intercommunicator icom.  (The surrounding if/else on srv is not visible.)
114 MPI_Comm_accept( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom );
116 MPI_Comm_connect(port_name_clt, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &icom );
// Merge into a single intracommunicator; "!srv" orders server ranks first
// (high=0 for the server group), so remote ranks start at index `size`.
118 MPI_Intercomm_merge(icom,!srv,&com);
120 MPI_Comm_rank( com, &grank );
121 MPI_Comm_size( com, &gsize );
// Block-distribute the global vector of length vsize across the local job:
// this integer formula gives contiguous, exactly-covering slices even when
// vsize is not divisible by size.
124 lvsize = ((rank+1)*vsize) / size - (rank*vsize) / size;
125 vector = (double*)malloc(lvsize*sizeof(double));
126 indg = (int*)malloc(lvsize*sizeof(int));
// Size of the peer (remote) group in the merged communicator.
127 rsize = gsize - size;
// Fill our slice: indg[i] is the global index of local element i, and the
// values are a deterministic function of the global index so both jobs can
// reproduce them.  sum accumulates the local squared norm.
129 for(i=0;i<lvsize;i++){
130 indg[i] = (rank*vsize)/size + i;
135 vector[i] = 2. * sin( (rank*vsize)/size + i );
136 sum += vector[i]*vector[i];
// Global squared norm of this job's vector, reduced onto local rank 0.
141 MPI_Reduce(&sum,&norm,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
145 std::cout << "[" << grank << "] norm=" << norm << std::endl;
// Redistribution: for each remote rank i, compute the global index window
// [k1,k2] that rank i owns under the *remote* job's block distribution
// (rsize-way split), then send/receive the overlap with our own slice.
149 for(i=0;i<rsize;i++){
150 //rlvsize = ((i+1)*vsize) / rsize - (i*vsize) / rsize;
151 k1 = (i*vsize)/rsize;
152 k2 = ((i+1)*vsize)/rsize -1;
// Only exchange when the remote window [k1,k2] overlaps our slice
// [indg[0], indg[lvsize-1]].  imin/imax are presumably initialized to k1/k2
// in lines not shown here — TODO confirm; they are clamped to the overlap.
154 if( (k1 <= indg[lvsize-1]) && (k2 >= indg[0]) ){
156 if( indg[0] > imin ) imin = indg[0];
158 if( indg[lvsize-1] < imax) imax = indg[lvsize-1];
160 nb = imax - imin + 1;
// Server path: remote rank i sits at merged rank i+size (server group first).
// Tag 100 carries the element count, tag 200 the overlapping elements.
161 MPI_Send( &nb, 1, MPI_INT, i+size, 100, com );
162 MPI_Send( vector+imin-indg[0], nb, MPI_DOUBLE, i+size, 200, com );
// Client path: receive the matching window from merged rank i.
165 MPI_Recv( &nb, 1, MPI_INT, i, 100, com, &status );
166 MPI_Recv( vector+imin-indg[0], nb, MPI_DOUBLE, i, 200, com, &status );
// Recompute the squared norm after the exchange (sum is presumably reset to
// 0 in a line not shown — TODO confirm); it must match the pre-exchange norm.
175 for(i=0;i<lvsize;i++){
177 std::cout << "[" << rank << "] vector[" << i << "]=" << vector[i] << std::endl;
178 sum += vector[i]*vector[i];
180 MPI_Reduce(&sum,&norm,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
184 std::cout << "[" << grank << "] norm=" << norm << std::endl;
// Cross-check between the two jobs' rank-0 processes over the merged comm:
// server rank 0 talks to merged rank `size` (client rank 0) with tags 300/400.
191 MPI_Recv(&etalon, 1, MPI_DOUBLE,size,400,com, &status);
192 MPI_Send(&norm,1,MPI_DOUBLE, size, 300, com);
// Client rank 0 mirrors the exchange with merged rank 0 (server rank 0).
196 MPI_Send(&norm,1,MPI_DOUBLE, 0, 400, com);
197 MPI_Recv(&etalon, 1, MPI_DOUBLE,0,300,com, &status);
// Teardown of the dynamic connection and the published service name.
203 MPI_Comm_disconnect( &com );
205 MPI_Unpublish_name((char*)service.c_str(), MPI_INFO_NULL, port_name);
206 MPI_Close_port( port_name );
// Final verdict: the two jobs' norms must agree to the relative tolerance
// EPSILON (defined at the top of the file).
215 if(fabs(norm-etalon)/norm < EPSILON ){
217 std::cout << "OK" << std::endl;
222 std::cout << "KO" << std::endl;