SET(MPI_LIBRARIES ${MPI_C_LIBRARIES} ${MPI_CXX_LIBRARIES})
IF(MPI_FOUND)
+ # Detect whether MPI_Publish_name is provided by the external MPI library;
+ # otherwise fall back to our own implementation.
+ include(CheckSymbolExists)
+ SET(CMAKE_REQUIRED_LIBRARIES ${MPI_LIBRARIES})
+ SET(CMAKE_REQUIRED_INCLUDES ${MPI_C_INCLUDE_PATH})
+ CHECK_SYMBOL_EXISTS(MPI_Publish_name mpi.h MPI2_IS_OK)
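+ # MPI_DEFINITIONS carries the MPI compile flags, plus -DHAVE_MPI2 when the
+ # MPI library provides MPI-2 name publishing.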
+ SET(MPI_DEFINITIONS "${MPI_CXX_COMPILE_FLAGS}")
+ IF(MPI2_IS_OK)
+ MESSAGE(STATUS "MPI implementation is MPI-2 compatible ... adding -DHAVE_MPI2")
+ SET(MPI_DEFINITIONS "${MPI_CXX_COMPILE_FLAGS} -DHAVE_MPI2")
+ ENDIF(MPI2_IS_OK)
+
SALOME_ACCUMULATE_HEADERS(MPI_INCLUDE_DIRS)
SALOME_ACCUMULATE_ENVIRONMENT(LD_LIBRARY_PATH ${MPI_LIBRARIES})
ENDIF()
MatrixClient.cxx
)
-ADD_DEFINITIONS(${OMNIORB_DEFINITIONS} ${MPI_CXX_COMPILE_FLAGS})
+ADD_DEFINITIONS(${OMNIORB_DEFINITIONS} ${MPI_DEFINITIONS})
ADD_LIBRARY(SalomeCommunication ${SalomeCommunication_SOURCES})
TARGET_LINK_LIBRARIES(SalomeCommunication ${COMMON_LIBS})
IF(SALOME_USE_MPI)
INCLUDE_DIRECTORIES(${MPI_CXX_INCLUDE_PATH})
- ADD_DEFINITIONS(${MPI_CXX_COMPILE_FLAGS})
+ ADD_DEFINITIONS(${MPI_DEFINITIONS})
SET(_libSALOME_Comm_LIBS
${_libSALOME_Comm_LIBS}
${MPI_CXX_LIBRARIES}
)
IF(WITH_MPI_SEQ_CONTAINER)
- ADD_DEFINITIONS(${MPI_CXX_COMPILE_FLAGS})
+ ADD_DEFINITIONS(${MPI_DEFINITIONS})
SET(COMMON_LIBS
${MPI_CXX_LIBRARIES}
)
#include <SALOMEconfig.h>
#include CORBA_CLIENT_HEADER(SALOME_Session)
+#ifdef HAVE_MPI2
+#include <mpi.h>
+#endif
+
#ifdef WIN32
#include <process.h>
#define getpid _getpid
_isAppliSalomeDefined = (GetenvThreadSafe("APPLI") != 0);
#ifdef HAVE_MPI2
-#ifdef WITHOPENMPI
+#ifdef OPEN_MPI
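+  // With Open MPI, the MPI-2 name service is provided by a separate ompi-server
+  // process; its pid is stored so the destructor can stop it.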
_pid_mpiServer = -1;
// the urifile name depends on pid of the process
std::stringstream urifile;
if(_pid_mpiServer < 0)
throw SALOME_Exception("Error when getting ompi-server id");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
_pid_mpiServer = -1;
// get the pid of all hydra_nameserver
std::set<pid_t> thepids1 = getpidofprogram("hydra_nameserver");
MESSAGE("destructor");
delete _resManager;
#ifdef HAVE_MPI2
-#ifdef WITHOPENMPI
+#ifdef OPEN_MPI
if( GetenvThreadSafe("OMPI_URI_FILE") != NULL ){
// kill my ompi-server
if( kill(_pid_mpiServer,SIGTERM) != 0 )
if(status!=0)
throw SALOME_Exception("Error when removing urifile");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
// kill my hydra_nameserver
if(_pid_mpiServer > -1)
if( kill(_pid_mpiServer,SIGTERM) != 0 )
std::ostringstream o;
o << nbproc << " ";
command += o.str();
-#ifdef WITHLAM
+#ifdef LAM_MPI
command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
-#elif defined(WITHOPENMPI)
+#elif defined(OPEN_MPI)
if( GetenvThreadSafe("OMPI_URI_FILE") == NULL )
command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
else{
command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
command += GetenvThreadSafe("OMPI_URI_FILE");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
command += "-nameserver " + Kernel_Utils::GetHostname();
#endif
command += " SALOME_MPIContainer ";
if( GetenvThreadSafe("LIBBATCH_NODEFILE") != NULL )
o << "-machinefile " << machinesFile << " ";
-#ifdef WITHLAM
+#ifdef LAM_MPI
o << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
-#elif defined(WITHOPENMPI)
+#elif defined(OPEN_MPI)
if( GetenvThreadSafe("OMPI_URI_FILE") == NULL )
o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
else
o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
o << GetenvThreadSafe("OMPI_URI_FILE");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
o << "-nameserver " + Kernel_Utils::GetHostname();
#endif
std::ostringstream o;
tempOutputFile << nbproc << " ";
-#ifdef WITHLAM
+#ifdef LAM_MPI
tempOutputFile << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
-#elif defined(WITHOPENMPI)
+#elif defined(OPEN_MPI)
if( GetenvThreadSafe("OMPI_URI_FILE") == NULL )
tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
else{
tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
tempOutputFile << GetenvThreadSafe("OMPI_URI_FILE");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
tempOutputFile << "-nameserver " + Kernel_Utils::GetHostname();
#endif
}
SET(COMMON_FLAGS
${OMNIORB_DEFINITIONS}
- ${MPI_CXX_COMPILE_FLAGS}
+ ${MPI_DEFINITIONS}
)
SET(SalomeParallelDSCContainer_SOURCES
ParallelDSC_i.cxx
${PROJECT_BINARY_DIR}/idl
)
-ADD_DEFINITIONS(${MPI_CXX_COMPILE_FLAGS} ${LIBXML2_DEFINITIONS} ${OMNIORB_DEFINITIONS})
+ADD_DEFINITIONS(${MPI_DEFINITIONS} ${LIBXML2_DEFINITIONS} ${OMNIORB_DEFINITIONS})
IF(SALOME_USE_LIBBATCH)
ADD_DEFINITIONS(-DWITH_LIBBATCH)
ENDIF(SALOME_USE_LIBBATCH)
${OMNIORB_LIBRARIES}
)
-ADD_DEFINITIONS(${MPI_CXX_COMPILE_FLAGS} ${OMNIORB_DEFINITIONS})
+ADD_DEFINITIONS(${MPI_DEFINITIONS} ${OMNIORB_DEFINITIONS})
ADD_LIBRARY(SalomeMPIContainer MPIObject_i.cxx MPIContainer_i.cxx)
TARGET_LINK_LIBRARIES(SalomeMPIContainer ${COMMON_LIBS})
ADD_EXECUTABLE(testMPI2 testMPI2.cxx)
TARGET_LINK_LIBRARIES(testMPI2 ${MPI_CXX_LIBRARIES})
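+# getMPIImplementation reports, through its exit code, which MPI implementation
+# the kernel was built against; launch_testMPI2.csh uses it to pick a name server.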
+ADD_EXECUTABLE(getMPIImplementation getMPIImplementation.cxx)
+TARGET_LINK_LIBRARIES(getMPIImplementation ${MPI_CXX_LIBRARIES})
+
INSTALL(TARGETS SALOME_MPIContainer EXPORT ${PROJECT_NAME}TargetGroup DESTINATION ${SALOME_INSTALL_BINS})
INSTALL(TARGETS testMPI2 DESTINATION ${SALOME_INSTALL_BINS})
+INSTALL(TARGETS getMPIImplementation DESTINATION ${SALOME_INSTALL_BINS})
-INSTALL(FILES launch_testMPI2.csh DESTINATION ${SALOME_INSTALL_SCRIPT_SCRIPTS})
+INSTALL(FILES launch_testMPI2.csh PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE DESTINATION ${SALOME_INSTALL_SCRIPT_SCRIPTS})
FILE(GLOB COMMON_HEADERS_HXX "${CMAKE_CURRENT_SOURCE_DIR}/*.hxx")
INSTALL(FILES ${COMMON_HEADERS_HXX} DESTINATION ${SALOME_INSTALL_HEADERS})
int i;
char port_name[MPI_MAX_PORT_NAME];
char port_name_clt[MPI_MAX_PORT_NAME];
+ MPI_Info info;
std::ostringstream msg;
if( service.size() == 0 )
MPI_Barrier(MPI_COMM_WORLD);
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
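+ // "ompi_unique" is an Open MPI specific info key asking the name server to
+ // refuse a second publication of the same service name.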
+ MPI_Info_create(&info);
+ MPI_Info_set(info, "ompi_unique", "true");
if( _numproc == 0 )
{
/* rank 0 tries to be a server. If the service is already published, it tries to be a client */
MESSAGE("[" << _numproc << "] I get the connection with " << service << " at " << port_name_clt << std::endl);
MPI_Close_port( port_name );
}
- else if ( MPI_Publish_name((char*)service.c_str(), MPI_INFO_NULL, port_name) == MPI_SUCCESS )
+ else if ( MPI_Publish_name((char*)service.c_str(), info, port_name) == MPI_SUCCESS )
{
_srv[service] = true;
_port_name[service] = port_name;
--- /dev/null
+#include <mpi.h>
+
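+// Exit code identifies the MPI implementation this binary was built against:
+// 1 = Open MPI, 2 = MPICH, 3 = LAM/MPI, 0 = unknown.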
+int main(int argc, char** argv)
+{
+#ifdef OPEN_MPI
+ return 1;
+#elif defined(MPICH)
+ return 2;
+#elif defined(LAM_MPI)
+ return 3;
+#else
+ return 0;
+#endif
+}
else
set debug=""
endif
-# get pid of ompi-server
-setenv OMPI_URI_FILE ${HOME}/.urifile_$$
-set lpid1=`pidof ompi-server`
-ompi-server -r ${OMPI_URI_FILE}
-set lpid2=`pidof ompi-server`
+# get mpi implementation
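+# (exit code of getMPIImplementation: 1 = openmpi, 2 = mpich, 3 = lam, 0 = unknown)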
+${KERNEL_ROOT_DIR}/bin/salome/getMPIImplementation
+set res = $status
+if $res == 1 then
+  set mpi="openmpi"
+else if $res == 2 then
+  set mpi="mpich"
+else
+  echo "unsupported MPI implementation (this test needs Open MPI or MPICH)"
+  exit 1
+endif
+if $mpi == "openmpi" then
+# launch ompi-server
+ setenv OMPI_URI_FILE ${HOME}/.urifile_$$
+ set lpid1=`pidof ompi-server`
+ ompi-server -r ${OMPI_URI_FILE}
+ set lpid2=`pidof ompi-server`
+else if $mpi == "mpich" then
+# launch hydra_nameserver
+ set lpid1=`pidof hydra_nameserver`
+  if "$lpid1" == "" then
+ hydra_nameserver &
+ endif
+ set lpid2=`pidof hydra_nameserver`
+endif
+# get pid of mpi server
+set pid=0
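+# the server pid is the one present in lpid2 but not in lpid1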
foreach i ($lpid2)
set flag=0
foreach j ($lpid1)
end
sleep 2
# launch two instances of executable to create communication between both
-mpirun -np 2 -ompi-server file:${OMPI_URI_FILE} ${KERNEL_ROOT_DIR}/bin/salome/testMPI2 -vsize 32 $debug &
-mpirun -np 3 -ompi-server file:${OMPI_URI_FILE} ${KERNEL_ROOT_DIR}/bin/salome/testMPI2 -vsize 32 $debug
+if $mpi == "openmpi" then
+ mpirun -np 2 -ompi-server file:${OMPI_URI_FILE} ${KERNEL_ROOT_DIR}/bin/salome/testMPI2 -vsize 32 $debug &
+ mpirun -np 3 -ompi-server file:${OMPI_URI_FILE} ${KERNEL_ROOT_DIR}/bin/salome/testMPI2 -vsize 32 $debug
+else if $mpi == "mpich" then
+  mpirun -np 2 -nameserver `hostname` ${KERNEL_ROOT_DIR}/bin/salome/testMPI2 -vsize 32 $debug &
+  mpirun -np 3 -nameserver `hostname` ${KERNEL_ROOT_DIR}/bin/salome/testMPI2 -vsize 32 $debug
+endif
set res=$status
sleep 1
-# kill ompi-server
-kill -9 $pid
+# kill mpi server
+if $pid != 0 then
+ kill -9 $pid
+endif
+if $mpi == "openmpi" then
# delete uri file
-rm -f ${OMPI_URI_FILE}
+ rm -f ${OMPI_URI_FILE}
+endif
# give result of test
if $res == 0 then
echo "OK"
int main(int argc, char**argv)
{
int *indg;
- double *vector, sum=0., norm, etalon;
+ double *vector, sum=0., norm=1., etalon=0.;
int rank, size, grank, gsize, rsize;
int vsize=20, lvsize, rlvsize;
int i, k1, k2, imin, imax, nb;
int srv=0;
MPI_Comm com, icom;
- MPI_Status status;
+ MPI_Status status;
+ MPI_Info info;
char port_name [MPI_MAX_PORT_NAME];
char port_name_clt [MPI_MAX_PORT_NAME];
std::string service = "SERVICE";
bool debug=false;
-#ifndef WITHOPENMPI
- std::cout << "This test only works with openmpi implementation" << std::endl;
- exit(1);
-#endif
-
for(i=1;i<argc;i++){
std::string sargv = argv[i];
if(sargv.find("-debug")!=std::string::npos)
MPI_Barrier(MPI_COMM_WORLD);
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+#ifdef HAVE_MPI2
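+ // Pass the Open MPI specific "ompi_unique" info key so that the name server
+ // refuses to publish an already registered service name.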
+ MPI_Info_create(&info);
+ MPI_Info_set(info, "ompi_unique", "true");
if(rank==0){
MPI_Open_port(MPI_INFO_NULL, port_name);
if ( MPI_Lookup_name((char*)service.c_str(), MPI_INFO_NULL, port_name_clt) == MPI_SUCCESS ) {
std::cout << "[" << rank << "] I am client: I get the service " << service << " !" << std::endl;
MPI_Close_port( port_name );
}
- else if ( MPI_Publish_name((char*)service.c_str(), MPI_INFO_NULL, port_name) == MPI_SUCCESS ) {
+ else if ( MPI_Publish_name((char*)service.c_str(), info, port_name) == MPI_SUCCESS ) {
if(debug)
std::cout << "[" << rank << "] I am server: I've managed to publish the service " << service << " !" << std::endl;
srv = 1;
free(indg);
free(vector);
+#endif
MPI_Finalize();
if(rank==0){
SALOME_ParallelGlobalProcessVar_i.cxx
)
-ADD_DEFINITIONS(${ONMIORB_DEFINITIONS} ${MPI_CXX_COMPILE_FLAGS})
+ADD_DEFINITIONS(${OMNIORB_DEFINITIONS} ${MPI_DEFINITIONS})
ADD_LIBRARY(SalomeParallelContainer ${SalomeParallelContainer_SOURCES})
TARGET_LINK_LIBRARIES(SalomeParallelContainer ${COMMON_LIBS} ${MPI_CXX_LIBRARIES})
${PROJECT_BINARY_DIR}/idl
)
-ADD_DEFINITIONS(${OMNIORB_DEFINITIONS} ${MPI_CXX_COMPILE_FLAGS})
+ADD_DEFINITIONS(${OMNIORB_DEFINITIONS} ${MPI_DEFINITIONS})
SET(COMMON_LIBS
Registry
}
else{
m1->Coucou(1L);
+ // Display MPI component in naming service
+ std::string cmdn = "nameclt list Containers.dir/";
+ cmdn += hostName;
+ cmdn += ".dir";
+ system(cmdn.c_str());
+ cmdn = "nameclt list Containers.dir/";
+ cmdn += hostName;
+ cmdn += ".dir/MPIFactoryServer_";
+ cmdn += argv[2];
+ cmdn += ".dir";
+ system(cmdn.c_str());
// // sleep(5);
INFOS("Unload MPI Component");
iGenFact->remove_impl(m1) ;