LIBS_old="$LIBS"
LDFLAGS_old="$LDFLAGS"
LDFLAGS="$MPI_LIBS $LDFLAGS"
- AC_CHECK_LIB(lam,lam_mp_init,,WITHLAM="no")
- AC_CHECK_LIB(mpi,MPI_Init,WITHLAM="yes",WITHLAM="no")
- AC_CHECK_LIB(mpi,MPI_Publish_name,WITHMPI2="yes",WITHMPI2="no")
- LDFLAGS="$LDFLAGS_old"
- LIBS="$LIBS_old"
+ fi
+
+ if test "$WITHLAM" = "yes";then
+ WITHLAM="no"
+
+ if test "$WITHLAM" = "no";then
+ CPPFLAGS="$MPI_INCLUDES $CPPFLAGS"
+ LIBS="$LIBS -lmpi++"
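 + # minimal link check: if a trivial MPI_Init program links against libmpi++, assume the C++ MPI bindings are usable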
+ AC_TRY_LINK([
+ #include <mpi.h>
+ ], [int argc=0; char **argv=0; MPI_Init(&argc,&argv);],
+ WITHLAM="yes",WITHLAM="no")
+ if test "$WITHLAM" = "yes";then
+ MPI_LIBS="$MPI_LIBS -lmpi++"
+ fi
+ LIBS="$LIBS_old"
+ CPPFLAGS="$CPPFLAGS_old"
+
+ AC_CHECK_LIB(mpi++,MPI_Publish_name,WITHMPI2="yes",WITHMPI2="no")
+ LDFLAGS="$LDFLAGS_old"
+ LIBS="$LIBS_old"
+ fi
+
+ if test "$WITHLAM" = "no";then
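 + # otherwise probe the C entry points: lam_mp_init lives in liblam, MPI_Init in libmpi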
+ AC_CHECK_LIB(lam,lam_mp_init,WITHLAM="yes",WITHLAM="no")
+ if test "$WITHLAM" = "yes";then
+ MPI_LIBS="$MPI_LIBS -llam"
+ LIBS="$LIBS -llam"
+ fi
+
+ AC_CHECK_LIB(mpi,MPI_Init,WITHLAM="yes",WITHLAM="no")
+ if test "$WITHLAM" = "yes";then
+ MPI_LIBS="$MPI_LIBS -lmpi"
+ fi
+
+ AC_CHECK_LIB(mpi,MPI_Publish_name,WITHMPI2="yes",WITHMPI2="no")
+ LDFLAGS="$LDFLAGS_old"
+ LIBS="$LIBS_old"
+ fi
fi
if test "$WITHLAM" = "yes";then
WITHMPI="yes"
mpi_ok=yes
- MPI_LIBS="$MPI_LIBS -llammpi++"
+ CPPFLAGS="-DWITHLAM $CPPFLAGS"
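 + # WITHLAM is tested with #ifdef in the container launch code to pass LAM-specific mpirun options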
else
mpi_ok=no
fi
+#ifdef HAVE_MPI2
+#include "mpi.h"
+#endif
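 +// mpi.h is included ahead of the other headers: with MPI-2 its C++ bindings reject the stdio SEEK_* macros, so it must be seen before anything that drags in stdio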
#include "ReceiverFactory.hxx"
#include "Receivers.hxx"
using namespace std;
#ifndef _RECEIVERS_HXX_
#define _RECEIVERS_HXX_
-#include "SALOME_Comm_i.hxx"
-#include "Receiver.hxx"
#ifdef HAVE_MPI2
#include "mpi.h"
#endif
+#include "SALOME_Comm_i.hxx"
+#include "Receiver.hxx"
/*!
  Receiver used for transfer with CORBA when no copy is required, either remotely or locally.
+#include "SALOME_Comm_i.hxx"
#ifndef WNT
#include <rpc/xdr.h>
#endif
-#include "SALOME_Comm_i.hxx"
#include "poa.h"
#include "omnithread.h"
#include "Utils_SINGLETON.hxx"
#ifndef _SALOME_COMM_I_HXX_
#define _SALOME_COMM_I_HXX_
-#include <string>
-#include <SALOMEconfig.h>
-#include CORBA_SERVER_HEADER(SALOME_Comm)
#ifdef HAVE_MPI2
#include "mpi.h"
#endif
+#include <string>
+#include <SALOMEconfig.h>
+#include CORBA_SERVER_HEADER(SALOME_Comm)
#define TIMEOUT 20
+#include "SALOME_Comm_i.hxx"
#include "SenderFactory.hxx"
#include "utilities.h"
#include "SALOMEMultiComm.hxx"
-#include "SALOME_Comm_i.hxx"
using namespace std;
#ifdef COMP_CORBA_DOUBLE
%{
#include "ReceiverFactory.hxx"
+ #undef SEEK_SET
+ #undef SEEK_CUR
+ #undef SEEK_END
#include "SALOME_Comm_i.hxx"
%}
// Module : SALOME
// $Header$
+#ifdef HAVE_MPI2
+#include <mpi.h>
+#endif
+
#include <iostream>
#include <string>
#include <stdio.h>
#include <Utils_Timer.hxx>
#endif
-#ifdef HAVE_MPI2
-#include <mpi.h>
-#endif
-
#include "Container_init_python.hxx"
using namespace std;
MESSAGE("constructor");
_NS = new SALOME_NamingService(orb);
_ResManager = new SALOME_ResourcesManager(orb);
+ _id=0;
PortableServer::POA_var root_poa = PortableServer::POA::_the_root_poa();
PortableServer::POAManager_var pman = root_poa->the_POAManager();
PortableServer::POA_var my_poa;
FindOrStartContainer(const Engines::MachineParameters& params,
const Engines::MachineList& possibleComputers)
{
+ long id;
+ string containerNameInNS;
 + char idc[3*sizeof(long)+2]; // enough room for the decimal digits of a long, plus sign and NUL
+
Engines::Container_ptr ret = FindContainer(params,possibleComputers);
if(!CORBA::is_nil(ret))
return ret;
string theMachine=_ResManager->FindBest(possibleComputers);
MESSAGE("try to launch it on " << theMachine);
 + // Get an Id for the container: a parallel container registers in the
 + // Naming Service on the machine where its process 0 runs. The
 + // ContainerManager doesn't know the name of that machine before the
 + // parallel container is launched, so to get the IOR of the parallel
 + // container from the Naming Service, the ContainerManager assigns a
 + // unique Id. The parallel container registers its name under the
 + // /ContainerManager/Id directory in the Naming Service.
+
+ id = GetIdForContainer();
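 + // e.g. id 42: the parallel container will later be resolved as "/ContainerManager/id42"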
+
string command;
if(theMachine=="")
{
}
else if(theMachine==GetHostname())
{
- command=_ResManager->BuildCommandToLaunchLocalContainer(params);
+ command=_ResManager->BuildCommandToLaunchLocalContainer(params,id);
}
else
command =
- _ResManager->BuildCommandToLaunchRemoteContainer(theMachine,params);
+ _ResManager->BuildCommandToLaunchRemoteContainer(theMachine,params,id);
_ResManager->RmTmpFile();
int status=system(command.c_str());
count-- ;
if ( count != 10 )
MESSAGE( count << ". Waiting for FactoryServer on " << theMachine);
- string containerNameInNS =
- _NS->BuildContainerNameForNS(params,theMachine.c_str());
+ if(params.isMPI)
+ {
+ containerNameInNS = "/ContainerManager/id";
+ sprintf(idc,"%ld",id);
+ containerNameInNS += idc;
+ }
+ else
+ containerNameInNS =
+ _NS->BuildContainerNameForNS(params,theMachine.c_str());
SCRUTE(containerNameInNS);
CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
ret=Engines::Container::_narrow(obj);
MESSAGE("FindContainer: not found");
return Engines::Container::_nil();
}
+
+//=============================================================================
+/*!
 + * Get an Id for a container: a parallel container registers in the Naming
 + * Service on the machine where its process 0 runs. The ContainerManager
 + * doesn't know the name of that machine before the parallel container is
 + * launched, so to get the IOR of the parallel container from the Naming
 + * Service, the ContainerManager assigns a unique Id. The parallel container
 + * registers its name under the /ContainerManager/Id directory in the
 + * Naming Service.
+ */
+//=============================================================================
+
+
+long SALOME_ContainerManager::GetIdForContainer(void)
+{
+ _id++;
+ return _id;
+}
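 +// Typical sequence: the manager calls GetIdForContainer() before the launch,
 +// passes the id on the command line ("-id <n>"), and later resolves
 +// "/ContainerManager/id<n>" in the Naming Service to obtain the container's IOR.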
+
FindContainer(const Engines::MachineParameters& params,
const char *theMachine);
+ long GetIdForContainer(void);
+ long _id;
+
SALOME_ResourcesManager *_ResManager;
SALOME_NamingService *_NS;
};
int argc, char *argv[])
: Engines_Container_i(orb,poa,containerName,argc,argv,false), MPIObject_i(nbproc,numproc)
{
+ long id=0;
+ string IdContainerinNS;
 + char idc[3*sizeof(long)+2]; // enough room for the decimal digits of a long, plus sign and NUL
+
MESSAGE("[" << numproc << "] activate object");
_id = _poa->activate_object(this);
-// this->_add_ref();
+
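 + // retrieve the unique id passed by the ContainerManager on the command line ("-id <n>")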
+ if(argc>1)
+ {
 + for(int i=1;i<argc-1;i++)
 + {
 + if(strcmp(argv[i],"-id")==0)
 + {
 + id = atoi(argv[i+1]);
 + continue;
+ }
+ }
+ }
+ SCRUTE(id);
if(numproc==0){
_NS = new SALOME_NamingService();
-// _NS = SINGLETON_<SALOME_NamingService>::Instance() ;
-// ASSERT(SINGLETON_<SALOME_NamingService>::IsAlreadyExisting()) ;
_NS->init_orb( CORBA::ORB::_duplicate(_orb) ) ;
-// Engines::Container_ptr pCont
-// = Engines::Container::_narrow(POA_Engines::MPIContainer::_this());
CORBA::Object_var obj=_poa->id_to_reference(*_id);
Engines::Container_var pCont = Engines::Container::_narrow(obj);
+
string hostname = GetHostname();
_containerName = _NS->BuildContainerNameForNS(containerName,hostname.c_str());
SCRUTE(_containerName);
_NS->Register(pCont, _containerName.c_str());
+
 + // A parallel container registers in the Naming Service on the machine
 + // where its process 0 runs. The ContainerManager doesn't know the name
 + // of that machine before the parallel container is launched, so to get
 + // the IOR of the parallel container from the Naming Service, the
 + // ContainerManager assigns a unique Id. The parallel container registers
 + // its name under the /ContainerManager/Id directory in the Naming Service.
+
+ IdContainerinNS = "/ContainerManager/id";
+ sprintf(idc,"%ld",id);
+ IdContainerinNS += idc;
+ SCRUTE(IdContainerinNS);
+ _NS->Register(pCont, IdContainerinNS.c_str());
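 + // the ContainerManager resolves this same name in FindOrStartContainer to obtain the container's IOR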
+
}
// The root process retrieves the IORs of the containers of the other processes
// File : MPIObject_i.cxx
// Module : SALOME
+#include <mpi.h>
#include "MPIObject_i.hxx"
#include "utilities.h"
-#include <mpi.h>
using namespace std;
MPIObject_i::MPIObject_i()
+#include <mpi.h>
#include <iostream>
#include "MPIContainer_i.hxx"
#include "Utils_ORB_INIT.hxx"
#include "Utils_SINGLETON.hxx"
#include "utilities.h"
-#include <mpi.h>
#include "SALOMETraceCollector.hxx"
using namespace std;
for (unsigned int ind = 0; ind < contList.size(); ind++)
{
name = contList[ind].c_str();
+
+ if ( nbproc >= 1 )
+ {
 + char str_nbproc[8]; // automatic storage: a heap allocation here would leak on the continue path
 + sprintf(str_nbproc, "_%d", nbproc);
 + if( strstr(name.c_str(),str_nbproc) == NULL)
 + continue; // check only containers with _<nbproc> in their name
+ }
+
name += "/";
name += componentName;
SCRUTE(name);
void SALOME_NamingService::Destroy_FullDirectory(const char* Path)
throw(ServiceUnreachable)
{
- Change_Directory(Path);
- vector<string> contList = list_directory();
-
- for (unsigned int ind = 0; ind < contList.size(); ind++)
- Destroy_Name(contList[ind].c_str());
-
- Destroy_Directory(Path);
+ if( Change_Directory(Path) )
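 + // (assumes Change_Directory reports failure for a nonexistent path, so a missing directory is skipped quietly)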
+ {
+ vector<string> contList = list_directory();
- Destroy_Name(Path);
+ for (unsigned int ind = 0; ind < contList.size(); ind++)
+ Destroy_Name(contList[ind].c_str());
+
+ Destroy_Directory(Path);
+
+ Destroy_Name(Path);
+ }
}
// ============================================================================
string
SALOME_ResourcesManager::BuildCommandToLaunchRemoteContainer
(const string& machine,
- const Engines::MachineParameters& params)
+ const Engines::MachineParameters& params, const long id)
{
string command;
-
+ int nbproc;
 + char idc[3*sizeof(long)+2]; // enough room for the decimal digits of a long, plus sign and NUL
+
if ( ! _isAppliSalomeDefined )
command = BuildTempFileToLaunchRemoteContainer(machine, params);
if (params.isMPI)
{
- int nbproc;
-
if ( (params.nb_node <= 0) && (params.nb_proc_per_node <= 0) )
nbproc = 1;
else if ( params.nb_node == 0 )
ASSERT(getenv("NSPORT"));
command += getenv("NSPORT"); // port of CORBA name server
- command += " SALOME_Container ";
+ if(params.isMPI)
+ {
+ command += " mpirun -np ";
+ std::ostringstream o;
+ o << nbproc << " ";
+ command += o.str();
+#ifdef WITHLAM
+ command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
+#endif
+ command += " SALOME_MPIContainer ";
+ }
+ else
+ command += " SALOME_Container ";
+
+ command += _NS->ContainerName(params);
+ command += " -id ";
+ sprintf(idc,"%ld",id);
+ command += idc;
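 + // the container registers itself under "/ContainerManager/id<id>" (see the MPI container constructor)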
+ command += " -";
+ AddOmninamesParams(command);
+ command += " > /tmp/";
command += _NS->ContainerName(params);
- command += "&";
+ command += "_";
+ command += GetHostname();
+ command += "_";
+ command += getenv( "USER" ) ;
+ command += ".log 2>&1 &" ;
MESSAGE("command =" << command);
}
string
SALOME_ResourcesManager::BuildCommandToLaunchLocalContainer
-(const Engines::MachineParameters& params)
+(const Engines::MachineParameters& params, const long id)
{
_TmpFileName = "";
string command;
int nbproc = 0;
 + char idc[3*sizeof(long)+2]; // enough room for the decimal digits of a long, plus sign and NUL
if (params.isMPI)
{
o << nbproc << " ";
command += o.str();
+#ifdef WITHLAM
command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
+#endif
if (isPythonContainer(params.container_name))
command += "pyMPI SALOME_ContainerPy.py ";
}
command += _NS->ContainerName(params);
+ command += " -id ";
+ sprintf(idc,"%ld",id);
+ command += idc;
command += " -";
AddOmninamesParams(command);
command += " > /tmp/";
std::ostringstream o;
tempOutputFile << nbproc << " ";
+#ifdef WITHLAM
+ tempOutputFile << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
+#endif
}
tempOutputFile << (*(resInfo.ModulesPath.find("KERNEL"))).second
std::string BuildCommandToLaunchRemoteContainer
(const std::string& machine,
- const Engines::MachineParameters& params);
+ const Engines::MachineParameters& params, const long id);
std::string BuildCommandToLaunchLocalContainer
- (const Engines::MachineParameters& params);
+ (const Engines::MachineParameters& params, const long id);
void RmTmpFile();