-// Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
+// Copyright (C) 2007-2017 CEA/DEN, EDF R&D, OPEN CASCADE
//
// Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
// CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#include <SALOMEconfig.h>
#include CORBA_CLIENT_HEADER(SALOME_Session)
+#ifdef HAVE_MPI2
+#include <mpi.h>
+#include <sys/wait.h>
+#endif
+
#ifdef WIN32
#include <process.h>
#define getpid _getpid
const int SALOME_ContainerManager::TIME_OUT_TO_LAUNCH_CONT=60;
-const char *SALOME_ContainerManager::_ContainerManagerNameInNS =
+const char *SALOME_ContainerManager::_ContainerManagerNameInNS =
"/ContainerManager";
omni_mutex SALOME_ContainerManager::_numInstanceMutex;
Utils_Mutex SALOME_ContainerManager::_systemMutex;
//=============================================================================
-/*!
+/*!
* Constructor
* \param orb
* Define a CORBA single thread policy for the server, which avoid to deal
_isAppliSalomeDefined = (GetenvThreadSafe("APPLI") != 0);
#ifdef HAVE_MPI2
-#ifdef WITHOPENMPI
+#ifdef OPEN_MPI
_pid_mpiServer = -1;
// the urifile name depends on pid of the process
std::stringstream urifile;
- urifile << GetenvThreadSafe("HOME") << "/.urifile_" << getpid();
+ urifile << GetenvThreadSafeAsString("HOME") << "/.urifile_" << getpid();
setenv("OMPI_URI_FILE",urifile.str().c_str(),1);
if( GetenvThreadSafe("OMPI_URI_FILE") != NULL ){
- // get the pid of all ompi-server
- std::set<pid_t> thepids1 = getpidofprogram("ompi-server");
- // launch a new ompi-server
- std::string command;
- command = "ompi-server -r ";
- command += GetenvThreadSafe("OMPI_URI_FILE");
- int status=SystemThreadSafe(command.c_str());
- if(status!=0)
- throw SALOME_Exception("Error when launching ompi-server");
- // get the pid of all ompi-server
- std::set<pid_t> thepids2 = getpidofprogram("ompi-server");
- // my ompi-server is the new one
- std::set<pid_t>::const_iterator it;
- for(it=thepids2.begin();it!=thepids2.end();it++)
- if(thepids1.find(*it) == thepids1.end())
- _pid_mpiServer = *it;
- if(_pid_mpiServer < 0)
- throw SALOME_Exception("Error when getting ompi-server id");
+ // Linux specific code
+ pid_t pid = fork(); // spawn a child process, following code is executed in both processes
+ if ( pid == 0 ) // I'm a child, replace myself with a new ompi-server
+ {
+ std::string uriarg = GetenvThreadSafeAsString("OMPI_URI_FILE");
+ execlp( "ompi-server", "ompi-server", "-r", uriarg.c_str(), NULL );
+ throw SALOME_Exception("Error when launching ompi-server"); // execlp failed
+ }
+ else if ( pid < 0 )
+ {
+ throw SALOME_Exception("fork() failed");
+ }
+ else // I'm a parent
+ {
+          //wait(NULL); // intentionally not waiting: the ompi-server child must keep running; its pid is stored in _pid_mpiServer and killed in the destructor
+ _pid_mpiServer = pid;
+ }
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
_pid_mpiServer = -1;
- // get the pid of all hydra_nameserver
- std::set<pid_t> thepids1 = getpidofprogram("hydra_nameserver");
- // launch a new hydra_nameserver
- std::string command;
- command = "hydra_nameserver &";
- SystemThreadSafe(command.c_str());
- // get the pid of all hydra_nameserver
- std::set<pid_t> thepids2 = getpidofprogram("hydra_nameserver");
- // my hydra_nameserver is the new one
- std::set<pid_t>::const_iterator it;
- for(it=thepids2.begin();it!=thepids2.end();it++)
- if(thepids1.find(*it) == thepids1.end())
- _pid_mpiServer = *it;
+ // Linux specific code
+ pid_t pid = fork(); // spawn a child process, following code is executed in both processes
+ if ( pid == 0 ) // I'm a child, replace myself with a new hydra_nameserver
+ {
+ execlp( "hydra_nameserver", "hydra_nameserver", NULL );
+ throw SALOME_Exception("Error when launching hydra_nameserver"); // execlp failed
+ }
+ else if ( pid < 0 )
+ {
+ throw SALOME_Exception("fork() failed");
+ }
+ else // I'm a parent
+ {
+      //wait(NULL); // intentionally not waiting: hydra_nameserver must keep running; its pid is stored in _pid_mpiServer and killed in the destructor
+ _pid_mpiServer = pid;
+ }
#endif
#endif
}
//=============================================================================
-/*!
+/*!
* destructor
*/
//=============================================================================
MESSAGE("destructor");
delete _resManager;
#ifdef HAVE_MPI2
-#ifdef WITHOPENMPI
+#ifdef OPEN_MPI
if( GetenvThreadSafe("OMPI_URI_FILE") != NULL ){
// kill my ompi-server
if( kill(_pid_mpiServer,SIGTERM) != 0 )
if(status!=0)
throw SALOME_Exception("Error when removing urifile");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
// kill my hydra_nameserver
if(_pid_mpiServer > -1)
if( kill(_pid_mpiServer,SIGTERM) != 0 )
MESSAGE("ShutdownContainers: " << (*iter));
cont->Shutdown();
}
- else
+ else
MESSAGE("ShutdownContainers: no container ref for " << (*iter));
}
catch(CORBA::SystemException& e)
MESSAGE("[GiveContainer] - length of possible resources " << possibleResources.size());
std::vector<std::string> local_resources;
- // Step 3: if mode is "get" keep only machines with existing containers
+ // Step 3: if mode is "get" keep only machines with existing containers
if(mode == "get")
{
for(unsigned int i=0; i < possibleResources.size(); i++)
return ret;
}
// A mpi parallel container register on zero node in NS
- containerNameInNS = _NS->BuildContainerNameForNS(params, GetMPIZeroNode(hostname,machFile).c_str());
+ std::string mpiZeroNode = GetMPIZeroNode(resource_selected,machFile).c_str();
+ containerNameInNS = _NS->BuildContainerNameForNS(params, mpiZeroNode.c_str());
}
else
containerNameInNS = _NS->BuildContainerNameForNS(params, hostname.c_str());
else
{
ASSERT(GetenvThreadSafe("APPLI"));
- command += GetenvThreadSafe("APPLI");
+ command += GetenvThreadSafeAsString("APPLI");
}
command += "/runRemote.sh ";
ASSERT(GetenvThreadSafe("NSHOST"));
- command += GetenvThreadSafe("NSHOST"); // hostname of CORBA name server
+ command += GetenvThreadSafeAsString("NSHOST"); // hostname of CORBA name server
command += " ";
ASSERT(GetenvThreadSafe("NSPORT"));
- command += GetenvThreadSafe("NSPORT"); // port of CORBA name server
+ command += GetenvThreadSafeAsString("NSPORT"); // port of CORBA name server
command += " \"ls /tmp >/dev/null 2>&1\"";
// Launch remote command
//redirect stdout and stderr in a file
#ifdef WIN32
- logFilename=GetenvThreadSafe("TEMP");
+ logFilename=GetenvThreadSafeAsString("TEMP");
logFilename += "\\";
- user = GetenvThreadSafe( "USERNAME" );
+ user = GetenvThreadSafeAsString( "USERNAME" );
#else
- user = GetenvThreadSafe( "USER" );
+ user = GetenvThreadSafeAsString( "USER" );
+ if (user.empty())
+ user = GetenvThreadSafeAsString( "LOGNAME" );
logFilename="/tmp";
char* val = GetenvThreadSafe("SALOME_TMP_DIR");
if(val)
//=============================================================================
/*!
* This is no longer valid (C++ container are also python containers)
- */
+ */
//=============================================================================
bool isPythonContainer(const char* ContainerName)
{
* ssh user@machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
* SALOME_Container containerName &"
- * - where user is ommited if not specified in CatalogResources,
+ * - where user is omitted if not specified in CatalogResources,
* - where distant path is always relative to user@machine $HOME, and
* equal to $APPLI if not specified in CatalogResources,
* - where hostNS is the hostname of CORBA naming server (set by scripts to
* use to launch SALOME and servers in $APPLI: runAppli.sh, runRemote.sh)
* - where workingdir is the requested working directory for the container.
* If WORKINGDIR (and workingdir) is not present the working dir will be $HOME
- */
+ */
//=============================================================================
std::string
nbproc = params.nb_proc;
}
- // "ssh -l user machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
+ // "ssh -l user machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir
// SALOME_Container containerName &"
command = getCommandToRunRemoteProcess(resInfo.Protocol, resInfo.HostName, resInfo.UserName);
else
{
ASSERT(GetenvThreadSafe("APPLI"));
- command += GetenvThreadSafe("APPLI"); // path relative to user@machine $HOME
+ command += GetenvThreadSafeAsString("APPLI"); // path relative to user@machine $HOME
}
command += "/runRemote.sh ";
ASSERT(GetenvThreadSafe("NSHOST"));
- command += GetenvThreadSafe("NSHOST"); // hostname of CORBA name server
+ command += GetenvThreadSafeAsString("NSHOST"); // hostname of CORBA name server
command += " ";
ASSERT(GetenvThreadSafe("NSPORT"));
- command += GetenvThreadSafe("NSPORT"); // port of CORBA name server
+ command += GetenvThreadSafeAsString("NSPORT"); // port of CORBA name server
std::string wdir = params.workingdir.in();
if(wdir != "")
if(wdir == "$TEMPDIR")
wdir="\\$TEMPDIR";
command += wdir; // requested working directory
- command += "'";
+ command += "'";
}
if(params.isMPI)
std::ostringstream o;
o << nbproc << " ";
command += o.str();
-#ifdef WITHLAM
+#ifdef LAM_MPI
command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
-#elif defined(WITHOPENMPI)
+#elif defined(OPEN_MPI)
if( GetenvThreadSafe("OMPI_URI_FILE") == NULL )
command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
else{
command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
- command += GetenvThreadSafe("OMPI_URI_FILE");
+ command += GetenvThreadSafeAsString("OMPI_URI_FILE");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
command += "-nameserver " + Kernel_Utils::GetHostname();
-#endif
+#endif
command += " SALOME_MPIContainer ";
}
else
//=============================================================================
/*!
* builds the command to be launched.
- */
+ */
//=============================================================================
std::string SALOME_ContainerManager::BuildCommandToLaunchLocalContainer(const Engines::ContainerParameters& params, const std::string& machinesFile, const std::string& container_exe, std::string& tmpFileName) const
{
if( GetenvThreadSafe("LIBBATCH_NODEFILE") != NULL )
o << "-machinefile " << machinesFile << " ";
-#ifdef WITHLAM
+#ifdef LAM_MPI
o << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
-#elif defined(WITHOPENMPI)
+#elif defined(OPEN_MPI)
if( GetenvThreadSafe("OMPI_URI_FILE") == NULL )
o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
else
{
o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
- o << GetenvThreadSafe("OMPI_URI_FILE");
+ o << GetenvThreadSafeAsString("OMPI_URI_FILE");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
o << "-nameserver " + Kernel_Utils::GetHostname();
#endif
/*!
* removes the generated temporary file in case of a remote launch.
* This method is thread safe
- */
+ */
//=============================================================================
void SALOME_ContainerManager::RmTmpFile(std::string& tmpFileName)
{
- int lenght = tmpFileName.size();
- if ( lenght > 0)
+ int length = tmpFileName.size();
+ if ( length > 0)
{
#ifdef WIN32
std::string command = "del /F ";
#else
- std::string command = "rm ";
+ std::string command = "rm ";
#endif
- if ( lenght > 4 )
- command += tmpFileName.substr(0, lenght - 3 );
+ if ( length > 4 )
+ command += tmpFileName.substr(0, length - 3 );
else
command += tmpFileName;
command += '*';
//=============================================================================
/*!
* add to command all options relative to naming service.
- */
+ */
//=============================================================================
void SALOME_ContainerManager::AddOmninamesParams(std::string& command) const
//=============================================================================
/*!
* add to command all options relative to naming service.
- */
+ */
//=============================================================================
void SALOME_ContainerManager::AddOmninamesParams(std::ostream& fileStream) const
//=============================================================================
/*!
* add to command all options relative to naming service.
- */
+ */
//=============================================================================
void SALOME_ContainerManager::AddOmninamesParams(std::ostream& fileStream, SALOME_NamingService *ns)
void SALOME_ContainerManager::MakeTheCommandToBeLaunchedASync(std::string& command)
{
#ifdef WIN32
- command = "%PYTHONBIN% -c \"import win32pm ; win32pm.spawnpid(r'" + command + "', '')\"";
+ command = "%PYTHONBIN% -c \"import subprocess ; subprocess.Popen(r'" + command + "').pid\"";
#else
command += " &";
#endif
int count(TIME_OUT_TO_LAUNCH_CONT);
if (GetenvThreadSafe("TIMEOUT_TO_LAUNCH_CONTAINER") != 0)
{
- std::string new_count_str(GetenvThreadSafe("TIMEOUT_TO_LAUNCH_CONTAINER"));
+ std::string new_count_str(GetenvThreadSafeAsString("TIMEOUT_TO_LAUNCH_CONTAINER"));
int new_count;
std::istringstream ss(new_count_str);
if (!(ss >> new_count))
//=============================================================================
/*!
* generate a file name in /tmp directory
- */
+ */
//=============================================================================
std::string SALOME_ContainerManager::BuildTemporaryFileName()
{
//build more complex file name to support multiple salome session
std::string aFileName = Kernel_Utils::GetTmpFileName();
+ std::ostringstream str_pid;
+ str_pid << ::getpid();
+ aFileName = aFileName + "-" + str_pid.str();
#ifndef WIN32
aFileName += ".sh";
#else
//=============================================================================
/*!
* Builds in a temporary file the script to be launched.
- *
+ *
* Used if SALOME Application ($APPLI) is not defined.
* The command is build with data from CatalogResources, in which every path
* used on remote computer must be defined.
- */
+ */
//=============================================================================
std::string SALOME_ContainerManager::BuildTempFileToLaunchRemoteContainer (const std::string& resource_name, const Engines::ContainerParameters& params, std::string& tmpFileName) const
std::ostringstream o;
tempOutputFile << nbproc << " ";
-#ifdef WITHLAM
+#ifdef LAM_MPI
tempOutputFile << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
-#elif defined(WITHOPENMPI)
+#elif defined(OPEN_MPI)
if( GetenvThreadSafe("OMPI_URI_FILE") == NULL )
tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
else{
tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
- tempOutputFile << GetenvThreadSafe("OMPI_URI_FILE");
+ tempOutputFile << GetenvThreadSafeAsString("OMPI_URI_FILE");
}
-#elif defined(WITHMPICH)
+#elif defined(MPICH)
tempOutputFile << "-nameserver " + Kernel_Utils::GetHostname();
#endif
}
- tempOutputFile << GetenvThreadSafe("KERNEL_ROOT_DIR") << "/bin/salome/";
+ tempOutputFile << GetenvThreadSafeAsString("KERNEL_ROOT_DIR") << "/bin/salome/";
if (params.isMPI)
{
throw SALOME_Exception("Unknown protocol");
if(status)
- throw SALOME_Exception("Error of connection on remote host");
+ throw SALOME_Exception("Error of connection on remote host");
command += resInfo.HostName;
command += " ";
std::string zeronode;
std::string command;
std::string tmpFile = BuildTemporaryFileName();
+ const ParserResourcesType resInfo(_resManager->GetResourceDefinition(machine));
+
+ if(resInfo.Protocol == sh)
+ {
+ return resInfo.HostName;
+ }
if( GetenvThreadSafe("LIBBATCH_NODEFILE") == NULL )
{
if (_isAppliSalomeDefined)
{
- const ParserResourcesType resInfo(_resManager->GetResourceDefinition(machine));
if (resInfo.Protocol == rsh)
command = "rsh ";
else
{
ASSERT(GetenvThreadSafe("APPLI"));
- command += GetenvThreadSafe("APPLI"); // path relative to user@machine $HOME
+ command += GetenvThreadSafeAsString("APPLI"); // path relative to user@machine $HOME
}
command += "/runRemote.sh ";
ASSERT(GetenvThreadSafe("NSHOST"));
- command += GetenvThreadSafe("NSHOST"); // hostname of CORBA name server
+ command += GetenvThreadSafeAsString("NSHOST"); // hostname of CORBA name server
command += " ";
ASSERT(GetenvThreadSafe("NSPORT"));
- command += GetenvThreadSafe("NSPORT"); // port of CORBA name server
+ command += GetenvThreadSafeAsString("NSPORT"); // port of CORBA name server
command += " mpirun -np 1 hostname -s > " + tmpFile;
}
std::string SALOME_ContainerManager::machinesFile(const int nbproc)
{
std::string tmp;
- std::string nodesFile = GetenvThreadSafe("LIBBATCH_NODEFILE");
+ std::string nodesFile = GetenvThreadSafeAsString("LIBBATCH_NODEFILE");
std::string machinesFile = Kernel_Utils::GetTmpFileName();
std::ifstream fpi(nodesFile.c_str(),std::ios::in);
std::ofstream fpo(machinesFile.c_str(),std::ios::out);
}
-std::set<pid_t> SALOME_ContainerManager::getpidofprogram(const std::string program)
-{
- std::set<pid_t> thepids;
- std::string tmpFile = Kernel_Utils::GetTmpFileName();
- std::string cmd;
- std::string thepid;
- cmd = "pidof " + program + " > " + tmpFile;
- SystemThreadSafe(cmd.c_str());
- std::ifstream fpi(tmpFile.c_str(),std::ios::in);
- while(fpi >> thepid){
- thepids.insert(atoi(thepid.c_str()));
- }
- return thepids;
-}
-
std::string SALOME_ContainerManager::getCommandToRunRemoteProcess(AccessProtocolType protocol,
const std::string & hostname,
const std::string & username)
return command.str();
}
-bool
+bool
SALOME_ContainerManager::checkPaCOParameters(Engines::ContainerParameters & params, std::string resource_selected)
{
bool result = true;
-
+
// Step 1 : check ContainerParameters
// Check container_name, has to be defined
if (std::string(params.container_name.in()) == "")
return result;
}
+/*
+ * WARNING: do not convert the returned value directly to std::string.
+ * This function may return NULL if the environment variable is not defined,
+ * and std::string(NULL) causes undefined behavior.
+ * Use GetenvThreadSafeAsString() to safely obtain a std::string.
+ */
char *SALOME_ContainerManager::GetenvThreadSafe(const char *name)
{// getenv is not thread safe. See man 7 pthread.
Utils_Locker lock (&_getenvMutex);
return getenv(name);
}
+/*
+ * Returns the value of the given environment variable as a std::string.
+ * Returns an empty string if the variable is not set.
+ */
+std::string SALOME_ContainerManager::GetenvThreadSafeAsString(const char *name)
+{
+ char* var = GetenvThreadSafe(name);
+ return var ? std::string(var) : std::string();
+}
+
int SALOME_ContainerManager::SystemThreadSafe(const char *command)
{
Utils_Locker lock (&_systemMutex);
INFOS("[StartPaCOPPContainer] on resource : " << resource_selected);
// Step 2 : Get a MachineFile for the parallel container
- std::string machine_file_name = _ResManager->getMachineFile(resource_selected,
+ std::string machine_file_name = _resManager->getMachineFile(resource_selected,
params.nb_proc,
params.parallelLib.in());
// Step 3 : starting parallel container proxy
std::string command_proxy("");
std::string proxy_machine;
- try
+ try
{
command_proxy = BuildCommandToLaunchPaCOProxyContainer(params, machine_file_name, proxy_machine);
}
// Step 4 : starting parallel container nodes
std::string command_nodes("");
SALOME_ContainerManager::actual_launch_machine_t nodes_machines;
- try
+ try
{
command_nodes = BuildCommandToLaunchPaCONodeContainer(params, machine_file_name, nodes_machines, proxy_machine);
}
{
INFOS("[StarPaCOPPContainer] LaunchPaCONodeContainer failed !");
// Il faut tuer le proxy
- try
+ try
{
Engines::Container_var proxy = Engines::Container::_narrow(container_proxy);
proxy->Shutdown();
}
catch (...)
{
- INFOS("[StarPaCOPPContainer] Exception catched from proxy Shutdown...");
+ INFOS("[StarPaCOPPContainer] Exception caught from proxy Shutdown...");
}
return ret;
}
// Step 4 : connecting nodes and the proxy to actually create a parallel container
- for (int i = 0; i < params.nb_proc; i++)
+ for (int i = 0; i < params.nb_proc; i++)
{
std::ostringstream tmp;
tmp << i;
std::string theNodeMachine(nodes_machines[i]);
std::string containerNameInNS = _NS->BuildContainerNameForNS(container_node_name.c_str(), theNodeMachine.c_str());
obj = _NS->Resolve(containerNameInNS.c_str());
- if (CORBA::is_nil(obj))
+ if (CORBA::is_nil(obj))
{
INFOS("[StarPaCOPPContainer] CONNECTION FAILED From Naming Service !");
INFOS("[StarPaCOPPContainer] Container name is " << containerNameInNS);
}
// Step 5 : starting parallel container
- try
+ try
{
MESSAGE ("[StarPaCOPPContainer] Starting parallel object");
container_proxy->start();
}
catch(std::exception& exc)
{
- INFOS("Caught std::exception - "<<exc.what());
+ INFOS("Caught std::exception - "<<exc.what());
}
catch(...)
{
return ret;
}
-std::string
+std::string
SALOME_ContainerManager::BuildCommandToLaunchPaCOProxyContainer(const Engines::ContainerParameters& params,
std::string machine_file_name,
std::string & proxy_hostname)
std::string nb_proc_str = tmp_string.str();
// Get resource definition
- Engines::ResourceDefinition_var resource_definition =
- _ResManager->GetResourceDefinition(params.resource_params.name);
+ ParserResourcesType resource_definition =
+ _resManager->GetResourceDefinition(params.resource_params.name.in());
// Choose hostname
std::string hostname;
MESSAGE("[BuildCommandToLaunchPaCOProxyContainer] remote machine case detected !");
remote_execution = true;
}
-
+
// Log environnement
std::string log_type("");
char * get_val = GetenvThreadSafe("PARALLEL_LOG");
ASSERT(GetenvThreadSafe("NSHOST"));
ASSERT(GetenvThreadSafe("NSPORT"));
- command << resource_definition->protocol.in();
+ command << resource_definition.getAccessProtocolTypeStr();
command << " -l ";
- command << resource_definition->username.in();
+ command << resource_definition.UserName;
command << " " << hostname;
- command << " " << resource_definition->applipath.in();
+ command << " " << resource_definition.AppliPath;
command << "/runRemote.sh ";
- command << GetenvThreadSafe("NSHOST") << " "; // hostname of CORBA name server
- command << GetenvThreadSafe("NSPORT") << " "; // port of CORBA name server
+ command << GetenvThreadSafeAsString("NSHOST") << " "; // hostname of CORBA name server
+ command << GetenvThreadSafeAsString("NSPORT") << " "; // port of CORBA name server
}
command << exe_name;
return command.str();
}
-std::string
+std::string
SALOME_ContainerManager::BuildCommandToLaunchPaCONodeContainer(const Engines::ContainerParameters& params,
const std::string & machine_file_name,
SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
nb_proc_stream << params.nb_proc;
// Get resource definition
- Engines::ResourceDefinition_var resource_definition =
- _ResManager->GetResourceDefinition(params.resource_params.name);
-
+ ParserResourcesType resource_definition =
+ _resManager->GetResourceDefinition(params.resource_params.name.in());
+
// Log environnement
std::string log_type("");
char * get_val = GetenvThreadSafe("PARALLEL_LOG");
ASSERT(GetenvThreadSafe("NSHOST"));
ASSERT(GetenvThreadSafe("NSPORT"));
- command_node_stream << resource_definition->protocol.in();
+ command_node_stream << resource_definition.getAccessProtocolTypeStr();
command_node_stream << " -l ";
- command_node_stream << resource_definition->username.in();
+ command_node_stream << resource_definition.UserName;
command_node_stream << " " << hostname;
- command_node_stream << " " << resource_definition->applipath.in();
+ command_node_stream << " " << resource_definition.AppliPath;
command_node_stream << "/runRemote.sh ";
- command_node_stream << GetenvThreadSafe("NSHOST") << " "; // hostname of CORBA name server
- command_node_stream << GetenvThreadSafe("NSPORT") << " "; // port of CORBA name server
+ command_node_stream << GetenvThreadSafeAsString("NSHOST") << " "; // hostname of CORBA name server
+ command_node_stream << GetenvThreadSafeAsString("NSPORT") << " "; // port of CORBA name server
}
command_node_stream << exe_name;
if (last == std::string::npos)
last = -1;
- std::string protocol = resource_definition->protocol.in();
- if (protocol == "rsh")
+ if (resource_definition.Protocol == rsh)
command_remote_stream << "rcp ";
- else
+ else
command_remote_stream << "scp ";
command_remote_stream << machine_file_name << " ";
- command_remote_stream << resource_definition->username.in() << "@";
- command_remote_stream << hostname << ":" << resource_definition->applipath.in();
+ command_remote_stream << resource_definition.UserName << "@";
+ command_remote_stream << hostname << ":" << resource_definition.AppliPath;
command_remote_stream << "/" << machine_file_name.substr(last+1);
int status = SystemThreadSafe(command_remote_stream.str().c_str());
ASSERT(GetenvThreadSafe("NSHOST"));
ASSERT(GetenvThreadSafe("NSPORT"));
- command_nodes << resource_definition->protocol.in();
+ command_nodes << resource_definition.getAccessProtocolTypeStr();
command_nodes << " -l ";
- command_nodes << resource_definition->username.in();
+ command_nodes << resource_definition.UserName;
command_nodes << " " << hostname;
- command_nodes << " " << resource_definition->applipath.in();
+ command_nodes << " " << resource_definition.AppliPath;
command_nodes << "/runRemote.sh ";
- command_nodes << GetenvThreadSafe("NSHOST") << " "; // hostname of CORBA name server
- command_nodes << GetenvThreadSafe("NSPORT") << " "; // port of CORBA name server
+ command_nodes << GetenvThreadSafeAsString("NSHOST") << " "; // hostname of CORBA name server
+ command_nodes << GetenvThreadSafeAsString("NSPORT") << " "; // port of CORBA name server
}
- if (std::string(resource_definition->mpiImpl.in()) == "lam")
+ if (resource_definition.mpi == lam)
{
command_nodes << "mpiexec -ssi boot ";
- command_nodes << "-machinefile " << machine_file_name << " ";
+ command_nodes << "-machinefile " << machine_file_name << " ";
command_nodes << "-n " << params.nb_proc;
}
else
// We don't put hostname, because nodes are registered in the resource of the proxy
for (int i= 0; i < params.nb_proc; i++)
- vect_machine.push_back(proxy_hostname);
+ vect_machine.push_back(proxy_hostname);
command_nodes << command_end;
}
const std::string & exe_type,
const std::string & container_name,
const std::string & hostname,
- std::string & begin,
+ std::string & begin,
std::string & end)
{
if(log_type == "xterm")
{
// default into a file...
std::string logFilename = "/tmp/" + container_name + "_" + hostname + "_" + exe_type + "_";
- logFilename += std::string(GetenvThreadSafe("USER")) + ".log";
+ std::string user = GetenvThreadSafeAsString("USER");
+ if (user.empty())
+ user = GetenvThreadSafeAsString("LOGNAME");
+ logFilename += user + ".log";
end = " > " + logFilename + " 2>&1 & ";
}
}
-CORBA::Object_ptr
-SALOME_ContainerManager::LaunchPaCOProxyContainer(const std::string& command,
+CORBA::Object_ptr
+SALOME_ContainerManager::LaunchPaCOProxyContainer(const std::string& command,
const Engines::ContainerParameters& params,
const std::string & hostname)
{
int count(GetTimeOutToLoaunchServer());
CORBA::Object_var obj = CORBA::Object::_nil();
- std::string containerNameInNS = _NS->BuildContainerNameForNS(params.container_name.in(),
+ std::string containerNameInNS = _NS->BuildContainerNameForNS(params.container_name.in(),
hostname.c_str());
MESSAGE("[LaunchParallelContainer] Waiting for Parallel Container proxy : " << containerNameInNS);
- while (CORBA::is_nil(obj) && count)
+ while (CORBA::is_nil(obj) && count)
{
sleep(1);
count--;
obj = _NS->Resolve(containerNameInNS.c_str());
}
- try
+ try
{
container_proxy = PaCO::InterfaceManager::_narrow(obj);
}
//=============================================================================
/*! This method launches the parallel container.
- * It will may be placed on the ressources manager.
+ * It will may be placed on the resources manager.
*
* \param command to launch
* \param container's parameters
*/
//=============================================================================
bool
-SALOME_ContainerManager::LaunchPaCONodeContainer(const std::string& command,
+SALOME_ContainerManager::LaunchPaCONodeContainer(const std::string& command,
const Engines::ContainerParameters& params,
const std::string& name,
SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
INFOS("[LaunchPaCONodeContainer] Waiting for the nodes of the parallel container");
// We are waiting all the nodes
- for (int i = 0; i < params.nb_proc; i++)
+ for (int i = 0; i < params.nb_proc; i++)
{
CORBA::Object_var obj = CORBA::Object::_nil();
std::string theMachine(vect_machine[i]);
return ret;
}
-std::string
+std::string
SALOME_ContainerManager::BuildCommandToLaunchPaCOProxyContainer(const Engines::ContainerParameters& params,
std::string machine_file_name,
std::string & proxy_hostname)
return "";
}
-std::string
+std::string
SALOME_ContainerManager::BuildCommandToLaunchPaCONodeContainer(const Engines::ContainerParameters& params,
const std::string & machine_file_name,
- SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
- const std::string & proxy_hostname)
+ SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
+ const std::string & proxy_hostname)
{
return "";
}
-void
+void
SALOME_ContainerManager::LogConfiguration(const std::string & log_type,
const std::string & exe_type,
const std::string & container_name,
const std::string & hostname,
- std::string & begin,
+ std::string & begin,
std::string & end)
{
}
-CORBA::Object_ptr
-SALOME_ContainerManager::LaunchPaCOProxyContainer(const std::string& command,
+CORBA::Object_ptr
+SALOME_ContainerManager::LaunchPaCOProxyContainer(const std::string& command,
const Engines::ContainerParameters& params,
const std::string& hostname)
{
return ret;
}
-bool
-SALOME_ContainerManager::LaunchPaCONodeContainer(const std::string& command,
+bool
+SALOME_ContainerManager::LaunchPaCONodeContainer(const std::string& command,
const Engines::ContainerParameters& params,
const std::string& name,
SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
return false;
}
#endif
-