1 // Copyright (C) 2007-2008 CEA/DEN, EDF R&D, OPEN CASCADE
3 // Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
4 // CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
6 // This library is free software; you can redistribute it and/or
7 // modify it under the terms of the GNU Lesser General Public
8 // License as published by the Free Software Foundation; either
9 // version 2.1 of the License.
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 // Lesser General Public License for more details.
16 // You should have received a copy of the GNU Lesser General Public
17 // License along with this library; if not, write to the Free Software
18 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
22 #include "SALOME_ContainerManager.hxx"
23 #include "SALOME_NamingService.hxx"
24 #include "SALOME_ModuleCatalog.hh"
25 #include "Basics_Utils.hxx"
26 #include "Basics_DirUtils.hxx"
27 #include <sys/types.h>
33 #include "Utils_CorbaException.hxx"
36 #ifdef WITH_PACO_PARALLEL
40 #define TIME_OUT_TO_LAUNCH_CONT 61
44 const char *SALOME_ContainerManager::_ContainerManagerNameInNS =
47 omni_mutex SALOME_ContainerManager::_numInstanceMutex;
50 //=============================================================================
54  * Define a CORBA single thread policy for the server, which avoids dealing
55  * with non thread-safe usage, such as Change_Directory in the SALOME naming service
57 //=============================================================================
// Constructor.
// Creates a dedicated single-thread POA ("SThreadPOA") so that calls which
// are not thread-safe (e.g. Change_Directory in the SALOME naming service)
// are serialized, activates this servant on it, registers the reference in
// the naming service, and — when OMPI_URI_FILE is set — (re)starts an
// ompi-server for OpenMPI name publication.
// NOTE(review): this listing is truncated (some statements/braces are
// missing); comments describe only the visible logic.
59 SALOME_ContainerManager::SALOME_ContainerManager(CORBA::ORB_ptr orb, PortableServer::POA_var poa, SALOME_ResourcesManager *rm, SALOME_NamingService *ns):_nbprocUsed(0)
61 MESSAGE("constructor");
65 PortableServer::POAManager_var pman = poa->the_POAManager();
66 _orb = CORBA::ORB::_duplicate(orb) ;
67 CORBA::PolicyList policies;
// Single-thread policy: all requests to this POA are dispatched serially.
69 PortableServer::ThreadPolicy_var threadPol =
70 poa->create_thread_policy(PortableServer::SINGLE_THREAD_MODEL);
71 policies[0] = PortableServer::ThreadPolicy::_duplicate(threadPol);
73 _poa = poa->create_POA("SThreadPOA",pman,policies);
// Activate this servant and publish its reference in the naming service.
75 PortableServer::ObjectId_var id = _poa->activate_object(this);
76 CORBA::Object_var obj = _poa->id_to_reference(id);
77 Engines::ContainerManager_var refContMan =
78 Engines::ContainerManager::_narrow(obj);
80 _NS->Register(refContMan,_ContainerManagerNameInNS);
// Remember whether a SALOME application directory is defined ($APPLI):
// this drives the remote-launch strategy (runRemote.sh vs temp script).
81 _isAppliSalomeDefined = (getenv("APPLI") != 0);
// OpenMPI support: kill any stale ompi-server, then restart it with the
// URI file given by OMPI_URI_FILE.
85 if( getenv("OMPI_URI_FILE") != NULL ){
86 system("killall ompi-server");
88 command = "ompi-server -r ";
89 command += getenv("OMPI_URI_FILE");
90 int status=system(command.c_str());
// presumably guarded by a status check on the line elided above — TODO confirm
92 throw SALOME_Exception("Error when launching ompi-server");
97 MESSAGE("constructor end");
100 //=============================================================================
104 //=============================================================================
// Destructor.
// Kills the ompi-server started by the constructor, if one was launched
// (i.e. when OMPI_URI_FILE is set in the environment).
106 SALOME_ContainerManager::~SALOME_ContainerManager()
108 MESSAGE("destructor");
111 if( getenv("OMPI_URI_FILE") != NULL )
112 system("killall ompi-server");
117 //=============================================================================
118 //! shutdown all the containers, then the ContainerManager servant
121 //=============================================================================
// Shutdown: asks every known container to shut down, then unregisters and
// deactivates this ContainerManager servant.
123 void SALOME_ContainerManager::Shutdown()
126 ShutdownContainers();
// Remove our entry from the naming service before deactivation so clients
// can no longer resolve a dead reference.
127 _NS->Destroy_Name(_ContainerManagerNameInNS);
128 PortableServer::ObjectId_var oid = _poa->servant_to_id(this);
129 _poa->deactivate_object(oid);
132 //=============================================================================
133 //! Loop on all the containers listed in naming service, ask shutdown on each
136 //=============================================================================
// Loop over all containers registered under /Containers in the naming
// service and ask each one to shut down.  CORBA errors on individual
// containers are logged and ignored so one dead container does not stop
// the sweep.
// NOTE(review): listing truncated — the actual Shutdown() call on each
// container is on a line elided from this view.
138 void SALOME_ContainerManager::ShutdownContainers()
140 MESSAGE("ShutdownContainers");
142 isOK = _NS->Change_Directory("/Containers");
// First pass: collect the names that resolve to live Container references.
144 vector<string> vec = _NS->list_directory_recurs();
145 list<string> lstCont;
146 for(vector<string>::iterator iter = vec.begin();iter!=vec.end();iter++)
149 CORBA::Object_var obj=_NS->Resolve((*iter).c_str());
152 Engines::Container_var cont=Engines::Container::_narrow(obj);
153 if(!CORBA::is_nil(cont))
154 lstCont.push_back((*iter));
156 catch(const CORBA::Exception& e)
158 // ignore this entry and continue
161 MESSAGE("Container list: ");
162 for(list<string>::iterator iter=lstCont.begin();iter!=lstCont.end();iter++){
// Second pass: resolve each collected name again and shut the container down.
165 for(list<string>::iterator iter=lstCont.begin();iter!=lstCont.end();iter++)
170 CORBA::Object_var obj=_NS->Resolve((*iter).c_str());
171 Engines::Container_var cont=Engines::Container::_narrow(obj);
172 if(!CORBA::is_nil(cont))
174 MESSAGE("ShutdownContainers: " << (*iter));
178 MESSAGE("ShutdownContainers: no container ref for " << (*iter));
// All exception paths are best-effort: log and keep iterating.
180 catch(CORBA::SystemException& e)
182 INFOS("CORBA::SystemException ignored : " << e);
184 catch(CORBA::Exception&)
186 INFOS("CORBA::Exception ignored.");
190 INFOS("Unknown exception ignored.");
196 //=============================================================================
197 //! Give a suitable Container given constraints
199 * \param params Container Parameters required for the container
200 * \return the container or nil
202 //=============================================================================
// Give a suitable container satisfying the given constraints.
// Supported modes (params.mode): "start" (default), "find", "findorstart",
// "get", "getorstart".  Returns the container reference, or nil on failure.
// The method selects a resource through the ResourcesManager, reuses an
// existing container when the mode allows it, otherwise builds a launch
// command (local, remote or MPI) and spawns a new container via system(),
// then polls the naming service until the new container registers itself.
// NOTE(review): this listing is truncated (several lines/braces elided);
// comments below describe only the visible logic.
203 Engines::Container_ptr
204 SALOME_ContainerManager::GiveContainer(const Engines::ContainerParameters& params)
206 Engines::Container_ptr ret = Engines::Container::_nil();
208 // Step 0: Default mode is start
209 Engines::ContainerParameters local_params(params);
210 if (std::string(local_params.mode.in()) == "")
211 local_params.mode = CORBA::string_dup("start");
212 std::string mode = local_params.mode.in();
213 MESSAGE("[GiveContainer] starting with mode: " << mode);
215 // Step 1: Find Container for find and findorstart mode
216 if (mode == "find" or mode == "findorstart")
218 ret = FindContainer(params, params.resource_params.resList);
219 if(!CORBA::is_nil(ret))
225 MESSAGE("[GiveContainer] no container found");
235 // Step 2: Get all possibleResources from the parameters
236 Engines::ResourceList_var possibleResources = _ResManager->GetFittingResources(local_params.resource_params);
237 MESSAGE("[GiveContainer] - length of possible resources " << possibleResources->length());
238 std::vector<std::string> local_resources;
240 // Step 3: if mode is "get" keep only machines with existing containers
243 for(unsigned int i=0; i < possibleResources->length(); i++)
245 Engines::Container_ptr cont = FindContainer(params, possibleResources[i].in());
248 if(!cont->_non_existent())
249 local_resources.push_back(string(possibleResources[i]));
251 catch(CORBA::Exception&) {}
254 // if local_resources is empty, we cannot give a container
255 if (local_resources.size() == 0)
257 MESSAGE("[GiveContainer] cannot find a container for mode get");
// For non-"get" modes every fitting resource is a candidate.
262 for(unsigned int i=0; i < possibleResources->length(); i++)
263 local_resources.push_back(string(possibleResources[i]));
265 // Step 4: select the resource where to get/start the container
266 std::string resource_selected;
269 resource_selected = _ResManager->GetImpl()->Find(params.resource_params.policy.in(), local_resources);
271 catch(const SALOME_Exception &ex)
273 MESSAGE("[GiveContainer] Exception in ResourceManager find !: " << ex.what());
276 MESSAGE("[GiveContainer] Resource selected is: " << resource_selected);
// _numInstanceMutex is held from here on; note that every early-return
// path below unlocks it explicitly before returning.
278 _numInstanceMutex.lock();
280 // Step 5: get container in the naming service
281 Engines::ResourceDefinition_var resource_definition = _ResManager->GetResourceDefinition(resource_selected.c_str());
282 std::string hostname(resource_definition->name.in());
283 std::string containerNameInNS;
// Compute the MPI process count from nb_node/nb_proc_per_node (either may
// be unset, in which case the other is used alone).
286 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
288 else if ( params.resource_params.nb_node == 0 )
289 nbproc = params.resource_params.nb_proc_per_node;
290 else if ( params.resource_params.nb_proc_per_node == 0 )
291 nbproc = params.resource_params.nb_node;
293 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
294 if( getenv("LIBBATCH_NODEFILE") != NULL )
295 machinesFile(nbproc);
296 // A mpi parallel container register on zero node in NS
297 containerNameInNS = _NS->BuildContainerNameForNS(params, GetMPIZeroNode(hostname).c_str());
300 containerNameInNS = _NS->BuildContainerNameForNS(params, hostname.c_str());
301 MESSAGE("[GiveContainer] Container name in the naming service: " << containerNameInNS);
303 // Step 6: check if the name exists in naming service
304 //if params.mode == "getorstart" or "get" use the existing container
305 //if params.mode == "start" shutdown the existing container before launching a new one with that name
306 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
307 if (!CORBA::is_nil(obj))
311 Engines::Container_var cont=Engines::Container::_narrow(obj);
312 if(!cont->_non_existent())
314 if(std::string(params.mode.in())=="getorstart" or std::string(params.mode.in())=="get"){
315 _numInstanceMutex.unlock();
316 return cont._retn(); /* the container exists and params.mode is getorstart or get use it*/
320 INFOS("[GiveContainer] A container is already registered with the name: " << containerNameInNS << ", shutdown the existing container");
321 cont->Shutdown(); // shutdown the registered container if it exists
325 catch(CORBA::Exception&)
327 INFOS("[GiveContainer] CORBA::Exception ignored when trying to get the container - we start a new one");
331 // Step 7: type of container: PaCO, Exe, Mpi or Classic
332 // Mpi already tested in step 5, specific code on BuildCommandToLaunch Local/Remote Container methods
333 // TODO -> separates Mpi from Classic/Exe
335 std::string parallelLib(params.parallelLib);
336 if (std::string(local_params.parallelLib.in()) != "")
338 INFOS("[GiveContainer] PaCO++ container are not currently available");
339 _numInstanceMutex.unlock();
// Consult the module catalog: if any requested component is declared with
// a CEXE implementation, that executable replaces the classic container.
343 std::string container_exe = "SALOME_Container"; // Classic container
347 CORBA::String_var container_exe_tmp;
348 CORBA::Object_var obj = _NS->Resolve("/Kernel/ModulCatalog");
349 SALOME_ModuleCatalog::ModuleCatalog_var Catalog = SALOME_ModuleCatalog::ModuleCatalog::_narrow(obj) ;
350 if (CORBA::is_nil (Catalog))
352 INFOS("[GiveContainer] Module Catalog is not found -> cannot launch a container");
353 _numInstanceMutex.unlock();
356 // Loop through component list
357 for(unsigned int i=0; i < local_params.resource_params.componentList.length(); i++)
359 const char* compoi = local_params.resource_params.componentList[i];
360 SALOME_ModuleCatalog::Acomponent_var compoInfo = Catalog->GetComponent(compoi);
361 if (CORBA::is_nil (compoInfo))
365 SALOME_ModuleCatalog::ImplType impl=compoInfo->implementation_type();
366 container_exe_tmp=compoInfo->implementation_name();
367 if(impl==SALOME_ModuleCatalog::CEXE)
// Two different CEXE components cannot share one container process.
371 INFOS("ContainerManager Error: you can't have 2 CEXE component in the same container" );
372 _numInstanceMutex.unlock();
373 return Engines::Container::_nil();
375 MESSAGE("[GiveContainer] Exe container found !: " << container_exe_tmp);
376 container_exe = container_exe_tmp.in();
381 catch (ServiceUnreachable&)
383 INFOS("Caught exception: Naming Service Unreachable");
384 _numInstanceMutex.unlock();
389 INFOS("Caught unknown exception.");
390 _numInstanceMutex.unlock();
394 // Step 8: start a new container
395 MESSAGE("[GiveContainer] Try to launch a new container on " << resource_selected);
397 // if a parallel container is launched in batch job, command is: "mpirun -np nbproc -machinefile nodesfile SALOME_MPIContainer"
398 if( getenv("LIBBATCH_NODEFILE") != NULL && params.isMPI )
399 command = BuildCommandToLaunchLocalContainer(params,container_exe);
400 // if a container is launched on localhost, command is "SALOME_Container" or "mpirun -np nbproc SALOME_MPIContainer"
401 else if(hostname == Kernel_Utils::GetHostname())
402 command = BuildCommandToLaunchLocalContainer(params, container_exe);
403 // if a container is launched in remote mode, command is "ssh resource_selected SALOME_Container" or "ssh resource_selected mpirun -np nbproc SALOME_MPIContainer"
405 command = BuildCommandToLaunchRemoteContainer(resource_selected, params, container_exe);
407 //redirect stdout and stderr in a file
409 string logFilename=getenv("TEMP");
412 string logFilename="/tmp";
// SALOME_TMP_DIR overrides /tmp only when it names an existing directory.
413 char* val = getenv("SALOME_TMP_DIR");
416 struct stat file_info;
417 stat(val, &file_info);
418 bool is_dir = S_ISDIR(file_info.st_mode);
419 if (is_dir)logFilename=val;
420 else std::cerr << "SALOME_TMP_DIR environment variable is not a directory use /tmp instead" << std::endl;
424 logFilename += _NS->ContainerName(params)+"_"+ resource_selected +"_"+getenv( "USER" )+".log" ;
425 command += " > " + logFilename + " 2>&1";
// On Windows the command is detached through win32pm so system() returns
// without waiting for the container process.
427 command = "%PYTHONBIN% -c \"import win32pm ; win32pm.spawnpid(r'" + command + "', '')\"";
432 // launch container with a system call
433 int status=system(command.c_str());
435 _numInstanceMutex.unlock();
// Status -1 / 217 indicate the launch command itself failed (e.g. rsh/ssh
// error); clean up the generated command file and give up.
438 MESSAGE("SALOME_ContainerManager::StartContainer rsh failed (system command status -1)");
439 RmTmpFile(_TmpFileName); // command file can be removed here
440 return Engines::Container::_nil();
442 else if (status == 217){
443 MESSAGE("SALOME_ContainerManager::StartContainer rsh failed (system command status 217)");
444 RmTmpFile(_TmpFileName); // command file can be removed here
445 return Engines::Container::_nil();
// Poll the naming service until the container registers itself or the
// TIME_OUT_TO_LAUNCH_CONT budget is exhausted.
449 int count = TIME_OUT_TO_LAUNCH_CONT;
450 MESSAGE("[GiveContainer] waiting " << count << " second steps");
451 while (CORBA::is_nil(ret) && count)
459 MESSAGE("[GiveContainer] step " << count << " Waiting for container on " << resource_selected);
460 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
461 ret=Engines::Container::_narrow(obj);
463 if (CORBA::is_nil(ret))
465 INFOS("[GiveContainer] was not able to launch container " << containerNameInNS);
469 // Setting log file name
// Build "user@host:path" and store it on the container for later retrieval.
470 logFilename=":"+logFilename;
471 logFilename="@"+Kernel_Utils::GetHostname()+logFilename;
472 logFilename=getenv( "USER" )+logFilename;
473 ret->logfilename(logFilename.c_str());
474 RmTmpFile(_TmpFileName); // command file can be removed here
480 //=============================================================================
481 //! Find a container given constraints (params) on a list of machines (possibleComputers)
485 //=============================================================================
// Find a container matching params on any of the given resources.
// Tries each resource in order and returns the first non-nil container
// found; returns nil when none of the resources hosts a matching container.
487 Engines::Container_ptr
488 SALOME_ContainerManager::FindContainer(const Engines::ContainerParameters& params,
489 const Engines::ResourceList& possibleResources)
491 MESSAGE("[FindContainer] FindContainer on " << possibleResources.length() << " resources");
492 for(unsigned int i=0; i < possibleResources.length();i++)
494 Engines::Container_ptr cont = FindContainer(params, possibleResources[i].in());
495 if(!CORBA::is_nil(cont))
498 MESSAGE("[FindContainer] no container found");
499 return Engines::Container::_nil();
502 //=============================================================================
503 //! Find a container given constraints (params) on a machine (theMachine)
507 //=============================================================================
// Find a container matching params on one specific resource.
// Resolves the container's naming-service entry; returns nil when the name
// does not resolve, the object no longer exists, or any CORBA error occurs.
509 Engines::Container_ptr
510 SALOME_ContainerManager::FindContainer(const Engines::ContainerParameters& params,
511 const std::string& resource)
513 std::string containerNameInNS(_NS->BuildContainerNameForNS(params, resource.c_str()));
514 MESSAGE("[FindContainer] Try to find a container " << containerNameInNS << " on resource " << resource);
515 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
// _non_existent() pings the object: a stale NS entry yields nil here.
518 if(obj->_non_existent())
519 return Engines::Container::_nil();
521 return Engines::Container::_narrow(obj);
523 catch(const CORBA::Exception& e)
525 return Engines::Container::_nil();
529 //=============================================================================
531  * This is no longer valid (C++ containers are also Python containers)
533 //=============================================================================
// Heuristic: a container whose name ends with "Py" is treated as a Python
// container.  (As noted above, this convention is considered obsolete.)
// NOTE(review): listing truncated — the return statements are elided.
534 bool isPythonContainer(const char* ContainerName)
537 int len = strlen(ContainerName);
540 if (strcmp(ContainerName + len - 2, "Py") == 0)
546 //=============================================================================
548 * Builds the script to be launched
550 * If SALOME Application not defined ($APPLI),
551 * see BuildTempFileToLaunchRemoteContainer()
553 * Else rely on distant configuration. Command is under the form (example):
554 * ssh user@machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
555 * SALOME_Container containerName &"
557  * - where user is omitted if not specified in CatalogResources,
558 * - where distant path is always relative to user@machine $HOME, and
559 * equal to $APPLI if not specified in CatalogResources,
560 * - where hostNS is the hostname of CORBA naming server (set by scripts to
561 * use to launch SALOME and servers in $APPLI: runAppli.sh, runRemote.sh)
562 * - where portNS is the port used by CORBA naming server (set by scripts to
563 * use to launch SALOME and servers in $APPLI: runAppli.sh, runRemote.sh)
564 * - where workingdir is the requested working directory for the container.
565 * If WORKINGDIR (and workingdir) is not present the working dir will be $HOME
567 //=============================================================================
// Build the shell command used to start a container on a remote resource.
// Without $APPLI, delegates to BuildTempFileToLaunchRemoteContainer();
// with $APPLI, builds an rsh/ssh invocation of runRemote.sh as described in
// the comment block above this function.
// NOTE(review): listing truncated (some branches/braces elided).
570 SALOME_ContainerManager::BuildCommandToLaunchRemoteContainer
571 (const string& resource_name,
572 const Engines::ContainerParameters& params, const std::string& container_exe)
576 if (!_isAppliSalomeDefined)
577 command = BuildTempFileToLaunchRemoteContainer(resource_name, params);
581 Engines::ResourceDefinition_var resource_definition = _ResManager->GetResourceDefinition(resource_name.c_str());
582 std::string hostname(resource_definition->name.in());
583 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesDescr(resource_name);
// Derive MPI process count from nb_node/nb_proc_per_node (same rule as in
// GiveContainer).
587 if ((params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0))
589 else if (params.resource_params.nb_node == 0)
590 nbproc = params.resource_params.nb_proc_per_node;
591 else if (params.resource_params.nb_proc_per_node == 0)
592 nbproc = params.resource_params.nb_node;
594 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
597 // "ssh -l user machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
598 //  SALOME_Container containerName &"
599 if (resInfo.Protocol == rsh)
601 else if (resInfo.Protocol == ssh)
604 throw SALOME_Exception("Unknown protocol");
// Optional user name from CatalogResources.
606 if (resInfo.UserName != "")
609 command += resInfo.UserName;
613 command += resInfo.HostName;
// Remote application path: CatalogResources entry, else local $APPLI
// (both are relative to the remote $HOME).
616 if (resInfo.AppliPath != "")
617 command += resInfo.AppliPath; // path relative to user@machine $HOME
620 ASSERT(getenv("APPLI"));
621 command += getenv("APPLI"); // path relative to user@machine $HOME
624 command += "/runRemote.sh ";
// Naming-service host/port are propagated so the remote container joins
// this session's CORBA name server.
626 ASSERT(getenv("NSHOST"));
627 command += getenv("NSHOST"); // hostname of CORBA name server
630 ASSERT(getenv("NSPORT"));
631 command += getenv("NSPORT"); // port of CORBA name server
633 std::string wdir = params.workingdir.in();
636 command += " WORKINGDIR ";
638 if(wdir == "$TEMPDIR")
640 command += wdir; // requested working directory
// MPI case: wrap the container executable in mpirun; environment variables
// are forwarded differently depending on the MPI flavor.
646 command += " mpirun -np ";
647 std::ostringstream o;
651 command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
652 #elif defined(WITHOPENMPI)
653 if( getenv("OMPI_URI_FILE") == NULL )
654 command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
656 command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
657 command += getenv("OMPI_URI_FILE");
660 command += " SALOME_MPIContainer ";
663 command += " " +container_exe+ " ";
665 command += _NS->ContainerName(params);
667 AddOmninamesParams(command);
669 MESSAGE("command =" << command);
675 //=============================================================================
677 * builds the command to be launched.
679 //=============================================================================
// Build the command used to start a container on the local host.
// Writes a temporary script (_TmpFileName) containing the optional mpirun
// prefix, the working-directory setup, the container executable and the
// naming-service options, makes it executable, and returns its path as the
// command to run.
// NOTE(review): listing truncated (some branches/braces elided).
681 SALOME_ContainerManager::BuildCommandToLaunchLocalContainer
682 (const Engines::ContainerParameters& params, const std::string& container_exe)
684 _TmpFileName = BuildTemporaryFileName();
// MPI process count from nb_node/nb_proc_per_node (same rule as elsewhere).
694 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
696 else if ( params.resource_params.nb_node == 0 )
697 nbproc = params.resource_params.nb_proc_per_node;
698 else if ( params.resource_params.nb_proc_per_node == 0 )
699 nbproc = params.resource_params.nb_node;
701 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
// In batch mode the machinefile produced from LIBBATCH_NODEFILE is passed
// to mpirun.
705 if( getenv("LIBBATCH_NODEFILE") != NULL )
706 o << "-machinefile " << _machinesFile << " ";
709 o << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
710 #elif defined(WITHOPENMPI)
711 if( getenv("OMPI_URI_FILE") == NULL )
712 o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
715 o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
716 o << getenv("OMPI_URI_FILE");
720 if (isPythonContainer(params.container_name))
721 o << " pyMPI SALOME_ContainerPy.py ";
723 o << " SALOME_MPIContainer ";
728 std::string wdir=params.workingdir.in();
731 // a working directory is requested
732 if(wdir == "$TEMPDIR")
734 // a new temporary directory is requested
735 string dir = Kernel_Utils::GetTmpDir();
// "cd /d" is the Windows form; plain "cd" the POSIX one.
737 o << "cd /d " << dir << endl;
739 o << "cd " << dir << ";";
745 // a permanent directory is requested use it or create it
747 o << "mkdir " + wdir << endl;
748 o << "cd /D " + wdir << endl;
750 o << "mkdir -p " << wdir << " && cd " << wdir + ";";
754 if (isPythonContainer(params.container_name))
755 o << "SALOME_ContainerPy.py ";
757 o << container_exe + " ";
761 o << _NS->ContainerName(params);
763 AddOmninamesParams(o);
// Persist the script and make it executable (0755, written as 0x1ED).
765 ofstream command_file( _TmpFileName.c_str() );
766 command_file << o.str();
767 command_file.close();
770 chmod(_TmpFileName.c_str(), 0x1ED);
772 command = _TmpFileName;
774 MESSAGE("Command is file ... " << command);
775 MESSAGE("Command is ... " << o.str());
780 //=============================================================================
782 * removes the generated temporary file in case of a remote launch.
784 //=============================================================================
// Remove the temporary command file generated for a (remote) launch, and
// remove its parent directory too when the removal leaves it empty.
// Uses "del /F"/"rmdir" on Windows and "rm"/"rmdir" on POSIX.
// NOTE(review): listing truncated — the platform #ifdefs and the length
// guard are partly elided; the substr(0, lenght-3) path presumably strips
// a platform-specific suffix — TODO confirm against the full source.
786 void SALOME_ContainerManager::RmTmpFile(std::string& tmpFileName)
788 int lenght = tmpFileName.size();
792 string command = "del /F ";
794 string command = "rm ";
797 command += tmpFileName.substr(0, lenght - 3 );
799 command += tmpFileName;
801 system(command.c_str());
802 //if dir is empty - remove it
803 string tmp_dir = Kernel_Utils::GetDirByPath( tmpFileName );
804 if ( Kernel_Utils::IsEmptyDir( tmp_dir ) )
807 command = "del /F " + tmp_dir;
809 command = "rmdir " + tmp_dir;
811 system(command.c_str());
816 //=============================================================================
818 * add to command all options relative to naming service.
820 //=============================================================================
// Append the omniORB naming-service option ("ORBInitRef NameService=<IOR>")
// to a command string.
822 void SALOME_ContainerManager::AddOmninamesParams(string& command) const
824 CORBA::String_var iorstr = _NS->getIORaddr();
825 command += "ORBInitRef NameService=";
829 //=============================================================================
831 * add to command all options relative to naming service.
833 //=============================================================================
// Append the omniORB naming-service option ("ORBInitRef NameService=<IOR>")
// to an already-open command file stream.
835 void SALOME_ContainerManager::AddOmninamesParams(ofstream& fileStream) const
837 CORBA::String_var iorstr = _NS->getIORaddr();
838 fileStream << "ORBInitRef NameService=";
839 fileStream << iorstr;
842 //=============================================================================
844 * add to command all options relative to naming service.
846 //=============================================================================
// Append the omniORB naming-service option ("ORBInitRef NameService=<IOR>")
// to a string stream being used to build a command.
848 void SALOME_ContainerManager::AddOmninamesParams(ostringstream& oss) const
850 CORBA::String_var iorstr = _NS->getIORaddr();
851 oss << "ORBInitRef NameService=";
855 //=============================================================================
857 * generate a file name in /tmp directory
859 //=============================================================================
// Generate a temporary file name (under the system temp directory) used for
// the container launch script; built from Kernel_Utils::GetTmpFileName() so
// multiple SALOME sessions do not collide.
// NOTE(review): listing truncated — the suffix/return lines are elided.
861 string SALOME_ContainerManager::BuildTemporaryFileName() const
863 //build more complex file name to support multiple salome session
864 string aFileName = Kernel_Utils::GetTmpFileName();
873 //=============================================================================
875 * Builds in a temporary file the script to be launched.
877 * Used if SALOME Application ($APPLI) is not defined.
878 * The command is build with data from CatalogResources, in which every path
879 * used on remote computer must be defined.
881 //=============================================================================
// Build, in a temporary shell script, the command to launch a container on
// a remote host when $APPLI is not defined.  The script is copied to the
// remote host with rcp/scp (per the CatalogResources protocol) and the
// returned command executes it there via rsh/ssh.  Throws SALOME_Exception
// on unknown protocol or copy failure.
// NOTE(review): listing truncated (some branches/braces elided).
884 SALOME_ContainerManager::BuildTempFileToLaunchRemoteContainer
885 (const string& resource_name,
886 const Engines::ContainerParameters& params) throw(SALOME_Exception)
890 _TmpFileName = BuildTemporaryFileName();
891 ofstream tempOutputFile;
892 tempOutputFile.open(_TmpFileName.c_str(), ofstream::out );
893 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesDescr(resource_name);
894 tempOutputFile << "#! /bin/sh" << endl;
898 tempOutputFile << "export SALOME_trace=local" << endl; // mkr : 27.11.2006 : PAL13967 - Distributed supervision graphs - Problem with "SALOME_trace"
899 //tempOutputFile << "source " << resInfo.PreReqFilePath << endl;
// MPI case: prefix the container with mpirun -np <nbproc> and forward the
// environment variables required by the MPI flavor in use.
905 tempOutputFile << "mpirun -np ";
908 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
910 else if ( params.resource_params.nb_node == 0 )
911 nbproc = params.resource_params.nb_proc_per_node;
912 else if ( params.resource_params.nb_proc_per_node == 0 )
913 nbproc = params.resource_params.nb_node;
915 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
917 std::ostringstream o;
919 tempOutputFile << nbproc << " ";
921 tempOutputFile << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
922 #elif defined(WITHOPENMPI)
923 if( getenv("OMPI_URI_FILE") == NULL )
924 tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
926 tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
927 tempOutputFile << getenv("OMPI_URI_FILE");
// Container executables are taken from the local KERNEL installation path.
932 tempOutputFile << getenv("KERNEL_ROOT_DIR") << "/bin/salome/";
936 if (isPythonContainer(params.container_name))
937 tempOutputFile << " pyMPI SALOME_ContainerPy.py ";
939 tempOutputFile << " SALOME_MPIContainer ";
944 if (isPythonContainer(params.container_name))
945 tempOutputFile << "SALOME_ContainerPy.py ";
947 tempOutputFile << "SALOME_Container ";
950 tempOutputFile << _NS->ContainerName(params) << " -";
951 AddOmninamesParams(tempOutputFile);
952 tempOutputFile << " &" << endl;
953 tempOutputFile.flush();
954 tempOutputFile.close();
// Make the script executable (0755, written as 0x1ED).
956 chmod(_TmpFileName.c_str(), 0x1ED);
// Copy the script to the remote host with the protocol-matching tool.
963 if (resInfo.Protocol == rsh)
966 string commandRcp = "rcp ";
967 commandRcp += _TmpFileName;
969 commandRcp += resInfo.HostName;
971 commandRcp += _TmpFileName;
972 status = system(commandRcp.c_str());
975 else if (resInfo.Protocol == ssh)
978 string commandRcp = "scp ";
979 commandRcp += _TmpFileName;
981 commandRcp += resInfo.HostName;
983 commandRcp += _TmpFileName;
984 status = system(commandRcp.c_str());
987 throw SALOME_Exception("Unknown protocol");
990 throw SALOME_Exception("Error of connection on remote host");
// Final command: <rsh|ssh> host <scriptPath>; also remembered in
// _CommandForRemAccess.
992 command += resInfo.HostName;
993 _CommandForRemAccess = command;
995 command += _TmpFileName;
1003 #ifdef WITH_PACO_PARALLEL
1004 //=============================================================================
1006 * Find or Start a suitable PaCO++ Parallel Container in a list of machines.
1007 * \param params Machine Parameters required for the container
1008 * \return CORBA container reference.
1010 //=============================================================================
1011 Engines::Container_ptr
1012 SALOME_ContainerManager::StartPaCOPPContainer(const Engines::ContainerParameters& params_const)
1014 CORBA::Object_var obj;
1015 PaCO::InterfaceManager_var container_proxy;
1016 Engines::Container_ptr ret = Engines::Container::_nil();
1017 Engines::MachineParameters params(params_const);
1019 // Step 1 : Try to find a suitable container
1020 // Currently not as good as could be since
1021 // we have to verified the number of nodes of the container
1022 // if a user tell that.
1023 ret = FindContainer(params, params.computerList);
1024 if(CORBA::is_nil(ret)) {
1025 // Step 2 : Starting a new parallel container !
1026 INFOS("[StartParallelContainer] Starting a PaCO++ parallel container");
1028 // Step 3 : Choose a computer
1029 std::string theMachine = _ResManager->FindFirst(params.computerList);
1030 //If the machine name is localhost use the real name
1031 if(theMachine == "localhost")
1032 theMachine=Kernel_Utils::GetHostname();
1034 if(theMachine == "") {
1035 INFOS("[StartParallelContainer] !!!!!!!!!!!!!!!!!!!!!!!!!!");
1036 INFOS("[StartParallelContainer] No possible computer found");
1037 INFOS("[StartParallelContainer] !!!!!!!!!!!!!!!!!!!!!!!!!!");
1040 INFOS("[StartParallelContainer] on machine : " << theMachine);
1041 params.hostname = CORBA::string_dup(theMachine.c_str());
1043 // Step 4 : starting parallel container proxy
1044 Engines::MachineParameters params_proxy(params);
1045 std::string command_proxy;
1046 SALOME_ContainerManager::actual_launch_machine_t proxy_machine;
1049 command_proxy = BuildCommandToLaunchParallelContainer("SALOME_ParallelContainerProxy", params_proxy, proxy_machine);
1051 catch(const SALOME_Exception & ex)
1053 INFOS("[StartParallelContainer] Exception in BuildCommandToLaunchParallelContainer");
1057 params_proxy.nb_proc = 0; // LaunchParallelContainer uses this value to know if it launches the proxy or the nodes
1058 obj = LaunchParallelContainer(command_proxy, params_proxy, _NS->ContainerName(params_proxy), proxy_machine);
1059 if (CORBA::is_nil(obj))
1061 INFOS("[StartParallelContainer] LaunchParallelContainer for proxy returns NIL !");
1066 container_proxy = PaCO::InterfaceManager::_narrow(obj);
1068 catch(CORBA::SystemException& e)
1070 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1071 INFOS("CORBA::SystemException : " << e);
1074 catch(CORBA::Exception& e)
1076 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1077 INFOS("CORBA::Exception" << e);
1082 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1083 INFOS("Unknown exception !");
1086 if (CORBA::is_nil(container_proxy))
1088 INFOS("[StartParallelContainer] PaCO::InterfaceManager::_narrow returns NIL !");
1092 // Step 5 : starting parallel container nodes
1093 std::string command_nodes;
1094 Engines::MachineParameters params_nodes(params);
1095 SALOME_ContainerManager::actual_launch_machine_t nodes_machines;
1098 command_nodes = BuildCommandToLaunchParallelContainer("SALOME_ParallelContainerNode", params_nodes, nodes_machines, proxy_machine[0]);
1100 catch(const SALOME_Exception & ex)
1102 INFOS("[StartParallelContainer] Exception in BuildCommandToLaunchParallelContainer");
1106 std::string container_generic_node_name = _NS->ContainerName(params) + "Node";
1107 obj = LaunchParallelContainer(command_nodes, params_nodes, container_generic_node_name, nodes_machines);
1108 if (CORBA::is_nil(obj))
1110 INFOS("[StartParallelContainer] LaunchParallelContainer for nodes returns NIL !");
1111 // Il faut tuer le proxy
1114 Engines::Container_var proxy = Engines::Container::_narrow(container_proxy);
1119 INFOS("[StartParallelContainer] Exception catched from proxy Shutdown...");
1124 // Step 6 : connecting nodes and the proxy to actually create a parallel container
1125 for (int i = 0; i < params.nb_proc; i++)
1127 std::ostringstream tmp;
1129 std::string proc_number = tmp.str();
1130 std::string container_node_name = container_generic_node_name + proc_number;
1132 std::string theNodeMachine(nodes_machines[i]);
1133 std::string containerNameInNS = _NS->BuildContainerNameForNS(container_node_name.c_str(), theNodeMachine.c_str());
1134 obj = _NS->Resolve(containerNameInNS.c_str());
1135 if (CORBA::is_nil(obj))
1137 INFOS("[StartParallelContainer] CONNECTION FAILED From Naming Service !");
1138 INFOS("[StartParallelContainer] Container name is " << containerNameInNS);
1143 MESSAGE("[StartParallelContainer] Deploying node : " << container_node_name);
1144 PaCO::InterfaceParallel_var node = PaCO::InterfaceParallel::_narrow(obj);
1146 MESSAGE("[StartParallelContainer] node " << container_node_name << " is deployed");
1148 catch(CORBA::SystemException& e)
1150 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1151 INFOS("CORBA::SystemException : " << e);
1154 catch(CORBA::Exception& e)
1156 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1157 INFOS("CORBA::Exception" << e);
1162 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1163 INFOS("Unknown exception !");
1168 // Step 7 : starting parallel container
1171 MESSAGE ("[StartParallelContainer] Starting parallel object");
1172 container_proxy->start();
1173 MESSAGE ("[StartParallelContainer] Parallel object is started");
1174 ret = Engines::Container::_narrow(container_proxy);
1176 catch(CORBA::SystemException& e)
1178 INFOS("Caught CORBA::SystemException. : " << e);
1180 catch(PortableServer::POA::ServantAlreadyActive&)
1182 INFOS("Caught CORBA::ServantAlreadyActiveException");
1184 catch(CORBA::Exception&)
1186 INFOS("Caught CORBA::Exception.");
1188 catch(std::exception& exc)
1190 INFOS("Caught std::exception - "<<exc.what());
1194 INFOS("Caught unknown exception.");
1200 //=============================================================================
1202 * Find or Start a suitable PaCO++ Parallel Container in a list of machines.
1203 * \param params Machine Parameters required for the container
1204 * \return CORBA container reference.
1206 //=============================================================================
1207 Engines::Container_ptr
1208 SALOME_ContainerManager::StartPaCOPPContainer(const Engines::ContainerParameters& params)
// Kernel was built without the PaCO++ parallel extension: warn the user and
// prepare a nil container reference instead of starting a parallel container.
1210 Engines::Container_ptr ret = Engines::Container::_nil();
1211 INFOS("[StartParallelContainer] is disabled !");
1212 INFOS("[StartParallelContainer] recompile SALOME Kernel to enable parallel extension");
1217 #ifndef WITH_PACO_PARALLEL
// Stub compiled when WITH_PACO_PARALLEL is undefined: parallel-container
// launching is unavailable, so only a nil CORBA object is prepared.
1219 SALOME_ContainerManager::LaunchParallelContainer(const std::string& command,
1220 const Engines::ContainerParameters& params,
1221 const std::string& name,
1222 SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
1224 CORBA::Object_ptr obj = CORBA::Object::_nil();
1228 //=============================================================================
1229 /*! This method launches the parallel container.
1230 * It may eventually be placed in the resources manager.
1232 * \param command to launch
1233 * \param container's parameters
1234 * \param name of the container
1236 * \return CORBA container reference
1238 //=============================================================================
// Launches the parallel-container processes via a shell command, then polls
// the SALOME naming service until the proxy (params.nb_proc == 0) or every
// node (params.nb_proc > 0) has registered, or the retry budget
// (TIME_OUT_TO_LAUNCH_CONT) is exhausted.  Returns the last resolved object
// (nil on failure).
// NOTE(review): lines elided from this excerpt (braces, sleeps, count
// decrement) are not shown; comments describe only the visible statements.
1240 SALOME_ContainerManager::LaunchParallelContainer(const std::string& command,
1241 const Engines::ContainerParameters& params,
1242 const std::string& name,
1243 SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
1245 CORBA::Object_ptr obj = CORBA::Object::_nil();
1246 std::string containerNameInNS;
// Retry budget shared by all naming-service lookups below.
1247 int count = TIME_OUT_TO_LAUNCH_CONT;
1249 INFOS("[LaunchParallelContainer] Begin");
// Launch the container process(es) synchronously through the shell.
1250 int status = system(command.c_str());
1252 INFOS("[LaunchParallelContainer] failed : system command status -1");
// NOTE(review): 217 is treated as a failure status of the launch script —
// the exact meaning of this magic value is not visible here; confirm.
1255 else if (status == 217) {
1256 INFOS("[LaunchParallelContainer] failed : system command status 217");
// nb_proc == 0 means we launched the proxy; otherwise we launched the nodes.
1260 if (params.nb_proc == 0)
1262 std::string theMachine(vect_machine[0]);
1263 // Proxy case: we have launched a proxy process
1264 containerNameInNS = _NS->BuildContainerNameForNS((char*) name.c_str(), theMachine.c_str());
1265 INFOS("[LaunchParallelContainer] Waiting for Parallel Container proxy " << containerNameInNS << " on " << theMachine);
// Poll the naming service until the proxy registers or count runs out.
1266 while (CORBA::is_nil(obj) && count)
1274 obj = _NS->Resolve(containerNameInNS.c_str());
1279 INFOS("[LaunchParallelContainer] launching the nodes of the parallel container");
1280 // Wait for every node of the parallel container
1281 for (int i = 0; i < params.nb_proc; i++)
1283 obj = CORBA::Object::_nil();
1284 std::string theMachine(vect_machine[i]);
// Node names are the generic container name suffixed with the process rank.
1286 std::ostringstream tmp;
1288 std::string proc_number = tmp.str();
1289 std::string container_node_name = name + proc_number;
1290 containerNameInNS = _NS->BuildContainerNameForNS((char*) container_node_name.c_str(), theMachine.c_str());
1291 INFOS("[LaunchParallelContainer] Waiting for Parallel Container node " << containerNameInNS << " on " << theMachine);
1292 while (CORBA::is_nil(obj) && count) {
1299 obj = _NS->Resolve(containerNameInNS.c_str());
1301 if (CORBA::is_nil(obj))
1303 INFOS("[LaunchParallelContainer] Launch of node failed (or not found) !");
// Final status: obj is the last node resolved (or the proxy), nil on failure.
1308 if (CORBA::is_nil(obj))
1309 INFOS("[LaunchParallelContainer] failed");
1315 #ifndef WITH_PACO_PARALLEL
// Stub compiled without PaCO++ support: no launch command can be built.
1317 SALOME_ContainerManager::BuildCommandToLaunchParallelContainer(const std::string& exe_name,
1318 const Engines::ContainerParameters& params,
1319 SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
1320 const std::string proxy_hostname)
1325 //=============================================================================
1326 /*! Creates a command line that the container manager uses to launch
1327 * a parallel container.
1329 //=============================================================================
// Builds the shell command that launches either the proxy or the nodes of a
// PaCO++ parallel container (the distinction is made by "Proxy" appearing in
// exe_name).  Fills vect_machine with the target hostname(s) so the caller
// can later resolve the processes in the naming service.  Supports the
// "Dummy" and "Mpi" parallelLib back-ends; any other value throws
// SALOME_Exception.
// NOTE(review): several lines of the original body are elided from this
// excerpt (braces, else branches, xterm/log handling) — comments below
// describe only the visible statements.
1331 SALOME_ContainerManager::BuildCommandToLaunchParallelContainer(const std::string& exe_name,
1332 const Engines::ContainerParameters& params,
1333 SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
1334 const std::string proxy_hostname)
1336 // This method knows the differences between the proxy and the nodes.
1337 // nb_proc is not used in the same way if it is a proxy or
// Debug helpers kept for reference: wrap the command in gdb or valgrind.
1340 //command = "gdb --args ";
1341 //command = "valgrind --tool=memcheck --log-file=val_log ";
1342 //command += real_exe_name;
1344 // Step 0 : init some variables...
1345 std::string parallelLib(CORBA::string_dup(params.parallelLib));
1346 std::string real_exe_name = exe_name + parallelLib;
1347 std::string machine_file_name("");
1348 bool remote = false;
1349 bool is_a_proxy = false;
1350 std::string hostname(CORBA::string_dup(params.hostname));
// Stringify the requested process count once for reuse in the commands below.
1352 std::ostringstream tmp_string;
1353 CORBA::Long nb_nodes = params.nb_proc;
1354 tmp_string << nb_nodes;
1355 std::string nbproc = tmp_string.str();
// Copy the container parameters into the legacy MachineParameters structure
// expected by _NS->ContainerName(rtn) below.
1357 Engines::MachineParameters_var rtn = new Engines::MachineParameters();
1358 rtn->container_name = params.container_name;
1359 rtn->hostname = params.hostname;
1360 rtn->OS = params.OS;
1361 rtn->mem_mb = params.mem_mb;
1362 rtn->cpu_clock = params.cpu_clock;
1363 rtn->nb_proc_per_node = params.nb_proc_per_node;
1364 rtn->nb_node = params.nb_node;
1365 rtn->nb_proc = params.nb_proc;
1366 rtn->isMPI = params.isMPI;
1368 // Step 1 : local or remote launch ?
1369 if (hostname != std::string(Kernel_Utils::GetHostname()) )
1371 MESSAGE("[BuildCommandToLaunchParallelContainer] remote machine case detected !");
1375 // Step 2 : proxy or nodes launch ?
1376 std::string::size_type loc_proxy = exe_name.find("Proxy");
1377 if( loc_proxy != string::npos ) {
1381 // Step 3 : Depending of the parallelLib, getting the machine file
1382 // ParallelLib Dummy has its own machine file for this method
1387 machine_file_name = _ResManager->getMachineFile(hostname,
1393 machine_file_name = _ResManager->getMachineFile(hostname,
// A missing machine file is fatal: node placement is impossible without it.
1397 if (machine_file_name == "")
1399 INFOS("[BuildCommandToLaunchParallelContainer] Error machine_file was not generated for machine " << hostname);
1400 throw SALOME_Exception("Error machine_file was not generated");
1402 MESSAGE("[BuildCommandToLaunchParallelContainer] machine_file_name is : " << machine_file_name);
1405 // Step 4 : Log type chosen by the user
1406 std::string log_env("");
1407 char * get_val = getenv("PARALLEL_LOG");
1410 std::string command_begin("");
1411 std::string command_end("");
// PARALLEL_LOG=xterm shows live output; xterm_debug keeps the window open.
1412 if(log_env == "xterm")
1414 command_begin = "/usr/X11R6/bin/xterm -e \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH; export PATH=$PATH;";
1415 command_end = "\"&";
1417 else if(log_env == "xterm_debug")
1419 command_begin = "/usr/X11R6/bin/xterm -e \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH; export PATH=$PATH;";
1420 command_end = "; cat \" &";
1424 // default into a file...
1425 std::string logFilename = "/tmp/" + _NS->ContainerName(params) + "_" + hostname;
1427 logFilename += "_Proxy_";
1429 logFilename += "_Node_";
// NOTE(review): getenv("USER") is dereferenced unchecked — NULL would crash.
1430 logFilename += std::string(getenv("USER")) + ".log";
1431 command_end = " > " + logFilename + " 2>&1 & ";
1434 // Step 5 : Building the command
1435 std::string command("");
1436 if (parallelLib == "Dummy")
1440 std::string command_remote("");
// Dummy proxy: the target machine is the first line of the machine file.
1443 std::string machine_name;
1444 std::ifstream machine_file(machine_file_name.c_str());
1445 std::getline(machine_file, machine_name);
1446 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << machine_name)
1448 // We want to launch a command like :
1449 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1450 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(machine_name);
1451 if (resInfo.Protocol == rsh)
1452 command_remote = "rsh ";
1454 command_remote = "ssh ";
1455 command_remote += "-l ";
1456 command_remote += resInfo.UserName;
1457 command_remote += " ";
1458 command_remote += machine_name;
1459 command_remote += " ";
1460 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1461 command_remote += "/runRemote.sh ";
1462 ASSERT(getenv("NSHOST"));
1463 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1464 command_remote += " ";
1465 ASSERT(getenv("NSPORT"));
1466 command_remote += getenv("NSPORT"); // port of CORBA name server
1467 command_remote += " ";
1469 hostname = machine_name;
// Proxy command: exe, NS container name, parallelLib, hostname, nb_proc.
1472 command = real_exe_name;
1473 command += " " + _NS->ContainerName(rtn);
1474 command += " " + parallelLib;
1475 command += " " + hostname;
1476 command += " " + nbproc;
1478 AddOmninamesParams(command);
1480 command = command_begin + command_remote + command + command_end;
1481 vect_machine.push_back(hostname);
// Dummy nodes: one command per node, one machine-file line per node.
1485 std::ifstream * machine_file = NULL;
1487 machine_file = new std::ifstream(machine_file_name.c_str());
1488 for (int i= 0; i < nb_nodes; i++)
1490 std::string command_remote("");
1493 std::string machine_name;
1494 std::getline(*machine_file, machine_name);
1495 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << machine_name)
1497 // We want to launch a command like :
1498 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1499 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(machine_name);
1500 if (resInfo.Protocol == rsh)
1501 command_remote = "rsh ";
1503 command_remote = "ssh ";
1504 command_remote += "-l ";
1505 command_remote += resInfo.UserName;
1506 command_remote += " ";
1507 command_remote += machine_name;
1508 command_remote += " ";
1509 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1510 command_remote += "/runRemote.sh ";
1511 ASSERT(getenv("NSHOST"));
1512 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1513 command_remote += " ";
1514 ASSERT(getenv("NSPORT"));
1515 command_remote += getenv("NSPORT"); // port of CORBA name server
1516 command_remote += " ";
1518 hostname = machine_name;
// Each node receives its own rank (proc_number) and the proxy hostname.
1521 std::ostringstream tmp;
1523 std::string proc_number = tmp.str();
1525 std::string command_tmp("");
1526 command_tmp += real_exe_name;
1527 command_tmp += " " + _NS->ContainerName(rtn);
1528 command_tmp += " " + parallelLib;
1529 command_tmp += " " + proxy_hostname;
1530 command_tmp += " " + proc_number;
1531 command_tmp += " -";
1532 AddOmninamesParams(command_tmp);
1534 // Replace _Node_ with _Node<rank>_ so each node gets its own log file
1536 std::string command_end_tmp = command_end;
1537 std::string::size_type loc_node = command_end_tmp.find("_Node_");
1538 if (loc_node != std::string::npos)
1539 command_end_tmp.insert(loc_node+5, proc_number);
1540 command += command_begin + command_remote + command_tmp + command_end_tmp;
1541 vect_machine.push_back(hostname);
1544 delete machine_file;
1547 else if (parallelLib == "Mpi")
1549 // Step 0: if remote we have to copy the file
1550 // to the first machine of the file
1551 std::string remote_machine("");
1554 std::ifstream * machine_file = NULL;
1555 machine_file = new std::ifstream(machine_file_name.c_str());
1556 // Get first word of the line
1557 // For MPI implementation the first word is the
1559 std::getline(*machine_file, remote_machine, ' ');
1560 machine_file->close();
1561 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << remote_machine)
1563 // We want to launch a command like :
1564 // scp mpi_machine_file user@machine:Path
1565 std::string command_remote("");
1566 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1567 if (resInfo.Protocol == rsh)
1568 command_remote = "rcp ";
1570 command_remote = "scp ";
1572 command_remote += machine_file_name;
1573 command_remote += " ";
1574 command_remote += resInfo.UserName;
1575 command_remote += "@";
1576 command_remote += remote_machine;
1577 command_remote += ":";
1578 command_remote += machine_file_name;
// Copy the machine file to the remote host before launching mpirun there.
1580 int status = system(command_remote.c_str());
1583 INFOS("copy of the mpi machine file failed !");
1590 std::string command_remote("");
1593 // We want to launch a command like :
1594 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1595 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1596 if (resInfo.Protocol == rsh)
1597 command_remote = "rsh ";
1599 command_remote = "ssh ";
1600 command_remote += "-l ";
1601 command_remote += resInfo.UserName;
1602 command_remote += " ";
1603 command_remote += remote_machine;
1604 command_remote += " ";
1605 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1606 command_remote += "/runRemote.sh ";
1607 ASSERT(getenv("NSHOST"));
1608 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1609 command_remote += " ";
1610 ASSERT(getenv("NSPORT"));
1611 command_remote += getenv("NSPORT"); // port of CORBA name server
1612 command_remote += " ";
1614 hostname = remote_machine;
1617 // We use Dummy proxy for MPI parallel containers
1618 real_exe_name = exe_name + "Dummy";
1619 command = real_exe_name;
1620 command += " " + _NS->ContainerName(rtn);
1621 command += " Dummy";
1622 command += " " + hostname;
1623 command += " " + nbproc;
1625 AddOmninamesParams(command);
1627 command = command_begin + command_remote + command + command_end;
1628 vect_machine.push_back(hostname);
// Mpi nodes: a single mpiexec/mpirun command starts all of them at once.
1632 std::string command_remote("");
1635 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1636 if (resInfo.Protocol == rsh)
1637 command_remote = "rsh ";
1639 command_remote = "ssh ";
1640 command_remote += "-l ";
1641 command_remote += resInfo.UserName;
1642 command_remote += " ";
1643 command_remote += remote_machine;
1644 command_remote += " ";
// The remote exe is wrapped by runRemote.sh so the SALOME env is sourced.
1646 std::string new_real_exe_name("");
1647 new_real_exe_name += resInfo.AppliPath; // path relative to user@machine $HOME
1648 new_real_exe_name += "/runRemote.sh ";
1649 ASSERT(getenv("NSHOST"));
1650 new_real_exe_name += getenv("NSHOST"); // hostname of CORBA name server
1651 new_real_exe_name += " ";
1652 ASSERT(getenv("NSPORT"));
1653 new_real_exe_name += getenv("NSPORT"); // port of CORBA name server
1654 new_real_exe_name += " ";
1656 real_exe_name = new_real_exe_name + real_exe_name;
1657 hostname = remote_machine;
// LAM needs a different launcher and boot options than other MPI flavors.
1660 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(hostname);
1661 if (resInfo.mpi == lam)
1663 command = "mpiexec -ssi boot ";
1664 if (resInfo.Protocol == rsh)
1668 command += "-machinefile " + machine_file_name + " ";
1669 command += "-n " + nbproc + " ";
1670 command += real_exe_name;
1671 command += " " + _NS->ContainerName(rtn);
1672 command += " " + parallelLib;
1673 command += " " + proxy_hostname;
1675 AddOmninamesParams(command);
1679 command = "mpirun -np " + nbproc + " ";
1680 command += real_exe_name;
1681 command += " " + _NS->ContainerName(rtn);
1682 command += " " + parallelLib;
1683 command += " " + proxy_hostname;
1685 AddOmninamesParams(command);
1688 command = command_begin + command_remote + command + command_end;
// From the caller's point of view every MPI node lives on the proxy host.
1689 for (int i= 0; i < nb_nodes; i++)
1690 vect_machine.push_back(proxy_hostname);
1695 std::string message("Unknown parallelLib : " + parallelLib);
1696 throw SALOME_Exception(message.c_str());
1699 MESSAGE("Parallel launch is: " << command);
// Resolves the hostname of MPI rank 0 for the given machine by running
// "mpirun -np 1 hostname" (through ssh when not under a batch session) and
// capturing its output in a temporary file.
// NOTE(review): the tail of this function (reading the file, cleanup and the
// return statement) is elided from this excerpt.
1704 string SALOME_ContainerManager::GetMPIZeroNode(string machine)
1709 string tmpFile = BuildTemporaryFileName();
// Under a LIBBATCH session use the allocated machine file directly;
// otherwise fall back to an ssh round-trip to the requested machine.
1711 if( getenv("LIBBATCH_NODEFILE") == NULL )
1712 cmd = "ssh " + machine + " mpirun -np 1 hostname > " + tmpFile;
1714 cmd = "mpirun -np 1 -machinefile " + _machinesFile + " hostname > " + tmpFile;
1716 status = system(cmd.c_str());
// Read the captured hostname back from the temporary file.
1718 ifstream fp(tmpFile.c_str(),ios::in);
1727 void SALOME_ContainerManager::machinesFile(const int nbproc)
1730 string nodesFile = getenv("LIBBATCH_NODEFILE");
1731 _machinesFile = Kernel_Utils::GetTmpFileName();
1732 ifstream fpi(nodesFile.c_str(),ios::in);
1733 ofstream fpo(_machinesFile.c_str(),ios::out);
1735 for(int i=0;i<_nbprocUsed;i++)
1738 for(int i=0;i<nbproc;i++)
1742 throw SALOME_Exception("You ask more processes than batch session have allocated!");
1744 _nbprocUsed += nbproc;