1 // Copyright (C) 2007-2008 CEA/DEN, EDF R&D, OPEN CASCADE
3 // Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
4 // CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
6 // This library is free software; you can redistribute it and/or
7 // modify it under the terms of the GNU Lesser General Public
8 // License as published by the Free Software Foundation; either
9 // version 2.1 of the License.
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 // Lesser General Public License for more details.
16 // You should have received a copy of the GNU Lesser General Public
17 // License along with this library; if not, write to the Free Software
18 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
22 #include "SALOME_ContainerManager.hxx"
23 #include "SALOME_NamingService.hxx"
24 #include "SALOME_ModuleCatalog.hh"
25 #include "Basics_Utils.hxx"
26 #include "Basics_DirUtils.hxx"
27 #include <sys/types.h>
33 #include "Utils_CorbaException.hxx"
36 #ifdef WITH_PACO_PARALLEL
40 #define TIME_OUT_TO_LAUNCH_CONT 61
44 const char *SALOME_ContainerManager::_ContainerManagerNameInNS =
47 //=============================================================================
51 * Define a CORBA single thread policy for the server, which avoid to deal
52 * with non thread-safe usage like Change_Directory in SALOME naming service
54 //=============================================================================
// Constructor.
// Duplicates the ORB, creates a child POA ("SThreadPOA") carrying a
// SINGLE_THREAD_MODEL policy (the naming service uses non thread-safe calls
// such as Change_Directory), activates this servant on it and registers the
// resulting reference in the naming service under _ContainerManagerNameInNS.
// When OMPI_URI_FILE is set, any running ompi-server is killed and a fresh
// one is launched with that URI file (rendezvous point for OpenMPI
// containers); a launch failure raises SALOME_Exception.
56 SALOME_ContainerManager::SALOME_ContainerManager(CORBA::ORB_ptr orb, PortableServer::POA_var poa,
57 SALOME_ResourcesManager *rm, SALOME_NamingService *ns)
59 MESSAGE("constructor");
63 PortableServer::POAManager_var pman = poa->the_POAManager();
64 _orb = CORBA::ORB::_duplicate(orb) ;
65 CORBA::PolicyList policies;
// Single-thread policy: all requests on this servant are serialized.
67 PortableServer::ThreadPolicy_var threadPol =
68 poa->create_thread_policy(PortableServer::SINGLE_THREAD_MODEL);
69 policies[0] = PortableServer::ThreadPolicy::_duplicate(threadPol);
71 _poa = poa->create_POA("SThreadPOA",pman,policies);
73 PortableServer::ObjectId_var id = _poa->activate_object(this);
74 CORBA::Object_var obj = _poa->id_to_reference(id);
75 Engines::ContainerManager_var refContMan =
76 Engines::ContainerManager::_narrow(obj);
// Publish the reference so clients can resolve the manager by name.
78 _NS->Register(refContMan,_ContainerManagerNameInNS);
// Remember whether a SALOME application directory ($APPLI) is defined;
// this drives how remote containers are launched later on.
79 _isAppliSalomeDefined = (getenv("APPLI") != 0);
// Restart the OpenMPI name server used by MPI containers.
83 if( getenv("OMPI_URI_FILE") != NULL ){
84 system("killall ompi-server");
86 command = "ompi-server -r ";
87 command += getenv("OMPI_URI_FILE");
88 int status=system(command.c_str());
// NOTE(review): presumably guarded by status != 0 — confirm in full source.
90 throw SALOME_Exception("Error when launching ompi-server");
95 MESSAGE("constructor end");
98 //=============================================================================
102 //=============================================================================
// Destructor: stops the ompi-server process launched by the constructor
// (only when OMPI_URI_FILE is defined, i.e. when one was started).
104 SALOME_ContainerManager::~SALOME_ContainerManager()
106 MESSAGE("destructor");
109 if( getenv("OMPI_URI_FILE") != NULL )
110 system("killall ompi-server");
115 //=============================================================================
116 //! shutdown all the containers, then the ContainerManager servant
119 //=============================================================================
// Shuts the service down: asks every registered container to shut down,
// removes this manager's name from the naming service, then deactivates
// the servant on its dedicated POA.
121 void SALOME_ContainerManager::Shutdown()
124 ShutdownContainers();
125 _NS->Destroy_Name(_ContainerManagerNameInNS);
126 PortableServer::ObjectId_var oid = _poa->servant_to_id(this);
127 _poa->deactivate_object(oid);
130 //=============================================================================
131 //! Loop on all the containers listed in naming service, ask shutdown on each
134 //=============================================================================
// Walks the "/Containers" directory of the naming service, collects every
// entry that narrows to an Engines::Container, then calls Shutdown() on each.
// All CORBA errors are logged and ignored so that one dead container does
// not prevent shutting down the others.
136 void SALOME_ContainerManager::ShutdownContainers()
138 MESSAGE("ShutdownContainers");
140 isOK = _NS->Change_Directory("/Containers");
// First pass: build the list of resolvable container references.
142 vector<string> vec = _NS->list_directory_recurs();
143 list<string> lstCont;
144 for(vector<string>::iterator iter = vec.begin();iter!=vec.end();iter++)
147 CORBA::Object_var obj=_NS->Resolve((*iter).c_str());
150 Engines::Container_var cont=Engines::Container::_narrow(obj);
151 if(!CORBA::is_nil(cont))
152 lstCont.push_back((*iter));
154 catch(const CORBA::Exception& e)
156 // ignore this entry and continue
159 MESSAGE("Container list: ");
160 for(list<string>::iterator iter=lstCont.begin();iter!=lstCont.end();iter++){
// Second pass: re-resolve each name and request its shutdown.
163 for(list<string>::iterator iter=lstCont.begin();iter!=lstCont.end();iter++)
168 CORBA::Object_var obj=_NS->Resolve((*iter).c_str());
169 Engines::Container_var cont=Engines::Container::_narrow(obj);
170 if(!CORBA::is_nil(cont))
172 MESSAGE("ShutdownContainers: " << (*iter));
176 MESSAGE("ShutdownContainers: no container ref for " << (*iter));
178 catch(CORBA::SystemException& e)
180 INFOS("CORBA::SystemException ignored : " << e);
182 catch(CORBA::Exception&)
184 INFOS("CORBA::Exception ignored.");
188 INFOS("Unknown exception ignored.");
194 //=============================================================================
195 //! Give a suitable Container given constraints
197 * \param params Container Parameters required for the container
198 * \return the container or nil
200 //=============================================================================
// Returns a container satisfying the given parameters, or a nil reference.
// Behaviour depends on params.mode (defaulted to "start"):
//  - "find"/"findorstart": first try to reuse a container on the requested
//    resources; "findorstart" falls through to a start when none is found.
//  - "get"/"getorstart": only resources already hosting a matching container
//    are considered, and an existing registered container is reused.
//  - "start": an already-registered container with the same name is shut
//    down before a new one is launched.
// A new container is launched via a shell command (local or remote), its
// stdout/stderr redirected to a log file, and the method polls the naming
// service up to TIME_OUT_TO_LAUNCH_CONT steps until the container registers.
201 Engines::Container_ptr
202 SALOME_ContainerManager::GiveContainer(const Engines::ContainerParameters& params)
204 Engines::Container_ptr ret = Engines::Container::_nil();
206 // Step 0: Default mode is start
207 Engines::ContainerParameters local_params(params);
208 if (std::string(local_params.mode.in()) == "")
209 local_params.mode = CORBA::string_dup("start");
210 std::string mode = local_params.mode.in();
211 MESSAGE("[GiveContainer] starting with mode: " << mode);
213 // Step 1: Find Container for find and findorstart mode
214 if (mode == "find" or mode == "findorstart")
216 ret = FindContainer(params, params.resource_params.resList);
217 if(!CORBA::is_nil(ret))
223 MESSAGE("[GiveContainer] no container found");
233 // Step 2: Get all possibleResources from the parameters
234 Engines::ResourceList_var possibleResources = _ResManager->GetFittingResources(local_params.resource_params);
235 MESSAGE("[GiveContainer] - length of possible resources " << possibleResources->length());
236 std::vector<std::string> local_resources;
238 // Step 3: if mode is "get" keep only machines with existing containers
241 for(unsigned int i=0; i < possibleResources->length(); i++)
243 Engines::Container_ptr cont = FindContainer(params, possibleResources[i].in());
// _non_existent() pings the reference: keep only live containers.
246 if(!cont->_non_existent())
247 local_resources.push_back(string(possibleResources[i]));
249 catch(CORBA::Exception&) {}
252 // if local_resources is empty, we cannot give a container
253 if (local_resources.size() == 0)
255 MESSAGE("[GiveContainer] cannot find a container for mode get")
260 for(unsigned int i=0; i < possibleResources->length(); i++)
261 local_resources.push_back(string(possibleResources[i]));
263 // Step 4: select the resource where to get/start the container
264 std::string resource_selected;
267 resource_selected = _ResManager->GetImpl()->Find(params.resource_params.policy.in(), local_resources);
269 catch(const SALOME_Exception &ex)
271 MESSAGE("[GiveContainer] Exception in ResourceManager find !: " << ex.what());
274 MESSAGE("[GiveContainer] Resource selected is: " << resource_selected);
276 // Step 5: get container in the naming service
277 Engines::ResourceDefinition_var resource_definition = _ResManager->GetResourceDefinition(resource_selected.c_str());
278 std::string hostname(resource_definition->name.in());
279 std::string containerNameInNS;
281 // A mpi parallel container register on zero node in NS
282 containerNameInNS = _NS->BuildContainerNameForNS(params, GetMPIZeroNode(hostname).c_str());
284 containerNameInNS = _NS->BuildContainerNameForNS(params, hostname.c_str());
285 MESSAGE("[GiveContainer] Container name in the naming service: " << containerNameInNS);
287 // Step 6: check if the name exists in naming service
288 //if params.mode == "getorstart" or "get" use the existing container
289 //if params.mode == "start" shutdown the existing container before launching a new one with that name
290 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
291 if (!CORBA::is_nil(obj))
295 Engines::Container_var cont=Engines::Container::_narrow(obj);
296 if(!cont->_non_existent())
298 if(std::string(params.mode.in())=="getorstart" or std::string(params.mode.in())=="get")
299 return cont._retn(); /* the container exists and params.mode is getorstart or get use it*/
302 INFOS("[GiveContainer] A container is already registered with the name: " << containerNameInNS << ", shutdown the existing container");
303 cont->Shutdown(); // shutdown the registered container if it exists
307 catch(CORBA::Exception&)
309 INFOS("[GiveContainer] CORBA::Exception ignored when trying to get the container - we start a new one");
313 // Step 7: type of container: PaCO, Exe, Mpi or Classic
314 // Mpi already tested in step 5, specific code on BuildCommandToLaunch Local/Remote Container methods
315 // TODO -> separates Mpi from Classic/Exe
317 std::string parallelLib(params.parallelLib);
318 if (std::string(local_params.parallelLib.in()) != "")
320 INFOS("[GiveContainer] PaCO++ container are not currently available");
// Determine the container executable from the module catalog: a component
// implemented as an executable (CEXE) overrides the classic container, and
// two CEXE components cannot share one container.
324 std::string container_exe = "SALOME_Container"; // Classic container
328 CORBA::String_var container_exe_tmp;
329 CORBA::Object_var obj = _NS->Resolve("/Kernel/ModulCatalog");
330 SALOME_ModuleCatalog::ModuleCatalog_var Catalog = SALOME_ModuleCatalog::ModuleCatalog::_narrow(obj) ;
331 if (CORBA::is_nil (Catalog))
333 INFOS("[GiveContainer] Module Catalog is not found -> cannot launch a container");
336 // Loop through component list
337 for(unsigned int i=0; i < local_params.resource_params.componentList.length(); i++)
339 const char* compoi = local_params.resource_params.componentList[i];
340 SALOME_ModuleCatalog::Acomponent_var compoInfo = Catalog->GetComponent(compoi);
341 if (CORBA::is_nil (compoInfo))
345 SALOME_ModuleCatalog::ImplType impl=compoInfo->implementation_type();
346 container_exe_tmp=compoInfo->implementation_name();
347 if(impl==SALOME_ModuleCatalog::CEXE)
351 INFOS("ContainerManager Error: you can't have 2 CEXE component in the same container" );
352 return Engines::Container::_nil();
354 MESSAGE("[GiveContainer] Exe container found !: " << container_exe_tmp);
355 container_exe = container_exe_tmp.in();
360 catch (ServiceUnreachable&)
362 INFOS("Caught exception: Naming Service Unreachable");
367 INFOS("Caught unknown exception.");
371 // Step 8: start a new container
372 MESSAGE("[GiveContainer] Try to launch a new container on " << resource_selected);
// Local launch when the selected host is this machine, remote otherwise.
374 if(hostname == Kernel_Utils::GetHostname())
375 command = BuildCommandToLaunchLocalContainer(params, container_exe);
377 command = BuildCommandToLaunchRemoteContainer(resource_selected, params, container_exe);
379 //redirect stdout and stderr in a file
381 string logFilename=getenv("TEMP");
384 string logFilename="/tmp";
// SALOME_TMP_DIR may override /tmp, but only if it names a directory.
385 char* val = getenv("SALOME_TMP_DIR");
388 struct stat file_info;
389 stat(val, &file_info);
390 bool is_dir = S_ISDIR(file_info.st_mode);
391 if (is_dir)logFilename=val;
392 else std::cerr << "SALOME_TMP_DIR environment variable is not a directory use /tmp instead" << std::endl;
396 logFilename += _NS->ContainerName(params)+"_"+ resource_selected +"_"+getenv( "USER" )+".log" ;
397 command += " > " + logFilename + " 2>&1";
// On Windows the command is spawned detached through win32pm.
399 command = "%PYTHONBIN% -c \"import win32pm ; win32pm.spawnpid(r'" + command + "', '')\"";
404 // launch container with a system call
405 int status=system(command.c_str());
408 MESSAGE("SALOME_ContainerManager::StartContainer rsh failed (system command status -1)");
409 RmTmpFile(_TmpFileName); // command file can be removed here
410 return Engines::Container::_nil();
// NOTE(review): 217 appears to be an rsh failure exit status — confirm.
412 else if (status == 217){
413 MESSAGE("SALOME_ContainerManager::StartContainer rsh failed (system command status 217)");
414 RmTmpFile(_TmpFileName); // command file can be removed here
415 return Engines::Container::_nil();
// Poll the naming service until the new container registers itself or the
// timeout (TIME_OUT_TO_LAUNCH_CONT steps) expires.
419 int count = TIME_OUT_TO_LAUNCH_CONT;
420 MESSAGE("[GiveContainer] waiting " << count << " second steps");
421 while (CORBA::is_nil(ret) && count)
429 MESSAGE("[GiveContainer] step " << count << " Waiting for container on " << resource_selected);
430 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
431 ret=Engines::Container::_narrow(obj);
433 if (CORBA::is_nil(ret))
435 INFOS("[GiveContainer] was not able to launch container " << containerNameInNS);
439 // Setting log file name
// Final log name has the form user@host:path.
440 logFilename=":"+logFilename;
441 logFilename="@"+Kernel_Utils::GetHostname()+logFilename;
442 logFilename=getenv( "USER" )+logFilename;
443 ret->logfilename(logFilename.c_str());
444 RmTmpFile(_TmpFileName); // command file can be removed here
450 //=============================================================================
451 //! Find a container given constraints (params) on a list of machines (possibleComputers)
455 //=============================================================================
// Looks for an existing container matching params on each resource of the
// given list, returning the first live match or a nil reference.
457 Engines::Container_ptr
458 SALOME_ContainerManager::FindContainer(const Engines::ContainerParameters& params,
459 const Engines::ResourceList& possibleResources)
461 MESSAGE("[FindContainer] FindContainer on " << possibleResources.length() << " resources");
462 for(unsigned int i=0; i < possibleResources.length();i++)
464 Engines::Container_ptr cont = FindContainer(params, possibleResources[i].in());
465 if(!CORBA::is_nil(cont))
468 MESSAGE("[FindContainer] no container found");
469 return Engines::Container::_nil();
472 //=============================================================================
473 //! Find a container given constraints (params) on a machine (theMachine)
477 //=============================================================================
// Looks for an existing container matching params on one resource.
// Resolves the container's naming-service entry and verifies the reference
// is still alive (_non_existent); all CORBA failures yield a nil reference.
479 Engines::Container_ptr
480 SALOME_ContainerManager::FindContainer(const Engines::ContainerParameters& params,
481 const std::string& resource)
483 std::string containerNameInNS(_NS->BuildContainerNameForNS(params, resource.c_str()));
484 MESSAGE("[FindContainer] Try to find a container " << containerNameInNS << " on resource " << resource);
485 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
488 if(obj->_non_existent())
489 return Engines::Container::_nil();
491 return Engines::Container::_narrow(obj);
493 catch(const CORBA::Exception& e)
495 return Engines::Container::_nil();
499 //=============================================================================
501 * This is no longer valid (C++ container are also python containers)
503 //=============================================================================
// Heuristic: a container whose name ends with "Py" is treated as a Python
// container. As noted above, this is obsolete (C++ containers also embed
// Python) but is kept for the legacy launch-command builders below.
504 bool isPythonContainer(const char* ContainerName)
507 int len = strlen(ContainerName);
510 if (strcmp(ContainerName + len - 2, "Py") == 0)
516 //=============================================================================
518 * Builds the script to be launched
520 * If SALOME Application not defined ($APPLI),
521 * see BuildTempFileToLaunchRemoteContainer()
523 * Else rely on distant configuration. Command is under the form (example):
524 * ssh user@machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
525 * SALOME_Container containerName &"
527 * - where user is ommited if not specified in CatalogResources,
528 * - where distant path is always relative to user@machine $HOME, and
529 * equal to $APPLI if not specified in CatalogResources,
530 * - where hostNS is the hostname of CORBA naming server (set by scripts to
531 * use to launch SALOME and servers in $APPLI: runAppli.sh, runRemote.sh)
532 * - where portNS is the port used by CORBA naming server (set by scripts to
533 * use to launch SALOME and servers in $APPLI: runAppli.sh, runRemote.sh)
534 * - where workingdir is the requested working directory for the container.
535 * If WORKINGDIR (and workingdir) is not present the working dir will be $HOME
537 //=============================================================================
// Builds the shell command launching a container on a remote resource.
// Without $APPLI, delegates to BuildTempFileToLaunchRemoteContainer();
// with $APPLI, builds an rsh/ssh invocation of runRemote.sh on the remote
// application directory (see the command layout documented above), with an
// optional mpirun prefix for MPI containers.
540 SALOME_ContainerManager::BuildCommandToLaunchRemoteContainer
541 (const string& resource_name,
542 const Engines::ContainerParameters& params, const std::string& container_exe)
546 if (!_isAppliSalomeDefined)
547 command = BuildTempFileToLaunchRemoteContainer(resource_name, params);
551 Engines::ResourceDefinition_var resource_definition = _ResManager->GetResourceDefinition(resource_name.c_str());
552 std::string hostname(resource_definition->name.in());
553 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesDescr(resource_name);
// Number of MPI processes: product of nodes x procs-per-node, with either
// value alone used when the other is unset.
557 if ((params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0))
559 else if (params.resource_params.nb_node == 0)
560 nbproc = params.resource_params.nb_proc_per_node;
561 else if (params.resource_params.nb_proc_per_node == 0)
562 nbproc = params.resource_params.nb_node;
564 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
567 // "ssh -l user machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
568 //  SALOME_Container containerName &"
569 if (resInfo.Protocol == rsh)
571 else if (resInfo.Protocol == ssh)
574 throw SALOME_Exception("Unknown protocol");
576 if (resInfo.UserName != "")
579 command += resInfo.UserName;
583 command += resInfo.HostName;
// Remote application path: from the resource catalog, else this $APPLI.
586 if (resInfo.AppliPath != "")
587 command += resInfo.AppliPath; // path relative to user@machine $HOME
590 ASSERT(getenv("APPLI"));
591 command += getenv("APPLI"); // path relative to user@machine $HOME
594 command += "/runRemote.sh ";
// Host and port of the CORBA naming server, exported by the run scripts.
596 ASSERT(getenv("NSHOST"));
597 command += getenv("NSHOST"); // hostname of CORBA name server
600 ASSERT(getenv("NSPORT"));
601 command += getenv("NSPORT"); // port of CORBA name server
603 std::string wdir = params.workingdir.in();
606 command += " WORKINGDIR ";
608 if(wdir == "$TEMPDIR")
610 command += wdir; // requested working directory
// MPI container: prefix with mpirun and export the needed environment.
616 command += " mpirun -np ";
617 std::ostringstream o;
621 command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
622 #elif defined(WITHOPENMPI)
623 if( getenv("OMPI_URI_FILE") == NULL )
624 command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
626 command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
627 command += getenv("OMPI_URI_FILE");
630 command += " SALOME_MPIContainer ";
633 command += " " +container_exe+ " ";
635 command += _NS->ContainerName(params);
637 AddOmninamesParams(command);
639 MESSAGE("command =" << command);
645 //=============================================================================
647 * builds the command to be launched.
649 //=============================================================================
// Builds the command to launch a container on the local machine.
// The actual command lines (optional mpirun prefix, cd into the requested
// working directory, container executable and naming-service options) are
// written into an executable temporary script (_TmpFileName), and the
// returned command is the path of that script.
651 SALOME_ContainerManager::BuildCommandToLaunchLocalContainer
652 (const Engines::ContainerParameters& params, const std::string& container_exe)
654 _TmpFileName = BuildTemporaryFileName();
// Same nbproc computation as for the remote launch (nodes x procs/node).
664 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
666 else if ( params.resource_params.nb_node == 0 )
667 nbproc = params.resource_params.nb_proc_per_node;
668 else if ( params.resource_params.nb_proc_per_node == 0 )
669 nbproc = params.resource_params.nb_node;
671 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
// MPI flavour specific mpirun environment forwarding.
676 o << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
677 #elif defined(WITHOPENMPI)
678 if( getenv("OMPI_URI_FILE") == NULL )
679 o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
682 o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
683 o << getenv("OMPI_URI_FILE");
687 if (isPythonContainer(params.container_name))
688 o << " pyMPI SALOME_ContainerPy.py ";
690 o << " SALOME_MPIContainer ";
695 std::string wdir=params.workingdir.in();
698 // a working directory is requested
699 if(wdir == "$TEMPDIR")
701 // a new temporary directory is requested
702 string dir = Kernel_Utils::GetTmpDir();
// "cd /d" is the Windows form, plain "cd" the POSIX one.
704 o << "cd /d " << dir << endl;
706 o << "cd " << dir << ";";
712 // a permanent directory is requested use it or create it
714 o << "mkdir " + wdir << endl;
715 o << "cd /D " + wdir << endl;
717 o << "mkdir -p " << wdir << " && cd " << wdir + ";";
721 if (isPythonContainer(params.container_name))
722 o << "SALOME_ContainerPy.py ";
724 o << container_exe + " ";
728 o << _NS->ContainerName(params);
730 AddOmninamesParams(o);
// Persist the command into the temporary script and make it executable
// (0x1ED == 0755).
732 ofstream command_file( _TmpFileName.c_str() );
733 command_file << o.str();
734 command_file.close();
737 chmod(_TmpFileName.c_str(), 0x1ED);
739 command = _TmpFileName;
741 MESSAGE("Command is file ... " << command);
742 MESSAGE("Command is ... " << o.str());
747 //=============================================================================
749 * removes the generated temporary file in case of a remote launch.
751 //=============================================================================
// Removes the temporary launch script (Windows "del /F", POSIX "rm"),
// then removes its parent directory if that left the directory empty.
// Note: "lenght" is a historical typo kept as-is (local variable only).
753 void SALOME_ContainerManager::RmTmpFile(std::string& tmpFileName)
755 int lenght = tmpFileName.size();
759 string command = "del /F ";
761 string command = "rm ";
// NOTE(review): strips the last 3 characters of the name in one branch —
// presumably a platform-specific extension; confirm against full source.
764 command += tmpFileName.substr(0, lenght - 3 );
766 command += tmpFileName;
768 system(command.c_str());
769 //if dir is empty - remove it
770 string tmp_dir = Kernel_Utils::GetDirByPath( tmpFileName );
771 if ( Kernel_Utils::IsEmptyDir( tmp_dir ) )
774 command = "del /F " + tmp_dir;
776 command = "rmdir " + tmp_dir;
778 system(command.c_str());
783 //=============================================================================
785 * add to command all options relative to naming service.
787 //=============================================================================
// Appends the naming-service option ("ORBInitRef NameService=<IOR>") to a
// command string.
789 void SALOME_ContainerManager::AddOmninamesParams(string& command) const
791 CORBA::String_var iorstr = _NS->getIORaddr();
792 command += "ORBInitRef NameService=";
796 //=============================================================================
798 * add to command all options relative to naming service.
800 //=============================================================================
// Writes the naming-service option ("ORBInitRef NameService=<IOR>") into an
// already-open command script stream.
802 void SALOME_ContainerManager::AddOmninamesParams(ofstream& fileStream) const
804 CORBA::String_var iorstr = _NS->getIORaddr();
805 fileStream << "ORBInitRef NameService=";
806 fileStream << iorstr;
809 //=============================================================================
811 * add to command all options relative to naming service.
813 //=============================================================================
// Streams the naming-service option ("ORBInitRef NameService=<IOR>") into a
// string builder used while composing a launch command.
815 void SALOME_ContainerManager::AddOmninamesParams(ostringstream& oss) const
817 CORBA::String_var iorstr = _NS->getIORaddr();
818 oss << "ORBInitRef NameService=";
822 //=============================================================================
824 * generate a file name in /tmp directory
826 //=============================================================================
// Returns a fresh temporary file name (delegated to Kernel_Utils), unique
// enough to support several concurrent SALOME sessions.
828 string SALOME_ContainerManager::BuildTemporaryFileName() const
830 //build more complex file name to support multiple salome session
831 string aFileName = Kernel_Utils::GetTmpFileName();
// Determines the host that MPI rank 0 runs on for the given machine:
// executes "mpirun -np 1 hostname" there over ssh, capturing the output in
// a temporary file that is then read back. MPI containers register in the
// naming service under that zero-node host name (see GiveContainer).
840 string SALOME_ContainerManager::GetMPIZeroNode(string machine)
845 string tmpFile = BuildTemporaryFileName();
847 cmd = "ssh " + machine + " mpirun -np 1 hostname > " + tmpFile;
849 status = system(cmd.c_str());
851 ifstream fp(tmpFile.c_str(),ios::in);
860 //=============================================================================
862 * Builds in a temporary file the script to be launched.
864 * Used if SALOME Application ($APPLI) is not defined.
865 * The command is build with data from CatalogResources, in which every path
866 * used on remote computer must be defined.
868 //=============================================================================
// Builds, in a temporary shell script, the command launching a container on
// a remote host when no SALOME application ($APPLI) is defined. Every path
// used on the remote side must come from CatalogResources. The script is
// copied to the remote host with rcp/scp (per the resource protocol) and
// the returned command runs it there via rsh/ssh.
// Throws SALOME_Exception on unknown protocol or failed copy.
871 SALOME_ContainerManager::BuildTempFileToLaunchRemoteContainer
872 (const string& resource_name,
873 const Engines::ContainerParameters& params) throw(SALOME_Exception)
877 _TmpFileName = BuildTemporaryFileName();
878 ofstream tempOutputFile;
879 tempOutputFile.open(_TmpFileName.c_str(), ofstream::out );
880 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesDescr(resource_name);
881 tempOutputFile << "#! /bin/sh" << endl;
885 tempOutputFile << "export SALOME_trace=local" << endl; // mkr : 27.11.2006 : PAL13967 - Distributed supervision graphs - Problem with "SALOME_trace"
886 //tempOutputFile << "source " << resInfo.PreReqFilePath << endl;
// MPI container: mpirun prefix with nbproc = nodes x procs-per-node.
892 tempOutputFile << "mpirun -np ";
895 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
897 else if ( params.resource_params.nb_node == 0 )
898 nbproc = params.resource_params.nb_proc_per_node;
899 else if ( params.resource_params.nb_proc_per_node == 0 )
900 nbproc = params.resource_params.nb_node;
902 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
904 std::ostringstream o;
906 tempOutputFile << nbproc << " ";
908 tempOutputFile << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
909 #elif defined(WITHOPENMPI)
910 if( getenv("OMPI_URI_FILE") == NULL )
911 tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
913 tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
914 tempOutputFile << getenv("OMPI_URI_FILE");
919 tempOutputFile << getenv("KERNEL_ROOT_DIR") << "/bin/salome/";
923 if (isPythonContainer(params.container_name))
924 tempOutputFile << " pyMPI SALOME_ContainerPy.py ";
926 tempOutputFile << " SALOME_MPIContainer ";
931 if (isPythonContainer(params.container_name))
932 tempOutputFile << "SALOME_ContainerPy.py ";
934 tempOutputFile << "SALOME_Container ";
937 tempOutputFile << _NS->ContainerName(params) << " -";
938 AddOmninamesParams(tempOutputFile);
939 tempOutputFile << " &" << endl;
940 tempOutputFile.flush();
941 tempOutputFile.close();
// 0x1ED == 0755: make the generated script executable.
943 chmod(_TmpFileName.c_str(), 0x1ED);
// Copy the script to the remote host with the protocol-matching tool.
950 if (resInfo.Protocol == rsh)
953 string commandRcp = "rcp ";
954 commandRcp += _TmpFileName;
956 commandRcp += resInfo.HostName;
958 commandRcp += _TmpFileName;
959 status = system(commandRcp.c_str());
962 else if (resInfo.Protocol == ssh)
965 string commandRcp = "scp ";
966 commandRcp += _TmpFileName;
968 commandRcp += resInfo.HostName;
970 commandRcp += _TmpFileName;
971 status = system(commandRcp.c_str());
974 throw SALOME_Exception("Unknown protocol");
977 throw SALOME_Exception("Error of connection on remote host");
979 command += resInfo.HostName;
980 _CommandForRemAccess = command;
982 command += _TmpFileName;
990 #ifdef WITH_PACO_PARALLEL
991 //=============================================================================
993 * Find or Start a suitable PaCO++ Parallel Container in a list of machines.
994 * \param params Machine Parameters required for the container
995 * \return CORBA container reference.
997 //=============================================================================
// Finds or starts a PaCO++ parallel container (WITH_PACO_PARALLEL build).
// Sequence: reuse an existing container if possible; otherwise pick a
// machine, launch the parallel-container proxy, launch the node processes,
// deploy every node through its PaCO::InterfaceParallel reference, and
// finally start the proxy to assemble the parallel container.
// Returns the container reference, or nil on any failure (the proxy is
// shut down if the nodes could not be launched).
998 Engines::Container_ptr
999 SALOME_ContainerManager::StartPaCOPPContainer(const Engines::ContainerParameters& params_const)
1001 CORBA::Object_var obj;
1002 PaCO::InterfaceManager_var container_proxy;
1003 Engines::Container_ptr ret = Engines::Container::_nil();
1004 Engines::MachineParameters params(params_const);
1006 // Step 1 : Try to find a suitable container
1007 // Currently not as good as could be since
1008 // we have to verified the number of nodes of the container
1009 // if a user tell that.
1010 ret = FindContainer(params, params.computerList);
1011 if(CORBA::is_nil(ret)) {
1012 // Step 2 : Starting a new parallel container !
1013 INFOS("[StartParallelContainer] Starting a PaCO++ parallel container");
1015 // Step 3 : Choose a computer
1016 std::string theMachine = _ResManager->FindFirst(params.computerList);
1017 //If the machine name is localhost use the real name
1018 if(theMachine == "localhost")
1019 theMachine=Kernel_Utils::GetHostname();
1021 if(theMachine == "") {
1022 INFOS("[StartParallelContainer] !!!!!!!!!!!!!!!!!!!!!!!!!!");
1023 INFOS("[StartParallelContainer] No possible computer found");
1024 INFOS("[StartParallelContainer] !!!!!!!!!!!!!!!!!!!!!!!!!!");
1027 INFOS("[StartParallelContainer] on machine : " << theMachine);
1028 params.hostname = CORBA::string_dup(theMachine.c_str());
1030 // Step 4 : starting parallel container proxy
1031 Engines::MachineParameters params_proxy(params);
1032 std::string command_proxy;
1033 SALOME_ContainerManager::actual_launch_machine_t proxy_machine;
1036 command_proxy = BuildCommandToLaunchParallelContainer("SALOME_ParallelContainerProxy", params_proxy, proxy_machine);
1038 catch(const SALOME_Exception & ex)
1040 INFOS("[StartParallelContainer] Exception in BuildCommandToLaunchParallelContainer");
1044 params_proxy.nb_proc = 0; // LaunchParallelContainer uses this value to know if it launches the proxy or the nodes
1045 obj = LaunchParallelContainer(command_proxy, params_proxy, _NS->ContainerName(params_proxy), proxy_machine);
1046 if (CORBA::is_nil(obj))
1048 INFOS("[StartParallelContainer] LaunchParallelContainer for proxy returns NIL !");
// Narrow the launched proxy to its PaCO interface-manager interface.
1053 container_proxy = PaCO::InterfaceManager::_narrow(obj);
1055 catch(CORBA::SystemException& e)
1057 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1058 INFOS("CORBA::SystemException : " << e);
1061 catch(CORBA::Exception& e)
1063 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1064 INFOS("CORBA::Exception" << e);
1069 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1070 INFOS("Unknown exception !");
1073 if (CORBA::is_nil(container_proxy))
1075 INFOS("[StartParallelContainer] PaCO::InterfaceManager::_narrow returns NIL !");
1079 // Step 5 : starting parallel container nodes
1080 std::string command_nodes;
1081 Engines::MachineParameters params_nodes(params);
1082 SALOME_ContainerManager::actual_launch_machine_t nodes_machines;
1085 command_nodes = BuildCommandToLaunchParallelContainer("SALOME_ParallelContainerNode", params_nodes, nodes_machines, proxy_machine[0]);
1087 catch(const SALOME_Exception & ex)
1089 INFOS("[StartParallelContainer] Exception in BuildCommandToLaunchParallelContainer");
1093 std::string container_generic_node_name = _NS->ContainerName(params) + "Node";
1094 obj = LaunchParallelContainer(command_nodes, params_nodes, container_generic_node_name, nodes_machines);
1095 if (CORBA::is_nil(obj))
1097 INFOS("[StartParallelContainer] LaunchParallelContainer for nodes returns NIL !");
1098 // The proxy must be killed
1101 Engines::Container_var proxy = Engines::Container::_narrow(container_proxy);
1106 INFOS("[StartParallelContainer] Exception catched from proxy Shutdown...");
1111 // Step 6 : connecting nodes and the proxy to actually create a parallel container
1112 for (int i = 0; i < params.nb_proc; i++)
1114 std::ostringstream tmp;
1116 std::string proc_number = tmp.str();
1117 std::string container_node_name = container_generic_node_name + proc_number;
1119 std::string theNodeMachine(nodes_machines[i]);
1120 std::string containerNameInNS = _NS->BuildContainerNameForNS(container_node_name.c_str(), theNodeMachine.c_str());
1121 obj = _NS->Resolve(containerNameInNS.c_str());
1122 if (CORBA::is_nil(obj))
1124 INFOS("[StartParallelContainer] CONNECTION FAILED From Naming Service !");
1125 INFOS("[StartParallelContainer] Container name is " << containerNameInNS);
1130 MESSAGE("[StartParallelContainer] Deploying node : " << container_node_name);
1131 PaCO::InterfaceParallel_var node = PaCO::InterfaceParallel::_narrow(obj);
1133 MESSAGE("[StartParallelContainer] node " << container_node_name << " is deployed");
1135 catch(CORBA::SystemException& e)
1137 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1138 INFOS("CORBA::SystemException : " << e);
1141 catch(CORBA::Exception& e)
1143 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1144 INFOS("CORBA::Exception" << e);
1149 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1150 INFOS("Unknown exception !");
1155 // Step 7 : starting parallel container
1158 MESSAGE ("[StartParallelContainer] Starting parallel object");
1159 container_proxy->start();
1160 MESSAGE ("[StartParallelContainer] Parallel object is started");
1161 ret = Engines::Container::_narrow(container_proxy);
1163 catch(CORBA::SystemException& e)
1165 INFOS("Caught CORBA::SystemException. : " << e);
1167 catch(PortableServer::POA::ServantAlreadyActive&)
1169 INFOS("Caught CORBA::ServantAlreadyActiveException");
1171 catch(CORBA::Exception&)
1173 INFOS("Caught CORBA::Exception.");
1175 catch(std::exception& exc)
1177 INFOS("Caught std::exception - "<<exc.what());
1181 INFOS("Caught unknown exception.");
1187 //=============================================================================
1189 * Find or Start a suitable PaCO++ Parallel Container in a list of machines.
1190 * \param params Machine Parameters required for the container
1191 * \return CORBA container reference.
1193 //=============================================================================
// Fallback implementation used when the kernel is built without the PaCO++
// parallel extension: it only warns the user that the feature is disabled.
// ret is initialised to a nil reference (the return statement is below this
// excerpt — presumably `return ret;`, TODO confirm against full source).
1194 Engines::Container_ptr
1195 SALOME_ContainerManager::StartPaCOPPContainer(const Engines::ContainerParameters& params)
1197 Engines::Container_ptr ret = Engines::Container::_nil();
1198 INFOS("[StartParallelContainer] is disabled !");
1199 INFOS("[StartParallelContainer] recompile SALOME Kernel to enable parallel extension");
1204 #ifndef WITH_PACO_PARALLEL
// Stub compiled when the PaCO++ parallel extension is absent: same signature
// as the real LaunchParallelContainer, but it performs no launch and only
// produces a nil object reference.
1206 SALOME_ContainerManager::LaunchParallelContainer(const std::string& command,
1207 const Engines::ContainerParameters& params,
1208 const std::string& name,
1209 SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
1211 CORBA::Object_ptr obj = CORBA::Object::_nil();
1215 //=============================================================================
1216 /*! This method launches the parallel container.
1217 * It may be placed on the resources manager.
1219 * \param command to launch
1220 * \param container's parameters
1221 * \param name of the container
1223 * \return CORBA container reference
1225 //=============================================================================
// Launches the parallel container (proxy when params.nb_proc == 0, else the
// nodes) by running `command` through system(), then polls the naming service
// until the launched object(s) appear or the retry budget is exhausted.
// \param command       shell command built by BuildCommandToLaunchParallelContainer
// \param params        container parameters (nb_proc selects proxy vs nodes)
// \param name          base container name used for naming-service lookups
// \param vect_machine  machine assigned to each launched process
1227 SALOME_ContainerManager::LaunchParallelContainer(const std::string& command,
1228 const Engines::ContainerParameters& params,
1229 const std::string& name,
1230 SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
1232 CORBA::Object_ptr obj = CORBA::Object::_nil();
1233 std::string containerNameInNS;
// Retry budget for the resolve loops below (TIME_OUT_TO_LAUNCH_CONT).
1234 int count = TIME_OUT_TO_LAUNCH_CONT;
1236 INFOS("[LaunchParallelContainer] Begin");
1237 int status = system(command.c_str());
1239 INFOS("[LaunchParallelContainer] failed : system command status -1");
// NOTE(review): 217 is treated as a launch failure — presumably a launcher
// exit status; confirm its exact meaning against the full source.
1242 else if (status == 217) {
1243 INFOS("[LaunchParallelContainer] failed : system command status 217");
1247 if (params.nb_proc == 0)
1249 std::string theMachine(vect_machine[0]);
1250 // Proxy case: we have launched a proxy; wait for it in the naming service.
1251 containerNameInNS = _NS->BuildContainerNameForNS((char*) name.c_str(), theMachine.c_str());
1252 INFOS("[LaunchParallelContainer] Waiting for Parallel Container proxy " << containerNameInNS << " on " << theMachine);
1253 while (CORBA::is_nil(obj) && count)
1261 obj = _NS->Resolve(containerNameInNS.c_str());
1266 INFOS("[LaunchParallelContainer] launching the nodes of the parallel container");
1267 // We are waiting for all the nodes, one naming-service entry per process.
1268 for (int i = 0; i < params.nb_proc; i++)
1270 obj = CORBA::Object::_nil();
1271 std::string theMachine(vect_machine[i]);
1273 std::ostringstream tmp;
1275 std::string proc_number = tmp.str();
// Node names are the base name suffixed by the process number.
1276 std::string container_node_name = name + proc_number;
1277 containerNameInNS = _NS->BuildContainerNameForNS((char*) container_node_name.c_str(), theMachine.c_str());
1278 INFOS("[LaunchParallelContainer] Waiting for Parallel Container node " << containerNameInNS << " on " << theMachine);
1279 while (CORBA::is_nil(obj) && count) {
1286 obj = _NS->Resolve(containerNameInNS.c_str());
1288 if (CORBA::is_nil(obj))
1290 INFOS("[LaunchParallelContainer] Launch of node failed (or not found) !");
1295 if (CORBA::is_nil(obj))
1296 INFOS("[LaunchParallelContainer] failed");
1302 #ifndef WITH_PACO_PARALLEL
// Stub compiled when the PaCO++ parallel extension is absent: same signature
// as the real BuildCommandToLaunchParallelContainer, but builds no command.
1304 SALOME_ContainerManager::BuildCommandToLaunchParallelContainer(const std::string& exe_name,
1305 const Engines::ContainerParameters& params,
1306 SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
1307 const std::string proxy_hostname)
1312 //=============================================================================
1313 /*! Creates a command line that the container manager uses to launch
1314 * a parallel container.
1316 //=============================================================================
// Builds the shell command used to launch either the proxy or the nodes of a
// PaCO++ parallel container, locally or remotely (rsh/ssh), for the "Dummy"
// or "Mpi" parallel libraries. Also fills vect_machine with the machine
// assigned to each launched process.
// \param exe_name       executable base name; containing "Proxy" selects proxy mode
// \param params         container parameters (parallelLib, hostname, nb_proc, ...)
// \param vect_machine   out: one machine name per launched process
// \param proxy_hostname machine hosting the proxy (passed to node commands)
1318 SALOME_ContainerManager::BuildCommandToLaunchParallelContainer(const std::string& exe_name,
1319 const Engines::ContainerParameters& params,
1320 SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
1321 const std::string proxy_hostname)
1323 // This method knows the differences between the proxy and the nodes.
1324 // nb_proc is not used in the same way if it is a proxy or
// Debug helpers kept for convenience (disabled):
1327 //command = "gdb --args ";
1328 //command = "valgrind --tool=memcheck --log-file=val_log ";
1329 //command += real_exe_name;
1331 // Step 0 : init some variables...
1332 std::string parallelLib(CORBA::string_dup(params.parallelLib));
1333 std::string real_exe_name = exe_name + parallelLib;
1334 std::string machine_file_name("");
1335 bool remote = false;
1336 bool is_a_proxy = false;
1337 std::string hostname(CORBA::string_dup(params.hostname));
// Textual form of the requested process count, reused in the command lines.
1339 std::ostringstream tmp_string;
1340 CORBA::Long nb_nodes = params.nb_proc;
1341 tmp_string << nb_nodes;
1342 std::string nbproc = tmp_string.str();
// Copy the parameters into a MachineParameters struct: it is what
// _NS->ContainerName(rtn) consumes below to derive the container name.
1344 Engines::MachineParameters_var rtn = new Engines::MachineParameters();
1345 rtn->container_name = params.container_name;
1346 rtn->hostname = params.hostname;
1347 rtn->OS = params.OS;
1348 rtn->mem_mb = params.mem_mb;
1349 rtn->cpu_clock = params.cpu_clock;
1350 rtn->nb_proc_per_node = params.nb_proc_per_node;
1351 rtn->nb_node = params.nb_node;
1352 rtn->nb_proc = params.nb_proc;
1353 rtn->isMPI = params.isMPI;
1355 // Step 1 : local or remote launch ?
1356 if (hostname != std::string(Kernel_Utils::GetHostname()) )
1358 MESSAGE("[BuildCommandToLaunchParallelContainer] remote machine case detected !");
1362 // Step 2 : proxy or nodes launch ?
1363 std::string::size_type loc_proxy = exe_name.find("Proxy");
1364 if( loc_proxy != string::npos ) {
1368 // Step 3 : Depending on the parallelLib, getting the machine file
1369 // ParallelLib Dummy has its own machine file for this method
1374 machine_file_name = _ResManager->getMachineFile(hostname,
1380 machine_file_name = _ResManager->getMachineFile(hostname,
1384 if (machine_file_name == "")
1386 INFOS("[BuildCommandToLaunchParallelContainer] Error machine_file was not generated for machine " << hostname);
1387 throw SALOME_Exception("Error machine_file was not generated");
1389 MESSAGE("[BuildCommandToLaunchParallelContainer] machine_file_name is : " << machine_file_name);
1392 // Step 4 : Log type chosen by the user (PARALLEL_LOG environment variable)
1393 std::string log_env("");
1394 char * get_val = getenv("PARALLEL_LOG");
1397 std::string command_begin("");
1398 std::string command_end("");
1399 if(log_env == "xterm")
1401 command_begin = "/usr/X11R6/bin/xterm -e \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH; export PATH=$PATH;";
1402 command_end = "\"&";
1404 else if(log_env == "xterm_debug")
1406 command_begin = "/usr/X11R6/bin/xterm -e \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH; export PATH=$PATH;";
1407 command_end = "; cat \" &";
1411 // default into a file...
1412 std::string logFilename = "/tmp/" + _NS->ContainerName(params) + "_" + hostname;
1414 logFilename += "_Proxy_";
1416 logFilename += "_Node_";
// NOTE(review): getenv("USER") is dereferenced without a null check here —
// confirm USER is guaranteed in the launch environment.
1417 logFilename += std::string(getenv("USER")) + ".log";
1418 command_end = " > " + logFilename + " 2>&1 & ";
1421 // Step 5 : Building the command
1422 std::string command("");
1423 if (parallelLib == "Dummy")
// Proxy case: the first machine of the machine file hosts the proxy.
1427 std::string command_remote("");
1430 std::string machine_name;
1431 std::ifstream machine_file(machine_file_name.c_str());
1432 std::getline(machine_file, machine_name);
1433 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << machine_name)
1435 // We want to launch a command like :
1436 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1437 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(machine_name);
1438 if (resInfo.Protocol == rsh)
1439 command_remote = "rsh ";
1441 command_remote = "ssh ";
1442 command_remote += "-l ";
1443 command_remote += resInfo.UserName;
1444 command_remote += " ";
1445 command_remote += machine_name;
1446 command_remote += " ";
1447 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1448 command_remote += "/runRemote.sh ";
1449 ASSERT(getenv("NSHOST"));
1450 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1451 command_remote += " ";
1452 ASSERT(getenv("NSPORT"));
1453 command_remote += getenv("NSPORT"); // port of CORBA name server
1454 command_remote += " ";
1456 hostname = machine_name;
1459 command = real_exe_name;
1460 command += " " + _NS->ContainerName(rtn);
1461 command += " " + parallelLib;
1462 command += " " + hostname;
1463 command += " " + nbproc;
1465 AddOmninamesParams(command);
1467 command = command_begin + command_remote + command + command_end;
1468 vect_machine.push_back(hostname);
// Nodes case: one launch command per node, each read from the machine file.
1472 std::ifstream * machine_file = NULL;
1474 machine_file = new std::ifstream(machine_file_name.c_str());
1475 for (int i= 0; i < nb_nodes; i++)
1477 std::string command_remote("");
1480 std::string machine_name;
1481 std::getline(*machine_file, machine_name);
1482 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << machine_name)
1484 // We want to launch a command like :
1485 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1486 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(machine_name);
1487 if (resInfo.Protocol == rsh)
1488 command_remote = "rsh ";
1490 command_remote = "ssh ";
1491 command_remote += "-l ";
1492 command_remote += resInfo.UserName;
1493 command_remote += " ";
1494 command_remote += machine_name;
1495 command_remote += " ";
1496 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1497 command_remote += "/runRemote.sh ";
1498 ASSERT(getenv("NSHOST"));
1499 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1500 command_remote += " ";
1501 ASSERT(getenv("NSPORT"));
1502 command_remote += getenv("NSPORT"); // port of CORBA name server
1503 command_remote += " ";
1505 hostname = machine_name;
1508 std::ostringstream tmp;
1510 std::string proc_number = tmp.str();
1512 std::string command_tmp("");
1513 command_tmp += real_exe_name;
1514 command_tmp += " " + _NS->ContainerName(rtn);
1515 command_tmp += " " + parallelLib;
1516 command_tmp += " " + proxy_hostname;
1517 command_tmp += " " + proc_number;
1518 command_tmp += " -";
1519 AddOmninamesParams(command_tmp);
1521 // Replace "_Node_" with "_Node<i>_" so each node gets its own log file.
1523 std::string command_end_tmp = command_end;
1524 std::string::size_type loc_node = command_end_tmp.find("_Node_");
1525 if (loc_node != std::string::npos)
1526 command_end_tmp.insert(loc_node+5, proc_number);
1527 command += command_begin + command_remote + command_tmp + command_end_tmp;
1528 vect_machine.push_back(hostname);
1531 delete machine_file;
1534 else if (parallelLib == "Mpi")
1536 // Step 0: if remote we have to copy the file
1537 // to the first machine of the file
1538 std::string remote_machine("");
1541 std::ifstream * machine_file = NULL;
1542 machine_file = new std::ifstream(machine_file_name.c_str());
1543 // Get first word of the line
1544 // For MPI implementation the first word is the
1546 std::getline(*machine_file, remote_machine, ' ');
1547 machine_file->close();
// NOTE(review): machine_file is closed but apparently never deleted in this
// branch — possible memory leak; confirm against the full source.
1548 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << remote_machine)
1550 // We want to launch a command like :
1551 // scp mpi_machine_file user@machine:Path
1552 std::string command_remote("");
1553 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1554 if (resInfo.Protocol == rsh)
1555 command_remote = "rcp ";
1557 command_remote = "scp ";
1559 command_remote += machine_file_name;
1560 command_remote += " ";
1561 command_remote += resInfo.UserName;
1562 command_remote += "@";
1563 command_remote += remote_machine;
1564 command_remote += ":";
1565 command_remote += machine_file_name;
1567 int status = system(command_remote.c_str());
1570 INFOS("copy of the mpi machine file failed !");
// Proxy case: launched via a Dummy proxy on the first machine of the file.
1577 std::string command_remote("");
1580 // We want to launch a command like :
1581 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1582 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1583 if (resInfo.Protocol == rsh)
1584 command_remote = "rsh ";
1586 command_remote = "ssh ";
1587 command_remote += "-l ";
1588 command_remote += resInfo.UserName;
1589 command_remote += " ";
1590 command_remote += remote_machine;
1591 command_remote += " ";
1592 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1593 command_remote += "/runRemote.sh ";
1594 ASSERT(getenv("NSHOST"));
1595 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1596 command_remote += " ";
1597 ASSERT(getenv("NSPORT"));
1598 command_remote += getenv("NSPORT"); // port of CORBA name server
1599 command_remote += " ";
1601 hostname = remote_machine;
1604 // We use Dummy proxy for MPI parallel containers
1605 real_exe_name = exe_name + "Dummy";
1606 command = real_exe_name;
1607 command += " " + _NS->ContainerName(rtn);
1608 command += " Dummy";
1609 command += " " + hostname;
1610 command += " " + nbproc;
1612 AddOmninamesParams(command);
1614 command = command_begin + command_remote + command + command_end;
1615 vect_machine.push_back(hostname);
// Nodes case: the MPI launcher starts all nb_nodes processes at once.
1619 std::string command_remote("");
1622 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1623 if (resInfo.Protocol == rsh)
1624 command_remote = "rsh ";
1626 command_remote = "ssh ";
1627 command_remote += "-l ";
1628 command_remote += resInfo.UserName;
1629 command_remote += " ";
1630 command_remote += remote_machine;
1631 command_remote += " ";
// The remote executable is wrapped by runRemote.sh with the name server
// coordinates prepended.
1633 std::string new_real_exe_name("");
1634 new_real_exe_name += resInfo.AppliPath; // path relative to user@machine $HOME
1635 new_real_exe_name += "/runRemote.sh ";
1636 ASSERT(getenv("NSHOST"));
1637 new_real_exe_name += getenv("NSHOST"); // hostname of CORBA name server
1638 new_real_exe_name += " ";
1639 ASSERT(getenv("NSPORT"));
1640 new_real_exe_name += getenv("NSPORT"); // port of CORBA name server
1641 new_real_exe_name += " ";
1643 real_exe_name = new_real_exe_name + real_exe_name;
1644 hostname = remote_machine;
// Pick the MPI launcher flavour from the resource description (lam vs other).
1647 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(hostname);
1648 if (resInfo.mpi == lam)
1650 command = "mpiexec -ssi boot ";
1651 if (resInfo.Protocol == rsh)
1655 command += "-machinefile " + machine_file_name + " ";
1656 command += "-n " + nbproc + " ";
1657 command += real_exe_name;
1658 command += " " + _NS->ContainerName(rtn);
1659 command += " " + parallelLib;
1660 command += " " + proxy_hostname;
1662 AddOmninamesParams(command);
1666 command = "mpirun -np " + nbproc + " ";
1667 command += real_exe_name;
1668 command += " " + _NS->ContainerName(rtn);
1669 command += " " + parallelLib;
1670 command += " " + proxy_hostname;
1672 AddOmninamesParams(command);
1675 command = command_begin + command_remote + command + command_end;
// All MPI nodes are reported as running on the proxy's machine.
1676 for (int i= 0; i < nb_nodes; i++)
1677 vect_machine.push_back(proxy_hostname);
1682 std::string message("Unknown parallelLib : " + parallelLib);
1683 throw SALOME_Exception(message.c_str());
1686 MESSAGE("Parallel launch is: " << command);