1 // Copyright (C) 2007-2008 CEA/DEN, EDF R&D, OPEN CASCADE
3 // Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
4 // CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
6 // This library is free software; you can redistribute it and/or
7 // modify it under the terms of the GNU Lesser General Public
8 // License as published by the Free Software Foundation; either
9 // version 2.1 of the License.
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 // Lesser General Public License for more details.
16 // You should have received a copy of the GNU Lesser General Public
17 // License along with this library; if not, write to the Free Software
18 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
22 #include "SALOME_ContainerManager.hxx"
23 #include "SALOME_NamingService.hxx"
24 #include "SALOME_ModuleCatalog.hh"
25 #include "Basics_Utils.hxx"
26 #include "Basics_DirUtils.hxx"
27 #include <sys/types.h>
33 #include "Utils_CorbaException.hxx"
36 #ifdef WITH_PACO_PARALLEL
40 #define TIME_OUT_TO_LAUNCH_CONT 61
44 const char *SALOME_ContainerManager::_ContainerManagerNameInNS =
47 omni_mutex SALOME_ContainerManager::_numInstanceMutex;
50 //=============================================================================
54  * Define a CORBA single-thread policy for the server, which avoids dealing
55  * with non-thread-safe usage such as Change_Directory in the SALOME naming service
57 //=============================================================================
// Constructor.
// Creates a dedicated POA ("SThreadPOA") with a SINGLE_THREAD_MODEL policy,
// activates this servant on it, registers the resulting reference in the
// naming service under _ContainerManagerNameInNS, and — when OMPI_URI_FILE is
// set — (re)starts an ompi-server used later to connect MPI containers.
// NOTE(review): this listing elides lines; comments describe only visible code.
59 SALOME_ContainerManager::SALOME_ContainerManager(CORBA::ORB_ptr orb, PortableServer::POA_var poa, SALOME_ResourcesManager *rm, SALOME_NamingService *ns):_nbprocUsed(0)
61 MESSAGE("constructor");
65 PortableServer::POAManager_var pman = poa->the_POAManager();
66 _orb = CORBA::ORB::_duplicate(orb) ;
67 CORBA::PolicyList policies;
// Single-thread policy: serializes requests so non-thread-safe calls
// (e.g. naming-service Change_Directory) are never made concurrently.
69 PortableServer::ThreadPolicy_var threadPol =
70 poa->create_thread_policy(PortableServer::SINGLE_THREAD_MODEL);
71 policies[0] = PortableServer::ThreadPolicy::_duplicate(threadPol);
73 _poa = poa->create_POA("SThreadPOA",pman,policies);
// Activate this servant and publish its reference in the naming service.
75 PortableServer::ObjectId_var id = _poa->activate_object(this);
76 CORBA::Object_var obj = _poa->id_to_reference(id);
77 Engines::ContainerManager_var refContMan =
78 Engines::ContainerManager::_narrow(obj);
80 _NS->Register(refContMan,_ContainerManagerNameInNS);
// $APPLI set => running inside a SALOME application directory (remote-launch mode).
81 _isAppliSalomeDefined = (getenv("APPLI") != 0);
// OpenMPI support: kill any stale ompi-server and start a fresh one whose
// URI is written to the file named by OMPI_URI_FILE.
85 if( getenv("OMPI_URI_FILE") != NULL ){
86 system("killall ompi-server");
88 command = "ompi-server -r ";
89 command += getenv("OMPI_URI_FILE");
90 int status=system(command.c_str());
// status test elided in this listing; a failed launch raises here.
92 throw SALOME_Exception("Error when launching ompi-server");
97 MESSAGE("constructor end");
100 //=============================================================================
104 //=============================================================================
// Destructor: kills the ompi-server launched by the constructor, if any.
// (POA deactivation is done in Shutdown(), not here.)
106 SALOME_ContainerManager::~SALOME_ContainerManager()
108 MESSAGE("destructor");
111 if( getenv("OMPI_URI_FILE") != NULL )
112 system("killall ompi-server");
117 //=============================================================================
118 //! shutdown all the containers, then the ContainerManager servant
121 //=============================================================================
// Shut down all containers, unregister this manager from the naming
// service, then deactivate the servant on its POA.
123 void SALOME_ContainerManager::Shutdown()
126 ShutdownContainers();
127 _NS->Destroy_Name(_ContainerManagerNameInNS);
128 PortableServer::ObjectId_var oid = _poa->servant_to_id(this);
129 _poa->deactivate_object(oid);
132 //=============================================================================
133 //! Loop on all the containers listed in naming service, ask shutdown on each
136 //=============================================================================
// Walk the "/Containers" branch of the naming service, collect every entry
// that narrows to an Engines::Container, then ask each one to shut down.
// All CORBA errors are logged and ignored so one dead container cannot
// block the shutdown of the others.
// NOTE(review): this listing elides lines; comments describe only visible code.
138 void SALOME_ContainerManager::ShutdownContainers()
140 MESSAGE("ShutdownContainers");
142 isOK = _NS->Change_Directory("/Containers");
144 vector<string> vec = _NS->list_directory_recurs();
145 list<string> lstCont;
// Pass 1: keep only the entries that actually resolve to a Container.
146 for(vector<string>::iterator iter = vec.begin();iter!=vec.end();iter++)
149 CORBA::Object_var obj=_NS->Resolve((*iter).c_str());
152 Engines::Container_var cont=Engines::Container::_narrow(obj);
153 if(!CORBA::is_nil(cont))
154 lstCont.push_back((*iter));
156 catch(const CORBA::Exception& e)
158 // ignore this entry and continue with the next one
161 MESSAGE("Container list: ");
162 for(list<string>::iterator iter=lstCont.begin();iter!=lstCont.end();iter++){
// Pass 2: resolve each kept entry again and request its shutdown.
165 for(list<string>::iterator iter=lstCont.begin();iter!=lstCont.end();iter++)
170 CORBA::Object_var obj=_NS->Resolve((*iter).c_str());
171 Engines::Container_var cont=Engines::Container::_narrow(obj);
172 if(!CORBA::is_nil(cont))
174 MESSAGE("ShutdownContainers: " << (*iter));
178 MESSAGE("ShutdownContainers: no container ref for " << (*iter));
// A container that died between the two passes must not abort the loop.
180 catch(CORBA::SystemException& e)
182 INFOS("CORBA::SystemException ignored : " << e);
184 catch(CORBA::Exception&)
186 INFOS("CORBA::Exception ignored.");
190 INFOS("Unknown exception ignored.");
196 //=============================================================================
197 //! Give a suitable Container given constraints
199 * \param params Container Parameters required for the container
200 * \return the container or nil
202 //=============================================================================
// Give a suitable container for the requested parameters.
// Supported modes (params.mode): "find", "findorstart", "get", "getorstart",
// "start" (the default when mode is empty). Depending on the mode the method
// either returns an already-registered container or launches a new one via a
// generated command line, then waits (up to TIME_OUT_TO_LAUNCH_CONT steps)
// for it to register in the naming service.
// Returns the container reference, or Engines::Container::_nil() on failure.
// NOTE(review): this listing elides lines; comments describe only visible code.
203 Engines::Container_ptr
204 SALOME_ContainerManager::GiveContainer(const Engines::ContainerParameters& params)
207 Engines::Container_ptr ret = Engines::Container::_nil();
209 // Step 0: Default mode is start
210 Engines::ContainerParameters local_params(params);
211 if (std::string(local_params.mode.in()) == "")
212 local_params.mode = CORBA::string_dup("start");
213 std::string mode = local_params.mode.in();
214 MESSAGE("[GiveContainer] starting with mode: " << mode);
216 // Step 1: Find Container for find and findorstart mode
217 if (mode == "find" or mode == "findorstart")
219 ret = FindContainer(params, params.resource_params.resList);
220 if(!CORBA::is_nil(ret))
226 MESSAGE("[GiveContainer] no container found");
236 // Step 2: Get all possibleResources from the parameters
237 Engines::ResourceList_var possibleResources = _ResManager->GetFittingResources(local_params.resource_params);
238 MESSAGE("[GiveContainer] - length of possible resources " << possibleResources->length());
239 std::vector<std::string> local_resources;
241 // Step 3: if mode is "get" keep only machines with existing containers
244 for(unsigned int i=0; i < possibleResources->length(); i++)
246 Engines::Container_ptr cont = FindContainer(params, possibleResources[i].in());
// _non_existent() pings the object: keep the resource only if its
// registered container is still alive.
249 if(!cont->_non_existent())
250 local_resources.push_back(string(possibleResources[i]));
252 catch(CORBA::Exception&) {}
255 // if local_resources is empty, we cannot give a container
256 if (local_resources.size() == 0)
258 MESSAGE("[GiveContainer] cannot find a container for mode get");
// Non-"get" modes: every fitting resource is a candidate.
263 for(unsigned int i=0; i < possibleResources->length(); i++)
264 local_resources.push_back(string(possibleResources[i]));
266 // Step 4: select the resource where to get/start the container
267 std::string resource_selected;
270 resource_selected = _ResManager->GetImpl()->Find(params.resource_params.policy.in(), local_resources);
272 catch(const SALOME_Exception &ex)
274 MESSAGE("[GiveContainer] Exception in ResourceManager find !: " << ex.what());
277 MESSAGE("[GiveContainer] Resource selected is: " << resource_selected);
// Serialize the check-then-launch sequence: only one container may be
// started for a given name at a time.
279 _numInstanceMutex.lock();
281 // Step 5: get container in the naming service
282 Engines::ResourceDefinition_var resource_definition = _ResManager->GetResourceDefinition(resource_selected.c_str());
283 std::string hostname(resource_definition->hostname.in());
284 std::string containerNameInNS;
// nbproc for MPI containers: derived from nb_node / nb_proc_per_node
// (either may be unset; 0 or negative means "not specified").
287 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
289 else if ( params.resource_params.nb_node == 0 )
290 nbproc = params.resource_params.nb_proc_per_node;
291 else if ( params.resource_params.nb_proc_per_node == 0 )
292 nbproc = params.resource_params.nb_node;
294 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
// In a batch job, LIBBATCH_NODEFILE lists the allocated nodes.
295 if( getenv("LIBBATCH_NODEFILE") != NULL )
296 machFile = machinesFile(nbproc);
297 // An MPI parallel container registers under its rank-zero node in the NS.
298 containerNameInNS = _NS->BuildContainerNameForNS(params, GetMPIZeroNode(hostname,machFile).c_str());
301 containerNameInNS = _NS->BuildContainerNameForNS(params, hostname.c_str());
302 MESSAGE("[GiveContainer] Container name in the naming service: " << containerNameInNS);
304 // Step 6: check if the name exists in naming service
305 //if params.mode == "getorstart" or "get" use the existing container
306 //if params.mode == "start" shutdown the existing container before launching a new one with that name
307 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
308 if (!CORBA::is_nil(obj))
312 Engines::Container_var cont=Engines::Container::_narrow(obj);
313 if(!cont->_non_existent())
315 if(std::string(params.mode.in())=="getorstart" or std::string(params.mode.in())=="get"){
// NOTE(review): early return while _numInstanceMutex is held above —
// verify the elided lines release the lock on this path.
316 return cont._retn(); /* the container exists and params.mode is getorstart or get use it*/
320 INFOS("[GiveContainer] A container is already registered with the name: " << containerNameInNS << ", shutdown the existing container");
321 cont->Shutdown(); // shutdown the registered container if it exists
// A stale NS entry (dead container) is simply ignored; we start anew.
325 catch(CORBA::Exception&)
327 INFOS("[GiveContainer] CORBA::Exception ignored when trying to get the container - we start a new one");
331 // Step 7: type of container: PaCO, Exe, Mpi or Classic
332 // Mpi already tested in step 5, specific code on BuildCommandToLaunch Local/Remote Container methods
333 // TODO -> separates Mpi from Classic/Exe
335 std::string parallelLib(params.parallelLib);
336 if (std::string(local_params.parallelLib.in()) != "")
338 INFOS("[GiveContainer] PaCO++ container are not currently available");
339 _numInstanceMutex.unlock();
343 std::string container_exe = "SALOME_Container"; // Classic container
// Consult the module catalog: a component implemented as an executable
// (CEXE) imposes its own container executable name.
347 CORBA::String_var container_exe_tmp;
348 CORBA::Object_var obj = _NS->Resolve("/Kernel/ModulCatalog");
349 SALOME_ModuleCatalog::ModuleCatalog_var Catalog = SALOME_ModuleCatalog::ModuleCatalog::_narrow(obj) ;
350 if (CORBA::is_nil (Catalog))
352 INFOS("[GiveContainer] Module Catalog is not found -> cannot launch a container");
353 _numInstanceMutex.unlock();
356 // Loop through component list
357 for(unsigned int i=0; i < local_params.resource_params.componentList.length(); i++)
359 const char* compoi = local_params.resource_params.componentList[i];
360 SALOME_ModuleCatalog::Acomponent_var compoInfo = Catalog->GetComponent(compoi);
361 if (CORBA::is_nil (compoInfo))
// Unknown component: skip it (elided branch).
365 SALOME_ModuleCatalog::ImplType impl=compoInfo->implementation_type();
366 container_exe_tmp=compoInfo->implementation_name();
367 if(impl==SALOME_ModuleCatalog::CEXE)
// Two CEXE components cannot share one container: each needs its own exe.
371 INFOS("ContainerManager Error: you can't have 2 CEXE component in the same container" );
372 _numInstanceMutex.unlock();
373 return Engines::Container::_nil();
375 MESSAGE("[GiveContainer] Exe container found !: " << container_exe_tmp);
376 container_exe = container_exe_tmp.in();
381 catch (ServiceUnreachable&)
383 INFOS("Caught exception: Naming Service Unreachable");
384 _numInstanceMutex.unlock();
389 INFOS("Caught unknown exception.");
390 _numInstanceMutex.unlock();
394 // Step 8: start a new container
395 MESSAGE("[GiveContainer] Try to launch a new container on " << resource_selected);
397 // if a parallel container is launched in batch job, command is: "mpirun -np nbproc -machinefile nodesfile SALOME_MPIContainer"
398 if( getenv("LIBBATCH_NODEFILE") != NULL && params.isMPI )
399 command = BuildCommandToLaunchLocalContainer(params, machFile, container_exe);
400 // if a container is launched on localhost, command is "SALOME_Container" or "mpirun -np nbproc SALOME_MPIContainer"
401 else if(hostname == Kernel_Utils::GetHostname())
402 command = BuildCommandToLaunchLocalContainer(params, machFile, container_exe);
403 // if a container is launched in remote mode, command is "ssh resource_selected SALOME_Container" or "ssh resource_selected mpirun -np nbproc SALOME_MPIContainer"
405 command = BuildCommandToLaunchRemoteContainer(resource_selected, params, container_exe);
407 //redirect stdout and stderr in a file
// Windows branch: use %TEMP%; Unix branch below defaults to /tmp,
// optionally overridden by SALOME_TMP_DIR when it names a directory.
409 string logFilename=getenv("TEMP");
412 string logFilename="/tmp";
413 char* val = getenv("SALOME_TMP_DIR");
416 struct stat file_info;
417 stat(val, &file_info);
418 bool is_dir = S_ISDIR(file_info.st_mode);
419 if (is_dir)logFilename=val;
420 else std::cerr << "SALOME_TMP_DIR environment variable is not a directory use /tmp instead" << std::endl;
// Log file: <dir>/<containerName>_<resource>_<user>.log
424 logFilename += _NS->ContainerName(params)+"_"+ resource_selected +"_"+getenv( "USER" )+".log" ;
425 command += " > " + logFilename + " 2>&1";
// Windows: detach the process via win32pm so system() does not block.
427 command = "%PYTHONBIN% -c \"import win32pm ; win32pm.spawnpid(r'" + command + "', '')\"";
432 // launch container with a system call
433 int status=system(command.c_str());
435 _numInstanceMutex.unlock();
// status == -1: system() itself failed (fork/shell error).
438 MESSAGE("SALOME_ContainerManager::StartContainer rsh failed (system command status -1)");
439 RmTmpFile(_TmpFileName); // command file can be removed here
440 return Engines::Container::_nil();
// 217 is the observed shell exit status for a failed remote launch.
442 else if (status == 217){
443 MESSAGE("SALOME_ContainerManager::StartContainer rsh failed (system command status 217)");
444 RmTmpFile(_TmpFileName); // command file can be removed here
445 return Engines::Container::_nil();
// Poll the naming service until the new container registers itself,
// or give up after TIME_OUT_TO_LAUNCH_CONT one-second steps.
449 int count = TIME_OUT_TO_LAUNCH_CONT;
450 MESSAGE("[GiveContainer] waiting " << count << " second steps");
451 while (CORBA::is_nil(ret) && count)
459 MESSAGE("[GiveContainer] step " << count << " Waiting for container on " << resource_selected);
460 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
461 ret=Engines::Container::_narrow(obj);
463 if (CORBA::is_nil(ret))
465 INFOS("[GiveContainer] was not able to launch container " << containerNameInNS);
469 // Setting log file name
// Final form: user@host:path — built by successive prepends.
470 logFilename=":"+logFilename;
471 logFilename="@"+Kernel_Utils::GetHostname()+logFilename;
472 logFilename=getenv( "USER" )+logFilename;
473 ret->logfilename(logFilename.c_str());
474 RmTmpFile(_TmpFileName); // command file can be removed here
480 //=============================================================================
481 //! Find a container given constraints (params) on a list of machines (possibleComputers)
485 //=============================================================================
// Find a container matching params on any of the given resources.
// Delegates to the single-resource overload for each entry and returns the
// first non-nil reference found, or _nil() when none matches.
487 Engines::Container_ptr
488 SALOME_ContainerManager::FindContainer(const Engines::ContainerParameters& params,
489                                        const Engines::ResourceList& possibleResources)
491 MESSAGE("[FindContainer] FindContainer on " << possibleResources.length() << " resources");
492 for(unsigned int i=0; i < possibleResources.length();i++)
494 Engines::Container_ptr cont = FindContainer(params, possibleResources[i].in());
495 if(!CORBA::is_nil(cont))
498 MESSAGE("[FindContainer] no container found");
499 return Engines::Container::_nil();
502 //=============================================================================
503 //! Find a container given constraints (params) on a machine (theMachine)
507 //=============================================================================
// Find a container matching params on a single resource.
// Builds the container's naming-service name from the resource's hostname,
// resolves it, and pings it (_non_existent) to reject stale registrations.
// Returns _nil() when absent, dead, or on any CORBA error.
509 Engines::Container_ptr
510 SALOME_ContainerManager::FindContainer(const Engines::ContainerParameters& params,
511                                        const std::string& resource)
513 Engines::ResourceDefinition_var resource_definition = _ResManager->GetResourceDefinition(resource.c_str());
514 std::string hostname(resource_definition->hostname.in());
515 std::string containerNameInNS(_NS->BuildContainerNameForNS(params, hostname.c_str()));
516 MESSAGE("[FindContainer] Try to find a container " << containerNameInNS << " on resource " << resource);
517 CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
// _non_existent() may throw for an unreachable object; treated as "not found".
520 if(obj->_non_existent())
521 return Engines::Container::_nil();
523 return Engines::Container::_narrow(obj);
525 catch(const CORBA::Exception& e)
527 return Engines::Container::_nil();
531 //=============================================================================
533 * This is no longer valid (C++ container are also python containers)
535 //=============================================================================
// Heuristic: a container whose name ends with "Py" is a Python container.
// (Per the comment above, this distinction is no longer valid — C++
// containers are also Python containers — but callers still use it.)
// NOTE(review): names shorter than 2 chars would index before the buffer
// in the visible comparison — presumably guarded by elided lines; verify.
536 bool isPythonContainer(const char* ContainerName)
539 int len = strlen(ContainerName);
542 if (strcmp(ContainerName + len - 2, "Py") == 0)
548 //=============================================================================
550 * Builds the script to be launched
552 * If SALOME Application not defined ($APPLI),
553 * see BuildTempFileToLaunchRemoteContainer()
555 * Else rely on distant configuration. Command is under the form (example):
556 * ssh user@machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
557 * SALOME_Container containerName &"
559  * - where user is omitted if not specified in CatalogResources,
560 * - where distant path is always relative to user@machine $HOME, and
561 * equal to $APPLI if not specified in CatalogResources,
562 * - where hostNS is the hostname of CORBA naming server (set by scripts to
563 * use to launch SALOME and servers in $APPLI: runAppli.sh, runRemote.sh)
564 * - where portNS is the port used by CORBA naming server (set by scripts to
565 * use to launch SALOME and servers in $APPLI: runAppli.sh, runRemote.sh)
566 * - where workingdir is the requested working directory for the container.
567 * If WORKINGDIR (and workingdir) is not present the working dir will be $HOME
569 //=============================================================================
// Build the shell command that launches a container on a remote resource.
// Without a SALOME application ($APPLI unset) it falls back to
// BuildTempFileToLaunchRemoteContainer(); otherwise it assembles:
//   (rsh|ssh) [-l user] host <appliPath>/runRemote.sh NSHOST NSPORT
//       [WORKINGDIR dir] [mpirun -np N ...] <container_exe> <name> <NS args>
// NOTE(review): this listing elides lines; comments describe only visible code.
572 SALOME_ContainerManager::BuildCommandToLaunchRemoteContainer
573 (const string& resource_name,
574  const Engines::ContainerParameters& params, const std::string& container_exe)
578 if (!_isAppliSalomeDefined)
579 command = BuildTempFileToLaunchRemoteContainer(resource_name, params);
583 Engines::ResourceDefinition_var resource_definition = _ResManager->GetResourceDefinition(resource_name.c_str());
584 std::string hostname(resource_definition->hostname.in());
585 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesDescr(resource_name);
// nbproc computation mirrors GiveContainer's step 5.
589 if ((params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0))
591 else if (params.resource_params.nb_node == 0)
592 nbproc = params.resource_params.nb_proc_per_node;
593 else if (params.resource_params.nb_proc_per_node == 0)
594 nbproc = params.resource_params.nb_node;
596 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
599 // "ssh -l user machine distantPath/runRemote.sh hostNS portNS WORKINGDIR workingdir \
600 //  SALOME_Container containerName &"
601 if (resInfo.Protocol == rsh)
603 else if (resInfo.Protocol == ssh)
606 throw SALOME_Exception("Unknown protocol");
// Optional remote user name from CatalogResources.
608 if (resInfo.UserName != "")
611 command += resInfo.UserName;
615 command += resInfo.HostName;
// Remote application path, relative to the remote $HOME; defaults to $APPLI.
618 if (resInfo.AppliPath != "")
619 command += resInfo.AppliPath; // path relative to user@machine $HOME
622 ASSERT(getenv("APPLI"));
623 command += getenv("APPLI"); // path relative to user@machine $HOME
626 command += "/runRemote.sh ";
// NSHOST/NSPORT are exported by runAppli.sh/runRemote.sh.
628 ASSERT(getenv("NSHOST"));
629 command += getenv("NSHOST"); // hostname of CORBA name server
632 ASSERT(getenv("NSPORT"));
633 command += getenv("NSPORT"); // port of CORBA name server
635 std::string wdir = params.workingdir.in();
638 command += " WORKINGDIR ";
// "$TEMPDIR" asks the remote side to create a fresh temporary directory.
640 if(wdir == "$TEMPDIR")
642 command += wdir; // requested working directory
// MPI container: wrap in mpirun; environment forwarding differs per MPI impl.
648 command += " mpirun -np ";
649 std::ostringstream o;
653 command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
654 #elif defined(WITHOPENMPI)
// With OpenMPI, connect to the ompi-server started by the constructor
// when OMPI_URI_FILE is set.
655 if( getenv("OMPI_URI_FILE") == NULL )
656 command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
658 command += "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
659 command += getenv("OMPI_URI_FILE");
662 command += " SALOME_MPIContainer ";
665 command += " " +container_exe+ " ";
667 command += _NS->ContainerName(params);
// Append "ORBInitRef NameService=<IOR>" so the container finds the NS.
669 AddOmninamesParams(command);
671 MESSAGE("command =" << command);
677 //=============================================================================
679 * builds the command to be launched.
681 //=============================================================================
// Build the command that launches a container on the local host.
// The command line is written to a temporary script file (_TmpFileName),
// made executable, and the script path is returned as the command.
// Handles MPI containers (mpirun wrapper, optional batch machinefile) and
// an optional working directory ("$TEMPDIR" => fresh temp dir).
// NOTE(review): this listing elides lines; comments describe only visible code.
683 SALOME_ContainerManager::BuildCommandToLaunchLocalContainer
684 (const Engines::ContainerParameters& params, const std::string& machinesFile, const std::string& container_exe)
686 _TmpFileName = BuildTemporaryFileName();
// nbproc computation mirrors GiveContainer's step 5.
696 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
698 else if ( params.resource_params.nb_node == 0 )
699 nbproc = params.resource_params.nb_proc_per_node;
700 else if ( params.resource_params.nb_proc_per_node == 0 )
701 nbproc = params.resource_params.nb_node;
703 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
// In batch mode, pin MPI processes to the nodes allocated by the scheduler.
707 if( getenv("LIBBATCH_NODEFILE") != NULL )
708 o << "-machinefile " << machinesFile << " ";
711 o << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
712 #elif defined(WITHOPENMPI)
713 if( getenv("OMPI_URI_FILE") == NULL )
714 o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
717 o << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
718 o << getenv("OMPI_URI_FILE");
722 if (isPythonContainer(params.container_name))
723 o << " pyMPI SALOME_ContainerPy.py ";
725 o << " SALOME_MPIContainer ";
730 std::string wdir=params.workingdir.in();
733 // a working directory is requested
734 if(wdir == "$TEMPDIR")
736 // a new temporary directory is requested
737 string dir = Kernel_Utils::GetTmpDir();
// "cd /d" is the Windows form (drive-aware); plain "cd" on Unix.
739 o << "cd /d " << dir << endl;
741 o << "cd " << dir << ";";
747 // a permanent directory is requested: use it or create it
749 o << "mkdir " + wdir << endl;
750 o << "cd /D " + wdir << endl;
// Unix: create the directory tree and fail the launch if cd fails.
752 o << "mkdir -p " << wdir << " && cd " << wdir + ";";
756 if (isPythonContainer(params.container_name))
757 o << "SALOME_ContainerPy.py ";
759 o << container_exe + " ";
763 o << _NS->ContainerName(params);
// Append "ORBInitRef NameService=<IOR>" so the container finds the NS.
765 AddOmninamesParams(o);
767 ofstream command_file( _TmpFileName.c_str() );
768 command_file << o.str();
769 command_file.close();
// 0x1ED == 0755: make the generated script executable.
772 chmod(_TmpFileName.c_str(), 0x1ED);
774 command = _TmpFileName;
776 MESSAGE("Command is file ... " << command);
777 MESSAGE("Command is ... " << o.str());
782 //=============================================================================
784 * removes the generated temporary file in case of a remote launch.
786 //=============================================================================
// Remove the temporary command file generated for a (remote) launch, and
// the containing temporary directory when it has become empty.
// Uses shell commands (del/rm, rmdir) rather than filesystem APIs.
// NOTE(review): "lenght" is a typo for "length" (local identifier; kept as-is).
788 void SALOME_ContainerManager::RmTmpFile(std::string& tmpFileName)
790 int lenght = tmpFileName.size();
794 string command = "del /F ";
796 string command = "rm ";
// substr(0, lenght-3): strips a 3-char suffix before deletion —
// presumably a platform-specific extension; confirm against elided lines.
799 command += tmpFileName.substr(0, lenght - 3 );
801 command += tmpFileName;
803 system(command.c_str());
804 //if dir is empty - remove it
805 string tmp_dir = Kernel_Utils::GetDirByPath( tmpFileName );
806 if ( Kernel_Utils::IsEmptyDir( tmp_dir ) )
809 command = "del /F " + tmp_dir;
811 command = "rmdir " + tmp_dir;
813 system(command.c_str());
818 //=============================================================================
820 * add to command all options relative to naming service.
822 //=============================================================================
// Append the naming-service option ("ORBInitRef NameService=<IOR>")
// to a command string, using the NS IOR address from _NS.
824 void SALOME_ContainerManager::AddOmninamesParams(string& command) const
826 CORBA::String_var iorstr = _NS->getIORaddr();
827 command += "ORBInitRef NameService=";
831 //=============================================================================
833 * add to command all options relative to naming service.
835 //=============================================================================
// Same as the string overload, writing the naming-service option
// ("ORBInitRef NameService=<IOR>") into a command script file stream.
837 void SALOME_ContainerManager::AddOmninamesParams(ofstream& fileStream) const
839 CORBA::String_var iorstr = _NS->getIORaddr();
840 fileStream << "ORBInitRef NameService=";
841 fileStream << iorstr;
844 //=============================================================================
846 * add to command all options relative to naming service.
848 //=============================================================================
// Same as the string overload, writing the naming-service option
// ("ORBInitRef NameService=<IOR>") into an output string stream.
850 void SALOME_ContainerManager::AddOmninamesParams(ostringstream& oss) const
852 CORBA::String_var iorstr = _NS->getIORaddr();
853 oss << "ORBInitRef NameService=";
857 //=============================================================================
859 * generate a file name in /tmp directory
861 //=============================================================================
// Generate a temporary file name (in the system temp directory) for the
// launch script, unique enough to support multiple concurrent SALOME sessions.
863 string SALOME_ContainerManager::BuildTemporaryFileName() const
865 //build more complex file name to support multiple salome session
866 string aFileName = Kernel_Utils::GetTmpFileName();
875 //=============================================================================
877 * Builds in a temporary file the script to be launched.
879 * Used if SALOME Application ($APPLI) is not defined.
880 * The command is build with data from CatalogResources, in which every path
881 * used on remote computer must be defined.
883 //=============================================================================
// Build, in a temporary shell script, the command that launches a container
// on a remote host when no SALOME application ($APPLI) is defined.
// All remote paths come from CatalogResources. The script is copied to the
// remote host (rcp/scp per the resource protocol) and the returned command
// executes it there (rsh/ssh). Throws SALOME_Exception on unknown protocol
// or when the copy to the remote host fails.
// NOTE(review): this listing elides lines; comments describe only visible code.
886 SALOME_ContainerManager::BuildTempFileToLaunchRemoteContainer
887 (const string& resource_name,
888  const Engines::ContainerParameters& params) throw(SALOME_Exception)
892 _TmpFileName = BuildTemporaryFileName();
893 ofstream tempOutputFile;
894 tempOutputFile.open(_TmpFileName.c_str(), ofstream::out );
895 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesDescr(resource_name);
896 tempOutputFile << "#! /bin/sh" << endl;
900 tempOutputFile << "export SALOME_trace=local" << endl; // mkr : 27.11.2006 : PAL13967 - Distributed supervision graphs - Problem with "SALOME_trace"
901 //tempOutputFile << "source " << resInfo.PreReqFilePath << endl;
// MPI container: wrap in mpirun; nbproc mirrors GiveContainer's step 5.
907 tempOutputFile << "mpirun -np ";
910 if ( (params.resource_params.nb_node <= 0) && (params.resource_params.nb_proc_per_node <= 0) )
912 else if ( params.resource_params.nb_node == 0 )
913 nbproc = params.resource_params.nb_proc_per_node;
914 else if ( params.resource_params.nb_proc_per_node == 0 )
915 nbproc = params.resource_params.nb_node;
917 nbproc = params.resource_params.nb_node * params.resource_params.nb_proc_per_node;
919 std::ostringstream o;
921 tempOutputFile << nbproc << " ";
923 tempOutputFile << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
924 #elif defined(WITHOPENMPI)
// With OpenMPI, connect to the ompi-server when OMPI_URI_FILE is set.
925 if( getenv("OMPI_URI_FILE") == NULL )
926 tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace";
928 tempOutputFile << "-x PATH -x LD_LIBRARY_PATH -x OMNIORB_CONFIG -x SALOME_trace -ompi-server file:";
929 tempOutputFile << getenv("OMPI_URI_FILE");
// Container binary located under the (remote) KERNEL installation.
934 tempOutputFile << getenv("KERNEL_ROOT_DIR") << "/bin/salome/";
938 if (isPythonContainer(params.container_name))
939 tempOutputFile << " pyMPI SALOME_ContainerPy.py ";
941 tempOutputFile << " SALOME_MPIContainer ";
946 if (isPythonContainer(params.container_name))
947 tempOutputFile << "SALOME_ContainerPy.py ";
949 tempOutputFile << "SALOME_Container ";
// Container name plus "-ORBInitRef NameService=<IOR>"; run in background.
952 tempOutputFile << _NS->ContainerName(params) << " -";
953 AddOmninamesParams(tempOutputFile);
954 tempOutputFile << " &" << endl;
955 tempOutputFile.flush();
956 tempOutputFile.close();
// 0x1ED == 0755: make the generated script executable.
958 chmod(_TmpFileName.c_str(), 0x1ED);
// Copy the script to the remote host with the protocol-matching tool.
965 if (resInfo.Protocol == rsh)
968 string commandRcp = "rcp ";
969 commandRcp += _TmpFileName;
971 commandRcp += resInfo.HostName;
973 commandRcp += _TmpFileName;
974 status = system(commandRcp.c_str());
977 else if (resInfo.Protocol == ssh)
980 string commandRcp = "scp ";
981 commandRcp += _TmpFileName;
983 commandRcp += resInfo.HostName;
985 commandRcp += _TmpFileName;
986 status = system(commandRcp.c_str());
989 throw SALOME_Exception("Unknown protocol");
992 throw SALOME_Exception("Error of connection on remote host");
// Final command: "<rsh|ssh> host <remote script path>".
994 command += resInfo.HostName;
995 _CommandForRemAccess = command;
997 command += _TmpFileName;
1005 #ifdef WITH_PACO_PARALLEL
1006 //=============================================================================
1008 * Find or Start a suitable PaCO++ Parallel Container in a list of machines.
1009 * \param params Machine Parameters required for the container
1010 * \return CORBA container reference.
1012 //=============================================================================
1013 Engines::Container_ptr
1014 SALOME_ContainerManager::StartPaCOPPContainer(const Engines::ContainerParameters& params_const)
1016 CORBA::Object_var obj;
1017 PaCO::InterfaceManager_var container_proxy;
1018 Engines::Container_ptr ret = Engines::Container::_nil();
1019 Engines::MachineParameters params(params_const);
1021 // Step 1 : Try to find a suitable container
1022 // Currently not as good as could be since
1023 // we have to verified the number of nodes of the container
1024 // if a user tell that.
1025 ret = FindContainer(params, params.computerList);
1026 if(CORBA::is_nil(ret)) {
1027 // Step 2 : Starting a new parallel container !
1028 INFOS("[StartParallelContainer] Starting a PaCO++ parallel container");
1030 // Step 3 : Choose a computer
1031 std::string theMachine = _ResManager->FindFirst(params.computerList);
1032 //If the machine name is localhost use the real name
1033 if(theMachine == "localhost")
1034 theMachine=Kernel_Utils::GetHostname();
1036 if(theMachine == "") {
1037 INFOS("[StartParallelContainer] !!!!!!!!!!!!!!!!!!!!!!!!!!");
1038 INFOS("[StartParallelContainer] No possible computer found");
1039 INFOS("[StartParallelContainer] !!!!!!!!!!!!!!!!!!!!!!!!!!");
1042 INFOS("[StartParallelContainer] on machine : " << theMachine);
1043 params.hostname = CORBA::string_dup(theMachine.c_str());
1045 // Step 4 : starting parallel container proxy
1046 Engines::MachineParameters params_proxy(params);
1047 std::string command_proxy;
1048 SALOME_ContainerManager::actual_launch_machine_t proxy_machine;
1051 command_proxy = BuildCommandToLaunchParallelContainer("SALOME_ParallelContainerProxy", params_proxy, proxy_machine);
1053 catch(const SALOME_Exception & ex)
1055 INFOS("[StartParallelContainer] Exception in BuildCommandToLaunchParallelContainer");
1059 params_proxy.nb_proc = 0; // LaunchParallelContainer uses this value to know if it launches the proxy or the nodes
1060 obj = LaunchParallelContainer(command_proxy, params_proxy, _NS->ContainerName(params_proxy), proxy_machine);
1061 if (CORBA::is_nil(obj))
1063 INFOS("[StartParallelContainer] LaunchParallelContainer for proxy returns NIL !");
1068 container_proxy = PaCO::InterfaceManager::_narrow(obj);
1070 catch(CORBA::SystemException& e)
1072 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1073 INFOS("CORBA::SystemException : " << e);
1076 catch(CORBA::Exception& e)
1078 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1079 INFOS("CORBA::Exception" << e);
1084 INFOS("[StartParallelContainer] Exception in _narrow after LaunchParallelContainer for proxy !");
1085 INFOS("Unknown exception !");
1088 if (CORBA::is_nil(container_proxy))
1090 INFOS("[StartParallelContainer] PaCO::InterfaceManager::_narrow returns NIL !");
1094 // Step 5 : starting parallel container nodes
1095 std::string command_nodes;
1096 Engines::MachineParameters params_nodes(params);
1097 SALOME_ContainerManager::actual_launch_machine_t nodes_machines;
1100 command_nodes = BuildCommandToLaunchParallelContainer("SALOME_ParallelContainerNode", params_nodes, nodes_machines, proxy_machine[0]);
1102 catch(const SALOME_Exception & ex)
1104 INFOS("[StartParallelContainer] Exception in BuildCommandToLaunchParallelContainer");
1108 std::string container_generic_node_name = _NS->ContainerName(params) + "Node";
1109 obj = LaunchParallelContainer(command_nodes, params_nodes, container_generic_node_name, nodes_machines);
1110 if (CORBA::is_nil(obj))
1112 INFOS("[StartParallelContainer] LaunchParallelContainer for nodes returns NIL !");
1113 // Il faut tuer le proxy
1116 Engines::Container_var proxy = Engines::Container::_narrow(container_proxy);
1121 INFOS("[StartParallelContainer] Exception catched from proxy Shutdown...");
1126 // Step 6 : connecting nodes and the proxy to actually create a parallel container
1127 for (int i = 0; i < params.nb_proc; i++)
1129 std::ostringstream tmp;
1131 std::string proc_number = tmp.str();
1132 std::string container_node_name = container_generic_node_name + proc_number;
1134 std::string theNodeMachine(nodes_machines[i]);
1135 std::string containerNameInNS = _NS->BuildContainerNameForNS(container_node_name.c_str(), theNodeMachine.c_str());
1136 obj = _NS->Resolve(containerNameInNS.c_str());
1137 if (CORBA::is_nil(obj))
1139 INFOS("[StartParallelContainer] CONNECTION FAILED From Naming Service !");
1140 INFOS("[StartParallelContainer] Container name is " << containerNameInNS);
1145 MESSAGE("[StartParallelContainer] Deploying node : " << container_node_name);
1146 PaCO::InterfaceParallel_var node = PaCO::InterfaceParallel::_narrow(obj);
1148 MESSAGE("[StartParallelContainer] node " << container_node_name << " is deployed");
1150 catch(CORBA::SystemException& e)
1152 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1153 INFOS("CORBA::SystemException : " << e);
1156 catch(CORBA::Exception& e)
1158 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1159 INFOS("CORBA::Exception" << e);
1164 INFOS("[StartParallelContainer] Exception in deploying node : " << containerNameInNS);
1165 INFOS("Unknown exception !");
1170 // Step 7 : starting parallel container
1173 MESSAGE ("[StartParallelContainer] Starting parallel object");
1174 container_proxy->start();
1175 MESSAGE ("[StartParallelContainer] Parallel object is started");
1176 ret = Engines::Container::_narrow(container_proxy);
1178 catch(CORBA::SystemException& e)
1180 INFOS("Caught CORBA::SystemException. : " << e);
1182 catch(PortableServer::POA::ServantAlreadyActive&)
1184 INFOS("Caught CORBA::ServantAlreadyActiveException");
1186 catch(CORBA::Exception&)
1188 INFOS("Caught CORBA::Exception.");
1190 catch(std::exception& exc)
1192 INFOS("Caught std::exception - "<<exc.what());
1196 INFOS("Caught unknown exception.");
1202 //=============================================================================
1204 * Find or Start a suitable PaCO++ Parallel Container in a list of machines.
1205 * \param params Machine Parameters required for the container
1206 * \return CORBA container reference.
1208 //=============================================================================
// Stub compiled when the PaCO++ parallel extension is disabled: logs that the
// feature is unavailable and prepares a nil container reference.
// (NOTE(review): the `return ret;` and braces are elided in this extract.)
1209 Engines::Container_ptr
1210 SALOME_ContainerManager::StartPaCOPPContainer(const Engines::ContainerParameters& params)
1212 Engines::Container_ptr ret = Engines::Container::_nil();
1213 INFOS("[StartParallelContainer] is disabled !");
1214 INFOS("[StartParallelContainer] recompile SALOME Kernel to enable parallel extension");
1219 #ifndef WITH_PACO_PARALLEL
// Stub of LaunchParallelContainer for builds without PaCO++ support:
// yields a nil CORBA object reference (parameters intentionally unused).
1221 SALOME_ContainerManager::LaunchParallelContainer(const std::string& command,
1222 const Engines::ContainerParameters& params,
1223 const std::string& name,
1224 SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
1226 CORBA::Object_ptr obj = CORBA::Object::_nil();
1230 //=============================================================================
1231 /*! This method launches the parallel container.
1232 * It may be placed on the resources manager.
1234 * \param command to launch
1235 * \param container's parameters
1236 * \param name of the container
1238 * \return CORBA container reference
1240 //=============================================================================
// Launches the parallel container processes with a blocking system() call,
// then polls the naming service until the expected object(s) appear or the
// retry budget (TIME_OUT_TO_LAUNCH_CONT) is exhausted.
// params.nb_proc == 0 selects the proxy case; otherwise all nb_proc nodes
// are awaited. Returns the last resolved reference (nil on failure).
1242 SALOME_ContainerManager::LaunchParallelContainer(const std::string& command,
1243 const Engines::ContainerParameters& params,
1244 const std::string& name,
1245 SALOME_ContainerManager::actual_launch_machine_t & vect_machine)
1247 CORBA::Object_ptr obj = CORBA::Object::_nil();
1248 std::string containerNameInNS;
// Shared retry counter for all waiting loops below.
1249 int count = TIME_OUT_TO_LAUNCH_CONT;
1251 INFOS("[LaunchParallelContainer] Begin");
// NOTE(review): raw system() status is compared to 217 directly instead of
// going through WEXITSTATUS — presumably matching a known wrapper exit code;
// confirm against the launcher scripts.
1252 int status = system(command.c_str());
1254 INFOS("[LaunchParallelContainer] failed : system command status -1");
1257 else if (status == 217) {
1258 INFOS("[LaunchParallelContainer] failed : system command status 217");
1262 if (params.nb_proc == 0)
1264 std::string theMachine(vect_machine[0]);
1265 // nb_proc == 0: a proxy was launched; wait for it in the naming service
1266 containerNameInNS = _NS->BuildContainerNameForNS((char*) name.c_str(), theMachine.c_str());
1267 INFOS("[LaunchParallelContainer] Waiting for Parallel Container proxy " << containerNameInNS << " on " << theMachine);
1268 while (CORBA::is_nil(obj) && count)
1276 obj = _NS->Resolve(containerNameInNS.c_str());
1281 INFOS("[LaunchParallelContainer] launching the nodes of the parallel container");
1282 // Wait for every node to register itself in the naming service
1283 for (int i = 0; i < params.nb_proc; i++)
1285 obj = CORBA::Object::_nil();
1286 std::string theMachine(vect_machine[i]);
// Node names are suffixed with the process rank: <name><i>
1288 std::ostringstream tmp;
1290 std::string proc_number = tmp.str();
1291 std::string container_node_name = name + proc_number;
1292 containerNameInNS = _NS->BuildContainerNameForNS((char*) container_node_name.c_str(), theMachine.c_str());
1293 INFOS("[LaunchParallelContainer] Waiting for Parallel Container node " << containerNameInNS << " on " << theMachine);
1294 while (CORBA::is_nil(obj) && count) {
1301 obj = _NS->Resolve(containerNameInNS.c_str());
1303 if (CORBA::is_nil(obj))
1305 INFOS("[LaunchParallelContainer] Launch of node failed (or not found) !");
1310 if (CORBA::is_nil(obj))
1311 INFOS("[LaunchParallelContainer] failed");
1317 #ifndef WITH_PACO_PARALLEL
// Stub of BuildCommandToLaunchParallelContainer for builds without PaCO++
// support (parameters intentionally unused).
1319 SALOME_ContainerManager::BuildCommandToLaunchParallelContainer(const std::string& exe_name,
1320 const Engines::ContainerParameters& params,
1321 SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
1322 const std::string proxy_hostname)
1327 //=============================================================================
1328 /*! Creates a command line that the container manager uses to launch
1329 * a parallel container.
1331 //=============================================================================
// Builds the shell command used to launch either the parallel container
// proxy or its nodes (which one is deduced from exe_name containing "Proxy"),
// and fills vect_machine with the target host of each launched process.
// Supports the "Dummy" and "Mpi" parallelLib back-ends, local or remote
// (rsh/ssh via runRemote.sh) launch, and log redirection per $PARALLEL_LOG.
1333 SALOME_ContainerManager::BuildCommandToLaunchParallelContainer(const std::string& exe_name,
1334 const Engines::ContainerParameters& params,
1335 SALOME_ContainerManager::actual_launch_machine_t & vect_machine,
1336 const std::string proxy_hostname)
1338 // This method knows the differences between the proxy and the nodes.
1339 // nb_proc is not used in the same way for the proxy and for the nodes.
// Debug helpers kept for convenience: prepend gdb or valgrind to the command.
1342 //command = "gdb --args ";
1343 //command = "valgrind --tool=memcheck --log-file=val_log ";
1344 //command += real_exe_name;
1346 // Step 0 : init some variables...
1347 std::string parallelLib(CORBA::string_dup(params.parallelLib));
// Executable name is suffixed by the parallelLib, e.g. ...NodeDummy / ...NodeMpi.
1348 std::string real_exe_name = exe_name + parallelLib;
1349 std::string machine_file_name("");
1350 bool remote = false;
1351 bool is_a_proxy = false;
1352 std::string hostname(CORBA::string_dup(params.hostname));
1354 std::ostringstream tmp_string;
1355 CORBA::Long nb_nodes = params.nb_proc;
1356 tmp_string << nb_nodes;
1357 std::string nbproc = tmp_string.str();
// Machine parameters copied from params; used below to derive the container
// name passed on the command line (_NS->ContainerName(rtn)).
1359 Engines::MachineParameters_var rtn = new Engines::MachineParameters();
1360 rtn->container_name = params.container_name;
1361 rtn->hostname = params.hostname;
1362 rtn->OS = params.OS;
1363 rtn->mem_mb = params.mem_mb;
1364 rtn->cpu_clock = params.cpu_clock;
1365 rtn->nb_proc_per_node = params.nb_proc_per_node;
1366 rtn->nb_node = params.nb_node;
1367 rtn->nb_proc = params.nb_proc;
1368 rtn->isMPI = params.isMPI;
1370 // Step 1 : local or remote launch ?
1371 if (hostname != std::string(Kernel_Utils::GetHostname()) )
1373 MESSAGE("[BuildCommandToLaunchParallelContainer] remote machine case detected !");
1377 // Step 2 : proxy or nodes launch ?
1378 std::string::size_type loc_proxy = exe_name.find("Proxy");
1379 if( loc_proxy != string::npos ) {
1383 // Step 3 : Depending of the parallelLib, getting the machine file
1384 // ParallelLib Dummy has its own machine file for this method
1389 machine_file_name = _ResManager->getMachineFile(hostname,
1395 machine_file_name = _ResManager->getMachineFile(hostname,
1399 if (machine_file_name == "")
1401 INFOS("[BuildCommandToLaunchParallelContainer] Error machine_file was not generated for machine " << hostname);
1402 throw SALOME_Exception("Error machine_file was not generated");
1404 MESSAGE("[BuildCommandToLaunchParallelContainer] machine_file_name is : " << machine_file_name);
1407 // Step 4 : Log type chosen by the user via the PARALLEL_LOG env variable
1408 std::string log_env("");
1409 char * get_val = getenv("PARALLEL_LOG");
1412 std::string command_begin("");
1413 std::string command_end("");
1414 if(log_env == "xterm")
1416 command_begin = "/usr/X11R6/bin/xterm -e \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH; export PATH=$PATH;";
1417 command_end = "\"&";
1419 else if(log_env == "xterm_debug")
1421 command_begin = "/usr/X11R6/bin/xterm -e \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH; export PATH=$PATH;";
1422 command_end = "; cat \" &";
1426 // default into a file...
1427 std::string logFilename = "/tmp/" + _NS->ContainerName(params) + "_" + hostname;
1429 logFilename += "_Proxy_";
1431 logFilename += "_Node_";
// NOTE(review): getenv("USER") is dereferenced without a NULL check here.
1432 logFilename += std::string(getenv("USER")) + ".log";
1433 command_end = " > " + logFilename + " 2>&1 & ";
1436 // Step 5 : Building the command
1437 std::string command("");
1438 if (parallelLib == "Dummy")
// Dummy back-end: one ssh/rsh-wrapped command per process, hosts read
// line by line from the machine file.
1442 std::string command_remote("");
1445 std::string machine_name;
1446 std::ifstream machine_file(machine_file_name.c_str());
1447 std::getline(machine_file, machine_name);
1448 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << machine_name)
1450 // We want to launch a command like :
1451 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1452 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(machine_name);
1453 if (resInfo.Protocol == rsh)
1454 command_remote = "rsh ";
1456 command_remote = "ssh ";
1457 command_remote += "-l ";
1458 command_remote += resInfo.UserName;
1459 command_remote += " ";
1460 command_remote += machine_name;
1461 command_remote += " ";
1462 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1463 command_remote += "/runRemote.sh ";
1464 ASSERT(getenv("NSHOST"));
1465 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1466 command_remote += " ";
1467 ASSERT(getenv("NSPORT"));
1468 command_remote += getenv("NSPORT"); // port of CORBA name server
1469 command_remote += " ";
1471 hostname = machine_name;
1474 command = real_exe_name;
1475 command += " " + _NS->ContainerName(rtn);
1476 command += " " + parallelLib;
1477 command += " " + hostname;
1478 command += " " + nbproc;
1480 AddOmninamesParams(command);
1482 command = command_begin + command_remote + command + command_end;
1483 vect_machine.push_back(hostname);
// Node case: build one command per node, accumulated into `command`.
1487 std::ifstream * machine_file = NULL;
1489 machine_file = new std::ifstream(machine_file_name.c_str());
1490 for (int i= 0; i < nb_nodes; i++)
1492 std::string command_remote("");
1495 std::string machine_name;
1496 std::getline(*machine_file, machine_name);
1497 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << machine_name)
1499 // We want to launch a command like :
1500 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1501 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(machine_name);
1502 if (resInfo.Protocol == rsh)
1503 command_remote = "rsh ";
1505 command_remote = "ssh ";
1506 command_remote += "-l ";
1507 command_remote += resInfo.UserName;
1508 command_remote += " ";
1509 command_remote += machine_name;
1510 command_remote += " ";
1511 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1512 command_remote += "/runRemote.sh ";
1513 ASSERT(getenv("NSHOST"));
1514 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1515 command_remote += " ";
1516 ASSERT(getenv("NSPORT"));
1517 command_remote += getenv("NSPORT"); // port of CORBA name server
1518 command_remote += " ";
1520 hostname = machine_name;
1523 std::ostringstream tmp;
1525 std::string proc_number = tmp.str();
1527 std::string command_tmp("");
1528 command_tmp += real_exe_name;
1529 command_tmp += " " + _NS->ContainerName(rtn);
1530 command_tmp += " " + parallelLib;
1531 command_tmp += " " + proxy_hostname;
1532 command_tmp += " " + proc_number;
1533 command_tmp += " -";
1534 AddOmninamesParams(command_tmp);
// Change _Node_ into _Node<i>_ so that each node writes its own log file
1538 std::string command_end_tmp = command_end;
1539 std::string::size_type loc_node = command_end_tmp.find("_Node_");
1540 if (loc_node != std::string::npos)
1541 command_end_tmp.insert(loc_node+5, proc_number);
1542 command += command_begin + command_remote + command_tmp + command_end_tmp;
1543 vect_machine.push_back(hostname);
1546 delete machine_file;
1549 else if (parallelLib == "Mpi")
1551 // Step 0: if remote we have to copy the file
1552 // to the first machine of the file
1553 std::string remote_machine("");
1556 std::ifstream * machine_file = NULL;
1557 machine_file = new std::ifstream(machine_file_name.c_str());
1558 // Get first word of the line
1559 // For MPI implementation the first word is the
1561 std::getline(*machine_file, remote_machine, ' ');
1562 machine_file->close();
1563 MESSAGE("[BuildCommandToLaunchParallelContainer] machine file name extracted is " << remote_machine)
1565 // We want to launch a command like :
1566 // scp mpi_machine_file user@machine:Path
1567 std::string command_remote("");
1568 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1569 if (resInfo.Protocol == rsh)
1570 command_remote = "rcp ";
1572 command_remote = "scp ";
1574 command_remote += machine_file_name;
1575 command_remote += " ";
1576 command_remote += resInfo.UserName;
1577 command_remote += "@";
1578 command_remote += remote_machine;
1579 command_remote += ":";
1580 command_remote += machine_file_name;
1582 int status = system(command_remote.c_str());
1585 INFOS("copy of the mpi machine file failed !");
1592 std::string command_remote("");
1595 // We want to launch a command like :
1596 // ssh -l user machine distantPath/runRemote.sh hostNS portNS
1597 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(remote_machine);
1598 if (resInfo.Protocol == rsh)
1599 command_remote = "rsh ";
1601 command_remote = "ssh ";
1602 command_remote += "-l ";
1603 command_remote += resInfo.UserName;
1604 command_remote += " ";
1605 command_remote += remote_machine;
1606 command_remote += " ";
1607 command_remote += resInfo.AppliPath; // path relative to user@machine $HOME
1608 command_remote += "/runRemote.sh ";
1609 ASSERT(getenv("NSHOST"));
1610 command_remote += getenv("NSHOST"); // hostname of CORBA name server
1611 command_remote += " ";
1612 ASSERT(getenv("NSPORT"));
1613 command_remote += getenv("NSPORT"); // port of CORBA name server
1614 command_remote += " ";
1616 hostname = remote_machine;
1619 // We use Dummy proxy for MPI parallel containers
1620 real_exe_name = exe_name + "Dummy";
1621 command = real_exe_name;
1622 command += " " + _NS->ContainerName(rtn);
1623 command += " Dummy";
1624 command += " " + hostname;
1625 command += " " + nbproc;
1627 AddOmninamesParams(command);
1629 command = command_begin + command_remote + command + command_end;
1630 vect_machine.push_back(hostname);
// MPI node case: the executable is wrapped by runRemote.sh when remote,
// then launched through mpiexec (lam) or mpirun.
1634 std::string command_remote("");
1637 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(hostname);
1638 if (resInfo.Protocol == rsh)
1639 command_remote = "rsh ";
1641 command_remote = "ssh ";
1642 command_remote += "-l ";
1643 command_remote += resInfo.UserName;
1644 command_remote += " ";
1645 command_remote += remote_machine;
1646 command_remote += " ";
1648 std::string new_real_exe_name("");
1649 new_real_exe_name += resInfo.AppliPath; // path relative to user@machine $HOME
1650 new_real_exe_name += "/runRemote.sh ";
1651 ASSERT(getenv("NSHOST"));
1652 new_real_exe_name += getenv("NSHOST"); // hostname of CORBA name server
1653 new_real_exe_name += " ";
1654 ASSERT(getenv("NSPORT"));
1655 new_real_exe_name += getenv("NSPORT"); // port of CORBA name server
1656 new_real_exe_name += " ";
1658 real_exe_name = new_real_exe_name + real_exe_name;
1659 hostname = remote_machine;
1662 const ParserResourcesType& resInfo = _ResManager->GetImpl()->GetResourcesList(hostname);
1663 if (resInfo.mpi == lam)
1665 command = "mpiexec -ssi boot ";
1666 if (resInfo.Protocol == rsh)
1670 command += "-machinefile " + machine_file_name + " ";
1671 command += "-n " + nbproc + " ";
1672 command += real_exe_name;
1673 command += " " + _NS->ContainerName(rtn);
1674 command += " " + parallelLib;
1675 command += " " + proxy_hostname;
1677 AddOmninamesParams(command);
1681 command = "mpirun -np " + nbproc + " ";
1682 command += real_exe_name;
1683 command += " " + _NS->ContainerName(rtn);
1684 command += " " + parallelLib;
1685 command += " " + proxy_hostname;
1687 AddOmninamesParams(command);
1690 command = command_begin + command_remote + command + command_end;
// All MPI ranks are reported as running on the proxy host.
1691 for (int i= 0; i < nb_nodes; i++)
1692 vect_machine.push_back(proxy_hostname);
// Unknown back-end: refuse to build a command.
1697 std::string message("Unknown parallelLib : " + parallelLib);
1698 throw SALOME_Exception(message.c_str());
1701 MESSAGE("Parallel launch is: " << command);
// Runs `mpirun -np 1 hostname` (through ssh to `machine` when no
// LIBBATCH_NODEFILE is set, or with the given machinefile otherwise),
// redirecting the output to a temporary file which is then opened —
// presumably to read back the name of the MPI rank-0 node (the read and
// return are elided in this extract; confirm against the full source).
1706 string SALOME_ContainerManager::GetMPIZeroNode(const string machine, const string machinesFile)
1711 string tmpFile = BuildTemporaryFileName();
1713 if( getenv("LIBBATCH_NODEFILE") == NULL )
1714 cmd = "ssh " + machine + " mpirun -np 1 hostname > " + tmpFile;
1716 cmd = "mpirun -np 1 -machinefile " + machinesFile + " hostname > " + tmpFile;
1718 status = system(cmd.c_str());
1720 ifstream fp(tmpFile.c_str(),ios::in);
// Builds a temporary machinefile for an nbproc-process launch by consuming
// entries from the batch allocation (LIBBATCH_NODEFILE): the first loop skips
// the _nbprocUsed entries already handed out, the second copies the next
// nbproc entries (loop bodies are elided in this extract). Throws when the
// allocation does not hold enough processes. Returns the machinefile path.
// The _numInstanceMutex lock/unlock pair guards the _nbprocUsed counter.
1729 string SALOME_ContainerManager::machinesFile(const int nbproc)
// NOTE(review): getenv("LIBBATCH_NODEFILE") is assumed non-NULL here —
// callers presumably check it first (cf. GetMPIZeroNode); confirm.
1732 string nodesFile = getenv("LIBBATCH_NODEFILE");
1733 string machinesFile = Kernel_Utils::GetTmpFileName();
1734 ifstream fpi(nodesFile.c_str(),ios::in);
1735 ofstream fpo(machinesFile.c_str(),ios::out);
1737 _numInstanceMutex.lock();
1739 for(int i=0;i<_nbprocUsed;i++)
1742 for(int i=0;i<nbproc;i++)
1746 throw SALOME_Exception("You ask more processes than batch session have allocated!");
1748 _nbprocUsed += nbproc;
1752 _numInstanceMutex.unlock();
1754 return machinesFile;