-This is the version 3.1.0a2 of KERNEL
+This is the version 3.1.0b1 of KERNEL
Previous versions :
- 3.0.0
- 2.2.4
envSalome.py \
salomeConsole.py \
showNS.py \
-addToKillList.py
+addToKillList.py \
+NSparam.py
# copy header files in common directory
OWN_CONFIG_H=@OWN_CONFIG_H@
--- /dev/null
+#!/usr/bin/env python
+
+import sys,os
+import string
+
+def getNSparams(info=""):
+ """
+ check environment for omniORB configuration file.
+ parse the file to find the line defining naming service host and port,
+ set environment variables NSPORT and NSHOST,
+ get host and port,
+ if info==host print host
+ elif info==port print port
+ else print 2 strings on stdout on one line: host port
+ """
+ my_port=""
+ my_host=""
+ if os.environ.has_key("OMNIORB_CONFIG"):
+ file = open(os.environ["OMNIORB_CONFIG"], "r")
+ s = file.read()
+ while len(s):
+ l = string.split(s, ":")
+ if string.split(l[0], " ")[0] == "ORBInitRef" or \
+ string.split(l[0], " ")[0] == "InitRef" :
+ my_port = l[len(l)-1]
+ if my_port[-1] == '\n':
+ my_port = my_port[:-1]
+ pass
+ my_host = l[len(l)-2]
+ break;
+ pass
+ s = file.read()
+ pass
+ pass
+ if info=='host':
+ # keep print, stdout used in shell
+ print my_host
+ os.environ['NSHOST']=my_host
+ return my_host
+ pass
+ elif info=='port':
+ # keep print, stdout used in shell
+ print my_port
+ os.environ['NSPORT']=my_port
+ return my_port
+ pass
+ else:
+ # keep print, stdout used in shell
+ print my_host, my_port
+ return my_host, my_port
+ pass
+
+# ------------------------------------------------------------------------
+
+if __name__ == "__main__":
+ if len(sys.argv) >1:
+ if sys.argv[1]=='host':
+ getNSparams('host')
+ pass
+ elif sys.argv[1]=='port':
+ getNSparams('port')
+ pass
+ else:
+ getNSparams('')
+ pass
+ pass
+ else:
+ getNSparams('')
+ pass
-THIS IS SALOME - KERNEL VERSION: 3.1.0a2
+THIS IS SALOME - KERNEL VERSION: 3.1.0b1
+=======================================
Set of scripts for a SALOME application
=======================================
+*html version of this document is produced with docutils*::
+
+ rest2html < doc.txt > doc.html
+
+This document corresponds to SALOME2 3.1. (beta version)
+
++-------------------------------------------+
+| **WORK in PROGRESS, INCOMPLETE DOCUMENT** |
++-------------------------------------------+
+
+SALOME Application concept
+--------------------------
+
+See SALOME_Application_ to define your own configuration of SALOME and run it
+on one or several computers. This is the recommended way of configuration.
+
+.. _SALOME_Application: ../../doc/SALOME_Application.html
+
+
+User run scripts
+----------------
+
+The SALOME user can use the following scripts:
+
+runAppli
+ Launches a SALOME Session
+ (similar to ${KERNEL_ROOT_DIR}/bin/salome/runSalome but with a different
+ name to avoid confusion).
+
+runSession
+ Launches a shell script in the SALOME application environment, with access
+ to the current SALOME session (naming service), if any.
+ Without arguments, the script is interactive. With arguments, the script
+ executes the command in the SALOME application environment.
+
+runConsole
+ Gives a python console connected to the current SALOME Session.
+ It is also possible to use runSession, then python.
+
+runTests
+ Similar to runSession, used for unit testing. runTests defines a new
+ configuration for naming service (new port number) to avoid interferences
+ with a running SALOME session. runSession tries to use an already existing
+ naming service definition from a running session (hostname & port number).
+
+killCurrentPort
+ Kills the last SALOME session corresponding to this application that was
+ initially launched from this computer.
+ Cleans associated config files.
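+
+For illustration, a typical sequence using the scripts above might be::
+
+ ./runAppli --logger
+ ./runSession
+ ./killCurrentPort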
+
+SALOME internal run scripts
+---------------------------
+
+envd
+ Sets the SALOME application environment; envd is sourced by the other scripts.
+
+setAppliPath.sh
+ Used by other scripts to define the Application Path.
+
+searchFreePort.sh
+ Used by other scripts to find a free port for naming service.
+
+For remote calls, SALOME uses one script.
+
+runRemote.sh
+ This script is mainly used to launch containers. The first 2 arguments
+ define the hostname and port used for the naming service; the remaining
+ arguments define the command to execute.
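+
+ For example (myremotehost and myContainerName are only placeholders), a
+ container launch through the default naming service port could look like::
+
+  ./runRemote.sh myremotehost 2810 SALOME_Container myContainerName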
+
+
+The following files must be adapted to your environment and SALOME Application
+------------------------------------------------------------------------------
-# - A SALOME application distributed on several computers needs APPLI
-# directories on the same path ($APPLI) relative to $HOME directory
-# of the user, on each computer.
+- CatalogResources.xml
+- SalomeApp.xml
+- env.d/atFirst.sh
+- env.d/envProducts.sh
+- env.d/envSalome.sh
-user scripts:
--------------
+CatalogRessources.xml
+ This file describes all the computers the application can use. The given
+ example is minimal and assumes ${APPLI} is the same relative path
+ to ${HOME} on all the computers. A different directory can be set on a
+ particular computer with a line::
-runAppli : SALOME launch (idem runSalome but different name to avoid
- confusion with ${KERNEL_ROOT_DIR}/bin/salome/runSalome
+ appliPath="my/specific/path/on/this/computer"
-runConsole : a python console in the current SALOME session environment
+SalomeApp.xml
+ This file is similar to the default given
+ in ${GUI_ROOT_DIR}/share/salome/resources
-runSession : a shell in SALOME environment, either interactive (without args)
- or used to run a program (defined by given args)
-internal application scripts:
------------------------------
+Proposal for env.d scripts
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Each user **must define** his own configuration for these scripts, following
+the above rules. **The following is only an example and will not work as it is**.
-runRemote.sh : called from remote computer, via ssh, rsh...
- used for instance to create container
+atFirst.sh
+ Sets the computer configuration not directly related to SALOME,
+ like useful tools, default PATH.
-envd : sets SALOME application environment
- sourced by other scripts
+envProducts.sh
+ Sets the SALOME prerequisites.
-The following files must be adapted to your environment and SALOME Application:
--------------------------------------------------------------------------------
+envSalome.sh
+ Sets all the MODULE_ROOT_DIR variables that can be used in the SALOME application.
-SalomeApp.xml - list of modules, options on server launch and resources...
-CatalogResources.xml - configuration of machines used in SALOME application
- (no need of modules list and path here)
+ SalomeAppConfig is also defined by::
-env.d directory must contain the necessary files to source, to define
-the SALOME Application environment :
- ( envd script source these files in alphanumeric order )
+ export SalomeAppConfig=${HOME}/${APPLI}
-For instance,
- atFirst.sh - general presets
- envProducts.sh - prerequisite SALOME environment
- envSalome.sh - list of MODULE_ROOT_DIR
+ where SalomeAppConfig designates the directory containing SalomeApp.xml.
+ Note that ${APPLI} is already defined by the calling scripts when
+ env.d/envSalome.sh is sourced.
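+
+ As a minimal sketch (the installation paths below are only placeholders for
+ your own installation), env.d/envSalome.sh could contain::
+
+  export KERNEL_ROOT_DIR=/where/KERNEL/is/installed
+  export GUI_ROOT_DIR=/where/GUI/is/installed
+  export SalomeAppConfig=${HOME}/${APPLI}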
#!/bin/bash
-for i in $1/env.d/*;do source ${i}; done
+for i in $1/env.d/*.sh; do source ${i}; done
--- /dev/null
+#!/bin/bash
+
+# --- retrieve APPLI path, relative to $HOME, set ${APPLI}
+
+. `dirname $0`/setAppliPath.sh
+
+# --- set the SALOME environment (prerequisites, MODULES_ROOT_DIR...)
+
+. ${HOME}/${APPLI}/envd ${HOME}/${APPLI}
+
+# --- find omniORB configuration relative to current session if any
+
+myhost=`hostname`
+fileOmniConfig=${HOME}/${APPLI}/.omniORB_${myhost}_last.cfg
+
+if [ -f $fileOmniConfig ]; then
+ export OMNIORB_CONFIG=${HOME}/${APPLI}/.omniORB_${myhost}_last.cfg
+fi
+
+currentPort=`${KERNEL_ROOT_DIR}/bin/salome/NSparam.py port`
+echo $currentPort
+
+# --- kill current salome session
+
+${KERNEL_ROOT_DIR}/bin/salome/killSalomeWithPort.py $currentPort
+
+# --- delete config files
+
+if [ -s $fileOmniConfig ]; then
+ refConfig=`ls -l $fileOmniConfig | awk '{print \$NF}'`
+ if [ -f $refConfig ]; then
+ rm $refConfig
+ fi
+ rm $fileOmniConfig
+fi
#!/bin/bash
-# --- retrieve APPLI path, relative to $HOME
-# on sarge, "which" gives not allways the absolute path...
-
-comName=`which $0`
-aa=${comName:0:1}
-if test x$aa == x\/; then
- mycom=${comName}
-elif test x$aa == x\.; then
- mycom=${PWD}/${comName:2}
-else
- mycom=${PWD}/${comName}
-fi
-APPLI=`echo ${HOME} \`dirname $mycom\` | awk ' { print substr($2,length($1)+2) } '`
-#echo $APPLI
-export APPLI
+# --- retrieve APPLI path, relative to $HOME, set ${APPLI}
+
+. `dirname $0`/setAppliPath.sh
# --- set the SALOME environment (prerequisites, MODULES_ROOT_DIR...)
# --- define port for CORBA naming service
-searchFreePort() {
- echo -n "Searching for a free port for naming service: "
- export NSPORT=2810
- local limit=$NSPORT
- let limit=limit+100
- while [ 1 ]
- do
- aRes=`netstat -ltn | grep -E :${NSPORT}`
- if [ -z "$aRes" ]; then
- echo ${NSPORT} - Ok
- local myhost=`hostname`
- export OMNIORB_CONFIG=${HOME}/${APPLI}/.omniORB_${myhost}_${NSPORT}.cfg
- export NSPORT
- export NSHOST=${myhost}
- local initref="NameService=corbaname::"`hostname`":$NSPORT"
- #echo "ORBInitRef $initref" > $OMNIORB_CONFIG
- echo "InitRef = $initref" > $OMNIORB_CONFIG
- export LAST_RUNNING_CONFIG=${HOME}/${APPLI}/.omniORB_${myhost}_last.cfg
- rm ${LAST_RUNNING_CONFIG}
- ln -s ${OMNIORB_CONFIG} ${LAST_RUNNING_CONFIG}
- break
- fi
- echo -n "${NSPORT} "
- if [[ $NSPORT -eq $limit ]] ; then
- echo
- echo "Can't find a free port to launch omniNames"
- echo "Try to kill the running servers and then launch SALOME again."
- exit
- fi
- let NSPORT=NSPORT+1
- done
-}
+. `dirname $0`/searchFreePort.sh
+searchFreePort
# --- if mpi lam, start lam (seems safe to be done several times)
# manual stop with lamhalt
# (default arguments defined in local salome.launch could be completed
# by arguments to this command)
-searchFreePort
-
if [ $# -ne 0 ] ; then
${KERNEL_ROOT_DIR}/bin/salome/envSalome.py python -i ${KERNEL_ROOT_DIR}/bin/salome/runSalome.py $*
- # --- todo delete omniORB config files in relation to the naming service kill
- rm ${OMNIORB_CONFIG}
- rm ${LAST_RUNNING_CONFIG}
+
else
${KERNEL_ROOT_DIR}/bin/salome/envSalome.py python ${KERNEL_ROOT_DIR}/bin/salome/runSalome.py
fi
#!/bin/bash
-# --- retrieve APPLI path, relative to $HOME
-# on sarge, "which" gives not allways the absolute path...
-
-comName=`which $0`
-aa=${comName:0:1}
-if test x$aa == x\/; then
- mycom=${comName}
-elif test x$aa == x\.; then
- mycom=${PWD}/${comName:2}
-else
- mycom=${PWD}/${comName}
-fi
-APPLI=`echo ${HOME} \`dirname $mycom\` | awk ' { print substr($2,length($1)+2) } '`
-#echo $APPLI
-export APPLI
+# --- retrieve APPLI path, relative to $HOME, set ${APPLI}
+
+. `dirname $0`/setAppliPath.sh
# --- set the SALOME environment (prerequisites, MODULES_ROOT_DIR...)
#!/bin/bash
-./runAppli --killall
+./killCurrentPort
+
+./runAppli --logger
-#./runSession killSalome.py
# $3 and following : local command to execute, with args
#
-# --- retrieve APPLI path, relative to $HOME
-# on sarge, "which" gives not allways the absolute path...
-
-comName=`which $0`
-aa=${comName:0:1}
-if test x$aa == x\/; then
- mycom=${comName}
-elif test x$aa == x\.; then
- mycom=${PWD}/${comName:2}
-else
- mycom=${PWD}/${comName}
-fi
-APPLI=`echo ${HOME} \`dirname $mycom\` | awk ' { print substr($2,length($1)+2) } '`
-#echo $APPLI
-export APPLI
+# --- retrieve APPLI path, relative to $HOME, set ${APPLI}
+
+. `dirname $0`/setAppliPath.sh
# --- set the SALOME environment (prerequisites, MODULES_ROOT_DIR...)
# Use it without args to run an interactive shell under Salome env
#
-# --- retrieve APPLI path, relative to $HOME
-# on sarge, "which" gives not allways the absolute path...
-
-comName=`which $0`
-aa=${comName:0:1}
-if test x$aa == x\/; then
- mycom=${comName}
-elif test x$aa == x\.; then
- mycom=${PWD}/${comName:2}
-else
- mycom=${PWD}/${comName}
-fi
-APPLI=`echo ${HOME} \`dirname $mycom\` | awk ' { print substr($2,length($1)+2) } '`
-echo $APPLI
-export APPLI
+# --- retrieve APPLI path, relative to $HOME, set ${APPLI}
+
+. `dirname $0`/setAppliPath.sh
# --- set the SALOME environment (prerequisites, MODULES_ROOT_DIR...)
. ${HOME}/${APPLI}/envd ${HOME}/${APPLI}
+# --- set omniORB configuration to current session if any
+
myhost=`hostname`
-export OMNIORB_CONFIG=${HOME}/${APPLI}/.omniORB_${myhost}_last.cfg
+fileOmniConfig=${HOME}/${APPLI}/.omniORB_${myhost}_last.cfg
+
+if [ -f $fileOmniConfig ]; then
+ export OMNIORB_CONFIG=${HOME}/${APPLI}/.omniORB_${myhost}_last.cfg
+
+ # --- set environment variables for port and hostname of NamingService
+
+ export NSHOST=`${KERNEL_ROOT_DIR}/bin/salome/NSparam.py host`
+ export NSPORT=`${KERNEL_ROOT_DIR}/bin/salome/NSparam.py port`
+fi
# --- invoke shell with or without args
#!/bin/bash
-# --- retrieve APPLI path, relative to $HOME
-# on sarge, "which" gives not allways the absolute path...
-
-comName=`which $0`
-aa=${comName:0:1}
-if test x$aa == x\/; then
- mycom=${comName}
-elif test x$aa == x\.; then
- mycom=${PWD}/${comName:2}
-else
- mycom=${PWD}/${comName}
-fi
-APPLI=`echo ${HOME} \`dirname $mycom\` | awk ' { print substr($2,length($1)+2) } '`
-#echo $APPLI
-export APPLI
+# --- retrieve APPLI path, relative to $HOME, set ${APPLI}
+
+. `dirname $0`/setAppliPath.sh
# --- set the SALOME environment (prerequisites, MODULES_ROOT_DIR...)
--- /dev/null
+#!/bin/bash
+
+# --- define port for CORBA naming service
+
+searchFreePort() {
+ echo -n "Searching for a free port for naming service: "
+ export NSPORT=2810
+ local limit=$NSPORT
+ let limit=limit+100
+ while [ 1 ]
+ do
+ aRes=`netstat -ltn | grep -E :${NSPORT}`
+ if [ -z "$aRes" ]; then
+ echo ${NSPORT} - Ok
+ local myhost=`hostname`
+ export OMNIORB_CONFIG=${HOME}/${APPLI}/.omniORB_${myhost}_${NSPORT}.cfg
+ export NSPORT
+ export NSHOST=${myhost}
+ local initref="NameService=corbaname::"`hostname`":$NSPORT"
+ #echo "ORBInitRef $initref" > $OMNIORB_CONFIG
+ echo "InitRef = $initref" > $OMNIORB_CONFIG
+ export LAST_RUNNING_CONFIG=${HOME}/${APPLI}/.omniORB_${myhost}_last.cfg
+ rm ${LAST_RUNNING_CONFIG}
+ ln -s ${OMNIORB_CONFIG} ${LAST_RUNNING_CONFIG}
+ break
+ fi
+ echo -n "${NSPORT} "
+ if [[ $NSPORT -eq $limit ]] ; then
+ echo
+ echo "Can't find a free port to launch omniNames"
+ echo "Try to kill the running servers and then launch SALOME again."
+ exit
+ fi
+ let NSPORT=NSPORT+1
+ done
+}
+
--- /dev/null
+#!/bin/bash
+
+# --- retrieve APPLI path, relative to $HOME, set ${APPLI}
+# on sarge, "which" does not always give the absolute path...
+
+comName=`which $0`
+aa=${comName:0:1}
+if test x$aa == x\/; then
+ mycom=${comName}
+elif test x$aa == x\.; then
+ mycom=${PWD}/${comName:2}
+else
+ mycom=${PWD}/${comName}
+fi
+APPLI=`echo ${HOME} \`dirname $mycom\` | awk ' { print substr($2,length($1)+2) } '`
+#echo $APPLI
+export APPLI
def getPiDict(port,appname='salome',full=True):
from Utils_Identity import getShortHostName
-
- if os.getenv("HOSTNAME") == None:
- if os.getenv("HOST") == None:
- os.environ["HOSTNAME"]=getShortHostName()
- else:
- os.environ["HOSTNAME"]=os.getenv("HOST")
+
+ host = os.getenv("HOSTNAME")
+ if not host:
+ host = os.getenv("HOST")
+ if not host:
+ host = getShortHostName()
filedict = []
filedict.append( os.getenv('USER') ) # user name
- filedict.append( os.getenv('HOSTNAME') ) # host name
+ filedict.append( host ) # host name
filedict.append( str(port) ) # port number
filedict.append( appname.upper() ) # application name
filedict.append( 'pidict' ) # constant part
self.SCMD2+=['GUI']
if self.args['splash']:
self.SCMD2+=['SPLASH']
+ if self.args.has_key('modules'):
+ self.SCMD2+=['--modules (']
+ for mod in self.args['modules']:
+ self.SCMD2+=[mod + ':']
+ self.SCMD2+=[')']
def setpath(self,modules_list,modules_root_dir):
cata_path=[]
#
# Launching the Session Server
#
+
mySessionServ = SessionServer(args)
mySessionServ.setpath(modules_list,modules_root_dir)
mySessionServ.run()
where SalomeAppConfig designates the directory containing SalomeApp.xml.
Note that ${APPLI} is already defined by the calling scripts when
- envSALOME.sh is sourced.
+ env.d/envSalome.sh is sourced.
2.2 User run scripts
~~~~~~~~~~~~~~~~~~~~
and port number), runTests defines a new configuration for naming service
(new port number).
-2.3 SALOME internal run script
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+2.3 SALOME internal run scripts
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+envd
+ Sets the SALOME application environment; envd is sourced by the other scripts.
For remote calls, SALOME uses one script.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SalomeApp.xml
- This file is similar to the default given in $GUI_ROOT_DIR
+ This file is similar to the default given
+ in ${GUI_ROOT_DIR}/share/salome/resources
CatalogRessources.xml
This file describes all the computers the application can use. The given
LIBS_old="$LIBS"
LDFLAGS_old="$LDFLAGS"
LDFLAGS="$MPI_LIBS $LDFLAGS"
- AC_CHECK_LIB(lam,lam_mp_init,,WITHLAM="no")
- AC_CHECK_LIB(mpi,MPI_Init,WITHLAM="yes",WITHLAM="no")
- AC_CHECK_LIB(mpi,MPI_Publish_name,WITHMPI2="yes",WITHMPI2="no")
- LDFLAGS="$LDFLAGS_old"
- LIBS="$LIBS_old"
+ fi
+
+ if test "$WITHLAM" = "yes";then
+ WITHLAM="no"
+
+ if test "$WITHLAM" = "no";then
+ CPPFLAGS="$MPI_INCLUDES $CPPFLAGS"
+ LIBS="$LIBS -lmpi++"
+ AC_TRY_LINK([
+ #include <mpi.h>
+ ], [int argc=0; char **argv=0; MPI_Init(&argc,&argv);],
+ WITHLAM="yes",WITHLAM="no")
+ if test "$WITHLAM" = "yes";then
+ MPI_LIBS="$MPI_LIBS -lmpi++"
+ fi
+ LIBS="$LIBS_old"
+ CPPFLAGS="$CPPFLAGS_old"
+
+ AC_CHECK_LIB(mpi++,MPI_Publish_name,WITHMPI2="yes",WITHMPI2="no")
+ LDFLAGS="$LDFLAGS_old"
+ LIBS="$LIBS_old"
+ fi
+
+ if test "$WITHLAM" = "no";then
+ AC_CHECK_LIB(lam,lam_mp_init,WITHLAM="yes",WITHLAM="no")
+ if test "$WITHLAM" = "yes";then
+ MPI_LIBS="$MPI_LIBS -llam"
+ LIBS="$LIBS -llam"
+ fi
+
+ AC_CHECK_LIB(mpi,MPI_Init,WITHLAM="yes",WITHLAM="no")
+ if test "$WITHLAM" = "yes";then
+ MPI_LIBS="$MPI_LIBS -lmpi"
+ fi
+
+ AC_CHECK_LIB(mpi,MPI_Publish_name,WITHMPI2="yes",WITHMPI2="no")
+ LDFLAGS="$LDFLAGS_old"
+ LIBS="$LIBS_old"
+ fi
fi
if test "$WITHLAM" = "yes";then
WITHMPI="yes"
mpi_ok=yes
- MPI_LIBS="$MPI_LIBS -llammpi++"
+ CPPFLAGS="-DWITHLAM $CPPFLAGS"
else
mpi_ok=no
fi
AC_MSG_RESULT(QTDIR environment variable may be wrong)
else
AC_MSG_RESULT(yes)
- QT_INCLUDES="-I${QT_ROOT}/include${QTINC} -DQT_THREAD_SUPPORT"
- QT_MT_INCLUDES="-I${QT_ROOT}/include${QTINC} -DQT_THREAD_SUPPORT"
+ QT_INCLUDES="-I${QT_ROOT}/include${QTINC} -DQT_THREAD_SUPPORT -DQT_CLEAN_NAMESPACE"
+ QT_MT_INCLUDES="-I${QT_ROOT}/include${QTINC} -DQT_THREAD_SUPPORT -DQT_CLEAN_NAMESPACE"
fi
fi
else
LIBS="$LIBS -L$QTDIR/lib -lqt-mt"
fi
- if test "x$QWTHOME" = "x/usr/lib"
+ if test "x$QWTHOME" = "x/usr"
then
LIBS="$LIBS -lqwt"
else
AC_MSG_RESULT(QWTHOME environment variable may be wrong)
else
QWT_INCLUDES="-I$QWT_INCLUDES"
- if test "x$QWTHOME" = "x/usr/lib"
+ if test "x$QWTHOME" = "x/usr"
then
QWT_LIBS=" -lqwt"
else
+#ifdef HAVE_MPI2
+#include "mpi.h"
+#endif
#include "ReceiverFactory.hxx"
#include "Receivers.hxx"
using namespace std;
#ifndef _RECEIVERS_HXX_
#define _RECEIVERS_HXX_
-#include "SALOME_Comm_i.hxx"
-#include "Receiver.hxx"
#ifdef HAVE_MPI2
#include "mpi.h"
#endif
+#include "SALOME_Comm_i.hxx"
+#include "Receiver.hxx"
/*!
Receiver used for transfer with CORBA when no copy is required remotely and locally.
+#include "SALOME_Comm_i.hxx"
#ifndef WNT
#include <rpc/xdr.h>
#endif
-#include "SALOME_Comm_i.hxx"
#include "poa.h"
#include "omnithread.h"
#include "Utils_SINGLETON.hxx"
#ifndef _SALOME_COMM_I_HXX_
#define _SALOME_COMM_I_HXX_
-#include <string>
-#include <SALOMEconfig.h>
-#include CORBA_SERVER_HEADER(SALOME_Comm)
#ifdef HAVE_MPI2
#include "mpi.h"
#endif
+#include <string>
+#include <SALOMEconfig.h>
+#include CORBA_SERVER_HEADER(SALOME_Comm)
#define TIMEOUT 20
+#include "SALOME_Comm_i.hxx"
#include "SenderFactory.hxx"
#include "utilities.h"
#include "SALOMEMultiComm.hxx"
-#include "SALOME_Comm_i.hxx"
using namespace std;
#ifdef COMP_CORBA_DOUBLE
%{
#include "ReceiverFactory.hxx"
+ #undef SEEK_SET
+ #undef SEEK_CUR
+ #undef SEEK_END
#include "SALOME_Comm_i.hxx"
%}
// Module : SALOME
// $Header$
+#ifdef HAVE_MPI2
+#include <mpi.h>
+#endif
+
#include <iostream>
#include <string>
#include <stdio.h>
#include <Utils_Timer.hxx>
#endif
-#ifdef HAVE_MPI2
-#include <mpi.h>
-#endif
-
#include "Container_init_python.hxx"
using namespace std;
MESSAGE("constructor");
_NS = new SALOME_NamingService(orb);
_ResManager = new SALOME_ResourcesManager(orb);
+ _id=0;
PortableServer::POA_var root_poa = PortableServer::POA::_the_root_poa();
PortableServer::POAManager_var pman = root_poa->the_POAManager();
PortableServer::POA_var my_poa;
FindOrStartContainer(const Engines::MachineParameters& params,
const Engines::MachineList& possibleComputers)
{
+ long id;
+ string containerNameInNS;
+ char idc[3*sizeof(long)];
+
Engines::Container_ptr ret = FindContainer(params,possibleComputers);
if(!CORBA::is_nil(ret))
return ret;
string theMachine=_ResManager->FindBest(possibleComputers);
MESSAGE("try to launch it on " << theMachine);
+ // Get Id for container: a parallel container registers in Naming Service
+ // on the machine where process 0 runs. ContainerManager doesn't know the name
+ // of this machine before the launch of the parallel container. So, to get
+ // the IOR of the parallel container in Naming Service, ContainerManager
+ // gives it a unique Id. The parallel container registers its name under
+ // the /ContainerManager/Id directory in NamingService.
+
+ id = GetIdForContainer();
+
string command;
if(theMachine=="")
{
}
else if(theMachine==GetHostname())
{
- command=_ResManager->BuildCommandToLaunchLocalContainer(params);
+ command=_ResManager->BuildCommandToLaunchLocalContainer(params,id);
}
else
command =
- _ResManager->BuildCommandToLaunchRemoteContainer(theMachine,params);
+ _ResManager->BuildCommandToLaunchRemoteContainer(theMachine,params,id);
_ResManager->RmTmpFile();
int status=system(command.c_str());
count-- ;
if ( count != 10 )
MESSAGE( count << ". Waiting for FactoryServer on " << theMachine);
- string containerNameInNS =
- _NS->BuildContainerNameForNS(params,theMachine.c_str());
+ if(params.isMPI)
+ {
+ containerNameInNS = "/ContainerManager/id";
+ sprintf(idc,"%ld",id);
+ containerNameInNS += idc;
+ }
+ else
+ containerNameInNS =
+ _NS->BuildContainerNameForNS(params,theMachine.c_str());
SCRUTE(containerNameInNS);
CORBA::Object_var obj = _NS->Resolve(containerNameInNS.c_str());
ret=Engines::Container::_narrow(obj);
MESSAGE("FindContainer: not found");
return Engines::Container::_nil();
}
+
+//=============================================================================
+/*!
+ * Get Id for container: a parallel container registers in Naming Service
+ * on the machine where process 0 runs. ContainerManager doesn't know the name
+ * of this machine before the launch of the parallel container. So, to get
+ * the IOR of the parallel container in Naming Service, ContainerManager
+ * gives it a unique Id. The parallel container registers its name under
+ * the /ContainerManager/Id directory in NamingService.
+ */
+//=============================================================================
+
+
+long SALOME_ContainerManager::GetIdForContainer(void)
+{
+ _id++;
+ return _id;
+}
+
FindContainer(const Engines::MachineParameters& params,
const char *theMachine);
+ long GetIdForContainer(void);
+ long _id;
+
SALOME_ResourcesManager *_ResManager;
SALOME_NamingService *_NS;
};
int argc, char *argv[])
: Engines_Container_i(orb,poa,containerName,argc,argv,false), MPIObject_i(nbproc,numproc)
{
+ long id=0;
+ string IdContainerinNS;
+ char idc[3*sizeof(long)];
+
MESSAGE("[" << numproc << "] activate object");
_id = _poa->activate_object(this);
-// this->_add_ref();
+
+ if(argc>1)
+ {
+ for(int i=0;i<argc-1;i++)
+ {
+ if(strcmp(argv[i],"-id")==0)
+ {
+ id = atoi(argv[i+1]);
+ continue;
+ }
+ }
+ }
+ SCRUTE(id);
if(numproc==0){
_NS = new SALOME_NamingService();
-// _NS = SINGLETON_<SALOME_NamingService>::Instance() ;
-// ASSERT(SINGLETON_<SALOME_NamingService>::IsAlreadyExisting()) ;
_NS->init_orb( CORBA::ORB::_duplicate(_orb) ) ;
-// Engines::Container_ptr pCont
-// = Engines::Container::_narrow(POA_Engines::MPIContainer::_this());
CORBA::Object_var obj=_poa->id_to_reference(*_id);
Engines::Container_var pCont = Engines::Container::_narrow(obj);
+
string hostname = GetHostname();
_containerName = _NS->BuildContainerNameForNS(containerName,hostname.c_str());
SCRUTE(_containerName);
_NS->Register(pCont, _containerName.c_str());
+
+ // A parallel container registers in Naming Service
+ // on the machine where process 0 runs. ContainerManager doesn't know the name
+ // of this machine before the launch of the parallel container. So, to get
+ // the IOR of the parallel container in Naming Service, ContainerManager
+ // gives it a unique Id. The parallel container registers its name under
+ // the /ContainerManager/Id directory in NamingService.
+
+ IdContainerinNS = "/ContainerManager/id";
+ sprintf(idc,"%ld",id);
+ IdContainerinNS += idc;
+ SCRUTE(IdContainerinNS);
+ _NS->Register(pCont, IdContainerinNS.c_str());
+
}
// Root retrieves the IORs of the containers from the other processes
// File : MPIObject_i.cxx
// Module : SALOME
+#include <mpi.h>
#include "MPIObject_i.hxx"
#include "utilities.h"
-#include <mpi.h>
using namespace std;
MPIObject_i::MPIObject_i()
+#include <mpi.h>
#include <iostream>
#include "MPIContainer_i.hxx"
#include "Utils_ORB_INIT.hxx"
#include "Utils_SINGLETON.hxx"
#include "utilities.h"
-#include <mpi.h>
#include "SALOMETraceCollector.hxx"
using namespace std;
for (unsigned int ind = 0; ind < contList.size(); ind++)
{
name = contList[ind].c_str();
+
+ if ( nbproc >= 1 )
+ {
+ char str_nbproc[8];
+ sprintf(str_nbproc, "_%d", nbproc);
+ if( strstr(name.c_str(),str_nbproc) == NULL)
+ continue; // check only containers with _%d in name
+ }
+
name += "/";
name += componentName;
SCRUTE(name);
void SALOME_NamingService::Destroy_FullDirectory(const char* Path)
throw(ServiceUnreachable)
{
- Change_Directory(Path);
- vector<string> contList = list_directory();
-
- for (unsigned int ind = 0; ind < contList.size(); ind++)
- Destroy_Name(contList[ind].c_str());
-
- Destroy_Directory(Path);
+ if( Change_Directory(Path) )
+ {
+ vector<string> contList = list_directory();
- Destroy_Name(Path);
+ for (unsigned int ind = 0; ind < contList.size(); ind++)
+ Destroy_Name(contList[ind].c_str());
+
+ Destroy_Directory(Path);
+
+ Destroy_Name(Path);
+ }
}
// ============================================================================
if (endIdx == string::npos)
endIdx = path.length();
int lsub = endIdx - begIdx;
- if (lsub > 1)
+ if (lsub >= 1)
splitPath.push_back(path.substr(begIdx, lsub));
begIdx = path.find_first_not_of(delims, endIdx);
}
self._orb = orb
# initialize root context and current context
ok = 0
- steps = 40
+ steps = 240
while steps > 0 and ok == 0:
try:
obj =self._orb.resolve_initial_references("NameService")
MESSAGE(" Name service not found")
time.sleep(0.25)
steps = steps - 1
- if steps == 0:
+ if steps == 0 and self._root_context is None:
MESSAGE ( "Name Service Reference is invalid" )
sys.exit(1)
#-------------------------------------------------------------------------
NSTEST::echo_var anEchoRef1a = NSTEST::echo::_narrow(obj);
CPPUNIT_ASSERT(!CORBA::is_nil(anEchoRef1a));
CPPUNIT_ASSERT(anEchoRef1->getId() == anEchoRef1a->getId());
+
+ NSTEST::echo_var anEchoRef2 = myFactory->createInstance();
+ _NS.Register(anEchoRef2,"/nstest2/1/2/3/4/echo_1");
+
+ obj = _NS.Resolve("/nstest2/1/2/3/4/echo_1");
+ CPPUNIT_ASSERT(!CORBA::is_nil(obj));
+ NSTEST::echo_var anEchoRef2a = NSTEST::echo::_narrow(obj);
+ CPPUNIT_ASSERT(!CORBA::is_nil(anEchoRef2a));
+ CPPUNIT_ASSERT(anEchoRef2->getId() == anEchoRef2a->getId());
}
// ============================================================================
string
SALOME_ResourcesManager::BuildCommandToLaunchRemoteContainer
(const string& machine,
- const Engines::MachineParameters& params)
+ const Engines::MachineParameters& params, const long id)
{
string command;
-
+ int nbproc;
+ char idc[3*sizeof(long)];
+
if ( ! _isAppliSalomeDefined )
command = BuildTempFileToLaunchRemoteContainer(machine, params);
if (params.isMPI)
{
- int nbproc;
-
if ( (params.nb_node <= 0) && (params.nb_proc_per_node <= 0) )
nbproc = 1;
else if ( params.nb_node == 0 )
ASSERT(getenv("NSPORT"));
command += getenv("NSPORT"); // port of CORBA name server
- command += " SALOME_Container ";
+ if(params.isMPI)
+ {
+ command += " mpirun -np ";
+ std::ostringstream o;
+ o << nbproc << " ";
+ command += o.str();
+#ifdef WITHLAM
+ command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
+#endif
+ command += " SALOME_MPIContainer ";
+ }
+ else
+ command += " SALOME_Container ";
+
+ command += _NS->ContainerName(params);
+ command += " -id ";
+ sprintf(idc,"%ld",id);
+ command += idc;
+ command += " -";
+ AddOmninamesParams(command);
+ command += " > /tmp/";
command += _NS->ContainerName(params);
- command += "&";
+ command += "_";
+ command += GetHostname();
+ command += "_";
+ command += getenv( "USER" ) ;
+ command += ".log 2>&1 &" ;
MESSAGE("command =" << command);
}
string
SALOME_ResourcesManager::BuildCommandToLaunchLocalContainer
-(const Engines::MachineParameters& params)
+(const Engines::MachineParameters& params, const long id)
{
_TmpFileName = "";
string command;
int nbproc = 0;
+ char idc[3*sizeof(long)];
if (params.isMPI)
{
o << nbproc << " ";
command += o.str();
+#ifdef WITHLAM
command += "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
+#endif
if (isPythonContainer(params.container_name))
command += "pyMPI SALOME_ContainerPy.py ";
}
command += _NS->ContainerName(params);
+ command += " -id ";
+ sprintf(idc,"%ld",id);
+ command += idc;
command += " -";
AddOmninamesParams(command);
command += " > /tmp/";
std::ostringstream o;
tempOutputFile << nbproc << " ";
+#ifdef WITHLAM
+ tempOutputFile << "-x PATH,LD_LIBRARY_PATH,OMNIORB_CONFIG,SALOME_trace ";
+#endif
}
tempOutputFile << (*(resInfo.ModulesPath.find("KERNEL"))).second
std::string BuildCommandToLaunchRemoteContainer
(const std::string& machine,
- const Engines::MachineParameters& params);
+ const Engines::MachineParameters& params, const long id);
std::string BuildCommandToLaunchLocalContainer
- (const Engines::MachineParameters& params);
+ (const Engines::MachineParameters& params, const long id);
void RmTmpFile();
{
SALOMEDS::Locker lock;
Handle(SALOMEDSImpl_SObject) aSO;
- char* anID = anObject->GetID();
- aSO = Handle(SALOMEDSImpl_Study)::DownCast(_impl->GetOwner())->GetSObject(anID);
- delete [] anID;
+ CORBA::String_var anID = anObject->GetID();
+ aSO = Handle(SALOMEDSImpl_Study)::DownCast(_impl->GetOwner())->GetSObject(anID.inout());
Handle(TDF_Attribute) anAttr;
try {
anAttr = _impl->FindOrCreateAttribute(aSO, TCollection_AsciiString((char*)aTypeOfAttribute));
class SALOME_DriverPy_i(SALOMEDS__POA.Driver):
"""
+ Python implementation of generic SALOMEDS driver.
+ Should be inherited by any Python module's engine
+ to provide a persistence mechanism.
"""
- _ComponentDataType = None
-
def __init__ (self, componentDataType):
print "SALOME_DriverPy.__init__: ",componentDataType
- _ComponentDataType = componentDataType
+ self._ComponentDataType = componentDataType
def IORToLocalPersistentID(self, theSObject, IORString, isMultiFile, isASCII):
return theSObject.GetID()
return ""
def ComponentDataType(self):
- return _ComponentDataType
+ return self._ComponentDataType
def Save(self, theComponent, theURL, isMultiFile):
return NULL
{
int ret;
ret = pthread_mutex_lock(&_incrementMutex); // lock access to counters
- pos++;
+ unsigned long mypos = ++pos;
ret = pthread_mutex_unlock(&_incrementMutex); // release lock
- return pos;
+ return mypos;
}