Engines_Container_i::Engines_Container_i (CORBA::ORB_ptr orb,
PortableServer::POA_ptr poa,
char *containerName ,
- int argc , char* argv[] ) :
+ int argc , char* argv[],
+ bool regist,
+ bool activ ) :
_numInstance(0)
{
_orb = CORBA::ORB::_duplicate(orb) ;
_poa = PortableServer::POA::_duplicate(poa) ;
- MESSAGE("activate object");
- _id = _poa->activate_object(this);
-
-// _NS = new SALOME_NamingService(_orb);
- _NS = SINGLETON_<SALOME_NamingService>::Instance() ;
- ASSERT(SINGLETON_<SALOME_NamingService>::IsAlreadyExisting()) ;
- _NS->init_orb( orb ) ;
-
- Engines::Container_ptr pCont
- = Engines::Container::_narrow(_this());
- SCRUTE(_containerName);
- _NS->Register(pCont, _containerName.c_str());
-}
+ // Pour les containers paralleles: il ne faut pas activer le container generique, mais le container specialise
+ if(activ){
+ MESSAGE("activate object");
+ _id = _poa->activate_object(this);
+ }
-// Constructeur pour composant parallele : ne pas faire appel au naming service
-Engines_Container_i::Engines_Container_i (CORBA::ORB_ptr orb,
- PortableServer::POA_ptr poa,
- char *containerName,
- int flag )
- : _numInstance(0)
-{
- string hostname = GetHostname();
- SCRUTE(hostname);
+ // Pour les containers paralleles: il ne faut pas enregistrer le container generique, mais le container specialise
+ if(regist){
- _containerName = "/Containers/";
- if (strlen(containerName)== 0)
- {
- _containerName += hostname;
- }
- else
- {
- _containerName += containerName;
- }
+ // _NS = new SALOME_NamingService(_orb);
+ _NS = SINGLETON_<SALOME_NamingService>::Instance() ;
+ ASSERT(SINGLETON_<SALOME_NamingService>::IsAlreadyExisting()) ;
+ _NS->init_orb( orb ) ;
- _orb = CORBA::ORB::_duplicate(orb) ;
- _poa = PortableServer::POA::_duplicate(poa) ;
+ Engines::Container_ptr pCont
+ = Engines::Container::_narrow(_this());
+ SCRUTE(_containerName);
+ _NS->Register(pCont, _containerName.c_str());
+ }
}
Engines_Container_i(CORBA::ORB_ptr orb,
PortableServer::POA_ptr poa,
char * containerName ,
- int argc, char* argv[]);
-// Constructeur pour composant parallele : ne pas faire appel au naming service
- Engines_Container_i(CORBA::ORB_ptr orb,
- PortableServer::POA_ptr poa,
- char * containerName,
- int flag);
+ int argc, char* argv[],
+ bool regist = true,
+ bool activ = true);
virtual ~Engines_Container_i();
map<string, void *> remove_map ;
omni_mutex _numInstanceMutex ; // if several threads on the same object
-private:
+ //private:
int _argc ;
char** _argv ;
if path != "" :
rshstr = rshstr + path + "/../bin/"
else :
- rshstr = rshstr + os.getenv( "SALOME_ROOT_DIR" ) + "/bin/"
+ rshstr = rshstr + os.getenv( "KERNEL_ROOT_DIR" ) + "/bin/"
if theContainer == "FactoryServer" :
rshstr = rshstr + "./runSession ./SALOME_Container "
else :
return aContainer
return aContainer
- #os.system("rsh -n dm2s0017 /export/home/SALOME_ROOT/bin/runSession SALOME_Container -ORBInitRef NameService=corbaname::dm2s0017:1515")
+ #os.system("rsh -n dm2s0017 /export/home/KERNEL_ROOT/bin/runSession SALOME_Container -ORBInitRef NameService=corbaname::dm2s0017:1515")
#-------------------------------------------------------------------------
protected:
SALOME_NamingService *_NS;
Engines::Container_var _FactoryServer ;
-
-private:
+ std::string ComputerPath( const char * theComputer ) ;
std::string ContainerName( const char * aComputerContainer ,
std::string * theComputer ,
std::string * theContainer ) ;
- std::string ComputerPath( const char * theComputer ) ;
+
+private:
Engines::Container_var FindOrStartContainer(const std::string aComputerContainer ,
const std::string theComputer ,
const std::string theContainer ) ;
#include <dlfcn.h>
#include <stdio.h>
#include "MPIContainer_i.hxx"
+#include "SALOME_NamingService.hxx"
+#include "Utils_SINGLETON.hxx"
+#include "OpUtil.hxx"
#include "utilities.h"
-MPIContainer_i::MPIContainer_i(int nbproc, int numproc,
- CORBA::ORB_ptr orb,
- PortableServer::POA_ptr poa,
- char * containerName)
- : Engines_Container_i(orb,poa,containerName,0), MPIObject_i(nbproc,numproc)
+
+// L'appel au registry SALOME ne se fait que pour le process 0
+Engines_MPIContainer_i::Engines_MPIContainer_i(int nbproc, int numproc,
+ CORBA::ORB_ptr orb,
+ PortableServer::POA_ptr poa,
+ char * containerName,
+ int argc, char *argv[])
+ : Engines_Container_i(orb,poa,containerName,argc,argv,false,false), MPIObject_i(nbproc,numproc)
{
+ MESSAGE("[" << numproc << "] activate object");
_id = _poa->activate_object(this);
- MESSAGE("[" << _numproc << "] containerName=" << _containerName);
- if( _numproc == 0 ){
+ if(numproc==0){
+
+ // _NS = new SALOME_NamingService(_orb);
_NS = SINGLETON_<SALOME_NamingService>::Instance() ;
ASSERT(SINGLETON_<SALOME_NamingService>::IsAlreadyExisting()) ;
_NS->init_orb( orb ) ;
Engines::Container_ptr pCont
= Engines::Container::_narrow(POA_Engines::MPIContainer::_this());
- _NS->Register(pCont, _containerName.c_str());
+ SCRUTE(_containerName);
+ _NS->Register(pCont, _containerName.c_str());
}
// Root recupere les ior des container des autre process
BCastIOR(_orb,pobj,true);
}
-MPIContainer_i::~MPIContainer_i(void)
+Engines_MPIContainer_i::~Engines_MPIContainer_i(void)
{
+ MESSAGE("[" << _numproc << "] Engines_MPIContainer_i::~Engines_MPIContainer_i()");
if( !handle_map.empty() ){
- MESSAGE("[" << _numproc << "] MPIContainer_i::~MPIContainer_i: warning destroy a not empty container");
+ MESSAGE("[" << _numproc << "] Engines_MPIContainer_i::~Engines_MPIContainer_i: warning destroy a not empty container");
+ }
+}
+
+// Start MPI Container
+Engines::MPIContainer_ptr Engines_MPIContainer_i::start_MPIimpl(
+ const char* ContainerName,
+ CORBA::Short nbproc )
+{
+
+ char nbp[1024];
+
+ MESSAGE("[" << _numproc << "] start_impl argc " << _argc << " ContainerName " << ContainerName
+ << hex << this << dec) ;
+ _numInstanceMutex.lock() ; // lock on the instance number
+
+ CORBA::Object_var obj = Engines::MPIContainer::_nil() ;
+ bool nilvar = true ;
+ try {
+ string cont("/Containers/");
+ cont += machineName() ;
+ cont += "/" ;
+ cont += ContainerName;
+ INFOS("[" << _numproc << "] " << machineName() << " start_impl unknown container " << cont.c_str()
+ << " try to Resolve" );
+ obj = _NS->Resolve( cont.c_str() );
+ nilvar = CORBA::is_nil( obj ) ;
+ if ( nilvar ) {
+ INFOS("[" << _numproc << "] " << machineName() << " start_impl unknown container "
+ << ContainerName);
+ }
+ }
+ catch (ServiceUnreachable&) {
+ INFOS("[" << _numproc << "] " << machineName() << "Caught exception: Naming Service Unreachable");
+ }
+ catch (...) {
+ INFOS("[" << _numproc << "] " << machineName() << "Caught unknown exception.");
+ }
+ if ( !nilvar ) {
+ _numInstanceMutex.unlock() ;
+ MESSAGE("[" << _numproc << "] start_impl container found without runSession") ;
+ return Engines::MPIContainer::_narrow(obj);
+ }
+ int i = 0 ;
+ while ( _argv[ i ] ) {
+ MESSAGE("[" << _numproc << "] argv" << i << " " << _argv[ i ]) ;
+ i++ ;
+ }
+// string shstr( "rsh -n " ) ;
+// shstr += machineName() ;
+// shstr += " " ;
+// shstr += _argv[ 0 ] ;
+// string shstr( _argv[ 0 ] ) ;
+ sprintf(nbp,"./runSession mpirun -np %d SALOME_MPIContainer ",nbproc);
+ string shstr(nbp);
+ shstr += ContainerName ;
+ if ( _argc == 4 ) {
+ shstr += " " ;
+ shstr += _argv[ 2 ] ;
+ shstr += " " ;
+ shstr += _argv[ 3 ] ;
+ }
+ shstr += " > /tmp/" ;
+ shstr += ContainerName ;
+ shstr += ".log 2>&1 &" ;
+ MESSAGE("system(" << shstr << ")") ;
+ int status = system( shstr.c_str() ) ;
+ if (status == -1) {
+ INFOS("[" << _numproc << "] Engines_MPIContainer_i::start_impl runSession(SALOME_MPIContainer) failed (system command status -1)") ;
+ }
+ else if (status == 217) {
+ INFOS("[" << _numproc << "] Engines_MPIContainer_i::start_impl runSession(SALOME_MPIContainer) failed (system command status 217)") ;
+ }
+ INFOS("[" << _numproc << "] " << machineName() << " Engines_MPIContainer_i::start_impl runSession(SALOME_MPIContainer) done");
+#if 0
+ pid_t pid = fork() ;
+ if ( pid == 0 ) {
+ string anExe( _argv[ 0 ] ) ;
+ anExe += "runSession" ;
+ char * args[ 9 ] ;
+ args[ 0 ] = "runSession" ;
+ args[ 1 ] = "mpirun" ;
+ args[ 2 ] = "-np" ;
+ args[ 3 ] = (char*)calloc(10,sizeof(char));
+ sprintf(args[ 3 ],"%d",nbproc);
+ args[ 4 ] = "SALOME_MPIContainer" ;
+ args[ 5 ] = strdup( ContainerName ) ;
+ args[ 6 ] = strdup( _argv[ 2 ] ) ;
+ args[ 7 ] = strdup( _argv[ 3 ] ) ;
+ args[ 8 ] = NULL ;
+ MESSAGE("[" << _numproc << "] execl(" << anExe.c_str() << " , " << args[ 0 ] << " , "
+ << args[ 1 ] << " , " << args[ 2 ] << " , " << args[ 3 ]
+ << " , " << args[ 4 ] << args[ 5 ] << args[ 6 ]
+ << args[ 7 ] << ")") ;
+ int status = execv( anExe.c_str() , args ) ;
+ if (status == -1) {
+ INFOS("[" << _numproc << "] Engines_MPIContainer_i::start_impl execl failed (system command status -1)") ;
+ perror( "Engines_MPIContainer_i::start_impl execl error ") ;
+ }
+ else {
+ INFOS("[" << _numproc << "] " << machineName() << " Engines_MPIContainer_i::start_impl execl done");
+ }
+ exit(0) ;
+ }
+#endif
+
+ obj = Engines::MPIContainer::_nil() ;
+ try {
+ string cont("/Containers/");
+ cont += machineName() ;
+ cont += "/" ;
+ cont += ContainerName;
+ nilvar = true ;
+ int count = 20 ;
+ while ( nilvar && count >= 0) {
+ sleep( 1 ) ;
+ obj = _NS->Resolve(cont.c_str());
+ nilvar = CORBA::is_nil( obj ) ;
+ if ( nilvar ) {
+ INFOS("[" << _numproc << "] " << count << ". " << machineName()
+ << " start_impl unknown container " << cont.c_str());
+ count -= 1 ;
+ }
+ }
+ _numInstanceMutex.unlock() ;
+ if ( !nilvar ) {
+ MESSAGE("[" << _numproc << "] start_impl container found after runSession(SALOME_MPIContainer)") ;
+ }
+ return Engines::MPIContainer::_narrow(obj);
}
+ catch (ServiceUnreachable&) {
+ INFOS("[" << _numproc << "] " << machineName() << "Caught exception: Naming Service Unreachable");
+ }
+ catch (...) {
+ INFOS("[" << _numproc << "] " << machineName() << "Caught unknown exception.");
+ }
+ _numInstanceMutex.unlock() ;
+ MESSAGE("[" << _numproc << "] start_impl MPI container not found after runSession(SALOME_MPIContainer)") ;
+ return Engines::MPIContainer::_nil() ;
}
// Load component
-Engines::Component_ptr MPIContainer_i::load_impl(const char* nameToRegister,
+Engines::Component_ptr Engines_MPIContainer_i::load_impl(const char* nameToRegister,
const char* componentName)
{
int ip;
- Engines::Component_var iobject;
- Engines::MPIObject_var pobj;
- char cproc[4];
if( _numproc == 0 ){
// Invocation du chargement du composant dans les autres process
for(ip= 1;ip<_nbproc;ip++)
- (Engines::MPIContainer::_narrow((*_tior)[ip]))->load_impl(nameToRegister,
+ (Engines::MPIContainer::_narrow((*_tior)[ip]))->SPload_impl(nameToRegister,
componentName);
}
+ return Lload_impl(nameToRegister,componentName);
+
+}
+
+// Load component
+void Engines_MPIContainer_i::SPload_impl(const char* nameToRegister,
+ const char* componentName)
+{
+ Lload_impl(nameToRegister,componentName);
+}
+
+Engines::Component_ptr Engines_MPIContainer_i::Lload_impl(
+ const char* nameToRegister,
+ const char* componentName)
+{
+ Engines::Component_var iobject;
+ Engines::MPIObject_var pobj;
+ char cproc[4];
+
sprintf(cproc,"_%d",_numproc);
- BEGIN_OF("[" << _numproc << "] MPIContainer_i::load_impl");
+ BEGIN_OF("[" << _numproc << "] MPIContainer_i::Lload_impl");
_numInstanceMutex.lock() ; // lock on the instance number
_numInstance++ ;
char _aNumI[12];
sprintf(_aNumI,"%d",_numInstance) ;
- _numInstanceMutex.unlock() ;
string _impl_name = componentName;
- string instanceName = string(nameToRegister) + "_inst_" + _aNumI + cproc;
+ string _nameToRegister = nameToRegister;
+ string instanceName = _nameToRegister + "_inst_" + _aNumI + cproc;
+ MESSAGE("[" << _numproc << "] instanceName=" << instanceName);
string absolute_impl_name(_impl_name);
MESSAGE("[" << _numproc << "] absolute_impl_name=" << absolute_impl_name);
return Engines::Component::_nil() ;
}
- string factory_name = string(nameToRegister) + string("Engine_factory");
+ string factory_name = _nameToRegister + string("Engine_factory");
MESSAGE("[" << _numproc << "] factory_name=" << factory_name) ;
PortableServer::ObjectId * (*MPIComponent_factory) (int,int,
if ((error = dlerror()) != NULL){
// Try to load a sequential component
MESSAGE("[" << _numproc << "] Try to load a sequential component");
+ _numInstanceMutex.unlock() ;
iobject = Engines_Container_i::load_impl(nameToRegister,componentName);
if( CORBA::is_nil(iobject) ) return Engines::Component::_duplicate(iobject);
}
// Instanciation du composant parallele
MESSAGE("[" << _numproc << "] Try to load a parallel component");
PortableServer::ObjectId * id = (MPIComponent_factory)
- (_nbproc,_numproc,_orb, _poa, _id, instanceName.c_str(), nameToRegister);
+ (_nbproc,_numproc,_orb, _poa, _id, instanceName.c_str(), _nameToRegister.c_str());
// get reference from id
CORBA::Object_var o = _poa->id_to_reference(*id);
pobj = Engines::MPIObject::_narrow(o) ;
- iobject = Engines::Component::_narrow(pobj) ;
+ iobject = Engines::Component::_narrow(o) ;
}
- // Root recupere les ior des composants des autre process
- BCastIOR(_orb,pobj,false);
-
if( _numproc == 0 ){
// utiliser + tard le registry ici :
// register the engine under the name containerName.dir/nameToRegister.object
- string component_registerName = _containerName + "/" + nameToRegister;
+ string component_registerName = _containerName + "/" + _nameToRegister;
_NS->Register(iobject, component_registerName.c_str()) ;
}
- _numInstanceMutex.lock() ; // lock on the add on handle_map (necessary ?)
handle_map[instanceName] = handle;
_numInstanceMutex.unlock() ;
- END_OF("[" <<_numproc << "] MPIContainer_i::load_impl");
+
+ // Root recupere les ior des composants des autre process
+ BCastIOR(_orb,pobj,false);
+
+ END_OF("[" <<_numproc << "] MPIContainer_i::Lload_impl");
return Engines::Component::_duplicate(iobject);
}
-void MPIContainer_i::remove_impl(Engines::Component_ptr component_i)
+void Engines_MPIContainer_i::remove_impl(Engines::Component_ptr component_i)
{
int ip;
Engines::Component_ptr cptr;
for(ip= 1;ip<_nbproc;ip++){
spcptr = Engines::MPIObject::_narrow((*(pcptr->tior()))[ip]);
cptr = (Engines::Component_ptr)spcptr;
- (Engines::MPIContainer::_narrow((*_tior)[ip]))->remove_impl(cptr);
+ (Engines::MPIContainer::_narrow((*_tior)[ip]))->SPremove_impl(cptr);
}
}
+ Lremove_impl(component_i);
+}
+
+void Engines_MPIContainer_i::SPremove_impl(Engines::Component_ptr component_i)
+{
+ Lremove_impl(component_i);
+}
+
+void Engines_MPIContainer_i::Lremove_impl(Engines::Component_ptr component_i)
+{
+ int ip;
+ Engines::Component_ptr cptr;
+ Engines::MPIObject_ptr pcptr;
+ Engines::MPIObject_ptr spcptr;
+
+ BEGIN_OF("[" << _numproc << "] MPIContainer_i::Lremove_impl");
+
+ ASSERT(! CORBA::is_nil(component_i));
+
string instanceName = component_i->instanceName() ;
MESSAGE("[" << _numproc << "] unload component " << instanceName);
component_i->destroy() ;
{
MESSAGE("[" << _numproc << "] stay " << (*im).first);
}
+
+ END_OF("[" << _numproc << "] MPIContainer_i::Lremove_impl");
+
}
-void MPIContainer_i::finalize_removal()
+void Engines_MPIContainer_i::finalize_removal()
{
int ip;
- MESSAGE("[" << _numproc << "] finalize unload : dlclose");
-
if( _numproc == 0 ){
// Invocation de la destruction du composant dans les autres process
for(ip= 1;ip<_nbproc;ip++)
- (Engines::MPIContainer::_narrow((*_tior)[ip]))->finalize_removal();
+ (Engines::MPIContainer::_narrow((*_tior)[ip]))->SPfinalize_removal();
}
+ Lfinalize_removal();
+}
+
+void Engines_MPIContainer_i::SPfinalize_removal()
+{
+ Lfinalize_removal();
+}
+
+void Engines_MPIContainer_i::Lfinalize_removal()
+{
+ BEGIN_OF("[" << _numproc << "] MPIContainer_i::Lfinalize_removal");
+
map<string, void *>::iterator im ;
- _numInstanceMutex.lock() ; // lock on the explore remove_map & dlclose
+ // lock on the explore remove_map & dlclose
+ _numInstanceMutex.lock() ;
for (im = remove_map.begin() ; im != remove_map.end() ; im ++)
{
void * handle = (*im).second ;
- dlclose(handle) ;
MESSAGE("[" << _numproc << "] dlclose " << (*im).first);
+ dlclose(handle) ;
}
+ MESSAGE("[" << _numproc << "] remove_map.clear()");
remove_map.clear() ;
_numInstanceMutex.unlock() ;
- MESSAGE("[" << _numproc << "] remove_map.clear()");
+
+ END_OF("[" << _numproc << "] MPIContainer_i::Lfinalize_removal");
}
#define _SALOME_PCONTAINER_
#include <SALOMEconfig.h>
-#include CORBA_SERVER_HEADER(MPIContainer)
+#include CORBA_SERVER_HEADER(SALOME_MPIContainer)
#include "SALOME_Container_i.hxx"
-#include "SALOME_NamingService.hxx"
-#include "Utils_SINGLETON.hxx"
-#include "OpUtil.hxx"
#include "MPIObject_i.hxx"
-class MPIContainer_i : public POA_Engines::MPIContainer,
- public Engines_Container_i,
- public MPIObject_i
+class Engines_MPIContainer_i : public POA_Engines::MPIContainer,
+ public Engines_Container_i,
+ public MPIObject_i
{
public:
// Constructor
- MPIContainer_i( int nbproc, int numproc,
- CORBA::ORB_ptr orb,
- PortableServer::POA_ptr poa,
- char * containerName);
+ Engines_MPIContainer_i( int nbproc, int numproc,
+ CORBA::ORB_ptr orb,
+ PortableServer::POA_ptr poa,
+ char * containerName,
+ int argc, char *argv[]);
// Destructor
- ~MPIContainer_i();
+ ~Engines_MPIContainer_i();
+
+ // Start MPI Container
+ Engines::MPIContainer_ptr start_MPIimpl(const char* ContainerName,
+ CORBA::Short nbproc);
// Load a component
+ // synchronous version for process 0
Engines::Component_ptr load_impl(const char* nameToRegister,
const char* componentName);
+ // asynchronous version for other process
+ void SPload_impl(const char* nameToRegister, const char* componentName);
// Unload a component
+ // synchronous version for process 0
void remove_impl(Engines::Component_ptr component_i);
+ // asynchronous version for other process
+ void SPremove_impl(Engines::Component_ptr component_i);
+
+ // synchronous version for process 0
void finalize_removal();
+ // asynchronous version for other process
+ void SPfinalize_removal();
+
+ private:
+ // local version to not duplicate code
+ // called by synchronous and asynchronous version
+ Engines::Component_ptr Lload_impl(const char* nameToRegister,
+ const char* componentName);
+ void Lremove_impl(Engines::Component_ptr component_i);
+ void Lfinalize_removal();
};
#endif
#define _SALOME_POBJECT_I_H_
#include <SALOMEconfig.h>
-#include CORBA_SERVER_HEADER(MPIObject)
+#include CORBA_SERVER_HEADER(SALOME_MPIObject)
class MPIObject_i: public POA_Engines::MPIObject
{
LIB = libSalomeMPIContainer.la
LIB_SRC = MPIObject_i.cxx MPIContainer_i.cxx
-LIB_SERVER_IDL = TypeData.idl MPIObject.idl MPIContainer.idl
+LIB_SERVER_IDL = SALOME_MPIObject.idl SALOME_MPIContainer.idl
# Executables targets
BIN = SALOME_MPIContainer
BIN_SRC =
-BIN_SERVER_IDL = TypeData.idl MPIObject.idl MPIContainer.idl
+BIN_SERVER_IDL = SALOME_MPIObject.idl SALOME_MPIContainer.idl
CXXFLAGS+=${MPICH_INCLUDES}
CXX_DEPEND_FLAG+=${MPICH_INCLUDES}
-// SALOME MPIContainer : implemenation of container based on MPI libraries
-//
-// Copyright (C) 2003 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
-// CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.opencascade.org/SALOME/ or email : webmaster.salome@opencascade.org
-//
-//
-//
-// File : SALOME_MPIContainer.cxx
-// Module : SALOME
-
using namespace std;
#include <iostream>
#include "MPIContainer_i.hxx"
+#include "Utils_ORB_INIT.hxx"
+#include "Utils_SINGLETON.hxx"
#include "utilities.h"
#include <mpi.h>
int main(int argc, char* argv[])
{
int nbproc, numproc;
- MPIContainer_i * myContainer;
+ int flag;
+ Engines_MPIContainer_i * myContainer=NULL;
BEGIN_OF(argv[0])
try {
MESSAGE("Connection MPI");
-
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&nbproc);
MPI_Comm_rank(MPI_COMM_WORLD,&numproc);
- MESSAGE("Initialisation CORBA");
+ MESSAGE("[" << numproc << "] Initialisation CORBA");
// Initialise the ORB.
- CORBA::ORB_var orb = CORBA::ORB_init(argc, argv);
+ // CORBA::ORB_var orb = CORBA::ORB_init(argc, argv);
+ ORB_INIT &init = *SINGLETON_<ORB_INIT>::Instance() ;
+ ASSERT(SINGLETON_<ORB_INIT>::IsAlreadyExisting()) ;
+ CORBA::ORB_var &orb = init( argc , argv ) ;
// Obtain a reference to the root POA.
CORBA::Object_var obj = orb->resolve_initial_references("RootPOA");
containerName = argv[1] ;
}
- MESSAGE("Chargement container sur proc: " << numproc);
- myContainer = new MPIContainer_i(nbproc,numproc,orb,factory_poa, containerName);
- MESSAGE("Fin chargement container");
+ MESSAGE("[" << numproc << "] Chargement container");
+ myContainer = new Engines_MPIContainer_i(nbproc,numproc,orb,factory_poa, containerName,argc,argv);
pman->activate();
orb->run();
orb->destroy();
- delete myContainer;
- MPI_Finalize();
}
- catch(CORBA::SystemException&) {
- INFOS("Caught CORBA::SystemException.")
+ catch(CORBA::SystemException&){
+ INFOS("Caught CORBA::SystemException.");
+ }
+ catch(PortableServer::POA::WrongPolicy&){
+ INFOS("Caught CORBA::WrongPolicyException.");
}
- catch(PortableServer::POA::WrongPolicy&)
- {
- INFOS("Caught CORBA::WrongPolicyException.")
+ catch(PortableServer::POA::ServantAlreadyActive&){
+ INFOS("Caught CORBA::ServantAlreadyActiveException");
}
- catch(PortableServer::POA::ServantAlreadyActive&)
- {
- INFOS("Caught CORBA::ServantAlreadyActiveException")
+ catch(CORBA::Exception&){
+ INFOS("Caught CORBA::Exception.");
}
- catch(CORBA::Exception&) {
- INFOS("Caught CORBA::Exception.")
+ catch(...){
+ INFOS("Caught unknown exception.");
}
- catch(...) {
- INFOS("Caught unknown exception.")
- }
+
+ if(myContainer)
+ delete myContainer;
+ MPI_Initialized(&flag);
+ if(flag)
+ MPI_Finalize();
+
END_OF(argv[0]);
}
--- /dev/null
+#==============================================================================
+# File : MPILifeCycleCORBA.py
+# Created : ven may 30 08:42:01 CEST 2003
+# Author : Bernard SECHER, CEA
+# Project : SALOME
+# Copyright : CEA 2003
+# $Header$
+#==============================================================================
+
+from LifeCycleCORBA import *
+
+class MPILifeCycleCORBA(LifeCycleCORBA):
+ #-------------------------------------------------------------------------
+
+ def __init__(self, orb):
+ MESSAGE( "MPILifeCycleCORBA::__init__" )
+ LifeCycleCORBA.__init__(self, orb)
+
+ #-------------------------------------------------------------------------
+
+ def FindOrStartMPIContainer(self, theComputer , theMPIContainerRoot, nbproc ):
+ theMPIContainer = theMPIContainerRoot + "_" + str(nbproc)
+ MESSAGE( "FindOrStartMPIContainer" + theComputer + theMPIContainer )
+ aMPIContainer = self.FindContainer( theComputer + "/" + theMPIContainer )
+ if aMPIContainer is None :
+ if (theMPIContainerRoot == "MPIFactoryServer") | (theMPIContainerRoot == "MPIFactoryServerPy") :
+ if theComputer == os.getenv("HOSTNAME") :
+ rshstr = ""
+ else :
+ rshstr = "rsh -n " + theComputer + " "
+ path = self.ComputerPath( theComputer )
+ if path != "" :
+ rshstr = rshstr + path + "/../bin/"
+ else :
+ rshstr = rshstr + os.getenv( "KERNEL_ROOT_DIR" ) + "/bin/"
+# rshstr = rshstr + os.getenv( "PWD" ) + "/"
+ if theMPIContainerRoot == "MPIFactoryServer" :
+ rshstr = rshstr + "./runSession mpirun -np " + str(nbproc) + " ./SALOME_MPIContainer "
+ else :
+ rshstr = rshstr + "./runSession ./SALOME_MPIContainerPy.py '"
+ rshstr = rshstr + theMPIContainer + " -"
+ omniORBcfg = os.getenv( "OMNIORB_CONFIG" )
+# omniORBcfg = os.getenv( "HOME" ) + "/.omniORB.cfg"
+ file = os.open( omniORBcfg , os.O_RDONLY )
+ ORBInitRef = os.read(file,132)
+ if ORBInitRef[len(ORBInitRef)-1] == '\n' :
+ ORBInitRef,bsn = ORBInitRef.split('\n')
+ os.close( file )
+ rshstr = rshstr + ORBInitRef
+ if theMPIContainerRoot == "MPIFactoryServerPy" :
+ rshstr = rshstr + "'"
+ rshstr = rshstr + " > /tmp/" + theMPIContainer + "_"
+ rshstr = rshstr + theComputer
+ rshstr = rshstr + ".log 2>&1 &"
+ os.system( rshstr )
+ MESSAGE( "FindOrStartMPIContainer" + rshstr + " done" )
+ else :
+ if theMPIContainer.find('Py') == -1 :
+ aMPIContainer = self.FindContainer( theComputer + "/" + "MPIFactoryServer_" + str(nbproc) )
+ else :
+ aMPIContainer = self.FindContainer( theComputer + "/" + "MPIFactoryServerPy_" + str(nbproc) )
+ aMPIContainer = aMPIContainer.start_impl( theMPIContainer )
+
+ count = 21
+ while aMPIContainer is None :
+ time.sleep(1)
+ count = count - 1
+ MESSAGE( str(count) + ". Waiting for " + theComputer + "/" + theMPIContainer )
+ aMPIContainer = self.FindContainer( theComputer + "/" + theMPIContainer )
+ if count == 0 :
+ return aMPIContainer
+
+ return aMPIContainer
+    #os.system("rsh -n dm2s0017 /export/home/KERNEL_ROOT/bin/runSession SALOME_Container -ORBInitRef NameService=corbaname::dm2s0017:1515")
+
+ #-------------------------------------------------------------------------
+
+ def FindOrLoadMPIComponent(self, MPIcontainerName, MPIcomponentName, nbproc):
+
+ theComputer,theMPIContainerRoot = self.ContainerName( MPIcontainerName )
+ theMPIContainer = theMPIContainerRoot + "_" + str(nbproc)
+ name = [CosNaming.NameComponent(theComputer,"dir"),
+ CosNaming.NameComponent(theMPIContainer,"dir"),
+ CosNaming.NameComponent(MPIcomponentName,"object")]
+ try:
+ obj = self._containerRootContext.resolve(name)
+ except CosNaming.NamingContext.NotFound, ex:
+ MESSAGE( "component " + MPIcomponentName + " not found, trying to load" )
+ MPIcontainer = self.FindContainer(theComputer + "/" + theMPIContainer)
+ if MPIcontainer is None:
+ MESSAGE( "MPIcontainer " + theComputer + "/" + theMPIContainer + " not found in Naming Service, trying to start" )
+ if (theMPIContainerRoot != "MPIFactoryServer") & (theMPIContainerRoot != "MPIFactoryServerPy") :
+ if theMPIContainer.find('Py') == -1 :
+ theMPIFactorycontainerRoot = "MPIFactoryServer"
+ theMPIFactorycontainer = theMPIFactorycontainerRoot + "_" + str(nbproc)
+ else :
+ theMPIFactorycontainerRoot = "MPIFactoryServerPy"
+ theMPIFactorycontainer = theMPIFactorycontainerRoot + "_" + str(nbproc)
+ MPIFactorycontainer = self.FindContainer(theComputer + "/" + theMPIFactorycontainer)
+ if MPIFactorycontainer is None:
+ MESSAGE( "MPIcontainer " + theComputer + "/" + theMPIFactorycontainer + " not found in Naming Service, trying to start" )
+ MPIFactorycontainer = self.FindOrStartMPIContainer(theComputer,theMPIFactorycontainerRoot,nbproc)
+ else:
+ MPIFactorycontainer = self.FindOrStartMPIContainer(theComputer,theMPIContainerRoot,nbproc)
+ if MPIFactorycontainer != None :
+ MPIcontainer = self.FindOrStartMPIContainer(theComputer,theMPIContainerRoot,nbproc)
+
+ if MPIcontainer != None:
+ compoinfo = self._catalog.GetComponent(MPIcomponentName)
+ if compoinfo is None:
+ MESSAGE( "MPIcomponent " + MPIcomponentName + " not found in Module Catalog" )
+ else:
+ try:
+ machineName = theComputer
+ path = compoinfo.GetPathPrefix(machineName) + "/"
+ except SALOME_ModuleCatalog.NotFound, ex:
+ MESSAGE( "machine " + machineName + " not found in Module Catalog" )
+ MESSAGE( "trying localhost" )
+ try:
+ path = compoinfo.GetPathPrefix("localhost") + "/"
+ except SALOME_ModuleCatalog.NotFound, ex:
+ path = ""
+ implementation = path + "lib" + MPIcomponentName + "Engine.so"
+ MESSAGE( "Trying to load " + implementation )
+ try:
+ MPIcomponent = MPIcontainer.load_impl(MPIcomponentName, implementation)
+ MESSAGE( "component " + MPIcomponent._get_instanceName() + " launched !" )
+ return MPIcomponent
+ except:
+ MESSAGE( "component " + MPIcomponentName + " NOT launched !" )
+
+ else:
+ try:
+ MPIcomponent = obj._narrow(Engines.Component)
+ if MPIcomponent is None:
+ MESSAGE( MPIcomponentName + " is not a component !" )
+ else:
+ MESSAGE( "MPIcomponent " + MPIcomponent._get_instanceName() + " found !" )
+ return MPIcomponent
+ except:
+ MESSAGE( MPIcomponentName + " failure" )
+ return None
--- /dev/null
+import salome
+from MPILifeCycleCORBA import *
+
+# create an LifeCycleCORBA instance
+lcc = MPILifeCycleCORBA(salome.orb)
--- /dev/null
+#==============================================================================
+# File : Makefile.in
+# Created : lun jui 2 20:32:24 CEST 2001
+# Author : Paul RASCLE, EDF - Marc Tajchman, CEA
+# Project : SALOME
+# Copyright : EDF 2001
+# $Header$
+#==============================================================================
+
+# source path
+top_srcdir=@top_srcdir@
+top_builddir=../..
+srcdir=@srcdir@
+VPATH=.:@srcdir@:@top_srcdir@/idl
+
+
+@COMMENCE@
+
+EXPORT_HEADERS = SALOME_MPILifeCycleCORBA.hxx
+
+EXPORT_PYSCRIPTS = MPIsalome.py MPILifeCycleCORBA.py
+
+# Libraries targets
+
+LIB = libSalomeMPILifeCycleCORBA.la
+LIB_SRC = SALOME_MPILifeCycleCORBA.cxx
+LIB_CLIENT_IDL = SALOME_MPIObject.idl SALOME_MPIContainer.idl \
+ SALOME_Component.idl SALOME_ModuleCatalog.idl
+
+# Executables targets
+BIN = TestMPILifeCycleCORBA
+BIN_SRC =
+BIN_CLIENT_IDL = Logger.idl SALOME_MPIObject.idl SALOME_MPIContainer.idl SALOME_TestMPIComponent.idl
+
+LDFLAGS += -lSalomeNS -lSalomeLifeCycleCORBA -lOpUtil -lSalomeLoggerServer
+
+@CONCLUDE@
+
--- /dev/null
+using namespace std;
+//=============================================================================
+// File : SALOME_MPILifeCycleCORBA.cxx
+// Created : mar jui 03 14:55:50 CEST 2003
+// Author : Bernard SECHER CEA
+// Project : SALOME
+// Copyright : CEA 2003
+// $Header$
+//=============================================================================
+
+#include <iostream>
+#include <fstream>
+#include <strstream>
+#include <iomanip>
+#include <stdio.h>
+#include <string.h>
+
+#include "OpUtil.hxx"
+#include "utilities.h"
+
+#include <ServiceUnreachable.hxx>
+
+#include "SALOME_MPILifeCycleCORBA.hxx"
+#include CORBA_CLIENT_HEADER(SALOME_ModuleCatalog)
+#include "SALOME_NamingService.hxx"
+
+SALOME_MPILifeCycleCORBA::SALOME_MPILifeCycleCORBA() :
+ SALOME_LifeCycleCORBA()
+{
+}
+
+SALOME_MPILifeCycleCORBA::SALOME_MPILifeCycleCORBA(SALOME_NamingService *ns) :
+ SALOME_LifeCycleCORBA(ns)
+{
+}
+
+SALOME_MPILifeCycleCORBA::~SALOME_MPILifeCycleCORBA()
+{
+}
+
+Engines::MPIContainer_var SALOME_MPILifeCycleCORBA::FindOrStartMPIContainer(
+ const string theComputer ,
+ const string theMPIContainerRoot,
+ const int nbproc)
+{
+ char nbp[1024];
+
+ sprintf(nbp,"_%d",nbproc);
+ string theMPIContainer = theMPIContainerRoot + nbp;
+ string aComputerContainer = theComputer + "/" + theMPIContainer;
+
+ SCRUTE( aComputerContainer ) ;
+ SCRUTE( theComputer ) ;
+ SCRUTE( theMPIContainer ) ;
+
+  // On recherche si le container est deja lance
+ Engines::MPIContainer_var aMPIContainer = Engines::MPIContainer::_narrow(FindContainer(aComputerContainer.c_str()));
+
+  // On a trouve le container: on renvoie une poignee dessus
+ if ( !CORBA::is_nil( aMPIContainer ) ) {
+ MESSAGE("MPIContainer " << aComputerContainer << " found!!!");
+ return aMPIContainer ;
+ }
+  // On n'a pas trouve le container
+ else {
+ MESSAGE("MPIContainer " << aComputerContainer << " not found!!!");
+ // On recherche un container generique
+ bool pyCont = false ;
+ int len = theMPIContainer.length() ;
+ if ( !strcmp( &theMPIContainerRoot.c_str()[len-2] , "Py" ) ) {
+ pyCont = true ;
+ }
+ string MPIFactoryServer = theComputer ;
+ if ( pyCont ) {
+ MPIFactoryServer += "/MPIFactoryServerPy" ;
+ }
+ else {
+ MPIFactoryServer += "/MPIFactoryServer" ;
+ }
+ MPIFactoryServer += nbp;
+ Engines::MPIContainer_var aMPIFactoryServer = Engines::MPIContainer::_narrow(FindContainer( MPIFactoryServer.c_str()));
+
+ // On n'a pas trouve le container generique: on lance le container demande
+ if ( CORBA::is_nil( aMPIFactoryServer ) ) {
+// rsh -n ikkyo /export/home/rahuel/SALOME_ROOT/bin/runSession SALOME_Container -ORBInitRef NameService=corbaname::dm2s0017:1515 &
+ string rsh( "" ) ;
+ if ( theComputer!= GetHostname() ) {
+ rsh += "rsh -n " ;
+ rsh += theComputer ;
+ rsh += " " ;
+ }
+ string path = ComputerPath( theComputer.c_str() ) ;
+ SCRUTE( path ) ;
+ if ( strlen(path.c_str()) > 0 ) {
+ cout << "path de longueur: " << strlen(path.c_str()) << endl;
+ rsh += path ;
+ rsh += "/../bin/" ;
+ }
+ rsh += "runSession " ;
+ if ( pyCont ) {
+ rsh += "SALOME_MPIContainerPy.py " ;
+ rsh += "MPIFactoryServerPy -" ;
+ }
+ else {
+ sprintf(nbp,"mpirun -np %d SALOME_MPIContainer ",nbproc);
+ rsh += nbp;
+ rsh += theMPIContainer +" -" ;
+ }
+ string omniORBcfg( getenv( "OMNIORB_CONFIG" ) ) ;
+ ifstream omniORBfile( omniORBcfg.c_str() ) ;
+ char ORBInitRef[12] ;
+ char nameservice[132] ;
+ omniORBfile >> ORBInitRef ;
+ rsh += ORBInitRef ;
+ rsh += " " ;
+ omniORBfile >> nameservice ;
+ omniORBfile.close() ;
+ char * bsn = strchr( nameservice , '\n' ) ;
+ if ( bsn ) {
+ bsn[ 0 ] = '\0' ;
+ }
+ rsh += nameservice ;
+ if ( pyCont ) {
+ rsh += " > /tmp/MPIFactoryServerPy_" ;
+ }
+ else {
+ rsh += " > /tmp/MPIFactoryServer_" ;
+ }
+ sprintf(nbp,"%d_",nbproc);
+ rsh += nbp;
+ rsh += theComputer ;
+ rsh += ".log 2>&1 &" ;
+ SCRUTE( rsh );
+ int status = system( rsh.c_str() ) ;
+ if (status == -1) {
+ INFOS("SALOME_MPILifeCycleCORBA::FindOrStartMPIContainer rsh failed (system command status -1)") ;
+ }
+ else if (status == 217) {
+ INFOS("SALOME_MPILifeCycleCORBA::FindOrStartContainer rsh failed (system command status 217)") ;
+ }
+ else {
+ int count = 21 ;
+ while ( CORBA::is_nil( aMPIFactoryServer ) && count ) {
+ sleep( 1 ) ;
+ count-- ;
+ if ( count != 10 )
+ MESSAGE( count << ". Waiting for FactoryServer on " << theComputer)
+ aMPIFactoryServer = Engines::MPIContainer::_narrow(FindContainer( MPIFactoryServer.c_str()));
+ }
+ if ( CORBA::is_nil( aMPIFactoryServer ) ) {
+ INFOS("SALOME_MPILifeCycleCORBA::FindOrStartMPIContainer rsh failed") ;
+ }
+ else if ( strcmp( theComputer.c_str() , GetHostname().c_str() ) ) {
+ _MPIFactoryServer = aMPIFactoryServer ;
+ }
+ }
+ }
+ //On a trouve le container generique: on renvoie une poigne dessus
+ if ( !CORBA::is_nil( aMPIFactoryServer ) ) {
+ if ( strcmp( theMPIContainer.c_str() , "MPIFactoryServer" ) ||
+ strcmp( theMPIContainer.c_str() , "MPIFactoryServerPy" ) ) {
+ MESSAGE("MPI Container not found ! trying to start " << aComputerContainer);
+ Engines::MPIContainer_var myMPIContainer = aMPIFactoryServer->start_MPIimpl( theMPIContainer.c_str(), nbproc ) ;
+ if ( !CORBA::is_nil( myMPIContainer ) ) {
+ MESSAGE("MPIContainer " << aComputerContainer << " started");
+ return myMPIContainer ;
+ }
+ else {
+ MESSAGE("MPIContainer " << aComputerContainer << " NOT started");
+ }
+ }
+ else {
+ MESSAGE("MPIContainer " << aComputerContainer << " started");
+ return aMPIFactoryServer ;
+ }
+ }
+ }
+ return Engines::MPIContainer::_nil();
+}
+
+// Engines::Component_var SALOME_MPILifeCycleCORBA::FindOrLoad_MPIComponent
+// (const char *MPIcontainerName,
+// const char *MPIcomponentName,
+// const char *implementation,
+// const int nbproc)
+// {
+// BEGIN_OF("FindOrLoad_MPIComponent(1)");
+// ASSERT(_NS != NULL);
+// string theComputer ;
+// string theMPIContainer ;
+// string theComputerContainer = ContainerName( MPIcontainerName ,
+// &theComputer ,
+// &theMPIContainer ) ;
+// Engines::MPIContainer_var cont = FindOrStartMPIContainer( theComputerContainer ,
+// theComputer ,
+// theMPIContainer,
+// nbproc) ;
+// // ASSERT(!CORBA::is_nil(cont));
+
+// string path( theComputerContainer );
+// path = path + "/";
+// path = path + MPIcomponentName;
+// SCRUTE(path);
+// try
+// {
+// CORBA::Object_var obj = _NS->Resolve(path.c_str());
+// if (CORBA::is_nil(obj))
+// {
+// MESSAGE("MPIComponent not found ! trying to load " << path);
+// Engines::Component_var compo
+// = cont->load_impl(MPIcomponentName, implementation);
+// // ASSERT(!CORBA::is_nil(compo));
+// MESSAGE("MPIComponent launched !" << path);
+// return compo;
+// }
+// else
+// {
+// MESSAGE("MPIComponent found !" << path);
+// Engines::Component_var compo = Engines::Component::_narrow(obj);
+// // ASSERT(!CORBA::is_nil(compo));
+// try
+// {
+// compo->ping();
+// }
+// catch (CORBA::COMM_FAILURE&)
+// {
+// INFOS("Caught CORBA::SystemException CommFailure. Engine "
+// << path << "does not respond" );
+// }
+// return compo;
+// }
+// }
+// catch (ServiceUnreachable&)
+// {
+// INFOS("Caught exception: Naming Service Unreachable");
+// }
+// catch (...)
+// {
+// INFOS("Caught unknown exception.");
+// }
+// return Engines::Component::_nil();
+// }
+
+// Find the component 'MPIcomponentName' in the MPI container named
+// "<MPIcontainerName>_<nbproc>" (starting the container first if needed).
+// If the component is not registered in the naming service yet, its engine
+// library lib<Component>Engine.so is located through the module catalog and
+// loaded into the container. Returns a nil reference on failure.
+Engines::Component_var SALOME_MPILifeCycleCORBA::FindOrLoad_MPIComponent
+                                  (const char *MPIcontainerName,
+				   const char *MPIcomponentName,
+				   const int nbproc)
+{
+
+  char nbp[1024];
+
+  // Suffix appended to container and component paths: "_<nbproc>"
+  sprintf(nbp,"_%d",nbproc);
+// BEGIN_OF("FindOrLoad_Component(2)");
+  ASSERT(_NS != NULL);
+  string theComputer ;
+  string theMPIContainerRoot ;
+  string theMPIContainer;
+  // Split "computer/container" into its host and container-root parts
+  string theComputerContainer = ContainerName( MPIcontainerName ,
+					       &theComputer ,
+					       &theMPIContainerRoot ) ;
+  theMPIContainer = theMPIContainerRoot + nbp;
+  Engines::MPIContainer_var cont = FindOrStartMPIContainer( theComputer ,
+							    theMPIContainerRoot,
+							    nbproc ) ;
+
+  if ( CORBA::is_nil( cont ) ) {
+    MESSAGE("MPIContainer not found ! " << theComputerContainer );
+    return Engines::Component::_nil();
+  }
+
+// char * machine = cont->machineName() ;
+  // NOTE(review): the requested computer name is used instead of asking the
+  // container for its actual machine name (see commented line above).
+  const char * machine = theComputer.c_str() ;
+
+  // Naming-service path of the component:
+  // "<computer/container>_<nbproc>/<component>"
+  string path( theComputerContainer );
+  path += nbp;
+  path += "/";
+  path += MPIcomponentName;
+  SCRUTE(path);
+
+  try {
+    CORBA::Object_var obj = _NS->Resolve(path.c_str());
+    if ( CORBA::is_nil( obj ) ) {
+      MESSAGE("MPIComponent not found ! trying to load " << path);
+      // Ask the module catalog where the component's engine library lives
+      CORBA::Object_var obj2 = _NS->Resolve("/Kernel/ModulCatalog");
+      SALOME_ModuleCatalog::ModuleCatalog_var Catalog =
+        SALOME_ModuleCatalog::ModuleCatalog::_narrow(obj2);
+
+      SALOME_ModuleCatalog::Acomponent_ptr compoInfo =
+        Catalog->GetComponent(MPIcomponentName);
+      if (CORBA::is_nil (compoInfo))
+	{
+	  INFOS("Catalog Error : Component not found in the catalog")
+          return Engines::Component::_nil();
+// exit (-1);
+	}
+
+      // NOTE(review): this 'path' shadows the naming-service path above;
+      // from here on it holds the directory prefix of the engine library.
+      string path;
+      try
+	{
+	  path = compoInfo->GetPathPrefix( machine ) ;
+	  path += "/" ;
+	}
+      catch (SALOME_ModuleCatalog::NotFound&)
+	{
+	  // Fall back to the localhost prefix when the host has none
+	  MESSAGE("GetPathPrefix(" << machine << ") not found!"
+		  << "trying localhost");
+	  try {
+	    path = compoInfo->GetPathPrefix("localhost") ;
+	    path += "/" ;
+	  }
+	  catch (SALOME_ModuleCatalog::NotFound&) {
+	    MESSAGE("GetPathPrefix(localhost) not found!") ;
+	    path = "" ;
+	  }
+	}
+
+      SCRUTE(path);
+      // Engine library name: lib<Component>Engine.so
+      string implementation(path);
+      implementation += "lib";
+      implementation += MPIcomponentName;
+      implementation += "Engine.so";
+
+      Engines::Component_var compo
+	= cont->load_impl(MPIcomponentName, implementation.c_str());
+
+// ASSERT(!CORBA::is_nil(compo));
+// MESSAGE("Component launched !" << path);
+      return compo;
+    }
+    else
+      {
+	MESSAGE("MPIComponent found !" << path);
+	Engines::Component_var compo = Engines::Component::_narrow(obj);
+	// ASSERT(!CORBA::is_nil(compo));
+	try
+	  {
+	    // Ping the component by asking for its instance name
+	    string instanceName = compo->instanceName();
+	  }
+	catch (CORBA::COMM_FAILURE&)
+	  {
+	    INFOS("Caught CORBA::SystemException CommFailure. Engine "
+		  << path << "does not respond" );
+	  }
+	return compo;
+      }
+  }
+  catch (ServiceUnreachable&)
+    {
+      INFOS("Caught exception: Naming Service Unreachable");
+    }
+  catch (...)
+    {
+      INFOS("Caught unknown exception.");
+    }
+  return Engines::Component::_nil();
+}
--- /dev/null
+//=============================================================================
+// File : SALOME_MPILifeCycleCORBA.hxx
+// Created : mar jui 03 14:55:45 CEST 2003
+// Author : Bernard SECHER, CEA
+// Project : SALOME
+// Copyright : CEA 2003
+// $Header$
+//=============================================================================
+
+#ifndef _SALOME_MPILIFECYCLECORBA_HXX_
+#define _SALOME_MPILIFECYCLECORBA_HXX_
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string>
+
+#include <SALOMEconfig.h>
+#include CORBA_CLIENT_HEADER(SALOME_MPIContainer)
+#include "SALOME_LifeCycleCORBA.hxx"
+
+// Extension of SALOME_LifeCycleCORBA dedicated to MPI containers and
+// components: container names are suffixed with the number of MPI
+// processes ("_<nbproc>").
+class SALOME_MPILifeCycleCORBA : public SALOME_LifeCycleCORBA
+{
+public:
+  SALOME_MPILifeCycleCORBA();
+  SALOME_MPILifeCycleCORBA(SALOME_NamingService *ns);
+  virtual ~SALOME_MPILifeCycleCORBA();
+
+// Engines::Component_var FindOrLoad_MPIComponent(const char *MPIcontainerName,
+// 						 const char *MPIcomponentName,
+// 						 const char *implementationPath,
+// 						 const int nbproc);
+  // Find (or load through the module catalog) the component in the MPI
+  // container of 'nbproc' processes; nil reference on failure.
+  Engines::Component_var FindOrLoad_MPIComponent(const char *MPIcontainerName,
+						 const char *MPIcomponentName,
+						 const int nbproc);
+protected:
+  // Handle kept on a remotely started MPI factory server
+  Engines::MPIContainer_var _MPIFactoryServer ;
+
+private:
+  // Find the container "<root>_<nbproc>" on 'theComputer', or start it
+  // through the generic MPI factory server.
+  Engines::MPIContainer_var FindOrStartMPIContainer(const string theComputer ,
+						    const string theMPIContainerRoot,
+						    const int nbproc) ;
+
+} ;
+
+#endif
--- /dev/null
+// using namespace std;
+//=============================================================================
+// File : TestMPILifeCycleCORBA.cxx
+// Created : mer jui 4 13:11:27 CEST 2003
+// Author : Bernard SECHER, 2003
+// Project : SALOME
+// Copyright : CEA 2003
+// $Header$
+//=============================================================================
+
+#include "utilities.h"
+#include <iostream>
+#include <unistd.h>
+#include <string>
+#include <SALOMEconfig.h>
+#include CORBA_CLIENT_HEADER(SALOME_MPIContainer)
+#include CORBA_CLIENT_HEADER(SALOME_TestMPIComponent)
+
+# include "Utils_ORB_INIT.hxx"
+# include "Utils_SINGLETON.hxx"
+#include "SALOME_NamingService.hxx"
+#include "SALOME_MPILifeCycleCORBA.hxx"
+#include "OpUtil.hxx"
+
+// Test driver: find or start the 2-process MPI container
+// "MPIFactoryServer_2", load the TestMPIComponent engine in it and call
+// its Coucou() method. Always returns 0; failures are only logged.
+int main (int argc, char * argv[])
+{
+
+  try{
+    // Initializing omniORB
+    CORBA::ORB_var orb = CORBA::ORB_init(argc, argv);
+
+    // Obtain a reference to the root POA
+    CORBA::Object_var obj = orb->resolve_initial_references("RootPOA") ;
+    PortableServer::POA_var poa = PortableServer::POA::_narrow(obj) ;
+
+    // Use Name Service to find container
+    SALOME_NamingService NS(orb);
+
+    SALOME_MPILifeCycleCORBA LCC(&NS);
+
+    // Request the component in a 2-process MPI container
+    Engines::Component_var comp = LCC.FindOrLoad_MPIComponent("MPIFactoryServer","TestMPIComponent",2);
+
+    Engines::TestMPIComponent_var m1 = Engines::TestMPIComponent::_narrow(comp);
+    if(CORBA::is_nil(m1)){
+      INFOS("echec recuperation poignee composant");
+    }
+    else{
+
+      INFOS("Lancement de coucou");
+      m1->Coucou(1L);
+    }
+
+    orb->destroy();
+  }
+  // Bug fix: the caught exception object 'ex' was unused and produced a
+  // compiler warning; catch by const reference without naming it.
+  catch(const CORBA::COMM_FAILURE&) {
+    INFOS("Caught system exception COMM_FAILURE -- unable to contact the object.");
+  }
+  catch(const CORBA::SystemException&) {
+    INFOS("Caught a CORBA::SystemException.");
+  }
+  catch(const CORBA::Exception&) {
+    INFOS("Caught CORBA::Exception.");
+  }
+  catch(...) {
+    INFOS("Caught unknown exception.");
+  }
+
+  return 0;
+}
SALOME_PYQT Loader
ifeq (@WITHMPICH@,yes)
- SUBDIRS+= MPIContainer
+ SUBDIRS+= MPIContainer MPILifeCycleCORBA TestMPIContainer
endif
@MODULE@
for (int iter = 0; iter < 3 ; iter++)
{
INFOS("----------------------------------------------------" << iter);
- string dirn = getenv("SALOME_ROOT_DIR");
+ string dirn = getenv("KERNEL_ROOT_DIR");
dirn += "/lib/salome/libSalomeTestComponentEngine.so";
obj = iGenFact->load_impl("SalomeTestComponent",dirn.c_str());
m1 = Engines::TestComponent::_narrow(obj);
--- /dev/null
+#==============================================================================
+# File : Makefile.in
+# Created : mer jui 04 12:58:13 CEST 2003
+# Author : Bernard SECHER, CEA
+# Project : SALOME
+# Copyright : CEA 2003
+# $Header$
+#==============================================================================
+
+# source path
+top_srcdir=@top_srcdir@
+top_builddir=../..
+srcdir=@srcdir@
+VPATH=.:@srcdir@:@top_srcdir@/idl
+
+@COMMENCE@
+
+EXPORT_PYSCRIPTS =
+
+EXPORT_HEADERS = TestMPIComponentEngine.hxx
+
+# Libraries targets
+# Engine library implementing the TestMPIComponent servant
+LIB = libSalomeTestMPIComponentEngine.la
+LIB_SRC = TestMPIComponentEngine.cxx
+LIB_CLIENT_IDL = Logger.idl SALOME_MPIObject.idl SALOME_TestMPIComponent.idl
+
+# Executables targets
+# Test programs driving the component through the MPI container
+BIN = TestMPIContainer TestBidon
+BIN_SRC =
+BIN_CLIENT_IDL = Logger.idl SALOME_MPIObject.idl SALOME_MPIContainer.idl SALOME_TestMPIComponent.idl
+BIN_SERVER_IDL =
+
+# Link against the MPI life-cycle and MPI container libraries
+LDFLAGS+= -lSalomeNotification -lSalomeNS -lSalomeMPILifeCycleCORBA -lSalomeLifeCycleCORBA -lSalomeMPIContainer -lSalomeContainer -lRegistry -lOpUtil -lSalomeLoggerServer
+
+
+@CONCLUDE@