From 620a780eff40f0872090599367145a05c82365d8 Mon Sep 17 00:00:00 2001 From: ribes Date: Thu, 27 Sep 2007 14:49:01 +0000 Subject: [PATCH] - Parallel Salome_file are now ok with parallel applications - const char * into HDFPersist constructor of classes - --warning into Container --- src/Container/Container_i.cxx | 3 +- src/Container/Salome_file_i.cxx | 55 ++++++++++++++---- src/Container/Salome_file_i.hxx | 1 + src/HDFPersist/HDFcontainerObject.cc | 2 +- src/HDFPersist/HDFcontainerObject.hxx | 2 +- src/HDFPersist/HDFdataset.cc | 4 +- src/HDFPersist/HDFdataset.hxx | 4 +- src/HDFPersist/HDFgroup.cc | 2 +- src/HDFPersist/HDFgroup.hxx | 2 +- src/HDFPersist/HDFinternalObject.cc | 2 +- src/HDFPersist/HDFinternalObject.hxx | 2 +- src/HDFPersist/HDFobject.cc | 2 +- src/HDFPersist/HDFobject.hxx | 2 +- .../Parallel_Salome_file_i.cxx | 18 ++++-- .../SALOME_ParallelComponent_i.cxx | 55 ++++++++++++------ .../SALOME_ParallelComponent_i.hxx | 7 +++ .../SALOME_ParallelContainerNodeMpi.cxx | 57 ++++++++++++++++++- 17 files changed, 170 insertions(+), 50 deletions(-) diff --git a/src/Container/Container_i.cxx b/src/Container/Container_i.cxx index e5270c674..2a7fe8bc6 100644 --- a/src/Container/Container_i.cxx +++ b/src/Container/Container_i.cxx @@ -189,7 +189,8 @@ Engines_Container_i::Engines_Container_i (CORBA::ORB_ptr orb, // if no thread state defined if ( !myTstate ) myTstate = PyThreadState_New(KERNEL_PYTHON::_interp); - PyThreadState *myoldTstate = PyThreadState_Swap(myTstate); +// PyThreadState *myoldTstate = PyThreadState_Swap(myTstate); + PyThreadState_Swap(myTstate); #else Py_ACQUIRE_NEW_THREAD; #endif diff --git a/src/Container/Salome_file_i.cxx b/src/Container/Salome_file_i.cxx index 67de70206..8f10ad2cf 100644 --- a/src/Container/Salome_file_i.cxx +++ b/src/Container/Salome_file_i.cxx @@ -45,6 +45,7 @@ Salome_file_i::Salome_file_i() _state.number_of_files = 0; _state.files_ok = true; _container = Engines::Container::_nil(); + _default_source_Salome_file = Engines::Salome_file::_nil(); } //============================================================================= @@ -108,7 +109,7 @@ Salome_file_i::load(const char* hdf5_file) { std::string dataset_group_name("DATASET"); dataset_group_name += file_name; - hdf_group = new HDFgroup((char *) dataset_group_name.c_str(), hdf_file); + hdf_group = new HDFgroup(dataset_group_name.c_str(), hdf_file); hdf_group->OpenOnDisk(); hdf_dataset = new HDFdataset("NAME",hdf_group); @@ -160,7 +161,7 @@ Salome_file_i::load(const char* hdf5_file) { std::string group_name("GROUP"); group_name += file_name; - hdf_group = new HDFgroup((char *) group_name.c_str(),hdf_file); + hdf_group = new HDFgroup(group_name.c_str(),hdf_file); hdf_group->OpenOnDisk(); hdf_dataset = new HDFdataset("FILE DATASET",hdf_group); hdf_dataset->OpenOnDisk(); @@ -584,6 +585,12 @@ Salome_file_i::setDistributedFile(const char* comp_file_name) _fileManaged[file_name] = infos; + if(!CORBA::is_nil(_default_source_Salome_file)) + { + _fileDistributedSource[file_name] = + Engines::Salome_file::_duplicate(_default_source_Salome_file); + } + // Update Salome_file state _state.number_of_files++; _state.files_ok = false; @@ -598,25 +605,49 @@ Salome_file_i::setDistributedFile(const char* comp_file_name) void Salome_file_i::connect(Engines::Salome_file_ptr source_Salome_file) { - // We can connect this Salome_file if there is only one file managed - // by the Salome_file - std::string fname; - if (_fileManaged.size() == 1) + if(CORBA::is_nil(_default_source_Salome_file)) { - // only one file managed - 
_t_fileManaged::iterator it = _fileManaged.begin(); - fname = it->first; - _fileDistributedSource[fname] = Engines::Salome_file::_duplicate(source_Salome_file); + _default_source_Salome_file = Engines::Salome_file::_duplicate(source_Salome_file); + _t_fileManaged::iterator begin = _fileManaged.begin(); + _t_fileManaged::iterator end = _fileManaged.end(); + for(;begin!=end;begin++) { + // Get the name of the file + std::string file_name = begin->first; + _t_fileDistributedSource::iterator it = _fileDistributedSource.find(file_name); + if (it == _fileDistributedSource.end()) + { + _fileDistributedSource[file_name] = Engines::Salome_file::_duplicate(source_Salome_file); + } + } } - else + else { SALOME::ExceptionStruct es; es.type = SALOME::INTERNAL_ERROR; - std::string text = "cannot connect"; + std::string text = "already connected to a default Salome_file"; es.text = CORBA::string_dup(text.c_str()); throw SALOME::SALOME_Exception(es); } + // We can connect this Salome_file if there is only one file managed + // by the Salome_file + //std::string fname; + //if (_fileManaged.size() == 1) + //{ + // only one file managed + // _t_fileManaged::iterator it = _fileManaged.begin(); + // fname = it->first; + // _fileDistributedSource[fname] = Engines::Salome_file::_duplicate(source_Salome_file); + //} + //else + //{ + // SALOME::ExceptionStruct es; + // es.type = SALOME::INTERNAL_ERROR; + // std::string text = "cannot connect"; + // es.text = CORBA::string_dup(text.c_str()); + // throw SALOME::SALOME_Exception(es); + //} } + //============================================================================= /*! * CORBA method diff --git a/src/Container/Salome_file_i.hxx b/src/Container/Salome_file_i.hxx index 565cd7f68..16b09034e 100644 --- a/src/Container/Salome_file_i.hxx +++ b/src/Container/Salome_file_i.hxx @@ -100,6 +100,7 @@ class CONTAINER_EXPORT Salome_file_i: _t_fileDistributedSource _fileDistributedSource; Engines::SfState _state; Engines::Container_ptr _container; + Engines::Salome_file_ptr _default_source_Salome_file; }; #endif diff --git a/src/HDFPersist/HDFcontainerObject.cc b/src/HDFPersist/HDFcontainerObject.cc index 04324b3b7..6f8d1c353 100644 --- a/src/HDFPersist/HDFcontainerObject.cc +++ b/src/HDFPersist/HDFcontainerObject.cc @@ -33,7 +33,7 @@ extern "C" //#include "utilities.h" using namespace std; -HDFcontainerObject::HDFcontainerObject(char *name) +HDFcontainerObject::HDFcontainerObject(const char *name) : HDFinternalObject(name) { _nsons = 0; diff --git a/src/HDFPersist/HDFcontainerObject.hxx b/src/HDFPersist/HDFcontainerObject.hxx index ea8881e32..dfc237b26 100644 --- a/src/HDFPersist/HDFcontainerObject.hxx +++ b/src/HDFPersist/HDFcontainerObject.hxx @@ -42,7 +42,7 @@ private : HDFinternalObject *_lastson; int _nsons; public : - HDFcontainerObject(char *name); + HDFcontainerObject(const char *name); virtual ~HDFcontainerObject(); virtual int nInternalObjects(); diff --git a/src/HDFPersist/HDFdataset.cc b/src/HDFPersist/HDFdataset.cc index de5d07afa..4a0b7b160 100644 --- a/src/HDFPersist/HDFdataset.cc +++ b/src/HDFPersist/HDFdataset.cc @@ -43,7 +43,7 @@ herr_t dataset_attr(hid_t loc_id, const char *attr_name, void *operator_data) return 1; } -HDFdataset::HDFdataset(char *name, HDFcontainerObject *father,hdf_type type, +HDFdataset::HDFdataset(const char *name, HDFcontainerObject *father,hdf_type type, hdf_size dim[], int dimsize, hdf_byte_order order) : HDFinternalObject(name) { @@ -66,7 +66,7 @@ HDFdataset::HDFdataset(char *name, HDFcontainerObject *father,hdf_type type, } 
-HDFdataset::HDFdataset(char *name,HDFcontainerObject *father) +HDFdataset::HDFdataset(const char *name,HDFcontainerObject *father) : HDFinternalObject(name) { _father = father; diff --git a/src/HDFPersist/HDFdataset.hxx b/src/HDFPersist/HDFdataset.hxx index 8375497ea..1e2a62401 100644 --- a/src/HDFPersist/HDFdataset.hxx +++ b/src/HDFPersist/HDFdataset.hxx @@ -48,10 +48,10 @@ private : char* _attribute; public: - HDFdataset(char *name, HDFcontainerObject *father,hdf_type type, + HDFdataset(const char *name, HDFcontainerObject *father,hdf_type type, hdf_size dim[],int dimsize, hdf_byte_order order = H5T_ORDER_NONE); - HDFdataset(char *name,HDFcontainerObject *father); + HDFdataset(const char *name,HDFcontainerObject *father); virtual ~HDFdataset(); void CreateOnDisk(); diff --git a/src/HDFPersist/HDFgroup.cc b/src/HDFPersist/HDFgroup.cc index 467525320..2b0f2fb19 100644 --- a/src/HDFPersist/HDFgroup.cc +++ b/src/HDFPersist/HDFgroup.cc @@ -40,7 +40,7 @@ herr_t group_attr(hid_t loc_id, const char *attr_name, void *operator_data) return 1; } -HDFgroup::HDFgroup(char *name, HDFcontainerObject *father) +HDFgroup::HDFgroup(const char *name, HDFcontainerObject *father) : HDFcontainerObject(name) { _father = father; diff --git a/src/HDFPersist/HDFgroup.hxx b/src/HDFPersist/HDFgroup.hxx index 01c969d92..8c0d02b88 100644 --- a/src/HDFPersist/HDFgroup.hxx +++ b/src/HDFPersist/HDFgroup.hxx @@ -42,7 +42,7 @@ private : hdf_idt _mid; char* _attribute; public : - HDFgroup(char *name, HDFcontainerObject *father); + HDFgroup(const char *name, HDFcontainerObject *father); void CreateOnDisk(); void OpenOnDisk(); diff --git a/src/HDFPersist/HDFinternalObject.cc b/src/HDFPersist/HDFinternalObject.cc index a3cdc1f7f..716e6da49 100644 --- a/src/HDFPersist/HDFinternalObject.cc +++ b/src/HDFPersist/HDFinternalObject.cc @@ -33,7 +33,7 @@ extern "C" using namespace std; #endif -HDFinternalObject::HDFinternalObject(char *name) +HDFinternalObject::HDFinternalObject(const char *name) : HDFobject(name) { _previousbrother = NULL; diff --git a/src/HDFPersist/HDFinternalObject.hxx b/src/HDFPersist/HDFinternalObject.hxx index 06921850d..ee4c32e51 100644 --- a/src/HDFPersist/HDFinternalObject.hxx +++ b/src/HDFPersist/HDFinternalObject.hxx @@ -40,7 +40,7 @@ private : HDFinternalObject *_previousbrother; HDFinternalObject *_nextbrother; public : - HDFinternalObject(char *name); + HDFinternalObject(const char *name); HDFinternalObject *GetPreviousBrother(); HDFinternalObject *GetNextBrother(); diff --git a/src/HDFPersist/HDFobject.cc b/src/HDFPersist/HDFobject.cc index 1a032278a..5adb8aafe 100644 --- a/src/HDFPersist/HDFobject.cc +++ b/src/HDFPersist/HDFobject.cc @@ -33,7 +33,7 @@ extern "C" //#include "utilities.h" using namespace std; -HDFobject::HDFobject(char *name) +HDFobject::HDFobject(const char *name) { // MESSAGE("-------- constructor " << name << " " << this); HDFerrorModeLock(); diff --git a/src/HDFPersist/HDFobject.hxx b/src/HDFPersist/HDFobject.hxx index e1396985a..3c829822a 100644 --- a/src/HDFPersist/HDFobject.hxx +++ b/src/HDFPersist/HDFobject.hxx @@ -38,7 +38,7 @@ protected : char *_name; hdf_idt _id; public : - HDFobject(char *name); + HDFobject(const char *name); virtual ~HDFobject(); hdf_idt GetId(); diff --git a/src/ParallelContainer/Parallel_Salome_file_i.cxx b/src/ParallelContainer/Parallel_Salome_file_i.cxx index 19cecbb25..ac06b5005 100644 --- a/src/ParallelContainer/Parallel_Salome_file_i.cxx +++ b/src/ParallelContainer/Parallel_Salome_file_i.cxx @@ -73,12 +73,15 @@ 
Parallel_Salome_file_i::connect(Engines::Salome_file_ptr source_Salome_file) { // Test if the file is managed in an another node // If yes, node is updated - _t_fileManaged::iterator it = _fileManaged.begin(); - std::string file_name = it->first; - if (_fileManaged[file_name].node > 0 && getMyRank() == 0) { - if (parallel_file == NULL) - parallel_file = Engines::PaCO_Parallel_Salome_file::PaCO_narrow(proxy, _orb); - parallel_file->connect(source_Salome_file, _fileManaged[file_name].node); + _t_fileManaged::iterator begin = _fileManaged.begin(); + _t_fileManaged::iterator end = _fileManaged.end(); + for(;begin!=end;begin++) { + std::string file_name = begin->first; + if (_fileManaged[file_name].node > 0 && getMyRank() == 0) { + if (parallel_file == NULL) + parallel_file = Engines::PaCO_Parallel_Salome_file::PaCO_narrow(proxy, _orb); + parallel_file->connect(source_Salome_file, _fileManaged[file_name].node); + } } } @@ -296,6 +299,9 @@ Parallel_Salome_file_i::setFileNode(const char* file_name, CORBA::Long node) { parallel_file = Engines::PaCO_Parallel_Salome_file::PaCO_narrow(proxy, _orb); Engines::Container_ptr cont = parallel_file->updateFile(_fileManaged[fname], node); + parallel_file->connectDistributedFile(fname.c_str(), + _fileDistributedSource[fname], + node); // Update file infos with the new reference of the container _fileManaged[fname].container = Engines::Container::_duplicate(cont); diff --git a/src/ParallelContainer/SALOME_ParallelComponent_i.cxx b/src/ParallelContainer/SALOME_ParallelComponent_i.cxx index 870c4dd92..db60ae5e4 100644 --- a/src/ParallelContainer/SALOME_ParallelComponent_i.cxx +++ b/src/ParallelContainer/SALOME_ParallelComponent_i.cxx @@ -43,6 +43,7 @@ int SIGUSR11 = 1000; #endif +#include using namespace std; @@ -796,10 +797,14 @@ Engines_Parallel_Component_i::setInputFileToService(const char* service_name, _Input_Service_file_map[service_name] = _map; _t_Proxy_Salome_file_map * _proxy_map = new _t_Proxy_Salome_file_map(); _Proxy_Input_Service_file_map[service_name] = _proxy_map; + _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = new _t_IOR_Proxy_Salome_file_map(); + _IOR_Proxy_Input_Service_file_map[service_name] = _IOR_proxy_map; } _t_Salome_file_map * _map = _Input_Service_file_map[service_name]; _t_Proxy_Salome_file_map * _proxy_map = _Proxy_Input_Service_file_map[service_name]; + _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = _IOR_Proxy_Input_Service_file_map[service_name]; + pthread_mutex_lock(deploy_mutex); std::string proxy_ior; // Try to find the Salome_file ... @@ -810,7 +815,6 @@ Engines_Parallel_Component_i::setInputFileToService(const char* service_name, // He has the same configuration than // his component - pthread_mutex_lock(deploy_mutex); // Firstly, we have to create the proxy object // of the Salome_file and transmit his // reference to the other nodes. 
@@ -818,6 +822,16 @@ Engines_Parallel_Component_i::setInputFileToService(const char* service_name, Engines::Parallel_Salome_file_proxy_impl * proxy = new Engines::Parallel_Salome_file_proxy_impl(CORBA::ORB::_duplicate(_orb)); PaCO_operation * proxy_global_ptr = proxy->getContext("global_paco_context"); + // We initialize the object with the context of the Parallel component + PaCO_operation * compo_global_ptr = getContext("global_paco_context"); + //compo_global_ptr->init_context(proxy_global_ptr); + proxy_global_ptr->init_context(compo_global_ptr); + + paco_fabrique_manager* pfm = paco_getFabriqueManager(); + pfm->register_com("dummy", new paco_dummy_fabrique()); + proxy_global_ptr->setComFab(NULL); + proxy_global_ptr->setLibCom("dummy",NULL); + proxy_global_ptr->setTypeClient(true); PaCO::PacoTopology_t client_topo; client_topo.total = 1; @@ -826,10 +840,6 @@ Engines_Parallel_Component_i::setInputFileToService(const char* service_name, serveur_topo.total = getTotalNode(); proxy->setTopo(serveur_topo); - // We initialize the object with the context of the Parallel component - PaCO_operation * compo_global_ptr = getContext("global_paco_context"); - //compo_global_ptr->init_context(proxy_global_ptr); - proxy_global_ptr->init_context(compo_global_ptr); // We register the CORBA objet into the POA CORBA::Object_ptr proxy_ref = proxy->_this(); @@ -846,6 +856,7 @@ Engines_Parallel_Component_i::setInputFileToService(const char* service_name, } proxy_ior = this->get_parallel_proxy_object(); + (*_IOR_proxy_map)[Salome_file_name] = proxy_ior; // We register each node of the parallel Salome_file object // into the proxy. @@ -877,10 +888,11 @@ Engines_Parallel_Component_i::setInputFileToService(const char* service_name, // Parallel_Salome_file is created and deployed delete _proxy; _proxy = NULL; - pthread_mutex_unlock(deploy_mutex); } - - return (*_proxy_map)[Salome_file_name]->_this(); + pthread_mutex_unlock(deploy_mutex); + proxy_ior = (*_IOR_proxy_map)[Salome_file_name]; + CORBA::Object_ptr proxy_ref = _orb->string_to_object(proxy_ior.c_str()); + return Engines::Salome_file::_narrow(proxy_ref); } Engines::Salome_file_ptr @@ -894,10 +906,14 @@ Engines_Parallel_Component_i::setOutputFileToService(const char* service_name, _Output_Service_file_map[service_name] = _map; _t_Proxy_Salome_file_map * _proxy_map = new _t_Proxy_Salome_file_map(); _Proxy_Output_Service_file_map[service_name] = _proxy_map; + _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = new _t_IOR_Proxy_Salome_file_map(); + _IOR_Proxy_Output_Service_file_map[service_name] = _IOR_proxy_map; } _t_Salome_file_map * _map = _Output_Service_file_map[service_name]; _t_Proxy_Salome_file_map * _proxy_map = _Proxy_Output_Service_file_map[service_name]; + _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = _IOR_Proxy_Output_Service_file_map[service_name]; + pthread_mutex_lock(deploy_mutex); std::string proxy_ior; // Try to find the Salome_file ... @@ -908,7 +924,6 @@ Engines_Parallel_Component_i::setOutputFileToService(const char* service_name, // He has the same configuration than // his component - pthread_mutex_lock(deploy_mutex); // Firstly, we have to create the proxy object // of the Salome_file and transmit his // reference to the other nodes. 
@@ -916,6 +931,16 @@ Engines_Parallel_Component_i::setOutputFileToService(const char* service_name, Engines::Parallel_Salome_file_proxy_impl * proxy = new Engines::Parallel_Salome_file_proxy_impl(CORBA::ORB::_duplicate(_orb)); PaCO_operation * proxy_global_ptr = proxy->getContext("global_paco_context"); + // We initialize the object with the context of the Parallel component + PaCO_operation * compo_global_ptr = getContext("global_paco_context"); + //compo_global_ptr->init_context(proxy_global_ptr); + proxy_global_ptr->init_context(compo_global_ptr); + + paco_fabrique_manager* pfm = paco_getFabriqueManager(); + pfm->register_com("dummy", new paco_dummy_fabrique()); + proxy_global_ptr->setComFab(NULL); + proxy_global_ptr->setLibCom("dummy",NULL); + proxy_global_ptr->setTypeClient(true); PaCO::PacoTopology_t client_topo; client_topo.total = 1; @@ -924,10 +949,6 @@ Engines_Parallel_Component_i::setOutputFileToService(const char* service_name, serveur_topo.total = getTotalNode(); proxy->setTopo(serveur_topo); - // We initialize the object with the context of the Parallel component - PaCO_operation * compo_global_ptr = getContext("global_paco_context"); - //compo_global_ptr->init_context(proxy_global_ptr); - proxy_global_ptr->init_context(compo_global_ptr); // We register the CORBA objet into the POA CORBA::Object_ptr proxy_ref = proxy->_this(); @@ -944,6 +965,7 @@ Engines_Parallel_Component_i::setOutputFileToService(const char* service_name, } proxy_ior = this->get_parallel_proxy_object(); + (*_IOR_proxy_map)[Salome_file_name] = proxy_ior; // We register each node of the parallel Salome_file object // into the proxy. @@ -975,10 +997,11 @@ Engines_Parallel_Component_i::setOutputFileToService(const char* service_name, // Parallel_Salome_file is created and deployed delete _proxy; _proxy = NULL; - pthread_mutex_unlock(deploy_mutex); } - - return (*_proxy_map)[Salome_file_name]->_this(); + pthread_mutex_unlock(deploy_mutex); + proxy_ior = (*_IOR_proxy_map)[Salome_file_name]; + CORBA::Object_ptr proxy_ref = _orb->string_to_object(proxy_ior.c_str()); + return Engines::Salome_file::_narrow(proxy_ref); } Engines::Salome_file_ptr diff --git a/src/ParallelContainer/SALOME_ParallelComponent_i.hxx b/src/ParallelContainer/SALOME_ParallelComponent_i.hxx index afebfc896..edadf93ce 100644 --- a/src/ParallelContainer/SALOME_ParallelComponent_i.hxx +++ b/src/ParallelContainer/SALOME_ParallelComponent_i.hxx @@ -161,10 +161,12 @@ protected: // Map Salome_file_name to Parallel_Salome_file* typedef std::map _t_Salome_file_map; typedef std::map _t_Proxy_Salome_file_map; + typedef std::map _t_IOR_Proxy_Salome_file_map; // Map Service_name to _Salome_file_map typedef std::map _t_Service_file_map; typedef std::map _t_Proxy_Service_file_map; + typedef std::map _t_IOR_Proxy_Service_file_map; _t_Service_file_map _Input_Service_file_map; _t_Service_file_map _Output_Service_file_map; @@ -176,6 +178,11 @@ protected: _t_Proxy_Service_file_map::iterator _Proxy_Service_file_map_it; _t_Proxy_Salome_file_map::iterator _Proxy_Salome_file_map_it; + _t_IOR_Proxy_Service_file_map _IOR_Proxy_Input_Service_file_map; + _t_IOR_Proxy_Service_file_map _IOR_Proxy_Output_Service_file_map; + _t_IOR_Proxy_Service_file_map::iterator _IOR_Proxy_Service_file_map_it; + _t_IOR_Proxy_Salome_file_map::iterator _IOR_Proxy_Salome_file_map_it; + std::string _serviceName ; std::string _graphName ; std::string _nodeName ; diff --git a/src/ParallelContainer/SALOME_ParallelContainerNodeMpi.cxx b/src/ParallelContainer/SALOME_ParallelContainerNodeMpi.cxx 
index c046a2c9f..4852bcafb 100644 --- a/src/ParallelContainer/SALOME_ParallelContainerNodeMpi.cxx +++ b/src/ParallelContainer/SALOME_ParallelContainerNodeMpi.cxx @@ -75,20 +75,71 @@ void handler(int t) { } #endif +typedef void (*sighandler_t)(int); +sighandler_t setsig(int sig, sighandler_t handler) +{ + struct sigaction context, ocontext; + context.sa_handler = handler; + sigemptyset(&context.sa_mask); + context.sa_flags = 0; + if (sigaction(sig, &context, &ocontext) == -1) + return SIG_ERR; + return ocontext.sa_handler; +} + +void AttachDebugger() +{ + if(getenv ("DEBUGGER")) + { + std::stringstream exec; + exec << "$DEBUGGER SALOME_ParallelContainerNodeMpi " << getpid() << "&"; + std::cerr << exec.str() << std::endl; + system(exec.str().c_str()); + while(1); + } +} + +void Handler(int theSigId) +{ + std::cerr << "SIGSEGV: " << std::endl; + AttachDebugger(); + //to exit or not to exit + exit(1); +} + +void terminateHandler(void) +{ + std::cerr << "Terminate: not managed exception !" << std::endl; + AttachDebugger(); +} + +void unexpectedHandler(void) +{ + std::cerr << "Unexpected: unexpected exception !" << std::endl; + AttachDebugger(); +} + int main(int argc, char* argv[]) { INFOS("Launching a parallel Mpi container node"); #ifdef _DEBUG_ - struct sigaction action; - action.sa_handler = &test; - sigaction(SIGSEGV, &action, NULL); +// struct sigaction action; +// action.sa_handler = &test; +// sigaction(SIGSEGV, &action, NULL); #endif // MPI Init int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE ,&provided); + if(getenv ("DEBUGGER")) + { + std::cerr << "Unexpected: unexpected exception !" << std::endl; + setsig(SIGSEGV,&Handler); + set_terminate(&terminateHandler); + set_unexpected(&unexpectedHandler); + } #ifdef _DEBUG_ cerr << "Level MPI_THREAD_SINGLE : " << MPI_THREAD_SINGLE << endl; cerr << "Level MPI_THREAD_SERIALIZED : " << MPI_THREAD_SERIALIZED << endl; -- 2.39.2
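
For illustration, a minimal client-side sketch of the reworked Salome_file_i::connect() behaviour introduced above: the connected Salome_file now becomes a default source that is fanned out to every managed file, instead of connect() refusing to work when more than one file is managed. This is only a sketch under stated assumptions — the variable names and file paths are hypothetical, error handling is omitted, and it is not part of the patch itself.

#include "Salome_file_i.hxx"

void connect_example(Engines::Salome_file_ptr source)
{
  Salome_file_i destination;

  // Register two files that will have to be fetched from a remote source
  // (hypothetical paths, for illustration only).
  destination.setDistributedFile("/tmp/case/mesh.med");
  destination.setDistributedFile("/tmp/case/boundary.med");

  // A single connect() now records 'source' as the default source and binds
  // it to every managed file that does not yet have an explicit source.
  destination.connect(source);

  // Files registered afterwards also pick up the default source, because
  // setDistributedFile() duplicates _default_source_Salome_file when it is set.
  destination.setDistributedFile("/tmp/case/results.med");

  // Calling connect() a second time raises SALOME::SALOME_Exception
  // ("already connected to a default Salome_file").
}

The same per-file source map is what Parallel_Salome_file_i::setFileNode() now forwards through connectDistributedFile(), which is why Parallel_Salome_file_i::connect() can loop over every managed file rather than handling only the first one.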