// if no thread state defined
if ( !myTstate )
myTstate = PyThreadState_New(KERNEL_PYTHON::_interp);
- PyThreadState *myoldTstate = PyThreadState_Swap(myTstate);
+// PyThreadState *myoldTstate = PyThreadState_Swap(myTstate);
+ PyThreadState_Swap(myTstate);
#else
Py_ACQUIRE_NEW_THREAD;
#endif
_state.number_of_files = 0;
_state.files_ok = true;
_container = Engines::Container::_nil();
+ _default_source_Salome_file = Engines::Salome_file::_nil();
}
//=============================================================================
std::string dataset_group_name("DATASET");
dataset_group_name += file_name;
- hdf_group = new HDFgroup((char *) dataset_group_name.c_str(), hdf_file);
+ hdf_group = new HDFgroup(dataset_group_name.c_str(), hdf_file);
hdf_group->OpenOnDisk();
hdf_dataset = new HDFdataset("NAME",hdf_group);
std::string group_name("GROUP");
group_name += file_name;
- hdf_group = new HDFgroup((char *) group_name.c_str(),hdf_file);
+ hdf_group = new HDFgroup(group_name.c_str(),hdf_file);
hdf_group->OpenOnDisk();
hdf_dataset = new HDFdataset("FILE DATASET",hdf_group);
hdf_dataset->OpenOnDisk();
_fileManaged[file_name] = infos;
+ if(!CORBA::is_nil(_default_source_Salome_file))
+ {
+ _fileDistributedSource[file_name] =
+ Engines::Salome_file::_duplicate(_default_source_Salome_file);
+ }
+
// Update Salome_file state
_state.number_of_files++;
_state.files_ok = false;
void
Salome_file_i::connect(Engines::Salome_file_ptr source_Salome_file)
{
- // We can connect this Salome_file if there is only one file managed
- // by the Salome_file
- std::string fname;
- if (_fileManaged.size() == 1)
+ // Register source_Salome_file as the default distributed source and
+ // attach it to every managed file that has no source yet.  Only one
+ // default source may be registered; a second call raises an exception.
+ if(CORBA::is_nil(_default_source_Salome_file))
{
- // only one file managed
- _t_fileManaged::iterator it = _fileManaged.begin();
- fname = it->first;
- _fileDistributedSource[fname] = Engines::Salome_file::_duplicate(source_Salome_file);
+ _default_source_Salome_file = Engines::Salome_file::_duplicate(source_Salome_file);
+ _t_fileManaged::iterator begin = _fileManaged.begin();
+ _t_fileManaged::iterator end = _fileManaged.end();
+ for(;begin!=end;begin++) {
+ // Get the name of the file
+ std::string file_name = begin->first;
+ // Do not override a source that was connected explicitly for this file.
+ _t_fileDistributedSource::iterator it = _fileDistributedSource.find(file_name);
+ if (it == _fileDistributedSource.end())
+ {
+ _fileDistributedSource[file_name] = Engines::Salome_file::_duplicate(source_Salome_file);
+ }
+ }
}
- else
+ else
{
SALOME::ExceptionStruct es;
es.type = SALOME::INTERNAL_ERROR;
- std::string text = "cannot connect";
+ std::string text = "already connected to a default Salome_file";
es.text = CORBA::string_dup(text.c_str());
throw SALOME::SALOME_Exception(es);
}
}
+
//=============================================================================
/*!
* CORBA method
_t_fileDistributedSource _fileDistributedSource;
Engines::SfState _state;
Engines::Container_ptr _container;
+ Engines::Salome_file_ptr _default_source_Salome_file;
};
#endif
//#include "utilities.h"
using namespace std;
-HDFcontainerObject::HDFcontainerObject(char *name)
+HDFcontainerObject::HDFcontainerObject(const char *name)
: HDFinternalObject(name)
{
_nsons = 0;
HDFinternalObject *_lastson;
int _nsons;
public :
- HDFcontainerObject(char *name);
+ HDFcontainerObject(const char *name);
virtual ~HDFcontainerObject();
virtual int nInternalObjects();
return 1;
}
-HDFdataset::HDFdataset(char *name, HDFcontainerObject *father,hdf_type type,
+HDFdataset::HDFdataset(const char *name, HDFcontainerObject *father,hdf_type type,
hdf_size dim[], int dimsize, hdf_byte_order order)
: HDFinternalObject(name)
{
}
-HDFdataset::HDFdataset(char *name,HDFcontainerObject *father)
+HDFdataset::HDFdataset(const char *name,HDFcontainerObject *father)
: HDFinternalObject(name)
{
_father = father;
char* _attribute;
public:
- HDFdataset(char *name, HDFcontainerObject *father,hdf_type type,
+ HDFdataset(const char *name, HDFcontainerObject *father,hdf_type type,
hdf_size dim[],int dimsize, hdf_byte_order order = H5T_ORDER_NONE);
- HDFdataset(char *name,HDFcontainerObject *father);
+ HDFdataset(const char *name,HDFcontainerObject *father);
virtual ~HDFdataset();
void CreateOnDisk();
return 1;
}
-HDFgroup::HDFgroup(char *name, HDFcontainerObject *father)
+HDFgroup::HDFgroup(const char *name, HDFcontainerObject *father)
: HDFcontainerObject(name)
{
_father = father;
hdf_idt _mid;
char* _attribute;
public :
- HDFgroup(char *name, HDFcontainerObject *father);
+ HDFgroup(const char *name, HDFcontainerObject *father);
void CreateOnDisk();
void OpenOnDisk();
using namespace std;
#endif
-HDFinternalObject::HDFinternalObject(char *name)
+HDFinternalObject::HDFinternalObject(const char *name)
: HDFobject(name)
{
_previousbrother = NULL;
HDFinternalObject *_previousbrother;
HDFinternalObject *_nextbrother;
public :
- HDFinternalObject(char *name);
+ HDFinternalObject(const char *name);
HDFinternalObject *GetPreviousBrother();
HDFinternalObject *GetNextBrother();
//#include "utilities.h"
using namespace std;
-HDFobject::HDFobject(char *name)
+HDFobject::HDFobject(const char *name)
{
// MESSAGE("-------- constructor " << name << " " << this);
HDFerrorModeLock();
char *_name;
hdf_idt _id;
public :
- HDFobject(char *name);
+ HDFobject(const char *name);
virtual ~HDFobject();
hdf_idt GetId();
// Test if the file is managed in an another node
// If yes, node is updated
- _t_fileManaged::iterator it = _fileManaged.begin();
- std::string file_name = it->first;
- if (_fileManaged[file_name].node > 0 && getMyRank() == 0) {
- if (parallel_file == NULL)
- parallel_file = Engines::PaCO_Parallel_Salome_file::PaCO_narrow(proxy, _orb);
- parallel_file->connect(source_Salome_file, _fileManaged[file_name].node);
+ _t_fileManaged::iterator begin = _fileManaged.begin();
+ _t_fileManaged::iterator end = _fileManaged.end();
+ for(;begin!=end;begin++) {
+ std::string file_name = begin->first;
+ if (_fileManaged[file_name].node > 0 && getMyRank() == 0) {
+ if (parallel_file == NULL)
+ parallel_file = Engines::PaCO_Parallel_Salome_file::PaCO_narrow(proxy, _orb);
+ parallel_file->connect(source_Salome_file, _fileManaged[file_name].node);
+ }
}
}
parallel_file = Engines::PaCO_Parallel_Salome_file::PaCO_narrow(proxy, _orb);
Engines::Container_ptr cont = parallel_file->updateFile(_fileManaged[fname], node);
+ parallel_file->connectDistributedFile(fname.c_str(),
+ _fileDistributedSource[fname],
+ node);
// Update file infos with the new reference of the container
_fileManaged[fname].container = Engines::Container::_duplicate(cont);
int SIGUSR11 = 1000;
#endif
+#include <paco_dummy.h>
using namespace std;
_Input_Service_file_map[service_name] = _map;
_t_Proxy_Salome_file_map * _proxy_map = new _t_Proxy_Salome_file_map();
_Proxy_Input_Service_file_map[service_name] = _proxy_map;
+ _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = new _t_IOR_Proxy_Salome_file_map();
+ _IOR_Proxy_Input_Service_file_map[service_name] = _IOR_proxy_map;
}
_t_Salome_file_map * _map = _Input_Service_file_map[service_name];
_t_Proxy_Salome_file_map * _proxy_map = _Proxy_Input_Service_file_map[service_name];
+ _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = _IOR_Proxy_Input_Service_file_map[service_name];
+ pthread_mutex_lock(deploy_mutex);
std::string proxy_ior;
// Try to find the Salome_file ...
// It has the same configuration as
// its component
- pthread_mutex_lock(deploy_mutex);
// Firstly, we have to create the proxy object
// of the Salome_file and transmit his
// reference to the other nodes.
Engines::Parallel_Salome_file_proxy_impl * proxy =
new Engines::Parallel_Salome_file_proxy_impl(CORBA::ORB::_duplicate(_orb));
PaCO_operation * proxy_global_ptr = proxy->getContext("global_paco_context");
+ // We initialize the object with the context of the Parallel component
+ PaCO_operation * compo_global_ptr = getContext("global_paco_context");
+ //compo_global_ptr->init_context(proxy_global_ptr);
+ proxy_global_ptr->init_context(compo_global_ptr);
+
+ paco_fabrique_manager* pfm = paco_getFabriqueManager();
+ pfm->register_com("dummy", new paco_dummy_fabrique());
+ proxy_global_ptr->setComFab(NULL);
+ proxy_global_ptr->setLibCom("dummy",NULL);
+
proxy_global_ptr->setTypeClient(true);
PaCO::PacoTopology_t client_topo;
client_topo.total = 1;
serveur_topo.total = getTotalNode();
proxy->setTopo(serveur_topo);
- // We initialize the object with the context of the Parallel component
- PaCO_operation * compo_global_ptr = getContext("global_paco_context");
- //compo_global_ptr->init_context(proxy_global_ptr);
- proxy_global_ptr->init_context(compo_global_ptr);
// We register the CORBA objet into the POA
CORBA::Object_ptr proxy_ref = proxy->_this();
}
proxy_ior = this->get_parallel_proxy_object();
+ (*_IOR_proxy_map)[Salome_file_name] = proxy_ior;
// We register each node of the parallel Salome_file object
// into the proxy.
// Parallel_Salome_file is created and deployed
delete _proxy;
_proxy = NULL;
- pthread_mutex_unlock(deploy_mutex);
}
-
- return (*_proxy_map)[Salome_file_name]->_this();
+ pthread_mutex_unlock(deploy_mutex);
+ proxy_ior = (*_IOR_proxy_map)[Salome_file_name];
+ CORBA::Object_ptr proxy_ref = _orb->string_to_object(proxy_ior.c_str());
+ return Engines::Salome_file::_narrow(proxy_ref);
}
Engines::Salome_file_ptr
_Output_Service_file_map[service_name] = _map;
_t_Proxy_Salome_file_map * _proxy_map = new _t_Proxy_Salome_file_map();
_Proxy_Output_Service_file_map[service_name] = _proxy_map;
+ _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = new _t_IOR_Proxy_Salome_file_map();
+ _IOR_Proxy_Output_Service_file_map[service_name] = _IOR_proxy_map;
}
_t_Salome_file_map * _map = _Output_Service_file_map[service_name];
_t_Proxy_Salome_file_map * _proxy_map = _Proxy_Output_Service_file_map[service_name];
+ _t_IOR_Proxy_Salome_file_map * _IOR_proxy_map = _IOR_Proxy_Output_Service_file_map[service_name];
+ pthread_mutex_lock(deploy_mutex);
std::string proxy_ior;
// Try to find the Salome_file ...
// It has the same configuration as
// its component
- pthread_mutex_lock(deploy_mutex);
// Firstly, we have to create the proxy object
// of the Salome_file and transmit his
// reference to the other nodes.
Engines::Parallel_Salome_file_proxy_impl * proxy =
new Engines::Parallel_Salome_file_proxy_impl(CORBA::ORB::_duplicate(_orb));
PaCO_operation * proxy_global_ptr = proxy->getContext("global_paco_context");
+ // We initialize the object with the context of the Parallel component
+ PaCO_operation * compo_global_ptr = getContext("global_paco_context");
+ //compo_global_ptr->init_context(proxy_global_ptr);
+ proxy_global_ptr->init_context(compo_global_ptr);
+
+ paco_fabrique_manager* pfm = paco_getFabriqueManager();
+ pfm->register_com("dummy", new paco_dummy_fabrique());
+ proxy_global_ptr->setComFab(NULL);
+ proxy_global_ptr->setLibCom("dummy",NULL);
+
proxy_global_ptr->setTypeClient(true);
PaCO::PacoTopology_t client_topo;
client_topo.total = 1;
serveur_topo.total = getTotalNode();
proxy->setTopo(serveur_topo);
- // We initialize the object with the context of the Parallel component
- PaCO_operation * compo_global_ptr = getContext("global_paco_context");
- //compo_global_ptr->init_context(proxy_global_ptr);
- proxy_global_ptr->init_context(compo_global_ptr);
// We register the CORBA objet into the POA
CORBA::Object_ptr proxy_ref = proxy->_this();
}
proxy_ior = this->get_parallel_proxy_object();
+ (*_IOR_proxy_map)[Salome_file_name] = proxy_ior;
// We register each node of the parallel Salome_file object
// into the proxy.
// Parallel_Salome_file is created and deployed
delete _proxy;
_proxy = NULL;
- pthread_mutex_unlock(deploy_mutex);
}
-
- return (*_proxy_map)[Salome_file_name]->_this();
+ pthread_mutex_unlock(deploy_mutex);
+ proxy_ior = (*_IOR_proxy_map)[Salome_file_name];
+ CORBA::Object_ptr proxy_ref = _orb->string_to_object(proxy_ior.c_str());
+ return Engines::Salome_file::_narrow(proxy_ref);
}
Engines::Salome_file_ptr
// Map Salome_file_name to Parallel_Salome_file*
typedef std::map<std::string, Parallel_Salome_file_i*> _t_Salome_file_map;
typedef std::map<std::string, Engines::Parallel_Salome_file_proxy_impl*> _t_Proxy_Salome_file_map;
+ typedef std::map<std::string, std::string> _t_IOR_Proxy_Salome_file_map;
// Map Service_name to _Salome_file_map
typedef std::map<std::string, Engines_Parallel_Component_i::_t_Salome_file_map*> _t_Service_file_map;
typedef std::map<std::string, Engines_Parallel_Component_i::_t_Proxy_Salome_file_map*> _t_Proxy_Service_file_map;
+ typedef std::map<std::string, Engines_Parallel_Component_i::_t_IOR_Proxy_Salome_file_map*> _t_IOR_Proxy_Service_file_map;
_t_Service_file_map _Input_Service_file_map;
_t_Service_file_map _Output_Service_file_map;
_t_Proxy_Service_file_map::iterator _Proxy_Service_file_map_it;
_t_Proxy_Salome_file_map::iterator _Proxy_Salome_file_map_it;
+ _t_IOR_Proxy_Service_file_map _IOR_Proxy_Input_Service_file_map;
+ _t_IOR_Proxy_Service_file_map _IOR_Proxy_Output_Service_file_map;
+ _t_IOR_Proxy_Service_file_map::iterator _IOR_Proxy_Service_file_map_it;
+ _t_IOR_Proxy_Salome_file_map::iterator _IOR_Proxy_Salome_file_map_it;
+
std::string _serviceName ;
std::string _graphName ;
std::string _nodeName ;
}
#endif
+// Local signal-handler pointer type (glibc's sighandler_t is not portable).
+typedef void (*sighandler_t)(int);
+// Install 'handler' for signal 'sig' via sigaction, with an empty signal
+// mask and no flags (in particular no SA_RESTART).  Returns the previously
+// installed handler, or SIG_ERR if sigaction fails.
+sighandler_t setsig(int sig, sighandler_t handler)
+{
+ struct sigaction context, ocontext;
+ context.sa_handler = handler;
+ sigemptyset(&context.sa_mask);
+ context.sa_flags = 0;
+ if (sigaction(sig, &context, &ocontext) == -1)
+ return SIG_ERR;
+ return ocontext.sa_handler;
+}
+
+// If the DEBUGGER environment variable is set, spawn
+// "$DEBUGGER SALOME_ParallelContainerNodeMpi <pid>" in the background and
+// spin so the debugger can attach to this process.  No-op otherwise.
+void AttachDebugger()
+{
+ if(getenv ("DEBUGGER"))
+ {
+ std::stringstream exec;
+ exec << "$DEBUGGER SALOME_ParallelContainerNodeMpi " << getpid() << "&";
+ std::cerr << exec.str() << std::endl;
+ system(exec.str().c_str());
+ // Deliberate busy-wait: keep the process alive until the attached
+ // debugger takes control and moves execution past this loop.
+ while(1);
+ }
+}
+
+// Fatal-signal handler: report which signal was caught, offer a debugger
+// attach point (see AttachDebugger), then terminate the process.
+void Handler(int theSigId)
+{
+ // Report the actual signal number instead of assuming SIGSEGV — setsig
+ // can install this handler for any signal.
+ std::cerr << "Signal " << theSigId << " was caught!" << std::endl;
+ AttachDebugger();
+ //to exit or not to exit
+ exit(1);
+}
+
+// std::terminate replacement (installed via set_terminate): an exception
+// escaped without a handler — report it and offer a debugger attach point.
+void terminateHandler(void)
+{
+ std::cerr << "Terminate: not managed exception !" << std::endl;
+ AttachDebugger();
+}
+
+// std::unexpected replacement (installed via set_unexpected): a function
+// violated its exception specification — report it and offer a debugger
+// attach point.
+void unexpectedHandler(void)
+{
+ std::cerr << "Unexpected: unexpected exception !" << std::endl;
+ AttachDebugger();
+}
+
int main(int argc, char* argv[])
{
INFOS("Launching a parallel Mpi container node");
#ifdef _DEBUG_
- struct sigaction action;
- action.sa_handler = &test;
- sigaction(SIGSEGV, &action, NULL);
+// struct sigaction action;
+// action.sa_handler = &test;
+// sigaction(SIGSEGV, &action, NULL);
#endif
// MPI Init
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE ,&provided);
+ if(getenv ("DEBUGGER"))
+ {
+ std::cerr << "Unexpected: unexpected exception !" << std::endl;
+ setsig(SIGSEGV,&Handler);
+ set_terminate(&terminateHandler);
+ set_unexpected(&unexpectedHandler);
+ }
#ifdef _DEBUG_
cerr << "Level MPI_THREAD_SINGLE : " << MPI_THREAD_SINGLE << endl;
cerr << "Level MPI_THREAD_SERIALIZED : " << MPI_THREAD_SERIALIZED << endl;