1 // SALOME ParallelContainerNodeMpi : Launch mpi PaCO++ object nodes
3 // Copyright (C) 2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
4 // CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
6 // This library is free software; you can redistribute it and/or
7 // modify it under the terms of the GNU Lesser General Public
8 // License as published by the Free Software Foundation; either
9 // version 2.1 of the License.
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 // Lesser General Public License for more details.
16 // You should have received a copy of the GNU Lesser General Public
17 // License along with this library; if not, write to the Free Software
18 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 // See http://www.opencascade.org/SALOME/ or email : webmaster.salome@opencascade.org
24 // File : SALOME_ParallelContainerNodeMpi.cxx
25 // Author : André Ribes, EDF
26 // Module : SALOME PARALLEL
38 #include "SALOME_ParallelContainer_i.hxx"
41 #include <paco_omni.h>
46 #include "SALOME_NamingService.hxx"
48 #include "utilities.h"
49 #include "Utils_ORB_INIT.hxx"
50 #include "Utils_SINGLETON.hxx"
51 #include "SALOMETraceCollector.hxx"
// Body of the SIGSEGV handler installed in main() via signal(SIGSEGV, handler);
// the enclosing function signature/braces are outside this excerpt.
// Prints a loud banner with the crashing pid so the failing MPI rank can be
// identified in interleaved job output.
// NOTE(review): writing to std::cerr from a signal handler is not
// async-signal-safe; acceptable only as a best-effort crash trace.
60 cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl;
61 cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl;
62 cerr << "SIGSEGV in :" << getpid() << endl;
63 cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl;
64 cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl;
// Entry point: launches one node of a parallel MPI PaCO++ container and
// registers it (per MPI rank) in the SALOME naming service.
// Excerpt note: several structural lines (braces, the try block, and local
// declarations such as 'provided', 'aCommand', 'myid', 'buffer') are elided
// from this view; only comments have been added here.
69 int main(int argc, char* argv[])
71 INFOS("Launching a parallel Mpi container node");
// Install the SIGSEGV handler defined above so crashes identify their pid.
74 signal(SIGSEGV, handler);
// Request full multi-thread MPI support; 'provided' (declared on an elided
// line) receives the level actually granted by the MPI implementation.
79 MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE ,&provided);
// Debug dump: the numeric values of all threading levels next to the level
// actually provided, so truncated support is visible in the logs.
82 cerr << "Level MPI_THREAD_SINGLE : " << MPI_THREAD_SINGLE << endl;
83 cerr << "Level MPI_THREAD_SERIALIZED : " << MPI_THREAD_SERIALIZED << endl;
84 cerr << "Level MPI_THREAD_FUNNELED : " << MPI_THREAD_FUNNELED << endl;
85 cerr << "Level MPI_THREAD_MULTIPLE : " << MPI_THREAD_MULTIPLE << endl;
86 cerr << "Level provided : " << provided << endl;
88 // Initialise the ORB.
89 ORB_INIT &init = *SINGLETON_<ORB_INIT>::Instance();
90 ASSERT(SINGLETON_<ORB_INIT>::IsAlreadyExisting());
91 CORBA::ORB_var orb = init(0, 0);
92 //CORBA::ORB_var orb = CORBA::ORB_init(argc, argv);
// Disabled code to select the InfiniBand network (forces the omniORB
// endpoint onto the IP resolved for this host).
94 // Code pour choisir le reseau infiniband .....
95 /* string hostname_temp = GetHostname();
96 hostent * t = gethostbyname(hostname_temp.c_str());
97 cerr << " AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " << t->h_addr << " " << hostname_temp << endl;
98 cerr << t->h_addr << endl;
99 in_addr * address=(in_addr * ) t->h_addr;
100 cerr << inet_ntoa(* address) << endl;
101 string ip = inet_ntoa(* address);
102 cerr << " AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " << endl;
103 string com = "giop:tcp:" + ip + ":";
104 const char* options[][2] = { { "endPoint", com.c_str() }, { 0, 0 } };
105 CORBA::ORB_var orb = CORBA::ORB_init(argc, argv, "omniORB4", options);
// The container name comes from the command line (argv[1]); hostname starts
// out empty here.
107 char * containerName = "";
109 containerName = argv[1];
112 char * hostname = "";
// Obtain the root POA (and its manager) used to activate the servant below.
118 CORBA::Object_var obj = orb->resolve_initial_references("RootPOA");
119 PortableServer::POA_var root_poa = PortableServer::POA::_narrow(obj);
120 PortableServer::POAManager_var pman = root_poa->the_POAManager();
123 // add this container to the kill list
// NOTE(review): sprintf into a fixed-size buffer ('aCommand', declared on an
// elided line) -- presumably sized generously; snprintf would be safer.
125 sprintf(aCommand, "addToKillList.py %d SALOME_ParallelContainerNodeMpi", getpid());
// Connect to the SALOME naming service through a duplicated ORB reference.
129 SALOME_NamingService * ns = new SALOME_NamingService(CORBA::ORB::_duplicate(orb));
130 // Retrieve the proxy registered under the container name, as an IOR string.
131 string proxyNameInNS = ns->BuildContainerNameForNS(containerName, hostname);
132 obj = ns->Resolve(proxyNameInNS.c_str());
133 char * proxy_ior = orb->object_to_string(obj);
// Create the node servant; it is told its proxy's IOR and a derived
// "<container>Node" name (remaining constructor args are on elided lines).
136 string name(containerName);
137 string node_name = name + "Node";
138 Engines_Parallel_Container_i * servant = new Engines_Parallel_Container_i(CORBA::ORB::_duplicate(orb), proxy_ior,
140 (char*) node_name.c_str(),
// Register the PaCO++ factories: MPI for communication, omniORB for threads.
143 paco_fabrique_manager * pfm = paco_getFabriqueManager();
144 pfm->register_com("mpi", new paco_mpi_fabrique());
145 pfm->register_thread("omni", new paco_omni_fabrique());
// Bind the servant's global PaCO++ context to MPI_COMM_WORLD and omni threads.
148 PaCO_operation * global_ptr = servant->getContext("global_paco_context");
149 MPI_Comm group = MPI_COMM_WORLD;
150 global_ptr->setLibCom("mpi", &group);
151 global_ptr->setLibThread("omni");
// Activate the servant on the root POA and keep its object id / reference.
154 PortableServer::ObjectId * _id = root_poa->activate_object(servant);
155 servant->set_id(_id);
156 obj = root_poa->id_to_reference(*_id);
158 // In the NamingService
// NOTE(review): 'hostname' is re-declared here as a std::string after the
// 'char * hostname' above -- legal only if the two live in different scopes
// (braces elided from this excerpt); verify against the full file.
159 string hostname = GetHostname();
// Suffix the node name with this process's MPI rank so every rank registers
// under a unique name ('myid' and 'buffer' are declared on elided lines).
162 MPI_Comm_rank(MPI_COMM_WORLD, &myid);
// NOTE(review): snprintf with size 5 truncates ranks of 5+ digits --
// presumably fine for expected job sizes; confirm.
164 snprintf(buffer, 5, "%d", myid);
165 node_name = node_name + buffer;
166 string _containerName = ns->BuildContainerNameForNS((char*) node_name.c_str(),
168 cerr << "---------" << _containerName << "----------" << endl;
169 ns->Register(obj, _containerName.c_str());
// Exception barrier for the elided try block: each failure mode is logged
// via INFOS and execution falls through to shutdown.
173 catch(CORBA::SystemException&)
175 INFOS("Caught CORBA::SystemException.");
177 catch(PortableServer::POA::ServantAlreadyActive&)
179 INFOS("Caught CORBA::ServantAlreadyActiveException");
181 catch(CORBA::Exception&)
183 INFOS("Caught CORBA::Exception.");
185 catch(std::exception& exc)
187 INFOS("Caught std::exception - "<<exc.what());
191 INFOS("Caught unknown exception.");