From be238b4bb0723b0dd34dd72f410dc58df1e43414 Mon Sep 17 00:00:00 2001 From: Yoann Audouin Date: Thu, 9 Mar 2023 16:32:55 +0100 Subject: [PATCH] Adding Multinode method for smesh parallelism (with windows fixed) Adding walltime for multinode + keeping temporary folder activated with environement variable bos #37471: fix compilation on Windows operating system. Note that SMESH_Gen::send_mesh current implementation is vetoed on windows OS (system call) --- doc/examples/creating_parallel_mesh.py | 191 +++++---------- doc/gui/input/parallel_compute.rst | 52 +++- idl/SMESH_Gen.idl | 2 +- idl/SMESH_Mesh.idl | 41 +++- src/SMESH/CMakeLists.txt | 2 + src/SMESH/SMESH_Gen.cxx | 112 +++++++-- src/SMESH/SMESH_Gen.hxx | 5 +- src/SMESH/SMESH_Mesh.hxx | 23 +- src/SMESH/SMESH_ParallelMesh.cxx | 88 +++++-- src/SMESH/SMESH_ParallelMesh.hxx | 97 ++++++-- src/SMESH/SMESH_SequentialMesh.hxx | 5 - src/SMESH/SMESH_subMesh.cxx | 10 +- src/SMESH_I/CMakeLists.txt | 1 + src/SMESH_I/SMESH_Gen_i.cxx | 58 ++++- src/SMESH_I/SMESH_Gen_i.hxx | 5 +- src/SMESH_I/SMESH_Mesh_i.cxx | 18 -- src/SMESH_I/SMESH_Mesh_i.hxx | 9 +- src/SMESH_I/SMESH_ParallelMesh_i.cxx | 177 +++++++++++--- src/SMESH_I/SMESH_ParallelMesh_i.hxx | 30 +++ src/SMESH_SWIG/CMakeLists.txt | 6 + src/SMESH_SWIG/mesher_launcher.py | 323 +++++++++++++++++++++++++ src/SMESH_SWIG/send_files.py | 125 ++++++++++ src/SMESH_SWIG/smeshBuilder.py | 200 +++++++++++++-- 23 files changed, 1250 insertions(+), 330 deletions(-) create mode 100644 src/SMESH_SWIG/mesher_launcher.py create mode 100644 src/SMESH_SWIG/send_files.py diff --git a/doc/examples/creating_parallel_mesh.py b/doc/examples/creating_parallel_mesh.py index 809d8ef69..309674954 100644 --- a/doc/examples/creating_parallel_mesh.py +++ b/doc/examples/creating_parallel_mesh.py @@ -1,16 +1,6 @@ # contains function to compute a mesh in parallel -from platform import java_ver -import sys -try: - from tkinter import W -except: - print("warning: could not import tkinter") - import salome -import time - - salome.salome_init() import salome_notebook notebook = salome_notebook.NoteBook() @@ -22,128 +12,77 @@ notebook = salome_notebook.NoteBook() import GEOM from salome.geom import geomBuilder from salome.smesh import smeshBuilder -import math import SALOMEDS import numpy as np geompy = geomBuilder.New() -smesh = smeshBuilder.New() +nbox = 2 +boxsize = 100 +offset = 0 +# Create 3D faces +boxes = [] +# First creating all the boxes +for i in range(nbox): + for j in range(nbox): + for k in range(nbox): + + x_orig = i*(boxsize+offset) + y_orig = j*(boxsize+offset) + z_orig = k*(boxsize+offset) + + tmp_box = geompy.MakeBoxDXDYDZ(boxsize, boxsize, boxsize) + + if not i == j == k == 0: + box = geompy.MakeTranslation(tmp_box, x_orig, + y_orig, z_orig) + else: + box = tmp_box -def build_seq_mesh(nbox, boxsize, offset): - # Create 3D faces - boxes = [] - # First creating all the boxes - for i in range(nbox): - for j in range(nbox): - for k in range(nbox): + geompy.addToStudy(box, 'box_{}:{}:{}'.format(i, j, k)) - x_orig = i*(boxsize+offset) - y_orig = j*(boxsize+offset) - z_orig = k*(boxsize+offset) - - tmp_box = geompy.MakeBoxDXDYDZ(boxsize, boxsize, boxsize) - - if not i == j == k == 0: - box = geompy.MakeTranslation(tmp_box, x_orig, - y_orig, z_orig) - else: - box = tmp_box - - geompy.addToStudy(box, 'box_{}:{}:{}'.format(i, j, k)) - - boxes.append(box) - - # Create fuse of all boxes - all_boxes = geompy.MakeCompound(boxes) - geompy.addToStudy(all_boxes, 'Compound_1') - - # Removing duplicates faces and edges - all_boxes = 
geompy.MakeGlueFaces(all_boxes, 1e-07) - geompy.addToStudy(all_boxes, 'Glued_Faces_1') - - all_boxes = geompy.MakeGlueEdges(all_boxes, 1e-07) - geompy.addToStudy(all_boxes, 'rubik_cube') - - - # Building sequetial mesh - print("Creating mesh") - all_box_mesh = smesh.Mesh(all_boxes, "seq_mesh") - - print("Adding algo") - algo3d = all_box_mesh.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D) - - netgen_parameters = algo3d.Parameters() - netgen_parameters.SetMaxSize(34.641) - netgen_parameters.SetMinSize(0.141421) - netgen_parameters.SetOptimize(1) - netgen_parameters.SetCheckOverlapping(0) - netgen_parameters.SetCheckChartBoundary(0) - netgen_parameters.SetFineness(5) - netgen_parameters.SetNbSegPerEdge(16*(boxsize//100)) - netgen_parameters.SetNbSegPerRadius(1.5) - netgen_parameters.SetGrowthRate(0.15) - netgen_parameters.SetChordalError(-1) - netgen_parameters.SetChordalErrorEnabled(0) - netgen_parameters.SetUseSurfaceCurvature(1) - netgen_parameters.SetQuadAllowed(0) - netgen_parameters.SetCheckOverlapping(False) - netgen_parameters.SetNbThreads(2) - - return all_boxes, all_box_mesh, netgen_parameters - -def run_test(nbox=2, boxsize=100): - """ Run sequential mesh and parallel version of it - - nbox: NUmber of boxes - boxsize: Size of each box - """ - geom, seq_mesh, netgen_parameters = build_seq_mesh(nbox, boxsize, 0) - - print("Creating Parallel Mesh") - par_mesh = smesh.ParallelMesh(geom, name="par_mesh") - par_mesh.AddGlobalHypothesis(netgen_parameters) - param = par_mesh.GetParallelismSettings() - param.SetNbThreads(6) - - assert param.GetNbThreads() == 6, param.GetNbThreads() - - print("Starting sequential compute") - start = time.monotonic() - is_done = seq_mesh.Compute() - if not is_done: - raise Exception("Error when computing Mesh") - - stop = time.monotonic() - time_seq = stop-start - - print("Starting parallel compute") - start = time.monotonic() - is_done = par_mesh.Compute() - if not is_done: - raise Exception("Error when computing Mesh") - - stop = time.monotonic() - time_par = stop-start - - print(" Tetrahedron: ", seq_mesh.NbTetras(), par_mesh.NbTetras()) - print(" Triangle: ", seq_mesh.NbTriangles(), par_mesh.NbTriangles()) - print(" edge: ", seq_mesh.NbEdges(), par_mesh.NbEdges()) - - assert par_mesh.NbTetras() > 0 - assert par_mesh.NbTriangles() > 0 - assert par_mesh.NbEdges() > 0 - - print("Time elapsed (seq, par): ", time_seq, time_par) - -def main(): - if sys.platform == "win32": - print("Test disabled on Windows") - return - nbox = 2 - boxsize = 100 - run_test(nbox, boxsize) - -main() + boxes.append(box) + +# Create fuse of all boxes +all_boxes = geompy.MakeCompound(boxes) +geompy.addToStudy(all_boxes, 'Compound_1') + +# Removing duplicates faces and edges +all_boxes = geompy.MakeGlueFaces(all_boxes, 1e-07) +geompy.addToStudy(all_boxes, 'Glued_Faces_1') + +rubik_cube = geompy.MakeGlueEdges(all_boxes, 1e-07) +geompy.addToStudy(all_boxes, 'rubik_cube') + + +smesh = smeshBuilder.New() +print("Creating Parallel Mesh") +par_mesh = smesh.ParallelMesh(rubik_cube, name="par_mesh") + +print("Creating hypoehtesis for netgen") +NETGEN_3D_Parameters_1 = smesh.CreateHypothesisByAverageLength( 'NETGEN_Parameters', + 'NETGENEngine', 34.641, 0 ) +print("Adding hypothesis") +par_mesh.AddGlobalHypothesis(NETGEN_3D_Parameters_1) + +print("Setting parallelism method") +par_mesh.SetParallelismMethod(smeshBuilder.MULTITHREAD) + +print("Setting parallelism options") +param = par_mesh.GetParallelismSettings() +param.SetNbThreads(6) + +print("Starting parallel compute") +is_done = 
par_mesh.Compute() +if not is_done: + raise Exception("Error when computing Mesh") + +print(" Tetrahedron: ", par_mesh.NbTetras()) +print(" Triangle: ", par_mesh.NbTriangles()) +print(" edge: ", par_mesh.NbEdges()) + +assert par_mesh.NbTetras() > 0 +assert par_mesh.NbTriangles() > 0 +assert par_mesh.NbEdges() > 0 diff --git a/doc/gui/input/parallel_compute.rst b/doc/gui/input/parallel_compute.rst index fecbf2fea..97d91db9e 100644 --- a/doc/gui/input/parallel_compute.rst +++ b/doc/gui/input/parallel_compute.rst @@ -43,28 +43,64 @@ How to You follow the same principle as the creation of a sequential Mesh. -#. First you create the mesh: +1. First you create the mesh: + .. code-block:: python - par_mesh = smesh.ParallelMesh(geom, name="par_mesh") + par_mesh = smesh.ParallelMesh(my_geom, name="par_mesh") -#. Define the Global Hypothesis that will be split into an hypothesis for the +2. Define the Global Hypothesis that will be split into an hypothesis for the 1D+2D compound and one for each of the 3D solids: - .. code-block:: python + + .. code-block:: python NETGEN_3D_Parameters_1 = smesh.CreateHypothesisByAverageLength( 'NETGEN_Parameters', 'NETGENEngine', 34.641, 0 ) - par_mesh.AddGlobalHypothesis(netgen_parameters) + par_mesh.AddGlobalHypothesis(NETGEN_3D_Parameters_1) + +3. Set the method for the parallelisation: + + You have two methods for parallelisation: + + * Multihtreading: Will run the computation on your computer using the processors on your computer. + + .. code-block:: python + + par_mesh.SetParallelismMethod(smeshBuilder.MULTITHREAD) + + + * MultiNodal: Will run the computation on a remote resource (cluster) that is defined in your salome catalog. + + .. code-block:: python + + par_mesh.SetParallelismMethod(smeshBuilder.MULTINODE) + + +4. Set the parameters for the parallelism: + + * Multithread: -#. Set the parameters for the parallelisation: .. code-block:: python param = par_mesh.GetParallelismSettings() param.SetNbThreads(6) -#. Compute the mesh: + * Multinode: + + .. code-block:: python + + param = par_mesh.GetParallelismSettings() + param.SetResource("cronos") + param.SetNbProc(nbox**3) + param.SetNbProcPerNode(2) + param.SetNbNode(6) + param.SetWcKey("P11N0:SALOME_COFEE") + +5. Compute the mesh: .. code-block:: python - mesh.Compute() + is_done = par_mesh.Compute() + if not is_done: + raise Exception("Error when computing Mesh") **See Also** a sample script of :ref:`tui_create_parallel_mesh`. diff --git a/idl/SMESH_Gen.idl b/idl/SMESH_Gen.idl index d376df1d8..5f751319a 100644 --- a/idl/SMESH_Gen.idl +++ b/idl/SMESH_Gen.idl @@ -250,7 +250,7 @@ module SMESH * with TopoDS_Shapes * The mesh is a parallel one */ - SMESH_Mesh CreateParallelMesh( in GEOM::GEOM_Object theObject ) + SMESH_ParallelMesh CreateParallelMesh( in GEOM::GEOM_Object theObject ) raises ( SALOME::SALOME_Exception ); /*! * Create an empty mesh object diff --git a/idl/SMESH_Mesh.idl b/idl/SMESH_Mesh.idl index e0159023e..8ef9a454b 100644 --- a/idl/SMESH_Mesh.idl +++ b/idl/SMESH_Mesh.idl @@ -899,17 +899,6 @@ module SMESH */ boolean SetMeshOrder(in submesh_array_array theSubMeshArray); - /*! - * \brief Set Number of Threads - */ - void SetNbThreads(in long nbThreads); - /*! - /*! - * \brief Get Number of Threads - */ - long GetNbThreads(); - /*! - /*! 
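
Put together, the documented steps above amount to the short end-to-end session below for the multinode case. This is a sketch assembled only from the calls shown in the documentation: my_geom, the resource name "cronos", the wc-key and the process counts are placeholders that must match your own study and your SALOME resource catalog.

    import salome
    salome.salome_init()
    from salome.smesh import smeshBuilder

    smesh = smeshBuilder.New()

    # my_geom is assumed to be an existing GEOM object of the study
    par_mesh = smesh.ParallelMesh(my_geom, name="par_mesh")

    # One global hypothesis, split by the parallel mesh into a 1D+2D
    # compound hypothesis and one 3D hypothesis per solid
    netgen_params = smesh.CreateHypothesisByAverageLength(
        'NETGEN_Parameters', 'NETGENEngine', 34.641, 0)
    par_mesh.AddGlobalHypothesis(netgen_params)

    # Run the 3D meshing of the solids on a cluster resource of the catalog
    par_mesh.SetParallelismMethod(smeshBuilder.MULTINODE)
    param = par_mesh.GetParallelismSettings()
    param.SetResource("cronos")        # resource name from CatalogResources.xml
    param.SetNbProc(8)
    param.SetNbProcPerNode(2)
    param.SetNbNode(4)
    param.SetWcKey("P11N0:SALOME")

    if not par_mesh.Compute():
        raise Exception("Error when computing Mesh")

The IDL additions later in this patch also expose GetWalltime/SetWalltime for the multinode case (default "01:00:00").
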
* Get mesh description */ @@ -1114,7 +1103,35 @@ module SMESH }; interface SMESH_SequentialMesh:SMESH_Mesh{}; - interface SMESH_ParallelMesh:SMESH_Mesh{}; + interface SMESH_ParallelMesh:SMESH_Mesh{ + + // Parallism method + long GetParallelismMethod(); + void SetParallelismMethod(in long aMethod); + + // Parameters for MutliThreading + long GetNbThreads(); + void SetNbThreads(in long nbThreads); + + // Parameters for MultiNode + string GetResource(); + void SetResource(in string aResource); + + long GetNbProc(); + void SetNbProc(in long nbProc); + + long GetNbProcPerNode(); + void SetNbProcPerNode(in long nbProcPerNode); + + long GetNbNode(); + void SetNbNode(in long nbNode); + + string GetWcKey(); + void SetWcKey(in string wcKey); + + string GetWalltime(); + void SetWalltime(in string walltime); + }; }; diff --git a/src/SMESH/CMakeLists.txt b/src/SMESH/CMakeLists.txt index 4dc9499de..540389b0c 100644 --- a/src/SMESH/CMakeLists.txt +++ b/src/SMESH/CMakeLists.txt @@ -20,6 +20,7 @@ # --- options --- # additional include directories INCLUDE_DIRECTORIES( + ${QT_INCLUDES} ${KERNEL_INCLUDE_DIRS} ${GEOM_INCLUDE_DIRS} ${OpenCASCADE_INCLUDE_DIR} @@ -69,6 +70,7 @@ SET(_link_LIBRARIES MeshDriverGMF ${DriverCGNS_LIB} ${MEDCoupling_medloader} + Qt5::Core ) # --- headers --- diff --git a/src/SMESH/SMESH_Gen.cxx b/src/SMESH/SMESH_Gen.cxx index aaa72c429..48fc0da2f 100644 --- a/src/SMESH/SMESH_Gen.cxx +++ b/src/SMESH/SMESH_Gen.cxx @@ -51,6 +51,9 @@ #include "memoire.h" #include +#include +#include + #ifdef WIN32 #include #endif @@ -174,12 +177,12 @@ SMESH_Mesh* SMESH_Gen::CreateMesh(bool theIsEmbeddedMode) */ //============================================================================= -SMESH_Mesh* SMESH_Gen::CreateParallelMesh(bool theIsEmbeddedMode) +SMESH_ParallelMesh* SMESH_Gen::CreateParallelMesh(bool theIsEmbeddedMode) { Unexpect aCatch(SalomeException); // create a new SMESH_mesh object - SMESH_Mesh *aMesh = new SMESH_ParallelMesh( + SMESH_ParallelMesh *aMesh = new SMESH_ParallelMesh( _localId++, this, theIsEmbeddedMode, @@ -206,7 +209,7 @@ bool SMESH_Gen::sequentialComputeSubMeshes( const bool complexShapeFirst, const bool aShapeOnly) { - MESSAGE("Compute submeshes sequentialy"); + MESSAGE("Sequential Compute of submeshes"); bool ret = true; @@ -290,6 +293,68 @@ const std::function(aMesh); + // Calling run_mesher + // Path to mesher script + fs::path send_files = fs::path(std::getenv("SMESH_ROOT_DIR"))/ + fs::path("bin")/ + fs::path("salome")/ + fs::path("send_files.py"); + + std::string s_program="python3"; + std::list params; + params.push_back(send_files.string()); + params.push_back(file_name); + params.push_back("--resource="+aParMesh.GetResource()); + + // log file + fs::path log_file=aParMesh.GetTmpFolder() / fs::path("copy.log"); + QString out_file = log_file.string().c_str(); + + // Building arguments for QProcess + QString program = QString::fromStdString(s_program); + QStringList arguments; + for(auto arg : params){ + arguments << arg.c_str(); + } + + std::string cmd = ""; + cmd += s_program; + for(auto arg: params){ + cmd += " " + arg; + } + MESSAGE("Send files command: "); + MESSAGE(cmd); + + QProcess myProcess; + myProcess.setProcessChannelMode(QProcess::MergedChannels); + myProcess.setStandardOutputFile(out_file); + + myProcess.start(program, arguments); + // Waiting for process to finish (argument -1 make it wait until the end of + // the process otherwise it just waits 30 seconds) + bool finished = myProcess.waitForFinished(-1); + int ret = myProcess.exitCode(); + + if(ret != 0 
|| !finished){ + // Run crahed + std::string msg = "Issue with send_files: \n"; + msg += "See log for more details: " + log_file.string() + "\n"; + msg += cmd + "\n"; + throw SALOME_Exception(msg); + } +#endif +} + //============================================================================= /*! * Algo to run the computation of all the submeshes of a mesh in parallel @@ -315,10 +380,10 @@ bool SMESH_Gen::parallelComputeSubMeshes( SMESH_subMeshIteratorPtr smIt; SMESH_subMesh *shapeSM = aMesh.GetSubMesh(aShape); + SMESH_ParallelMesh &aParMesh = dynamic_cast(aMesh); TopAbs_ShapeEnum previousShapeType = TopAbs_VERTEX; - int nbThreads = aMesh.GetNbThreads(); - MESSAGE("Compute submeshes with threads: " << nbThreads); + MESSAGE("Parallel Compute of submeshes"); smIt = shapeSM->getDependsOnIterator(includeSelf, !complexShapeFirst); @@ -332,11 +397,6 @@ bool SMESH_Gen::parallelComputeSubMeshes( // Not doing in parallel 1D and 2D meshes if ( !aMesh.HasShapeToMesh() && shapeType == TopAbs_VERTEX ) continue; - if(shapeType==TopAbs_FACE||shapeType==TopAbs_EDGE) - aMesh.SetNbThreads(0); - else - aMesh.SetNbThreads(nbThreads); - if (shapeType != previousShapeType) { // Waiting for all threads for the previous type to end @@ -347,12 +407,12 @@ bool SMESH_Gen::parallelComputeSubMeshes( case TopAbs_FACE: file_name = "Mesh2D.med"; break; - case TopAbs_EDGE: - file_name = "Mesh1D.med"; - break; - case TopAbs_VERTEX: - file_name = "Mesh0D.med"; - break; + //case TopAbs_EDGE: + // file_name = "Mesh1D.med"; + // break; + //case TopAbs_VERTEX: + // file_name = "Mesh0D.med"; + // break; case TopAbs_SOLID: default: file_name = ""; @@ -360,8 +420,11 @@ bool SMESH_Gen::parallelComputeSubMeshes( } if(file_name != "") { - fs::path mesh_file = fs::path(aMesh.GetTmpFolder()) / fs::path(file_name); + fs::path mesh_file = fs::path(aParMesh.GetTmpFolder()) / fs::path(file_name); SMESH_DriverMesh::exportMesh(mesh_file.string(), aMesh, "MESH"); + if (aParMesh.GetParallelismMethod() == ParallelismMethod::MultiNode) { + this->send_mesh(aMesh, mesh_file.string()); + } } //Resetting threaded pool info previousShapeType = shapeType; @@ -375,9 +438,16 @@ bool SMESH_Gen::parallelComputeSubMeshes( smToCompute->ComputeStateEngine( SMESH_subMesh::CHECK_COMPUTE_STATE ); continue; } - boost::asio::post(*(aMesh.GetPool()), std::bind(compute_function, smToCompute, computeEvent, + // Parallelism is only for 3D parts + if(shapeType!=TopAbs_SOLID){ + compute_function(smToCompute, computeEvent, shapeSM, aShapeOnly, allowedSubShapes, - aShapesId)); + aShapesId); + }else{ + boost::asio::post(*(aParMesh.GetPool()), std::bind(compute_function, smToCompute, computeEvent, + shapeSM, aShapeOnly, allowedSubShapes, + aShapesId)); + } } // Waiting for the thread for Solids to finish @@ -385,6 +455,9 @@ bool SMESH_Gen::parallelComputeSubMeshes( aMesh.GetMeshDS()->Modified(); + // Cleanup done here as in Python the destructor is not called + aParMesh.cleanup(); + return ret; #endif }; @@ -648,6 +721,7 @@ bool SMESH_Gen::Compute(SMESH_Mesh & aMesh, if ( aShapesId && GetShapeDim( shapeType ) > (int)aDim ) continue; sm->SetAllowedSubShapes( fillAllowed( shapeSM, aShapeOnly, allowedSubShapes )); + setCurrentSubMesh( sm ); sm->ComputeStateEngine( computeEvent ); diff --git a/src/SMESH/SMESH_Gen.hxx b/src/SMESH/SMESH_Gen.hxx index 732379813..6925ac3db 100644 --- a/src/SMESH/SMESH_Gen.hxx +++ b/src/SMESH/SMESH_Gen.hxx @@ -49,6 +49,7 @@ class SMESHDS_Document; class SMESH_Algo; class SMESH_Mesh; +class SMESH_ParallelMesh; class TopoDS_Shape; @@ -70,7 +71,7 @@ 
public: ~SMESH_Gen(); SMESH_Mesh* CreateMesh(bool theIsEmbeddedMode); - SMESH_Mesh* CreateParallelMesh(bool theIsEmbeddedMode); + SMESH_ParallelMesh* CreateParallelMesh(bool theIsEmbeddedMode); enum ComputeFlags { @@ -169,6 +170,8 @@ public: int GetANewId(); public: + void send_mesh(SMESH_Mesh & aMesh, std::string filename); + bool parallelComputeSubMeshes( SMESH_Mesh & aMesh, const TopoDS_Shape & aShape, diff --git a/src/SMESH/SMESH_Mesh.hxx b/src/SMESH/SMESH_Mesh.hxx index e01595ae1..4fa3a355d 100644 --- a/src/SMESH/SMESH_Mesh.hxx +++ b/src/SMESH/SMESH_Mesh.hxx @@ -51,7 +51,6 @@ #ifndef WIN32 #include -#include #endif #include @@ -395,19 +394,9 @@ class SMESH_EXPORT SMESH_Mesh virtual void Lock(){}; virtual void Unlock(){}; - virtual int GetNbThreads(){return 0;}; - virtual void SetNbThreads(long nbThreads){(void) nbThreads;}; + virtual void wait(){}; - virtual void InitPoolThreads(){std::cout << "Should not pass here: InitPoolThread" << std::endl;}; - virtual void DeletePoolThreads(){std::cout << "Should not pass here: DeletePoolThread" << std::endl;}; - virtual void wait(){std::cout << "Should not pass here: wait" << std::endl;}; - - virtual bool IsParallel(){std::cout << "Should not pass here: IsParallel" << std::endl;return false;}; - -#ifndef WIN32 - virtual boost::filesystem::path GetTmpFolder() {return "";}; - virtual boost::asio::thread_pool* GetPool() {return NULL;}; -#endif + virtual bool IsParallel(){throw SALOME_Exception("Calling SMESH_Mesh::IsParallel");return false;}; virtual bool ComputeSubMeshes( SMESH_Gen* gen, @@ -419,7 +408,7 @@ class SMESH_EXPORT SMESH_Mesh SMESH_subMesh::compute_event &computeEvent, const bool includeSelf, const bool complexShapeFirst, - const bool aShapeOnly){(void) gen;(void) aMesh;(void) aShape;(void) aDim;(void) aShapesId;(void) allowedSubShapes;(void) computeEvent;(void) includeSelf;(void) complexShapeFirst;(void) aShapeOnly;std::cout << "Should not pass here: computesubmesh" << std::endl;return false;}; + const bool aShapeOnly){(void) gen;(void) aMesh;(void) aShape;(void) aDim;(void) aShapesId;(void) allowedSubShapes;(void) computeEvent;(void) includeSelf;(void) complexShapeFirst;(void) aShapeOnly;throw SALOME_Exception("Calling SMESH_Mesh::ComputeSubMeshes");return false;}; private: @@ -467,12 +456,6 @@ protected: // 2) to forget not loaded mesh data at hyp modification TCallUp* _callUp; - // Mutex for multhitreading write in SMESH_Mesh -#ifndef WIN32 - boost::mutex _my_lock; -#endif - int _NbThreads=-1; - protected: SMESH_Mesh(); SMESH_Mesh(const SMESH_Mesh&) {}; diff --git a/src/SMESH/SMESH_ParallelMesh.cxx b/src/SMESH/SMESH_ParallelMesh.cxx index 6d70e15a2..2bce451f3 100644 --- a/src/SMESH/SMESH_ParallelMesh.cxx +++ b/src/SMESH/SMESH_ParallelMesh.cxx @@ -32,10 +32,8 @@ #include #endif -#ifndef WIN32 #include namespace fs=boost::filesystem; -#endif #ifndef WIN32 #include @@ -43,12 +41,6 @@ namespace fs=boost::filesystem; #include -#ifdef _DEBUG_ -static int MYDEBUG = 1; -#else -static int MYDEBUG = 0; -#endif - SMESH_ParallelMesh::SMESH_ParallelMesh(int theLocalId, SMESH_Gen* theGen, bool theIsEmbeddedMode, @@ -58,21 +50,51 @@ SMESH_ParallelMesh::SMESH_ParallelMesh(int theLocalId, theDocument) { MESSAGE("SMESH_ParallelMesh::SMESH_ParallelMesh(int localId)"); -#ifndef WIN32 - _NbThreads = std::thread::hardware_concurrency(); -#else - _NbThreads = 0; -#endif CreateTmpFolder(); }; SMESH_ParallelMesh::~SMESH_ParallelMesh() +{ + cleanup(); +}; + +void SMESH_ParallelMesh::cleanup() { DeletePoolThreads(); - if(!MYDEBUG) + std::cout << "Keeping tmp 
folder" << keepingTmpFolfer() << std::endl; + if(!keepingTmpFolfer()) + { + MESSAGE("Set SMESH_KEEP_TMP to > 0 to keep temporary folders") DeleteTmpFolder(); + } }; +//============================================================================= +/*! + * \brief Checking if we should keep the temporary folder + * They are kept if the variable SMESH_KEEP_TMP is set to higher than 0 + */ +//============================================================================= +bool SMESH_ParallelMesh::keepingTmpFolfer() +{ + const char* envVar = std::getenv("SMESH_KEEP_TMP"); + std::cout << "smesh_keep_tmp: " << envVar << std::endl; + + if (envVar && (envVar[0] != '\0')) + { + try + { + const long long numValue = std::stoll(envVar); + return numValue > 0; + } + catch(const std::exception& e) + { + std::cerr << e.what() << '\n'; + } + } + + return false; +}; //============================================================================= @@ -82,11 +104,9 @@ SMESH_ParallelMesh::~SMESH_ParallelMesh() //============================================================================= void SMESH_ParallelMesh::CreateTmpFolder() { -#ifndef WIN32 // Temporary folder that will be used by parallel computation tmp_folder = fs::temp_directory_path()/fs::unique_path(fs::path("SMESH_%%%%-%%%%")); fs::create_directories(tmp_folder); -#endif } // //============================================================================= @@ -96,11 +116,43 @@ void SMESH_ParallelMesh::CreateTmpFolder() //============================================================================= void SMESH_ParallelMesh::DeleteTmpFolder() { -#ifndef WIN32 + MESSAGE("Deleting temporary folder" << tmp_folder.string()); fs::remove_all(tmp_folder); -#endif } +//============================================================================= +/*! + * \brief Get the number of Threads to be used for the pool of Threads + */ +//============================================================================= +int SMESH_ParallelMesh::GetPoolNbThreads() +{ + int nbThreads = -1; + + if(_method == ParallelismMethod::MultiThread){ + nbThreads = _NbThreads; + }else if( _method == ParallelismMethod::MultiNode){ + //TODO: Check of that is the right way + nbThreads = std::max(_nbProc, _nbNode*_nbProcPerNode); + } else { + throw SALOME_Exception("Unknown method "+std::to_string(_method)); + } + + return nbThreads; +} + +//============================================================================= +/*! 
+ * \brief Set Number of thread for multithread run + */ +//============================================================================= +void SMESH_ParallelMesh::SetNbThreads(long nbThreads) +{ + if(nbThreads < 1) + throw SALOME_Exception("Number of threads should be higher than 1"); + _NbThreads=nbThreads; +}; + bool SMESH_ParallelMesh::ComputeSubMeshes( SMESH_Gen* gen, SMESH_Mesh & aMesh, diff --git a/src/SMESH/SMESH_ParallelMesh.hxx b/src/SMESH/SMESH_ParallelMesh.hxx index 2fff4e3b4..4a418a49f 100644 --- a/src/SMESH/SMESH_ParallelMesh.hxx +++ b/src/SMESH/SMESH_ParallelMesh.hxx @@ -29,8 +29,17 @@ #include "SMESH_Mesh.hxx" +#ifndef WIN32 +#include +#endif + #include "SMESH_Gen.hxx" #include "SMESH_subMesh.hxx" +#ifdef WIN32 +#include +#include +#endif +enum ParallelismMethod {MultiThread, MultiNode}; class SMESH_EXPORT SMESH_ParallelMesh: public SMESH_Mesh { @@ -40,44 +49,67 @@ class SMESH_EXPORT SMESH_ParallelMesh: public SMESH_Mesh bool theIsEmbeddedMode, SMESHDS_Document* theDocument); - virtual ~SMESH_ParallelMesh(); + ~SMESH_ParallelMesh(); -#ifndef WIN32 + // Locking mechanism + #ifndef WIN32 void Lock() override {_my_lock.lock();}; void Unlock() override {_my_lock.unlock();}; - - int GetNbThreads() override{return _NbThreads;}; - void SetNbThreads(long nbThreads) override{_NbThreads=nbThreads;}; - - void InitPoolThreads() override {_pool = new boost::asio::thread_pool(_NbThreads);}; - void DeletePoolThreads() override {delete _pool;}; - + // We need to recreate the pool afterthe join void wait() override {_pool->join(); DeletePoolThreads(); InitPoolThreads(); }; + #endif + + // Thread Pool +#ifndef WIN32 + void InitPoolThreads() {_pool = new boost::asio::thread_pool(GetPoolNbThreads());}; + boost::asio::thread_pool* GetPool() {return _pool;}; + void DeletePoolThreads() {delete _pool;}; +#else + void InitPoolThreads() {}; + void* GetPool() {return NULL;}; + void DeletePoolThreads(){}; +#endif - bool IsParallel() override {return _NbThreads > 0;}; + int GetPoolNbThreads(); + // Temporary folder + bool keepingTmpFolfer(); void CreateTmpFolder(); void DeleteTmpFolder(); + boost::filesystem::path GetTmpFolder() {return tmp_folder;}; + void cleanup(); - boost::filesystem::path GetTmpFolder() override {return tmp_folder;}; - boost::asio::thread_pool* GetPool() override {return _pool;}; -#else - void Lock() override {}; - void Unlock() override {}; + // + bool IsParallel() override {return true;}; - int GetNbThreads() override {return 0;}; - void SetNbThreads(long nbThreads) {(void) nbThreads;}; + // Parallelims paramaters + int GetParallelismMethod() {return _method;}; + void SetParallelismMethod(int aMethod) {_method = aMethod;}; - void InitPoolThreads() override {}; - void DeletePoolThreads() override {}; - void wait() override {}; + // Mutlithreading parameters + int GetNbThreads() {return _NbThreads;}; + void SetNbThreads(long nbThreads); - bool IsParallel() override {return false;}; + // Multinode parameters + std::string GetResource() {return _resource;}; + void SetResource(std::string aResource) {_resource = aResource;}; - void CreateTmpFolder(); - void DeleteTmpFolder(); -#endif + int GetNbProc() {return _nbProc;}; + void SetNbProc(long nbProc) {_nbProc = nbProc;}; + + int GetNbProcPerNode() {return _nbProcPerNode;}; + void SetNbProcPerNode(long nbProcPerNodes) {_nbProcPerNode = nbProcPerNodes;}; + + int GetNbNode() {return _nbNode;}; + void SetNbNode(long nbNodes) {_nbNode = nbNodes;}; + std::string GetWcKey() {return _wcKey;}; + void SetWcKey(std::string wcKey) {_wcKey = 
wcKey;}; + + std::string GetWalltime() {return _walltime;}; + void SetWalltime(std::string walltime) {_walltime = walltime;}; + + // Parallel computation bool ComputeSubMeshes( SMESH_Gen* gen, SMESH_Mesh & aMesh, @@ -94,9 +126,22 @@ class SMESH_EXPORT SMESH_ParallelMesh: public SMESH_Mesh SMESH_ParallelMesh():SMESH_Mesh() {}; SMESH_ParallelMesh(const SMESH_ParallelMesh& aMesh):SMESH_Mesh(aMesh) {}; private: + // Mutex for multhitreading write in SMESH_Mesh #ifndef WIN32 - boost::filesystem::path tmp_folder; - boost::asio::thread_pool * _pool = nullptr; //thread pool for computation + boost::mutex _my_lock; + // thread pool for computation + boost::asio::thread_pool * _pool = nullptr; #endif + boost::filesystem::path tmp_folder; + int _method = ParallelismMethod::MultiThread; + + int _NbThreads = std::thread::hardware_concurrency(); + + int _nbProc = 1; + int _nbProcPerNode = 1; + int _nbNode = 1; + std::string _resource = ""; + std::string _wcKey = "P11N0:SALOME"; + std::string _walltime = "01:00:00"; }; #endif diff --git a/src/SMESH/SMESH_SequentialMesh.hxx b/src/SMESH/SMESH_SequentialMesh.hxx index 2906bd1a1..5abf93bd0 100644 --- a/src/SMESH/SMESH_SequentialMesh.hxx +++ b/src/SMESH/SMESH_SequentialMesh.hxx @@ -45,11 +45,6 @@ class SMESH_EXPORT SMESH_SequentialMesh: public SMESH_Mesh void Lock() override {}; void Unlock() override {}; - int GetNbThreads() override {return 0;}; - void SetNbThreads(long nbThreads) {(void) nbThreads;}; - - void InitPoolThreads() override {}; - void DeletePoolThreads() override {}; void wait() override {}; bool IsParallel() override {return false;}; diff --git a/src/SMESH/SMESH_subMesh.cxx b/src/SMESH/SMESH_subMesh.cxx index 67e1ab12a..a019213ee 100644 --- a/src/SMESH/SMESH_subMesh.cxx +++ b/src/SMESH/SMESH_subMesh.cxx @@ -1517,7 +1517,7 @@ bool SMESH_subMesh::ComputeStateEngine(compute_event event) // check submeshes needed // When computing in parallel mode we do not have a additional layer of submesh // The check should not be done in parallel as that check is not thread-safe - if (_father->HasShapeToMesh() && !_father->IsParallel()) { + if (_father->HasShapeToMesh() && (!_father->IsParallel() || shape.ShapeType() != TopAbs_SOLID )) { bool subComputed = false, subFailed = false; if (!algo->OnlyUnaryInput()) { // --- commented for bos#22320 to compute all sub-shapes at once if possible; @@ -2188,10 +2188,13 @@ TopoDS_Shape SMESH_subMesh::getCollection(SMESH_Gen * /*theGen*/, { SMESH_subMesh* subMesh = smIt->next(); const TopoDS_Shape& S = subMesh->_subShape; - if ( S.ShapeType() != this->_subShape.ShapeType() ) + + if ( S.ShapeType() != this->_subShape.ShapeType() ){ continue; - if ( _allowedSubShapes && !_allowedSubShapes->IsEmpty() && !_allowedSubShapes->Contains( S )) + } + if ( _allowedSubShapes && !_allowedSubShapes->IsEmpty() && !_allowedSubShapes->Contains( S )){ continue; + } if ( subMesh == this ) { aBuilder.Add( aCompound, S ); @@ -2200,6 +2203,7 @@ TopoDS_Shape SMESH_subMesh::getCollection(SMESH_Gen * /*theGen*/, else if ( subMesh->GetComputeState() == READY_TO_COMPUTE ) { SMESH_Algo* anAlgo = subMesh->GetAlgo(); + if (( anAlgo->IsSameName( *theAlgo )) && // same algo ( anAlgo->GetUsedHypothesis( *_father, S, skipAuxHyps ) == usedHyps ) && // same hyps ( anAlgo->GetAssignedShapes() == assiShapes ) && // on same sub-shapes diff --git a/src/SMESH_I/CMakeLists.txt b/src/SMESH_I/CMakeLists.txt index f7fe422d0..a9788ca89 100644 --- a/src/SMESH_I/CMakeLists.txt +++ b/src/SMESH_I/CMakeLists.txt @@ -146,6 +146,7 @@ SET(SMESHEngine_SOURCES 
SMESH_PreMeshInfo.cxx MG_ADAPT_i.cxx SMESH_Homard_i.cxx + SMESH_ParallelMesh_i.cxx ) # --- rules --- diff --git a/src/SMESH_I/SMESH_Gen_i.cxx b/src/SMESH_I/SMESH_Gen_i.cxx index f8ce6b472..81dcf5a37 100644 --- a/src/SMESH_I/SMESH_Gen_i.cxx +++ b/src/SMESH_I/SMESH_Gen_i.cxx @@ -101,8 +101,11 @@ #include "SMESH_Hypothesis.hxx" #include "SMESH_Hypothesis_i.hxx" #include "SMESH_Mesh.hxx" +#include "SMESH_ParallelMesh.hxx" #include "SMESH_MeshEditor.hxx" #include "SMESH_Mesh_i.hxx" +#include +#include "SMESH_ParallelMesh_i.hxx" #include "SMESH_PreMeshInfo.hxx" #include "SMESH_PythonDump.hxx" #include "SMESH_ControlsDef.hxx" @@ -562,7 +565,7 @@ SMESH::SMESH_Hypothesis_ptr SMESH_Gen_i::createHypothesis(const char* theHypName */ //============================================================================= -SMESH::SMESH_Mesh_ptr SMESH_Gen_i::createMesh(bool parallel /*=false*/) +SMESH::SMESH_Mesh_ptr SMESH_Gen_i::createMesh() { Unexpect aCatch(SALOME_SalomeException); MESSAGE( "SMESH_Gen_i::createMesh" ); @@ -573,11 +576,10 @@ SMESH::SMESH_Mesh_ptr SMESH_Gen_i::createMesh(bool parallel /*=false*/) SMESH_Mesh_i* meshServant = new SMESH_Mesh_i( GetPOA(), this ); // create a new mesh object MESSAGE("myIsEmbeddedMode " << myIsEmbeddedMode); - if(parallel) { - meshServant->SetImpl( dynamic_cast(myGen.CreateParallelMesh( myIsEmbeddedMode ))); - }else{ - meshServant->SetImpl( dynamic_cast(myGen.CreateMesh( myIsEmbeddedMode ))); - } + SMESH_Mesh* myImpl = dynamic_cast(myGen.CreateMesh( myIsEmbeddedMode )); + if(myImpl == NULL ) + THROW_SALOME_CORBA_EXCEPTION( "Could not cast SequentialMesh as Mesh", SALOME::INTERNAL_ERROR ); + meshServant->SetImpl(myImpl); // activate the CORBA servant of Mesh SMESH::SMESH_Mesh_var mesh = SMESH::SMESH_Mesh::_narrow( meshServant->_this() ); @@ -592,6 +594,42 @@ SMESH::SMESH_Mesh_ptr SMESH_Gen_i::createMesh(bool parallel /*=false*/) return SMESH::SMESH_Mesh::_nil(); } +//============================================================================= +/*! + * SMESH_Gen_i::createParallelMesh + * + * Create empty parallel mesh on shape + */ +//============================================================================= +SMESH::SMESH_ParallelMesh_ptr SMESH_Gen_i::createParallelMesh() +{ + Unexpect aCatch(SALOME_SalomeException); + MESSAGE( "SMESH_Gen_i::createParallelMesh" ); + + // Get or create the GEOM_Client instance + try { + // create a new mesh object servant, store it in a map in study context + SMESH_ParallelMesh_i* meshServant = new SMESH_ParallelMesh_i( GetPOA(), this ); + // create a new mesh object + MESSAGE("myIsEmbeddedMode " << myIsEmbeddedMode); + SMESH_Mesh* myImpl = dynamic_cast(myGen.CreateParallelMesh( myIsEmbeddedMode )); + if(myImpl == NULL ) + THROW_SALOME_CORBA_EXCEPTION( "Could not cast ParallelMesh as Mesh", SALOME::INTERNAL_ERROR ); + meshServant->SetImpl(myImpl); + + // activate the CORBA servant of Mesh + SMESH::SMESH_ParallelMesh_var mesh = SMESH::SMESH_ParallelMesh::_narrow( meshServant->_this() ); + int nextId = RegisterObject( mesh ); + MESSAGE( "Add mesh to map with id = "<< nextId); + + return mesh._retn(); + } + catch (SALOME_Exception& S_ex) { + THROW_SALOME_CORBA_EXCEPTION( S_ex.what(), SALOME::BAD_PARAM ); + } + return SMESH::SMESH_ParallelMesh::_nil(); +} + //============================================================================= /*! 
* SMESH_Gen_i::GetShapeReader @@ -1235,14 +1273,14 @@ SMESH::SMESH_Mesh_ptr SMESH_Gen_i::CreateMesh( GEOM::GEOM_Object_ptr theShapeObj */ //============================================================================= -SMESH::SMESH_Mesh_ptr SMESH_Gen_i::CreateParallelMesh( GEOM::GEOM_Object_ptr theShapeObject ) +SMESH::SMESH_ParallelMesh_ptr SMESH_Gen_i::CreateParallelMesh( GEOM::GEOM_Object_ptr theShapeObject ) { Unexpect aCatch(SALOME_SalomeException); MESSAGE( "SMESH_Gen_i::CreateParallelMesh" ); // create mesh - SMESH::SMESH_Mesh_var mesh = this->createMesh(true); + SMESH::SMESH_ParallelMesh_var mesh = this->createParallelMesh(); // set shape - SMESH_Mesh_i* meshServant = SMESH::DownCast( mesh ); + SMESH_ParallelMesh_i* meshServant = SMESH::DownCast( mesh ); ASSERT( meshServant ); meshServant->SetShape( theShapeObject ); @@ -1254,7 +1292,7 @@ SMESH::SMESH_Mesh_ptr SMESH_Gen_i::CreateParallelMesh( GEOM::GEOM_Object_ptr the aStudyBuilder->CommitCommand(); if ( !aSO->_is_nil() ) { // Update Python script - TPythonDump(this) << aSO << " = " << this << ".CreateMesh(" << theShapeObject << ")"; + TPythonDump(this) << aSO << " = " << this << ".CreateParallelMesh(" << theShapeObject << ")"; } } diff --git a/src/SMESH_I/SMESH_Gen_i.hxx b/src/SMESH_I/SMESH_Gen_i.hxx index fd32852d0..26a4227e9 100644 --- a/src/SMESH_I/SMESH_Gen_i.hxx +++ b/src/SMESH_I/SMESH_Gen_i.hxx @@ -232,7 +232,7 @@ public: SMESH::SMESH_Mesh_ptr CreateMesh( GEOM::GEOM_Object_ptr theShapeObject ); // Create empty parallel mesh on a shape - SMESH::SMESH_Mesh_ptr CreateParallelMesh( GEOM::GEOM_Object_ptr theShapeObject ); + SMESH::SMESH_ParallelMesh_ptr CreateParallelMesh( GEOM::GEOM_Object_ptr theShapeObject ); // Create empty mesh SMESH::SMESH_Mesh_ptr CreateEmptyMesh(); @@ -634,7 +634,8 @@ private: SMESH::SMESH_Hypothesis_ptr createHypothesis( const char* theHypName, const char* theLibName); // Create empty mesh on shape - SMESH::SMESH_Mesh_ptr createMesh(bool parallel=false); + SMESH::SMESH_Mesh_ptr createMesh(); + SMESH::SMESH_ParallelMesh_ptr createParallelMesh(); // Check mesh icon bool isGeomModifIcon( SMESH::SMESH_Mesh_ptr mesh ); diff --git a/src/SMESH_I/SMESH_Mesh_i.cxx b/src/SMESH_I/SMESH_Mesh_i.cxx index f00e6eaa6..37394ce64 100644 --- a/src/SMESH_I/SMESH_Mesh_i.cxx +++ b/src/SMESH_I/SMESH_Mesh_i.cxx @@ -7028,24 +7028,6 @@ TListOfListOfInt SMESH_Mesh_i::findConcurrentSubMeshes() return res; } -//============================================================================= -/*! - * \brief Set the number of threads for a parallel computation - */ -//============================================================================= -void SMESH_Mesh_i::SetNbThreads(CORBA::Long nbThreads){ - _impl->SetNbThreads(nbThreads); -} - -//============================================================================= -/*! - * \brief Get the number of threads for a parallel computation - */ -//============================================================================= -CORBA::Long SMESH_Mesh_i::GetNbThreads(){ - return _impl->GetNbThreads(); -} - //============================================================================= /*! diff --git a/src/SMESH_I/SMESH_Mesh_i.hxx b/src/SMESH_I/SMESH_Mesh_i.hxx index 208d5d275..dec57a7e3 100644 --- a/src/SMESH_I/SMESH_Mesh_i.hxx +++ b/src/SMESH_I/SMESH_Mesh_i.hxx @@ -673,21 +673,16 @@ private: SMESH::submesh_array_array& theSubMeshOrder, const bool theIsDump); - /*! - * Parallelims informations - */ - void SetNbThreads(CORBA::Long nbThreads); - CORBA::Long GetNbThreads(); - /*! 
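
Because the Python dump now writes CreateParallelMesh instead of CreateMesh, a study dumped after building a parallel mesh is expected to contain a line of roughly the following shape (object names are illustrative):

    # excerpt from a dumped study script (names are illustrative)
    Mesh_1 = smesh.CreateParallelMesh(Box_1)
    # the returned reference is a SMESH_ParallelMesh, which also narrows
    # to the base SMESH_Mesh interface
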
* \brief Finds concurrent sub-meshes */ TListOfListOfInt findConcurrentSubMeshes(); + protected: + ::SMESH_Mesh* _impl; // :: force no namespace here private: static int _idGenerator; - ::SMESH_Mesh* _impl; // :: force no namespace here SMESH_Gen_i* _gen_i; int _id; // id given by creator (unique within the creator instance) int _nbInvalidHypos; diff --git a/src/SMESH_I/SMESH_ParallelMesh_i.cxx b/src/SMESH_I/SMESH_ParallelMesh_i.cxx index f9f30dc2a..6e8bad1a6 100644 --- a/src/SMESH_I/SMESH_ParallelMesh_i.cxx +++ b/src/SMESH_I/SMESH_ParallelMesh_i.cxx @@ -24,28 +24,11 @@ // Module : SMESH #include "SMESH_ParallelMesh_i.hxx" +#include "SMESH_Mesh_i.hxx" #include "SMESH_Gen_i.hxx" -#ifdef _DEBUG_ -static int MYDEBUG = 0; -#else -static int MYDEBUG = 0; -#endif - -//============================================================================= -/*! - * Constructor - */ -//============================================================================= - -SMESH_ParallelMesh_i::SMESH_ParallelMesh_i( PortableServer::POA_ptr thePOA, - SMESH_Gen_i* gen_i ) -: SMESH_Mesh_i(thePOA, gen_i) -{ -} - //============================================================================= namespace { @@ -70,28 +53,156 @@ namespace }; } -//================================================================================ +::SMESH_ParallelMesh* SMESH_ParallelMesh_i::DownCast() +{ + ::SMESH_ParallelMesh* myImpl = dynamic_cast<::SMESH_ParallelMesh*>(_impl); + if (myImpl == NULL) + THROW_SALOME_CORBA_EXCEPTION("Could not cast as ParallelMesh", SALOME::INTERNAL_ERROR); + + return myImpl; +} + +//============================================================================= /*! - * \brief Set mesh implementation + * \brief Get the parallellism method */ -//================================================================================ +//============================================================================= -void SMESH_ParallelMesh_i::SetImpl(::SMESH_ParallelMesh * impl) -{ - if(MYDEBUG) MESSAGE("SMESH_ParallelMesh_i::SetImpl"); - _impl = impl; - if ( _impl ) - _impl->SetCallUp( new TCallUp_i(this)); +CORBA::Long SMESH_ParallelMesh_i::GetParallelismMethod(){ + return DownCast()->GetParallelismMethod(); } //============================================================================= /*! - * Return a mesh implementation + * \brief Set the parallellism method */ //============================================================================= +void SMESH_ParallelMesh_i::SetParallelismMethod(CORBA::Long aMethod){ + DownCast()->SetParallelismMethod(aMethod); +} -::SMESH_ParallelMesh & SMESH_ParallelMesh_i::GetImpl() -{ - if(MYDEBUG) MESSAGE("SMESH_ParallelMesh_i::GetImpl()"); - return *_impl; -} \ No newline at end of file +//============================================================================= +/*! + * \brief Get the number of threads for a parallel computation + */ +//============================================================================= +CORBA::Long SMESH_ParallelMesh_i::GetNbThreads(){ + return DownCast()->GetNbThreads(); +} + +//============================================================================= +/*! + * \brief Set the number of threads for a parallel computation + */ +//============================================================================= +void SMESH_ParallelMesh_i::SetNbThreads(CORBA::Long nbThreads){ + DownCast()->SetNbThreads(nbThreads); +} + +//============================================================================= +/*! 
+ * \brief Get the ressource to connect to + */ +//============================================================================= +char* SMESH_ParallelMesh_i::GetResource(){ + return CORBA::string_dup(DownCast()->GetResource().c_str()); +} + +//============================================================================= +/*! + * \brief Set the ressource to connect to + */ +//============================================================================= +void SMESH_ParallelMesh_i::SetResource(const char* aResource){ + DownCast()->SetResource(std::string(aResource)); +} + +//============================================================================= +/*! + * \brief Get the number of processor to use on ressource + */ +//============================================================================= +CORBA::Long SMESH_ParallelMesh_i::GetNbProc(){ + return DownCast()->GetNbProc(); +} + +//============================================================================= +/*! + * \brief Set the number of processor to use on ressource + */ +//============================================================================= +void SMESH_ParallelMesh_i::SetNbProc(CORBA::Long nbProcs){ + DownCast()->SetNbProc(nbProcs); +} + +//============================================================================= +/*! + * \brief Get the number of processor per node to use on ressource + */ +//============================================================================= +CORBA::Long SMESH_ParallelMesh_i::GetNbProcPerNode(){ + return DownCast()->GetNbProcPerNode(); +} + +//============================================================================= +/*! + * \brief Set the number of processor per node to use on ressource + */ +//============================================================================= +void SMESH_ParallelMesh_i::SetNbProcPerNode(CORBA::Long nbProcPerNodes){ + DownCast()->SetNbProcPerNode(nbProcPerNodes); +} + +//============================================================================= +/*! + * \brief Get the number of node to use on ressource + */ +//============================================================================= +CORBA::Long SMESH_ParallelMesh_i::GetNbNode(){ + return DownCast()->GetNbNode(); +} + +//============================================================================= +/*! + * \brief Set the number of node to use on ressource + */ +//============================================================================= +void SMESH_ParallelMesh_i::SetNbNode(CORBA::Long nbNodes){ + DownCast()->SetNbNode(nbNodes); +} + +//============================================================================= +/*! + * \brief Get the wckey to use on ressource + */ +//============================================================================= +char* SMESH_ParallelMesh_i::GetWcKey(){ + return CORBA::string_dup(DownCast()->GetWcKey().c_str()); +} + +//============================================================================= +/*! + * \brief Set the wckey to use on ressource + */ +//============================================================================= +void SMESH_ParallelMesh_i::SetWcKey(const char* wcKey){ + DownCast()->SetWcKey(std::string(wcKey)); +} + +//============================================================================= +/*! 
+ * \brief Get the walltime to use on ressource + */ +//============================================================================= +char* SMESH_ParallelMesh_i::GetWalltime(){ + return CORBA::string_dup(DownCast()->GetWalltime().c_str()); +} + +//============================================================================= +/*! + * \brief Set the walltime to use on ressource + */ +//============================================================================= +void SMESH_ParallelMesh_i::SetWalltime(const char* walltime){ + DownCast()->SetWalltime(std::string(walltime)); +} diff --git a/src/SMESH_I/SMESH_ParallelMesh_i.hxx b/src/SMESH_I/SMESH_ParallelMesh_i.hxx index 67eb639d4..51ae0c93f 100644 --- a/src/SMESH_I/SMESH_ParallelMesh_i.hxx +++ b/src/SMESH_I/SMESH_ParallelMesh_i.hxx @@ -30,6 +30,7 @@ #include "SMESH_Hypothesis.hxx" #include "SMESH_Mesh_i.hxx" +#include "SMESH_ParallelMesh.hxx" #include #include @@ -41,11 +42,40 @@ class SMESH_I_EXPORT SMESH_ParallelMesh_i: public virtual POA_SMESH::SMESH_ParallelMesh, public virtual SMESH_Mesh_i { + SMESH_ParallelMesh_i(); + SMESH_ParallelMesh_i(const SMESH_ParallelMesh_i&); + public: SMESH_ParallelMesh_i( PortableServer::POA_ptr thePOA, SMESH_Gen_i* myGen_i ):SMESH_Mesh_i(thePOA, myGen_i){}; virtual ~SMESH_ParallelMesh_i(){}; + CORBA::Long GetParallelismMethod(); + void SetParallelismMethod(CORBA::Long aMethod); + + CORBA::Long GetNbThreads(); + void SetNbThreads(CORBA::Long nbThreads); + + char* GetResource(); + void SetResource(const char* aResource); + + CORBA::Long GetNbProc(); + void SetNbProc(CORBA::Long nbProcs); + + CORBA::Long GetNbProcPerNode(); + void SetNbProcPerNode(CORBA::Long nbProcPerNodes); + + CORBA::Long GetNbNode(); + void SetNbNode(CORBA::Long nbNodes); + + char* GetWcKey(); + void SetWcKey(const char* wcKey); + + char* GetWalltime(); + void SetWalltime(const char* walltime); + + private: + ::SMESH_ParallelMesh* DownCast(); }; #endif diff --git a/src/SMESH_SWIG/CMakeLists.txt b/src/SMESH_SWIG/CMakeLists.txt index e1abcfd2b..ae973e7be 100644 --- a/src/SMESH_SWIG/CMakeLists.txt +++ b/src/SMESH_SWIG/CMakeLists.txt @@ -40,6 +40,11 @@ SET(smesh_SCRIPTS smesh_tools.py ) +SET(smesh_exe_SCRIPTS + mesher_launcher.py + send_files.py +) + SET(StdMeshers_SCRIPTS __init__.py StdMeshersBuilder.py @@ -66,6 +71,7 @@ ENDIF(WIN32) install(TARGETS _SMeshHelper DESTINATION ${SALOME_INSTALL_LIBS}) install(FILES ${SMeshHelper_HEADERS} DESTINATION ${SALOME_INSTALL_HEADERS}) SALOME_INSTALL_SCRIPTS("${_swig_SCRIPTS}" ${SALOME_INSTALL_BINS} EXTRA_DPYS "${SWIG_MODULE_SMeshHelper_REAL_NAME}") +SALOME_INSTALL_SCRIPTS("${smesh_exe_SCRIPTS}" ${SALOME_INSTALL_BINS}) # --- rules --- SALOME_INSTALL_SCRIPTS("${smesh_SCRIPTS}" ${SALOME_INSTALL_PYTHON}/salome/smesh DEF_PERMS) diff --git a/src/SMESH_SWIG/mesher_launcher.py b/src/SMESH_SWIG/mesher_launcher.py new file mode 100644 index 000000000..65de14925 --- /dev/null +++ b/src/SMESH_SWIG/mesher_launcher.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +## Copyright (C) 2021-2023 CEA/DEN, EDF R&D, OPEN CASCADE +## +## This library is free software; you can redistribute it and/or +## modify it under the terms of the GNU Lesser General Public +## License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## +## This library is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +## Lesser General Public License for more details. +## +## You should have received a copy of the GNU Lesser General Public +## License along with this library; if not, write to the Free Software +## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +## +## See http://www.salome-platform.org/ or email : +## webmaster.salome@opencascade.com +## +""" +File to run mesher from command line +""" +from os import environ, path +import sys +import subprocess as sp + +from argparse import ArgumentParser +import pydefx +import pylauncher + +MESHER_HANDLED = ["NETGEN3D"] + +CMD_TEMPLATE = \ +"""{runner} {mesher} {mesh_file} {shape_file} {param_file} {elem_orientation_file} {new_element_file} {output_mesh_file} > {log_file} 2>&1""" + +PYTHON_CODE = \ +""" +import subprocess as sp +def _exec(cmd): + error_code = -1 + try: + output = sp.check_output(cmd, shell=True) + error_code = 0 + except sp.CalledProcessError as e: + print('Code crash') + print(e.output) + error_code = e.returncode + raise e + return error_code +""" + +def create_launcher(): + """ Initialise pylauncher + """ + launcher = pylauncher.Launcher_cpp() + launcher.SetResourcesManager(create_resources_manager()) + return launcher + +def create_resources_manager(): + """ Look for the catalog file and create a ressource manager with it """ + # localhost is defined anyway, even if the catalog file does not exist. + catalog_path = environ.get("USER_CATALOG_RESOURCES_FILE", "") + if not path.isfile(catalog_path): + salome_path = environ.get("ROOT_SALOME_INSTALL", "") + catalog_path = path.join(salome_path, "CatalogResources.xml") + if not path.isfile(catalog_path): + catalog_path = "" + + return pylauncher.ResourcesManager_cpp(catalog_path) + +def create_job_parameters(): + """ Initialsie JobParameters """ + jparam = pylauncher.JobParameters_cpp() + jparam.resource_required = create_resource_parameters() + return jparam + +def create_resource_parameters(): + """ Init resourceParams """ + return pylauncher.resourceParams() + +def get_runner(mesher): + """ + Get path to exe for mesher + + Arguments: + mesher: Name of the mesher (NETGEN2D/NETGEN3D...) 
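
create_resources_manager() above looks for the resource catalog in USER_CATALOG_RESOURCES_FILE first and falls back to CatalogResources.xml under ROOT_SALOME_INSTALL. A sketch for pointing the launcher at a specific catalog (the path is a placeholder, and the variable must be set in the environment of the process that runs the launcher):

    import os
    # Use a custom resource catalog; otherwise
    # $ROOT_SALOME_INSTALL/CatalogResources.xml is tried, and if neither
    # file exists only localhost is available.
    os.environ["USER_CATALOG_RESOURCES_FILE"] = "/home/user/CatalogResources.xml"
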
+ + retuns (string) Path to the exe + """ + if sys.platform.startswith('win'): + ext = ".exe" + else: + ext = "" + + if mesher in ['NETGEN3D']: + exe_path = path.join("${NETGENPLUGIN_ROOT_DIR}", + "bin", + "salome", + "NETGENPlugin_Runner"+ext) + else: + raise Exception("Mesher {mesher} is not handled".format(mesher=mesher)) + + return exe_path + +def run_local(args): + """ Simple Local run """ + print("Local run") + #TODO: Check on how to handle log for windows (through sp.check_output) + cmd = CMD_TEMPLATE.format(\ + runner=get_runner(args.mesher), + mesher=args.mesher, + mesh_file=args.input_mesh_file, + shape_file=args.shape_file, + param_file=args.hypo_file, + elem_orientation_file=args.elem_orient_file, + new_element_file=args.new_element_file, + log_file=path.join(path.dirname(args.shape_file), "run.log"), + output_mesh_file=args.output_mesh_file) + print("Executing:") + print(cmd) + sp.check_output(cmd, shell=True, cwd=path.dirname(args.shape_file)) + +def run_pylauncher(args): + """ Run exe throuhg pylauncher """ + import time + print("Cluster run") + + cmd = CMD_TEMPLATE.format(\ + runner=get_runner(args.mesher), + mesher=args.mesher, + mesh_file="../"+path.basename(args.input_mesh_file), + shape_file=path.basename(args.shape_file), + param_file=path.basename(args.hypo_file), + elem_orientation_file=path.basename(args.elem_orient_file), + new_element_file=path.basename(args.new_element_file), + log_file="run.log", + output_mesh_file=path.basename(args.output_mesh_file)) + + print("Cmd: ", cmd) + + # salome launcher + launcher = create_launcher() + + # See SALOME_Launcher documentation for parameters + job_params = create_job_parameters() + # different type are: + # command Shell out of salome session + # command_salome Shell in salome shell + # python_salome Python script + # yacs_file + job_params.job_type = "command_salome" # creates CatalogResources.xml + + job_params.wckey = args.wc_key + job_params.resource_required.nb_proc = args.nb_proc + job_params.resource_required.nb_proc_per_node = args.nb_proc_per_node + job_params.resource_required.nb_node = args.nb_node + job_params.maximum_duration = args.walltime + + # job_params.pre_command = pre_command # command to run on frontal + # script to run in batch mode + run_script = path.join(path.dirname(args.shape_file), "run.sh") + with open(run_script, "w") as f: + f.write("#!/bin/bash\n") + f.write(cmd) + job_params.job_file = run_script + + local_dir = path.dirname(args.shape_file) + + # files to copy to remote working dir + # Directories are copied recursively. + # job_file script is automaticaly copied. + job_params.in_files = [args.shape_file, + args.hypo_file, + args.elem_orient_file] + + print("in_files", job_params.in_files) + # local path for in_files + job_params.local_directory = local_dir + # result files you want to bring back with getJobResults + # TODO: replace run.log by argument ? 
by path + out_files = ["run.log"] + if args.new_element_file != "NONE": + out_files.append(path.relpath(args.new_element_file, local_dir)) + if args.output_mesh_file != "NONE": + out_files.append(path.relpath(args.output_mesh_file, local_dir)) + job_params.out_files = out_files + print("out_files", job_params.out_files) + # local path where to copy out_files + job_params.result_directory = local_dir + + job_params.job_name = "SMESH_parallel" + job_params.resource_required.name = args.resource + + # Extra parameters + # String that is directly added to the job submission file + # job_params.extra_params = "#SBATCH --nodes=2" + + # remote job directory + # Retrieve working dir from catalog + res_manager = create_resources_manager() + res_params = res_manager.GetResourceDefinition(args.resource) + job_params.work_directory = path.join(\ + res_params.working_directory, + path.basename(path.dirname(path.dirname(args.shape_file))), + path.basename(path.dirname(args.shape_file))) + print("work directory", job_params.work_directory) + + job_id = launcher.createJob(job_params) #SALOME id of the job + launcher.launchJob(job_id) # copy files, run pre_command, submit job + + # wait for the end of the job + job_state = launcher.getJobState(job_id) + print("Job %d state: %s" % (job_id, job_state)) + while job_state not in ["FINISHED", "FAILED"]: + time.sleep(3) + job_state = launcher.getJobState(job_id) + + if job_state == "FAILED": + raise Exception("Job failed") + else: + # verify the return code of the execution + if(launcher.getJobWorkFile(job_id, + "logs/exit_code.log", + job_params.result_directory)): + exit_code_file = path.join(job_params.result_directory, + "exit_code.log") + exit_code = "" + if path.isfile(exit_code_file): + with open(exit_code_file) as myfile: + exit_code = myfile.read() + exit_code = exit_code.strip() + if exit_code != "0": + raise Exception(\ + "An error occured during the execution of the job.") + else: + raise Exception("Failed to get the exit code of the job.") + + # Retrieve result files + launcher.getJobResults(job_id, "") + + # Delete remote working dir + del_tmp_folder = True + try: + val = int(environ.get("SMESH_KEEP_TMP", "0")) + del_tmp_folder = val > 0 + except Exception as e: + del_tmp_folder = True + + launcher.clearJobWorkingDir(job_id) + +def def_arg(): + """ Define and parse arguments for the script """ + parser = ArgumentParser() + parser.add_argument("mesher", + choices=MESHER_HANDLED, + help="mesher to use from ("+",".join(MESHER_HANDLED)+")") + parser.add_argument("input_mesh_file",\ + help="MED File containing lower-dimension-elements already meshed") + parser.add_argument("shape_file", + help="STEP file containing the shape to mesh") + parser.add_argument("hypo_file", + help="Ascii file containint the list of parameters") + parser.add_argument("--elem-orient-file",\ + help="binary file containing the list of elements from "\ + "INPUT_MESH_FILE associated to the shape and their orientation") + # Output file parameters + output = parser.add_argument_group("Output files", "Possible output files") + output.add_argument("--new-element-file", + default="NONE", + help="contains elements and nodes added by the meshing") + output.add_argument(\ + "--output-mesh-file", + default="NONE", + help="MED File containing the mesh after the run of the mesher") + + # Run parameters + run_param = parser.add_argument_group(\ + "Run parameters", + "Parameters for the run of the mesher") + run_param.add_argument("--method", + default="local", + choices=["local", "cluster"], 
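
For reference, a hand-driven run of the launcher in cluster mode can be assembled from the positional and optional arguments registered in def_arg() here and just below. Every file name in this sketch is a placeholder; it is shown only to document the command-line interface of the new script, useful for testing it outside of SALOME.

    import subprocess
    cmd = [
        "python3", "mesher_launcher.py", "NETGEN3D",
        "Mesh2D.med",                          # lower-dimension mesh exported by SMESH
        "solid.step",                          # shape to mesh
        "hypo.txt",                            # dumped hypothesis parameters
        "--elem-orient-file=elem_orient.dat",
        "--new-element-file=new_elements.dat",
        "--output-mesh-file=Mesh3D.med",
        "--method=cluster",
        "--resource=cronos",
        "--nb-proc=8",
        "--nb-proc-per-node=2",
        "--nb-node=4",
        "--walltime=01:00:00",
        "--wc-key=P11N0:SALOME",
    ]
    subprocess.run(cmd, check=True)
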
+ help="Running method (default: local)") + + run_param.add_argument("--resource", + help="resource from SALOME Catalog") + run_param.add_argument("--nb-proc", + default=1, + type=int, + help="Number of processors") + run_param.add_argument("--nb-proc-per-node", + default=1, + type=int, + help="Number of processeor per node") + run_param.add_argument("--nb-node", + default=1, + type=int, + help="Number of node") + run_param.add_argument("--walltime", + default="01:00:00", + help="walltime for job submission HH:MM:SS (default 01:00:00)") + run_param.add_argument("--wc-key", + default="P11N0:SALOME", + help="wc-key for job submission (default P11N0:SALOME)") + + args = parser.parse_args() + + return args + +def main(): + """ Main function """ + args = def_arg() + if args.method == "local": + run_local(args) + elif args.method == "cluster": + run_pylauncher(args) + else: + raise Exception("Unknown method {}".format(args.method)) + +if __name__ == "__main__": + main() diff --git a/src/SMESH_SWIG/send_files.py b/src/SMESH_SWIG/send_files.py new file mode 100644 index 000000000..35d815e2f --- /dev/null +++ b/src/SMESH_SWIG/send_files.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +## Copyright (C) 2021-2023 CEA/DEN, EDF R&D, OPEN CASCADE +## +## This library is free software; you can redistribute it and/or +## modify it under the terms of the GNU Lesser General Public +## License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## +## This library is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +## Lesser General Public License for more details. +## +## You should have received a copy of the GNU Lesser General Public +## License along with this library; if not, write to the Free Software +## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +## +## See http://www.salome-platform.org/ or email : +## webmaster.salome@opencascade.com +## +""" +File to send files on remote ressource +""" +from os import environ, path + +from argparse import ArgumentParser +import pydefx +import pylauncher + +def create_launcher(): + """ Initialise pylauncher + """ + launcher = pylauncher.Launcher_cpp() + launcher.SetResourcesManager(create_resources_manager()) + return launcher + +def create_resources_manager(): + """ Look for the catalog file and create a ressource manager with it """ + # localhost is defined anyway, even if the catalog file does not exist. 
+    catalog_path = environ.get("USER_CATALOG_RESOURCES_FILE", "")
+    if not path.isfile(catalog_path):
+        salome_path = environ.get("ROOT_SALOME_INSTALL", "")
+        catalog_path = path.join(salome_path, "CatalogResources.xml")
+        if not path.isfile(catalog_path):
+            catalog_path = ""
+
+    return pylauncher.ResourcesManager_cpp(catalog_path)
+
+def create_job_parameters():
+    """ Initialise JobParameters """
+    jparam = pylauncher.JobParameters_cpp()
+    jparam.resource_required = create_resource_parameters()
+    return jparam
+
+def create_resource_parameters():
+    """ Init resourceParams """
+    return pylauncher.resourceParams()
+
+def send_file(args):
+    """ job to send a file to the cluster """
+    # salome launcher
+    launcher = create_launcher()
+
+    # See SALOME_Launcher documentation for parameters
+    job_params = create_job_parameters()
+    job_params.job_type = "command_salome" # creates CatalogResources.xml
+
+    local_dir = path.dirname(args.input_file)
+
+    # job_params.pre_command = pre_command # command to run on the front-end
+    # script to run in batch mode
+    run_script = path.join(path.dirname(args.input_file), "run.sh")
+    with open(run_script, "w") as f:
+        f.write("#!/bin/bash\n")
+    job_params.job_file = run_script
+    job_params.resource_required.nb_proc = 1
+
+    # files to copy to remote working dir
+    # Directories are copied recursively.
+    # job_file script is automatically copied.
+    job_params.in_files = [args.input_file]
+    print("in_files", job_params.in_files)
+    # local path where to copy out_files
+    job_params.result_directory = local_dir
+
+    job_params.job_name = "SMESH_transfer"
+    job_params.resource_required.name = args.resource
+
+    # remote job directory
+    # Retrieve working dir from catalog
+    res_manager = create_resources_manager()
+    res_params = res_manager.GetResourceDefinition(args.resource)
+    job_params.work_directory = path.join(\
+            res_params.working_directory,
+            path.basename(path.dirname(args.input_file)))
+
+    print("work_directory", job_params.work_directory)
+
+    job_id = launcher.createJob(job_params) # SALOME id of the job
+    launcher.exportInputFiles(job_id)
+
+
+def def_arg():
+    """ Define and parse arguments for the script """
+    parser = ArgumentParser()
+    parser.add_argument("input_file",\
+        help="file to copy")
+
+    # Run parameters
+
+    parser.add_argument("--resource",
+                        help="resource from SALOME Catalog")
+
+    args = parser.parse_args()
+
+    return args
+
+def main():
+    """ Main function """
+    args = def_arg()
+    send_file(args)
+
+if __name__ == "__main__":
+    main()
diff --git a/src/SMESH_SWIG/smeshBuilder.py b/src/SMESH_SWIG/smeshBuilder.py
index 13d5117b9..781137a69 100644
--- a/src/SMESH_SWIG/smeshBuilder.py
+++ b/src/SMESH_SWIG/smeshBuilder.py
@@ -1629,7 +1629,9 @@ class Mesh(metaclass = MeshMeta):
                     geo_name = "%s_%s to mesh"%(self.geom.GetShapeType(), id(self.geom)%100)
                     geompyD.addToStudy( self.geom, geo_name )
                 if parallel and isinstance(self, ParallelMesh):
-                    self.SetMesh( self.smeshpyD.CreateParallelMesh(self.geom) )
+                    mymesh = self.smeshpyD.CreateParallelMesh(self.geom)
+                    mymesh2 = mymesh._narrow(SMESH._objref_SMESH_Mesh)
+                    self.SetMesh( mymesh )
                 else:
                     self.SetMesh( self.smeshpyD.CreateMesh(self.geom) )
 
@@ -5490,7 +5492,7 @@ class Mesh(metaclass = MeshMeta):
                                            toCopyAll,toCreateAllElements,groups)
         if mesh: mesh = self.smeshpyD.Mesh(mesh)
         return nb, mesh, group
-
+
     def MakeBoundaryElements(self, dimension=SMESH.BND_2DFROM3D, groupName="", meshName="",
                              toCopyAll=False, groups=[]):
         """
@@ -7550,7 +7552,7 @@ def _copy_netgen_param(dim, local_param, global_param):
     Create 1D/2D/3D netgen parameters from a NETGEN 1D2D3D parameter
     """
     if dim==1:
-        #TODO: Try to identify why we need to substract 1
+        #TODO: Try to identify why we need to subtract 1 to have the same results
         local_param.NumberOfSegments(int(global_param.GetNbSegPerEdge())-1)
     elif dim==2:
         local_param.SetMaxSize(global_param.GetMaxSize())
@@ -7559,6 +7561,7 @@ def _copy_netgen_param(dim, local_param, global_param):
         local_param.SetFineness(global_param.GetFineness())
         local_param.SetNbSegPerEdge(global_param.GetNbSegPerEdge())
         local_param.SetNbSegPerRadius(global_param.GetNbSegPerRadius())
+        #TODO: Identify why the 0.9 factor is needed to get the same results
         local_param.SetGrowthRate(global_param.GetGrowthRate()*0.9)
         local_param.SetChordalError(global_param.GetChordalError())
         local_param.SetChordalErrorEnabled(global_param.GetChordalErrorEnabled())
@@ -7580,6 +7583,31 @@ def _copy_netgen_param(dim, local_param, global_param):
         local_param.SetGrowthRate(global_param.GetGrowthRate())
         local_param.SetNbThreads(global_param.GetNbThreads())
 
+
+def _shaperstudy2geom(geompyD, shaper_obj):
+    """
+    Conversion of a shaper object to a geom object
+
+    Parameters:
+        geompyD: geomBuilder instance
+        shaper_obj: Shaper study object
+
+    Returns:
+        geom object
+
+    """
+    import tempfile
+    # Writing the shaperstudy object into a brep file
+    fid, tmp_file = tempfile.mkstemp(suffix='.brep')
+    with open(fid, 'wb') as f:
+        f.write(shaper_obj.GetShapeStream())
+    # Reimporting the brep file into geom
+    real_geom = geompyD.ImportBREP(tmp_file)
+    os.remove(tmp_file)
+
+    return real_geom
+
+
 def _split_geom(geompyD, geom):
     """
     Splitting geometry into n solids and a 2D/1D compound
@@ -7588,7 +7616,11 @@ def _split_geom(geompyD, geom):
         geompyD: geomBuilder instance
         geom: geometrical object for meshing
 
+    Returns:
+        compound containing all the 1D,2D elements
+        list of solids
     """
+    # Splitting geometry into 3D elements and all the 2D/1D into one compound
     object_solids = geompyD.ExtractShapes(geom,
                                           geompyD.ShapeType["SOLID"],
                                           True)
@@ -7615,7 +7647,6 @@ def _split_geom(geompyD, geom):
                           'Face_{}'.format(iface))
 
     # Creating submesh for edges 1D/2D part
-
     all_faces = geompyD.MakeCompound(faces)
     geompyD.addToStudy(all_faces, 'Compound_1')
     all_faces = geompyD.MakeGlueEdges(all_faces, 1e-07)
@@ -7624,6 +7655,8 @@ def _split_geom(geompyD, geom):
 
     return all_faces, solids
 
+
+MULTITHREAD, MULTINODE = range(2)
 class ParallelismSettings:
     """
    Defines the parameters for the parallelism of ParallelMesh
@@ -7640,21 +7673,109 @@ class ParallelismSettings:
 
         self._mesh = mesh
 
+
+class MTParallelismSettings(ParallelismSettings):
+    """
+    Defines the parameters for the parallelism of ParallelMesh using MultiThreading
+    """
+    def __init__(self, mesh):
+        ParallelismSettings.__init__(self, mesh)
+
+    # Multithreading methods
     def SetNbThreads(self, nbThreads):
-        """
-        Set the number of threads for multithreading
-        """
+        """ Set the number of threads for multithreading """
         if nbThreads < 1:
             raise ValueError("Number of threads must be stricly greater than 1")
         self._mesh.mesh.SetNbThreads(nbThreads)
 
     def GetNbThreads(self):
-        """
-        Get Number of threads
-        """
+        """ Get the number of threads """
         return self._mesh.mesh.GetNbThreads()
 
+    def __str__(self):
+        """ str conversion """
+        string = "\nParameter for MultiThreading parallelism:\n"
+        string += "NbThreads: {}\n".format(self.GetNbThreads())
+
+        return string
+
+
+class MNParallelismSettings(ParallelismSettings):
+    """
+    Defines the parameters for the parallelism of ParallelMesh using MultiNode
+    """
+    def __init__(self, mesh):
+        ParallelismSettings.__init__(self, mesh)
+
+    def GetResource(self):
+        """ Get the resource on which to run """
+        return self._mesh.mesh.GetResource()
+
+    def SetResource(self, resource):
+        """ Set the resource on which to run """
+        self._mesh.mesh.SetResource(resource)
+
+    def SetNbProc(self, nbProc):
+        """ Set the number of processors for multinode """
+        if nbProc < 1:
+            raise ValueError("Number of processors must be at least 1")
+        self._mesh.mesh.SetNbProc(nbProc)
+
+    def GetNbProc(self):
+        """ Get the number of processors """
+        return self._mesh.mesh.GetNbProc()
+
+    def SetNbProcPerNode(self, nbProcPerNode):
+        """ Set the number of processors per node for multinode """
+        if nbProcPerNode < 1:
+            raise ValueError("Number of processors per node must be at least 1")
+
+        self._mesh.mesh.SetNbProcPerNode(nbProcPerNode)
+
+    def GetNbProcPerNode(self):
+        """ Get the number of processors per node """
+        return self._mesh.mesh.GetNbProcPerNode()
+
+    def SetNbNode(self, nbNode):
+        """ Set the number of nodes for multinode """
+        if nbNode < 1:
+            raise ValueError("Number of nodes must be at least 1")
+        self._mesh.mesh.SetNbNode(nbNode)
+
+    def GetNbNode(self):
+        """ Get the number of nodes """
+        return self._mesh.mesh.GetNbNode()
+
+    def SetWcKey(self, wcKey):
+        """ Set the wc-key for multinode job submission """
+        self._mesh.mesh.SetWcKey(wcKey)
+
+    def GetWcKey(self):
+        """ Get the wc-key """
+        return self._mesh.mesh.GetWcKey()
+
+    def SetWalltime(self, walltime):
+        """ Set the walltime (HH:MM:SS) for multinode job submission """
+        self._mesh.mesh.SetWalltime(walltime)
+
+    def GetWalltime(self):
+        """ Get the walltime """
+        return self._mesh.mesh.GetWalltime()
+
+    def __str__(self):
+        """ str conversion """
+        string = "\nParameter for MultiNode parallelism:\n"
+        string += "Resource: {}\n".format(self.GetResource())
+        string += "NbProc: {}\n".format(self.GetNbProc())
+        string += "NbProcPerNode: {}\n".format(self.GetNbProcPerNode())
+        string += "NbNode: {}\n".format(self.GetNbNode())
+        string += "WcKey: {}\n".format(self.GetWcKey())
+        string += "Walltime: {}\n".format(self.GetWalltime())
+
+        return string
+
+
 class ParallelMesh(Mesh):
     """
     Surcharge on Mesh for parallel computation of a mesh
@@ -7678,33 +7799,61 @@ class ParallelMesh(Mesh):
         if not isinstance(geom, geomBuilder.GEOM._objref_GEOM_Object):
             raise ValueError("geom argument must be a geometry")
 
+        import SHAPERSTUDY
+        import shaperBuilder
+        # If we have a shaper object, convert it into a geom object (temporary solution)
+        if isinstance(geom, SHAPERSTUDY.SHAPERSTUDY_ORB._objref_SHAPER_Object):
+            geom_obj = _shaperstudy2geom(geompyD, geom)
+        else:
+            geom_obj = geom
+
         # Splitting geometry into one geom containing 1D and 2D elements and a
         # list of 3D elements
-        super(ParallelMesh, self).__init__(smeshpyD, geompyD, geom, name, parallel=True)
+        super(ParallelMesh, self).__init__(smeshpyD, geompyD, geom_obj, name, parallel=True)
 
         if split_geom:
-            self._all_faces, self._solids = _split_geom(geompyD, geom)
+            self._all_faces, self._solids = _split_geom(geompyD, geom_obj)
 
-            self.UseExistingSegments()
-            self.UseExistingFaces()
-
-            self._algo2d = self.Triangle(geom=self._all_faces, algo="NETGEN_2D")
+            order = []
+            self._algo2d = self.Triangle(geom=geom_obj, algo="NETGEN_2D")
             self._algo3d = []
 
            for solid_id, solid in enumerate(self._solids):
                name = "Solid_{}".format(solid_id)
-                self.UseExistingSegments(geom=solid)
-                self.UseExistingFaces(geom=solid)
                algo3d = self.Tetrahedron(geom=solid, algo="NETGEN_3D_Remote")
                self._algo3d.append(algo3d)
 
-        self._param = ParallelismSettings(self)
+        self._param = None
+
+    def GetNbSolids(self):
+        """
+        Return the number of 3D solids
+        """
+        return len(self._solids)
+
+    def GetParallelismMethod(self):
+        """ Get the parallelism method """
+        return self.mesh.GetParallelismMethod()
+
+    def SetParallelismMethod(self, method):
+        """ Set the parallelism method """
+        if method not in [MULTITHREAD, MULTINODE]:
+            raise ValueError("Parallelism method can only be 0:MultiThread or 1:MultiNode")
+
+        self.mesh.SetParallelismMethod(method)
+
+        if method == MULTITHREAD:
+            self._param = MTParallelismSettings(self)
+        else:
+            self._param = MNParallelismSettings(self)
+
     def GetParallelismSettings(self):
         """
         Return class to set parameters for the parallelism
         """
+        if self._param is None:
+            raise Exception("You need to set the parallelism method first (SetParallelismMethod)")
         return self._param
 
     def AddGlobalHypothesis(self, hyp):
@@ -7731,7 +7880,6 @@ class ParallelMesh(Mesh):
         pass
 
 # End of ParallelMesh
-
 class meshProxy(SMESH._objref_SMESH_Mesh):
     """
     Private class used to compensate change of CORBA API of SMESH_Mesh for backward compatibility
@@ -7774,10 +7922,20 @@ class meshProxy(SMESH._objref_SMESH_Mesh):
         if len( args ) == 1:
             args += True,
         return SMESH._objref_SMESH_Mesh.ExportDAT(self, *args)
-    pass
+
 omniORB.registerObjref(SMESH._objref_SMESH_Mesh._NP_RepositoryId, meshProxy)
+class parallelMeshProxy(SMESH._objref_SMESH_ParallelMesh):
+    def __init__(self,*args):
+        SMESH._objref_SMESH_ParallelMesh.__init__(self,*args)
+    def __deepcopy__(self, memo=None):
+        new = self.__class__(self)
+        return new
+omniORB.registerObjref(SMESH._objref_SMESH_ParallelMesh._NP_RepositoryId, parallelMeshProxy)
+
+
+
 class submeshProxy(SMESH._objref_SMESH_subMesh):
     """
-- 
2.30.2
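
Usage note (not part of the patch itself): a minimal sketch of driving the multinode API added by this change from a SALOME Python session. The resource name "my_cluster", the GEOM object "shape" and the hypothesis object "netgen_hyp" are placeholders; the resource must be declared in the SALOME resource catalog (CatalogResources.xml), and the hypothesis is created as in doc/examples/creating_parallel_mesh.py.

    import salome
    salome.salome_init()
    from salome.smesh import smeshBuilder

    smesh = smeshBuilder.New()

    # ParallelMesh splits the shape: NETGEN_2D on the geometry,
    # NETGEN_3D_Remote on each solid (see _split_geom and ParallelMesh.__init__)
    par_mesh = smesh.ParallelMesh(shape, name="par_mesh")  # "shape": an existing GEOM object
    par_mesh.AddGlobalHypothesis(netgen_hyp)               # "netgen_hyp": a NETGEN Parameters hypothesis

    # Multinode mode: the solids are meshed on the cluster through mesher_launcher.py
    par_mesh.SetParallelismMethod(smeshBuilder.MULTINODE)

    param = par_mesh.GetParallelismSettings()  # returns an MNParallelismSettings object
    param.SetResource("my_cluster")            # resource name from the SALOME catalog (placeholder)
    param.SetNbProc(24)
    param.SetNbProcPerNode(12)
    param.SetNbNode(2)
    param.SetWalltime("02:00:00")              # HH:MM:SS, default is 01:00:00
    print(param)                               # summary via MNParallelismSettings.__str__

    ok = par_mesh.Compute()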