SMESH_SMESH.hxx
MG_ADAPT.hxx
SMESH_Homard.hxx
+ ctpl.h
)
# --- sources ---
ADD_LIBRARY(SMESHimpl ${SMESHimpl_SOURCES})
IF(WIN32)
TARGET_COMPILE_OPTIONS(SMESHimpl PRIVATE /bigobj)
- ADD_DEFINITIONS(-DNOMINMAX)
+ ADD_DEFINITIONS(-DNOMINMAX)
ENDIF(WIN32)
TARGET_LINK_LIBRARIES(SMESHimpl ${_link_LIBRARIES} )
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
* List the hypothesis used by the algorithm associated to the shape.
* Hypothesis associated to father shape -are- taken into account (see
* GetAppliedHypothesis). Relevant hypothesis have a name (type) listed in
- * the algorithm. This method could be surcharged by specific algorithms, in
 + * the algorithm. This method can be overridden by specific algorithms, in
* case of several hypothesis simultaneously applicable.
*/
//=============================================================================
{
SMESH_Algo* me = const_cast< SMESH_Algo* >( this );
- std::list<const SMESHDS_Hypothesis *> savedHyps; // don't delete the list if
+ std::list<const SMESHDS_Hypothesis *> savedHyps; // don't delete the list if
savedHyps.swap( me->_usedHypList ); // it does not change (#16578)
me->_usedHypList.clear();
return false; // E seems closed
double edgeTol = 10 * curve.Tolerance();
- double lenTol2 = lineLen2 * 1e-4;
+ double lenTol2 = lineLen2 * 1e-4;
double tol2 = Min( edgeTol * edgeTol, lenTol2 );
const double nbSamples = 7;
/*!
* \brief Sets event listener to submeshes if necessary
* \param subMesh - submesh where algo is set
- *
+ *
* After being set, event listener is notified on each event of a submesh.
* By default non listener is set
*/
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
return aMesh;
}
+//=============================================================================
+/*
+ * Parallel computation of a sub-mesh.
+ * This function is passed to the thread pool.
+ */
+//=============================================================================
+const std::function<void(int,
+ SMESH_subMesh*,
+ SMESH_subMesh::compute_event,
+ SMESH_subMesh*,
+ bool,
+ TopTools_IndexedMapOfShape *,
+ TSetOfInt*)>
+ parallel_compute([&] (int id,
+ SMESH_subMesh* sm,
+ SMESH_subMesh::compute_event event,
+ SMESH_subMesh *shapeSM,
+ bool aShapeOnly,
+ TopTools_IndexedMapOfShape *allowedSubShapes,
+ TSetOfInt* aShapesId) -> void
+{
+ if (sm->GetComputeState() == SMESH_subMesh::READY_TO_COMPUTE)
+ {
+ sm->SetAllowedSubShapes( fillAllowed( shapeSM, aShapeOnly, allowedSubShapes ));
+ //setCurrentSubMesh( sm );
+ sm->ComputeStateEngine(event);
+ //setCurrentSubMesh( nullptr );
+ sm->SetAllowedSubShapes( nullptr );
+ }
+
+ if ( aShapesId )
+ aShapesId->insert( sm->GetId() );
+
+});
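
For reference, a minimal sketch (not part of the patch; standalone apart from ctpl.h and Boost) of the ctpl calling convention that parallel_compute follows: thread_pool::push() supplies the worker-thread index as the first argument of the pushed functor and returns a std::future that the caller collects and waits on, which is the per-shape-type barrier used below.

#include "ctpl.h"
#include <future>
#include <iostream>
#include <vector>

int main()
{
  ctpl::thread_pool pool(2);                      // two workers, as in the patch
  std::vector<std::future<void>> pending;

  for (int task = 0; task < 4; ++task)
    pending.push_back(pool.push([](int id, int taskNb) {
      // 'id' is the index of the worker thread chosen by the pool
      std::cout << "task " << taskNb << " on worker " << id << std::endl;
    }, task));

  for (auto & f : pending)                        // barrier: wait for every task
    f.wait();
  return 0;
}
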
+
//=============================================================================
/*
* Compute a mesh
const bool complexShapeFirst = true;
const int globalAlgoDim = 100;
+ // Thread pool used for the parallel computation (thread count hard-coded for now)
+ if (!_pool){
+ _pool = new ctpl::thread_pool(2);
+ }
+
SMESH_subMeshIteratorPtr smIt;
// Fix of Issue 22150. Due to !BLSURF->OnlyUnaryInput(), BLSURF computes edges
// Mesh all the sub-shapes starting from vertices
// ===============================================
+ TopAbs_ShapeEnum previousShapeType = TopAbs_VERTEX;
smIt = shapeSM->getDependsOnIterator(includeSelf, !complexShapeFirst);
+ std::vector<std::future<void>> pending;
while ( smIt->more() )
{
SMESH_subMesh* smToCompute = smIt->next();
if ( !aMesh.HasShapeToMesh() && shapeType == TopAbs_VERTEX )
continue;
+ std::cout << "Shape type " << shapeType << ", previous " << previousShapeType << std::endl;
+ if (shapeType != previousShapeType) {
+ // Wait for all tasks of the previous shape type to finish
+ for (auto it = std::begin(pending); it != std::end(pending); ++it) {
+ std::cout << "Waiting" << std::endl;
+ it->wait();
+ }
+ // Reset the pending-task list before starting the next shape type
+ previousShapeType = shapeType;
+ pending.clear();
+ }
+
// check for preview dimension limitations
if ( aShapesId && GetShapeDim( shapeType ) > (int)aDim )
{
smToCompute->ComputeStateEngine( SMESH_subMesh::CHECK_COMPUTE_STATE );
continue;
}
+ pending.push_back(_pool->push(parallel_compute, smToCompute, computeEvent,
+ shapeSM, aShapeOnly, allowedSubShapes,
+ aShapesId));
+ std::cout << "Launched " << smToCompute << " shape type " << shapeType << std::endl;
- if (smToCompute->GetComputeState() == SMESH_subMesh::READY_TO_COMPUTE)
- {
- if (_compute_canceled)
- return false;
- smToCompute->SetAllowedSubShapes( fillAllowed( shapeSM, aShapeOnly, allowedSubShapes ));
- setCurrentSubMesh( smToCompute );
- smToCompute->ComputeStateEngine( computeEvent );
- setCurrentSubMesh( nullptr );
- smToCompute->SetAllowedSubShapes( nullptr );
- }
- // we check all the sub-meshes here and detect if any of them failed to compute
- if (smToCompute->GetComputeState() == SMESH_subMesh::FAILED_TO_COMPUTE &&
- ( shapeType != TopAbs_EDGE || !SMESH_Algo::isDegenerated( TopoDS::Edge( shape ))))
- ret = false;
- else if ( aShapesId )
- aShapesId->insert( smToCompute->GetId() );
}
+
+ // Wait for the last batch of tasks before finishing the compute
+ for (auto it = std::begin(pending); it != std::end(pending); ++it) {
+ it->wait();
+ }
+ pending.clear();
//aMesh.GetMeshDS()->Modified();
return ret;
}
const TopoDS_Shape& aSubShape = smToCompute->GetSubShape();
const int aShapeDim = GetShapeDim( aSubShape );
if ( aShapeDim < 1 ) break;
-
+
SMESH_Algo* algo = GetAlgo( smToCompute );
if ( algo && !algo->NeedDiscreteBoundary() ) {
if ( algo->SupportSubmeshes() ) {
#include "SMESH_Algo.hxx"
#include "SMESH_ComputeError.hxx"
+#include "ctpl.h"
+
#include <map>
#include <list>
#include <vector>
#include <string>
+
#include <TopoDS_Shape.hxx>
#include <TopTools_IndexedMapOfShape.hxx>
SHAPE_ONLY_UPWARD = 3 // SHAPE_ONLY | UPWARD
};
/*!
- * \brief Computes aMesh on aShape
+ * \brief Computes aMesh on aShape
* \param aMesh - the mesh.
* \param aShape - the shape.
* \param aFlags - ComputeFlags. By default compute the whole mesh and compact at the end.
const SMESH_subMesh* GetCurrentSubMesh() const;
/*!
- * \brief evaluates size of prospective mesh on a shape
+ * \brief evaluates size of prospective mesh on a shape
* \param aMesh - the mesh
* \param aShape - the shape
* \param aResMap - map for prospective numbers of elements
volatile bool _compute_canceled;
std::list< SMESH_subMesh* > _sm_current;
+ // TODO: replace the hard-coded pool size by a configurable number of threads
+ ctpl::thread_pool * _pool = nullptr; //!< thread pool for the parallel computation
};
#endif
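
Regarding the TODO above on the hard-coded pool size, a minimal sketch of one possible way to size the pool from the machine instead of the fixed value of 2. The environment variable SMESH_NB_THREADS and the helper createComputePool() are purely illustrative names, not existing SMESH API.

#include "ctpl.h"
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <thread>

// Hypothetical helper: choose a pool size from the hardware, with an optional override.
static ctpl::thread_pool * createComputePool()
{
  unsigned nbThreads = std::max(2u, std::thread::hardware_concurrency());
  if (const char * env = std::getenv("SMESH_NB_THREADS"))   // illustrative override
    nbThreads = static_cast<unsigned>(std::max(1, std::atoi(env)));
  return new ctpl::thread_pool(static_cast<int>(nbThreads));
}

int main()
{
  std::unique_ptr<ctpl::thread_pool> pool(createComputePool());
  return pool->size() > 0 ? 0 : 1;
}
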
#include "MEDCouplingMemArray.hxx"
+#include "ctpl.h"
+
#include <map>
#include <list>
#include <vector>
int UNVToMesh(const char* theFileName);
int MEDToMesh(const char* theFileName, const char* theMeshName);
-
+
std::string STLToMesh(const char* theFileName);
int CGNSToMesh(const char* theFileName, const int theMeshIndex, std::string& theMeshName);
-
+
SMESH_ComputeErrorPtr GMFToMesh(const char* theFileName,
bool theMakeRequiredGroups = true );
SMESH_Hypothesis::Hypothesis_Status
AddHypothesis(const TopoDS_Shape & aSubShape, int anHypId, std::string* error=0);
-
+
SMESH_Hypothesis::Hypothesis_Status
RemoveHypothesis(const TopoDS_Shape & aSubShape, int anHypId);
-
+
const std::list <const SMESHDS_Hypothesis * >&
GetHypothesisList(const TopoDS_Shape & aSubShape) const;
const SMESH_HypoFilter& aFilter,
const bool andAncestors,
TopoDS_Shape* assignedTo=0) const;
-
+
int GetHypotheses(const TopoDS_Shape & aSubShape,
const SMESH_HypoFilter& aFilter,
std::list< const SMESHDS_Hypothesis * >& aHypList,
const SMESH_HypoFilter& aFilter,
const bool andAncestors,
TopoDS_Shape* assignedTo=0) const;
-
+
int GetHypotheses(const SMESH_subMesh * aSubMesh,
const SMESH_HypoFilter& aFilter,
std::list< const SMESHDS_Hypothesis * >& aHypList,
SMESH_Hypothesis * GetHypothesis(const int aHypID) const;
const std::list<SMESHDS_Command*> & GetLog();
-
+
void ClearLog();
-
+
int GetId() const { return _id; }
-
+
bool MeshExists( int meshId ) const;
-
+
SMESH_Mesh* FindMesh( int meshId ) const;
SMESHDS_Mesh * GetMeshDS() { return _meshDS; }
const SMESHDS_Mesh * GetMeshDS() const { return _meshDS; }
-
+
SMESH_Gen *GetGen() { return _gen; }
SMESH_subMesh *GetSubMesh(const TopoDS_Shape & aSubShape);
-
+
SMESH_subMesh *GetSubMeshContaining(const TopoDS_Shape & aSubShape) const;
-
+
SMESH_subMesh *GetSubMeshContaining(const int aShapeID) const;
/*!
* \brief Return submeshes of groups containing the given subshape
* \brief check if a hypothesis allowing notconform mesh is present
*/
bool IsNotConformAllowed() const;
-
+
bool IsMainShape(const TopoDS_Shape& theShape) const;
TopoDS_Shape GetShapeByEntry(const std::string& entry) const;
bool withRequiredGroups = true );
double GetComputeProgress() const;
-
+
smIdType NbNodes() const;
smIdType Nb0DElements() const;
smIdType NbBalls() const;
-
+
smIdType NbEdges(SMDSAbs_ElementOrder order = ORDER_ANY) const;
-
+
smIdType NbFaces(SMDSAbs_ElementOrder order = ORDER_ANY) const;
smIdType NbTriangles(SMDSAbs_ElementOrder order = ORDER_ANY) const;
smIdType NbQuadrangles(SMDSAbs_ElementOrder order = ORDER_ANY) const;
smIdType NbBiQuadQuadrangles() const;
smIdType NbBiQuadTriangles() const;
smIdType NbPolygons(SMDSAbs_ElementOrder order = ORDER_ANY) const;
-
+
smIdType NbVolumes(SMDSAbs_ElementOrder order = ORDER_ANY) const;
smIdType NbTetras(SMDSAbs_ElementOrder order = ORDER_ANY) const;
smIdType NbHexas(SMDSAbs_ElementOrder order = ORDER_ANY) const;
smIdType NbBiQuadPrisms() const;
smIdType NbHexagonalPrisms() const;
smIdType NbPolyhedrons() const;
-
+
smIdType NbSubMesh() const;
-
+
size_t NbGroup() const { return _mapGroup.size(); }
int NbMeshes() const; // nb meshes in the Study
typedef boost::shared_ptr< SMDS_Iterator<SMESH_Group*> > GroupIteratorPtr;
GroupIteratorPtr GetGroups() const;
-
+
std::list<int> GetGroupIds() const;
-
+
SMESH_Group* GetGroup (const int theGroupID) const;
bool RemoveGroup (const int theGroupID);
const SMESH_subMesh* smAfter ) const;
std::ostream& Dump(std::ostream & save);
-
+
+ // Lock / unlock the mesh for multi-threaded modification
+ void Lock() { _my_lock.lock(); }
+ void Unlock() { _my_lock.unlock(); }
+
private:
void exportMEDCommmon(DriverMED_W_SMESHDS_Mesh& myWriter,
void fillAncestorsMap(const TopoDS_Shape& theShape);
void getAncestorsSubMeshes(const TopoDS_Shape& theSubShape,
std::vector< SMESH_subMesh* >& theSubMeshes) const;
-
+
protected:
int _id; // id given by creator (unique within the creator instance)
int _groupId; // id generator for group objects
class SubMeshHolder;
SubMeshHolder* _subMeshHolder;
-
+
bool _isAutoColor;
bool _isModified; //!< modified since last total re-compute, issue 0020693
double _shapeDiagonal; //!< diagonal size of bounding box of shape to mesh
-
+
TopTools_IndexedDataMapOfShapeListOfShape _mapAncestors;
mutable std::vector<SMESH_subMesh*> _ancestorSubMeshes; // to speed up GetHypothes[ei]s()
// 2) to forget not loaded mesh data at hyp modification
TCallUp* _callUp;
+ // Mutex protecting multi-threaded writes to SMESH_Mesh
+ std::mutex _my_lock;
+
protected:
SMESH_Mesh();
SMESH_Mesh(const SMESH_Mesh&) {};
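
A minimal, self-contained sketch (not SMESH code) of why the _my_lock mutex and the Lock()/Unlock() pair above are needed: two pool threads appending to the same container race unless the writes are serialised. NodeStore is a toy stand-in for the mesh data structure.

#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

struct NodeStore {                       // toy stand-in, not SMESH_Mesh
  void Lock()   { _my_lock.lock(); }
  void Unlock() { _my_lock.unlock(); }
  void AddNode(double x) { _nodes.push_back(x); }   // not thread-safe on its own
  std::size_t NbNodes() const { return _nodes.size(); }
private:
  std::vector<double> _nodes;
  std::mutex _my_lock;
};

int main()
{
  NodeStore store;
  auto worker = [&store]() {
    for (int i = 0; i < 10000; ++i) {
      store.Lock();                      // serialise the shared write
      store.AddNode(i);
      store.Unlock();
    }
  };
  std::thread t1(worker), t2(worker);
  t1.join(); t2.join();
  std::cout << store.NbNodes() << std::endl;   // always 20000 with the lock held
  return 0;
}
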
* \param [in] event - what happens
* \param [in] anHyp - a hypothesis
* \return SMESH_Hypothesis::Hypothesis_Status - a treatment result.
- *
+ *
* Optional description of a problematic situation (if any) can be retrieved
* via GetComputeError().
*/
// detect algorithm hiding
//
- if ( ret == SMESH_Hypothesis::HYP_OK &&
- ( event == ADD_ALGO || event == ADD_FATHER_ALGO ) && algo &&
+ if ( ret == SMESH_Hypothesis::HYP_OK &&
+ ( event == ADD_ALGO || event == ADD_FATHER_ALGO ) && algo &&
algo->GetName() == anHyp->GetName() )
{
// is algo hidden?
else if (( event == COMPUTE || event == COMPUTE_SUBMESH )
&& !_alwaysComputed )
{
+ _father->Lock(); // protect the mesh data structure while adding the vertex node
const TopoDS_Vertex & V = TopoDS::Vertex( _subShape );
gp_Pnt P = BRep_Tool::Pnt(V);
if ( SMDS_MeshNode * n = _father->GetMeshDS()->AddNode(P.X(), P.Y(), P.Z()) ) {
_father->GetMeshDS()->SetNodeOnVertex(n,_Id);
_computeState = COMPUTE_OK;
}
+ _father->Unlock();
}
if ( event == MODIF_ALGO_STATE )
cleanDependants();
case COMPUTE:
case COMPUTE_SUBMESH:
{
+ _father->Lock();
algo = GetAlgo();
ASSERT(algo);
ret = algo->CheckHypothesis((*_father), _subShape, hyp_status);
break; // goto exit
}
}
+ _father->Unlock();
// Compute
// to restore cout that may be redirected by algo
}
else
{
+ std::cout << "Running compute for " << _father << " of shape type " << shape.ShapeType() << std::endl;
ret = algo->Compute((*_father), shape);
}
// algo can set _computeError of submesh
updateDependantsState( SUBMESH_COMPUTED );
}
// let algo clear its data gathered while algo->Compute()
+ _father->Lock();
algo->CheckHypothesis((*_father), _subShape, hyp_status);
+ _father->Unlock();
}
break;
case COMPUTE_CANCELED: // nothing to do
break;
}
- notifyListenersOnEvent( event, COMPUTE_EVENT );
+ //notifyListenersOnEvent( event, COMPUTE_EVENT );
return ret;
}
//=======================================================================
//function : cleanDependants
-//purpose :
+//purpose :
//=======================================================================
void SMESH_subMesh::cleanDependants()
//=======================================================================
//function : removeSubMeshElementsAndNodes
-//purpose :
+//purpose :
//=======================================================================
void SMESH_subMesh::removeSubMeshElementsAndNodes()
* \param listener - the listener to store
* \param data - the listener data to store
* \param where - the submesh to store the listener and it's data
- *
+ *
* It remembers the submesh where it puts the listener in order to delete
* them when HYP_OK algo_state is lost
* After being set, event listener is notified on each event of where submesh.
* \brief Sets an event listener and its data to a submesh
* \param listener - the listener to store
* \param data - the listener data to store
- *
+ *
* After being set, event listener is notified on each event of a submesh.
*/
//================================================================================
* \param subMesh - the submesh where the event occurs
* \param data - listener data stored in the subMesh
* \param hyp - hypothesis, if eventType is algo_event
- *
+ *
* The base implementation translates CLEAN event to the subMesh
* stored in listener data. Also it sends SUBMESH_COMPUTED event in case of
* successful COMPUTE event.
--- /dev/null
+
+/*********************************************************
+ *
+ * Copyright (C) 2014 by Vitaliy Vitsentiy
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *********************************************************/
+
+
+#ifndef __ctpl_thread_pool_H__
+#define __ctpl_thread_pool_H__
+
+#include <functional>
+#include <thread>
+#include <atomic>
+#include <vector>
+#include <memory>
+#include <exception>
+#include <future>
+#include <mutex>
+#include <boost/lockfree/queue.hpp>
+
+
+#ifndef _ctplThreadPoolLength_
+#define _ctplThreadPoolLength_ 100
+#endif
+
+
+// thread pool to run user's functors with signature
+// ret func(int id, other_params)
+// where id is the index of the thread that runs the functor
+// ret is some return type
+
+
+namespace ctpl {
+
+ class thread_pool {
+
+ public:
+
+ thread_pool() : q(_ctplThreadPoolLength_) { this->init(); }
+ thread_pool(int nThreads, int queueSize = _ctplThreadPoolLength_) : q(queueSize) { this->init(); this->resize(nThreads); }
+
+ // the destructor waits for all the functions in the queue to be finished
+ ~thread_pool() {
+ this->stop(true);
+ }
+
+ // get the number of running threads in the pool
+ int size() { return static_cast<int>(this->threads.size()); }
+
+ // number of idle threads
+ int n_idle() { return this->nWaiting; }
+ std::thread & get_thread(int i) { return *this->threads[i]; }
+
+ // change the number of threads in the pool
+ // should be called from a single thread; do not interleave with other calls to resize() or stop()
+ // nThreads must be >= 0
+ void resize(int nThreads) {
+ if (!this->isStop && !this->isDone) {
+ int oldNThreads = static_cast<int>(this->threads.size());
+ if (oldNThreads <= nThreads) { // if the number of threads is increased
+ this->threads.resize(nThreads);
+ this->flags.resize(nThreads);
+
+ for (int i = oldNThreads; i < nThreads; ++i) {
+ this->flags[i] = std::make_shared<std::atomic<bool>>(false);
+ this->set_thread(i);
+ }
+ }
+ else { // the number of threads is decreased
+ for (int i = oldNThreads - 1; i >= nThreads; --i) {
+ *this->flags[i] = true; // this thread will finish
+ this->threads[i]->detach();
+ }
+ {
+ // stop the detached threads that were waiting
+ std::unique_lock<std::mutex> lock(this->mutex);
+ this->cv.notify_all();
+ }
+ this->threads.resize(nThreads); // safe to delete because the threads are detached
+ this->flags.resize(nThreads); // safe to delete because the threads have copies of shared_ptr of the flags, not originals
+ }
+ }
+ }
+
+ // empty the queue
+ void clear_queue() {
+ std::function<void(int id)> * _f;
+ while (this->q.pop(_f))
+ delete _f; // empty the queue
+ }
+
+ // pops a functional wrapper of the original function
+ std::function<void(int)> pop() {
+ std::function<void(int id)> * _f = nullptr;
+ this->q.pop(_f);
+ std::unique_ptr<std::function<void(int id)>> func(_f); // at return, delete the function even if an exception occurred
+
+ std::function<void(int)> f;
+ if (_f)
+ f = *_f;
+ return f;
+ }
+
+
+ // wait for all computing threads to finish and stop all threads
+ // may be called asynchronously so as not to pause the calling thread while waiting
+ // if isWait == true, all the functions in the queue are run, otherwise the queue is cleared without running the functions
+ void stop(bool isWait = false) {
+ if (!isWait) {
+ if (this->isStop)
+ return;
+ this->isStop = true;
+ for (int i = 0, n = this->size(); i < n; ++i) {
+ *this->flags[i] = true; // command the threads to stop
+ }
+ this->clear_queue(); // empty the queue
+ }
+ else {
+ if (this->isDone || this->isStop)
+ return;
+ this->isDone = true; // give the waiting threads a command to finish
+ }
+ {
+ std::unique_lock<std::mutex> lock(this->mutex);
+ this->cv.notify_all(); // stop all waiting threads
+ }
+ for (int i = 0; i < static_cast<int>(this->threads.size()); ++i) { // wait for the computing threads to finish
+ if (this->threads[i]->joinable())
+ this->threads[i]->join();
+ }
+ // if there were no threads in the pool but some functors in the queue, the functors are not deleted by the threads
+ // therefore delete them here
+ this->clear_queue();
+ this->threads.clear();
+ this->flags.clear();
+ }
+
+ template<typename F, typename... Rest>
+ auto push(F && f, Rest&&... rest) ->std::future<decltype(f(0, rest...))> {
+ auto pck = std::make_shared<std::packaged_task<decltype(f(0, rest...))(int)>>(
+ std::bind(std::forward<F>(f), std::placeholders::_1, std::forward<Rest>(rest)...)
+ );
+
+ auto _f = new std::function<void(int id)>([pck](int id) {
+ (*pck)(id);
+ });
+ this->q.push(_f);
+
+ std::unique_lock<std::mutex> lock(this->mutex);
+ this->cv.notify_one();
+
+ return pck->get_future();
+ }
+
+ // run the user's function that accepts an int argument - the id of the running thread; the returned value is templatized
+ // the operator returns a std::future, from which the user can get the result and rethrow any caught exceptions
+ template<typename F>
+ auto push(F && f) ->std::future<decltype(f(0))> {
+ auto pck = std::make_shared<std::packaged_task<decltype(f(0))(int)>>(std::forward<F>(f));
+
+ auto _f = new std::function<void(int id)>([pck](int id) {
+ (*pck)(id);
+ });
+ this->q.push(_f);
+
+ std::unique_lock<std::mutex> lock(this->mutex);
+ this->cv.notify_one();
+
+ return pck->get_future();
+ }
+
+
+ private:
+
+ // deleted
+ thread_pool(const thread_pool &);// = delete;
+ thread_pool(thread_pool &&);// = delete;
+ thread_pool & operator=(const thread_pool &);// = delete;
+ thread_pool & operator=(thread_pool &&);// = delete;
+
+ void set_thread(int i) {
+ std::shared_ptr<std::atomic<bool>> flag(this->flags[i]); // a copy of the shared ptr to the flag
+ auto f = [this, i, flag/* a copy of the shared ptr to the flag */]() {
+ std::atomic<bool> & _flag = *flag;
+ std::function<void(int id)> * _f;
+ bool isPop = this->q.pop(_f);
+ while (true) {
+ while (isPop) { // if there is anything in the queue
+ std::unique_ptr<std::function<void(int id)>> func(_f); // at return, delete the function even if an exception occurred
+ (*_f)(i);
+
+ if (_flag)
+ return; // the thread is wanted to stop, return even if the queue is not empty yet
+ else
+ isPop = this->q.pop(_f);
+ }
+
+ // the queue is empty here, wait for the next command
+ std::unique_lock<std::mutex> lock(this->mutex);
+ ++this->nWaiting;
+ this->cv.wait(lock, [this, &_f, &isPop, &_flag](){ isPop = this->q.pop(_f); return isPop || this->isDone || _flag; });
+ --this->nWaiting;
+
+ if (!isPop)
+ return; // if the queue is empty and this->isDone == true or *flag then return
+ }
+ };
+ this->threads[i].reset(new std::thread(f)); // compiler may not support std::make_unique()
+ }
+
+ void init() { this->nWaiting = 0; this->isStop = false; this->isDone = false; }
+
+ std::vector<std::unique_ptr<std::thread>> threads;
+ std::vector<std::shared_ptr<std::atomic<bool>>> flags;
+ mutable boost::lockfree::queue<std::function<void(int id)> *> q;
+ std::atomic<bool> isDone;
+ std::atomic<bool> isStop;
+ std::atomic<int> nWaiting; // how many threads are waiting
+
+ std::mutex mutex;
+ std::condition_variable cv;
+ };
+
+}
+
+#endif // __ctpl_thread_pool_H__
+
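
A minimal usage sketch (not part of the patch) of the exception behaviour documented above: an exception thrown inside a pushed functor is captured by the std::packaged_task and rethrown when the returned future is queried with get().

#include "ctpl.h"
#include <iostream>
#include <stdexcept>

int main()
{
  ctpl::thread_pool pool(1);
  auto result = pool.push([](int /*id*/) -> int {
    throw std::runtime_error("failure inside a worker");   // propagated via the future
  });
  try {
    result.get();                        // rethrows the worker's exception here
  } catch (const std::exception & e) {
    std::cout << "caught: " << e.what() << std::endl;
  }
  return 0;
}
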
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
bool StdMeshers_Regular_1D::computeInternalParameters(SMESH_Mesh & theMesh,
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================
SMESHDS_Mesh * meshDS = theMesh.GetMeshDS();
+ theMesh.Lock(); // protect the shared mesh data structure while this edge is computed
const TopoDS_Edge & EE = TopoDS::Edge(theShape);
TopoDS_Edge E = TopoDS::Edge(EE.Oriented(TopAbs_FORWARD));
int shapeID = meshDS->ShapeToIndex( E );
ASSERT(!VLast.IsNull());
const SMDS_MeshNode * nFirst = SMESH_Algo::VertexNode( VFirst, meshDS );
const SMDS_MeshNode * nLast = SMESH_Algo::VertexNode( VLast, meshDS );
- if ( !nFirst || !nLast )
+ if ( !nFirst || !nLast ) {
+ theMesh.Unlock();
return error( COMPERR_BAD_INPUT_MESH, "No node on vertex");
-
+ }
// remove elements created by e.g. pattern mapping (PAL21999)
// CLEAN event is incorrectly ptopagated seemingly due to Propagation hyp
// so TEMPORARY solution is to clean the submesh manually
BRepAdaptor_Curve C3d( E );
if ( ! computeInternalParameters( theMesh, C3d, length, f, l, params, reversed, true )) {
+ theMesh.Unlock();
return false;
}
redistributeNearVertices( theMesh, C3d, length, params, VFirst, VLast );
meshDS->SetMeshElementOnShape(edge, shapeID);
}
}
+ theMesh.Unlock();
return true;
}
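
The Lock()/Unlock() calls above have to be repeated on every early-return path (see the error branches). A minimal sketch, not part of the patch, of an RAII alternative that releases the lock automatically; MeshLockGuard and the toy Mesh type are illustrative, not SMESH API.

#include <iostream>
#include <mutex>

struct Mesh {                            // toy stand-in exposing Lock()/Unlock()
  void Lock()   { _m.lock(); }
  void Unlock() { _m.unlock(); }
private:
  std::mutex _m;
};

class MeshLockGuard {                    // hypothetical RAII wrapper
public:
  explicit MeshLockGuard(Mesh & m) : _m(m) { _m.Lock(); }
  ~MeshLockGuard() { _m.Unlock(); }      // runs on early returns as well
private:
  Mesh & _m;
};

bool computeEdge(Mesh & mesh, bool inputOk)
{
  MeshLockGuard guard(mesh);             // locked for the whole scope
  if (!inputOk)
    return false;                        // no explicit Unlock() needed
  // ... modify the mesh data structure ...
  return true;
}

int main()
{
  Mesh m;
  std::cout << computeEdge(m, true) << " " << computeEdge(m, false) << std::endl;
  return 0;
}
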
//=============================================================================
/*!
- *
+ *
*/
//=============================================================================