bool operator==(const T *other) const { return _ptr==other; }
MCAuto &operator=(const MCAuto& other) { if(_ptr!=other._ptr) { destroyPtr(); referPtr(other._ptr); } return *this; }
MCAuto &operator=(T *ptr) { if(_ptr!=ptr) { destroyPtr(); _ptr=ptr; } return *this; }
- void takeRef(T *ptr) { if(_ptr!=ptr) { destroyPtr(); _ptr=ptr; if(_ptr) _ptr->incrRef(); } }
+ void takeRef(T *ptr) { if(_ptr!=ptr) { destroyPtr(); referPtr(ptr); } }
T *operator->() { return _ptr ; }
const T *operator->() const { return _ptr; }
T& operator*() { return *_ptr; }
void rearrange(std::size_t newNbOfCompo);
void transpose();
void pushBackSilent(T val);
- void pushBackValsSilent(const T *valsBg, const T *valsEnd);
+ template<class InputIterator>
+ void pushBackValsSilent(InputIterator valsBg, InputIterator valsEnd);
T popBackSilent();
T front() const;
T back() const;
void sortToHaveConsecutivePairs();
MCAuto<DataArrayType> fromLinkedListOfPairToList() const;
DataArrayType *getDifferentValues() const;
+ MCAuto<DataArrayType> forThisAsPartitionBuildReduction(const MCAuto<DataArrayIdType>& commonEntities, const MCAuto<DataArrayIdType>& commonEntitiesIndex,
+ MCAuto<DataArrayType>& partitionsToBeModified, MCAuto<DataArrayIdType>& partitionsToBeModifiedIndex) const;
std::vector<DataArrayIdType *> partitionByDifferentValues(std::vector<T>& differentIds) const;
std::vector< std::pair<mcIdType,mcIdType> > splitInBalancedSlices(mcIdType nbOfSlices) const;
static DataArrayType *Modulus(const DataArrayType *a1, const DataArrayType *a2);
void modulusEqual(const DataArrayType *other);
static DataArrayType *Pow(const DataArrayType *a1, const DataArrayType *a2);
void powEqual(const DataArrayType *other);
- //MemArray<T>& accessToMemArray() { return _mem; }
- //const MemArray<T>& accessToMemArray() const { return _mem; }
public:
static DataArrayIdType *FindPermutationFromFirstToSecond(const DataArrayType *ids1, const DataArrayType *ids2);
static DataArrayIdType *FindPermutationFromFirstToSecondDuplicate(const DataArrayType *ids1, const DataArrayType *ids2);
else
throw INTERP_KERNEL::Exception("DataArrayDouble::insertAtTheEnd : not available for DataArrayDouble with number of components different than 1 !");
}
+
+ /*!
+ * This method adds at the end of \a this a series of values [\c valsBg,\c valsEnd). This method does \b not update its time label, to avoid useless incrementation
+ * of counter. So the caller is expected to call TimeLabel::declareAsNew on \a this at the end of the push session.
+ *
+ * \param [in] valsBg - an array of values to push at the end of \c this.
+ * \param [in] valsEnd - specifies the end of the array \a valsBg, so that
+ * the last value of \a valsBg is \a valsEnd[ -1 ].
+ * \throw If \a this has already been allocated with number of components different from one.
+ * \sa DataArrayDouble::pushBackSilent
+ */
+ template<class T>
+ template<class InputIterator>
+ void DataArrayTemplate<T>::pushBackValsSilent(InputIterator valsBg, InputIterator valsEnd)
+ {
+ std::size_t nbCompo(getNumberOfComponents());
+ if(nbCompo==1)
+ _mem.insertAtTheEnd(valsBg,valsEnd);
+ else if(nbCompo==0)
+ {
+ // Not yet allocated with a fixed component count : adopt single-component layout on the fly.
+ _info_on_compo.resize(1);
+ _mem.insertAtTheEnd(valsBg,valsEnd);
+ }
+ else
+ {
+ // Use the actual array type name in the message : this template is instantiated for several
+ // array types, so hardcoding "DataArrayDouble" here would produce a misleading diagnostic.
+ std::ostringstream oss; oss << Traits<T>::ArrayTypeName << "::pushBackValsSilent : not available for " << Traits<T>::ArrayTypeName << " with number of components different than 1 !";
+ throw INTERP_KERNEL::Exception(oss.str().c_str());
+ }
+ }
}
#include "MCAuto.hxx"
#include "MEDCouplingMap.txx"
+#include <set>
+#include <limits>
+#include <iterator>
#include <sstream>
#include <cstdlib>
#include <numeric>
#include <algorithm>
namespace MEDCoupling
{
}
}
- /*!
- * This method adds at the end of \a this a series of values [\c valsBg,\c valsEnd). This method do \b not update its time label to avoid useless incrementation
- * of counter. So the caller is expected to call TimeLabel::declareAsNew on \a this at the end of the push session.
- *
- * \param [in] valsBg - an array of values to push at the end of \c this.
- * \param [in] valsEnd - specifies the end of the array \a valsBg, so that
- * the last value of \a valsBg is \a valsEnd[ -1 ].
- * \throw If \a this has already been allocated with number of components different from one.
- * \sa DataArrayDouble::pushBackSilent
- */
- template<class T>
- void DataArrayTemplate<T>::pushBackValsSilent(const T *valsBg, const T *valsEnd)
- {
- std::size_t nbCompo(getNumberOfComponents());
- if(nbCompo==1)
- _mem.insertAtTheEnd(valsBg,valsEnd);
- else if(nbCompo==0)
- {
- _info_on_compo.resize(1);
- _mem.insertAtTheEnd(valsBg,valsEnd);
- }
- else
- {
- std::ostringstream oss; oss << Traits<T>::ArrayTypeName << "::pushBackValsSilent : not available for DataArrayDouble with number of components different than 1 !";
- throw INTERP_KERNEL::Exception(oss.str().c_str());
- }
- }
-
/*!
* This method returns silently ( without updating time label in \a this ) the last value, if any and suppress it.
* \throw If \a this is already empty.
* one component.
* \return double - the maximal value among all values of \a this array.
* \throw If \a this is not allocated.
+ * \throw If \a this is empty.
* \sa getMaxAbsValueInArray, getMinValueInArray
*/
template<class T>
T DataArrayTemplate<T>::getMaxValueInArray() const
{
checkAllocated();
+ if( empty() )
+ THROW_IK_EXCEPTION("getMaxValueInArray : this is empty !");
const T *loc(std::max_element(begin(),end()));
return *loc;
}
* The caller is to delete this array using decrRef() as it is no more needed.
* \throw If \a this is not allocated.
* \throw If \a this->getNumberOfComponents() != 1.
- * \throw If \a this->getNumberOfTuples() < 2.
+ * \throw If \a this->getNumberOfTuples() < 1.
*
* \b Example: <br>
* - this contains [1,3,6,7,7,9,15]
if(this->getNumberOfComponents()!=1)
throw INTERP_KERNEL::Exception("DataArrayInt::deltaShiftIndex : only single component allowed !");
std::size_t nbOfElements=this->getNumberOfTuples();
- if(nbOfElements<2)
+ if(nbOfElements<1)
- throw INTERP_KERNEL::Exception("DataArrayInt::deltaShiftIndex : 2 tuples at least must be present in 'this' !");
+ throw INTERP_KERNEL::Exception("DataArrayInt::deltaShiftIndex : 1 tuple at least must be present in 'this' !");
const T *ptr=this->getConstPointer();
DataArrayType *ret=DataArrayType::New();
return ret2.retn();
}
+ template<class T>
+ class PartitionCfg : public std::set<T>
+ {
+ public:
+ PartitionCfg() = default;
+ //! Attach the partition ID tagging entities that belong to this configuration.
+ void setID(T id) { _id = id; }
+ T getID() const { return _id; }
+ // Compare on the configuration content only (the base std::set), ignoring _id.
+ // NB : std::set has no member operator< and this class has no "_data" member, so the
+ // comparisons must go through the base-class subobjects. They are const-qualified
+ // because std::set<PartitionCfg<T>>::find invokes them on const references.
+ bool operator<(const PartitionCfg<T>& other) const { return static_cast<const std::set<T>&>(*this) < static_cast<const std::set<T>&>(other); }
+ bool operator>(const PartitionCfg<T>& other) const { return static_cast<const std::set<T>&>(*this) > static_cast<const std::set<T>&>(other); }
+ bool operator==(const PartitionCfg<T>& other) const { return static_cast<const std::set<T>&>(*this) == static_cast<const std::set<T>&>(other); }
+ private:
+ T _id = 0;
+ };
+
+ /*!
+ * \a this is considered as an array defining a partition. It means that values in \a this define partition-ids. All tuples in \a this with same value can be considered as partition.
+ * Typically this compact storage method is the one MED uses to store families.
+ *
+ * This method computes and returns the partition stored in \a this on reduced space using a reduction operation specified by pairs \a commonEntities and \a commonEntitiesIndex parameters.
+ * The reduction operation is the consequence of a fusion of entities, which explains the storage format defining the reduction.
+ *
+ * Another way to consider this method : it can be seen as an interpolation on an integer field (\a this). For fused entities it returns in a compact way all combinations of partition configurations.
+ *
+ * \param [out] partitionsToBeModified - For all partitions impacted by the reduction a n-uplet is returned (n is deduced thanks to \a partitionsToBeModifiedIndex)
+ * Each n-uplet represents a list of partitions impacted by reduction. The first element of n-uplet represents the ID to locate entities (using for example findIdsEqual in returned array)
+ * Remaining IDs in n-uplet are Partition IDs impacted.
+ * \param [out] partitionsToBeModifiedIndex - Index attached to \a partitionsToBeModified to interpret it.
+ *
+ * \return - The partition array that is the output of the reduction of \a this.
+ *
+ * \sa MEDCouplingUMesh::findCommonCells, DataArrayDouble::findCommonTuples
+ */
+ template <class T>
+ MCAuto< typename DataArrayDiscrete<T>::DataArrayType > DataArrayDiscrete<T>::forThisAsPartitionBuildReduction(const MCAuto<DataArrayIdType>& commonEntities, const MCAuto<DataArrayIdType>& commonEntitiesIndex, MCAuto<DataArrayType>& partitionsToBeModified, MCAuto<DataArrayIdType>& partitionsToBeModifiedIndex) const
+ {
+ constexpr char MSG[] = "this should have only one component";
+ this->checkAllocated(); this->checkNbOfComps(1,MSG);
+ commonEntities->checkAllocated(); commonEntities->checkNbOfComps(1,MSG);
+ commonEntitiesIndex->checkAllocated(); commonEntitiesIndex->checkNbOfComps(1,MSG);
+ std::size_t initialSpaceSz( this->getNumberOfTuples() );
+ mcIdType returnedSpaceSz( 0 );// size of the reduced (returned) space, set by ConvertIndexArrayToO2N below
+ // number of entities in each pack of common entities
+ MCAuto<DataArrayIdType> sizeOfPacks( commonEntitiesIndex->deltaShiftIndex() );
+ // o2n : old-to-new renumbering mapping the initial space onto the reduced space
+ MCAuto<DataArrayIdType> o2n( DataArrayIdType::ConvertIndexArrayToO2N(initialSpaceSz,commonEntities->begin(),commonEntitiesIndex->begin(),commonEntitiesIndex->end(),returnedSpaceSz) );
+ MCAuto< DataArrayType > ret( DataArrayDiscrete<T>::New() );
+ ret->alloc( returnedSpaceSz, 1 );
+ ret->fillWithValue( std::numeric_limits<T>::max() );// sentinel marking slots not assigned yet
+ // First deal with entities not fused.
+ MCAuto<DataArrayIdType> eltsNotFused( commonEntities->copySorted() );
+ eltsNotFused = eltsNotFused->buildComplement( initialSpaceSz );
+ MCAuto<DataArrayType> partionIdsOfNotFused = this->mySelectByTupleIdSafe(eltsNotFused->begin(),eltsNotFused->end());
+ MCAuto<DataArrayIdType> tupleIdsToSelect = o2n->selectByTupleIdSafe(eltsNotFused->begin(),eltsNotFused->end());
+ ret->setPartOfValues3(partionIdsOfNotFused,tupleIdsToSelect->begin(),tupleIdsToSelect->end(),0,1,1);
+ T *retPt( ret->getPointer() );
+ const mcIdType *o2nPt( o2n->begin() );
+ // outputs start empty ; index array is seeded with its leading 0
+ partitionsToBeModified = DataArrayType::New(); partitionsToBeModified->alloc(0,1);
+ partitionsToBeModifiedIndex = DataArrayIdType::New(); partitionsToBeModifiedIndex->alloc(1,1); partitionsToBeModifiedIndex->setIJSilent(0,0,0);
+ if( !sizeOfPacks->empty() )// if empty -> no fusion -> ret is already ready at this point -> nothing to do.
+ {// ready to work
+ mcIdType maxSizeOfPacks = sizeOfPacks->getMaxValueInArray();// store the max size of common entities
+ T newValueInThis = this->getMaxValueInArray() + 1;// first free partition ID for newly created configurations
+ const T *ptOfThisData( this->begin() );
+ const mcIdType *ceBg( commonEntities->begin() ), *ceiBg( commonEntitiesIndex->begin() );
+ // iterate on pack sizes : all packs fusing the same number of entities are treated together
+ for(mcIdType szOfPack = 2 ; szOfPack <= maxSizeOfPacks ; ++szOfPack)
+ {
+ MCAuto<DataArrayIdType> idsInThisWithSamePackSz = sizeOfPacks->findIdsEqual( FromIdType<T>( szOfPack ) );
+ std::set< PartitionCfg<T> > partitionCfgHolder;
+ for( const mcIdType *idsInThisWithSamePackSzIt = idsInThisWithSamePackSz->begin() ; idsInThisWithSamePackSzIt != idsInThisWithSamePackSz->end() ; ++idsInThisWithSamePackSzIt )
+ {
+ // gather the partition IDs of the fused entities into a set : the pack's configuration
+ PartitionCfg<T> partitionCfg;
+ std::transform(ceBg + ceiBg[*idsInThisWithSamePackSzIt],ceBg + ceiBg[*idsInThisWithSamePackSzIt + 1], std::inserter(partitionCfg,partitionCfg.end()),[ptOfThisData](mcIdType elt) { return ptOfThisData[elt]; });
+ auto existCfg = partitionCfgHolder.find( partitionCfg );
+ if( existCfg != partitionCfgHolder.end() )
+ {//partition already exist by a previous pack -> reuse it !
+ T newPartitionID = existCfg->getID();
+ retPt[ o2nPt[ ceBg [ ceiBg[*idsInThisWithSamePackSzIt] ] ] ] = newPartitionID;// hypothesis that o2n is so that all o2n[ceBg + ceiBg[*idsInThisWithSamePackSzIt],ceBg + ceiBg[*idsInThisWithSamePackSzIt + 1]) points to the same point in new renumbering
+ }
+ else
+ {//partition does not exist yet -> create it !
+ partitionCfg.setID( newValueInThis++ );
+ partitionCfgHolder.insert( partitionCfg );
+ retPt[ o2nPt[ ceBg [ ceiBg[*idsInThisWithSamePackSzIt] ] ] ] = partitionCfg.getID();
+ // record the new ID followed by the impacted partition IDs, then advance the index
+ partitionsToBeModified->pushBackSilent( partitionCfg.getID() );
+ partitionsToBeModified->pushBackValsSilent(partitionCfg.begin(),partitionCfg.end());
+ partitionsToBeModifiedIndex->pushBackSilent( partitionsToBeModifiedIndex->back() + partitionCfg.size() + 1 );
+ }
+ }
+ }
+ }
+ return ret;
+ }
+
/*!
* This method is a refinement of DataArrayInt::getDifferentValues because it returns not only different values in \a this but also, for each of
* them it tells which tuple id have this id.
PyTuple_SetItem(pyRet,1,SWIG_NewPointerObj(SWIG_as_voidptr(ret1),SWIGTITraits<INT>::TI, SWIG_POINTER_OWN | 0 ));
return pyRet;
}
+
+ PyObject *forThisAsPartitionBuildReduction(DataArrayIdType *commonEntities, DataArrayIdType *commonEntitiesIndex) const
+ {
+ MCAuto<DataArrayIdType> ceCpp( MCAuto<DataArrayIdType>::TakeRef(commonEntities) );
+ MCAuto<DataArrayIdType> ceiCpp( MCAuto<DataArrayIdType>::TakeRef(commonEntitiesIndex) );
+ MCAuto<ARRAY> ret1;
+ MCAuto<DataArrayIdType> ret2;
+ MCAuto<ARRAY> ret0 = self->forThisAsPartitionBuildReduction(ceCpp,ceiCpp,ret1,ret2);
+ PyObject *pyRet( PyTuple_New(3) );
+ PyTuple_SetItem(pyRet,0,SWIG_NewPointerObj(SWIG_as_voidptr(ret0.retn()),SWIGTITraits<INT>::TI, SWIG_POINTER_OWN | 0 ));
+ PyTuple_SetItem(pyRet,1,SWIG_NewPointerObj(SWIG_as_voidptr(ret1.retn()),SWIGTITraits<INT>::TI, SWIG_POINTER_OWN | 0 ));
+ PyTuple_SetItem(pyRet,2,SWIG_NewPointerObj(SWIG_as_voidptr(ret2.retn()),SWIGTITraits<mcIdType>::TI, SWIG_POINTER_OWN | 0 ));
+ return pyRet;
+ }
PyObject *isRange() const
{
toTest = c-a
self.assertTrue( toTest < 10*ref )
-
-
+ def testFuseOfFamilyField0(self):
+ """
+ EDF30179 : Core algo for family field linked to fusion of entities
+ """
+ d = DataArrayInt([2,2,2,2,2,3,3,3,3,1,1,1,1,1,1]) # 5 x 2 , 4 x 3, 6 x 1
+
+ c = DataArrayInt([]) ; ci = DataArrayInt([0])
+ #### Case 0 : simplest
+ assert( ci.deltaShiftIndex().empty() ) # EDF30179 : extension of deltaShiftIndex to single elt
+ a,b,f = d.forThisAsPartitionBuildReduction(c,ci)
+ assert( a.isEqual( d ) )
+ assert( b.empty() )
+ assert( f.isEqual(DataArrayInt([0])) )
+ #### Case 1 : single fusion
+ c = DataArrayInt([3,6]) ; ci = DataArrayInt([0,2])
+ a,b,f = d.forThisAsPartitionBuildReduction(c,ci)
+ assert( a.isEqual( DataArrayInt([2,2,2,4,2,3,3,3,1,1,1,1,1,1]) ) )
+ assert( b.isEqual( DataArrayInt([4,2,3]) ) )
+ assert( f.isEqual( DataArrayInt([0,3]) ) )
+ #### Case 2 : single fusion - same partition id
+ c = DataArrayInt([6,7]) ; ci = DataArrayInt([0,2])
+ a,b,f = d.forThisAsPartitionBuildReduction(c,ci)
+ assert( a.isEqual( DataArrayInt([2,2,2,2,2,3,4,3,1,1,1,1,1,1]) ) )
+ assert( b.isEqual( DataArrayInt([4,3]) ) )
+ assert( f.isEqual( DataArrayInt([0,2]) ) )
+ #### Case 3 : multi fusion single tuple
+ c = DataArrayInt([2,7,3,6]) ; ci = DataArrayInt([0,2,4]) # elts (2,7) and (3,6) to merge. These 2 couples refers to partitionIDs (2,3)
+ a,b,f = d.forThisAsPartitionBuildReduction(c,ci)
+ assert( a.isEqual( DataArrayInt([2,2,4,4,2,3,3,1,1,1,1,1,1]) ) )
+ assert( b.isEqual( DataArrayInt([4,2,3]) ) ) # Fuse element can be located with ID 4
+ assert( f.isEqual( DataArrayInt([0,3]) ) )
+
+ #### Case 4 : multi fusion
+ c = DataArrayInt([2,7,3,10]) ; ci = DataArrayInt([0,2,4])
+ a,b,f = d.forThisAsPartitionBuildReduction(c,ci)
+ assert( a.isEqual( DataArrayInt([2,2,4,5,2,3,3,3,1,1,1,1,1]) ) )
+ assert( b.isEqual( DataArrayInt([4,2,3,5,1,2]) ) )
+ assert( f.isEqual( DataArrayInt([0,3,6]) ) )
if __name__ == '__main__':
unittest.main()
%pythoncode %{
import MEDLoaderFinalize
MEDFileUMesh.reduceToCells = MEDLoaderFinalize.MEDFileUMeshReduceToCells
+MEDFileUMesh.fuseNodesAndCells = MEDLoaderFinalize.MEDFileUMeshFuseNodesAndCells
del MEDLoaderFinalize
MEDFileMeshesIterator.__next__ = MEDFileMeshesIterator.next
MEDFileAnyTypeFieldMultiTSIterator.__next__ = MEDFileAnyTypeFieldMultiTSIterator.next
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
+import logging
+
+def MEDFileUMeshFuseNodesAndCells(self, compType = 2 , eps = 1e-6, logLev = logging.INFO):
+ """
+ [EDF30179] : Method fusing nodes in this, then fusing cells in this. Fusion is done following eps and compType.
+
+ :param compType : see MEDCouplingPointSet.zipConnectivityTraducer method for explanations
+ :param eps: see DataArrayDouble.findCommonTuples for explanations.
+ :param logLev: Integer specifying log level
+ :return: MEDFileUMesh instance containing the result of nodes and cells fusion
+ """
+ import MEDLoader as ml
+ def getLogger( level = logging.INFO ):
+ FORMAT = '%(levelname)s : %(asctime)s : [%(filename)s:%(funcName)s:%(lineno)s] : %(message)s'
+ logging.basicConfig( format = FORMAT, level = level )
+ return logging.getLogger()
+ logger = getLogger( logLev )
+ def updateMap( mm : ml.MEDFileUMesh, lev : int, famMap : ml.DataArrayInt, famMapI : ml.DataArrayInt):
+ """
+ mm instance to be updated
+ """
+ def famIdManager(lev, famId):
+ if lev<= 0:
+ return -famId
+ else:
+ return famId
+ nbOfPartSetToBeUpdated = len(famMapI) -1
+ for partSetId in range( nbOfPartSetToBeUpdated ):
+ newFamId = famIdManager( lev, famMap[ famMapI[partSetId] ] )
+ newFamName = f"Family_{newFamId}"
+ logger.debug(f"For level {lev} new family : {newFamId}")
+ mm.addFamily(newFamName,newFamId)
+ for famId in famMap[ famMapI[partSetId]+1 :famMapI[partSetId+1] ]:
+ grpsToBeUpdated = mm.getGroupsOnFamily( mm.getFamilyNameGivenId( famIdManager( lev, int(famId) ) ) )
+ for grpToBeUpdated in grpsToBeUpdated:
+ mm.addFamilyOnGrp( grpToBeUpdated, newFamName )
+ pass
+ getLogger( level = logLev )
+ initNbNodes = len( self.getCoords() )
+ logger.info(f"Begin merging nodes with eps = {eps}")
+ cc,cci = self.getCoords().findCommonTuples( eps )
+ logger.info(f"End of merging nodes with eps = {eps} : Nb of nodes groups to be merged : {len(cci)-1} / {self.getNumberOfNodes()}")
+ o2n,newNbNodes = ml.DataArrayInt.ConvertIndexArrayToO2N(initNbNodes,cc,cci)
+ n2oNodes = o2n.invertArrayO2N2N2O( newNbNodes )
+ newCoords = self.getCoords()[n2oNodes]
+ # creation of
+ mmOut = ml.MEDFileUMesh()
+ mmOut.copyFamGrpMapsFrom( self )
+
+ for lev in self.getNonEmptyLevels():
+ logger.debug(f"Begin level {lev}")
+ m1 = self[lev].deepCopy()
+ logger.debug(f"Begin renumbering connectivity of level {lev}")
+ m1.renumberNodesInConn( o2n )
+ logger.debug(f"End renumbering connectivity of level {lev}")
+ m1.setCoords( newCoords )
+ logger.info(f"Begin of finding of same cells of level {lev}")
+ cce,ccei = m1.findCommonCells(compType,0)
+ logger.info(f"End of finding of same cells of level {lev} : Nb of cells groups to be merged : {len(ccei)-1} / {m1.getNumberOfCells()}")
+ famsCell = self.getFamilyFieldAtLevel(lev)
+ if famsCell:
+ famsCell = -famsCell
+ famsMergedCell,famMap,famMapI = famsCell.forThisAsPartitionBuildReduction(cce,ccei) # <- method updating family field array
+ updateMap(mmOut,lev,famMap,famMapI)
+ famsMergedCell = -famsMergedCell
+ o2nCells,newNbCells = ml.DataArrayInt.ConvertIndexArrayToO2N(m1.getNumberOfCells(),cce,ccei)
+ n2oCells = o2nCells.invertArrayO2N2N2O( newNbCells )
+ m1 = m1[ n2oCells ]
+ m1.setCoords( newCoords )
+ m1.setName( self.getName() )
+ mmOut[lev] = m1
+ if famsCell:
+ mmOut.setFamilyFieldArr( lev, famsMergedCell )
+
+ famsNode = self.getFamilyFieldAtLevel(1)
+ if famsNode:
+ famsMergedNode,famMap,famMapI = famsNode.forThisAsPartitionBuildReduction(cc,cci)
+ updateMap(mmOut,1,famMap,famMapI)
+ mmOut.setFamilyFieldArr(1, famsMergedNode)
+ return mmOut
+
def MEDFileUMeshReduceToCells(self, level, keepCells, removeOrphanNodes=True):
"""
Method returning a new MEDFileUMesh, restriction of self to level and keepCell cells at this level.
self.assertTrue(f1ts_read.getUndergroundDataArray().isAllocated())
self.assertTrue(not f1ts_read.getUndergroundDataArray().isAllocated())
+ def test49(self):
+ """
+ [EDF30179] : test of MEDFileUMesh.fuseNodesAndCells
+ """
+ import logging
+ mm = MEDFileUMesh()
+ arr = DataArrayDouble(4) ; arr.iota()
+ m = MEDCouplingCMesh() ; m.setCoords(arr,arr) ; m=m.buildUnstructured() ; m.changeSpaceDimension(3,0.)
+ m2 = m.deepCopy() ; m2.translate([3,0,0])
+ m = MEDCouplingUMesh.MergeUMeshes([m,m2]) # generate voluntary duplication of nodes and seg2
+ meshName = "mesh"
+ m.setName(meshName)
+ m1 = m.buildDescendingConnectivity()[0]
+ mm[0] = m
+ mm[-1] = m1
+ infoOnComponents = ["X [m]","YY [m]","ZZZ [m]"]
+ description = "description"
+ mm.getCoords().setInfoOnComponents( infoOnComponents )
+ mm.setDescription( description )
+ # groups on seg2
+ blackGrp = DataArrayInt([7,9,16,22,23,24,25,34,41,42]) ; blackGrp.setName("Black") # represent I at jonction
+ mm.addGroup(-1,blackGrp)
+ redGrp = DataArrayInt([8,14,15,16]) ; redGrp.setName("Red") # square of seg2 left side of junction
+ mm.addGroup(-1,redGrp)
+ greenGrp = DataArrayInt([26,34,35,36]) ; greenGrp.setName("Green")# square of seg2 right side of junction
+ mm.addGroup(-1,greenGrp)
+ # groups on nodes
+ blackGrp = DataArrayInt([2,3,7,11,14,15,16,17,20,24,28,29]) ; blackGrp.setName("BlackNode") # represent I at jonction
+ mm.addGroup(1,blackGrp)
+ redGrp = DataArrayInt([6,7,10,11,20,24]) ; redGrp.setName("RedNode") # square of seg2 left side of junction
+ mm.addGroup(1,redGrp)
+ greenGrp = DataArrayInt([20,21,24,25]) ; greenGrp.setName("GreenNode")# square of seg2 right side of junction
+ mm.addGroup(1,greenGrp)
+ ###
+ mmOut = mm.fuseNodesAndCells(compType = 2 , eps = 1e-6, logLev = logging.WARNING) # <<- hot point is here
+ ###
+ self.assertEqual( mmOut.getName() , meshName )
+ self.assertEqual( mmOut.getCoords().getInfoOnComponents() , infoOnComponents )
+ self.assertEqual( mmOut.getDescription() , description )
+ self.assertEqual( mmOut.getNumberOfNodes() , 28 )
+ self.assertEqual( mmOut.getNumberOfCellsAtLevel(0) , 18 )
+ self.assertEqual( mmOut.getNumberOfCellsAtLevel(-1) , 45 )
+ expCoo = DataArrayDouble( [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 2.0, 1.0, 0.0, 3.0, 1.0, 0.0, 0.0, 2.0, 0.0, 1.0, 2.0, 0.0, 2.0, 2.0, 0.0, 3.0, 2.0, 0.0, 0.0, 3.0, 0.0, 1.0, 3.0, 0.0, 2.0, 3.0, 0.0, 3.0, 3.0, 0.0, 4.0, 0.0, 0.0, 5.0, 0.0, 0.0, 6.0, 0.0, 0.0, 4.0, 1.0, 0.0, 5.0, 1.0, 0.0, 6.0, 1.0, 0.0, 4.0, 2.0, 0.0, 5.0, 2.0, 0.0, 6.0, 2.0, 0.0, 4.0, 3.0, 0.0, 5.0, 3.0, 0.0, 6.0, 3.0, 0.0], 28, 3)
+ self.assertTrue( mmOut.getCoords().isEqualWithoutConsideringStr( expCoo, 1e-12) )
+ refConn0 = DataArrayInt( [1, 0, 4, 5, 2, 1, 5, 6, 3, 2, 6, 7, 5, 4, 8, 9, 6, 5, 9, 10, 7, 6, 10, 11, 9, 8, 12, 13, 10, 9, 13, 14, 11, 10, 14, 15, 16, 3, 7, 19, 17, 16, 19, 20, 18, 17, 20, 21, 19, 7, 11, 22, 20, 19, 22, 23, 21, 20, 23, 24, 22, 11, 15, 25, 23, 22, 25, 26, 24, 23, 26, 27] )
+ self.assertTrue( MEDCoupling1SGTUMesh( mmOut[0] ).getNodalConnectivity().isEqualWithoutConsideringStr( refConn0 ) )
+ refConn1 = DataArrayInt( [1, 0, 0, 4, 4, 5, 5, 1, 2, 1, 5, 6, 6, 2, 3, 2, 6, 7, 3, 7, 4, 8, 8, 9, 9, 5, 9, 10, 10, 6, 10, 11, 7, 11, 8, 12, 12, 13, 13, 9, 13, 14, 14, 10, 14, 15, 11, 15, 16, 3, 7, 19, 19, 16, 17, 16, 19, 20, 20, 17, 18, 17, 20, 21, 21, 18, 11, 22, 22, 19, 22, 23, 23, 20, 23, 24, 24, 21, 15, 25, 25, 22, 25, 26, 26, 23, 26, 27, 27, 24] )
+ self.assertTrue( MEDCoupling1SGTUMesh( mmOut[-1] ).getNodalConnectivity().isEqualWithoutConsideringStr( refConn1 ) )
+ self.assertTrue( mmOut.getGroupArr(-1,"Black").isEqualWithoutConsideringStr( DataArrayInt([7, 9, 16, 22, 23, 24, 39]) ) )
+ self.assertTrue( mmOut.getGroupArr(-1,"Green").isEqualWithoutConsideringStr( DataArrayInt([16, 25, 33, 34]) ) )
+ self.assertTrue( mmOut.getGroupArr(-1,"Red").isEqualWithoutConsideringStr( DataArrayInt([8, 14, 15, 16]) ) )
+ self.assertTrue( mmOut.getGroupArr(1,"BlackNode").isEqualWithoutConsideringStr( DataArrayInt([2, 3, 7, 11, 14, 15, 16, 25]) ) )
+ self.assertTrue( mmOut.getGroupArr(1,"RedNode").isEqualWithoutConsideringStr( DataArrayInt([6, 7, 10, 11]) ) )
+ self.assertTrue( mmOut.getGroupArr(1,"GreenNode").isEqualWithoutConsideringStr( DataArrayInt([7, 11, 19, 22]) ) )
+ self.assertEqual( len( mmOut.getFamilyFieldAtLevel(1).findIdsGreaterOrEqualTo(0) ) , 28 )
+ self.assertEqual( len( mmOut.getFamilyFieldAtLevel(-1).findIdsLowerOrEqualTo(0) ) , 45 )
+
pass
if __name__ == "__main__":