[tools/medcoupling.git] / src / ParaMEDMEM / ParaUMesh.cxx
//
// Copyright (C) 2020-2021  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
// Author : Anthony Geay (EDF R&D)

#include "ParaUMesh.hxx"
#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "CommInterface.hxx"
#include "MEDCouplingMemArray.hxx"

#include "mpi.h"

#include <fstream>
#include <sstream>
#include <numeric>
#include <memory>
#include <vector>

using namespace MEDCoupling;

ParaUMesh *ParaUMesh::New(MEDCouplingUMesh *mesh, DataArrayIdType *globalCellIds, DataArrayIdType *globalNodeIds)
{
  return new ParaUMesh(mesh,globalCellIds,globalNodeIds);
}

ParaUMesh::ParaUMesh(MEDCouplingUMesh *mesh, DataArrayIdType *globalCellIds, DataArrayIdType *globalNodeIds)
{
  _mesh.takeRef(mesh);
  _cell_global.takeRef(globalCellIds);
  _node_global.takeRef(globalNodeIds);
  _mesh.checkNotNull();
  _cell_global.checkNotNull();
  _node_global.checkNotNull();
  _mesh->checkConsistencyLight();
  if(_mesh->getNumberOfNodes() != _node_global->getNumberOfTuples())
    throw INTERP_KERNEL::Exception("ParaUMesh constructor : mismatch between the number of nodes and the length of the global node ids array.");
  if(_mesh->getNumberOfCells() != _cell_global->getNumberOfTuples())
    throw INTERP_KERNEL::Exception("ParaUMesh constructor : mismatch between the number of cells and the length of the global cell ids array.");
}

std::size_t ParaUMesh::getHeapMemorySizeWithoutChildren() const
{
  return 0;
}

std::vector<const BigMemoryObject *> ParaUMesh::getDirectChildrenWithNull() const
{
  return {_mesh,_cell_global,_node_global};
}

/*!
 * This method computes, over the whole distributed mesh, the cells lying on the \a globalNodeIds nodes and
 * returns their global ids. The input \a globalNodeIds do not need to reside on the current process.
 * If \a fullyIn is true, only cells whose nodes are all contained in \a globalNodeIds are returned ;
 * otherwise a single matching node is enough for a cell to be selected.
 * This is a collective method : every process of COMM_WORLD must call it.
 */
MCAuto<DataArrayIdType> ParaUMesh::getCellIdsLyingOnNodes(const DataArrayIdType *globalNodeIds, bool fullyIn) const
{
  if(fullyIn)
    return this->getCellIdsLyingOnNodesTrue(globalNodeIds);
  else
    return this->getCellIdsLyingOnNodesFalse(globalNodeIds);
}

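// Minimal collective usage sketch (illustrative only, the variable names below are hypothetical) :
// every rank owning a part of the ParaUMesh must call this method, each with its own list of global node ids.
//   MCAuto<ParaUMesh> pmesh(ParaUMesh::New(localMesh,globCellIds,globNodeIds));
//   MCAuto<DataArrayIdType> nodesOfInterest(...);  // global node ids, not necessarily owned by this rank
//   MCAuto<DataArrayIdType> cellIds(pmesh->getCellIdsLyingOnNodes(nodesOfInterest,true));
//   // cellIds : global ids, gathered from all ranks, of the cells lying on the requested nodes

/*!
 * Implementation of getCellIdsLyingOnNodes for the \a fullyIn == true case : a cell is selected only if all of
 * its nodes have their global ids in the gathered requests.
 */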
MCAuto<DataArrayIdType> ParaUMesh::getCellIdsLyingOnNodesTrue(const DataArrayIdType *globalNodeIds) const
{
  MPI_Comm comm(MPI_COMM_WORLD);
  CommInterface ci;
  int size;
  ci.commSize(comm,&size);
  std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]),nbOfElems2(new mcIdType[size]),nbOfElems3(new mcIdType[size]);
  mcIdType nbOfNodeIdsLoc(globalNodeIds->getNumberOfTuples());
  ci.allGather(&nbOfNodeIdsLoc,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
  std::vector< MCAuto<DataArrayIdType> > tabs(size);
  // tabs stores, for each proc, the local node ids of the current proc that match that proc's request
  int nbOfCollectiveCalls = 1;// this parameter controls the memory peak
  // loop over sub-divisions of the requests so that not all procs have to hold all the nodes of every proc at once
  for(int subDiv = 0 ; subDiv < nbOfCollectiveCalls ; ++subDiv)
  {
    std::unique_ptr<mcIdType[]> nbOfElemsSp(CommInterface::SplitArrayOfLength(nbOfElems,size,subDiv,nbOfCollectiveCalls));
    mcIdType nbOfNodeIdsSum(std::accumulate(nbOfElemsSp.get(),nbOfElemsSp.get()+size,0));
    std::unique_ptr<mcIdType[]> allGlobalNodeIds(new mcIdType[nbOfNodeIdsSum]);
    std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElemsSp,size) );
    std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) );
    mcIdType startGlobalNodeIds,endGlobalNodeIds;
    DataArray::GetSlice(0,globalNodeIds->getNumberOfTuples(),1,subDiv,nbOfCollectiveCalls,startGlobalNodeIds,endGlobalNodeIds);
    ci.allGatherV(globalNodeIds->begin()+startGlobalNodeIds,FromIdType<int>(endGlobalNodeIds-startGlobalNodeIds),MPI_ID_TYPE,allGlobalNodeIds.get(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,comm);
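    // allGlobalNodeIds now holds, concatenated rank by rank, the current slice of the global node ids requested by every proc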
    mcIdType offset(0);
    for(int curRk = 0 ; curRk < size ; ++curRk)
    {
      MCAuto<DataArrayIdType> globalNodeIdsOfCurProc(DataArrayIdType::New());
      globalNodeIdsOfCurProc->useArray(allGlobalNodeIds.get()+offset,false,DeallocType::CPP_DEALLOC,nbOfElemsSp[curRk],1);
      offset += nbOfElemsSp[curRk];
      MCAuto<DataArrayIdType> globalNodeIdsCaptured(_node_global->buildIntersection(globalNodeIdsOfCurProc));
      MCAuto<DataArrayIdType> localNodeIdsToLocate(_node_global->findIdForEach(globalNodeIdsCaptured->begin(),globalNodeIdsCaptured->end()));
      if(tabs[curRk].isNull())
        tabs[curRk] = localNodeIdsToLocate;
      else
        tabs[curRk]->insertAtTheEnd(localNodeIdsToLocate->begin(),localNodeIdsToLocate->end());
    }
  }

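  // for each rank curRk, tabs[curRk] now holds the local ids of the nodes owned here whose global ids were requested by curRk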
  for(int curRk = 0 ; curRk < size ; ++curRk)
  {
    MCAuto<DataArrayIdType> localNodeIds(tabs[curRk]);
    localNodeIds->sort();
    MCAuto<DataArrayIdType> localNodeIdsUnique(localNodeIds->buildUnique());
    MCAuto<DataArrayIdType> localCellCaptured(_mesh->getCellIdsLyingOnNodes(localNodeIdsUnique->begin(),localNodeIdsUnique->end(),true));
    MCAuto<DataArrayIdType> localCellCapturedGlob(_cell_global->selectByTupleIdSafe(localCellCaptured->begin(),localCellCaptured->end()));
    tabs[curRk] = localCellCapturedGlob;
  }

  for(int curRk = 0 ; curRk < size ; ++curRk)
  {
    tabs[curRk] = tabs[curRk]->buildUniqueNotSorted();
    nbOfElems3[curRk] = tabs[curRk]->getNumberOfTuples();
  }
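  // exchange the per-destination counts (allToAll), then the variable-length arrays of captured global cell ids themselves (allToAllV)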
  std::vector<const DataArrayIdType *> tabss(tabs.begin(),tabs.end());
  MCAuto<DataArrayIdType> cells(DataArrayIdType::Aggregate(tabss));
  ci.allToAll(nbOfElems3.get(),1,MPI_ID_TYPE,nbOfElems2.get(),1,MPI_ID_TYPE,comm);
  mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems2.get(),nbOfElems2.get()+size,0));
  MCAuto<DataArrayIdType> cellIdsFromProcs(DataArrayIdType::New());
  cellIdsFromProcs->alloc(nbOfCellIdsSum,1);
  {
    std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElems3,size) ),nbOfElemsOutInt( CommInterface::ToIntArray<mcIdType>(nbOfElems2,size) );
    std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) ), offsetsOut( CommInterface::ComputeOffset(nbOfElemsOutInt,size) );
    ci.allToAllV(cells->begin(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,
                 cellIdsFromProcs->getPointer(),nbOfElemsOutInt.get(),offsetsOut.get(),MPI_ID_TYPE,comm);
  }
  cellIdsFromProcs->sort();
  return cellIdsFromProcs;
}

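/*!
 * Implementation of getCellIdsLyingOnNodes for the \a fullyIn == false case : a cell is selected as soon as at
 * least one of its nodes has its global id in the gathered requests.
 */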
MCAuto<DataArrayIdType> ParaUMesh::getCellIdsLyingOnNodesFalse(const DataArrayIdType *globalNodeIds) const
{
  MPI_Comm comm(MPI_COMM_WORLD);
  CommInterface ci;
  int size;
  ci.commSize(comm,&size);
  std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]),nbOfElems2(new mcIdType[size]),nbOfElems3(new mcIdType[size]);
  mcIdType nbOfNodeIdsLoc(globalNodeIds->getNumberOfTuples());
  ci.allGather(&nbOfNodeIdsLoc,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
  // loop over sub-divisions of the requests so that not all procs have to hold all the nodes of every proc at once
  int nbOfCollectiveCalls = 1;// this parameter controls the memory peak
  std::vector< MCAuto<DataArrayIdType> > tabs(size);
  for(int subDiv = 0 ; subDiv < nbOfCollectiveCalls ; ++subDiv)
  {
    std::unique_ptr<mcIdType[]> nbOfElemsSp(CommInterface::SplitArrayOfLength(nbOfElems,size,subDiv,nbOfCollectiveCalls));
    mcIdType nbOfNodeIdsSum(std::accumulate(nbOfElemsSp.get(),nbOfElemsSp.get()+size,0));
    std::unique_ptr<mcIdType[]> allGlobalNodeIds(new mcIdType[nbOfNodeIdsSum]);
    std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElemsSp,size) );
    std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) );
    mcIdType startGlobalNodeIds,endGlobalNodeIds;
    DataArray::GetSlice(0,globalNodeIds->getNumberOfTuples(),1,subDiv,nbOfCollectiveCalls,startGlobalNodeIds,endGlobalNodeIds);
    ci.allGatherV(globalNodeIds->begin()+startGlobalNodeIds,FromIdType<int>(endGlobalNodeIds-startGlobalNodeIds),MPI_ID_TYPE,allGlobalNodeIds.get(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,comm);
    mcIdType offset(0);
    for(int curRk = 0 ; curRk < size ; ++curRk)
    {
      MCAuto<DataArrayIdType> globalNodeIdsOfCurProc(DataArrayIdType::New());
      globalNodeIdsOfCurProc->useArray(allGlobalNodeIds.get()+offset,false,DeallocType::CPP_DEALLOC,nbOfElemsSp[curRk],1);
      offset += nbOfElemsSp[curRk];
      MCAuto<DataArrayIdType> globalNodeIdsCaptured(_node_global->buildIntersection(globalNodeIdsOfCurProc));
      MCAuto<DataArrayIdType> localNodeIdsToLocate(_node_global->findIdForEach(globalNodeIdsCaptured->begin(),globalNodeIdsCaptured->end()));
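      // fullyIn == false : a cell is captured as soon as one of its nodes is in the requested set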
      MCAuto<DataArrayIdType> localCellCaptured(_mesh->getCellIdsLyingOnNodes(localNodeIdsToLocate->begin(),localNodeIdsToLocate->end(),false));
      MCAuto<DataArrayIdType> localCellCapturedGlob(_cell_global->selectByTupleIdSafe(localCellCaptured->begin(),localCellCaptured->end()));
      if(tabs[curRk].isNull())
        tabs[curRk] = localCellCapturedGlob;
      else
        tabs[curRk]->insertAtTheEnd(localCellCapturedGlob->begin(),localCellCapturedGlob->end());
    }
  }
  for(int curRk = 0 ; curRk < size ; ++curRk)
  {
    tabs[curRk] = tabs[curRk]->buildUniqueNotSorted();
    nbOfElems3[curRk] = tabs[curRk]->getNumberOfTuples();
  }
  std::vector<const DataArrayIdType *> tabss(tabs.begin(),tabs.end());
  MCAuto<DataArrayIdType> cells(DataArrayIdType::Aggregate(tabss));
  ci.allToAll(nbOfElems3.get(),1,MPI_ID_TYPE,nbOfElems2.get(),1,MPI_ID_TYPE,comm);
  mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems2.get(),nbOfElems2.get()+size,0));
  MCAuto<DataArrayIdType> cellIdsFromProcs(DataArrayIdType::New());
  cellIdsFromProcs->alloc(nbOfCellIdsSum,1);
  {
    std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElems3,size) ),nbOfElemsOutInt( CommInterface::ToIntArray<mcIdType>(nbOfElems2,size) );
    std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) ), offsetsOut( CommInterface::ComputeOffset(nbOfElemsOutInt,size) );
    ci.allToAllV(cells->begin(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,
                 cellIdsFromProcs->getPointer(),nbOfElemsOutInt.get(),offsetsOut.get(),MPI_ID_TYPE,comm);
  }
  cellIdsFromProcs->sort();
  return cellIdsFromProcs;
}

DataArrayIdType *ParaUMesh::redistributeCellField(const DataArrayIdType *globalCellIds, const DataArrayIdType *fieldValueToRed) const
{
  return this->redistributeCellFieldT<mcIdType>(globalCellIds,fieldValueToRed);
}

DataArrayDouble *ParaUMesh::redistributeCellField(const DataArrayIdType *globalCellIds, const DataArrayDouble *fieldValueToRed) const
{
  return this->redistributeCellFieldT<double>(globalCellIds,fieldValueToRed);
}

DataArrayIdType *ParaUMesh::redistributeNodeField(const DataArrayIdType *globalCellIds, const DataArrayIdType *fieldValueToRed) const
{
  return this->redistributeNodeFieldT<mcIdType>(globalCellIds,fieldValueToRed);
}

DataArrayDouble *ParaUMesh::redistributeNodeField(const DataArrayIdType *globalCellIds, const DataArrayDouble *fieldValueToRed) const
{
  return this->redistributeNodeFieldT<double>(globalCellIds,fieldValueToRed);
}
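
// Usage sketch for the redistribute*Field wrappers (illustrative only, the variable names are hypothetical) :
// these are collective calls following the same redistribution pattern as redistributeCells, where the field
// array is assumed to be laid out on the local cells (resp. nodes) of this ParaUMesh.
//   MCAuto<DataArrayIdType> targetCells(...);  // global ids of the cells wanted on this rank
//   MCAuto<DataArrayDouble> newCellField(pmesh->redistributeCellField(targetCells,cellField));
//   MCAuto<DataArrayDouble> newNodeField(pmesh->redistributeNodeField(targetCells,nodeField));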

/*!
 * Returns the part of the whole distributed mesh defined by the global cell ids array \a globalCellIds,
 * gathering the necessary cells from all ranks over COMM_WORLD.
 */
ParaUMesh *ParaUMesh::redistributeCells(const DataArrayIdType *globalCellIds) const
{
  MPI_Comm comm(MPI_COMM_WORLD);
  CommInterface ci;
  std::unique_ptr<mcIdType[]> allGlobalCellIds,allGlobalCellIdsIndex;
  int size(ci.allGatherArrays(comm,globalCellIds,allGlobalCellIds,allGlobalCellIdsIndex));
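  // allGlobalCellIds : concatenation of the global cell ids requested by every rank ; allGlobalCellIdsIndex : index array giving each rank's slice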
  // Prepare the ParaUMesh parts to be sent : compute, for each proc, the contribution of the current rank.
  std::vector< MCAuto<DataArrayIdType> > globalCellIdsToBeSent(size),globalNodeIdsToBeSent(size);
  std::vector< MCAuto<MEDCouplingUMesh> > meshPartsToBeSent(size);
  for(int curRk = 0 ; curRk < size ; ++curRk)
  {
    mcIdType offset(allGlobalCellIdsIndex[curRk]);
    MCAuto<DataArrayIdType> globalCellIdsOfCurProc(DataArrayIdType::New());
    globalCellIdsOfCurProc->useArray(allGlobalCellIds.get()+offset,false,DeallocType::CPP_DEALLOC,allGlobalCellIdsIndex[curRk+1]-offset,1);
    // the key call is here : compute for rank curRk the cells to be sent
    MCAuto<DataArrayIdType> globalCellIdsCaptured(_cell_global->buildIntersection(globalCellIdsOfCurProc));// OK for the global cellIds
    MCAuto<DataArrayIdType> localCellIdsCaptured(_cell_global->findIdForEach(globalCellIdsCaptured->begin(),globalCellIdsCaptured->end()));
    MCAuto<MEDCouplingUMesh> meshPart(_mesh->buildPartOfMySelf(localCellIdsCaptured->begin(),localCellIdsCaptured->end(),true));
    MCAuto<DataArrayIdType> o2n(meshPart->zipCoordsTraducer());// OK for the mesh
    MCAuto<DataArrayIdType> n2o(o2n->invertArrayO2N2N2O(meshPart->getNumberOfNodes()));
    MCAuto<DataArrayIdType> globalNodeIdsPart(_node_global->selectByTupleIdSafe(n2o->begin(),n2o->end())); // OK for the global nodeIds
    meshPartsToBeSent[curRk] = meshPart;
    globalCellIdsToBeSent[curRk] = globalCellIdsCaptured;
    globalNodeIdsToBeSent[curRk] = globalNodeIdsPart;
  }
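  // at this point, for each target rank curRk, the sub-mesh to be sent and its global cell / node ids are ready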
  // Receive
  std::vector< MCAuto<DataArrayIdType> > globalCellIdsReceived,globalNodeIdsReceived;
  ci.allToAllArrays(comm,globalCellIdsToBeSent,globalCellIdsReceived);
  ci.allToAllArrays(comm,globalNodeIdsToBeSent,globalNodeIdsReceived);
  // now exchange the 3 arrays for the umesh : connectivity, connectivity index and coordinates
  std::vector<const MEDCouplingUMesh *> meshPartsToBeSent2(FromVecAutoToVecOfConst<MEDCouplingUMesh>(meshPartsToBeSent));
  // connectivity index
  std::vector< MCAuto<DataArrayIdType> > connectivityIndexReceived,connectivityReceived;
  {
    std::vector<const DataArrayIdType *> connectivityIndexToBeSent(UMeshConnectivityIndexIterator(0,&meshPartsToBeSent2),UMeshConnectivityIndexIterator(meshPartsToBeSent2.size(),&meshPartsToBeSent2));
    ci.allToAllArrays(comm,FromVecConstToVecAuto<DataArrayIdType>(connectivityIndexToBeSent),connectivityIndexReceived);
  }
  // connectivity
  {
    std::vector<const DataArrayIdType *> connectivityToBeSent(UMeshConnectivityIterator(0,&meshPartsToBeSent2),UMeshConnectivityIterator(meshPartsToBeSent2.size(),&meshPartsToBeSent2));
    ci.allToAllArrays(comm,FromVecConstToVecAuto<DataArrayIdType>(connectivityToBeSent),connectivityReceived);
  }
  // coordinates
  MCAuto<DataArrayDouble> coords;
  {
    std::vector<const DataArrayDouble *> coordsToBeSent(UMeshCoordsIterator(0,&meshPartsToBeSent2),UMeshCoordsIterator(meshPartsToBeSent2.size(),&meshPartsToBeSent2));
    ci.allToAllArrays(comm,FromVecConstToVecAuto<DataArrayDouble>(coordsToBeSent),coords);
  }
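  // coords now holds, concatenated in rank order, the coordinates of the node sets received from each rank (duplicates possible for shared nodes)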
  /////// Sort it all !
  // firstly deal with nodes.
  MCAuto<DataArrayIdType> aggregatedNodeIds( DataArrayIdType::Aggregate(FromVecAutoToVecOfConst<DataArrayIdType>(globalNodeIdsReceived)) );
  MCAuto<DataArrayIdType> aggregatedNodeIdsSorted(aggregatedNodeIds->copySorted());
  MCAuto<DataArrayIdType> nodeIdsIntoAggregatedIds(DataArrayIdType::FindPermutationFromFirstToSecondDuplicate(aggregatedNodeIdsSorted,aggregatedNodeIds));
  MCAuto<DataArrayIdType> idxOfSameNodeIds(aggregatedNodeIdsSorted->indexOfSameConsecutiveValueGroups());
  MCAuto<DataArrayIdType> n2o_nodes(nodeIdsIntoAggregatedIds->selectByTupleIdSafe(idxOfSameNodeIds->begin(),idxOfSameNodeIds->end()-1));// new == new ordering in which global node ids are sorted ; old == coarse ordering implied by the aggregation
  MCAuto<DataArrayIdType> finalGlobalNodeIds(aggregatedNodeIdsSorted->selectByTupleIdSafe(idxOfSameNodeIds->begin(),idxOfSameNodeIds->end()-1));
  MCAuto<DataArrayDouble> finalCoords(coords->selectByTupleIdSafe(n2o_nodes->begin(),n2o_nodes->end()));
  finalCoords->copyStringInfoFrom(*_mesh->getCoords());
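  // finalGlobalNodeIds / finalCoords now describe the deduplicated node set of the redistributed mesh, sorted by global node id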
  // secondly, renumber the node ids in connectivityReceived
  for(int curRk = 0 ; curRk < size ; ++curRk)
  {
    auto current(globalNodeIdsReceived[curRk]);
    MCAuto<DataArrayIdType> aa(finalGlobalNodeIds->findIdForEach(current->begin(),current->end()));
    // work on connectivityReceived[curRk] with transformWithIndArr, but do not forget the cell type entries that must be excluded from the renumbering !
    auto connectivityToModify(connectivityReceived[curRk]);
    auto connectivityIndex(connectivityIndexReceived[curRk]);
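    // each cell record in the nodal connectivity starts with its geometric type : save those entries, zero them so that
    // transformWithIndArr can safely remap every value as a node id, then restore the saved types afterwards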
    MCAuto<DataArrayIdType> types(connectivityToModify->selectByTupleIdSafe(connectivityIndex->begin(),connectivityIndex->end()-1));
    connectivityToModify->setPartOfValuesSimple3(0,connectivityIndex->begin(),connectivityIndex->end()-1,0,1,1);
    connectivityToModify->transformWithIndArr(aa->begin(),aa->end());
    connectivityToModify->setPartOfValues3(types,connectivityIndex->begin(),connectivityIndex->end()-1,0,1,1,true);
  }
  // thirdly, renumber the cells
  MCAuto<DataArrayIdType> aggregatedCellIds( DataArrayIdType::Aggregate(FromVecAutoToVecOfConst<DataArrayIdType>(globalCellIdsReceived)) );
  MCAuto<DataArrayIdType> aggregatedCellIdsSorted(aggregatedCellIds->copySorted());
  MCAuto<DataArrayIdType> idsIntoAggregatedIds(DataArrayIdType::FindPermutationFromFirstToSecondDuplicate(aggregatedCellIdsSorted,aggregatedCellIds));
  MCAuto<DataArrayIdType> cellIdsOfSameNodeIds(aggregatedCellIdsSorted->indexOfSameConsecutiveValueGroups());
  MCAuto<DataArrayIdType> n2o_cells(idsIntoAggregatedIds->selectByTupleIdSafe(cellIdsOfSameNodeIds->begin(),cellIdsOfSameNodeIds->end()-1));// new == new ordering in which global cell ids are sorted ; old == coarse ordering implied by the aggregation
  // TODO : check coordsReceived==globalCellIds
  MCAuto<DataArrayIdType> connSorted,indicesSorted;
  {
    MCAuto<DataArrayIdType> conn(DataArrayIdType::Aggregate(FromVecAutoToVecOfConst<DataArrayIdType>(connectivityReceived)));
    MCAuto<DataArrayIdType> connIndex(DataArrayIdType::AggregateIndexes(FromVecAutoToVecOfConst<DataArrayIdType>(connectivityIndexReceived)));
    {
      DataArrayIdType *indicesSortedTmp(nullptr),*valuesSortedTmp(nullptr);
      DataArrayIdType::ExtractFromIndexedArrays(n2o_cells->begin(),n2o_cells->end(),conn,connIndex,valuesSortedTmp,indicesSortedTmp);
      indicesSorted = indicesSortedTmp; connSorted=valuesSortedTmp;
    }
  }
  // finalize all
  MCAuto<MEDCouplingUMesh> mesh(MEDCouplingUMesh::New(_mesh->getName(),_mesh->getMeshDimension()));
  mesh->setConnectivity(connSorted,indicesSorted,true);
  mesh->setCoords(finalCoords);
  mesh->setDescription(_mesh->getDescription());
  MCAuto<ParaUMesh> ret(ParaUMesh::New(mesh,aggregatedCellIdsSorted,finalGlobalNodeIds));
  return ret.retn();
}
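
// Typical redistribution round trip (illustrative sketch only, the variable names are hypothetical) :
//   MCAuto<ParaUMesh> pmesh(ParaUMesh::New(localMesh,globCellIds,globNodeIds));
//   MCAuto<DataArrayIdType> wantedCells(...);  // global ids of the cells this rank should own after redistribution
//   MCAuto<ParaUMesh> redistributed(pmesh->redistributeCells(wantedCells));
// redistributeCells is a collective call over COMM_WORLD : every rank must invoke it, each with its own wantedCells.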