ProcessorGroup.cxx
MPIProcessorGroup.cxx
ParaMESH.cxx
+ CommInterface.cxx
ParaUMesh.cxx
+ ParaSkyLineArray.cxx
ComponentTopology.cxx
MPIAccess.cxx
InterpolationMatrix.cxx
#include "CommInterface.hxx"
+#include <numeric>
+
namespace MEDCoupling
{
/*! \anchor CommInterface-det
\endverbatim
*/
- CommInterface::CommInterface()
+ /*!
+ * Generalized AllGather collective communication.
+ * This method sends the input \a array of this proc to all procs. On return, \a result holds the concatenation of the arrays of all procs (ordered by rank) and \a resultIndex holds, for each proc, the number of elements it contributed.
+ */
+ void CommInterface::allGatherArrays(MPI_Comm comm, const DataArrayIdType *array, std::unique_ptr<mcIdType[]>& result, std::unique_ptr<mcIdType[]>& resultIndex) const
{
+ int size;
+ this->commSize(comm,&size);
+ std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]);
+ mcIdType nbOfCellsRequested(array->getNumberOfTuples());
+ this->allGather(&nbOfCellsRequested,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
+ mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems.get(),nbOfElems.get()+size,mcIdType(0))); // mcIdType(0) so the sum is not truncated to int
+ result.reset(new mcIdType[nbOfCellIdsSum]);
+ std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElems,size) );
+ std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) );
+ this->allGatherV(array->begin(),nbOfCellsRequested,MPI_ID_TYPE,result.get(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,comm);
+ resultIndex = std::move(nbOfElems);
}
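
For context, a minimal caller-side sketch of the new allGatherArrays (not part of the patch; the helper name gatherAllIds and its setup are illustrative only, assuming MPI has already been initialized):

#include "CommInterface.hxx"
#include "MEDCouplingMemArray.hxx"
#include <memory>

// Sketch : gather the local id array of every proc on all procs.
void gatherAllIds(MPI_Comm comm, const MEDCoupling::DataArrayIdType *localIds)
{
  using namespace MEDCoupling;
  CommInterface ci;
  std::unique_ptr<mcIdType[]> allIds, nbPerProc;
  ci.allGatherArrays(comm,localIds,allIds,nbPerProc);
  // allIds now holds the concatenation of the arrays of all procs, ordered by rank ;
  // nbPerProc[i] is the number of ids contributed by proc i.
}
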
- CommInterface::~CommInterface()
+ /*!
+ * Generalized AllToAll collective communication.
+ * \a arrays is expected to contain one array per target proc. On return, \a arraysOut[i] holds the data received from proc \a i.
+ */
+ void CommInterface::allToAllArrays(MPI_Comm comm, const std::vector< MCAuto<DataArrayIdType> >& arrays, std::vector< MCAuto<DataArrayIdType> >& arraysOut) const
{
+ int size;
+ this->commSize(comm,&size);
+ if( arrays.size() != ToSizeT(size) )
+ throw INTERP_KERNEL::Exception("CommInterface::allToAllArrays : internal error ! Number of input arrays must be equal to the communicator size.");
+ std::vector< const DataArrayIdType *> arraysBis(size);
+ {
+ std::vector< const DataArrayIdType *>::iterator itArraysBis(arraysBis.begin());
+ std::for_each(arrays.begin(),arrays.end(),[&itArraysBis](MCAuto<DataArrayIdType> elt) { *itArraysBis++=elt; });
+ }
+ std::unique_ptr<mcIdType[]> nbOfElems2(new mcIdType[size]),nbOfElems3(new mcIdType[size]);
+ for(int curRk = 0 ; curRk < size ; ++curRk)
+ {
+ nbOfElems3[curRk] = arrays[curRk]->getNumberOfTuples();
+ }
+ this->allToAll(nbOfElems3.get(),1,MPI_ID_TYPE,nbOfElems2.get(),1,MPI_ID_TYPE,comm);
+ mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems2.get(),nbOfElems2.get()+size,mcIdType(0))); // mcIdType(0) so the sum is not truncated to int
+ MCAuto<DataArrayIdType> cellIdsFromProcs(DataArrayIdType::New());
+ cellIdsFromProcs->alloc(nbOfCellIdsSum,1);
+ std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElems3,size) ),nbOfElemsOutInt( CommInterface::ToIntArray<mcIdType>(nbOfElems2,size) );
+ std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) ), offsetsOut( CommInterface::ComputeOffset(nbOfElemsOutInt,size) );
+ {
+ MCAuto<DataArrayIdType> arraysAcc(DataArrayIdType::Aggregate(arraysBis));
+ this->allToAllV(arraysAcc->begin(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,
+ cellIdsFromProcs->getPointer(),nbOfElemsOutInt.get(),offsetsOut.get(),MPI_ID_TYPE,comm);
+ }
+ std::unique_ptr<mcIdType[]> offsetsOutIdType( CommInterface::ComputeOffset(nbOfElems2,size) );
+ // build output arraysOut by splitting cellIdsFromProcs into one part per source proc
+ arraysOut.resize(size);
+ for(int curRk = 0 ; curRk < size ; ++curRk)
+ {
+ arraysOut[curRk] = DataArrayIdType::NewFromArray(cellIdsFromProcs->begin()+offsetsOutIdType[curRk],cellIdsFromProcs->begin()+offsetsOutIdType[curRk]+nbOfElems2[curRk]);
+ }
}
}
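
Likewise, a hedged caller-side sketch of allToAllArrays (exchangeIds and its payload are illustrative only): each proc provides one array per destination proc and receives one array per source proc.

#include "CommInterface.hxx"
#include "MEDCouplingMemArray.hxx"
#include <vector>

// Sketch : proc r sends the single id r to every proc and collects what the others sent.
void exchangeIds(MPI_Comm comm)
{
  using namespace MEDCoupling;
  CommInterface ci;
  int size(0),rank(0);
  ci.commSize(comm,&size);
  ci.commRank(comm,&rank);
  std::vector< MCAuto<DataArrayIdType> > toSend(size);
  for(int dst = 0 ; dst < size ; ++dst)
    {
      toSend[dst] = DataArrayIdType::New();
      toSend[dst]->alloc(1,1);
      toSend[dst]->setIJ(0,0,rank);
    }
  std::vector< MCAuto<DataArrayIdType> > received;
  ci.allToAllArrays(comm,toSend,received);
  // received[src] holds the ids sent by proc src (here the single value src).
}
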
class CommInterface
{
public:
- CommInterface(){}
- virtual ~CommInterface(){}
+ CommInterface() { }
+ virtual ~CommInterface() { }
int worldSize() const {
int size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Op op, int root, MPI_Comm comm) const { return MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm); }
int allReduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) const { return MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm); }
public:
+ void allGatherArrays(MPI_Comm comm, const DataArrayIdType *array, std::unique_ptr<mcIdType[]>& result, std::unique_ptr<mcIdType[]>& resultIndex) const;
+ void allToAllArrays(MPI_Comm comm, const std::vector< MCAuto<DataArrayIdType> >& arrays, std::vector< MCAuto<DataArrayIdType> >& arraysOut) const;
+ public:
/*!
 * \a counts is expected to be an array of array lengths. This method returns an array of split arrays.
--- /dev/null
+//
+// Copyright (C) 2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+// Author : Anthony Geay (EDF R&D)
+
+#include "ParaSkyLineArray.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+#include "CommInterface.hxx"
+#include "MEDCouplingMemArray.hxx"
+
+#include "mpi.h"
+
+#include <fstream>
+#include <sstream>
+#include <numeric>
+#include <memory>
+#include <vector>
+
+using namespace std;
+using namespace MEDCoupling;
+
+ParaSkyLineArray::ParaSkyLineArray(MEDCouplingSkyLineArray *ska, DataArrayIdType *globalIds)
+{
+ _ska.takeRef(ska);
+ _global_ids.takeRef(globalIds);
+ _ska.checkNotNull();
+ _global_ids.checkNotNull();
+ if(_ska->getNumberOf() != _global_ids->getNumberOfTuples())
+ throw INTERP_KERNEL::Exception("ParaSkyLineArray constructor : mismatch between the number of global ids and the number of packs in the input SkyLineArray.");
+}
+
+MCAuto<ParaSkyLineArray> ParaSkyLineArray::equiRedistribute(mcIdType nbOfEntities) const
+{
+ //TODO : redistribution algorithm still to be implemented
+ throw INTERP_KERNEL::Exception("ParaSkyLineArray::equiRedistribute : not implemented yet !");
+}
\ No newline at end of file
--- /dev/null
+// Copyright (C) 2020 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+// Author : Anthony Geay (EDF R&D)
+
+#pragma once
+
+#include "MEDCouplingSkyLineArray.hxx"
+#include "ProcessorGroup.hxx"
+#include "MEDCouplingMemArray.hxx"
+
+#include <string>
+#include <vector>
+
+namespace MEDCoupling
+{
+ /*!
+ * Parallel representation of a SkyLineArray.
+ *
+ * This class is tailored to the needs of parallel computations : it couples the local MEDCouplingSkyLineArray of a proc with the global ids of the packs it stores.
+ */
+ class ParaSkyLineArray
+ {
+ public:
+ ParaSkyLineArray(MEDCouplingSkyLineArray *ska, DataArrayIdType *globalIds);
+ MCAuto<ParaSkyLineArray> equiRedistribute(mcIdType nbOfEntities) const;
+ virtual ~ParaSkyLineArray() { }
+ private:
+ MCAuto<MEDCouplingSkyLineArray> _ska;
+ MCAuto<DataArrayIdType> _global_ids;
+ };
+}
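
For illustration only, a minimal construction sketch of the new class (buildExample, the skyline contents and the global ids below are made up, not taken from the patch):

#include "ParaSkyLineArray.hxx"

// Sketch : build a ParaSkyLineArray from a two-pack skyline and one global id per pack.
void buildExample()
{
  using namespace MEDCoupling;
  // two packs : {7,8,9} and {10,11}
  MCAuto<MEDCouplingSkyLineArray> ska(MEDCouplingSkyLineArray::New({0,3,5},{7,8,9,10,11}));
  // exactly one global id per pack, as checked by the constructor
  MCAuto<DataArrayIdType> globalIds(DataArrayIdType::New());
  globalIds->alloc(2,1);
  globalIds->setIJ(0,0,100);
  globalIds->setIJ(1,0,101);
  ParaSkyLineArray pska(ska,globalIds); // takes its own references on both inputs
}
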
return cellIdsFromProcs;
}
-/*!
- * Generalized AllGather collective communication.
- * This method send input \a array to all procs.
- */
-void AllGatherArrays(const CommInterface& ci, MPI_Comm comm, const DataArrayIdType *array, std::unique_ptr<mcIdType[]>& result, std::unique_ptr<mcIdType[]>& resultIndex)
-{
- int size;
- ci.commSize(comm,&size);
- std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]);
- mcIdType nbOfCellsRequested(array->getNumberOfTuples());
- ci.allGather(&nbOfCellsRequested,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
- mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems.get(),nbOfElems.get()+size,0));
- result.reset(new mcIdType[nbOfCellIdsSum]);
- std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElems,size) );
- std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) );
- ci.allGatherV(array->begin(),nbOfCellsRequested,MPI_ID_TYPE,result.get(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,comm);
- resultIndex = std::move(nbOfElems);
-}
-
-/*!
- * Generalized AllToAll collective communication.
- */
-void AllToAllArrays(const CommInterface& ci, MPI_Comm comm, const std::vector< MCAuto<DataArrayIdType> >& arrays, std::vector< MCAuto<DataArrayIdType> >& arraysOut)
-{
- int size;
- ci.commSize(comm,&size);
- if( arrays.size() != ToSizeT(size) )
- throw INTERP_KERNEL::Exception("AllToAllArrays : internal error ! Invalid size of input array.");
- std::vector< const DataArrayIdType *> arraysBis(size);
- {
- std::vector< const DataArrayIdType *>::iterator itArraysBis(arraysBis.begin());
- std::for_each(arrays.begin(),arrays.end(),[&itArraysBis](MCAuto<DataArrayIdType> elt) { *itArraysBis++=elt; });
- }
- std::unique_ptr<mcIdType[]> nbOfElems2(new mcIdType[size]),nbOfElems3(new mcIdType[size]);
- for(int curRk = 0 ; curRk < size ; ++curRk)
- {
- nbOfElems3[curRk] = arrays[curRk]->getNumberOfTuples();
- }
- ci.allToAll(nbOfElems3.get(),1,MPI_ID_TYPE,nbOfElems2.get(),1,MPI_ID_TYPE,comm);
- mcIdType nbOfCellIdsSum(std::accumulate(nbOfElems2.get(),nbOfElems2.get()+size,0));
- MCAuto<DataArrayIdType> cellIdsFromProcs(DataArrayIdType::New());
- cellIdsFromProcs->alloc(nbOfCellIdsSum,1);
- std::unique_ptr<int[]> nbOfElemsInt( CommInterface::ToIntArray<mcIdType>(nbOfElems3,size) ),nbOfElemsOutInt( CommInterface::ToIntArray<mcIdType>(nbOfElems2,size) );
- std::unique_ptr<int[]> offsetsIn( CommInterface::ComputeOffset(nbOfElemsInt,size) ), offsetsOut( CommInterface::ComputeOffset(nbOfElemsOutInt,size) );
- {
- MCAuto<DataArrayIdType> arraysAcc(DataArrayIdType::Aggregate(arraysBis));
- ci.allToAllV(arraysAcc->begin(),nbOfElemsInt.get(),offsetsIn.get(),MPI_ID_TYPE,
- cellIdsFromProcs->getPointer(),nbOfElemsOutInt.get(),offsetsOut.get(),MPI_ID_TYPE,comm);
- }
- std::unique_ptr<mcIdType[]> offsetsOutIdType( CommInterface::ComputeOffset(nbOfElems2,size) );
- // build output arraysOut by spliting cellIdsFromProcs into parts
- arraysOut.resize(size);
- for(int curRk = 0 ; curRk < size ; ++curRk)
- {
- arraysOut[curRk] = DataArrayIdType::NewFromArray(cellIdsFromProcs->begin()+offsetsOutIdType[curRk],cellIdsFromProcs->begin()+offsetsOutIdType[curRk]+nbOfElems2[curRk]);
- }
-}
-
/*!
*/
MCAuto<ParaUMesh> ParaUMesh::redistributeCells(const DataArrayIdType *globalCellIds) const