#include "MEDPARTITIONER_PTScotchGraph.hxx"
#include "MEDPARTITIONER_Graph.hxx"
+#include "MEDPARTITIONER_Utils.hxx"
#include "MCIdType.hxx"
#include "MEDCouplingSkyLineArray.hxx"
#include "MEDPARTITIONER_Utils.hxx"
#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingSkyLineArray.hxx"
#include "MCIdType.hxx"
#include <cstddef>
//bug: isOnDifferentHosts(here,there) holding for this pair of ranks is not the same as isOnDifferentHosts holding for some pair in the job
//return string(name_here) != string(name_there);
- int const sum_same = -1;
+ int sum_same = -1;
int same = 1;
if (std::string(name_here) != std::string(name_there))
same=0;
MPI_Send( (void*)str.data(), (int)str.length(), MPI_CHAR, target, tag+100, MPI_COMM_WORLD );
}
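The //bug note above is about exactly this trap: comparing the host names of two given ranks answers a pairwise question, while the caller needs a global one. A minimal sketch of the global form, with illustrative names (allOnSameHost, here, ref) that are not part of the patch:

    #include <cstring>
    #include <mpi.h>

    // Returns 1 on every rank iff all ranks run on rank 0's host.
    int allOnSameHost()
    {
      char here[MPI_MAX_PROCESSOR_NAME] = {0};
      char ref[MPI_MAX_PROCESSOR_NAME] = {0};
      int len = 0;
      MPI_Get_processor_name(here, &len);
      std::strcpy(ref, here);
      // Everyone compares against rank 0's name...
      MPI_Bcast(ref, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, MPI_COMM_WORLD);
      int same = (std::strcmp(here, ref) == 0) ? 1 : 0;
      int allSame = 0;
      // ...and MPI_MIN leaves 1 only if every rank matched.
      MPI_Allreduce(&same, &allSame, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
      return allSame;
    }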
- int const recSize=0;
+ int recSize=0;
if (rank == target)
{
MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
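For context, these hunks implement the usual size-then-payload string exchange, and recSize loses its const precisely because MPI_Recv writes through the pointer. A self-contained sketch of the pattern, with hypothetical helper names (sendString/recvString) and the same tag/tag+100 convention:

    #include <mpi.h>
    #include <string>
    #include <vector>

    void sendString(const std::string& str, int target, int tag)
    {
      int sz = (int)str.length();
      MPI_Send(&sz, 1, MPI_INT, target, tag, MPI_COMM_WORLD);
      MPI_Send((void*)str.data(), sz, MPI_CHAR, target, tag + 100, MPI_COMM_WORLD);
    }

    std::string recvString(int source, int tag)
    {
      MPI_Status status;
      int recSize = 0;  // written by MPI_Recv, hence non-const
      MPI_Recv(&recSize, 1, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
      std::vector<char> buf(recSize);
      MPI_Recv(buf.data(), recSize, MPI_CHAR, source, tag + 100, MPI_COMM_WORLD, &status);
      return std::string(buf.begin(), buf.end());
    }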
double temps_debut=MPI_Wtime();
int const rank=MyGlobals::_Rank;
MPI_Comm newcomm;
- int const color=1;
+ int color=1;
int const rankMax=4;
if (rank>=rankMax)
color=MPI_UNDEFINED;
*/
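The color/rankMax fragment (apparently commented out here) is the standard prologue to MPI_Comm_split, and color cannot be const because ranks past the cutoff overwrite it with MPI_UNDEFINED. A minimal sketch of the complete pattern, assuming the elided code went on to split the communicator:

    #include <mpi.h>

    void workOnSubset()
    {
      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      int const rankMax = 4;
      int color = 1;              // reassigned below, so it must stay mutable
      if (rank >= rankMax)
        color = MPI_UNDEFINED;    // excluded ranks receive MPI_COMM_NULL
      MPI_Comm newcomm;
      MPI_Comm_split(MPI_COMM_WORLD, color, /*key=*/rank, &newcomm);
      if (newcomm != MPI_COMM_NULL)
      {
        // ... collective work restricted to ranks 0..rankMax-1 ...
        MPI_Comm_free(&newcomm);
      }
    }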
+#include "MEDPARTITIONER_Utils.hxx"
#include <iostream>
#include "MEDFileEntities.hxx"
#include "med.h"
#include "NormalizedGeometricTypes"
+#include "MEDLoader.hxx"
#include <string>
void checkDistribution(const MPI_Comm& com, mcIdType totalNumberOfElements, const std::vector<mcIdType>& distrib)
{
mcIdType nbEltsInDistribLoc = distrib.size();
- mcIdType const nbEltsInDistribTot = -1;
+ mcIdType nbEltsInDistribTot = -1;
#ifdef HAVE_MPI
MPI_Allreduce(&nbEltsInDistribLoc, &nbEltsInDistribTot, 1, MPI_LONG, MPI_SUM, com);
#else
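One caveat in checkDistribution: MPI_LONG only matches mcIdType on builds where mcIdType is a 64-bit long (typical LP64 Linux). Later hunks in this patch pair mcIdType buffers with MPI_ID_TYPE; assuming that macro is available here too, the safer spelling would be:

    MPI_Allreduce(&nbEltsInDistribLoc, &nbEltsInDistribTot, 1, MPI_ID_TYPE,
                  MPI_SUM, com);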
#include <string>
#include "MEDCouplingRefCountObject.hxx"
#include "NormalizedGeometricTypes"
+#include "MCIdType.hxx"
namespace MEDCoupling
{
// ArrayOfSendRequests for the call to SendRequestIds
int MPIAccess::sendRequestIdsSize()
{
- int const size = 0;
+ int size = 0;
for (int i = 0 ; i < _processor_group_size ; i++ )
size += (int)_send_requests[ i ].size() ;
return size ;
// ArrayOfRecvRequests for the call to RecvRequestIds
int MPIAccess::recvRequestIdsSize()
{
- int const size = 0 ;
+ int size = 0 ;
for (int i = 0 ; i < _processor_group_size ; i++ )
size += (int)_recv_requests[ i ].size() ;
return size ;
int MPItag = newRecvTag( datatype, source , aMethodIdent , false , RequestId ) ;
sts = _comm_interface.recv(buffer, count, datatype, source, MPItag,
*_intra_communicator , MPIStatus( RequestId ) ) ;
- int const outcount = 0 ;
+ int outcount = 0 ;
if ( sts == MPI_SUCCESS )
{
MPI_Datatype datatype2 = MPIDatatype( RequestId ) ;
{
return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
}
- int const sts = 0;
+ int sts = 0;
int target ;
int sendoffset = 0 ;
int recvoffset = 0 ;
return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
recvbuf, recvcounts, rdispls, recvtype ) ;
}
- int const sts = 0;
+ int sts = 0;
int target ;
int SendRequestId ;
int RecvRequestId ;
int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
void* recvbuf, int recvcount , MPI_Datatype recvtype )
{
- int const sts = 0;
+ int sts = 0;
int target ;
int sendoffset = 0 ;
int SendTimeRequestId ;
void* recvbuf, int* recvcounts, int* rdispls,
MPI_Datatype recvtype )
{
- int const sts = 0;
+ int sts = 0;
int target ;
int SendTimeRequestId ;
int SendDataRequestId ;
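All four allToAll* hunks above declare a mutable sts for the same reason: it accumulates return codes over a loop of point-to-point calls, so the earlier const could never have compiled. A schematic of the keep-first-error idiom (illustrative only; a real exchange pairs each send with a receive):

    int collectFirstError()
    {
      int groupSize = 0;
      MPI_Comm_size(MPI_COMM_WORLD, &groupSize);
      int sts = MPI_SUCCESS;      // accumulates, hence non-const
      int payload = 42;           // made-up data
      for (int target = 0; target < groupSize; ++target)
      {
        int one = MPI_Send(&payload, 1, MPI_INT, target, /*tag=*/0, MPI_COMM_WORLD);
        if (sts == MPI_SUCCESS)
          sts = one;              // remember the first non-success code
      }
      return sts;
    }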
#define __NONCOINCIDENTDEC_HXX__
#include "DEC.hxx"
+#include "ProcessorGroup.hxx"
struct _fvm_locator_t;
-using InterpolationMethod = enum {NN};
+typedef enum {NN} InterpolationMethod;
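This revert is required by the language, not just by taste: an alias-declaration (using X = ...) may not define a new type, so spelling an anonymous enum inside one is ill-formed, whereas a typedef may carry the definition. Sketch:

    // Ill-formed: an alias-declaration cannot define a type.
    // using InterpolationMethod = enum { NN };

    // Well-formed: a typedef may define the enum it names.
    typedef enum { NN } InterpolationMethod;

    // Also well-formed: define first, alias afterwards (names illustrative).
    enum InterpolationMethodTag { NearestNeighbour };
    using InterpolationMethodAlias = InterpolationMethodTag;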
namespace MEDCoupling
{
#include "MEDCouplingUMesh.hxx"
#include "InterpolationOptions.hxx"
#include "ElementLocator.hxx"
+#include "MEDCouplingFieldDouble.hxx"
#include <string>
{
std::size_t const grpSize=_group.size();
std::fill<int *>(count,count+grpSize,0);
- std::size_t const szz=0;
+ std::size_t szz=0;
int const myProcId=_group.myRank();
for(std::size_t i=0;i<_matrixes_st.size();i++)
{
#include "MEDCouplingMemArray.hxx"
#include "OverlapElementLocator.hxx"
+#include <vector>
+#include <map>
+
//#define DEC_DEBUG
namespace MEDCoupling
class MEDCouplingFieldDouble;
using namespace std;
- using SparseDoubleVec = int;
+ typedef map<mcIdType,double> SparseDoubleVec;
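Restoring the real typedef matters: OverlapMapping stores each sparse interpolation-matrix row as a map from global column id to coefficient, and the bogus `using SparseDoubleVec = int;` threw that structure away. A hypothetical illustration of how such a row is consumed (applyRow and the sample data are not from the sources):

    #include <cstddef>
    #include <map>
    #include <vector>
    #include "MCIdType.hxx"

    typedef std::map<mcIdType,double> SparseDoubleVec;

    // Sparse dot product of one matrix row with a dense field vector x.
    double applyRow(const SparseDoubleVec& row, const std::vector<double>& x)
    {
      double acc = 0.;
      for (const auto& idCoef : row)
        acc += idCoef.second * x[(std::size_t)idCoef.first];
      return acc;
    }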
/*!
* Internal class, not part of the public API.
double ParaFIELD::getVolumeIntegral(int icomp, bool isWAbs) const
{
CommInterface const comm_interface = _topology->getProcGroup()->getCommInterface();
- double const integral=_field->integral(icomp,isWAbs);
- double const total=0.;
+ double integral=_field->integral(icomp,isWAbs);
+ double total=0.;
const MPI_Comm* comm = (dynamic_cast<const MPIProcessorGroup*>(_topology->getProcGroup()))->getComm();
comm_interface.allReduce(&integral, &total, 1, MPI_DOUBLE, MPI_SUM, *comm);
return total;
}
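A note on the const removals here: total is an output of the reduction, and even integral, which is only read, must be non-const because the MPI-2 style bindings behind CommInterface take the send buffer as plain void*. The generic shape of the call:

    double localVal = 1.0;   // illustrative local contribution
    double globalSum = 0.;   // written by the reduction, hence non-const
    MPI_Allreduce(&localVal, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);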
MCAuto<DataArrayIdType> idxOfSameIds(aggregatedIdsSort->indexOfSameConsecutiveValueGroups());
//
- MCAuto<DataArrayIdType> const globalIdsOut(aggregatedIdsSort->buildUnique());
+ MCAuto<DataArrayIdType> globalIdsOut(aggregatedIdsSort->buildUnique());
MCAuto<MEDCouplingSkyLineArray> skOut(MEDCouplingSkyLineArray::New(indicesSorted,valuesSorted));
skOut = skOut->groupPacks(idxOfSameIds);//group partial packs coming from different procs
skOut = skOut->uniqueNotSortedByPack();//remove duplicates
int size;
ci.commSize(comm,&size);
std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]),nbOfElems2(new mcIdType[size]),nbOfElems3(new mcIdType[size]);
- mcIdType const nbOfNodeIdsLoc(globalNodeIds->getNumberOfTuples());
+ mcIdType nbOfNodeIdsLoc(globalNodeIds->getNumberOfTuples());
ci.allGather(&nbOfNodeIdsLoc,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
std::vector< MCAuto<DataArrayIdType> > tabs(size);
//store for each proc the local nodeids intercepted by current proc
int size;
ci.commSize(comm,&size);
std::unique_ptr<mcIdType[]> nbOfElems(new mcIdType[size]),nbOfElems2(new mcIdType[size]),nbOfElems3(new mcIdType[size]);
- mcIdType const nbOfNodeIdsLoc(globalNodeIds->getNumberOfTuples());
+ mcIdType nbOfNodeIdsLoc(globalNodeIds->getNumberOfTuples());
ci.allGather(&nbOfNodeIdsLoc,1,MPI_ID_TYPE,nbOfElems.get(),1,MPI_ID_TYPE,comm);
// loop so that no proc has to hold the node ids of all procs at once
int const nbOfCollectiveCalls = 1;// this parameter controls the memory peak
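On the nbOfCollectiveCalls comment: the intent is to split one big gather of node ids into several smaller passes so that no rank holds every other rank's ids at once. A hedged sketch of the slicing arithmetic only, reusing the hunk's variables (the exchange itself is elided):

    for (int pass = 0; pass < nbOfCollectiveCalls; ++pass)
    {
      // Each pass covers a contiguous slice of the local ids, bounding the
      // transient buffers to roughly 1/nbOfCollectiveCalls of the total.
      mcIdType begin = (pass * nbOfNodeIdsLoc) / nbOfCollectiveCalls;
      mcIdType end = ((pass + 1) * nbOfNodeIdsLoc) / nbOfCollectiveCalls;
      // ... gather and process ids in [begin, end) ...
    }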
#include <stdexcept>
#include <stdlib.h>
#include <string>
+#include <list>
#ifndef WIN32
#include <unistd.h>
{
string const master = filename_xml1;
- ostringstream const strstream;
+ ostringstream strstream;
strstream <<master<<rank+1<<".med";
string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
if (target_group->containsMyRank())
{
string const master= filename_xml2;
- ostringstream const strstream;
+ ostringstream strstream;
strstream << master<<(rank-nproc_source+1)<<".med";
string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
{
string const master = filename_xml1;
- ostringstream const strstream;
+ ostringstream strstream;
strstream <<master<<rank+1<<".med";
string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
if (target_group->containsMyRank())
{
string const master= filename_xml2;
- ostringstream const strstream;
+ ostringstream strstream;
strstream << master<<(rank-nproc_source+1)<<".med";
string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
{
string const master = filename_xml1;
- ostringstream const strstream;
+ ostringstream strstream;
strstream <<master<<rank+1<<".med";
std::string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
if (target_group->containsMyRank())
{
string const master= filename_xml2;
- ostringstream const strstream;
+ ostringstream strstream;
strstream << master << ".med";
std::string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
MPI_Barrier(MPI_COMM_WORLD);
double targetCoords[8]={ 0.,0., 1., 0., 0., 1., 1., 1. };
CommInterface const comm;
- int const grpIds[2]={0,1};
+ int grpIds[2]={0,1};
MPI_Group grp,group_world;
comm.commGroup(MPI_COMM_WORLD,&group_world);
comm.groupIncl(group_world,2,grpIds,&grp);
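grpIds loses its const because the wrapper, like pre-MPI-3 MPI_Group_incl, takes the rank list as plain int*. For reference, the raw-MPI equivalent of this fragment (assumes the job has at least 2 ranks):

    int grpIds[2] = {0, 1};
    MPI_Group group_world, grp;
    MPI_Comm pairComm = MPI_COMM_NULL;
    MPI_Comm_group(MPI_COMM_WORLD, &group_world);
    MPI_Group_incl(group_world, 2, grpIds, &grp);   // subgroup of ranks {0,1}
    MPI_Comm_create(MPI_COMM_WORLD, grp, &pairComm);
    // pairComm is MPI_COMM_NULL on ranks outside the subgroup.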
{
string const master = filename_xml1;
- ostringstream const strstream;
+ ostringstream strstream;
strstream <<master<<rank+1<<".med";
string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
if (target_group->containsMyRank())
{
string const master= filename_xml2;
- ostringstream const strstream;
+ ostringstream strstream;
strstream << master<<(rank-nproc_source+1)<<".med";
string const fName = INTERP_TEST::getResourceFile(strstream.str());
ostringstream meshname ;
meshRef=genLocMeshMultipleTypes3();
//checking that all 3 procs have correctly loaded their part
int equal = (int)mesh->isEqual(meshRef,1e-12);
- int const allEqual = -1;
+ int allEqual = -1;
MPI_Allreduce(&equal, &allEqual, 1, MPI_INT,MPI_SUM,MPI_COMM_WORLD);
CPPUNIT_ASSERT(allEqual==3);
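The SUM-then-compare idiom works because each of the 3 ranks contributes 0 or 1, so the assertion encodes the rank count. An equivalent check that avoids hard-coding it would reduce with MPI_LAND (same variables, sketch only):

    int equal = (int)mesh->isEqual(meshRef, 1e-12);
    int allEqual = 0;
    MPI_Allreduce(&equal, &allEqual, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
    CPPUNIT_ASSERT(allEqual == 1);  // true on every rank iff all parts matched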
if (source_group.containsMyRank()) {
string const master = filename_xml1;
- ostringstream const strstream;
+ ostringstream strstream;
strstream <<master<<rank+1<<".med";
ostringstream meshname;
meshname<< "Mesh_2_"<< rank+1;
// --- generic Main program from KERNEL_SRC/src/Basics/Test
+#include "MPIMainTest.hxx"