Also: documentation + typedefs to make code easier to read.
To come: more tests + new algo for job distribution.
/*!
* Returns a new MEDCouplingMesh constituted by some cells of the underlying mesh of \a
- * this filed, and returns ids of entities (nodes, cells, Gauss points) lying on the
+ * this field, and returns ids of entities (nodes, cells, Gauss points) lying on the
 * specified cells. The cells to include in the result mesh are specified by an array of
 * cell ids. The new mesh shares the coordinates array with the underlying mesh.
 * \param [in] start - an array of cell ids to include in the result mesh.
return ret.retn();
}
+DataArrayDouble *DataArrayDouble::selectByTupleId(const DataArrayInt & di) const
+{
+ return selectByTupleId(di.getConstPointer(), di.getConstPointer()+di.getNumberOfTuples());
+}
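+// Usage sketch (with hypothetical arrays 'vals' and 'ids'): callers can now write
+//   DataArrayDouble *sub=vals->selectByTupleId(*ids);
+// instead of spelling out the begin/end pointer pair themselves.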
+
/*!
 * Returns a shortened and permuted copy of \a this array. The new DataArrayDouble is
 * of size \a new2OldEnd - \a new2OldBg and its values are permuted as required by
MEDCOUPLING_EXPORT DataArrayDouble *renumberR(const int *new2Old) const;
MEDCOUPLING_EXPORT DataArrayDouble *renumberAndReduce(const int *old2New, int newNbOfTuple) const;
MEDCOUPLING_EXPORT DataArrayDouble *selectByTupleId(const int *new2OldBg, const int *new2OldEnd) const;
+ MEDCOUPLING_EXPORT DataArrayDouble *selectByTupleId(const DataArrayInt & di) const;
MEDCOUPLING_EXPORT DataArrayDouble *selectByTupleIdSafe(const int *new2OldBg, const int *new2OldEnd) const;
MEDCOUPLING_EXPORT DataArrayDouble *selectByTupleId2(int bg, int end2, int step) const;
MEDCOUPLING_EXPORT DataArray *selectByTupleRanges(const std::vector<std::pair<int,int> >& ranges) const;
int MEDCouplingRemapper::prepareInterpKernelOnly()
{
int meshInterpType=((int)_src_ft->getMesh()->getType()*16)+(int)_target_ft->getMesh()->getType();
+ // *** Remember:
+// typedef enum
+// {
+// UNSTRUCTURED = 5,
+// CARTESIAN = 7,
+// EXTRUDED = 8,
+// CURVE_LINEAR = 9,
+// SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED = 10,
+// SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED = 11,
+// IMAGE_GRID = 12
+// } MEDCouplingMeshType;
+
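+  // E.g. an UNSTRUCTURED (5) source with an UNSTRUCTURED target gives 5*16+5 = 85, while an
+  // UNSTRUCTURED source with a SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED (10) target gives 5*16+10 = 90.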
switch(meshInterpType)
{
- case 90:
- case 91:
- case 165:
- case 181:
- case 170:
- case 171:
- case 186:
- case 187:
- case 85://Unstructured-Unstructured
+ case 90: // UNSTRUCTURED - SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED
+ case 91: // UNSTRUCTURED - SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED
+ case 165: // SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED - UNSTRUCTURED
+ case 181: // SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED - UNSTRUCTURED
+ case 170: // SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED - SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED
+ case 171: // SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED - SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED
+ case 186: // SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED - SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED
+ case 187: // SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED - SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED
+ case 85: // UNSTRUCTURED - UNSTRUCTURED
return prepareInterpKernelOnlyUU();
- case 167:
- case 183:
- case 87://Unstructured-Cartesian
+ case 167: // SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED - CARTESIAN
+ case 183: // SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED - CARTESIAN
+ case 87: // UNSTRUCTURED - CARTESIAN
return prepareInterpKernelOnlyUC();
- case 122:
- case 123:
- case 117://Cartesian-Unstructured
+ case 122: // CARTESIAN - SINGLE_STATIC_GEO_TYPE_UNSTRUCTURED
+ case 123: // CARTESIAN - SINGLE_DYNAMIC_GEO_TYPE_UNSTRUCTURED
+ case 117: // CARTESIAN - UNSTRUCTURED
return prepareInterpKernelOnlyCU();
- case 119://Cartesian-Cartesian
+ case 119: // CARTESIAN - CARTESIAN
return prepareInterpKernelOnlyCC();
- case 136://Extruded-Extruded
+ case 136: // EXTRUDED - EXTRUDED
return prepareInterpKernelOnlyEE();
default:
throw INTERP_KERNEL::Exception("MEDCouplingRemapper::prepareInterpKernelOnly : Not managed type of meshes ! Dealt meshes type are : Unstructured<->Unstructured, Unstructured<->Cartesian, Cartesian<->Cartesian, Extruded<->Extruded !");
INTERP_KERNEL::Interpolation2D1D::DuplicateFacesType duplicateFaces=interpolation.retrieveDuplicateFaces();
if(!duplicateFaces.empty())
{
- std::ostringstream oss; oss << "An unexpected situation happend ! For the following 1D Cells are part of edges shared by 2D cells :\n";
+            std::ostringstream oss; oss << "An unexpected situation happened ! The following 1D Cells are part of edges shared by 2D cells :\n";
for(std::map<int,std::set<int> >::const_iterator it=duplicateFaces.begin();it!=duplicateFaces.end();it++)
{
oss << "1D Cell #" << (*it).first << " is part of common edge of following 2D cells ids : ";
/*!
* This method determines regarding \c _interp_matrix_pol attribute ( set by MEDCouplingRemapper::setInterpolationMatrixPolicy and by default equal
- * to IK_ONLY_PREFERED = 0 ) , which method will be applied. If \c true is returned the INTERP_KERNEL only method should be applied to \c false the \b not
+ * to IK_ONLY_PREFERED ), which method will be applied. If \c true is returned, the INTERP_KERNEL-only method should be applied; if \c false, the \b not
* only INTERP_KERNEL method should be applied.
*/
bool MEDCouplingRemapper::isInterpKernelOnlyOrNotOnly() const
double* local_bb = _domain_bounding_boxes+_union_group->myRank()*2*_local_cell_mesh_space_dim;
double* distant_bb = _domain_bounding_boxes+irank*2*_local_cell_mesh_space_dim;
+ const double eps = 1e-12;
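+      // E.g. in 1D: local box [0,1] and distant box [0.5,2] intersect (0.5<1+eps and 0<2+eps),
+      // while [0,1] and [1.1,2] do not (1.1>=1+eps).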
for (int idim=0; idim < _local_cell_mesh_space_dim; idim++)
{
- const double eps = 1e-12;
bool intersects = (distant_bb[idim*2]<local_bb[idim*2+1]+eps)
&& (local_bb[idim*2]<distant_bb[idim*2+1]+eps);
if (!intersects) return false;
\anchor ParaMEDMEMOverlapDECImgTest1
\image html OverlapDEC1.png "Example split of the source and target mesh among the 3 procs"
- \subsection ParaMEDMEMOverlapDECAlgoStep1 Step 1 : Bounding box exchange and global interaction
- between procs computation.
+ \subsection ParaMEDMEMOverlapDECAlgoStep1 Step 1 : Bounding box exchange and global interaction between procs computation.
In order to reduce as much as possible the amount of communications between distant processors,
every processor computes a bounding box for A and B. Then an AllToAll communication is performed
{
if(!isInGroup())
return ;
+  // Check the number of components of the field on both sides (for now, a void field/mesh on one proc is not allowed)
+ if (!_source_field || !_source_field->getField())
+ throw INTERP_KERNEL::Exception("OverlapDEC::synchronize(): currently, having a void source field on a proc is not allowed!");
+ if (!_target_field || !_target_field->getField())
+ throw INTERP_KERNEL::Exception("OverlapDEC::synchronize(): currently, having a void target field on a proc is not allowed!");
+ if (_target_field->getField()->getNumberOfComponents() != _source_field->getField()->getNumberOfComponents())
+ throw INTERP_KERNEL::Exception("OverlapDEC::synchronize(): source and target field have different number of components!");
delete _interpolation_matrix;
_interpolation_matrix=new OverlapInterpolationMatrix(_source_field,_target_field,*_group,*this,*this);
- OverlapElementLocator locator(_source_field,_target_field,*_group);
+ OverlapElementLocator locator(_source_field,_target_field,*_group, getBoundingBoxAdjustmentAbs());
locator.copyOptions(*this);
locator.exchangeMeshes(*_interpolation_matrix);
std::vector< std::pair<int,int> > jobs=locator.getToDoList();
const DataArrayInt *trgIds=locator.getTargetIds((*it).second);
_interpolation_matrix->addContribution(src,srcIds,srcMeth,(*it).first,trg,trgIds,trgMeth,(*it).second);
}
- _interpolation_matrix->prepare(locator.getProcsInInteraction());
+ _interpolation_matrix->prepare(locator.getProcsToSendFieldData());
_interpolation_matrix->computeDeno();
}
void synchronize();
void attachSourceLocalField(ParaFIELD *field, bool ownPt=false);
void attachTargetLocalField(ParaFIELD *field, bool ownPt=false);
- ProcessorGroup *getGrp() { return _group; }
+ ProcessorGroup *getGroup() { return _group; }
bool isInGroup() const;
private:
bool _own_group;
namespace ParaMEDMEM
{
- OverlapElementLocator::OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group)
+ const int OverlapElementLocator::START_TAG_MESH_XCH = 1140;
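+  // Tags START_TAG_MESH_XCH to START_TAG_MESH_XCH+4 are used by sendMesh()/receiveMesh() below:
+  // sizes, tiny mesh info, serialized mesh data (an int array then a double array), and the exchanged cell ids.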
+
+ OverlapElementLocator::OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField,
+ const ProcessorGroup& group, double epsAbs)
: _local_source_field(sourceField),
_local_target_field(targetField),
_local_source_mesh(0),
_local_target_mesh(0),
_domain_bounding_boxes(0),
- _group(group)
+ _group(group),
+ _epsAbs(epsAbs)
{
if(_local_source_field)
_local_source_mesh=_local_source_field->getSupport()->getCellMesh();
if(_local_target_field)
_local_target_mesh=_local_target_field->getSupport()->getCellMesh();
_comm=getCommunicator();
- computeBoundingBoxes();
+ computeBoundingBoxesAndTodoList();
}
OverlapElementLocator::~OverlapElementLocator()
return group->getComm();
}
- void OverlapElementLocator::computeBoundingBoxes()
+ void OverlapElementLocator::computeBoundingBoxesAndTodoList()
{
CommInterface comm_interface=_group.getCommInterface();
const MPIProcessorGroup* group=static_cast<const MPIProcessorGroup*> (&_group);
for(int j=0;j<_group.size();j++)
{
if(intersectsBoundingBox(i,j))
+ {
_proc_pairs[i].push_back(j);
+// std::cout << "(" << _group.myRank() << ") PROC pair: " << i << "," << j << std::endl;
+ }
}
-
    // OK, now let's assign the jobs to each proc of the group as evenly as possible
- std::vector< std::vector< std::pair<int,int> > > pairsToBeDonePerProc(_group.size());
+ std::vector< std::vector< ProcCouple > > pairsToBeDonePerProc(_group.size());
int i=0;
for(std::vector< std::vector< int > >::const_iterator it1=_proc_pairs.begin();it1!=_proc_pairs.end();it1++,i++)
for(std::vector< int >::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
{
if(pairsToBeDonePerProc[i].size()<=pairsToBeDonePerProc[*it2].size())//it includes the fact that i==*it2
- pairsToBeDonePerProc[i].push_back(std::pair<int,int>(i,*it2));
+ pairsToBeDonePerProc[i].push_back(ProcCouple(i,*it2));
else
- pairsToBeDonePerProc[*it2].push_back(std::pair<int,int>(i,*it2));
+ pairsToBeDonePerProc[*it2].push_back(ProcCouple(i,*it2));
}
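+    // E.g. with interacting couples (0,0),(0,1),(1,1): (0,0) goes to proc 0, (1,1) to proc 1, and
+    // (0,1) to whichever of the two currently has the shorter list (the source proc on a tie).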
    //Keeping the todo list of the current proc. _to_do_list contains a set of pairs in which _group.myRank() appears at least once.
    //This proc will be in charge of performing the interpolation for each element of '_to_do_list'.
int myProcId=_group.myRank();
_to_do_list=pairsToBeDonePerProc[myProcId];
- //Feeding now '_procs_to_send'. A same id can appears twice. The second parameter in pair means what to send true=source, false=target
- _procs_to_send.clear();
+// std::stringstream scout;
+// scout << "(" << myProcId << ") my TODO list is: ";
+// for (std::vector< ProcCouple >::const_iterator itdbg=_to_do_list.begin(); itdbg!=_to_do_list.end(); itdbg++)
+// scout << "(" << (*itdbg).first << "," << (*itdbg).second << ")";
+// std::cout << scout.str() << "\n";
+
+    // Now feeding '_procs_to_send*'. The same id can appear twice. The second parameter of the pair
+    // indicates what to send: true=source, false=target
+ _procs_to_send_mesh.clear();
+ _procs_to_send_field.clear();
for(int i=_group.size()-1;i>=0;i--)
- if(i!=myProcId)
- {
- const std::vector< std::pair<int,int> >& anRemoteProcToDoList=pairsToBeDonePerProc[i];
- for(std::vector< std::pair<int,int> >::const_iterator it=anRemoteProcToDoList.begin();it!=anRemoteProcToDoList.end();it++)
- {
- if((*it).first==myProcId)
- _procs_to_send.push_back(std::pair<int,bool>(i,true));
- if((*it).second==myProcId)
- _procs_to_send.push_back(std::pair<int,bool>(i,false));
- }
- }
+ {
+ const std::vector< ProcCouple >& anRemoteProcToDoList=pairsToBeDonePerProc[i];
+ for(std::vector< ProcCouple >::const_iterator it=anRemoteProcToDoList.begin();it!=anRemoteProcToDoList.end();it++)
+ {
+ if((*it).first==myProcId)
+ {
+ if(i!=myProcId)
+ _procs_to_send_mesh.push_back(Proc_SrcOrTgt(i,true));
+ _procs_to_send_field.push_back((*it).second);
+ }
+ if((*it).second==myProcId)
+ if(i!=myProcId)
+ _procs_to_send_mesh.push_back(Proc_SrcOrTgt(i,false));
+ }
+ }
}
/*!
{
int myProcId=_group.myRank();
    //starting to receive from every proc whose id is lower than myProcId.
- std::vector< std::pair<int,int> > toDoListForFetchRemaining;
- for(std::vector< std::pair<int,int> >::const_iterator it=_to_do_list.begin();it!=_to_do_list.end();it++)
+ std::vector< ProcCouple > toDoListForFetchRemaining;
+ for(std::vector< ProcCouple >::const_iterator it=_to_do_list.begin();it!=_to_do_list.end();it++)
{
- if((*it).first!=(*it).second)
+ int first = (*it).first, second = (*it).second;
+ if(first!=second)
{
- if((*it).first==myProcId)
+ if(first==myProcId)
{
- if((*it).second<myProcId)
- receiveRemoteMesh((*it).second,false);
+ if(second<myProcId)
+ receiveRemoteMeshFrom(second,false);
else
- toDoListForFetchRemaining.push_back(std::pair<int,int>((*it).first,(*it).second));
+ toDoListForFetchRemaining.push_back(ProcCouple(first,second));
}
else
{//(*it).second==myProcId
- if((*it).first<myProcId)
- receiveRemoteMesh((*it).first,true);
+ if(first<myProcId)
+ receiveRemoteMeshFrom(first,true);
else
- toDoListForFetchRemaining.push_back(std::pair<int,int>((*it).first,(*it).second));
+ toDoListForFetchRemaining.push_back(ProcCouple(first,second));
}
}
}
//sending source or target mesh to remote procs
- for(std::vector< std::pair<int,bool> >::const_iterator it2=_procs_to_send.begin();it2!=_procs_to_send.end();it2++)
+ for(std::vector< Proc_SrcOrTgt >::const_iterator it2=_procs_to_send_mesh.begin();it2!=_procs_to_send_mesh.end();it2++)
sendLocalMeshTo((*it2).first,(*it2).second,matrix);
//fetching remaining meshes
- for(std::vector< std::pair<int,int> >::const_iterator it=toDoListForFetchRemaining.begin();it!=toDoListForFetchRemaining.end();it++)
+ for(std::vector< ProcCouple >::const_iterator it=toDoListForFetchRemaining.begin();it!=toDoListForFetchRemaining.end();it++)
{
if((*it).first!=(*it).second)
{
if((*it).first==myProcId)
- receiveRemoteMesh((*it).second,false);
+ receiveRemoteMeshFrom((*it).second,false);
else//(*it).second==myProcId
- receiveRemoteMesh((*it).first,true);
+ receiveRemoteMeshFrom((*it).first,true);
}
}
}
int myProcId=_group.myRank();
if(myProcId==procId)
return _local_source_mesh;
- std::pair<int,bool> p(procId,true);
- std::map<std::pair<int,bool>, MEDCouplingAutoRefCountObjectPtr< MEDCouplingPointSet > >::const_iterator it=_remote_meshes.find(p);
+ Proc_SrcOrTgt p(procId,true);
+ std::map<Proc_SrcOrTgt, AutoMCPointSet >::const_iterator it=_remote_meshes.find(p);
return (*it).second;
}
int myProcId=_group.myRank();
if(myProcId==procId)
return 0;
- std::pair<int,bool> p(procId,true);
- std::map<std::pair<int,bool>, MEDCouplingAutoRefCountObjectPtr< DataArrayInt > >::const_iterator it=_remote_elems.find(p);
+ Proc_SrcOrTgt p(procId,true);
+ std::map<Proc_SrcOrTgt, AutoDAInt >::const_iterator it=_remote_elems.find(p);
return (*it).second;
}
int myProcId=_group.myRank();
if(myProcId==procId)
return _local_target_mesh;
- std::pair<int,bool> p(procId,false);
- std::map<std::pair<int,bool>, MEDCouplingAutoRefCountObjectPtr< MEDCouplingPointSet > >::const_iterator it=_remote_meshes.find(p);
+ Proc_SrcOrTgt p(procId,false);
+ std::map<Proc_SrcOrTgt, AutoMCPointSet >::const_iterator it=_remote_meshes.find(p);
return (*it).second;
}
int myProcId=_group.myRank();
if(myProcId==procId)
return 0;
- std::pair<int,bool> p(procId,false);
- std::map<std::pair<int,bool>, MEDCouplingAutoRefCountObjectPtr< DataArrayInt > >::const_iterator it=_remote_elems.find(p);
+ Proc_SrcOrTgt p(procId,false);
+ std::map<Proc_SrcOrTgt, AutoDAInt >::const_iterator it=_remote_elems.find(p);
return (*it).second;
}
+
bool OverlapElementLocator::intersectsBoundingBox(int isource, int itarget) const
{
const double *source_bb=_domain_bounding_boxes+isource*2*2*_local_space_dim;
for (int idim=0; idim < _local_space_dim; idim++)
{
- const double eps = -1e-12;//tony to change
- bool intersects = (target_bb[idim*2]<source_bb[idim*2+1]+eps)
- && (source_bb[idim*2]<target_bb[idim*2+1]+eps);
+ bool intersects = (target_bb[idim*2]<source_bb[idim*2+1]+_epsAbs)
+ && (source_bb[idim*2]<target_bb[idim*2+1]+_epsAbs);
if (!intersects)
return false;
}
}
/*!
- * This methods sends local source if 'sourceOrTarget'==True to proc 'procId'.
- * This methods sends local target if 'sourceOrTarget'==False to proc 'procId'.
+   * This method sends (part of) the local source if 'sourceOrTarget'==True to proc 'procId'.
+   * This method sends (part of) the local target if 'sourceOrTarget'==False to proc 'procId'.
*
   * This method also prepares the matrix for assembly and for the future matrix-vector computation.
*/
const double *distant_bb=0;
MEDCouplingPointSet *local_mesh=0;
const ParaFIELD *field=0;
- if(sourceOrTarget)//source for local but target for distant
+ if(sourceOrTarget)//source for local mesh but target for distant mesh
{
distant_bb=_domain_bounding_boxes+procId*2*2*_local_space_dim+2*_local_space_dim;
local_mesh=_local_source_mesh;
local_mesh=_local_target_mesh;
field=_local_target_field;
}
- MEDCouplingAutoRefCountObjectPtr<DataArrayInt> elems=local_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment());
- DataArrayInt *idsToSend;
- MEDCouplingPointSet *send_mesh=static_cast<MEDCouplingPointSet *>(field->getField()->buildSubMeshData(elems->begin(),elems->end(),idsToSend));
+ AutoDAInt elems=local_mesh->getCellsInBoundingBox(distant_bb,getBoundingBoxAdjustment());
+ DataArrayInt *old2new_map;
+ MEDCouplingPointSet *send_mesh=static_cast<MEDCouplingPointSet *>(field->getField()->buildSubMeshData(elems->begin(),elems->end(),old2new_map));
if(sourceOrTarget)
- matrix.keepTracksOfSourceIds(procId,idsToSend);//Case#1 in Step2 of main algorithm.
+ matrix.keepTracksOfSourceIds(procId,old2new_map);
else
- matrix.keepTracksOfTargetIds(procId,idsToSend);//Case#0 in Step2 of main algorithm.
- sendMesh(procId,send_mesh,idsToSend);
+ matrix.keepTracksOfTargetIds(procId,old2new_map);
+ sendMesh(procId,send_mesh,old2new_map);
send_mesh->decrRef();
- idsToSend->decrRef();
+ old2new_map->decrRef();
}
/*!
   * This method receives the remote source mesh from proc 'procId' if sourceOrTarget==True
   * This method receives the remote target mesh from proc 'procId' if sourceOrTarget==False
*/
- void OverlapElementLocator::receiveRemoteMesh(int procId, bool sourceOrTarget)
+ void OverlapElementLocator::receiveRemoteMeshFrom(int procId, bool sourceOrTarget)
{
- DataArrayInt *da=0;
+ DataArrayInt *old2new_map=0;
MEDCouplingPointSet *m=0;
- receiveMesh(procId,m,da);
- std::pair<int,bool> p(procId,sourceOrTarget);
+ receiveMesh(procId,m,old2new_map);
+ Proc_SrcOrTgt p(procId,sourceOrTarget);
_remote_meshes[p]=m;
- _remote_elems[p]=da;
+ _remote_elems[p]=old2new_map;
}
void OverlapElementLocator::sendMesh(int procId, const MEDCouplingPointSet *mesh, const DataArrayInt *idsToSend) const
{
CommInterface comInterface=_group.getCommInterface();
+
// First stage : exchanging sizes
vector<double> tinyInfoLocalD;//tinyInfoLocalD not used for the moment
vector<int> tinyInfoLocal;
int lgth[2];
lgth[0]=tinyInfoLocal.size();
lgth[1]=idsToSend->getNbOfElems();
- comInterface.send(&lgth,2,MPI_INT,procId,1140,*_comm);
- comInterface.send(&tinyInfoLocal[0],tinyInfoLocal.size(),MPI_INT,procId,1141,*comm);
+ comInterface.send(&lgth,2,MPI_INT,procId,START_TAG_MESH_XCH,*_comm);
+ comInterface.send(&tinyInfoLocal[0],tinyInfoLocal.size(),MPI_INT,procId,START_TAG_MESH_XCH+1,*comm);
//
DataArrayInt *v1Local=0;
DataArrayDouble *v2Local=0;
mesh->serialize(v1Local,v2Local);
- comInterface.send(v1Local->getPointer(),v1Local->getNbOfElems(),MPI_INT,procId,1142,*comm);
- comInterface.send(v2Local->getPointer(),v2Local->getNbOfElems(),MPI_DOUBLE,procId,1143,*comm);
+ comInterface.send(v1Local->getPointer(),v1Local->getNbOfElems(),MPI_INT,procId,START_TAG_MESH_XCH+2,*comm);
+ comInterface.send(v2Local->getPointer(),v2Local->getNbOfElems(),MPI_DOUBLE,procId,START_TAG_MESH_XCH+3,*comm);
//finished for mesh, ids now
- comInterface.send(const_cast<int *>(idsToSend->getConstPointer()),lgth[1],MPI_INT,procId,1144,*comm);
+ comInterface.send(const_cast<int *>(idsToSend->getConstPointer()),lgth[1],MPI_INT,procId,START_TAG_MESH_XCH+4,*comm);
//
v1Local->decrRef();
v2Local->decrRef();
MPI_Status status;
const MPI_Comm *comm=getCommunicator();
CommInterface comInterface=_group.getCommInterface();
- comInterface.recv(lgth,2,MPI_INT,procId,1140,*_comm,&status);
+ comInterface.recv(lgth,2,MPI_INT,procId,START_TAG_MESH_XCH,*_comm,&status);
std::vector<int> tinyInfoDistant(lgth[0]);
ids=DataArrayInt::New();
ids->alloc(lgth[1],1);
- comInterface.recv(&tinyInfoDistant[0],lgth[0],MPI_INT,procId,1141,*comm,&status);
+ comInterface.recv(&tinyInfoDistant[0],lgth[0],MPI_INT,procId,START_TAG_MESH_XCH+1,*comm,&status);
mesh=MEDCouplingPointSet::BuildInstanceFromMeshType((MEDCouplingMeshType)tinyInfoDistant[0]);
std::vector<std::string> unusedTinyDistantSts;
vector<double> tinyInfoDistantD(1);//tinyInfoDistantD not used for the moment
DataArrayInt *v1Distant=DataArrayInt::New();
DataArrayDouble *v2Distant=DataArrayDouble::New();
mesh->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
- comInterface.recv(v1Distant->getPointer(),v1Distant->getNbOfElems(),MPI_INT,procId,1142,*comm,&status);
- comInterface.recv(v2Distant->getPointer(),v2Distant->getNbOfElems(),MPI_DOUBLE,procId,1143,*comm,&status);
+ comInterface.recv(v1Distant->getPointer(),v1Distant->getNbOfElems(),MPI_INT,procId,START_TAG_MESH_XCH+2,*comm,&status);
+ comInterface.recv(v2Distant->getPointer(),v2Distant->getNbOfElems(),MPI_DOUBLE,procId,START_TAG_MESH_XCH+3,*comm,&status);
mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
//finished for mesh, ids now
-    comInterface.recv(ids->getPointer(),lgth[1],MPI_INT,procId,1144,*comm,&status);
+    comInterface.recv(ids->getPointer(),lgth[1],MPI_INT,procId,START_TAG_MESH_XCH+4,*comm,&status);
class ProcessorGroup;
class OverlapInterpolationMatrix;
+  typedef std::pair<int,int> ProcCouple; // a couple of proc IDs, typically used to define an exchange between 2 procs
+
class OverlapElementLocator : public INTERP_KERNEL::InterpolationOptions
{
public:
- OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group);
+ OverlapElementLocator(const ParaFIELD *sourceField, const ParaFIELD *targetField, const ProcessorGroup& group,double epsAbs);
virtual ~OverlapElementLocator();
const MPI_Comm *getCommunicator() const;
void exchangeMeshes(OverlapInterpolationMatrix& matrix);
std::vector< std::pair<int,int> > getToDoList() const { return _to_do_list; }
- std::vector< std::vector< int > > getProcsInInteraction() const { return _proc_pairs; }
+ std::vector< int > getProcsToSendFieldData() const { return _procs_to_send_field; }
std::string getSourceMethod() const;
std::string getTargetMethod() const;
const MEDCouplingPointSet *getSourceMesh(int procId) const;
const MEDCouplingPointSet *getTargetMesh(int procId) const;
const DataArrayInt *getTargetIds(int procId) const;
private:
- void computeBoundingBoxes();
+ void computeBoundingBoxesAndTodoList();
bool intersectsBoundingBox(int i, int j) const;
void sendLocalMeshTo(int procId, bool sourceOrTarget, OverlapInterpolationMatrix& matrix) const;
- void receiveRemoteMesh(int procId, bool sourceOrTarget);
+ void receiveRemoteMeshFrom(int procId, bool sourceOrTarget);
void sendMesh(int procId, const MEDCouplingPointSet *mesh, const DataArrayInt *idsToSend) const;
void receiveMesh(int procId, MEDCouplingPointSet* &mesh, DataArrayInt *&ids) const;
private:
+ typedef MEDCouplingAutoRefCountObjectPtr< MEDCouplingPointSet > AutoMCPointSet;
+ typedef MEDCouplingAutoRefCountObjectPtr< DataArrayInt > AutoDAInt;
+ typedef std::pair<int,bool> Proc_SrcOrTgt; // a key indicating a proc ID and whether the data is for source mesh/field or target mesh/field
+
+ static const int START_TAG_MESH_XCH;
+
const ParaFIELD *_local_source_field;
const ParaFIELD *_local_target_field;
+
int _local_space_dim;
MEDCouplingPointSet *_local_source_mesh;
MEDCouplingPointSet *_local_target_mesh;
- std::vector<MEDCouplingPointSet*> _distant_cell_meshes;
- std::vector<MEDCouplingPointSet*> _distant_face_meshes;
- //! of size _group.size(). Contains for each source proc i, the ids of proc j the targets interact with. This vector is common for all procs in _group.
+
+    /*! Of size _group.size(). For each source proc i, contains the ids of the procs j whose targets
+     interact with i's source. This vector is common to all procs in _group. */
std::vector< std::vector< int > > _proc_pairs;
- //! list of interpolations couple to be done
- std::vector< std::pair<int,int> > _to_do_list;
- std::vector< std::pair<int,bool> > _procs_to_send;
- std::map<std::pair<int,bool>, MEDCouplingAutoRefCountObjectPtr< MEDCouplingPointSet > > _remote_meshes;
- std::map<std::pair<int,bool>, MEDCouplingAutoRefCountObjectPtr< DataArrayInt > > _remote_elems;
+    //! list of the interpolation couples to be done by this proc only. This is a simple extraction of the local variable pairsToBeDonePerProc in computeBoundingBoxesAndTodoList()
+ std::vector< ProcCouple > _to_do_list;
+ //! list of procs the local proc will have to send mesh data to:
+ std::vector< Proc_SrcOrTgt > _procs_to_send_mesh;
+ /*! list of procs the local proc will have to send field data to for the final matrix-vector computation:
+ * This can be different from _procs_to_send_mesh because interpolation matrix bits are computed on a potentially
+ * different proc than the target one. */
+ std::vector< int > _procs_to_send_field;
+ //! Set of distant meshes
+ std::map< Proc_SrcOrTgt, AutoMCPointSet > _remote_meshes;
+ //! Set of cell ID mappings for the above distant meshes (because only part of the meshes are exchanged)
+ std::map< Proc_SrcOrTgt, AutoDAInt > _remote_elems;
double* _domain_bounding_boxes;
- const ProcessorGroup& _group;
+ //! bounding box absolute adjustment
+ double _epsAbs;
+
std::vector<int> _distant_proc_ids;
+
+ const ProcessorGroup& _group;
const MPI_Comm *_comm;
- //Attributes only used by lazy side
- //std::vector<double> _values_added;
- //std::vector< std::vector<int> > _ids_per_working_proc;
- //std::vector< std::vector<int> > _ids_per_working_proc3;
- //std::vector< std::vector<double> > _values_per_working_proc;
};
}
_mapping(group),
_group(group)
{
- int nbelems = source_field->getField()->getNumberOfTuples();
- _row_offsets.resize(nbelems+1);
- _coeffs.resize(nbelems);
- _target_volume.resize(nbelems);
}
void OverlapInterpolationMatrix::keepTracksOfSourceIds(int procId, DataArrayInt *ids)
{
}
+ // TODO? Merge with MEDCouplingRemapper::prepareInterpKernelOnlyUU() ?
+  /*!
+   * @param srcIds is null if the source mesh is on the local proc
+   * @param trgIds is null if the target mesh is on the local proc
+   *
+   * At least one of the two is null (both can be null at once)
+   */
void OverlapInterpolationMatrix::addContribution(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId,
const MEDCouplingPointSet *trg, const DataArrayInt *trgIds, const std::string& trgMeth, int trgProcId)
{
std::string interpMethod(srcMeth);
interpMethod+=trgMeth;
//creating the interpolator structure
- vector<map<int,double> > surfaces;
+ vector<SparseDoubleVec > sparse_matrix_part;
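+  // (SparseDoubleVec is one of the new typedefs; presumably std::map<int,double>, mapping a
+  // source/column id to its coefficient.)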
int colSize=0;
//computation of the intersection volumes between source and target elements
const MEDCouplingUMesh *trgC=dynamic_cast<const MEDCouplingUMesh *>(trg);
{
MEDCouplingNormalizedUnstructuredMesh<2,2> target_mesh_wrapper(trgC);
INTERP_KERNEL::Interpolation2D interpolation(*this);
- colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,surfaces,trgMeth);
+ colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,sparse_matrix_part,trgMeth);
}
else if(trgC->getMeshDimension()==3 && trgC->getSpaceDimension()==3)
{
MEDCouplingNormalizedUnstructuredMesh<3,3> target_mesh_wrapper(trgC);
INTERP_KERNEL::Interpolation3D interpolation(*this);
- colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,surfaces,trgMeth);
+ colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,sparse_matrix_part,trgMeth);
}
else if(trgC->getMeshDimension()==2 && trgC->getSpaceDimension()==3)
{
MEDCouplingNormalizedUnstructuredMesh<3,2> target_mesh_wrapper(trgC);
INTERP_KERNEL::Interpolation3DSurf interpolation(*this);
- colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,surfaces,trgMeth);
+ colSize=interpolation.fromIntegralUniform(target_mesh_wrapper,sparse_matrix_part,trgMeth);
}
else
throw INTERP_KERNEL::Exception("No para interpolation available for the given mesh and space dimension of source mesh to -1D targetMesh");
{
MEDCouplingNormalizedUnstructuredMesh<2,2> local_mesh_wrapper(srcC);
INTERP_KERNEL::Interpolation2D interpolation(*this);
- colSize=interpolation.toIntegralUniform(local_mesh_wrapper,surfaces,srcMeth);
+ colSize=interpolation.toIntegralUniform(local_mesh_wrapper,sparse_matrix_part,srcMeth);
}
else if(srcC->getMeshDimension()==3 && srcC->getSpaceDimension()==3)
{
MEDCouplingNormalizedUnstructuredMesh<3,3> local_mesh_wrapper(srcC);
INTERP_KERNEL::Interpolation3D interpolation(*this);
- colSize=interpolation.toIntegralUniform(local_mesh_wrapper,surfaces,srcMeth);
+ colSize=interpolation.toIntegralUniform(local_mesh_wrapper,sparse_matrix_part,srcMeth);
}
else if(srcC->getMeshDimension()==2 && srcC->getSpaceDimension()==3)
{
MEDCouplingNormalizedUnstructuredMesh<3,2> local_mesh_wrapper(srcC);
INTERP_KERNEL::Interpolation3DSurf interpolation(*this);
- colSize=interpolation.toIntegralUniform(local_mesh_wrapper,surfaces,srcMeth);
+ colSize=interpolation.toIntegralUniform(local_mesh_wrapper,sparse_matrix_part,srcMeth);
}
else
throw INTERP_KERNEL::Exception("No para interpolation available for the given mesh and space dimension of distant mesh to -1D sourceMesh");
MEDCouplingNormalizedUnstructuredMesh<3,3> source_wrapper(srcC);
INTERP_KERNEL::Interpolation3D2D interpolator (*this);
- colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod);
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,sparse_matrix_part,interpMethod);
}
else if ( src->getMeshDimension() == 3 && trg->getMeshDimension() == 2
&& trg->getSpaceDimension() == 3 && src->getSpaceDimension() == 3 )
MEDCouplingNormalizedUnstructuredMesh<3,3> source_wrapper(srcC);
INTERP_KERNEL::Interpolation3D2D interpolator (*this);
- vector<map<int,double> > surfacesTranspose;
- colSize=interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfaces,interpMethod);//not a bug target in source.
- TransposeMatrix(surfacesTranspose,colSize,surfaces);
- colSize=surfacesTranspose.size();
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ vector<SparseDoubleVec > matrixTranspose;
+      colSize=interpolator.interpolateMeshes(target_wrapper,source_wrapper,matrixTranspose,interpMethod);//not a bug: target in source.
+ TransposeMatrix(matrixTranspose,colSize,sparse_matrix_part);
+ colSize=matrixTranspose.size();
}
else if ( src->getMeshDimension() == 1 && trg->getMeshDimension() == 2
&& trg->getSpaceDimension() == 2 && src->getSpaceDimension() == 2 )
MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(srcC);
INTERP_KERNEL::Interpolation2D1D interpolator (*this);
- colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod);
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,sparse_matrix_part,interpMethod);
}
else if ( src->getMeshDimension() == 2 && trg->getMeshDimension() == 1
&& trg->getSpaceDimension() == 2 && src->getSpaceDimension() == 2 )
MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(srcC);
INTERP_KERNEL::Interpolation2D1D interpolator (*this);
- vector<map<int,double> > surfacesTranspose;
- colSize=interpolator.interpolateMeshes(target_wrapper,source_wrapper,surfacesTranspose,interpMethod);//not a bug target in source.
- TransposeMatrix(surfacesTranspose,colSize,surfaces);
- colSize=surfacesTranspose.size();
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ vector<SparseDoubleVec > matrixTranspose;
+      colSize=interpolator.interpolateMeshes(target_wrapper,source_wrapper,matrixTranspose,interpMethod);//not a bug: target in source.
+ TransposeMatrix(matrixTranspose,colSize,sparse_matrix_part);
+ colSize=matrixTranspose.size();
}
else if (trg->getMeshDimension() != _source_support->getMeshDimension())
{
MEDCouplingNormalizedUnstructuredMesh<1,1> source_wrapper(srcC);
INTERP_KERNEL::Interpolation1D interpolation(*this);
- colSize=interpolation.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod);
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ colSize=interpolation.interpolateMeshes(source_wrapper,target_wrapper,sparse_matrix_part,interpMethod);
}
else if( trg->getMeshDimension() == 1
&& trg->getSpaceDimension() == 2 )
MEDCouplingNormalizedUnstructuredMesh<2,1> source_wrapper(srcC);
INTERP_KERNEL::Interpolation2DCurve interpolation(*this);
- colSize=interpolation.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod);
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ colSize=interpolation.interpolateMeshes(source_wrapper,target_wrapper,sparse_matrix_part,interpMethod);
}
else if ( trg->getMeshDimension() == 2
&& trg->getSpaceDimension() == 3 )
MEDCouplingNormalizedUnstructuredMesh<3,2> source_wrapper(srcC);
INTERP_KERNEL::Interpolation3DSurf interpolator (*this);
- colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod);
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,sparse_matrix_part,interpMethod);
}
else if ( trg->getMeshDimension() == 2
&& trg->getSpaceDimension() == 2)
MEDCouplingNormalizedUnstructuredMesh<2,2> source_wrapper(srcC);
INTERP_KERNEL::Interpolation2D interpolator (*this);
- colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod);
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,sparse_matrix_part,interpMethod);
}
else if ( trg->getMeshDimension() == 3
&& trg->getSpaceDimension() == 3 )
MEDCouplingNormalizedUnstructuredMesh<3,3> source_wrapper(srcC);
INTERP_KERNEL::Interpolation3D interpolator (*this);
- colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,surfaces,interpMethod);
- target_wrapper.releaseTempArrays();
- source_wrapper.releaseTempArrays();
+ colSize=interpolator.interpolateMeshes(source_wrapper,target_wrapper,sparse_matrix_part,interpMethod);
}
else
{
- throw INTERP_KERNEL::Exception("no interpolator exists for these mesh and space dimensions ");
+ throw INTERP_KERNEL::Exception("No interpolator exists for these mesh and space dimensions!");
}
- bool needSourceSurf=isSurfaceComputationNeeded(srcMeth);
- MEDCouplingFieldDouble *source_triangle_surf=0;
- if(needSourceSurf)
- source_triangle_surf=src->getMeasureField(getMeasureAbsStatus());
- //
- fillDistributedMatrix(surfaces,srcIds,srcProcId,trgIds,trgProcId);
- //
- if(needSourceSurf)
- source_triangle_surf->decrRef();
+ /* Fill distributed matrix:
+ In sparse_matrix_part rows refer to target, and columns (=first param of map in SparseDoubleVec)
+ refer to source.
+ */
+ _mapping.addContributionST(sparse_matrix_part,srcIds,srcProcId,trgIds,trgProcId);
}
- /*!
- * \b res rows refers to target and column (first param of map) to source.
- */
- void OverlapInterpolationMatrix::fillDistributedMatrix(const std::vector< std::map<int,double> >& res,
- const DataArrayInt *srcIds, int srcProc,
- const DataArrayInt *trgIds, int trgProc)
- {
- _mapping.addContributionST(res,srcIds,srcProc,trgIds,trgProc);
- }
/*!
- * 'procsInInteraction' gives the global view of interaction between procs.
- * In 'procsInInteraction' for a proc with id i, is in interaction with procs listed in procsInInteraction[i]
+ * 'procsToSendField' gives the list of procs field data has to be sent to.
+ * See OverlapElementLocator::computeBoundingBoxesAndTodoList()
*/
- void OverlapInterpolationMatrix::prepare(const std::vector< std::vector<int> >& procsInInteraction)
+ void OverlapInterpolationMatrix::prepare(const std::vector< int >& procsToSendField)
{
if(_source_support)
- _mapping.prepare(procsInInteraction,_target_field->getField()->getNumberOfTuplesExpected());
+ _mapping.prepare(procsToSendField,_target_field->getField()->getNumberOfTuplesExpected());
else
- _mapping.prepare(procsInInteraction,0);
+ _mapping.prepare(procsToSendField,0);
}
void OverlapInterpolationMatrix::computeDeno()
_mapping.transposeMultiply(_target_field->getField(),_source_field->getField());
}
- bool OverlapInterpolationMatrix::isSurfaceComputationNeeded(const std::string& method) const
- {
- return method=="P0";
- }
+// bool OverlapInterpolationMatrix::isSurfaceComputationNeeded(const std::string& method) const
+// {
+// return method=="P0";
+// }
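+  /*!
+   * Transposition sketch on the sparse format: with matIn = { {(2,0.5)}, {(0,1.0)} } (2 rows)
+   * and nbColsMatIn=3, matOut becomes { {(1,1.0)}, {}, {(0,0.5)} }, i.e. each coefficient
+   * matIn[row][col] ends up in matOut[col][row].
+   */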
- void OverlapInterpolationMatrix::TransposeMatrix(const std::vector<std::map<int,double> >& matIn, int nbColsMatIn, std::vector<std::map<int,double> >& matOut)
+ void OverlapInterpolationMatrix::TransposeMatrix(const std::vector<SparseDoubleVec >& matIn,
+ int nbColsMatIn, std::vector<SparseDoubleVec >& matOut)
{
matOut.resize(nbColsMatIn);
int id=0;
- for(std::vector<std::map<int,double> >::const_iterator iter1=matIn.begin();iter1!=matIn.end();iter1++,id++)
- for(std::map<int,double>::const_iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++)
+ for(std::vector<SparseDoubleVec >::const_iterator iter1=matIn.begin();iter1!=matIn.end();iter1++,id++)
+ for(SparseDoubleVec::const_iterator iter2=(*iter1).begin();iter2!=(*iter1).end();iter2++)
matOut[(*iter2).first][id]=(*iter2).second;
}
}
void addContribution(const MEDCouplingPointSet *src, const DataArrayInt *srcIds, const std::string& srcMeth, int srcProcId,
const MEDCouplingPointSet *trg, const DataArrayInt *trgIds, const std::string& trgMeth, int trgProcId);
- void prepare(const std::vector< std::vector<int> >& procsInInteraction);
+ void prepare(const std::vector< int > & procsToSendField);
void computeDeno();
void transposeMultiply();
virtual ~OverlapInterpolationMatrix();
-#if 0
- void addContribution(MEDCouplingPointSet& distant_support, int iproc_distant,
- const int* distant_elems, const std::string& srcMeth, const std::string& targetMeth);
- void finishContributionW(ElementLocator& elementLocator);
- void finishContributionL(ElementLocator& elementLocator);
- void multiply(MEDCouplingFieldDouble& field) const;
- void transposeMultiply(MEDCouplingFieldDouble& field)const;
- void prepare();
- int getNbRows() const { return _row_offsets.size(); }
- MPIAccessDEC* getAccessDEC() { return _mapping.getAccessDEC(); }
private:
- void computeConservVolDenoW(ElementLocator& elementLocator);
- void computeIntegralDenoW(ElementLocator& elementLocator);
- void computeRevIntegralDenoW(ElementLocator& elementLocator);
- void computeGlobConstraintDenoW(ElementLocator& elementLocator);
- void computeConservVolDenoL(ElementLocator& elementLocator);
- void computeIntegralDenoL(ElementLocator& elementLocator);
- void computeRevIntegralDenoL(ElementLocator& elementLocator);
-
- void computeLocalColSum(std::vector<double>& res) const;
- void computeLocalRowSum(const std::vector<int>& distantProcs, std::vector<std::vector<int> >& resPerProcI,
- std::vector<std::vector<double> >& resPerProcD) const;
- void computeGlobalRowSum(ElementLocator& elementLocator, std::vector<std::vector<double> >& denoStrorage, std::vector<std::vector<double> >& denoStrorageInv);
- void computeGlobalColSum(std::vector<std::vector<double> >& denoStrorage);
- void resizeGlobalColSum(std::vector<std::vector<double> >& denoStrorage);
- void fillDSFromVM(int iproc_distant, const int* distant_elems, const std::vector< std::map<int,double> >& values, MEDCouplingFieldDouble *surf);
- void serializeMe(std::vector< std::vector< std::map<int,double> > >& data1, std::vector<int>& data2) const;
- void initialize();
- void findAdditionnalElements(ElementLocator& elementLocator, std::vector<std::vector<int> >& elementsToAdd,
- const std::vector<std::vector<int> >& resPerProcI, const std::vector<std::vector<int> >& globalIdsPartial);
- void addGhostElements(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& elementsToAdd);
- int mergePolicies(const std::vector<int>& policyPartial);
- void mergeRowSum(const std::vector< std::vector<double> >& rowsPartialSumD, const std::vector< std::vector<int> >& globalIdsPartial,
- std::vector<int>& globalIdsLazySideInteraction, std::vector<double>& sumCorresponding);
- void mergeRowSum2(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD,
- const std::vector<int>& globalIdsLazySideInteraction, const std::vector<double>& sumCorresponding);
- void mergeRowSum3(const std::vector< std::vector<int> >& globalIdsPartial, std::vector< std::vector<double> >& rowsPartialSumD);
- void mergeCoeffs(const std::vector<int>& procsInInteraction, const std::vector< std::vector<int> >& rowsPartialSumI,
- const std::vector<std::vector<int> >& globalIdsPartial, std::vector<std::vector<double> >& denoStrorageInv);
- void divideByGlobalRowSum(const std::vector<int>& distantProcs, const std::vector<std::vector<int> >& resPerProcI,
- const std::vector<std::vector<double> >& resPerProcD, std::vector<std::vector<double> >& deno);
-#endif
- private:
- bool isSurfaceComputationNeeded(const std::string& method) const;
- void fillDistributedMatrix(const std::vector< std::map<int,double> >& res,
- const DataArrayInt *srcIds, int srcProc,
- const DataArrayInt *trgIds, int trgProc);
- static void TransposeMatrix(const std::vector<std::map<int,double> >& matIn, int nbColsMatIn, std::vector<std::map<int,double> >& matOut);
+
+ static void TransposeMatrix(const std::vector<SparseDoubleVec>& matIn, int nbColsMatIn,
+ std::vector<SparseDoubleVec>& matOut);
+// bool isSurfaceComputationNeeded(const std::string& method) const;
private:
- ParaMEDMEM::ParaFIELD *_source_field;
- ParaMEDMEM::ParaFIELD *_target_field;
- std::vector<int> _row_offsets;
- std::map<std::pair<int,int>, int > _col_offsets;
+ ParaFIELD *_source_field;
+ ParaFIELD *_target_field;
MEDCouplingPointSet *_source_support;
MEDCouplingPointSet *_target_support;
- OverlapMapping _mapping;
+ OverlapMapping _mapping;
const ProcessorGroup& _group;
- std::vector< std::vector<double> > _target_volume;
- std::vector<std::vector<std::pair<int,double> > > _coeffs;
- std::vector<std::vector<double> > _deno_multiply;
- std::vector<std::vector<double> > _deno_reverse_multiply;
};
}
}
/*!
- * This method keeps tracks of source ids to know in step 6 of main algorithm, which tuple ids to send away.
- * This method incarnates item#1 of step2 algorithm.
 * Keeps the link between a given proc holding source mesh data and the corresponding cell IDs.
*/
void OverlapMapping::keepTracksOfSourceIds(int procId, DataArrayInt *ids)
{
ids->incrRef();
- _src_ids_st2.push_back(ids);
- _src_proc_st2.push_back(procId);
+ _sent_src_ids_st2.push_back(ids);
+ _sent_src_proc_st2.push_back(procId);
}
/*!
- * This method keeps tracks of target ids to know in step 6 of main algorithm.
- * This method incarnates item#0 of step2 algorithm.
- */
+ * Same as keepTracksOfSourceIds() but for target mesh data.
+ */
void OverlapMapping::keepTracksOfTargetIds(int procId, DataArrayInt *ids)
{
ids->incrRef();
- _trg_ids_st2.push_back(ids);
- _trg_proc_st2.push_back(procId);
+ _sent_trg_ids_st2.push_back(ids);
+ _sent_trg_proc_st2.push_back(procId);
}
/*!
- * This method stores from a matrix in format Target(rows)/Source(cols) for a source procId 'srcProcId' and for a target procId 'trgProcId'.
- * All ids (source and target) are in format of local ids.
+ * This method stores in the local members the contribution coming from a matrix in format
+ * Target(rows)/Source(cols) for a source procId 'srcProcId' and for a target procId 'trgProcId'.
+ * All IDs received here (source and target) are in the format of local IDs.
+ *
+ * @param srcIds is null if the source mesh is on the local proc
+ * @param trgIds is null if the target mesh is on the local proc
+ *
+ * At least one of the two is null (both can be null at once)
*/
-void OverlapMapping::addContributionST(const std::vector< std::map<int,double> >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId)
+void OverlapMapping::addContributionST(const std::vector< SparseDoubleVec >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId)
{
_matrixes_st.push_back(matrixST);
_source_proc_id_st.push_back(srcProcId);
_target_proc_id_st.push_back(trgProcId);
- if(srcIds)
+ if(srcIds) // source mesh part is remote
    {//item#1 of step2 algorithm in proc m. Only used to know in advance the nb of recv ids [ (0,1) computed on proc1 and Matrix-Vector on proc1 ]
_nb_of_src_ids_proc_st2.push_back(srcIds->getNumberOfTuples());
_src_ids_proc_st2.push_back(srcProcId);
}
- else
+ else // source mesh part is local
{//item#0 of step2 algorithm in proc k
std::set<int> s;
- for(std::vector< std::map<int,double> >::const_iterator it1=matrixST.begin();it1!=matrixST.end();it1++)
- for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+ // For all source IDs (=col indices) in the sparse matrix:
+ for(std::vector< SparseDoubleVec >::const_iterator it1=matrixST.begin();it1!=matrixST.end();it1++)
+ for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
s.insert((*it2).first);
_src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1);
_src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end());
}
/*!
- * 'procsInInteraction' gives the global view of interaction between procs.
- * In 'procsInInteraction' for a proc with id i, is in interaction with procs listed in procsInInteraction[i].
+ * This method is in charge of sending matrices in AllToAll mode.
+ *
+ * 'procsToSendField' gives the list of procs field data has to be sent to.
+ * See OverlapElementLocator::computeBoundingBoxesAndTodoList()
*
- * This method is in charge to send matrixes in AlltoAll mode.
- * After the call of this method 'this' contains the matrixST for all source elements of the current proc
+ * After the call of this method, 'this' contains the matrixST for all source cells of the current proc
*/
-void OverlapMapping::prepare(const std::vector< std::vector<int> >& procsInInteraction, int nbOfTrgElems)
+void OverlapMapping::prepare(const std::vector< int >& procsToSendField, int nbOfTrgElems)
{
+// printMatrixesST();
+
CommInterface commInterface=_group.getCommInterface();
const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
const MPI_Comm *comm=group->getComm();
INTERP_KERNEL::AutoPtr<int> nbsend3=new int[grpSize];
std::fill<int *>(nbsend,nbsend+grpSize,0);
int myProcId=_group.myRank();
- _proc_ids_to_recv_vector_st.clear();
- int curProc=0;
- for(std::vector< std::vector<int> >::const_iterator it1=procsInInteraction.begin();it1!=procsInInteraction.end();it1++,curProc++)
- if(std::find((*it1).begin(),(*it1).end(),myProcId)!=(*it1).end())
- _proc_ids_to_recv_vector_st.push_back(curProc);
- _proc_ids_to_send_vector_st=procsInInteraction[myProcId];
for(std::size_t i=0;i<_matrixes_st.size();i++)
if(_source_proc_id_st[i]==myProcId)
nbsend[_target_proc_id_st[i]]=_matrixes_st[i].size();
bigArrRecv2,bigArrDRecv2,nbrecv3,nbrecv4);
//updating _src_ids_zip_st2 and _src_ids_zip_st2 with received matrix.
updateZipSourceIdsForFuture();
- //finish to fill _the_matrix_st with already in place matrix in _matrixes_st
+  // Finish filling _the_matrix_st with the matrices already in place in _matrixes_st (local computation)
finishToFillFinalMatrixST();
- //printTheMatrix();
+  // Prepare proc lists for future field data exchange (multiply()):
+ fillProcToSendRcvForMultiply(procsToSendField);
+ // Make some space on local proc:
+ _matrixes_st.clear();
+
+// std::stringstream scout;
+// scout << "\n(" << myProcId << ") to send:";
+// for (std::vector< int >::const_iterator itdbg=_proc_ids_to_send_vector_st.begin(); itdbg!=_proc_ids_to_send_vector_st.end(); itdbg++)
+// scout << "," << *itdbg;
+// scout << "\n(" << myProcId << ") to recv:";
+// for (std::vector< int >::const_iterator itdbg=_proc_ids_to_recv_vector_st.begin(); itdbg!=_proc_ids_to_recv_vector_st.end(); itdbg++)
+// scout << "," << *itdbg;
+// std::cout << scout.str() << "\n";
+//
+// printTheMatrix();
+}
+
+/**
+ * Fills the members _proc_ids_to_send_vector_st and _proc_ids_to_recv_vector_st,
+ * indicating to which procs the local proc should send its source field data
+ * and from which procs it should receive source field data.
+ *
+ * Should be called once finishToFillFinalMatrixST() has been executed, i.e. when the
+ * local matrices are completely ready.
+ */
+void OverlapMapping::fillProcToSendRcvForMultiply(const std::vector< int >& procsToSendField)
+{
+// _proc_ids_to_send_vector_st.clear();
+ _proc_ids_to_recv_vector_st.clear();
+ // Receiving side is easy - just inspect non-void terms in the final matrix
+ int i=0;
+ std::vector< std::vector< SparseDoubleVec > >::const_iterator it;
+ std::vector< SparseDoubleVec >::const_iterator it2;
+ for(it=_the_matrix_st.begin(); it!=_the_matrix_st.end(); it++, i++)
+ _proc_ids_to_recv_vector_st.push_back(_the_matrix_st_source_proc_id[i]);
+
+ // Sending side was computed in OverlapElementLocator::computeBoundingBoxesAndTodoList()
+ _proc_ids_to_send_vector_st = procsToSendField;
}
/*!
for(std::size_t j=0;j<sz2;j++)
{
double sum=0;
- std::map<int,double>& mToFill=_the_deno_st[i][j];
- const std::map<int,double>& m=_the_matrix_st[i][j];
- for(std::map<int,double>::const_iterator it=m.begin();it!=m.end();it++)
+ SparseDoubleVec& mToFill=_the_deno_st[i][j];
+ const SparseDoubleVec& m=_the_matrix_st[i][j];
+ for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
sum+=(*it).second;
- for(std::map<int,double>::const_iterator it=m.begin();it!=m.end();it++)
+ for(SparseDoubleVec::const_iterator it=m.begin();it!=m.end();it++)
mToFill[(*it).first]=sum;
}
}
*/
void OverlapMapping::computeDenoConservativeVolumic(int nbOfTuplesTrg)
{
- CommInterface commInterface=_group.getCommInterface();
int myProcId=_group.myRank();
//
_the_deno_st.clear();
std::vector<double> deno(nbOfTuplesTrg);
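+  // First pass below: deno[t] accumulates the sum of all the coefficients attached to target cell t
+  // across every received matrix part; the second pass then replicates this sum as the denominator
+  // of each coefficient of row t.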
for(std::size_t i=0;i<sz1;i++)
{
- const std::vector< std::map<int,double> >& mat=_the_matrix_st[i];
+ const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
int curSrcId=_the_matrix_st_source_proc_id[i];
- std::vector<int>::iterator isItem1=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId);
+ std::vector<int>::iterator isItem1=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),curSrcId);
int rowId=0;
- if(isItem1==_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids.
+      if(isItem1==_sent_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because the rowIds of mat are directly the target ids.
{
- for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
- for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+ for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+ for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
deno[rowId]+=(*it2).second;
}
else
{//item0 of step2 main algo. More complicated.
-          std::vector<int>::iterator fnd=isItem1;//std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId);
+          std::vector<int>::iterator fnd=isItem1;//std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),curSrcId);
- int locId=std::distance(_trg_proc_st2.begin(),fnd);
- const DataArrayInt *trgIds=_trg_ids_st2[locId];
+ int locId=std::distance(_sent_trg_proc_st2.begin(),fnd);
+ const DataArrayInt *trgIds=_sent_trg_ids_st2[locId];
const int *trgIds2=trgIds->getConstPointer();
- for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
- for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+ for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+ for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
deno[trgIds2[rowId]]+=(*it2).second;
}
}
for(std::size_t i=0;i<sz1;i++)
{
int rowId=0;
- const std::vector< std::map<int,double> >& mat=_the_matrix_st[i];
+ const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
int curSrcId=_the_matrix_st_source_proc_id[i];
- std::vector<int>::iterator isItem1=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),curSrcId);
- std::vector< std::map<int,double> >& denoM=_the_deno_st[i];
+ std::vector<int>::iterator isItem1=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),curSrcId);
+ std::vector< SparseDoubleVec >& denoM=_the_deno_st[i];
denoM.resize(mat.size());
- if(isItem1==_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because rowId of mat are directly target ids.
+      if(isItem1==_sent_trg_proc_st2.end() || curSrcId==myProcId)//item1 of step2 main algo. Simple, because the rowIds of mat are directly the target ids.
{
int rowId=0;
- for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
- for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+ for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+ for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
denoM[rowId][(*it2).first]=deno[rowId];
}
else
{
std::vector<int>::iterator fnd=isItem1;
- int locId=std::distance(_trg_proc_st2.begin(),fnd);
- const DataArrayInt *trgIds=_trg_ids_st2[locId];
+ int locId=std::distance(_sent_trg_proc_st2.begin(),fnd);
+ const DataArrayInt *trgIds=_sent_trg_ids_st2[locId];
const int *trgIds2=trgIds->getConstPointer();
- for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
- for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+ for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++,rowId++)
+ for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
denoM[rowId][(*it2).first]=deno[trgIds2[rowId]];
}
}
int start=offsets[_target_proc_id_st[i]];
int *work=bigArr+start;
*work=0;
- const std::vector< std::map<int,double> >& mat=_matrixes_st[i];
- for(std::vector< std::map<int,double> >::const_iterator it=mat.begin();it!=mat.end();it++,work++)
+ const std::vector< SparseDoubleVec >& mat=_matrixes_st[i];
+ for(std::vector< SparseDoubleVec >::const_iterator it=mat.begin();it!=mat.end();it++,work++)
work[1]=work[0]+(*it).size();
}
}
/*!
* This method performs step#1 and step#2/3. It returns the size of expected array to get allToAllV.
+ * It is where the locally computed matrices are serialized, to be sent to the adequate final procs.
*/
int OverlapMapping::serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *recvStep0, const int *countStep0, const int *offsStep0,
int *&bigArrI, double *&bigArrD, int *count, int *offsets,
{
if(_source_proc_id_st[i]==myProcId)
{
- const std::vector< std::map<int,double> >& mat=_matrixes_st[i];
+ const std::vector< SparseDoubleVec >& mat=_matrixes_st[i];
int lgthToSend=0;
- for(std::vector< std::map<int,double> >::const_iterator it=mat.begin();it!=mat.end();it++)
+ for(std::vector< SparseDoubleVec >::const_iterator it=mat.begin();it!=mat.end();it++)
lgthToSend+=(*it).size();
count[_target_proc_id_st[i]]=lgthToSend;
fullLgth+=lgthToSend;
{
if(_source_proc_id_st[i]==myProcId)
{
- const std::vector< std::map<int,double> >& mat=_matrixes_st[i];
- for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
+ const std::vector< SparseDoubleVec >& mat=_matrixes_st[i];
+ for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
{
int j=0;
- for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++,j++)
+ for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++,j++)
{
bigArrI[fullLgth+j]=(*it2).first;
bigArrD[fullLgth+j]=(*it2).second;
}
/*!
- * This method should be called when all remote matrix with sourceProcId==thisProcId have been retrieved and are in 'this->_the_matrix_st' and 'this->_the_matrix_st_target_proc_id'
- * and 'this->_the_matrix_st_target_ids'.
- * This method finish the job of filling 'this->_the_matrix_st' and 'this->_the_matrix_st_target_proc_id' by putting candidates in 'this->_matrixes_st' into them.
+ * This method should be called when all the remote matrices with sourceProcId==thisProcId have been
+ * retrieved and are in 'this->_the_matrix_st', 'this->_the_matrix_st_target_proc_id' and 'this->_the_matrix_st_target_ids'.
+ * This method finishes the job of filling 'this->_the_matrix_st' and 'this->_the_matrix_st_target_proc_id'
+ * by putting the candidates of 'this->_matrixes_st' into them (i.e. the local computation results).
*/
void OverlapMapping::finishToFillFinalMatrixST()
{
for(int i=0;i<sz;i++)
if(_source_proc_id_st[i]!=myProcId)
{
- const std::vector<std::map<int,double> >& mat=_matrixes_st[i];
+        const std::vector< SparseDoubleVec >& mat=_matrixes_st[i];
_the_matrix_st[j]=mat;
_the_matrix_st_source_proc_id.push_back(_source_proc_id_st[i]);
j++;
}
- _matrixes_st.clear();
-}
-
-/*!
- * This method performs the operation of target ids broadcasting.
- */
-void OverlapMapping::prepareIdsToSendST()
-{
- CommInterface commInterface=_group.getCommInterface();
- const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
- const MPI_Comm *comm=group->getComm();
- int grpSize=_group.size();
- _source_ids_to_send_st.clear();
- _source_ids_to_send_st.resize(grpSize);
- INTERP_KERNEL::AutoPtr<int> nbsend=new int[grpSize];
- std::fill<int *>(nbsend,nbsend+grpSize,0);
- for(std::size_t i=0;i<_the_matrix_st_source_proc_id.size();i++)
- nbsend[_the_matrix_st_source_proc_id[i]]=_the_matrix_st_source_ids[i].size();
- INTERP_KERNEL::AutoPtr<int> nbrecv=new int[grpSize];
- commInterface.allToAll(nbsend,1,MPI_INT,nbrecv,1,MPI_INT,*comm);
- //
- INTERP_KERNEL::AutoPtr<int> nbsend2=new int[grpSize];
- std::copy((int *)nbsend,((int *)nbsend)+grpSize,(int *)nbsend2);
- INTERP_KERNEL::AutoPtr<int> nbsend3=new int[grpSize];
- nbsend3[0]=0;
- for(int i=1;i<grpSize;i++)
- nbsend3[i]=nbsend3[i-1]+nbsend2[i-1];
- int sendSz=nbsend3[grpSize-1]+nbsend2[grpSize-1];
- INTERP_KERNEL::AutoPtr<int> bigDataSend=new int[sendSz];
- for(std::size_t i=0;i<_the_matrix_st_source_proc_id.size();i++)
- {
- int offset=nbsend3[_the_matrix_st_source_proc_id[i]];
- std::copy(_the_matrix_st_source_ids[i].begin(),_the_matrix_st_source_ids[i].end(),((int *)nbsend3)+offset);
- }
- INTERP_KERNEL::AutoPtr<int> nbrecv2=new int[grpSize];
- INTERP_KERNEL::AutoPtr<int> nbrecv3=new int[grpSize];
- std::copy((int *)nbrecv,((int *)nbrecv)+grpSize,(int *)nbrecv2);
- nbrecv3[0]=0;
- for(int i=1;i<grpSize;i++)
- nbrecv3[i]=nbrecv3[i-1]+nbrecv2[i-1];
- int recvSz=nbrecv3[grpSize-1]+nbrecv2[grpSize-1];
- INTERP_KERNEL::AutoPtr<int> bigDataRecv=new int[recvSz];
- //
- commInterface.allToAllV(bigDataSend,nbsend2,nbsend3,MPI_INT,
- bigDataRecv,nbrecv2,nbrecv3,MPI_INT,
- *comm);
- for(int i=0;i<grpSize;i++)
- {
- if(nbrecv2[i]>0)
- {
- _source_ids_to_send_st[i].insert(_source_ids_to_send_st[i].end(),((int *)bigDataRecv)+nbrecv3[i],((int *)bigDataRecv)+nbrecv3[i]+nbrecv2[i]);
- }
- }
}
/*!
nbsend2[0]=0;
nbrecv2[0]=0;
std::vector<double> valsToSend;
+
+ /*
+   * FIELD VALUE EXCHANGE
+ */
for(int i=0;i<grpSize;i++)
{
+      // Prepare the field data to be sent/received through an MPI_AllToAllV
if(std::find(_proc_ids_to_send_vector_st.begin(),_proc_ids_to_send_vector_st.end(),i)!=_proc_ids_to_send_vector_st.end())
{
- std::vector<int>::const_iterator isItem1=std::find(_src_proc_st2.begin(),_src_proc_st2.end(),i);
+ std::vector<int>::const_iterator isItem1=std::find(_sent_src_proc_st2.begin(),_sent_src_proc_st2.end(),i);
MEDCouplingAutoRefCountObjectPtr<DataArrayDouble> vals;
- if(isItem1!=_src_proc_st2.end())//item1 of step2 main algo
+ if(isItem1!=_sent_src_proc_st2.end()) // source data was sent to proc 'i'
{
- int id=std::distance(_src_proc_st2.begin(),isItem1);
- vals=fieldInput->getArray()->selectByTupleId(_src_ids_st2[id]->getConstPointer(),_src_ids_st2[id]->getConstPointer()+_src_ids_st2[id]->getNumberOfTuples());
+ // Prepare local field data to send to proc i
+ int id=std::distance(_sent_src_proc_st2.begin(),isItem1);
+ vals=fieldInput->getArray()->selectByTupleId(*_sent_src_ids_st2[id]);
}
- else
+ else // no source data was sent to proc 'i'
{//item0 of step2 main algo
- int id=std::distance(_src_ids_zip_proc_st2.begin(),std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i));
+              std::vector<int>::const_iterator isItem22 = std::find(_src_ids_zip_proc_st2.begin(),_src_ids_zip_proc_st2.end(),i);
+              if (isItem22 == _src_ids_zip_proc_st2.end())
+                throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error - proc id not found in _src_ids_zip_proc_st2!");
+              int id=std::distance(_src_ids_zip_proc_st2.begin(),isItem22);
vals=fieldInput->getArray()->selectByTupleId(&(_src_ids_zip_st2[id])[0],&(_src_ids_zip_st2[id])[0]+_src_ids_zip_st2[id].size());
}
nbsend[i]=vals->getNbOfElems();
}
if(std::find(_proc_ids_to_recv_vector_st.begin(),_proc_ids_to_recv_vector_st.end(),i)!=_proc_ids_to_recv_vector_st.end())
{
- std::vector<int>::const_iterator isItem0=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i);
- if(isItem0==_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ]
+ std::vector<int>::const_iterator isItem0=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),i);
+ if(isItem0==_sent_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ]
{
std::vector<int>::const_iterator it1=std::find(_src_ids_proc_st2.begin(),_src_ids_proc_st2.end(),i);
if(it1!=_src_ids_proc_st2.end())
int id=std::distance(_src_ids_proc_st2.begin(),it1);
nbrecv[i]=_nb_of_src_ids_proc_st2[id]*nbOfCompo;
}
- else if(i==myProcId)
+          else if(i==myProcId) // diagonal element (i.e. proc #i exchanging with itself)
{
- nbrecv[i]=fieldInput->getNumberOfTuplesExpected()*nbOfCompo;
+ nbrecv[i] = nbsend[i];
}
else
- throw INTERP_KERNEL::Exception("Plouff ! send email to anthony.geay@cea.fr ! ");
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): Internal error at field values exchange!");
}
else
{//item0 of step2 main algo [ (2,1) computed on proc2 but Matrix-Vector on proc1 ] [(1,0) computed on proc1 but Matrix-Vector on proc0]
nbrecv2[i]=nbrecv2[i-1]+nbrecv[i-1];
}
INTERP_KERNEL::AutoPtr<double> bigArr=new double[nbrecv2[grpSize-1]+nbrecv[grpSize-1]];
+
+// scout << "(" << myProcId << ") nbsend :" << nbsend[0] << "," << nbsend[1] << "," << nbsend[2] << "\n";
+// scout << "(" << myProcId << ") nbrecv :" << nbrecv[0] << "," << nbrecv[1] << "," << nbrecv[2] << "\n";
+// std::cout << scout.str() << "\n";
+ /*
+ * *********************** ALL-TO-ALL
+ */
commInterface.allToAllV(&valsToSend[0],nbsend,nbsend2,MPI_DOUBLE,
bigArr,nbrecv,nbrecv2,MPI_DOUBLE,*comm);
+  /*
+   * TARGET FIELD COMPUTATION (matrix-vector computation)
+   */
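+  /* In pseudo-notation, for each local target row j and component c, the loop below computes:
+   *    out[j][c] += sum_i( mat[j][i] / deno[j][i] * recv[i][c] )
+   * where i runs over the sparse entries of row j (remapped through zipCor in the zipped branch). */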
fieldOutput->getArray()->fillWithZero();
INTERP_KERNEL::AutoPtr<double> tmp=new double[nbOfCompo];
for(int i=0;i<grpSize;i++)
if(it==_the_matrix_st_source_proc_id.end())
-        throw INTERP_KERNEL::Exception("Big problem !");
+        throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): internal error - source proc id not found in _the_matrix_st_source_proc_id!");
int id=std::distance(_the_matrix_st_source_proc_id.begin(),it);
- const std::vector< std::map<int,double> >& mat=_the_matrix_st[id];
- const std::vector< std::map<int,double> >& deno=_the_deno_st[id];
- std::vector<int>::const_iterator isItem0=std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i);
- if(isItem0==_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ]
+ const std::vector< SparseDoubleVec >& mat=_the_matrix_st[id];
+ const std::vector< SparseDoubleVec >& deno=_the_deno_st[id];
+ std::vector<int>::const_iterator isItem0=std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),i);
+ if(isItem0==_sent_trg_proc_st2.end())//item1 of step2 main algo [ (0,1) computed on proc1 and Matrix-Vector on proc1 ]
{
int nbOfTrgTuples=mat.size();
for(int j=0;j<nbOfTrgTuples;j++,pt+=nbOfCompo)
{
- const std::map<int,double>& mat1=mat[j];
- const std::map<int,double>& deno1=deno[j];
- std::map<int,double>::const_iterator it4=deno1.begin();
- for(std::map<int,double>::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it4++)
+ const SparseDoubleVec& mat1=mat[j];
+ const SparseDoubleVec& deno1=deno[j];
+ SparseDoubleVec::const_iterator it4=deno1.begin();
+ for(SparseDoubleVec::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it4++)
{
- std::transform(bigArr+nbrecv2[i]+((*it3).first)*nbOfCompo,bigArr+nbrecv2[i]+((*it3).first+1)*(nbOfCompo),(double *)tmp,std::bind2nd(std::multiplies<double>(),(*it3).second/(*it4).second));
+ std::transform(bigArr+nbrecv2[i]+((*it3).first)*nbOfCompo,
+ bigArr+nbrecv2[i]+((*it3).first+1)*(nbOfCompo),
+ (double *)tmp,
+ std::bind2nd(std::multiplies<double>(),(*it3).second/(*it4).second));
std::transform((double *)tmp,(double *)tmp+nbOfCompo,pt,pt,std::plus<double>());
}
}
int newId=0;
for(std::vector<int>::const_iterator it=zipIds.begin();it!=zipIds.end();it++,newId++)
zipCor[*it]=newId;
- int id2=std::distance(_trg_proc_st2.begin(),std::find(_trg_proc_st2.begin(),_trg_proc_st2.end(),i));
- const DataArrayInt *tgrIds=_trg_ids_st2[id2];
+ int id2=std::distance(_sent_trg_proc_st2.begin(),std::find(_sent_trg_proc_st2.begin(),_sent_trg_proc_st2.end(),i));
+ const DataArrayInt *tgrIds=_sent_trg_ids_st2[id2];
const int *tgrIds2=tgrIds->getConstPointer();
int nbOfTrgTuples=mat.size();
for(int j=0;j<nbOfTrgTuples;j++)
{
- const std::map<int,double>& mat1=mat[j];
- const std::map<int,double>& deno1=deno[j];
- std::map<int,double>::const_iterator it5=deno1.begin();
- for(std::map<int,double>::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it5++)
+ const SparseDoubleVec& mat1=mat[j];
+ const SparseDoubleVec& deno1=deno[j];
+ SparseDoubleVec::const_iterator it5=deno1.begin();
+ for(SparseDoubleVec::const_iterator it3=mat1.begin();it3!=mat1.end();it3++,it5++)
{
std::map<int,int>::const_iterator it4=zipCor.find((*it3).first);
if(it4==zipCor.end())
- throw INTERP_KERNEL::Exception("Hmmmmm send e mail to anthony.geay@cea.fr !");
- std::transform(bigArr+nbrecv2[i]+((*it4).second)*nbOfCompo,bigArr+nbrecv2[i]+((*it4).second+1)*(nbOfCompo),(double *)tmp,std::bind2nd(std::multiplies<double>(),(*it3).second/(*it5).second));
+ throw INTERP_KERNEL::Exception("OverlapMapping::multiply(): Internal error in final value computation!");
+ std::transform(bigArr+nbrecv2[i]+((*it4).second)*nbOfCompo,
+ bigArr+nbrecv2[i]+((*it4).second+1)*(nbOfCompo),
+ (double *)tmp,
+ std::bind2nd(std::multiplies<double>(),(*it3).second/(*it5).second));
std::transform((double *)tmp,(double *)tmp+nbOfCompo,pt+tgrIds2[j]*nbOfCompo,pt+tgrIds2[j]*nbOfCompo,std::plus<double>());
}
}
}
/*!
- * This method should be called immediately after _the_matrix_st has been filled with remote computed matrix put in this proc for Matrix-Vector.
- * This method computes for these matrix the minimal set of source ids corresponding to the source proc id.
+ * This method should be called immediately after _the_matrix_st has been filled with the remotely computed
+ * matrices put on this proc for the matrix-vector computation.
+ * For these matrices, this method computes the minimal set of source ids corresponding to each source proc id.
*/
void OverlapMapping::updateZipSourceIdsForFuture()
{
+ /* When it is called, only the bits received from other processors (i.e. the remotely executed jobs) are in the
+ big matrix _the_matrix_st. */
+
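+  /* Illustrative example (invented values): if the rows received from proc p are {2:0.3, 5:0.7} and {5:1.0},
+     the minimal (zipped) set of source ids stored for p is [2,5]. */
+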
CommInterface commInterface=_group.getCommInterface();
int myProcId=_group.myRank();
int nbOfMatrixRecveived=_the_matrix_st_source_proc_id.size();
int curSrcProcId=_the_matrix_st_source_proc_id[i];
if(curSrcProcId!=myProcId)
{
- const std::vector< std::map<int,double> >& mat=_the_matrix_st[i];
+ const std::vector< SparseDoubleVec >& mat=_the_matrix_st[i];
_src_ids_zip_proc_st2.push_back(curSrcProcId);
_src_ids_zip_st2.resize(_src_ids_zip_st2.size()+1);
std::set<int> s;
- for(std::vector< std::map<int,double> >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
- for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+ for(std::vector< SparseDoubleVec >::const_iterator it1=mat.begin();it1!=mat.end();it1++)
+ for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
s.insert((*it2).first);
_src_ids_zip_st2.back().insert(_src_ids_zip_st2.back().end(),s.begin(),s.end());
+
+// std::stringstream scout;
+// scout << "(" << myProcId << ") pushed " << _src_ids_zip_st2.back().size() << " values for tgt proc " << curSrcProcId;
+// std::cout << scout.str() << "\n";
}
}
}
-// #include <iostream>
-
// void OverlapMapping::printTheMatrix() const
// {
// CommInterface commInterface=_group.getCommInterface();
// const MPI_Comm *comm=group->getComm();
// int grpSize=_group.size();
// int myProcId=_group.myRank();
-// std::cerr << "I am proc #" << myProcId << std::endl;
+// std::stringstream oscerr;
// int nbOfMat=_the_matrix_st.size();
-// std::cerr << "I do manage " << nbOfMat << "matrix : "<< std::endl;
+// oscerr << "(" << myProcId << ") I hold " << nbOfMat << " matrix(ces) : "<< std::endl;
// for(int i=0;i<nbOfMat;i++)
// {
-// std::cerr << " - Matrix #" << i << " on source proc #" << _the_matrix_st_source_proc_id[i];
-// const std::vector< std::map<int,double> >& locMat=_the_matrix_st[i];
-// for(std::vector< std::map<int,double> >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++)
+// oscerr << " - Matrix #" << i << " coming from source proc #" << _the_matrix_st_source_proc_id[i] << ": ";
+// const std::vector< SparseDoubleVec >& locMat=_the_matrix_st[i];
+// int j = 0;
+// for(std::vector< SparseDoubleVec >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++, j++)
// {
-// for(std::map<int,double>::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
-// std::cerr << "(" << (*it2).first << "," << (*it2).second << "), ";
-// std::cerr << std::endl;
+// oscerr << " Target Cell #" << j;
+// for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+// oscerr << " (" << (*it2).first << "," << (*it2).second << "), ";
+// oscerr << std::endl;
// }
// }
-// std::cerr << "*********" << std::endl;
+// oscerr << "*********" << std::endl;
+//
+// // Hope this will be flushed in one go:
+// std::cerr << oscerr.str() << std::endl;
+//// if(myProcId != 0)
+//// MPI_Barrier(MPI_COMM_WORLD);
// }
+//
+// void OverlapMapping::printMatrixesST() const
+// {
+// CommInterface commInterface=_group.getCommInterface();
+// const MPIProcessorGroup *group=static_cast<const MPIProcessorGroup*>(&_group);
+// const MPI_Comm *comm=group->getComm();
+// int grpSize=_group.size();
+// int myProcId=_group.myRank();
+// std::stringstream oscerr;
+// int nbOfMat=_matrixes_st.size();
+// oscerr << "(" << myProcId << ") I hold " << nbOfMat << " LOCAL matrix(ces) : "<< std::endl;
+// for(int i=0;i<nbOfMat;i++)
+// {
+// oscerr << " - Matrix #" << i << ": (source proc #" << _source_proc_id_st[i] << " / tgt proc#" << _target_proc_id_st[i] << "):";
+// const std::vector< SparseDoubleVec >& locMat=_matrixes_st[i];
+// int j = 0;
+// for(std::vector< SparseDoubleVec >::const_iterator it1=locMat.begin();it1!=locMat.end();it1++, j++)
+// {
+// oscerr << " Target Cell #" << j;
+// for(SparseDoubleVec::const_iterator it2=(*it1).begin();it2!=(*it1).end();it2++)
+// oscerr << " (" << (*it2).first << "," << (*it2).second << "), ";
+// oscerr << std::endl;
+// }
+// }
+// oscerr << "*********" << std::endl;
+//
+// // Hope this will be flushed in one go:
+// std::cerr << oscerr.str() << std::endl;
+// }
class DataArrayInt;
class MEDCouplingFieldDouble;
- /*
+ typedef std::map<int,double> SparseDoubleVec;
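+  // A SparseDoubleVec is one sparse matrix row: it maps a (source) cell id to its interpolation coefficient.
+  // Illustrative use (invented values): SparseDoubleVec row; row[42]=0.25;  // source cell #42, weight 0.25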
+
+ /*!
* Internal class, not part of the public API.
*
* Used by the impl of OverlapInterpolationMatrix, plays an equivalent role than what the NxM_Mapping
class OverlapMapping
{
public:
+
OverlapMapping(const ProcessorGroup& group);
void keepTracksOfSourceIds(int procId, DataArrayInt *ids);
void keepTracksOfTargetIds(int procId, DataArrayInt *ids);
- void addContributionST(const std::vector< std::map<int,double> >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId);
- void prepare(const std::vector< std::vector<int> >& procsInInteraction, int nbOfTrgElems);
+ void addContributionST(const std::vector< SparseDoubleVec >& matrixST, const DataArrayInt *srcIds, int srcProcId, const DataArrayInt *trgIds, int trgProcId);
+ void prepare(const std::vector< int >& procsToSendField, int nbOfTrgElems);
void computeDenoConservativeVolumic(int nbOfTuplesTrg);
void computeDenoGlobConstraint();
//
void multiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput) const;
void transposeMultiply(const MEDCouplingFieldDouble *fieldInput, MEDCouplingFieldDouble *fieldOutput);
private:
+ void fillProcToSendRcvForMultiply(const std::vector< int >& procsToSendField);
void serializeMatrixStep0ST(const int *nbOfElemsSrc, int *&bigArr, int *count, int *offsets,
int *countForRecv, int *offsetsForRecv) const;
int serializeMatrixStep1ST(const int *nbOfElemsSrc, const int *recvStep0, const int *countStep0, const int *offsStep0,
void unserializationST(int nbOfTrgElems, const int *nbOfElemsSrcPerProc, const int *bigArrRecv, const int *bigArrRecvCounts, const int *bigArrRecvOffs,
const int *bigArrRecv2, const double *bigArrDRecv2, const int *bigArrRecv2Count, const int *bigArrRecv2Offs);
void finishToFillFinalMatrixST();
- void prepareIdsToSendST();
void updateZipSourceIdsForFuture();
- //void printTheMatrix() const;
+
+ // Debug
+// void printMatrixesST() const;
+// void printTheMatrix() const;
private:
const ProcessorGroup &_group;
- //! vector of ids
- std::vector< MEDCouplingAutoRefCountObjectPtr<DataArrayInt> > _src_ids_st2;//item #1
- std::vector< int > _src_proc_st2;//item #1
- std::vector< MEDCouplingAutoRefCountObjectPtr<DataArrayInt> > _trg_ids_st2;//item #0
- std::vector< int > _trg_proc_st2;//item #0
- std::vector< int > _nb_of_src_ids_proc_st2;//item #1
- std::vector< int > _src_ids_proc_st2;//item #1
- std::vector< std::vector<int> > _src_ids_zip_st2;//same size as _src_ids_zip_proc_st2. Sorted. specifies for each id the corresponding ids to send. This is for item0 of Step2 of main algorithm
- std::vector< int > _src_ids_zip_proc_st2;
- //! vector of matrixes the first entry correspond to source proc id in _source_ids_st
- std::vector< std::vector< std::map<int,double> > > _matrixes_st;
- std::vector< std::vector<int> > _source_ids_st;
+  /*! Vector of DataArrayInt of cell identifiers. The two following class members work as a pair. For a proc ID i,
+   * the first member gives an old2new map for the local part of the source mesh that has been sent to it,
+   * and the second member gives the proc ID. */
+ std::vector< MEDCouplingAutoRefCountObjectPtr<DataArrayInt> > _sent_src_ids_st2;
+ //! see above _sent_src_ids_st2
+ std::vector< int > _sent_src_proc_st2;
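+  // (Illustrative, invented values: _sent_src_proc_st2=[1,3] means parts of the local source mesh were sent
+  //  to procs #1 and #3, and _sent_src_ids_st2[0] holds the cell ids that went to proc #1.)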
+
+  //! See _sent_src_ids_st2 and _sent_src_proc_st2. Same role, for the target mesh.
+  std::vector< MEDCouplingAutoRefCountObjectPtr<DataArrayInt> > _sent_trg_ids_st2;
+  //! See _sent_src_ids_st2 and _sent_src_proc_st2. Same role, for the target mesh.
+  std::vector< int > _sent_trg_proc_st2;
+
+
+  /*! Vector of matrices (partial interpolation ratios), result of the local interpolator run.
+   * Indexing shared with _source_proc_id_st and _target_proc_id_st. */
+ std::vector< std::vector< SparseDoubleVec > > _matrixes_st;
+ //! See _matrixes_st - vec of source proc IDs
std::vector< int > _source_proc_id_st;
- std::vector< std::vector<int> > _target_ids_st;
+ //! See _matrixes_st - vec of target proc IDs
std::vector< int > _target_proc_id_st;
- //! the matrix for matrix-vector product. The first dimension the set of target procs that interacts with local source mesh. The second dimension correspond to nb of local source ids.
- std::vector< std::vector< std::map<int,double> > > _the_matrix_st;
+
+  //! Vector of remote proc IDs for the source mesh. Indexing shared with _nb_of_src_ids_proc_st2
+  std::vector< int > _src_ids_proc_st2;
+  //! Number of cells in the mesh/mapping received from remote proc i for the source mesh. See _src_ids_proc_st2 above
+ std::vector< int > _nb_of_src_ids_proc_st2;
+
+  /*! Specifies, for each remote proc ID (given in _src_ids_zip_proc_st2 below), the corresponding local
+   * source cell IDs to use/send. Same indexing as _src_ids_zip_proc_st2. Sorted.
+   * On a given proc, these two members contain exactly the same set of cell identifiers as the ones found
+   * in the locally held interpolation matrices. */
+ std::vector< std::vector<int> > _src_ids_zip_st2;
+ //! Vector of remote proc ID to which the local source mapping above corresponds. See _src_ids_zip_st2 above.
+ std::vector< int > _src_ids_zip_proc_st2;
+
+  /*! THE matrix for the matrix-vector product. The first dimension is indexed on the set of target procs
+   * that interact with the local source mesh. The second dim is the pseudo id of the source proc.
+   * Same indexing as _the_matrix_st_source_proc_id. */
+ std::vector< std::vector< SparseDoubleVec > > _the_matrix_st;
+ //! See _the_matrix_st above. List of source proc IDs contributing to _the_matrix_st
std::vector< int > _the_matrix_st_source_proc_id;
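+  // (Illustrative indexing: _the_matrix_st[k][j][srcId] is the coefficient linking local target cell j
+  //  to source cell srcId, as computed by proc _the_matrix_st_source_proc_id[k].)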
- std::vector< std::vector<int> > _the_matrix_st_source_ids;
- std::vector< std::vector< std::map<int,double> > > _the_deno_st;
- //! this attribute stores the proc ids that wait for data from this proc ids for matrix-vector computation
+
+  //! Proc IDs to which data will be sent (originating from this proc) for the matrix-vector computation
  std::vector< int > _proc_ids_to_send_vector_st;
+  //! Proc IDs from which data will be received (on this proc) for the matrix-vector computation
std::vector< int > _proc_ids_to_recv_vector_st;
- //! this attribute is of size _group.size(); for each procId in _group _source_ids_to_send_st[procId] contains tupleId to send abroad
- std::vector< std::vector<int> > _source_ids_to_send_st;
+
+ // Denominators (computed from the numerator matrix)
+ std::vector< std::vector< SparseDoubleVec > > _the_deno_st;
+
+// std::vector< std::vector<int> > _the_matrix_st_source_ids;
+// //! this attribute is of size _group.size(); for each procId in _group _source_ids_to_send_st[procId] contains tupleId to send abroad
+// std::vector< std::vector<int> > _source_ids_to_send_st;
};
}
ADD_LIBRARY(ParaMEDMEMTest SHARED ${ParaMEDMEMTest_SOURCES})
SET_TARGET_PROPERTIES(ParaMEDMEMTest PROPERTIES COMPILE_FLAGS "")
-TARGET_LINK_LIBRARIES(ParaMEDMEMTest paramedmem paramedloader ${CPPUNIT_LIBRARIES})
+TARGET_LINK_LIBRARIES(ParaMEDMEMTest paramedmem paramedloader medcouplingremapper ${CPPUNIT_LIBRARIES})
INSTALL(TARGETS ParaMEDMEMTest DESTINATION ${SALOME_INSTALL_LIBS})
SET(TESTSParaMEDMEM)
CPPUNIT_TEST(testInterpKernelDECPartialProcs);
CPPUNIT_TEST(testInterpKernelDEC3DSurfEmptyBBox);
CPPUNIT_TEST(testOverlapDEC1);
-
+ CPPUNIT_TEST(testOverlapDEC2);
+// CPPUNIT_TEST(testOverlapDEC_LMEC_seq);
+// CPPUNIT_TEST(testOverlapDEC_LMEC_para);
+//
CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D);
CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D);
CPPUNIT_TEST(testSynchronousEqualInterpKernelDEC_2D);
CPPUNIT_TEST(testAsynchronousSlowerSourceInterpKernelDEC_2D);
CPPUNIT_TEST(testAsynchronousSlowSourceInterpKernelDEC_2D);
CPPUNIT_TEST(testAsynchronousFastSourceInterpKernelDEC_2D);
-#ifdef MED_ENABLE_FVM
- //can be added again after FVM correction for 2D
- // CPPUNIT_TEST(testNonCoincidentDEC_2D);
- CPPUNIT_TEST(testNonCoincidentDEC_3D);
-#endif
+//#ifdef MED_ENABLE_FVM
+// //can be added again after FVM correction for 2D
+// // CPPUNIT_TEST(testNonCoincidentDEC_2D);
+// CPPUNIT_TEST(testNonCoincidentDEC_3D);
+//#endif
CPPUNIT_TEST(testStructuredCoincidentDEC);
CPPUNIT_TEST(testICoco1);
void testInterpKernelDECPartialProcs();
void testInterpKernelDEC3DSurfEmptyBBox();
void testOverlapDEC1();
+ void testOverlapDEC2();
+ void testOverlapDEC_LMEC_seq();
+ void testOverlapDEC_LMEC_para();
#ifdef MED_ENABLE_FVM
void testNonCoincidentDEC_2D();
void testNonCoincidentDEC_3D();
void testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth);
void testInterpKernelDEC2_2D_(const char *srcMeth, const char *targetMeth);
void testInterpKernelDEC_3D_(const char *srcMeth, const char *targetMeth);
+
};
// to automatically remove temporary files from disk
void ParaMEDMEMTest::testGauthier2()
{
+ std::cout << "testGauthier2\n";
double valuesExpected1[2]={0.,0.};
double valuesExpected2[2]={0.95,0.970625};
#include <set>
-void ParaMEDMEMTest::testOverlapDEC1()
-{
- std::string srcM("P0");
- std::string targetM("P0");
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+using namespace std;
- if (size != 3) return ;
-
- int nproc = 3;
- std::set<int> procs;
-
- for (int i=0; i<nproc; i++)
- procs.insert(i);
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::OverlapDEC dec(procs);
-
- ParaMEDMEM::MEDCouplingUMesh* meshS=0;
- ParaMEDMEM::MEDCouplingUMesh* meshT=0;
- ParaMEDMEM::ParaMESH* parameshS=0;
- ParaMEDMEM::ParaMESH* parameshT=0;
- ParaMEDMEM::ParaFIELD* parafieldS=0;
- ParaMEDMEM::ParaFIELD* parafieldT=0;
-
- MPI_Barrier(MPI_COMM_WORLD);
+#include "MEDCouplingAutoRefCountObjectPtr.hxx"
+#include "MEDLoader.hxx"
+#include "MEDLoaderBase.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+#include "MEDCouplingMemArray.hxx"
+#include "MEDCouplingRemapper.hxx"
+
+using namespace ParaMEDMEM;
+
+typedef MEDCouplingAutoRefCountObjectPtr<MEDCouplingUMesh> MUMesh;
+typedef MEDCouplingAutoRefCountObjectPtr<MEDCouplingFieldDouble> MFDouble;
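+// Note: MEDCouplingAutoRefCountObjectPtr is a ref-counting smart pointer, e.g.
+//   MUMesh m = MEDCouplingUMesh::New();  // decrRef() is called automatically when 'm' goes out of scope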
+
+//void ParaMEDMEMTest::testOverlapDEC_LMEC_seq()
+//{
+// // T_SC_Trio_src.med -- "SupportOf_"
+// // T_SC_Trio_dst.med -- "SupportOf_T_SC_Trio"
+// // h_TH_Trio_src.med -- "SupportOf_"
+// // h_TH_Trio_dst.med -- "SupportOf_h_TH_Trio"
+// string rep("/export/home/adrien/support/antoine_LMEC/");
+// string src_mesh_nam(rep + string("T_SC_Trio_src.med"));
+// string tgt_mesh_nam(rep + string("T_SC_Trio_dst.med"));
+//// string src_mesh_nam(rep + string("h_TH_Trio_src.med"));
+//// string tgt_mesh_nam(rep + string("h_TH_Trio_dst.med"));
+// MUMesh src_mesh=MEDLoader::ReadUMeshFromFile(src_mesh_nam,"SupportOf_",0);
+// MUMesh tgt_mesh=MEDLoader::ReadUMeshFromFile(tgt_mesh_nam,"SupportOf_T_SC_Trio",0);
+//// MUMesh tgt_mesh=MEDLoader::ReadUMeshFromFile(tgt_mesh_nam,"SupportOf_h_TH_Trio",0);
+//
+// MFDouble srcField = MEDCouplingFieldDouble::New(ON_CELLS, ONE_TIME);
+// srcField->setMesh(src_mesh);
+// DataArrayDouble * dad = DataArrayDouble::New(); dad->alloc(src_mesh->getNumberOfCells(),1);
+// dad->fillWithValue(1.0);
+// srcField->setArray(dad);
+// srcField->setNature(ConservativeVolumic);
+//
+// MEDCouplingRemapper remap;
+// remap.setOrientation(2); // always consider surface intersections as absolute areas.
+// remap.prepare(src_mesh, tgt_mesh, "P0P0");
+// MFDouble tgtField = remap.transferField(srcField, 1.0e+300);
+// tgtField->setName("result");
+// string out_nam(rep + string("adrien.med"));
+// MEDLoader::WriteField(out_nam,tgtField, true);
+// cout << "wrote: " << out_nam << "\n";
+// double integ1 = 0.0, integ2 = 0.0;
+// srcField->integral(true, &integ1);
+// tgtField->integral(true, &integ2);
+//// tgtField->reprQuickOverview(cout);
+// CPPUNIT_ASSERT_DOUBLES_EQUAL(integ1,integ2,1e-8);
+//
+// dad->decrRef();
+//}
+//
+//void ParaMEDMEMTest::testOverlapDEC_LMEC_para()
+//{
+// using namespace ParaMEDMEM;
+//
+// int size;
+// int rank;
+// MPI_Comm_size(MPI_COMM_WORLD,&size);
+// MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+//
+// if (size != 1) return ;
+//
+// int nproc = 1;
+// std::set<int> procs;
+//
+// for (int i=0; i<nproc; i++)
+// procs.insert(i);
+//
+// CommInterface interface;
+// OverlapDEC dec(procs);
+//
+// ParaMESH* parameshS=0;
+// ParaMESH* parameshT=0;
+// ParaFIELD* parafieldS=0;
+// ParaFIELD* parafieldT=0;
+// MFDouble srcField;
+//
+// // **** FILE LOADING
+// // T_SC_Trio_src.med -- "SupportOf_"
+// // T_SC_Trio_dst.med -- "SupportOf_T_SC_Trio"
+// // h_TH_Trio_src.med -- "SupportOf_"
+// // h_TH_Trio_dst.med -- "SupportOf_h_TH_Trio"
+// string rep("/export/home/adrien/support/antoine_LMEC/");
+// string src_mesh_nam(rep + string("T_SC_Trio_src.med"));
+// string tgt_mesh_nam(rep + string("T_SC_Trio_dst.med"));
+//
+//
+// MPI_Barrier(MPI_COMM_WORLD);
+// if(rank==0)
+// {
+// // string src_mesh_nam(rep + string("h_TH_Trio_src.med"));
+// // string tgt_mesh_nam(rep + string("h_TH_Trio_dst.med"));
+// MUMesh src_mesh=MEDLoader::ReadUMeshFromFile(src_mesh_nam,"SupportOf_",0);
+// MUMesh tgt_mesh=MEDLoader::ReadUMeshFromFile(tgt_mesh_nam,"SupportOf_T_SC_Trio",0);
+// // MUMesh tgt_mesh=MEDLoader::ReadUMeshFromFile(tgt_mesh_nam,"SupportOf_h_TH_Trio",0);
+//
+// // **** SOURCE
+// srcField = MEDCouplingFieldDouble::New(ON_CELLS, ONE_TIME);
+// srcField->setMesh(src_mesh);
+// DataArrayDouble * dad = DataArrayDouble::New(); dad->alloc(src_mesh->getNumberOfCells(),1);
+// dad->fillWithValue(1.0);
+// srcField->setArray(dad);
+// srcField->setNature(ConservativeVolumic);
+//
+// ComponentTopology comptopo;
+// parameshS = new ParaMESH(src_mesh,*dec.getGroup(),"source mesh");
+// parafieldS = new ParaFIELD(ON_CELLS,ONE_TIME,parameshS,comptopo);
+// parafieldS->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
+// parafieldS->getField()->setArray(dad);
+//
+// // **** TARGET
+// parameshT=new ParaMESH(tgt_mesh,*dec.getGroup(),"target mesh");
+// parafieldT=new ParaFIELD(ON_CELLS,ONE_TIME,parameshT,comptopo);
+// parafieldT->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
+// parafieldT->getField()->getArray()->fillWithValue(1.0e300);
+//// valsT[0]=7.;
+// }
+// dec.setOrientation(2);
+// dec.attachSourceLocalField(parafieldS);
+// dec.attachTargetLocalField(parafieldT);
+// dec.synchronize();
+// dec.sendRecvData(true);
+// //
+// if(rank==0)
+// {
+// double integ1 = 0.0, integ2 = 0.0;
+// MEDCouplingFieldDouble * tgtField;
+//
+// srcField->integral(true, &integ1);
+// tgtField = parafieldT->getField();
+//// tgtField->reprQuickOverview(cout);
+// tgtField->integral(true, &integ2);
+// tgtField->setName("result");
+// string out_nam(rep + string("adrien_para.med"));
+// MEDLoader::WriteField(out_nam,tgtField, true);
+// cout << "wrote: " << out_nam << "\n";
+// CPPUNIT_ASSERT_DOUBLES_EQUAL(integ1,integ2,1e-8);
+// }
+// delete parafieldS;
+// delete parafieldT;
+// delete parameshS;
+// delete parameshT;
+//
+// MPI_Barrier(MPI_COMM_WORLD);
+//}
+
+void prepareData1(int rank, ProcessorGroup * grp,
+ MEDCouplingUMesh *& meshS, MEDCouplingUMesh *& meshT,
+ ParaMESH*& parameshS, ParaMESH*& parameshT,
+ ParaFIELD*& parafieldS, ParaFIELD*& parafieldT)
+{
if(rank==0)
{
const double coordsS[10]={0.,0.,0.5,0.,1.,0.,0.,0.5,0.5,0.5};
const double coordsT[6]={0.,0.,1.,0.,1.,1.};
- meshS=ParaMEDMEM::MEDCouplingUMesh::New();
+ meshS=MEDCouplingUMesh::New();
meshS->setMeshDimension(2);
- ParaMEDMEM::DataArrayDouble *myCoords=ParaMEDMEM::DataArrayDouble::New();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
myCoords->alloc(5,2);
std::copy(coordsS,coordsS+10,myCoords->getPointer());
meshS->setCoords(myCoords);
meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS);
meshS->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connS+4);
meshS->finishInsertingCells();
- ParaMEDMEM::ComponentTopology comptopo;
- parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh");
- parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo);
- parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint
+ ComponentTopology comptopo;
+ parameshS=new ParaMESH(meshS, *grp,"source mesh");
+ parafieldS=new ParaFIELD(ON_CELLS,NO_TIME,parameshS,comptopo);
+ parafieldS->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
double *valsS=parafieldS->getField()->getArray()->getPointer();
valsS[0]=7.; valsS[1]=8.;
//
- meshT=ParaMEDMEM::MEDCouplingUMesh::New();
+ meshT=MEDCouplingUMesh::New();
meshT->setMeshDimension(2);
- myCoords=ParaMEDMEM::DataArrayDouble::New();
+ myCoords=DataArrayDouble::New();
myCoords->alloc(3,2);
std::copy(coordsT,coordsT+6,myCoords->getPointer());
meshT->setCoords(myCoords);
meshT->allocateCells(1);
meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT);
meshT->finishInsertingCells();
- parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh");
- parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo);
- parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint
+ parameshT=new ParaMESH(meshT,*grp,"target mesh");
+ parafieldT=new ParaFIELD(ON_CELLS,NO_TIME,parameshT,comptopo);
+ parafieldT->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
double *valsT=parafieldT->getField()->getArray()->getPointer();
valsT[0]=7.;
}
{
const double coordsS[10]={1.,0.,0.5,0.5,1.,0.5,0.5,1.,1.,1.};
const double coordsT[6]={0.,0.,0.5,0.5,0.,1.};
- meshS=ParaMEDMEM::MEDCouplingUMesh::New();
+ meshS=MEDCouplingUMesh::New();
meshS->setMeshDimension(2);
- ParaMEDMEM::DataArrayDouble *myCoords=ParaMEDMEM::DataArrayDouble::New();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
myCoords->alloc(5,2);
std::copy(coordsS,coordsS+10,myCoords->getPointer());
meshS->setCoords(myCoords);
meshS->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connS);
meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS+3);
meshS->finishInsertingCells();
- ParaMEDMEM::ComponentTopology comptopo;
- parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh");
- parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo);
- parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint
+ ComponentTopology comptopo;
+ parameshS=new ParaMESH(meshS,*grp,"source mesh");
+ parafieldS=new ParaFIELD(ON_CELLS,NO_TIME,parameshS,comptopo);
+ parafieldS->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
double *valsS=parafieldS->getField()->getArray()->getPointer();
- valsS[0]=9.; valsS[1]=11.;
+ valsS[0]=9.;
+ valsS[1]=11.;
//
- meshT=ParaMEDMEM::MEDCouplingUMesh::New();
+ meshT=MEDCouplingUMesh::New();
meshT->setMeshDimension(2);
- myCoords=ParaMEDMEM::DataArrayDouble::New();
+ myCoords=DataArrayDouble::New();
myCoords->alloc(3,2);
std::copy(coordsT,coordsT+6,myCoords->getPointer());
meshT->setCoords(myCoords);
meshT->allocateCells(1);
meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT);
meshT->finishInsertingCells();
- parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh");
- parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo);
- parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint
+ parameshT=new ParaMESH(meshT,*grp,"target mesh");
+ parafieldT=new ParaFIELD(ON_CELLS,NO_TIME,parameshT,comptopo);
+ parafieldT->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
double *valsT=parafieldT->getField()->getArray()->getPointer();
valsT[0]=8.;
}
{
const double coordsS[8]={0.,0.5, 0.5,0.5, 0.,1., 0.5,1.};
const double coordsT[6]={0.5,0.5,0.,1.,1.,1.};
- meshS=ParaMEDMEM::MEDCouplingUMesh::New();
+ meshS=MEDCouplingUMesh::New();
meshS->setMeshDimension(2);
- ParaMEDMEM::DataArrayDouble *myCoords=ParaMEDMEM::DataArrayDouble::New();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
myCoords->alloc(4,2);
std::copy(coordsS,coordsS+8,myCoords->getPointer());
meshS->setCoords(myCoords);
meshS->allocateCells(1);
meshS->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,connS);
meshS->finishInsertingCells();
- ParaMEDMEM::ComponentTopology comptopo;
- parameshS=new ParaMEDMEM::ParaMESH(meshS,*dec.getGrp(),"source mesh");
- parafieldS=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshS,comptopo);
- parafieldS->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint
+ ComponentTopology comptopo;
+ parameshS=new ParaMESH(meshS,*grp,"source mesh");
+ parafieldS=new ParaFIELD(ON_CELLS,NO_TIME,parameshS,comptopo);
+ parafieldS->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
double *valsS=parafieldS->getField()->getArray()->getPointer();
valsS[0]=10.;
//
- meshT=ParaMEDMEM::MEDCouplingUMesh::New();
+ meshT=MEDCouplingUMesh::New();
meshT->setMeshDimension(2);
- myCoords=ParaMEDMEM::DataArrayDouble::New();
+ myCoords=DataArrayDouble::New();
myCoords->alloc(3,2);
std::copy(coordsT,coordsT+6,myCoords->getPointer());
meshT->setCoords(myCoords);
meshT->allocateCells(1);
meshT->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,connT);
meshT->finishInsertingCells();
- parameshT=new ParaMEDMEM::ParaMESH(meshT,*dec.getGrp(),"target mesh");
- parafieldT=new ParaMEDMEM::ParaFIELD(ParaMEDMEM::ON_CELLS,ParaMEDMEM::NO_TIME,parameshT,comptopo);
- parafieldT->getField()->setNature(ParaMEDMEM::ConservativeVolumic);//IntegralGlobConstraint
+ parameshT=new ParaMESH(meshT,*grp,"target mesh");
+ parafieldT=new ParaFIELD(ON_CELLS,NO_TIME,parameshT,comptopo);
+ parafieldT->getField()->setNature(ConservativeVolumic);//IntegralGlobConstraint
double *valsT=parafieldT->getField()->getArray()->getPointer();
valsT[0]=9.;
}
+
+}
+
+/*! Test case from the official doc of the OverlapDEC.
+ * WARNING: bounding boxes are tweaked here to make the case more interesting (i.e. to avoid an all-to-all exchange
+ * between all procs).
+ */
+void ParaMEDMEMTest::testOverlapDEC1()
+{
+ int size, rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ // char hostname[256];
+ // printf("(%d) PID %d on localhost ready for attach\n", rank, getpid());
+ // fflush(stdout);
+
+ if (size != 3) return ;
+
+ int nproc = 3;
+ std::set<int> procs;
+
+ for (int i=0; i<nproc; i++)
+ procs.insert(i);
+
+ CommInterface interface;
+ OverlapDEC dec(procs);
+ ProcessorGroup * grp = dec.getGroup();
+ MEDCouplingUMesh* meshS=0, *meshT=0;
+ ParaMESH* parameshS=0, *parameshT=0;
+ ParaFIELD* parafieldS=0, *parafieldT=0;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ prepareData1(rank, grp, meshS, meshT, parameshS, parameshT, parafieldS, parafieldT);
+
+ /*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ * HACK ON BOUNDING BOX TO MAKE THIS CASE SIMPLE AND USABLE IN DEBUG
+   * Bounding boxes are made slightly smaller than they should be, thus localising the work to be done
+ * and avoiding every proc talking to everyone else.
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ */
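+  // (A negative adjustment shrinks the bounding boxes; compare with the positive value used in testOverlapDEC2 below.)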
+ dec.setBoundingBoxAdjustmentAbs(-1.0e-12);
+
+ dec.attachSourceLocalField(parafieldS);
+ dec.attachTargetLocalField(parafieldT);
+ dec.synchronize();
+ dec.sendRecvData(true);
+ //
+ if(rank==0)
+ {
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(8.75,parafieldT->getField()->getArray()->getIJ(0,0),1e-12);
+ }
+ if(rank==1)
+ {
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(8.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12);
+ }
+ if(rank==2)
+ {
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(10.5,parafieldT->getField()->getArray()->getIJ(0,0),1e-12);
+ }
+ delete parafieldS;
+ delete parafieldT;
+ delete parameshS;
+ delete parameshT;
+ meshS->decrRef();
+ meshT->decrRef();
+
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*!
+ * Same as testOverlapDEC1() but with regular bounding boxes. If you're looking for a nice debug case,
+ * testOverlapDEC1() is more appropriate.
+ */
+void ParaMEDMEMTest::testOverlapDEC2()
+{
+ int size, rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ if (size != 3) return ;
+
+ int nproc = 3;
+ std::set<int> procs;
+
+ for (int i=0; i<nproc; i++)
+ procs.insert(i);
+
+ CommInterface interface;
+ OverlapDEC dec(procs);
+ ProcessorGroup * grp = dec.getGroup();
+ MEDCouplingUMesh* meshS=0, *meshT=0;
+ ParaMESH* parameshS=0, *parameshT=0;
+ ParaFIELD* parafieldS=0, *parafieldT=0;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ prepareData1(rank, grp, meshS, meshT, parameshS, parameshT, parafieldS, parafieldT);
+
+ /* Normal bounding boxes here: */
+ dec.setBoundingBoxAdjustmentAbs(+1.0e-12);
+
dec.attachSourceLocalField(parafieldS);
dec.attachTargetLocalField(parafieldT);
dec.synchronize();