From: Anthony Geay
Date: Wed, 16 Aug 2023 08:16:04 +0000 (+0200)
Subject: [EDF27375] : Add InterpKernelDEC.synchronizeWithDefaultValue method to specify a...
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=a0b8f32f06ce603e9392afc4528c48700ecbdb61;p=tools%2Fmedcoupling.git

[EDF27375] : Add InterpKernelDEC.synchronizeWithDefaultValue method to specify a default value for non-intersected cells
---
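Notes: the sketch below (illustration only; it mirrors the new
test_InterpKernelDEC_default test added at the end of this patch, where the
group/mesh/field setup is shown in full) summarizes how the new entry point
is meant to be used. Each side attaches its local field, then calls the new
method in place of synchronize():

    # minimal sketch, assuming source_group/target_group and a cell field
    # are built as in test_InterpKernelDEC_default below
    dec = InterpKernelDEC(source_group, target_group)
    dec.attachLocalField(field)               # MEDCouplingFieldDouble ON_CELLS
    dec.synchronizeWithDefaultValue(-1000.0)  # synchronize() + a default value
    dec.recvData()                            # cells of the receiving field that
                                              # intersect nothing now hold -1000.0

The default applies on the receiving side: tuples whose interpolation-matrix
row is empty (no intersection with the emitting group) are filled with the
given value after each exchange, instead of being left at 0.
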
diff --git a/src/ParaMEDMEM/InterpKernelDEC.cxx b/src/ParaMEDMEM/InterpKernelDEC.cxx
index ca48b0450..8ed6b1301 100644
--- a/src/ParaMEDMEM/InterpKernelDEC.cxx
+++ b/src/ParaMEDMEM/InterpKernelDEC.cxx
@@ -165,6 +165,15 @@ namespace MEDCoupling
     _interpolation_matrix->prepare();
   }
 
+  /*!
+   * Set a default value for non-fetched entities
+   */
+  void InterpKernelDEC::synchronizeWithDefaultValue(double val)
+  {
+    this->synchronize();
+    if(_interpolation_matrix)
+      _interpolation_matrix->setDefaultValue(val);
+  }
 
   /*!
     Receives the data whether the processor is on the working side or on the lazy side.
     It must match a \a sendData() call on the other side.
diff --git a/src/ParaMEDMEM/InterpKernelDEC.hxx b/src/ParaMEDMEM/InterpKernelDEC.hxx
index 653a0e398..52891b8b9 100644
--- a/src/ParaMEDMEM/InterpKernelDEC.hxx
+++ b/src/ParaMEDMEM/InterpKernelDEC.hxx
@@ -135,13 +135,14 @@ namespace MEDCoupling
     void release();
 
     void synchronize();
+    void synchronizeWithDefaultValue(double val);
     void recvData();
     void recvData(double time);
     void sendData();
     void sendData(double time , double deltatime);
     void prepareSourceDE() { }
     void prepareTargetDE() { }
-  private :
+  private:
     InterpolationMatrix* _interpolation_matrix;
   };
 }
diff --git a/src/ParaMEDMEM/InterpolationMatrix.cxx b/src/ParaMEDMEM/InterpolationMatrix.cxx
index 91a664add..7bf59e2ec 100644
--- a/src/ParaMEDMEM/InterpolationMatrix.cxx
+++ b/src/ParaMEDMEM/InterpolationMatrix.cxx
@@ -871,7 +871,6 @@ namespace MEDCoupling
     if (_source_group.containsMyRank())
       {
         mcIdType nbrows = ToIdType(_coeffs.size());
-
         // performing W.S
         // W is the intersection matrix
         // S is the source vector
@@ -894,17 +893,12 @@ namespace MEDCoupling
 
     if (_target_group.containsMyRank())
       {
-        mcIdType nbelems = field.getArray()->getNumberOfTuples() ;
-        double* value = const_cast<double*> (field.getArray()->getPointer());
-        for (mcIdType i=0; i<nbelems*nbcomp; i++)
-          {
-            value[i]=0.0;
-          }
+        field.getArray()->fillWithZero();
       }
 
     //on source side : sending T=VT^(-1).(W.S)
     //on target side :: receiving T and storing it in field
-    _mapping.sendRecv(&target_value[0],field);
+    _mapping.sendRecv(target_value.data(),field,this->_presence_dft_value,this->_dft_value);
   }
 
@@ -941,17 +935,25 @@ namespace MEDCoupling
         //T is the target vector
         for (mcIdType irow = 0; irow < nbrows; irow++)
           {
-            for (mcIdType icol = _row_offsets[irow]; icol < _row_offsets[irow+1]; icol++)
-              {
-                int colid = _coeffs[irow][icol-_row_offsets[irow]].first;
-                double value = _coeffs[irow][icol-_row_offsets[irow]].second;
-                double deno = _deno_reverse_multiply[irow][icol-_row_offsets[irow]];
-                for (std::size_t icomp=0; icomp<(std::size_t)nbcomp; icomp++)
-                  {
-                    double coeff_row = target_value[(colid-1)*nbcomp+icomp];
-                    source_value[irow*nbcomp+icomp] += value*coeff_row/deno;
-                  }
-              }
+            if( _row_offsets[irow+1] > _row_offsets[irow] )
+              {
+                for (mcIdType icol = _row_offsets[irow]; icol < _row_offsets[irow+1]; icol++)
+                  {
+                    int colid = _coeffs[irow][icol-_row_offsets[irow]].first;
+                    double value = _coeffs[irow][icol-_row_offsets[irow]].second;
+                    double deno = _deno_reverse_multiply[irow][icol-_row_offsets[irow]];
+                    for (std::size_t icomp=0; icomp<(std::size_t)nbcomp; icomp++)
+                      {
+                        double coeff_row = target_value[(colid-1)*nbcomp+icomp];
+                        source_value[irow*nbcomp+icomp] += value*coeff_row/deno;
+                      }
+                  }
+              }
+            else
+              {
+                std::fill(source_value.data()+irow*nbcomp,source_value.data()+(irow+1)*nbcomp,this->_dft_value);
+              }
           }
       }
   }
diff --git a/src/ParaMEDMEM/InterpolationMatrix.hxx b/src/ParaMEDMEM/InterpolationMatrix.hxx
index 5712cccc2..7e4f7a989 100644
--- a/src/ParaMEDMEM/InterpolationMatrix.hxx
+++ b/src/ParaMEDMEM/InterpolationMatrix.hxx
@@ -59,6 +59,7 @@ namespace MEDCoupling
     void prepare();
     mcIdType getNbRows() const { return ToIdType(_row_offsets.size()); }
     MPIAccessDEC* getAccessDEC() { return _mapping.getAccessDEC(); }
+    void setDefaultValue(double val) { _presence_dft_value = true; _dft_value = val; }
   private:
     void computeConservVolDenoW(ElementLocator& elementLocator);
     void computeIntegralDenoW(ElementLocator& elementLocator);
@@ -93,6 +94,8 @@ namespace MEDCoupling
   private:
     bool isSurfaceComputationNeeded(const std::string& method) const;
   private:
+    bool _presence_dft_value = false;
+    double _dft_value = 0.0;
     const MEDCoupling::ParaFIELD *_source_field;
     std::vector<mcIdType> _row_offsets;
     std::map<std::pair<mcIdType,mcIdType>, mcIdType > _col_offsets;
diff --git a/src/ParaMEDMEM/MxN_Mapping.cxx b/src/ParaMEDMEM/MxN_Mapping.cxx
index 5380d74ee..fca6395a9 100644
--- a/src/ParaMEDMEM/MxN_Mapping.cxx
+++ b/src/ParaMEDMEM/MxN_Mapping.cxx
@@ -141,7 +141,7 @@ namespace MEDCoupling
    * The ids that were defined by addElementFromSource method
    * are sent.
    */
-  void MxN_Mapping::sendRecv(double* sendfield, MEDCouplingFieldDouble& field) const
+  void MxN_Mapping::sendRecv(double* sendfield, MEDCouplingFieldDouble& field, bool isDftValue, double dftValue) const
   {
     CommInterface comm_interface=_union_group->getCommInterface();
     const MPIProcessorGroup* group = static_cast<const MPIProcessorGroup*>(_union_group);
@@ -153,7 +153,6 @@ namespace MEDCoupling
       sendbuf = new double[_sending_ids.size()*nbcomp];
     if (_recv_ids.size()>0)
       recvbuf = new double[_recv_ids.size()*nbcomp];
-
     int* sendcounts = new int[_union_group->size()];
     int* senddispls=new int[_union_group->size()];
     int* recvcounts=new int[_union_group->size()];
@@ -196,16 +195,29 @@ namespace MEDCoupling
 
     //setting the received values in the field
     DataArrayDouble *fieldArr=field.getArray();
-    double* recvptr=recvbuf;
+    double* recvptr=recvbuf;
+    mcIdType nbTuples(fieldArr->getNumberOfTuples());
+    std::vector<bool> hitMachine(nbTuples,false);
     for (int i=0; i< _recv_proc_offsets[_union_group->size()]; i++)
       {
+        mcIdType recvId(_recv_ids[i]);
+        hitMachine[recvId] = true;
         for (int icomp=0; icomp<nbcomp; icomp++)
          {
-            double temp = fieldArr->getIJ(_recv_ids[i],icomp);
-            fieldArr->setIJ(_recv_ids[i],icomp,temp+*recvptr);
+            double temp = fieldArr->getIJ(recvId,icomp);
+            fieldArr->setIJ(recvId,icomp,temp+*recvptr);
            recvptr++;
          }
-      }
+      }
+    if( isDftValue )
+      {
+        double *fieldPtr( fieldArr->getPointerSilent() );
+        for( mcIdType ituple = 0 ; ituple < nbTuples ; ++ituple )
+          {
+            if( ! hitMachine[ituple] )
+              std::fill( fieldPtr + ituple*nbcomp, fieldPtr + (ituple+1)*nbcomp, dftValue);
+          }
+      }
     if (sendbuf!=0 && getAllToAllMethod()== Native)
       delete[] sendbuf;
     if (recvbuf !=0)
diff --git a/src/ParaMEDMEM/MxN_Mapping.hxx b/src/ParaMEDMEM/MxN_Mapping.hxx
index 3d90c16b9..2c958e709 100644
--- a/src/ParaMEDMEM/MxN_Mapping.hxx
+++ b/src/ParaMEDMEM/MxN_Mapping.hxx
@@ -45,7 +45,7 @@ namespace MEDCoupling
     void addElementFromSource(int distant_proc, mcIdType distant_elem);
     void prepareSendRecv();
     void sendRecv(MEDCouplingFieldDouble& field);
-    void sendRecv(double* sendfield, MEDCouplingFieldDouble& field) const ;
+    void sendRecv(double* sendfield, MEDCouplingFieldDouble& field, bool isDftValue, double dftValue) const ;
     void reverseSendRecv(double* recvfield, MEDCouplingFieldDouble& field) const ;
 
     //
diff --git a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
index 280b5eed5..5c65a95f4 100644
--- a/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
+++ b/src/ParaMEDMEM_Swig/ParaMEDMEMCommon.i
@@ -296,6 +296,7 @@ namespace MEDCoupling
     void release();
 
     void synchronize();
+    void synchronizeWithDefaultValue(double val);
     void recvData();
     void recvData(double time);
     void sendData();
diff --git a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
index 3167bf943..7be4f19da 100755
--- a/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
+++ b/src/ParaMEDMEM_Swig/test_InterpKernelDEC.py
@@ -255,6 +255,104 @@ class ParaMEDMEM_IK_DEC_Tests(unittest.TestCase):
         source_group.release()
         MPI.COMM_WORLD.Barrier()
 
+    def test_InterpKernelDEC_default(self):
+        """
+        [EDF27375] : Use a default value for non-intersected cells
+        """
+        size = MPI.COMM_WORLD.size
+        rank = MPI.COMM_WORLD.rank
+        if size != 4:
+            print("Should be run on 4 procs!")
+            return
+        nproc_source = 2
+        procs_source = list(range(nproc_source))
+        procs_target = list(range(size - nproc_source, size))
+
+        interface = CommInterface()
+        target_group = MPIProcessorGroup(interface, procs_target)
+        source_group = MPIProcessorGroup(interface, procs_source)
+        dec = InterpKernelDEC(source_group, target_group)
+        #
+        MPI.COMM_WORLD.Barrier()
+        if source_group.containsMyRank():
+            mesh = eval("Source_Proc_{}".format(rank))()
+            nb_local=mesh.getNumberOfCells()
+            field = MEDCouplingFieldDouble(ON_CELLS)
+            field.setNature(IntensiveMaximum)
+            field.setMesh(mesh)
+            arr = DataArrayDouble(nb_local) ; arr.iota() ; arr += rank
+            field.setArray(arr)
+            dec.attachLocalField(field)
+            dec.synchronizeWithDefaultValue(-2000.0)
+            dec.sendData()
+            # target -> source
+            dec.recvData()
+            if rank == 0:
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6,0.6,-2000]),1e-12))
+            if rank == 1:
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([1.0,-2000]),1e-12))
+        else:
+            mesh = eval("Target_Proc_{}".format(rank))()
+            nb_local=mesh.getNumberOfCells()
+            field = MEDCouplingFieldDouble(ON_CELLS)
+            field.setNature(IntensiveMaximum)
+            field.setMesh(mesh)
+            arr = DataArrayDouble(nb_local) ; arr[:] = -20
+            field.setArray(arr)
+            dec.attachLocalField(field)
+            dec.synchronizeWithDefaultValue(-1000.0)
+            dec.recvData()
+            if rank == 2:
+                # matrix S0 / T2 = [[(0,S0,1),(1,S0,1.5)]]
+                # IntensiveMaximum => [[(0,S0,1/2.5),(1,S0,1.5/2.5)]]
+                #
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([0.6]),1e-12))
+            if rank == 3:
+                # matrix S1 / T3 = [[],[(0,S1,1.0)],[(0,S1,2.0)],[]]
+                # IntensiveMaximum => [[],[(0,S1,1.0/1.0)],[(0,S1,2.0/2.0)],[]]
+                self.assertTrue(field.getArray().isEqual(DataArrayDouble([-1000.0, 1.0, 1.0, -1000.0]),1e-8))
+            # target -> source
+            dec.sendData()
+
+        # Some clean up that still needs MPI communication, so to be done before MPI_Finalize()
+        dec.release()
+        target_group.release()
+        source_group.release()
+        MPI.COMM_WORLD.Barrier()
+
+def Source_Proc_0():
+    coo = DataArrayDouble([(0,2),(2,2),(4,2),(0,4),(2,4),(4,4),(0,6),(2,6)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
+    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
+    m.insertNextCell(NORM_QUAD4,[3,4,7,6])
+    return m
+
+def Source_Proc_1():
+    coo = DataArrayDouble([(6,2),(8,2),(10,2),(6,4),(8,4),(10,4)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,4,3])
+    m.insertNextCell(NORM_QUAD4,[1,2,5,4])
+    return m
+
+def Target_Proc_2():
+    coo = DataArrayDouble([(1,0),(3.5,0),(1,3),(3.5,3)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,3,2])
+    return m
+
+def Target_Proc_3():
+    coo = DataArrayDouble([(6,0),(7,0),(8,0),(9,0),(10,0),
+                           (6,1),(7,1),(9,1),(10,1),
+                           (7,3),(8,3),
+                           (6,4),(7,4)])
+    m = MEDCouplingUMesh("mesh",2) ; m.setCoords(coo) ; m.allocateCells()
+    m.insertNextCell(NORM_QUAD4,[0,1,6,5])
+    m.insertNextCell(NORM_QUAD4,[1,2,10,9])
+    m.insertNextCell(NORM_QUAD4,[5,6,12,11])
+    m.insertNextCell(NORM_QUAD4,[3,4,8,7])
+    return m
+
 if __name__ == "__main__":
     unittest.main()
     MPI.Finalize()
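
The test above only runs when launched on four MPI processes (it prints a
warning and returns otherwise). A typical invocation, assuming an MPI-enabled
build with mpi4py available, is:

    mpirun -np 4 python3 test_InterpKernelDEC.py

As a cross-check of the expected values: on source rank 0 the field is
[0.,1.,2.] (iota plus rank), and target rank 2 intersects source cells 0 and 1
with weights 1 and 1.5, so under IntensiveMaximum T2 = (1*0. + 1.5*1.)/2.5 = 0.6;
rows with no intersection receive the side's default (-1000.0 on the target
side, -2000.0 on the source side).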