--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "MEDLoader.hxx"
+#include "CellModel.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingMemArray.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+
+extern "C"
+{
+#include "med.h"
+}
+
+#include <string>
+#include <cstring>
+#include <sstream>
+#include <fstream>
+#include <iterator>
+#include <algorithm>
+#include <numeric>
+
// Table of every MED-file cell geometry handled by this loader.
// NOTE: slot order must stay strictly in sync with typmai2 below, which gives
// the MEDCoupling normalized type stored at the same index.
med_geometrie_element typmai[MED_NBR_GEOMETRIE_MAILLE+2] = { MED_POINT1,
                                                             MED_SEG2,
                                                             MED_SEG3,
                                                             MED_TRIA3,
                                                             MED_TRIA6,
                                                             MED_QUAD4,
                                                             MED_QUAD8,
                                                             MED_TETRA4,
                                                             MED_TETRA10,
                                                             MED_HEXA8,
                                                             MED_HEXA20,
                                                             MED_PENTA6,
                                                             MED_PENTA15,
                                                             MED_PYRA5,
                                                             MED_PYRA13,
                                                             MED_POLYGONE,
                                                             MED_POLYEDRE };

// Geometry table used for node-located fields : nodes carry no cell geometry.
med_geometrie_element typmainoeud[1] = { MED_NONE };
+
// MEDCoupling normalized type for each slot of typmai above (same indexing).
// Slot 0 (MED_POINT1) has no unstructured-mesh counterpart, hence NORM_ERROR.
INTERP_KERNEL::NormalizedCellType typmai2[MED_NBR_GEOMETRIE_MAILLE+2] = { INTERP_KERNEL::NORM_ERROR,
                                                                          INTERP_KERNEL::NORM_SEG2,
                                                                          INTERP_KERNEL::NORM_SEG3,
                                                                          INTERP_KERNEL::NORM_TRI3,
                                                                          INTERP_KERNEL::NORM_TRI6,
                                                                          INTERP_KERNEL::NORM_QUAD4,
                                                                          INTERP_KERNEL::NORM_QUAD8,
                                                                          INTERP_KERNEL::NORM_TETRA4,
                                                                          INTERP_KERNEL::NORM_TETRA10,
                                                                          INTERP_KERNEL::NORM_HEXA8,
                                                                          INTERP_KERNEL::NORM_HEXA20,
                                                                          INTERP_KERNEL::NORM_PENTA6,
                                                                          INTERP_KERNEL::NORM_PENTA15,
                                                                          INTERP_KERNEL::NORM_PYRA5,
                                                                          INTERP_KERNEL::NORM_PYRA13,
                                                                          INTERP_KERNEL::NORM_POLYGON,
                                                                          INTERP_KERNEL::NORM_POLYHED };
+
+using namespace ParaMEDMEM;
+
namespace MEDLoader
{
  // Functor for std::for_each : concatenates the raw value arrays of the
  // visited per-cell-type chunks into a pre-allocated destination buffer.
  class FieldPerTypeCopier
  {
  public:
    FieldPerTypeCopier(double *ptr):_ptr(ptr) { }
    void operator()(const MEDLoader::MEDFieldDoublePerCellType& elt) { _ptr=std::copy(elt.getArray(),elt.getArray()+elt.getNbOfValues(),_ptr); }
  private:
    double *_ptr;//write cursor into the destination buffer
  };

  // Forward declarations of the file-local helpers defined further down.
  std::string buildStringFromFortran(const char *expr, int lgth);
  std::vector<std::string> getMeshNamesFid(med_idt fid);
  void readFieldDoubleDataInMedFile(const char *fileName, const char *meshName, const char *fieldName, std::list<MEDLoader::MEDFieldDoublePerCellType>& field,
                                    int iteration, int order, ParaMEDMEM::TypeOfField typeOfOutField, double& time);
  std::vector<int> getIdsFromFamilies(const char *fileName, const char *meshName, const std::vector<std::string>& fams);
  std::vector<int> getIdsFromGroups(const char *fileName, const char *meshName, const std::vector<std::string>& grps);
  med_int getIdFromMeshName(med_idt fid, const char *meshName) throw(INTERP_KERNEL::Exception);
  void dispatchElems(int nbOfElemCell, int nbOfElemFace, int& nbOfElem, med_entite_maillage& whichEntity);
  void readUMeshDataInMedFile(med_idt fid, med_int meshId, double *&coords, int& nCoords, int& spaceDim, std::list<MEDLoader::MEDConnOfOneElemType>& conn);
  int buildMEDSubConnectivityOfOneType(DataArrayInt *conn, DataArrayInt *connIndex, INTERP_KERNEL::NormalizedCellType type, std::vector<int>& conn4MEDFile,
                                       std::vector<int>& connIndex4MEDFile, std::vector<int>& connIndexRk24MEDFile);
  MEDCouplingUMesh *readUMeshFromFileLev1(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<int>& ids,
                                          const std::vector<INTERP_KERNEL::NormalizedCellType>& typesToKeep, unsigned& meshDimExtract) throw(INTERP_KERNEL::Exception);
  void tradMEDFileCoreFrmt2MEDCouplingUMesh(const std::list<MEDLoader::MEDConnOfOneElemType>& medConnFrmt,
                                            DataArrayInt* &conn,
                                            DataArrayInt* &connIndex,
                                            const std::vector<int>& familiesToKeep);
  ParaMEDMEM::DataArrayDouble *buildArrayFromRawData(const std::list<MEDLoader::MEDFieldDoublePerCellType>& fieldPerType);
  int buildMEDSubConnectivityOfOneTypesPolyg(DataArrayInt *conn, DataArrayInt *connIndex, std::vector<int>& conn4MEDFile, std::vector<int>& connIndex4MEDFile);
  int buildMEDSubConnectivityOfOneTypesPolyh(DataArrayInt *conn, DataArrayInt *connIndex, std::vector<int>& conn4MEDFile,
                                             std::vector<int>& connIndex4MEDFile, std::vector<int>& connIndexRk24MEDFile);
  int buildMEDSubConnectivityOfOneTypeStaticTypes(DataArrayInt *conn, DataArrayInt *connIndex, INTERP_KERNEL::NormalizedCellType type, std::vector<int>& conn4MEDFile);
  ParaMEDMEM::MEDCouplingFieldDouble *readFieldDoubleLev1(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order,
                                                          ParaMEDMEM::TypeOfField typeOfOutField);
}
+
// Characters stripped from the end of fixed-width Fortran-style MED strings.
const char WHITE_SPACES[]=" \n";
+
/*!
 * Builds the per-geometric-type connectivity holder. The object does NOT copy
 * the arrays : it records the pointers as-is; ownership is released explicitly
 * through releaseArray().
 * @param lgth is the size of fam tab. For classical types conn is size of 'lgth'*number_of_nodes_in_type.
 * @param index is optional, only for polys. Set it to 0 if it is not the case.
 * @param connLgth is the size of conn in the case of poly. Unused if it is not the case.
 */
MEDLoader::MEDConnOfOneElemType::MEDConnOfOneElemType(INTERP_KERNEL::NormalizedCellType type, int *conn, int *index, int *fam, int lgth, int connLgth):_lgth(lgth),_fam(fam),
                                                                                                                                                       _conn(conn),_index(index),
                                                                                                                                                       _global(0),_type(type),
                                                                                                                                                       _conn_lgth(connLgth)
{
}
+
+void MEDLoader::MEDConnOfOneElemType::setGlobal(int *global)
+{
+ if(_global!=global)
+ {
+ if(_global)
+ delete [] _global;
+ _global=global;
+ }
+}
+
/*!
 * Frees every array this holder owns (family ids, connectivity, poly index,
 * global numbering). The pointers are left dangling : the object must not be
 * used afterwards.
 */
void MEDLoader::MEDConnOfOneElemType::releaseArray()
{
  delete [] _fam;
  delete [] _conn;
  delete [] _index;
  delete [] _global;
}
+
/*!
 * Holder for the values of one field chunk on one cell type. The 'values'
 * array ('nval' tuples of 'ncomp' components) is adopted, not copied; free it
 * through releaseArray().
 */
MEDLoader::MEDFieldDoublePerCellType::MEDFieldDoublePerCellType(INTERP_KERNEL::NormalizedCellType type, double *values, int ncomp, int nval):_nval(nval),_ncomp(ncomp),_values(values),_type(type)
{
}

// Frees the owned value array; the object must not be used afterwards.
void MEDLoader::MEDFieldDoublePerCellType::releaseArray()
{
  delete [] _values;
}
+
+namespace MEDLoader
+{
+ std::string buildStringFromFortran(const char *expr, int lgth)
+ {
+ std::string ret(expr,lgth);
+ std::string whiteSpaces(WHITE_SPACES);
+ std::size_t lgthReal=strlen(ret.c_str());
+ std::string ret2=ret.substr(0,lgthReal);
+ std::size_t found=ret2.find_last_not_of(whiteSpaces);
+ if (found!=std::string::npos)
+ ret2.erase(found+1);
+ else
+ ret2.clear();//ret is all whitespace
+ return ret2;
+ }
+
  /*!
   * Returns the names of all meshes stored in the already-opened MED file
   * 'fid', trimmed of their fixed-width padding.
   */
  std::vector<std::string> getMeshNamesFid(med_idt fid)
  {
    med_maillage type_maillage;
    char maillage_description[MED_TAILLE_DESC+1];
    med_int dim;
    char nommaa[MED_TAILLE_NOM+1];
    med_int n=MEDnMaa(fid);//number of meshes in the file
    std::vector<std::string> ret(n);
    for(int i=0;i<n;i++)
      {
        //MED file indices are 1-based, hence i+1
        MEDmaaInfo(fid,i+1,nommaa,&dim,&type_maillage,maillage_description);
        std::string cur=buildStringFromFortran(nommaa,sizeof(nommaa));
        ret[i]=cur;
      }
    return ret;
  }
+
+ std::vector<std::string> GetMeshNames(const char *fileName)
+ {
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ std::vector<std::string> ret=getMeshNamesFid(fid);
+ MEDfermer(fid);
+ return ret;
+ }
+
+ std::vector<std::string> GetMeshFamilyNames(const char *fileName, const char *meshName)
+ {
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ med_int nfam=MEDnFam(fid,(char *)meshName);
+ std::vector<std::string> ret(nfam);
+ char nomfam[MED_TAILLE_NOM+1];
+ med_int numfam;
+ for(int i=0;i<nfam;i++)
+ {
+ int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
+ med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
+ med_int *attide=new int[natt];
+ med_int *attval=new int[natt];
+ char *attdes=new char[MED_TAILLE_DESC*natt+1];
+ char *gro=new char[MED_TAILLE_LNOM*ngro+1];
+ MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
+ std::string cur=buildStringFromFortran(nomfam,sizeof(nomfam));
+ ret[i]=cur;
+ delete [] attdes;
+ delete [] gro;
+ delete [] attide;
+ delete [] attval;
+ }
+ MEDfermer(fid);
+ return ret;
+ }
+
+ std::vector<std::string> GetMeshGroupsNames(const char *fileName, const char *meshName)
+ {
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ med_int nfam=MEDnFam(fid,(char *)meshName);
+ std::vector<std::string> ret;
+ char nomfam[MED_TAILLE_NOM+1];
+ med_int numfam;
+ for(int i=0;i<nfam;i++)
+ {
+ int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
+ med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
+ med_int *attide=new int[natt];
+ med_int *attval=new int[natt];
+ char *attdes=new char[MED_TAILLE_DESC*natt+1];
+ char *gro=new char[MED_TAILLE_LNOM*ngro+1];
+ MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
+ for(int j=0;j<ngro;j++)
+ {
+ std::string cur=buildStringFromFortran(gro+j*MED_TAILLE_LNOM,MED_TAILLE_LNOM);
+ if(std::find(ret.begin(),ret.end(),cur)==ret.end())
+ ret.push_back(cur);
+ }
+ delete [] attdes;
+ delete [] gro;
+ delete [] attide;
+ delete [] attval;
+ }
+ MEDfermer(fid);
+ return ret;
+ }
+
  /*!
   * Returns the names of all cell-supported fields (entity MED_MAILLE) lying
   * on mesh 'meshName' in file 'fileName'. A field is listed as soon as one of
   * its cell geometry types has at least one time step on that mesh.
   */
  std::vector<std::string> GetCellFieldNamesOnMesh(const char *fileName, const char *meshName)
  {
    std::vector<std::string> ret;
    med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
    med_int nbFields=MEDnChamp(fid,0);//MEDnChamp(fid,0) returns the number of fields
    //
    med_type_champ typcha;
    //med_int nbpdtnor=0,pflsize,*pflval,lnsize;
    med_int ngauss=0;
    med_int numdt=0,numo=0,nbrefmaa;
    med_float dt=0.0;
    med_booleen local;
    //char pflname[MED_TAILLE_NOM+1]="";
    //char locname[MED_TAILLE_NOM+1]="";
    char maa_ass[MED_TAILLE_NOM+1]="";
    char dt_unit[MED_TAILLE_PNOM+1]="";
    char nomcha[MED_TAILLE_NOM+1]="";
    //
    for(int i=0;i<nbFields;i++)
      {
        //MEDnChamp(fid,i+1) returns the number of components of field i+1
        med_int ncomp=MEDnChamp(fid,i+1);
        char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
        char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
        MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
        std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
        delete [] comp;//component names/units not needed here
        delete [] unit;
        bool found=false;
        //scan every cell geometry until the field is seen on the wanted mesh
        for(int j=0;j<MED_NBR_GEOMETRIE_MAILLE+2 && !found;j++)
          {
            med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_MAILLE,typmai[j]);
            if(nbPdt>0)
              {
                //inspect the first time step only : enough to learn the support mesh
                MEDpasdetempsInfo(fid,nomcha,MED_MAILLE,typmai[j],1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
                std::string curMeshName=buildStringFromFortran(maa_ass,MED_TAILLE_NOM+1);
                if(curMeshName==meshName)
                  {
                    found=true;//avoid listing the same field once per geometry
                    ret.push_back(curFieldName);
                  }
              }
          }
      }
    MEDfermer(fid);
    return ret;
  }
+
+ std::vector<std::string> GetNodeFieldNamesOnMesh(const char *fileName, const char *meshName)
+ {
+ std::vector<std::string> ret;
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ med_int nbFields=MEDnChamp(fid,0);
+ //
+ med_type_champ typcha;
+ med_int ngauss=0;
+ med_int numdt=0,numo=0,nbrefmaa;
+ med_float dt=0.0;
+ med_booleen local;
+ char maa_ass[MED_TAILLE_NOM+1]="";
+ char dt_unit[MED_TAILLE_PNOM+1]="";
+ char nomcha[MED_TAILLE_NOM+1]="";
+ //
+ for(int i=0;i<nbFields;i++)
+ {
+ med_int ncomp=MEDnChamp(fid,i+1);
+ char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
+ char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
+ MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
+ std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
+ delete [] comp;
+ delete [] unit;
+ bool found=false;
+ med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_NOEUD,MED_NONE);
+ if(nbPdt>0)
+ {
+ MEDpasdetempsInfo(fid,nomcha,MED_NOEUD,MED_NONE,1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
+ std::string curMeshName=buildStringFromFortran(maa_ass,MED_TAILLE_NOM+1);
+ if(curMeshName==meshName)
+ {
+ found=true;
+ ret.push_back(curFieldName);
+ }
+ }
+ }
+ MEDfermer(fid);
+ return ret;
+ }
+
  /*!
   * Returns the (numdt,numo) time step pairs available for cell field
   * 'fieldName' in file 'fileName'.
   * NOTE(review): scanning stops after the first geometry type that owns time
   * steps — presumably the steps are the same for every geometry of the field;
   * confirm against the writer.
   */
  std::vector< std::pair<int,int> > GetCellFieldIterations(const char *fileName, const char *fieldName)
  {
    std::vector< std::pair<int,int> > ret;
    med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
    med_int nbFields=MEDnChamp(fid,0);//MEDnChamp(fid,0) returns the number of fields
    //
    med_type_champ typcha;
    med_int ngauss=0;
    med_int numdt=0,numo=0,nbrefmaa;
    med_float dt=0.0;
    med_booleen local;
    char maa_ass[MED_TAILLE_NOM+1]="";
    char dt_unit[MED_TAILLE_PNOM+1]="";
    char nomcha[MED_TAILLE_NOM+1]="";
    //
    for(int i=0;i<nbFields;i++)
      {
        med_int ncomp=MEDnChamp(fid,i+1);//number of components of field i+1
        char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
        char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
        MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
        std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
        delete [] comp;//component names/units not needed here
        delete [] unit;
        if(curFieldName==fieldName)
          {
            bool found=false;
            for(int j=0;j<MED_NBR_GEOMETRIE_MAILLE+2 && !found;j++)
              {
                med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_MAILLE,typmai[j]);
                for(int k=0;k<nbPdt;k++)
                  {
                    MEDpasdetempsInfo(fid,nomcha,MED_MAILLE,typmai[j],k+1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
                    found=true;//stop the geometry scan once one type yielded steps
                    ret.push_back(std::make_pair(numdt,numo));
                  }
              }
          }
      }
    MEDfermer(fid);
    return ret;
  }
+
  /*!
   * Returns the (numdt,numo) time step pairs available for node field
   * 'fieldName' in file 'fileName'.
   */
  std::vector< std::pair<int,int> > GetNodeFieldIterations(const char *fileName, const char *fieldName)
  {
    std::vector< std::pair<int,int> > ret;
    med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
    med_int nbFields=MEDnChamp(fid,0);//MEDnChamp(fid,0) returns the number of fields
    //
    med_type_champ typcha;
    med_int ngauss=0;
    med_int numdt=0,numo=0,nbrefmaa;
    med_float dt=0.0;
    med_booleen local;
    char maa_ass[MED_TAILLE_NOM+1]="";
    char dt_unit[MED_TAILLE_PNOM+1]="";
    char nomcha[MED_TAILLE_NOM+1]="";
    //
    for(int i=0;i<nbFields;i++)
      {
        med_int ncomp=MEDnChamp(fid,i+1);//number of components of field i+1
        char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
        char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
        MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
        std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
        delete [] comp;//component names/units not needed here
        delete [] unit;
        if(curFieldName==fieldName)
          {
            //node fields use the MED_NONE geometry
            med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_NOEUD,MED_NONE);
            for(int k=0;k<nbPdt;k++)
              {
                MEDpasdetempsInfo(fid,nomcha,MED_NOEUD,MED_NONE,k+1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
                ret.push_back(std::make_pair(numdt,numo));
              }
          }
      }
    MEDfermer(fid);
    return ret;
  }
+
+ void readFieldDoubleDataInMedFile(const char *fileName, const char *meshName, const char *fieldName, std::list<MEDLoader::MEDFieldDoublePerCellType>& field,
+ int iteration, int order, ParaMEDMEM::TypeOfField typeOfOutField, double& time)
+ {
+ time=0.;
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ med_int nbFields=MEDnChamp(fid,0);
+ //
+ med_type_champ typcha;
+ char nomcha[MED_TAILLE_NOM+1]="";
+ char pflname [MED_TAILLE_NOM+1]="";
+ char locname [MED_TAILLE_NOM+1]="";
+ std::map<ParaMEDMEM::TypeOfField, med_entite_maillage> tabEnt;
+ std::map<ParaMEDMEM::TypeOfField, med_geometrie_element *> tabType;
+ std::map<ParaMEDMEM::TypeOfField, int> tabTypeLgth;
+ tabEnt[ON_CELLS]=MED_MAILLE;
+ tabType[ON_CELLS]=typmai;
+ tabTypeLgth[ON_CELLS]=MED_NBR_GEOMETRIE_MAILLE+2;
+ tabEnt[ON_NODES]=MED_NOEUD;
+ tabType[ON_NODES]=typmainoeud;
+ tabTypeLgth[ON_NODES]=1;
+ //
+ for(int i=0;i<nbFields;i++)
+ {
+ med_int ncomp=MEDnChamp(fid,i+1);
+ char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
+ char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
+ MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
+ std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
+ delete [] comp;
+ delete [] unit;
+ if(curFieldName==fieldName)
+ {
+ bool found=false;
+ for(int j=0;j<tabTypeLgth[typeOfOutField] && !found;j++)
+ {
+ med_int nbPdt=MEDnPasdetemps(fid,nomcha,tabEnt[typeOfOutField],typmai[j]);
+ if(nbPdt>0)
+ {
+ int nval=MEDnVal(fid,(char *)fieldName,tabEnt[typeOfOutField],tabType[typeOfOutField][j],iteration,order,(char *)meshName,MED_COMPACT);
+ double *valr=new double[ncomp*nval];
+ MEDchampLire(fid,(char *)meshName,(char *)fieldName,(unsigned char*)valr,MED_FULL_INTERLACE,MED_ALL,locname,
+ pflname,MED_COMPACT,tabEnt[typeOfOutField],tabType[typeOfOutField][j],iteration,order);
+ field.push_back(MEDFieldDoublePerCellType(typmai2[j],valr,ncomp,nval));
+ }
+ }
+ }
+ }
+ MEDfermer(fid);
+ }
+
+ std::vector<int> getIdsFromFamilies(const char *fileName, const char *meshName, const std::vector<std::string>& fams)
+ {
+ std::vector<int> ret;
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ med_int nfam=MEDnFam(fid,(char *)meshName);
+ char nomfam[MED_TAILLE_NOM+1];
+ med_int numfam;
+ for(int i=0;i<nfam;i++)
+ {
+ int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
+ med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
+ med_int *attide=new int[natt];
+ med_int *attval=new int[natt];
+ char *attdes=new char[MED_TAILLE_DESC*natt+1];
+ char *gro=new char[MED_TAILLE_LNOM*ngro+1];
+ MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
+ std::string cur=buildStringFromFortran(nomfam,sizeof(nomfam));
+ if(std::find(fams.begin(),fams.end(),cur)!=fams.end())
+ ret.push_back(numfam);
+ delete [] attdes;
+ delete [] gro;
+ delete [] attide;
+ delete [] attval;
+ }
+ MEDfermer(fid);
+ return ret;
+ }
+
+ std::vector<int> getIdsFromGroups(const char *fileName, const char *meshName, const std::vector<std::string>& grps)
+ {
+ std::vector<int> ret;
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ med_int nfam=MEDnFam(fid,(char *)meshName);
+ char nomfam[MED_TAILLE_NOM+1];
+ med_int numfam;
+ for(int i=0;i<nfam;i++)
+ {
+ int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
+ med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
+ med_int *attide=new int[natt];
+ med_int *attval=new int[natt];
+ char *attdes=new char[MED_TAILLE_DESC*natt+1];
+ char *gro=new char[MED_TAILLE_LNOM*ngro+1];
+ MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
+ std::string cur=buildStringFromFortran(nomfam,sizeof(nomfam));
+ for(int j=0;j<ngro;j++)
+ {
+ std::string cur=buildStringFromFortran(gro+j*MED_TAILLE_LNOM,MED_TAILLE_LNOM);
+ if(std::find(grps.begin(),grps.end(),cur)!=grps.end())
+ {
+ ret.push_back(numfam);
+ break;
+ }
+ }
+ delete [] attdes;
+ delete [] gro;
+ delete [] attide;
+ delete [] attval;
+ }
+ MEDfermer(fid);
+ return ret;
+ }
+
+ med_int getIdFromMeshName(med_idt fid, const char *meshName) throw(INTERP_KERNEL::Exception)
+ {
+ if(meshName==0)
+ return 1;
+ std::string meshNameStr(meshName);
+ if(meshNameStr=="?")
+ return 1;
+ std::vector<std::string> meshes=getMeshNamesFid(fid);
+ if(meshes.empty())
+ throw INTERP_KERNEL::Exception("No mesh in file");
+ std::vector<std::string>::iterator iter=std::find(meshes.begin(),meshes.end(),meshNameStr);
+ if(iter==meshes.end())
+ {
+ std::ostringstream os2;
+ os2 << "MeshName '" << meshName << "' not in file : meshes available : ";
+ std::copy(meshes.begin(),meshes.end(),std::ostream_iterator<std::string>(os2," "));
+ throw INTERP_KERNEL::Exception(os2.str().c_str());
+ }
+ return iter-meshes.begin()+1;
+ }
+
+ /*!
+ * This methods allows to merger all entities and to considerate only cell types.
+ */
+ void dispatchElems(int nbOfElemCell, int nbOfElemFace, int& nbOfElem, med_entite_maillage& whichEntity)
+ {
+ if(nbOfElemCell>=nbOfElemFace)
+ {
+ whichEntity=MED_MAILLE;
+ nbOfElem=nbOfElemCell;
+ }
+ else
+ {
+ whichEntity=MED_FACE;
+ nbOfElem=nbOfElemFace;
+ }
+ }
+
  /*!
   * Reads from the already-opened MED file 'fid' the nodes and nodal
   * connectivity of unstructured mesh number 'meshId'.
   * @param coords output : newly allocated nCoords*spaceDim doubles (full
   *        interlace), owned by the caller.
   * @param nCoords output number of nodes.
   * @param spaceDim output dimension as stored in the MED file.
   * @param conn output list receiving one MEDConnOfOneElemType per geometry
   *        present : classical types first, then polygons, then polyhedra.
   */
  void readUMeshDataInMedFile(med_idt fid, med_int meshId, double *&coords, int& nCoords, int& spaceDim, std::list<MEDLoader::MEDConnOfOneElemType>& conn)
  {
    char nommaa[MED_TAILLE_NOM+1];
    char maillage_description[MED_TAILLE_DESC+1];
    char comp[3*MED_TAILLE_PNOM+1];
    char unit[3*MED_TAILLE_PNOM+1];
    med_maillage type_maillage;
    med_int Mdim;
    MEDmaaInfo(fid,meshId,nommaa,&Mdim,&type_maillage,maillage_description);
    spaceDim=(int)Mdim;
    nCoords=MEDnEntMaa(fid,nommaa,MED_COOR,MED_NOEUD,(med_geometrie_element)0,(med_connectivite)0);
    coords=new double[nCoords*spaceDim];
    med_repere repere;
    MEDcoordLire(fid,nommaa,Mdim,coords,MED_FULL_INTERLACE,MED_ALL,NULL,0,&repere,comp,unit);
    med_booleen inoele, inuele;
    //classical (fixed-size) cell types
    for(int i=0;i<MED_NBR_GEOMETRIE_MAILLE;i++)
      {
        med_geometrie_element curMedType=typmai[i];
        med_entite_maillage whichEntity;
        int curNbOfElemM=MEDnEntMaa(fid,nommaa,MED_CONN,MED_MAILLE,curMedType,MED_NOD);
        int curNbOfElemF=MEDnEntMaa(fid,nommaa,MED_CONN,MED_FACE,curMedType,MED_NOD);
        int curNbOfElem;
        //keep the entity (cell or face) that carries the most elements of this type
        dispatchElems(curNbOfElemM,curNbOfElemF,curNbOfElem,whichEntity);
        if(curNbOfElem>0)
          {
            //MED geometry enum values encode the node count in their last two
            //digits (MED convention), hence the %100
            int *connTab=new int[(curMedType%100)*curNbOfElem];
            int *fam=new int[curNbOfElem];
            MEDLoader::MEDConnOfOneElemType elem(typmai2[i],connTab,0,fam,curNbOfElem,-1);
            int *tmp=new int[curNbOfElem];
            char *noms=new char[MED_TAILLE_PNOM*curNbOfElem+1];
            MEDelementsLire(fid,nommaa,Mdim,connTab,MED_FULL_INTERLACE,noms,&inoele,tmp,&inuele,fam,curNbOfElem,whichEntity,curMedType,MED_NOD);
            delete [] tmp;//optional element numbers : not kept
            delete [] noms;//optional element names : not kept
            //trying to read global numbering
            int *globArr=new int[curNbOfElem];
            if(MEDglobalNumLire(fid,nommaa,globArr,curNbOfElem,whichEntity,curMedType)==0)
              elem.setGlobal(globArr);
            else
              delete [] globArr;
            conn.push_back(elem);
          }
      }
    //polygons
    int curNbOfPolyElem;
    int curNbOfPolyElemM=MEDnEntMaa(fid,nommaa,MED_CONN,MED_MAILLE,MED_POLYGONE,MED_NOD);
    int curNbOfPolyElemF=MEDnEntMaa(fid,nommaa,MED_CONN,MED_FACE,MED_POLYGONE,MED_NOD);
    med_entite_maillage whichPolyEntity;
    dispatchElems(curNbOfPolyElemM,curNbOfPolyElemF,curNbOfPolyElem,whichPolyEntity);
    if(curNbOfPolyElem>0)
      {
        med_int arraySize;
        MEDpolygoneInfo(fid,nommaa,whichPolyEntity,MED_NOD,&arraySize);
        int *index=new int[curNbOfPolyElem+1];
        int *locConn=new int[arraySize];
        int *fam=new int[curNbOfPolyElem];
        MEDLoader::MEDConnOfOneElemType elem(INTERP_KERNEL::NORM_POLYGON,locConn,index,fam,curNbOfPolyElem,arraySize);
        MEDpolygoneConnLire(fid,nommaa,index,curNbOfPolyElem+1,locConn,whichPolyEntity,MED_NOD);
        MEDfamLire(fid,nommaa,fam,curNbOfPolyElem,MED_MAILLE,MED_POLYGONE);
        conn.push_back(elem);
      }
    //polyhedra : MED stores a cell index plus a per-face index; they are
    //flattened below into a single connectivity where the faces of a cell are
    //separated by 0 markers
    curNbOfPolyElem=MEDnEntMaa(fid,nommaa,MED_CONN,MED_MAILLE,MED_POLYEDRE,MED_NOD);
    if(curNbOfPolyElem>0)
      {
        med_int indexFaceLgth,connFaceLgth;
        MEDpolyedreInfo(fid,nommaa,MED_NOD,&indexFaceLgth,&connFaceLgth);
        int *index=new int[curNbOfPolyElem+1];
        int *indexFace=new int[indexFaceLgth];
        int *locConn=new int[connFaceLgth];
        int *fam=new int[curNbOfPolyElem];
        MEDpolyedreConnLire(fid,nommaa,index,curNbOfPolyElem+1,indexFace,indexFaceLgth,locConn,MED_NOD);
        MEDfamLire(fid,nommaa,fam,curNbOfPolyElem,MED_MAILLE,MED_POLYEDRE);
        //final size = all face connectivities + one 0 separator between
        //consecutive faces of each cell
        int arraySize=connFaceLgth;
        for(int i=0;i<curNbOfPolyElem;i++)
          arraySize+=index[i+1]-index[i]-1;
        int *finalConn=new int[arraySize];
        int *finalIndex=new int[curNbOfPolyElem+1];
        finalIndex[0]=1;//1-based, consistent with the MED index convention
        int *wFinalConn=finalConn;
        for(int i=0;i<curNbOfPolyElem;i++)
          {
            finalIndex[i+1]=finalIndex[i]+index[i+1]-index[i]-1+indexFace[index[i+1]-1]-indexFace[index[i]-1];
            //first face of the cell copied as is...
            wFinalConn=std::copy(locConn+indexFace[index[i]-1]-1,locConn+indexFace[index[i]]-1,wFinalConn);
            for(int j=index[i];j<index[i+1]-1;j++)
              {
                //...each subsequent face preceded by a 0 separator
                *wFinalConn++=0;
                wFinalConn=std::copy(locConn+indexFace[j]-1,locConn+indexFace[j+1]-1,wFinalConn);
              }
          }
        delete [] index;
        delete [] locConn;
        delete [] indexFace;
        MEDLoader::MEDConnOfOneElemType elem(INTERP_KERNEL::NORM_POLYHED,finalConn,finalIndex,fam,curNbOfPolyElem,arraySize);
        conn.push_back(elem);
      }
  }
+
+ template<class T>
+ unsigned calculateHighestMeshDim(const std::list<T>& conn)
+ {
+ unsigned ret=0;
+ for(typename std::list<T>::const_iterator iter=conn.begin();iter!=conn.end();iter++)
+ {
+ unsigned curDim=INTERP_KERNEL::CellModel::getCellModel((*iter).getType()).getDimension();
+ if(ret<curDim)
+ ret=curDim;
+ }
+ return ret;
+ }
+
+ template<class T>
+ void keepSpecifiedMeshDim(typename std::list<T>& conn, unsigned meshDim)
+ {
+ for(typename std::list<T>::iterator iter=conn.begin();iter!=conn.end();)
+ {
+ unsigned curDim=INTERP_KERNEL::CellModel::getCellModel((*iter).getType()).getDimension();
+ if(curDim!=meshDim)
+ {
+ (*iter).releaseArray();
+ iter=conn.erase(iter);
+ }
+ else
+ iter++;
+ }
+ }
+
+ template<class T>
+ void keepTypes(typename std::list<T>& conn, const std::vector<INTERP_KERNEL::NormalizedCellType>& typesToKeep)
+ {
+ if(!typesToKeep.empty())
+ {
+ for(typename std::list<T>::iterator iter=conn.begin();iter!=conn.end();)
+ {
+ INTERP_KERNEL::NormalizedCellType curType=(*iter).getType();
+ if(std::find(typesToKeep.begin(),typesToKeep.end(),curType)==typesToKeep.end())
+ {
+ (*iter).releaseArray();
+ iter=conn.erase(iter);
+ }
+ else
+ iter++;
+ }
+ }
+ }
+
  // Functor for std::accumulate : sums the number of tuples over the
  // per-cell-type chunks of a field.
  class FieldPerTypeAccumulator
  {
  public:
    int operator()(int res, const MEDLoader::MEDFieldDoublePerCellType& elt) { return res+elt.getNbOfTuple(); }
  };
+
+ ParaMEDMEM::DataArrayDouble *buildArrayFromRawData(const std::list<MEDLoader::MEDFieldDoublePerCellType>& fieldPerType)
+ {
+ ParaMEDMEM::DataArrayDouble *ret=ParaMEDMEM::DataArrayDouble::New();
+ int totalNbOfTuple=std::accumulate(fieldPerType.begin(),fieldPerType.end(),0,FieldPerTypeAccumulator());
+ int nbOfComp=(*fieldPerType.begin()).getNbComp();
+ double *ptr=new double[nbOfComp*totalNbOfTuple];
+ ret->useArray(ptr,true,ParaMEDMEM::CPP_DEALLOC,totalNbOfTuple,nbOfComp);
+ std::for_each(fieldPerType.begin(),fieldPerType.end(),FieldPerTypeCopier(ptr));
+ return ret;
+ }
+
  // Functor for std::for_each over a family-id array : accumulates in _sigma
  // the total connectivity length (taken from the index array) of the cells
  // whose family id equals _id. _count is the current cell rank and advances
  // for every visited cell, matching or not.
  class PolyCounterForFams
  {
  public:
    PolyCounterForFams(int id, const int *index):_id(id),_index(index),_count(0),_sigma(0) { }
    void operator()(int val) { if(val==_id) _sigma+=_index[_count+1]-_index[_count]; _count++; }
    int getSigma() const { return _sigma; }
  private:
    int _id;//family id looked for
    const int *_index;//connectivity index, one entry per cell plus one
    int _count;//rank of the next cell to visit
    int _sigma;//accumulated connectivity length
  };
+
  /*!
   * Converts the MED-file per-type connectivity chunks 'medConnFrmt' into the
   * MEDCoupling nodal connectivity format : 'conn' holds, for each cell, its
   * normalized type followed by its 0-based node ids, and 'connIndex' holds
   * the offset of each cell in 'conn'.
   * @param familiesToKeep if non empty, only cells whose family id is listed
   *        are converted; if empty every cell is kept.
   * Both output arrays are newly allocated (caller owns them); they are set to
   * 0 when 'medConnFrmt' is empty.
   */
  void tradMEDFileCoreFrmt2MEDCouplingUMesh(const std::list<MEDLoader::MEDConnOfOneElemType>& medConnFrmt,
                                            DataArrayInt* &conn,
                                            DataArrayInt* &connIndex,
                                            const std::vector<int>& familiesToKeep)
  {
    bool keepAll=familiesToKeep.empty();
    if(medConnFrmt.empty())
      {
        conn=0;
        connIndex=0;
        return ;
      }
    std::list<MEDLoader::MEDConnOfOneElemType>::const_iterator iter=medConnFrmt.begin();
    int totalNbOfCells=0;
    int totalNbOfMedConn=0;
    //first pass : size the output arrays (cells kept, and their connectivity length)
    for(;iter!=medConnFrmt.end();iter++)
      {
        const INTERP_KERNEL::CellModel& cellMod=INTERP_KERNEL::CellModel::getCellModel((*iter).getType());
        if(keepAll)
          totalNbOfCells+=(*iter).getLength();
        else
          for(std::vector<int>::const_iterator iter2=familiesToKeep.begin();iter2!=familiesToKeep.end();iter2++)
            totalNbOfCells+=std::count((*iter).getFam(),(*iter).getFam()+(*iter).getLength(),*iter2);
        //static types : fixed node count per cell ; dynamic (poly) types : use
        //the recorded connectivity length or the per-family index sums
        if(!cellMod.isDynamic())
          if(keepAll)
            totalNbOfMedConn+=(*iter).getLength()*cellMod.getNumberOfNodes();
          else
            for(std::vector<int>::const_iterator iter2=familiesToKeep.begin();iter2!=familiesToKeep.end();iter2++)
              totalNbOfMedConn+=std::count((*iter).getFam(),(*iter).getFam()+(*iter).getLength(),*iter2)*cellMod.getNumberOfNodes();
        else
          if(keepAll)
            totalNbOfMedConn+=(*iter).getConnLength();
          else
            for(std::vector<int>::const_iterator iter2=familiesToKeep.begin();iter2!=familiesToKeep.end();iter2++)
              {
                PolyCounterForFams res=std::for_each((*iter).getFam(),(*iter).getFam()+(*iter).getLength(),PolyCounterForFams(*iter2,(*iter).getIndex()));
                totalNbOfMedConn+=res.getSigma();
              }
      }
    connIndex=DataArrayInt::New();
    conn=DataArrayInt::New();
    connIndex->alloc(totalNbOfCells+1,1);
    int *connIdxPtr=connIndex->getPointer();
    int connFillId=0;
    //each cell contributes its type tag plus its node ids, hence +totalNbOfCells
    conn->alloc(totalNbOfMedConn+totalNbOfCells,1);
    int *connPtr=conn->getPointer();
    //second pass : fill conn/connIndex, shifting node ids from MED 1-based to 0-based
    for(iter=medConnFrmt.begin();iter!=medConnFrmt.end();iter++)
      {
        INTERP_KERNEL::NormalizedCellType type=(*iter).getType();
        const int *sourceConn=(*iter).getArray();
        const int *sourceIndex=(*iter).getIndex();
        const INTERP_KERNEL::CellModel& cellMod=INTERP_KERNEL::CellModel::getCellModel(type);
        int nbOfCellsInCurType;
        int nbOfNodesIn1Cell=cellMod.getNumberOfNodes();
        nbOfCellsInCurType=(*iter).getLength();
        bool isDyn=cellMod.isDynamic();
        int *tmpConnPtr;
        for(int i=0;i<nbOfCellsInCurType;i++)
          {
            if(keepAll)
              {
                //write cell : index entry, type tag, then 0-based node ids
                *connIdxPtr=connFillId;
                *connPtr++=type;
                if(!isDyn)
                  tmpConnPtr=std::transform(sourceConn,sourceConn+nbOfNodesIn1Cell,connPtr,std::bind2nd(std::minus<int>(),1));
                else
                  tmpConnPtr=std::transform(sourceConn,sourceConn+sourceIndex[i+1]-sourceIndex[i],connPtr,std::bind2nd(std::minus<int>(),1));
                connIdxPtr++;
                //NB: nbOfNodesIn1Cell is recomputed from the write span so that
                //the sourceConn advance below works for both static and dynamic types
                nbOfNodesIn1Cell=tmpConnPtr-connPtr;
                connFillId+=nbOfNodesIn1Cell+1;
                connPtr=tmpConnPtr;
              }
            else if(std::find(familiesToKeep.begin(),familiesToKeep.end(),(*iter).getFam()[i])!=familiesToKeep.end())
              {
                //same as above, but only for cells of a kept family
                *connIdxPtr=connFillId;
                *connPtr++=type;
                if(!isDyn)
                  tmpConnPtr=std::transform(sourceConn,sourceConn+nbOfNodesIn1Cell,connPtr,std::bind2nd(std::minus<int>(),1));
                else
                  tmpConnPtr=std::transform(sourceConn,sourceConn+sourceIndex[i+1]-sourceIndex[i],connPtr,std::bind2nd(std::minus<int>(),1));
                connIdxPtr++;
                nbOfNodesIn1Cell=tmpConnPtr-connPtr;
                connFillId+=nbOfNodesIn1Cell+1;
                connPtr=tmpConnPtr;
              }
            sourceConn+=nbOfNodesIn1Cell;
          }
        *connIdxPtr=connFillId;//closing entry of the index for this type
      }
  }
+
+ template<class T>
+ void releaseMEDFileCoreFrmt(typename std::list<T>& medConnFrmt)
+ {
+ for(typename std::list<T>::iterator iter=medConnFrmt.begin();iter!=medConnFrmt.end();iter++)
+ (*iter).releaseArray();
+ medConnFrmt.clear();
+ }
+
+ /*!
+ * This method builds a sub set of connectivity for a given type 'type'.
+ * @param conn input containing connectivity with MEDCoupling format.
+ * @param connIndex input containing connectivity index in MEDCoupling format.
+ * @param type input specifying which cell types will be extracted in conn4MEDFile.
+ * @param conn4MEDFile output containing the connectivity directly understandable by MEDFile; conn4MEDFile has to be empty before this method called.
+ * @param connIndex4MEDFile output containing index connectivity understandable by MEDFile; only used by polygons and polyhedrons (it is face nodal connec).
+ * @param connIndexRk24MEDFile output containing index of rank 2 understandable by MEDFile; only used by polyhedrons.
+ * @return nb of elements extracted.
+ */
+ int buildMEDSubConnectivityOfOneTypeStaticTypes(DataArrayInt *conn, DataArrayInt *connIndex, INTERP_KERNEL::NormalizedCellType type, std::vector<int>& conn4MEDFile)
+ {
+ int ret=0;
+ int nbOfElem=connIndex->getNbOfElems()-1;
+ const int *connPtr=conn->getPointer();
+ const int *connIdxPtr=connIndex->getPointer();
+ for(int i=0;i<nbOfElem;i++)
+ {
+ int delta=connIdxPtr[1]-connIdxPtr[0];
+ if(*connPtr==type)
+ {
+ conn4MEDFile.insert(conn4MEDFile.end(),connPtr+1,connPtr+delta);
+ ret++;
+ }
+ connIdxPtr++;
+ connPtr+=delta;
+ }
+ std::transform(conn4MEDFile.begin(),conn4MEDFile.end(),conn4MEDFile.begin(),std::bind2nd(std::plus<int>(),1));
+ return ret;
+ }
+
  /*!
   * Extracts the polygon cells from a MEDCoupling nodal connectivity.
   * @param conn4MEDFile output, must be empty on input; receives the node ids
   *        shifted to the 1-based MED file convention.
   * @param connIndex4MEDFile output 1-based index, one entry per polygon plus
   *        the leading 1.
   * @return number of polygons found.
   */
  int buildMEDSubConnectivityOfOneTypesPolyg(DataArrayInt *conn, DataArrayInt *connIndex, std::vector<int>& conn4MEDFile, std::vector<int>& connIndex4MEDFile)
  {
    int ret=0;
    int nbOfElem=connIndex->getNbOfElems()-1;
    const int *connPtr=conn->getPointer();
    const int *connIdxPtr=connIndex->getPointer();
    connIndex4MEDFile.push_back(1);//MED file indices are 1-based
    for(int i=0;i<nbOfElem;i++)
      {
        int delta=connIdxPtr[1]-connIdxPtr[0];//type tag + node ids of this cell
        if(*connPtr==INTERP_KERNEL::NORM_POLYGON)
          {
            conn4MEDFile.insert(conn4MEDFile.end(),connPtr+1,connPtr+delta);
            connIndex4MEDFile.push_back(connIndex4MEDFile.back()+delta-1);
            ret++;
          }
        connIdxPtr++;
        connPtr+=delta;
      }
    //switch from MEDCoupling 0-based node ids to MED file 1-based ids
    std::transform(conn4MEDFile.begin(),conn4MEDFile.end(),conn4MEDFile.begin(),std::bind2nd(std::plus<int>(),1));
    return ret;
  }
+
  //TODO: not implemented — always reports 0 polyhedra extracted, so polyhedron
  //connectivity is never written to MED file through this path.
  int buildMEDSubConnectivityOfOneTypesPolyh(DataArrayInt *conn, DataArrayInt *connIndex, std::vector<int>& conn4MEDFile, std::vector<int>& connIndex4MEDFile, std::vector<int>& connIndexRk24MEDFile)
  {
    return 0;
  }
+
+ /*!
+ * This method builds a sub set of connectivity for a given cell type 'type',
+ * dispatching to the static-size, polygon or polyhedron specialized extractor.
+ * @param conn input containing connectivity with MEDCoupling format.
+ * @param connIndex input containing connectivity index in MEDCoupling format.
+ * @param type input specifying which cell types will be extracted in conn4MEDFile.
+ * @param conn4MEDFile output containing the connectivity directly understandable by MEDFile; conn4MEDFile has to be empty before this method is called.
+ * @param connIndex4MEDFile output containing index connectivity understandable by MEDFile; only used by polygons and polyhedrons (it is face nodal connec).
+ * @param connIndexRk24MEDFile output containing index of rank 2 understandable by MEDFile; only used by polyhedrons.
+ * @return nb of elements extracted.
+ */
+ int buildMEDSubConnectivityOfOneType(DataArrayInt *conn, DataArrayInt *connIndex, INTERP_KERNEL::NormalizedCellType type, std::vector<int>& conn4MEDFile,
+ std::vector<int>& connIndex4MEDFile, std::vector<int>& connIndexRk24MEDFile)
+ {
+
+ const INTERP_KERNEL::CellModel& cellMod=INTERP_KERNEL::CellModel::getCellModel(type);
+ // Fixed-size cell types do not need any index array on the MED file side.
+ if(!cellMod.isDynamic())
+ return buildMEDSubConnectivityOfOneTypeStaticTypes(conn,connIndex,type,conn4MEDFile);
+ else
+ {
+ if(type==INTERP_KERNEL::NORM_POLYGON)
+ return buildMEDSubConnectivityOfOneTypesPolyg(conn,connIndex,conn4MEDFile,connIndex4MEDFile);
+ else
+ return buildMEDSubConnectivityOfOneTypesPolyh(conn,connIndex,conn4MEDFile,connIndex4MEDFile,connIndexRk24MEDFile);
+ }
+ }
+
+ /*!
+ * Reads one unstructured mesh from a MED file and builds the corresponding MEDCouplingUMesh.
+ * @param ids in vector containing family ids whose cells have to be kept. If empty all cells are kept.
+ * @param typesToKeep in vector that indicates which cell types to keep after dimension filtering.
+ * @param meshDimExtract out parameter that gives the extracted mesh dimension.
+ */
+ MEDCouplingUMesh *readUMeshFromFileLev1(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<int>& ids,
+ const std::vector<INTERP_KERNEL::NormalizedCellType>& typesToKeep, unsigned& meshDimExtract) throw(INTERP_KERNEL::Exception)
+ {
+ //Extraction data from MED file.
+ // NOTE(review): the return of MEDouvrir is not checked; an unreadable file fails later — TODO confirm acceptable.
+ med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
+ med_int mid=getIdFromMeshName(fid,meshName);
+ double *coords;
+ int nCoords;
+ int spaceDim;
+ std::list<MEDLoader::MEDConnOfOneElemType> conn;
+ readUMeshDataInMedFile(fid,mid,coords,nCoords,spaceDim,conn);
+ // meshDimRelToMax is relative to the highest cell dimension found in the file.
+ meshDimExtract=calculateHighestMeshDim<MEDConnOfOneElemType>(conn);
+ meshDimExtract=meshDimExtract+meshDimRelToMax;
+ keepSpecifiedMeshDim<MEDConnOfOneElemType>(conn,meshDimExtract);
+ keepTypes<MEDConnOfOneElemType>(conn,typesToKeep);
+ MEDfermer(fid);
+ //Put data in returned data structure.
+ MEDCouplingUMesh *ret=MEDCouplingUMesh::New();
+ ret->setName(meshName);
+ ret->setMeshDimension(meshDimExtract);
+ //
+ DataArrayDouble *coordsArr=DataArrayDouble::New();
+ // CPP_DEALLOC: the array presumably takes ownership of 'coords' — no explicit delete here.
+ coordsArr->useArray(coords,true,ParaMEDMEM::CPP_DEALLOC,nCoords,spaceDim);
+ ret->setCoords(coordsArr);
+ coordsArr->decrRef();
+ //
+ DataArrayInt *connArr,*connIndexArr;
+ tradMEDFileCoreFrmt2MEDCouplingUMesh(conn,connArr,connIndexArr,ids);
+ ret->setConnectivity(connArr,connIndexArr);
+ //clean-up
+ if(connArr)
+ connArr->decrRef();
+ if(connIndexArr)
+ connIndexArr->decrRef();
+ releaseMEDFileCoreFrmt<MEDLoader::MEDConnOfOneElemType>(conn);
+ return ret;
+ }
+
+ /*!
+ * Reads one time step (iteration,order) of double field 'fieldName' from a MED file and
+ * builds the corresponding MEDCouplingFieldDouble laying on mesh 'meshName'.
+ * For cell fields, only the cell types actually carrying values are kept in the loaded mesh
+ * so that field tuples and mesh cells stay aligned.
+ */
+ ParaMEDMEM::MEDCouplingFieldDouble *readFieldDoubleLev1(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order,
+ ParaMEDMEM::TypeOfField typeOfOutField)
+ {
+ std::list<MEDLoader::MEDFieldDoublePerCellType> fieldPerCellType;
+ double time;
+ readFieldDoubleDataInMedFile(fileName,meshName,fieldName,fieldPerCellType,iteration,order,typeOfOutField,time);
+ std::vector<int> familiesToKeep;
+ std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
+ // Restrict the mesh to the cell types present in the field (cell fields only).
+ if(typeOfOutField==ON_CELLS)
+ for(std::list<MEDLoader::MEDFieldDoublePerCellType>::const_iterator iter=fieldPerCellType.begin();iter!=fieldPerCellType.end();iter++)
+ typesToKeep.push_back((*iter).getType());
+ unsigned meshDim;
+ ParaMEDMEM::MEDCouplingUMesh *mesh=readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
+ if(typeOfOutField==ON_CELLS)
+ keepSpecifiedMeshDim<MEDFieldDoublePerCellType>(fieldPerCellType,meshDim);
+ ParaMEDMEM::MEDCouplingFieldDouble *ret=ParaMEDMEM::MEDCouplingFieldDouble::New(typeOfOutField,ONE_TIME);
+ ret->setName(fieldName);
+ ret->setTime(time,iteration,order);
+ ret->setMesh(mesh);
+ mesh->decrRef();
+ ParaMEDMEM::DataArrayDouble *arr=buildArrayFromRawData(fieldPerCellType);
+ ret->setArray(arr);
+ arr->decrRef();
+ releaseMEDFileCoreFrmt<MEDLoader::MEDFieldDoublePerCellType>(fieldPerCellType);
+ return ret;
+ }
+}
+
+/*!
+ * Public entry point: reads mesh 'meshName' from 'fileName', keeping all families and all cell types.
+ */
+MEDCouplingUMesh *MEDLoader::ReadUMeshFromFile(const char *fileName, const char *meshName, int meshDimRelToMax) throw(INTERP_KERNEL::Exception)
+{
+ std::vector<int> familiesToKeep;
+ std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
+ unsigned meshDim;
+ return readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
+}
+
+/*!
+ * Reads mesh 'meshName' keeping only the cells whose family is listed in 'fams'.
+ */
+ParaMEDMEM::MEDCouplingUMesh *MEDLoader::ReadUMeshFromFamilies(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& fams)
+{
+ std::vector<int> familiesToKeep=getIdsFromFamilies(fileName,meshName,fams);
+ std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
+ unsigned meshDim;
+ return readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
+}
+
+/*!
+ * Reads mesh 'meshName' keeping only the cells whose family belongs to one of the groups in 'grps'.
+ */
+ParaMEDMEM::MEDCouplingUMesh *MEDLoader::ReadUMeshFromGroups(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& grps)
+{
+ std::vector<int> familiesToKeep=getIdsFromGroups(fileName,meshName,grps);
+ std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
+ unsigned meshDim;
+ return readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
+}
+
+/*!
+ * Reads one time step (iteration,order) of cell field 'fieldName' laying on mesh 'meshName'.
+ */
+ParaMEDMEM::MEDCouplingFieldDouble *MEDLoader::ReadFieldDoubleCell(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order)
+{
+ return readFieldDoubleLev1(fileName,meshName,meshDimRelToMax,fieldName,iteration,order,ON_CELLS);
+}
+
+/*!
+ * Reads one time step (iteration,order) of node field 'fieldName' laying on mesh 'meshName'.
+ */
+ParaMEDMEM::MEDCouplingFieldDouble *MEDLoader::ReadFieldDoubleNode(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order)
+{
+ return readFieldDoubleLev1(fileName,meshName,meshDimRelToMax,fieldName,iteration,order,ON_NODES);
+}
+
+/*!
+ * Writes an unstructured mesh into a new MED file (MED_CREATION: an existing file is overwritten).
+ * @param fileName path of the MED file to create.
+ * @param mesh mesh to write; it must have a non empty name.
+ * @throw INTERP_KERNEL::Exception if the mesh name is empty.
+ */
+void MEDLoader::writeUMesh(const char *fileName, ParaMEDMEM::MEDCouplingUMesh *mesh)
+{
+ med_idt fid=MEDouvrir((char *)fileName,MED_CREATION);
+ std::string meshName(mesh->getName());
+ if(meshName=="")
+ {
+ MEDfermer(fid);
+ throw INTERP_KERNEL::Exception("MEDCouplingMesh must have a not null name !");
+ }
+ char maa[MED_TAILLE_NOM+1];
+ // Bug fix: the previous strcpy overflowed 'maa' for mesh names longer than
+ // MED_TAILLE_NOM; copy at most MED_TAILLE_NOM chars and always null-terminate.
+ strncpy(maa,meshName.c_str(),MED_TAILLE_NOM);
+ maa[MED_TAILLE_NOM]='\0';
+ MEDmaaCr(fid,maa,mesh->getSpaceDimension(),MED_NON_STRUCTURE,maa);
+ std::set<INTERP_KERNEL::NormalizedCellType> allTypes(mesh->getAllTypes());
+ DataArrayInt *conn=mesh->getNodalConnectivity();
+ DataArrayInt *connIndex=mesh->getNodalConnectivityIndex();
+ char familyName[MED_TAILLE_NOM+1];
+ std::fill(familyName,familyName+MED_TAILLE_NOM+1,'\0');
+ const char DftFamilyName[]="DftFamily";
+ std::copy(DftFamilyName,DftFamilyName+sizeof(DftFamilyName),familyName);
+ // Write the connectivity type by type, iterating in typmai order.
+ for(int i=0;i<MED_NBR_GEOMETRIE_MAILLE+2;i++)
+ {
+ med_geometrie_element curMedType=typmai[i];
+ INTERP_KERNEL::NormalizedCellType curType=typmai2[i];
+ if(allTypes.find(curType)!=allTypes.end())
+ {
+ std::vector<int> medConn;
+ std::vector<int> medConnIndex;
+ std::vector<int> medConnIndex2;
+ int nbOfElt=buildMEDSubConnectivityOfOneType(conn,connIndex,curType,medConn,medConnIndex,medConnIndex2);
+ if(curMedType!=MED_POLYGONE && curMedType!=MED_POLYEDRE)
+ MEDconnEcr(fid,maa,mesh->getMeshDimension(),&medConn[0],MED_FULL_INTERLACE,nbOfElt,MED_MAILLE,curMedType,MED_NOD);
+ else
+ {
+ // NOTE(review): polyhedra (MED_POLYEDRE) are not written yet — silently skipped.
+ if(curMedType==MED_POLYGONE)
+ MEDpolygoneConnEcr(fid,maa,&medConnIndex[0],medConnIndex.size(),&medConn[0],MED_MAILLE,MED_NOD);
+ }
+ }
+ }
+ MEDfamCr(fid,maa,familyName,0,0,0,0,0,0,0);
+ DataArrayDouble *arr=mesh->getCoords();
+ // Build space-padded component names, one letter ('X','Y','Z',...) per space dimension.
+ // NOTE(review): the stride of 3 between letters assumes it fits 2*MED_TAILLE_PNOM — TODO confirm.
+ char comp[2*MED_TAILLE_PNOM+1];
+ char unit[2*MED_TAILLE_PNOM+1];
+ std::fill(comp,comp+2*MED_TAILLE_PNOM,' ');
+ comp[2*MED_TAILLE_PNOM]='\0';
+ char *work=comp;
+ for(int i=0;i<mesh->getSpaceDimension();i++,work+=3)
+ *work='X'+i;
+ std::fill(unit,unit+2*MED_TAILLE_PNOM+1,'\0');
+ MEDcoordEcr(fid,maa,mesh->getSpaceDimension(),arr->getPointer(),MED_FULL_INTERLACE,mesh->getNumberOfNodes(),MED_CART,comp,unit);
+ MEDfermer(fid);
+}
+
+/*!
+ * NOTE(review): not implemented yet — this stub silently ignores all its arguments. TODO implement.
+ */
+void MEDLoader::writeField(const char *fileName, const char *meshName, ParaMEDMEM::MEDCouplingFieldDouble *f)
+{
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#ifndef __MEDLOADER_HXX__
+#define __MEDLOADER_HXX__
+
+#include "InterpKernelException.hxx"
+#include "NormalizedUnstructuredMesh.hxx"
+
+#include <list>
+#include <vector>
+
+namespace ParaMEDMEM
+{
+ class DataArrayInt;
+ class MEDCouplingUMesh;
+ class MEDCouplingFieldDouble;
+}
+
+namespace MEDLoader
+{
+ /*!
+ * Raw in-memory image of the connectivity of all cells of one geometric type as exchanged
+ * with a MED file. The raw arrays are presumably owned by this object once set; call
+ * releaseArray() to free them — confirm against the implementation.
+ */
+ class MEDConnOfOneElemType
+ {
+ public:
+ MEDConnOfOneElemType(INTERP_KERNEL::NormalizedCellType type, int *conn, int *index, int *fam, int lgth, int connLgth);
+ INTERP_KERNEL::NormalizedCellType getType() const { return _type; }
+ int getLength() const { return _lgth; }
+ int getConnLength() const { return _conn_lgth; }
+ int *getArray() const { return _conn; }
+ int *getIndex() const { return _index; }
+ int *getFam() const { return _fam; }
+ void setGlobal(int *global);
+ void releaseArray();
+ private:
+ int _lgth; // number of cells (size of _fam)
+ int *_fam; // family id per cell
+ int *_conn; // raw nodal connectivity
+ int *_index; // index array; only used for polys, 0 otherwise
+ int *_global; // optional global numbering; 0 when unset
+ int _conn_lgth; // size of _conn; only meaningful for polys
+ INTERP_KERNEL::NormalizedCellType _type;
+ };
+
+ /*!
+ * Values of one double field restricted to one cell type: _nval tuples of _ncomp
+ * components. The raw array is presumably owned by this object; call releaseArray()
+ * to free it — confirm against the implementation.
+ */
+ class MEDFieldDoublePerCellType
+ {
+ public:
+ MEDFieldDoublePerCellType(INTERP_KERNEL::NormalizedCellType type, double *values, int ncomp, int nval);
+ INTERP_KERNEL::NormalizedCellType getType() const { return _type; }
+ int getNbComp() const { return _ncomp; }
+ int getNbOfTuple() const { return _nval; }
+ int getNbOfValues() const { return _ncomp*_nval; }
+ double *getArray() const { return _values; }
+ void releaseArray();
+ private:
+ int _nval; // number of tuples
+ int _ncomp; // number of components per tuple
+ double *_values; // raw values, _nval*_ncomp doubles
+ INTERP_KERNEL::NormalizedCellType _type;
+ };
+ //
+ // Exploration API: introspection of file content without loading any data.
+ std::vector<std::string> GetMeshNames(const char *fileName);
+ std::vector<std::string> GetMeshGroupsNames(const char *fileName, const char *meshName);
+ std::vector<std::string> GetMeshFamilyNames(const char *fileName, const char *meshName);
+ std::vector<std::string> GetCellFieldNamesOnMesh(const char *fileName, const char *meshName);
+ std::vector<std::string> GetNodeFieldNamesOnMesh(const char *fileName, const char *meshName);
+ std::vector< std::pair<int,int> > GetCellFieldIterations(const char *fileName, const char *fieldName);
+ std::vector< std::pair<int,int> > GetNodeFieldIterations(const char *fileName, const char *fieldName);
+ // Reading API: returned objects are owned by the caller (decrRef when done).
+ ParaMEDMEM::MEDCouplingUMesh *ReadUMeshFromFamilies(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& fams);
+ ParaMEDMEM::MEDCouplingUMesh *ReadUMeshFromGroups(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& grps);
+ ParaMEDMEM::MEDCouplingUMesh *ReadUMeshFromFile(const char *fileName, const char *meshName=0, int meshDimRelToMax=0) throw(INTERP_KERNEL::Exception);
+ ParaMEDMEM::MEDCouplingFieldDouble *ReadFieldDoubleCell(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order);
+ ParaMEDMEM::MEDCouplingFieldDouble *ReadFieldDoubleNode(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order);
+ // Writing API.
+ void writeUMesh(const char *fileName, ParaMEDMEM::MEDCouplingUMesh *mesh);
+ void writeField(const char *fileName, const char *meshName, ParaMEDMEM::MEDCouplingFieldDouble *f);
+}
+
+#endif
--- /dev/null
+# Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+include $(top_srcdir)/adm_local/unix/make_common_starter.am
+
+lib_LTLIBRARIES = libmedloader.la
+
+salomeinclude_HEADERS= \
+MEDLoader.hxx
+
+dist_libmedloader_la_SOURCES= \
+MEDLoader.cxx
+
+libmedloader_la_CPPFLAGS= $(MPI_INCLUDES) $(MED2_INCLUDES) $(HDF5_INCLUDES) @CXXTMPDPTHFLAGS@ \
+ -I$(srcdir)/../INTERP_KERNEL \
+ -I$(srcdir)/../INTERP_KERNEL/Geometric2D \
+ -I$(srcdir)/../INTERP_KERNEL/Bases \
+ -I$(srcdir)/../MEDCoupling
+
+# change motivated by the bug KERNEL4778.
+libmedloader_la_LDFLAGS= ../MEDCoupling/libmedcoupling.la \
+../INTERP_KERNEL/libinterpkernel.la $(MPI_LIBS) $(MED2_LIBS) $(HDF5_LIBS)
--- /dev/null
+# Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+include $(top_srcdir)/adm_local/unix/make_common_starter.am
+
+lib_LTLIBRARIES = libparamedloader.la
+
+salomeinclude_HEADERS= \
+ParaMEDLoader.hxx
+
+dist_libparamedloader_la_SOURCES= \
+ParaMEDLoader.cxx
+
+#libmedmem_la_LDFLAGS= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome
+libparamedloader_la_CPPFLAGS= $(MPI_INCLUDES) $(MED2_INCLUDES) $(HDF5_INCLUDES) @CXXTMPDPTHFLAGS@ \
+ -I$(srcdir)/../INTERP_KERNEL \
+ -I$(srcdir)/../INTERP_KERNEL/Geometric2D \
+ -I$(srcdir)/../INTERP_KERNEL/Bases \
+ -I$(srcdir)/../MEDCoupling \
+ -I$(srcdir)/../MEDLoader \
+ -I$(srcdir)/../ParaMEDMEM
+
+libparamedloader_la_LDFLAGS= ../ParaMEDMEM/libparamedmem.la \
+ ../MEDLoader/libmedloader.la \
+ ../MEDCoupling/libmedcoupling.la \
+ ../INTERP_KERNEL/libinterpkernel.la $(MPI_LIBS) $(MED2_LIBS) $(HDF5_LIBS)
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "ParaMEDLoader.hxx"
+#include "MEDLoader.hxx"
+#include "ParaMESH.hxx"
+#include "BlockTopology.hxx"
+#include "MEDCouplingUMesh.hxx"
+
+#include <fstream>
+
+using namespace ParaMEDMEM;
+
+/*!
+ * Writes a distributed mesh: every processor of the group writes its own cell mesh into
+ * '<fileName><rank+1>.med', and processor 0 additionally writes the master file 'fileName'
+ * referencing all per-domain files. Processors outside the group do nothing.
+ */
+void MEDLoader::writeParaMesh(const char *fileName, ParaMEDMEM::ParaMESH *mesh)
+{
+ if(!mesh->getBlockTopology()->getProcGroup()->containsMyRank())
+ return ;
+ int myRank=mesh->getBlockTopology()->getProcGroup()->myRank();
+ int nbDomains=mesh->getBlockTopology()->getProcGroup()->size();
+ std::vector<std::string> fileNames(nbDomains);
+ // Per-domain file names are 1-based: "<fileName>1.med", "<fileName>2.med", ...
+ for(int i=0;i<nbDomains;i++)
+ {
+ std::ostringstream sstr;
+ sstr << fileName << i+1 << ".med";
+ fileNames[i]=sstr.str();
+ }
+ if(myRank==0)
+ writeMasterFile(fileName,fileNames,mesh->getCellMesh()->getName());
+ writeUMesh(fileNames[myRank].c_str(),dynamic_cast<MEDCouplingUMesh *>(mesh->getCellMesh()));
+}
+
+/*!
+ * This method builds the master file 'fileName' of a parallel MED file defined in 'fileNames'.
+ * The master file is plain ASCII ("#MED Fichier V 2.3" header, then the domain count and one
+ * line per domain: mesh name, domain number, sub-mesh name, host and file name).
+ */
+void MEDLoader::writeMasterFile(const char *fileName, const std::vector<std::string>& fileNames, const char *meshName)
+{
+ int nbOfDom=fileNames.size();
+ std::ofstream fs(fileName);
+ fs << "#MED Fichier V 2.3" << " " << std::endl;
+ fs << "#"<<" " << std::endl;
+ fs << nbOfDom <<" " << std::endl;
+ for(int i=0;i<nbOfDom;i++)
+ fs << meshName << " " << i+1 << " " << meshName << "_" << i+1 << " localhost " << fileNames[i] << " " << std::endl;
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#ifndef __PARAMEDLOADER_HXX__
+#define __PARAMEDLOADER_HXX__
+
+#include <string>
+#include <vector>
+
+namespace ParaMEDMEM
+{
+ class ParaMESH;
+ class ParaFIELD;
+}
+
+namespace MEDLoader
+{
+ // Writes 'mesh' in parallel: one MED file per processor of the group, plus a master file (rank 0 only).
+ void writeParaMesh(const char *fileName, ParaMEDMEM::ParaMESH *mesh);
+ // Writes the ASCII master file 'fileName' referencing the per-domain files 'fileNames'.
+ void writeMasterFile(const char *fileName, const std::vector<std::string>& fileNames, const char *meshName);
+}
+
+#endif
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "MEDLoader.hxx"
-#include "CellModel.hxx"
-#include "ParaMESH.hxx"
-#include "BlockTopology.hxx"
-#include "MEDCouplingUMesh.hxx"
-#include "MEDCouplingFieldDouble.hxx"
-
-extern "C"
-{
-#include "med.h"
-}
-
-#include <string>
-#include <cstring>
-#include <sstream>
-#include <fstream>
-#include <iterator>
-#include <algorithm>
-#include <numeric>
-
-med_geometrie_element typmai[MED_NBR_GEOMETRIE_MAILLE+2] = { MED_POINT1,
- MED_SEG2,
- MED_SEG3,
- MED_TRIA3,
- MED_TRIA6,
- MED_QUAD4,
- MED_QUAD8,
- MED_TETRA4,
- MED_TETRA10,
- MED_HEXA8,
- MED_HEXA20,
- MED_PENTA6,
- MED_PENTA15,
- MED_PYRA5,
- MED_PYRA13,
- MED_POLYGONE,
- MED_POLYEDRE };
-
-med_geometrie_element typmainoeud[1] = { MED_NONE };
-
-INTERP_KERNEL::NormalizedCellType typmai2[MED_NBR_GEOMETRIE_MAILLE+2] = { INTERP_KERNEL::NORM_ERROR,
- INTERP_KERNEL::NORM_SEG2,
- INTERP_KERNEL::NORM_SEG3,
- INTERP_KERNEL::NORM_TRI3,
- INTERP_KERNEL::NORM_TRI6,
- INTERP_KERNEL::NORM_QUAD4,
- INTERP_KERNEL::NORM_QUAD8,
- INTERP_KERNEL::NORM_TETRA4,
- INTERP_KERNEL::NORM_TETRA10,
- INTERP_KERNEL::NORM_HEXA8,
- INTERP_KERNEL::NORM_HEXA20,
- INTERP_KERNEL::NORM_PENTA6,
- INTERP_KERNEL::NORM_PENTA15,
- INTERP_KERNEL::NORM_PYRA5,
- INTERP_KERNEL::NORM_PYRA13,
- INTERP_KERNEL::NORM_POLYGON,
- INTERP_KERNEL::NORM_POLYHED };
-
-using namespace ParaMEDMEM;
-
-const char WHITE_SPACES[]=" \n";
-
-/*!
- * @param lgth is the size of fam tab. For classical types conn is size of 'lgth'*number_of_nodes_in_type.
- * @param index is optionnal only for polys. Set it to 0 if it is not the case.
- * @param connLgth is the size of conn in the case of poly. Unsued if it is not the case.
- */
-MEDLoader::MEDConnOfOneElemType::MEDConnOfOneElemType(INTERP_KERNEL::NormalizedCellType type, int *conn, int *index, int *fam, int lgth, int connLgth):_lgth(lgth),_fam(fam),
- _conn(conn),_index(index),
- _global(0),_type(type),
- _conn_lgth(connLgth)
-{
-}
-
-void MEDLoader::MEDConnOfOneElemType::setGlobal(int *global)
-{
- if(_global!=global)
- {
- if(_global)
- delete [] _global;
- _global=global;
- }
-}
-
-void MEDLoader::MEDConnOfOneElemType::releaseArray()
-{
- delete [] _fam;
- delete [] _conn;
- delete [] _index;
- delete [] _global;
-}
-
-MEDLoader::MEDFieldDoublePerCellType::MEDFieldDoublePerCellType(INTERP_KERNEL::NormalizedCellType type, double *values, int ncomp, int nval):_nval(nval),_ncomp(ncomp),_values(values),_type(type)
-{
-}
-
-void MEDLoader::MEDFieldDoublePerCellType::releaseArray()
-{
- delete [] _values;
-}
-
-std::string buildStringFromFortran(const char *expr, int lgth)
-{
- std::string ret(expr,lgth);
- std::string whiteSpaces(WHITE_SPACES);
- std::size_t lgthReal=strlen(ret.c_str());
- std::string ret2=ret.substr(0,lgthReal);
- std::size_t found=ret2.find_last_not_of(whiteSpaces);
- if (found!=std::string::npos)
- ret2.erase(found+1);
- else
- ret2.clear();//ret is all whitespace
- return ret2;
-}
-
-namespace MEDLoader
-{
- std::vector<std::string> getMeshNamesFid(med_idt fid)
- {
- med_maillage type_maillage;
- char maillage_description[MED_TAILLE_DESC+1];
- med_int dim;
- char nommaa[MED_TAILLE_NOM+1];
- med_int n=MEDnMaa(fid);
- std::vector<std::string> ret(n);
- for(int i=0;i<n;i++)
- {
- MEDmaaInfo(fid,i+1,nommaa,&dim,&type_maillage,maillage_description);
- std::string cur=buildStringFromFortran(nommaa,sizeof(nommaa));
- ret[i]=cur;
- }
- return ret;
- }
-
- std::vector<std::string> GetMeshNames(const char *fileName)
- {
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- std::vector<std::string> ret=getMeshNamesFid(fid);
- MEDfermer(fid);
- return ret;
- }
-
- std::vector<std::string> GetMeshFamilyNames(const char *fileName, const char *meshName)
- {
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nfam=MEDnFam(fid,(char *)meshName);
- std::vector<std::string> ret(nfam);
- char nomfam[MED_TAILLE_NOM+1];
- med_int numfam;
- for(int i=0;i<nfam;i++)
- {
- int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
- med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
- med_int *attide=new int[natt];
- med_int *attval=new int[natt];
- char *attdes=new char[MED_TAILLE_DESC*natt+1];
- char *gro=new char[MED_TAILLE_LNOM*ngro+1];
- MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
- std::string cur=buildStringFromFortran(nomfam,sizeof(nomfam));
- ret[i]=cur;
- delete [] attdes;
- delete [] gro;
- delete [] attide;
- delete [] attval;
- }
- MEDfermer(fid);
- return ret;
- }
-
- std::vector<std::string> GetMeshGroupsNames(const char *fileName, const char *meshName)
- {
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nfam=MEDnFam(fid,(char *)meshName);
- std::vector<std::string> ret;
- char nomfam[MED_TAILLE_NOM+1];
- med_int numfam;
- for(int i=0;i<nfam;i++)
- {
- int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
- med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
- med_int *attide=new int[natt];
- med_int *attval=new int[natt];
- char *attdes=new char[MED_TAILLE_DESC*natt+1];
- char *gro=new char[MED_TAILLE_LNOM*ngro+1];
- MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
- for(int j=0;j<ngro;j++)
- {
- std::string cur=buildStringFromFortran(gro+j*MED_TAILLE_LNOM,MED_TAILLE_LNOM);
- if(std::find(ret.begin(),ret.end(),cur)==ret.end())
- ret.push_back(cur);
- }
- delete [] attdes;
- delete [] gro;
- delete [] attide;
- delete [] attval;
- }
- MEDfermer(fid);
- return ret;
- }
-
- std::vector<std::string> GetCellFieldNamesOnMesh(const char *fileName, const char *meshName)
- {
- std::vector<std::string> ret;
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nbFields=MEDnChamp(fid,0);
- //
- med_type_champ typcha;
- //med_int nbpdtnor=0,pflsize,*pflval,lnsize;
- med_int ngauss=0;
- med_int numdt=0,numo=0,nbrefmaa;
- med_float dt=0.0;
- med_booleen local;
- //char pflname[MED_TAILLE_NOM+1]="";
- //char locname[MED_TAILLE_NOM+1]="";
- char maa_ass[MED_TAILLE_NOM+1]="";
- char dt_unit[MED_TAILLE_PNOM+1]="";
- char nomcha[MED_TAILLE_NOM+1]="";
- //
- for(int i=0;i<nbFields;i++)
- {
- med_int ncomp=MEDnChamp(fid,i+1);
- char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
- char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
- MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
- std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
- delete [] comp;
- delete [] unit;
- bool found=false;
- for(int j=0;j<MED_NBR_GEOMETRIE_MAILLE+2 && !found;j++)
- {
- med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_MAILLE,typmai[j]);
- if(nbPdt>0)
- {
- MEDpasdetempsInfo(fid,nomcha,MED_MAILLE,typmai[j],1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
- std::string curMeshName=buildStringFromFortran(maa_ass,MED_TAILLE_NOM+1);
- if(curMeshName==meshName)
- {
- found=true;
- ret.push_back(curFieldName);
- }
- }
- }
- }
- MEDfermer(fid);
- return ret;
- }
-
- std::vector<std::string> GetNodeFieldNamesOnMesh(const char *fileName, const char *meshName)
- {
- std::vector<std::string> ret;
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nbFields=MEDnChamp(fid,0);
- //
- med_type_champ typcha;
- med_int ngauss=0;
- med_int numdt=0,numo=0,nbrefmaa;
- med_float dt=0.0;
- med_booleen local;
- char maa_ass[MED_TAILLE_NOM+1]="";
- char dt_unit[MED_TAILLE_PNOM+1]="";
- char nomcha[MED_TAILLE_NOM+1]="";
- //
- for(int i=0;i<nbFields;i++)
- {
- med_int ncomp=MEDnChamp(fid,i+1);
- char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
- char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
- MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
- std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
- delete [] comp;
- delete [] unit;
- bool found=false;
- med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_NOEUD,MED_NONE);
- if(nbPdt>0)
- {
- MEDpasdetempsInfo(fid,nomcha,MED_NOEUD,MED_NONE,1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
- std::string curMeshName=buildStringFromFortran(maa_ass,MED_TAILLE_NOM+1);
- if(curMeshName==meshName)
- {
- found=true;
- ret.push_back(curFieldName);
- }
- }
- }
- MEDfermer(fid);
- return ret;
- }
-
- std::vector< std::pair<int,int> > GetCellFieldIterations(const char *fileName, const char *fieldName)
- {
- std::vector< std::pair<int,int> > ret;
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nbFields=MEDnChamp(fid,0);
- //
- med_type_champ typcha;
- med_int ngauss=0;
- med_int numdt=0,numo=0,nbrefmaa;
- med_float dt=0.0;
- med_booleen local;
- char maa_ass[MED_TAILLE_NOM+1]="";
- char dt_unit[MED_TAILLE_PNOM+1]="";
- char nomcha[MED_TAILLE_NOM+1]="";
- //
- for(int i=0;i<nbFields;i++)
- {
- med_int ncomp=MEDnChamp(fid,i+1);
- char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
- char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
- MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
- std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
- delete [] comp;
- delete [] unit;
- if(curFieldName==fieldName)
- {
- bool found=false;
- for(int j=0;j<MED_NBR_GEOMETRIE_MAILLE+2 && !found;j++)
- {
- med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_MAILLE,typmai[j]);
- for(int k=0;k<nbPdt;k++)
- {
- MEDpasdetempsInfo(fid,nomcha,MED_MAILLE,typmai[j],k+1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
- found=true;
- ret.push_back(std::make_pair(numdt,numo));
- }
- }
- }
- }
- MEDfermer(fid);
- return ret;
- }
-
- std::vector< std::pair<int,int> > GetNodeFieldIterations(const char *fileName, const char *fieldName)
- {
- std::vector< std::pair<int,int> > ret;
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nbFields=MEDnChamp(fid,0);
- //
- med_type_champ typcha;
- med_int ngauss=0;
- med_int numdt=0,numo=0,nbrefmaa;
- med_float dt=0.0;
- med_booleen local;
- char maa_ass[MED_TAILLE_NOM+1]="";
- char dt_unit[MED_TAILLE_PNOM+1]="";
- char nomcha[MED_TAILLE_NOM+1]="";
- //
- for(int i=0;i<nbFields;i++)
- {
- med_int ncomp=MEDnChamp(fid,i+1);
- char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
- char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
- MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
- std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
- delete [] comp;
- delete [] unit;
- if(curFieldName==fieldName)
- {
- med_int nbPdt=MEDnPasdetemps(fid,nomcha,MED_NOEUD,MED_NONE);
- for(int k=0;k<nbPdt;k++)
- {
- MEDpasdetempsInfo(fid,nomcha,MED_NOEUD,MED_NONE,k+1, &ngauss, &numdt, &numo, dt_unit,&dt, maa_ass, &local, &nbrefmaa);
- ret.push_back(std::make_pair(numdt,numo));
- }
- }
- }
- MEDfermer(fid);
- return ret;
- }
-
- void readFieldDoubleDataInMedFile(const char *fileName, const char *meshName, const char *fieldName, std::list<MEDLoader::MEDFieldDoublePerCellType>& field,
- int iteration, int order, ParaMEDMEM::TypeOfField typeOfOutField, double& time)
- {
- time=0.;
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nbFields=MEDnChamp(fid,0);
- //
- med_type_champ typcha;
- char nomcha[MED_TAILLE_NOM+1]="";
- char pflname [MED_TAILLE_NOM+1]="";
- char locname [MED_TAILLE_NOM+1]="";
- std::map<ParaMEDMEM::TypeOfField, med_entite_maillage> tabEnt;
- std::map<ParaMEDMEM::TypeOfField, med_geometrie_element *> tabType;
- std::map<ParaMEDMEM::TypeOfField, int> tabTypeLgth;
- tabEnt[ON_CELLS]=MED_MAILLE;
- tabType[ON_CELLS]=typmai;
- tabTypeLgth[ON_CELLS]=MED_NBR_GEOMETRIE_MAILLE+2;
- tabEnt[ON_NODES]=MED_NOEUD;
- tabType[ON_NODES]=typmainoeud;
- tabTypeLgth[ON_NODES]=1;
- //
- for(int i=0;i<nbFields;i++)
- {
- med_int ncomp=MEDnChamp(fid,i+1);
- char *comp=new char[ncomp*MED_TAILLE_PNOM+1];
- char *unit=new char[ncomp*MED_TAILLE_PNOM+1];
- MEDchampInfo(fid,i+1,nomcha,&typcha,comp,unit,ncomp);
- std::string curFieldName=buildStringFromFortran(nomcha,MED_TAILLE_NOM+1);
- delete [] comp;
- delete [] unit;
- if(curFieldName==fieldName)
- {
- bool found=false;
- for(int j=0;j<tabTypeLgth[typeOfOutField] && !found;j++)
- {
- med_int nbPdt=MEDnPasdetemps(fid,nomcha,tabEnt[typeOfOutField],typmai[j]);
- if(nbPdt>0)
- {
- int nval=MEDnVal(fid,(char *)fieldName,tabEnt[typeOfOutField],tabType[typeOfOutField][j],iteration,order,(char *)meshName,MED_COMPACT);
- double *valr=new double[ncomp*nval];
- MEDchampLire(fid,(char *)meshName,(char *)fieldName,(unsigned char*)valr,MED_FULL_INTERLACE,MED_ALL,locname,
- pflname,MED_COMPACT,tabEnt[typeOfOutField],tabType[typeOfOutField][j],iteration,order);
- field.push_back(MEDFieldDoublePerCellType(typmai2[j],valr,ncomp,nval));
- }
- }
- }
- }
- MEDfermer(fid);
- }
-
- std::vector<int> getIdsFromFamilies(const char *fileName, const char *meshName, const std::vector<std::string>& fams)
- {
- std::vector<int> ret;
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nfam=MEDnFam(fid,(char *)meshName);
- char nomfam[MED_TAILLE_NOM+1];
- med_int numfam;
- for(int i=0;i<nfam;i++)
- {
- int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
- med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
- med_int *attide=new int[natt];
- med_int *attval=new int[natt];
- char *attdes=new char[MED_TAILLE_DESC*natt+1];
- char *gro=new char[MED_TAILLE_LNOM*ngro+1];
- MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
- std::string cur=buildStringFromFortran(nomfam,sizeof(nomfam));
- if(std::find(fams.begin(),fams.end(),cur)!=fams.end())
- ret.push_back(numfam);
- delete [] attdes;
- delete [] gro;
- delete [] attide;
- delete [] attval;
- }
- MEDfermer(fid);
- return ret;
- }
-
- std::vector<int> getIdsFromGroups(const char *fileName, const char *meshName, const std::vector<std::string>& grps)
- {
- std::vector<int> ret;
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int nfam=MEDnFam(fid,(char *)meshName);
- char nomfam[MED_TAILLE_NOM+1];
- med_int numfam;
- for(int i=0;i<nfam;i++)
- {
- int ngro=MEDnGroupe(fid,(char *)meshName,i+1);
- med_int natt=MEDnAttribut(fid,(char *)meshName,i+1);
- med_int *attide=new int[natt];
- med_int *attval=new int[natt];
- char *attdes=new char[MED_TAILLE_DESC*natt+1];
- char *gro=new char[MED_TAILLE_LNOM*ngro+1];
- MEDfamInfo(fid,(char *)meshName,i+1,nomfam,&numfam,attide,attval,attdes,&natt,gro,&ngro);
- std::string cur=buildStringFromFortran(nomfam,sizeof(nomfam));
- for(int j=0;j<ngro;j++)
- {
- std::string cur=buildStringFromFortran(gro+j*MED_TAILLE_LNOM,MED_TAILLE_LNOM);
- if(std::find(grps.begin(),grps.end(),cur)!=grps.end())
- {
- ret.push_back(numfam);
- break;
- }
- }
- delete [] attdes;
- delete [] gro;
- delete [] attide;
- delete [] attval;
- }
- MEDfermer(fid);
- return ret;
- }
-
- med_int getIdFromMeshName(med_idt fid, const char *meshName) throw(INTERP_KERNEL::Exception)
- {
- if(meshName==0)
- return 1;
- std::string meshNameStr(meshName);
- if(meshNameStr=="?")
- return 1;
- std::vector<std::string> meshes=getMeshNamesFid(fid);
- if(meshes.empty())
- throw INTERP_KERNEL::Exception("No mesh in file");
- std::vector<std::string>::iterator iter=std::find(meshes.begin(),meshes.end(),meshNameStr);
- if(iter==meshes.end())
- {
- std::ostringstream os2;
- os2 << "MeshName '" << meshName << "' not in file : meshes available : ";
- std::copy(meshes.begin(),meshes.end(),std::ostream_iterator<std::string>(os2," "));
- throw INTERP_KERNEL::Exception(os2.str().c_str());
- }
- return iter-meshes.begin()+1;
- }
-
- /*!
- * This methods allows to merger all entities and to considerate only cell types.
- */
- void dispatchElems(int nbOfElemCell, int nbOfElemFace, int& nbOfElem, med_entite_maillage& whichEntity)
- {
- if(nbOfElemCell>=nbOfElemFace)
- {
- whichEntity=MED_MAILLE;
- nbOfElem=nbOfElemCell;
- }
- else
- {
- whichEntity=MED_FACE;
- nbOfElem=nbOfElemFace;
- }
- }
-
- void readUMeshDataInMedFile(med_idt fid, med_int meshId, double *&coords, int& nCoords, int& spaceDim, std::list<MEDLoader::MEDConnOfOneElemType>& conn)
- {
- char nommaa[MED_TAILLE_NOM+1];
- char maillage_description[MED_TAILLE_DESC+1];
- char comp[3*MED_TAILLE_PNOM+1];
- char unit[3*MED_TAILLE_PNOM+1];
- med_maillage type_maillage;
- med_int Mdim;
- MEDmaaInfo(fid,meshId,nommaa,&Mdim,&type_maillage,maillage_description);
- spaceDim=(int)Mdim;
- nCoords=MEDnEntMaa(fid,nommaa,MED_COOR,MED_NOEUD,(med_geometrie_element)0,(med_connectivite)0);
- coords=new double[nCoords*spaceDim];
- med_repere repere;
- MEDcoordLire(fid,nommaa,Mdim,coords,MED_FULL_INTERLACE,MED_ALL,NULL,0,&repere,comp,unit);
- med_booleen inoele, inuele;
- for(int i=0;i<MED_NBR_GEOMETRIE_MAILLE;i++)
- {
- med_geometrie_element curMedType=typmai[i];
- med_entite_maillage whichEntity;
- int curNbOfElemM=MEDnEntMaa(fid,nommaa,MED_CONN,MED_MAILLE,curMedType,MED_NOD);
- int curNbOfElemF=MEDnEntMaa(fid,nommaa,MED_CONN,MED_FACE,curMedType,MED_NOD);
- int curNbOfElem;
- dispatchElems(curNbOfElemM,curNbOfElemF,curNbOfElem,whichEntity);
- if(curNbOfElem>0)
- {
- int *connTab=new int[(curMedType%100)*curNbOfElem];
- int *fam=new int[curNbOfElem];
- MEDLoader::MEDConnOfOneElemType elem(typmai2[i],connTab,0,fam,curNbOfElem,-1);
- int *tmp=new int[curNbOfElem];
- char *noms=new char[MED_TAILLE_PNOM*curNbOfElem+1];
- MEDelementsLire(fid,nommaa,Mdim,connTab,MED_FULL_INTERLACE,noms,&inoele,tmp,&inuele,fam,curNbOfElem,whichEntity,curMedType,MED_NOD);
- delete [] tmp;
- delete [] noms;
- //trying to read global numbering
- int *globArr=new int[curNbOfElem];
- if(MEDglobalNumLire(fid,nommaa,globArr,curNbOfElem,whichEntity,curMedType)==0)
- elem.setGlobal(globArr);
- else
- delete [] globArr;
- conn.push_back(elem);
- }
- }
- int curNbOfPolyElem;
- int curNbOfPolyElemM=MEDnEntMaa(fid,nommaa,MED_CONN,MED_MAILLE,MED_POLYGONE,MED_NOD);
- int curNbOfPolyElemF=MEDnEntMaa(fid,nommaa,MED_CONN,MED_FACE,MED_POLYGONE,MED_NOD);
- med_entite_maillage whichPolyEntity;
- dispatchElems(curNbOfPolyElemM,curNbOfPolyElemF,curNbOfPolyElem,whichPolyEntity);
- if(curNbOfPolyElem>0)
- {
- med_int arraySize;
- MEDpolygoneInfo(fid,nommaa,whichPolyEntity,MED_NOD,&arraySize);
- int *index=new int[curNbOfPolyElem+1];
- int *locConn=new int[arraySize];
- int *fam=new int[curNbOfPolyElem];
- MEDLoader::MEDConnOfOneElemType elem(INTERP_KERNEL::NORM_POLYGON,locConn,index,fam,curNbOfPolyElem,arraySize);
- MEDpolygoneConnLire(fid,nommaa,index,curNbOfPolyElem+1,locConn,whichPolyEntity,MED_NOD);
- MEDfamLire(fid,nommaa,fam,curNbOfPolyElem,MED_MAILLE,MED_POLYGONE);
- conn.push_back(elem);
- }
- curNbOfPolyElem=MEDnEntMaa(fid,nommaa,MED_CONN,MED_MAILLE,MED_POLYEDRE,MED_NOD);
- if(curNbOfPolyElem>0)
- {
- med_int indexFaceLgth,connFaceLgth;
- MEDpolyedreInfo(fid,nommaa,MED_NOD,&indexFaceLgth,&connFaceLgth);
- int *index=new int[curNbOfPolyElem+1];
- int *indexFace=new int[indexFaceLgth];
- int *locConn=new int[connFaceLgth];
- int *fam=new int[curNbOfPolyElem];
- MEDpolyedreConnLire(fid,nommaa,index,curNbOfPolyElem+1,indexFace,indexFaceLgth,locConn,MED_NOD);
- MEDfamLire(fid,nommaa,fam,curNbOfPolyElem,MED_MAILLE,MED_POLYEDRE);
- int arraySize=connFaceLgth;
- for(int i=0;i<curNbOfPolyElem;i++)
- arraySize+=index[i+1]-index[i]-1;
- int *finalConn=new int[arraySize];
- int *finalIndex=new int[curNbOfPolyElem+1];
- finalIndex[0]=1;
- int *wFinalConn=finalConn;
- for(int i=0;i<curNbOfPolyElem;i++)
- {
- finalIndex[i+1]=finalIndex[i]+index[i+1]-index[i]-1+indexFace[index[i+1]-1]-indexFace[index[i]-1];
- wFinalConn=std::copy(locConn+indexFace[index[i]-1]-1,locConn+indexFace[index[i]]-1,wFinalConn);
- for(int j=index[i];j<index[i+1]-1;j++)
- {
- *wFinalConn++=0;
- wFinalConn=std::copy(locConn+indexFace[j]-1,locConn+indexFace[j+1]-1,wFinalConn);
- }
- }
- delete [] index;
- delete [] locConn;
- delete [] indexFace;
- MEDLoader::MEDConnOfOneElemType elem(INTERP_KERNEL::NORM_POLYHED,finalConn,finalIndex,fam,curNbOfPolyElem,arraySize);
- conn.push_back(elem);
- }
- }
-
- template<class T>
- unsigned calculateHighestMeshDim(const std::list<T>& conn)
- {
- unsigned ret=0;
- for(typename std::list<T>::const_iterator iter=conn.begin();iter!=conn.end();iter++)
- {
- unsigned curDim=INTERP_KERNEL::CellModel::getCellModel((*iter).getType()).getDimension();
- if(ret<curDim)
- ret=curDim;
- }
- return ret;
- }
-
- template<class T>
- void keepSpecifiedMeshDim(typename std::list<T>& conn, unsigned meshDim)
- {
- for(typename std::list<T>::iterator iter=conn.begin();iter!=conn.end();)
- {
- unsigned curDim=INTERP_KERNEL::CellModel::getCellModel((*iter).getType()).getDimension();
- if(curDim!=meshDim)
- {
- (*iter).releaseArray();
- iter=conn.erase(iter);
- }
- else
- iter++;
- }
- }
-
- template<class T>
- void keepTypes(typename std::list<T>& conn, const std::vector<INTERP_KERNEL::NormalizedCellType>& typesToKeep)
- {
- if(!typesToKeep.empty())
- {
- for(typename std::list<T>::iterator iter=conn.begin();iter!=conn.end();)
- {
- INTERP_KERNEL::NormalizedCellType curType=(*iter).getType();
- if(std::find(typesToKeep.begin(),typesToKeep.end(),curType)==typesToKeep.end())
- {
- (*iter).releaseArray();
- iter=conn.erase(iter);
- }
- else
- iter++;
- }
- }
- }
-
- class FieldPerTypeAccumulator
- {
- public:
- int operator()(int res, const MEDLoader::MEDFieldDoublePerCellType& elt) { return res+elt.getNbOfTuple(); }
- };
-
- class FieldPerTypeCopier
- {
- public:
- FieldPerTypeCopier(double *ptr):_ptr(ptr) { }
- void operator()(const MEDLoader::MEDFieldDoublePerCellType& elt) { _ptr=std::copy(elt.getArray(),elt.getArray()+elt.getNbOfValues(),_ptr); }
- private:
- double *_ptr;
- };
-
- ParaMEDMEM::DataArrayDouble *buildArrayFromRawData(const std::list<MEDLoader::MEDFieldDoublePerCellType>& fieldPerType)
- {
- ParaMEDMEM::DataArrayDouble *ret=ParaMEDMEM::DataArrayDouble::New();
- int totalNbOfTuple=std::accumulate(fieldPerType.begin(),fieldPerType.end(),0,FieldPerTypeAccumulator());
- int nbOfComp=(*fieldPerType.begin()).getNbComp();
- double *ptr=new double[nbOfComp*totalNbOfTuple];
- ret->useArray(ptr,true,ParaMEDMEM::CPP_DEALLOC,totalNbOfTuple,nbOfComp);
- std::for_each(fieldPerType.begin(),fieldPerType.end(),FieldPerTypeCopier(ptr));
- return ret;
- }
-
- class PolyCounterForFams
- {
- public:
- PolyCounterForFams(int id, const int *index):_id(id),_index(index),_count(0),_sigma(0) { }
- void operator()(int val) { if(val==_id) _sigma+=_index[_count+1]-_index[_count]; _count++; }
- int getSigma() const { return _sigma; }
- private:
- int _id;
- const int *_index;
- int _count;
- int _sigma;
- };
-
- void tradMEDFileCoreFrmt2MEDCouplingUMesh(const std::list<MEDLoader::MEDConnOfOneElemType>& medConnFrmt,
- DataArrayInt* &conn,
- DataArrayInt* &connIndex,
- const std::vector<int>& familiesToKeep)
- {
- bool keepAll=familiesToKeep.empty();
- if(medConnFrmt.empty())
- {
- conn=0;
- connIndex=0;
- return ;
- }
- std::list<MEDLoader::MEDConnOfOneElemType>::const_iterator iter=medConnFrmt.begin();
- int totalNbOfCells=0;
- int totalNbOfMedConn=0;
- for(;iter!=medConnFrmt.end();iter++)
- {
- const INTERP_KERNEL::CellModel& cellMod=INTERP_KERNEL::CellModel::getCellModel((*iter).getType());
- if(keepAll)
- totalNbOfCells+=(*iter).getLength();
- else
- for(std::vector<int>::const_iterator iter2=familiesToKeep.begin();iter2!=familiesToKeep.end();iter2++)
- totalNbOfCells+=std::count((*iter).getFam(),(*iter).getFam()+(*iter).getLength(),*iter2);
- if(!cellMod.isDynamic())
- if(keepAll)
- totalNbOfMedConn+=(*iter).getLength()*cellMod.getNumberOfNodes();
- else
- for(std::vector<int>::const_iterator iter2=familiesToKeep.begin();iter2!=familiesToKeep.end();iter2++)
- totalNbOfMedConn+=std::count((*iter).getFam(),(*iter).getFam()+(*iter).getLength(),*iter2)*cellMod.getNumberOfNodes();
- else
- if(keepAll)
- totalNbOfMedConn+=(*iter).getConnLength();
- else
- for(std::vector<int>::const_iterator iter2=familiesToKeep.begin();iter2!=familiesToKeep.end();iter2++)
- {
- PolyCounterForFams res=std::for_each((*iter).getFam(),(*iter).getFam()+(*iter).getLength(),PolyCounterForFams(*iter2,(*iter).getIndex()));
- totalNbOfMedConn+=res.getSigma();
- }
- }
- connIndex=DataArrayInt::New();
- conn=DataArrayInt::New();
- connIndex->alloc(totalNbOfCells+1,1);
- int *connIdxPtr=connIndex->getPointer();
- int connFillId=0;
- conn->alloc(totalNbOfMedConn+totalNbOfCells,1);
- int *connPtr=conn->getPointer();
- for(iter=medConnFrmt.begin();iter!=medConnFrmt.end();iter++)
- {
- INTERP_KERNEL::NormalizedCellType type=(*iter).getType();
- const int *sourceConn=(*iter).getArray();
- const int *sourceIndex=(*iter).getIndex();
- const INTERP_KERNEL::CellModel& cellMod=INTERP_KERNEL::CellModel::getCellModel(type);
- int nbOfCellsInCurType;
- int nbOfNodesIn1Cell=cellMod.getNumberOfNodes();
- nbOfCellsInCurType=(*iter).getLength();
- bool isDyn=cellMod.isDynamic();
- int *tmpConnPtr;
- for(int i=0;i<nbOfCellsInCurType;i++)
- {
- if(keepAll)
- {
- *connIdxPtr=connFillId;
- *connPtr++=type;
- if(!isDyn)
- tmpConnPtr=std::transform(sourceConn,sourceConn+nbOfNodesIn1Cell,connPtr,std::bind2nd(std::minus<int>(),1));
- else
- tmpConnPtr=std::transform(sourceConn,sourceConn+sourceIndex[i+1]-sourceIndex[i],connPtr,std::bind2nd(std::minus<int>(),1));
- connIdxPtr++;
- nbOfNodesIn1Cell=tmpConnPtr-connPtr;
- connFillId+=nbOfNodesIn1Cell+1;
- connPtr=tmpConnPtr;
- }
- else if(std::find(familiesToKeep.begin(),familiesToKeep.end(),(*iter).getFam()[i])!=familiesToKeep.end())
- {
- *connIdxPtr=connFillId;
- *connPtr++=type;
- if(!isDyn)
- tmpConnPtr=std::transform(sourceConn,sourceConn+nbOfNodesIn1Cell,connPtr,std::bind2nd(std::minus<int>(),1));
- else
- tmpConnPtr=std::transform(sourceConn,sourceConn+sourceIndex[i+1]-sourceIndex[i],connPtr,std::bind2nd(std::minus<int>(),1));
- connIdxPtr++;
- nbOfNodesIn1Cell=tmpConnPtr-connPtr;
- connFillId+=nbOfNodesIn1Cell+1;
- connPtr=tmpConnPtr;
- }
- sourceConn+=nbOfNodesIn1Cell;
- }
- *connIdxPtr=connFillId;
- }
- }
-
- template<class T>
- void releaseMEDFileCoreFrmt(typename std::list<T>& medConnFrmt)
- {
- for(typename std::list<T>::iterator iter=medConnFrmt.begin();iter!=medConnFrmt.end();iter++)
- (*iter).releaseArray();
- medConnFrmt.clear();
- }
-
- /*!
- * This method builds a sub set of connectivity for a given type 'type'.
- * @param conn input containing connectivity with MEDCoupling format.
- * @param connIndex input containing connectivity index in MEDCoupling format.
- * @param type input specifying which cell types will be extracted in conn4MEDFile.
- * @param conn4MEDFile output containing the connectivity directly understandable by MEDFile; conn4MEDFile has to be empty before this method called.
- * @param connIndex4MEDFile output containing index connectivity understandable by MEDFile; only used by polygons and polyhedrons (it is face nodal connec).
- * @param connIndexRk24MEDFile output containing index of rank 2 understandable by MEDFile; only used by polyhedrons.
- * @return nb of elements extracted.
- */
- int buildMEDSubConnectivityOfOneTypeStaticTypes(DataArrayInt *conn, DataArrayInt *connIndex, INTERP_KERNEL::NormalizedCellType type, std::vector<int>& conn4MEDFile)
- {
- int ret=0;
- int nbOfElem=connIndex->getNbOfElems()-1;
- const int *connPtr=conn->getPointer();
- const int *connIdxPtr=connIndex->getPointer();
- for(int i=0;i<nbOfElem;i++)
- {
- int delta=connIdxPtr[1]-connIdxPtr[0];
- if(*connPtr==type)
- {
- conn4MEDFile.insert(conn4MEDFile.end(),connPtr+1,connPtr+delta);
- ret++;
- }
- connIdxPtr++;
- connPtr+=delta;
- }
- std::transform(conn4MEDFile.begin(),conn4MEDFile.end(),conn4MEDFile.begin(),std::bind2nd(std::plus<int>(),1));
- return ret;
- }
-
- int buildMEDSubConnectivityOfOneTypesPolyg(DataArrayInt *conn, DataArrayInt *connIndex, std::vector<int>& conn4MEDFile, std::vector<int>& connIndex4MEDFile)
- {
- int ret=0;
- int nbOfElem=connIndex->getNbOfElems()-1;
- const int *connPtr=conn->getPointer();
- const int *connIdxPtr=connIndex->getPointer();
- connIndex4MEDFile.push_back(1);
- for(int i=0;i<nbOfElem;i++)
- {
- int delta=connIdxPtr[1]-connIdxPtr[0];
- if(*connPtr==INTERP_KERNEL::NORM_POLYGON)
- {
- conn4MEDFile.insert(conn4MEDFile.end(),connPtr+1,connPtr+delta);
- connIndex4MEDFile.push_back(connIndex4MEDFile.back()+delta-1);
- ret++;
- }
- connIdxPtr++;
- connPtr+=delta;
- }
- std::transform(conn4MEDFile.begin(),conn4MEDFile.end(),conn4MEDFile.begin(),std::bind2nd(std::plus<int>(),1));
- return ret;
- }
-
- int buildMEDSubConnectivityOfOneTypesPolyh(DataArrayInt *conn, DataArrayInt *connIndex, std::vector<int>& conn4MEDFile, std::vector<int>& connIndex4MEDFile, std::vector<int>& connIndexRk24MEDFile)
- {
- return 0;
- }
-
- /*!
- * This method builds a sub set of connectivity for a given type 'type'.
- * @param conn input containing connectivity with MEDCoupling format.
- * @param connIndex input containing connectivity index in MEDCoupling format.
- * @param type input specifying which cell types will be extracted in conn4MEDFile.
- * @param conn4MEDFile output containing the connectivity directly understandable by MEDFile; conn4MEDFile has to be empty before this method called.
- * @param connIndex4MEDFile output containing index connectivity understandable by MEDFile; only used by polygons and polyhedrons (it is face nodal connec).
- * @param connIndexRk24MEDFile output containing index of rank 2 understandable by MEDFile; only used by polyhedrons.
- * @return nb of elements extracted.
- */
- int buildMEDSubConnectivityOfOneType(DataArrayInt *conn, DataArrayInt *connIndex, INTERP_KERNEL::NormalizedCellType type, std::vector<int>& conn4MEDFile,
- std::vector<int>& connIndex4MEDFile, std::vector<int>& connIndexRk24MEDFile)
- {
-
- const INTERP_KERNEL::CellModel& cellMod=INTERP_KERNEL::CellModel::getCellModel(type);
- if(!cellMod.isDynamic())
- return buildMEDSubConnectivityOfOneTypeStaticTypes(conn,connIndex,type,conn4MEDFile);
- else
- {
- if(type==INTERP_KERNEL::NORM_POLYGON)
- return buildMEDSubConnectivityOfOneTypesPolyg(conn,connIndex,conn4MEDFile,connIndex4MEDFile);
- else
- return buildMEDSubConnectivityOfOneTypesPolyh(conn,connIndex,conn4MEDFile,connIndex4MEDFile,connIndexRk24MEDFile);
- }
- }
-
- /*!
- * @param ids is a in vector containing families ids whose cells have to be kept. If empty all cells are kept.
- * @param typesToKeep is a in vector that indicates which types to keep after dimension filtering.
- * @param meshDimExtract out parameter that gives the mesh dimension.
- */
- MEDCouplingUMesh *readUMeshFromFileLev1(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<int>& ids,
- const std::vector<INTERP_KERNEL::NormalizedCellType>& typesToKeep, unsigned& meshDimExtract) throw(INTERP_KERNEL::Exception)
- {
- //Extraction data from MED file.
- med_idt fid=MEDouvrir((char *)fileName,MED_LECTURE);
- med_int mid=getIdFromMeshName(fid,meshName);
- double *coords;
- int nCoords;
- int spaceDim;
- std::list<MEDLoader::MEDConnOfOneElemType> conn;
- readUMeshDataInMedFile(fid,mid,coords,nCoords,spaceDim,conn);
- meshDimExtract=calculateHighestMeshDim<MEDConnOfOneElemType>(conn);
- meshDimExtract=meshDimExtract+meshDimRelToMax;
- keepSpecifiedMeshDim<MEDConnOfOneElemType>(conn,meshDimExtract);
- keepTypes<MEDConnOfOneElemType>(conn,typesToKeep);
- MEDfermer(fid);
- //Put data in returned data structure.
- MEDCouplingUMesh *ret=MEDCouplingUMesh::New();
- ret->setName(meshName);
- ret->setMeshDimension(meshDimExtract);
- //
- DataArrayDouble *coordsArr=DataArrayDouble::New();
- coordsArr->useArray(coords,true,ParaMEDMEM::CPP_DEALLOC,nCoords,spaceDim);
- ret->setCoords(coordsArr);
- coordsArr->decrRef();
- //
- DataArrayInt *connArr,*connIndexArr;
- tradMEDFileCoreFrmt2MEDCouplingUMesh(conn,connArr,connIndexArr,ids);
- ret->setConnectivity(connArr,connIndexArr);
- //clean-up
- if(connArr)
- connArr->decrRef();
- if(connIndexArr)
- connIndexArr->decrRef();
- releaseMEDFileCoreFrmt<MEDLoader::MEDConnOfOneElemType>(conn);
- return ret;
- }
-
- ParaMEDMEM::MEDCouplingFieldDouble *readFieldDoubleLev1(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order,
- ParaMEDMEM::TypeOfField typeOfOutField)
- {
- std::list<MEDLoader::MEDFieldDoublePerCellType> fieldPerCellType;
- double time;
- readFieldDoubleDataInMedFile(fileName,meshName,fieldName,fieldPerCellType,iteration,order,typeOfOutField,time);
- std::vector<int> familiesToKeep;
- std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
- if(typeOfOutField==ON_CELLS)
- for(std::list<MEDLoader::MEDFieldDoublePerCellType>::const_iterator iter=fieldPerCellType.begin();iter!=fieldPerCellType.end();iter++)
- typesToKeep.push_back((*iter).getType());
- unsigned meshDim;
- ParaMEDMEM::MEDCouplingUMesh *mesh=readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
- if(typeOfOutField==ON_CELLS)
- keepSpecifiedMeshDim<MEDFieldDoublePerCellType>(fieldPerCellType,meshDim);
- ParaMEDMEM::MEDCouplingFieldDouble *ret=ParaMEDMEM::MEDCouplingFieldDouble::New(typeOfOutField,ONE_TIME);
- ret->setName(fieldName);
- ret->setTime(time,iteration,order);
- ret->setMesh(mesh);
- mesh->decrRef();
- ParaMEDMEM::DataArrayDouble *arr=buildArrayFromRawData(fieldPerCellType);
- ret->setArray(arr);
- arr->decrRef();
- releaseMEDFileCoreFrmt<MEDLoader::MEDFieldDoublePerCellType>(fieldPerCellType);
- return ret;
- }
-
- /*!
- * This method builds the master file 'fileName' of a parallel MED file defined in 'fileNames'.
- */
- void writeMasterFile(const char *fileName, const std::vector<std::string>& fileNames, const char *meshName)
- {
- int nbOfDom=fileNames.size();
- std::ofstream fs(fileName);
- fs << "#MED Fichier V 2.3" << " " << std::endl;
- fs << "#"<<" " << std::endl;
- fs << nbOfDom <<" " << std::endl;
- for(int i=0;i<nbOfDom;i++)
- fs << meshName << " " << i+1 << " " << meshName << "_" << i+1 << " localhost " << fileNames[i] << " " << std::endl;
- }
-}
-
-MEDCouplingUMesh *MEDLoader::ReadUMeshFromFile(const char *fileName, const char *meshName, int meshDimRelToMax) throw(INTERP_KERNEL::Exception)
-{
- std::vector<int> familiesToKeep;
- std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
- unsigned meshDim;
- return readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
-}
-
-ParaMEDMEM::MEDCouplingUMesh *MEDLoader::ReadUMeshFromFamilies(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& fams)
-{
- std::vector<int> familiesToKeep=getIdsFromFamilies(fileName,meshName,fams);
- std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
- unsigned meshDim;
- return readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
-}
-
-ParaMEDMEM::MEDCouplingUMesh *MEDLoader::ReadUMeshFromGroups(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& grps)
-{
- std::vector<int> familiesToKeep=getIdsFromGroups(fileName,meshName,grps);
- std::vector<INTERP_KERNEL::NormalizedCellType> typesToKeep;
- unsigned meshDim;
- return readUMeshFromFileLev1(fileName,meshName,meshDimRelToMax,familiesToKeep,typesToKeep,meshDim);
-}
-
-ParaMEDMEM::MEDCouplingFieldDouble *MEDLoader::ReadFieldDoubleCell(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order)
-{
- return readFieldDoubleLev1(fileName,meshName,meshDimRelToMax,fieldName,iteration,order,ON_CELLS);
-}
-
-ParaMEDMEM::MEDCouplingFieldDouble *MEDLoader::ReadFieldDoubleNode(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order)
-{
- return readFieldDoubleLev1(fileName,meshName,meshDimRelToMax,fieldName,iteration,order,ON_NODES);
-}
-
-void MEDLoader::writeUMesh(const char *fileName, ParaMEDMEM::MEDCouplingUMesh *mesh)
-{
- med_idt fid=MEDouvrir((char *)fileName,MED_CREATION);
- std::string meshName(mesh->getName());
- if(meshName=="")
- {
- MEDfermer(fid);
- throw INTERP_KERNEL::Exception("MEDCouplingMesh must have a not null name !");
- }
- char maa[MED_TAILLE_NOM+1];
- strcpy(maa,meshName.c_str());
- MEDmaaCr(fid,maa,mesh->getSpaceDimension(),MED_NON_STRUCTURE,maa);
- std::set<INTERP_KERNEL::NormalizedCellType> allTypes(mesh->getAllTypes());
- DataArrayInt *conn=mesh->getNodalConnectivity();
- DataArrayInt *connIndex=mesh->getNodalConnectivityIndex();
- char familyName[MED_TAILLE_NOM+1];
- std::fill(familyName,familyName+MED_TAILLE_NOM+1,'\0');
- const char DftFamilyName[]="DftFamily";
- std::copy(DftFamilyName,DftFamilyName+sizeof(DftFamilyName),familyName);
- for(int i=0;i<MED_NBR_GEOMETRIE_MAILLE+2;i++)
- {
- med_geometrie_element curMedType=typmai[i];
- INTERP_KERNEL::NormalizedCellType curType=typmai2[i];
- if(allTypes.find(curType)!=allTypes.end())
- {
- std::vector<int> medConn;
- std::vector<int> medConnIndex;
- std::vector<int> medConnIndex2;
- int nbOfElt=buildMEDSubConnectivityOfOneType(conn,connIndex,curType,medConn,medConnIndex,medConnIndex2);
- if(curMedType!=MED_POLYGONE && curMedType!=MED_POLYEDRE)
- MEDconnEcr(fid,maa,mesh->getMeshDimension(),&medConn[0],MED_FULL_INTERLACE,nbOfElt,MED_MAILLE,curMedType,MED_NOD);
- else
- {
- if(curMedType==MED_POLYGONE)
- MEDpolygoneConnEcr(fid,maa,&medConnIndex[0],medConnIndex.size(),&medConn[0],MED_MAILLE,MED_NOD);
- }
- }
- }
- MEDfamCr(fid,maa,familyName,0,0,0,0,0,0,0);
- DataArrayDouble *arr=mesh->getCoords();
- char comp[2*MED_TAILLE_PNOM+1];
- char unit[2*MED_TAILLE_PNOM+1];
- std::fill(comp,comp+2*MED_TAILLE_PNOM,' ');
- comp[2*MED_TAILLE_PNOM]='\0';
- char *work=comp;
- for(int i=0;i<mesh->getSpaceDimension();i++,work+=3)
- *work='X'+i;
- std::fill(unit,unit+2*MED_TAILLE_PNOM+1,'\0');
- MEDcoordEcr(fid,maa,mesh->getSpaceDimension(),arr->getPointer(),MED_FULL_INTERLACE,mesh->getNumberOfNodes(),MED_CART,comp,unit);
- MEDfermer(fid);
-}
-
-void MEDLoader::writeParaMesh(const char *fileName, ParaMEDMEM::ParaMESH *mesh)
-{
- if(!mesh->getBlockTopology()->getProcGroup()->containsMyRank())
- return ;
- int myRank=mesh->getBlockTopology()->getProcGroup()->myRank();
- int nbDomains=mesh->getBlockTopology()->getProcGroup()->size();
- std::vector<std::string> fileNames(nbDomains);
- for(int i=0;i<nbDomains;i++)
- {
- std::ostringstream sstr;
- sstr << fileName << i+1 << ".med";
- fileNames[i]=sstr.str();
- }
- if(myRank==0)
- writeMasterFile(fileName,fileNames,mesh->getCellMesh()->getName());
- writeUMesh(fileNames[myRank].c_str(),dynamic_cast<MEDCouplingUMesh *>(mesh->getCellMesh()));
-}
-
-void MEDLoader::writeParaField(const char *fileName, const char *meshName, ParaMEDMEM::ParaFIELD *f)
-{
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#ifndef __MEDLOADER_HXX__
-#define __MEDLOADER_HXX__
-
-#include "InterpKernelException.hxx"
-#include "NormalizedUnstructuredMesh.hxx"
-
-#include <list>
-#include <vector>
-
-namespace ParaMEDMEM
-{
- class ParaMESH;
- class ParaFIELD;
- class DataArrayInt;
- class MEDCouplingUMesh;
- class MEDCouplingFieldDouble;
-}
-
-namespace MEDLoader
-{
- class MEDConnOfOneElemType
- {
- public:
- MEDConnOfOneElemType(INTERP_KERNEL::NormalizedCellType type, int *conn, int *index, int *fam, int lgth, int connLgth);
- INTERP_KERNEL::NormalizedCellType getType() const { return _type; }
- int getLength() const { return _lgth; }
- int getConnLength() const { return _conn_lgth; }
- int *getArray() const { return _conn; }
- int *getIndex() const { return _index; }
- int *getFam() const { return _fam; }
- void setGlobal(int *global);
- void releaseArray();
- private:
- int _lgth;
- int *_fam;
- int *_conn;
- int *_index;
- int *_global;
- int _conn_lgth;
- INTERP_KERNEL::NormalizedCellType _type;
- };
-
- class MEDFieldDoublePerCellType
- {
- public:
- MEDFieldDoublePerCellType(INTERP_KERNEL::NormalizedCellType type, double *values, int ncomp, int nval);
- INTERP_KERNEL::NormalizedCellType getType() const { return _type; }
- int getNbComp() const { return _ncomp; }
- int getNbOfTuple() const { return _nval; }
- int getNbOfValues() const { return _ncomp*_nval; }
- double *getArray() const { return _values; }
- void releaseArray();
- private:
- int _nval;
- int _ncomp;
- double *_values;
- INTERP_KERNEL::NormalizedCellType _type;
- };
- //
- std::vector<std::string> GetMeshNames(const char *fileName);
- std::vector<std::string> GetMeshGroupsNames(const char *fileName, const char *meshName);
- std::vector<std::string> GetMeshFamilyNames(const char *fileName, const char *meshName);
- std::vector<std::string> GetCellFieldNamesOnMesh(const char *fileName, const char *meshName);
- std::vector<std::string> GetNodeFieldNamesOnMesh(const char *fileName, const char *meshName);
- std::vector< std::pair<int,int> > GetCellFieldIterations(const char *fileName, const char *fieldName);
- std::vector< std::pair<int,int> > GetNodeFieldIterations(const char *fileName, const char *fieldName);
- ParaMEDMEM::MEDCouplingUMesh *ReadUMeshFromFamilies(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& fams);
- ParaMEDMEM::MEDCouplingUMesh *ReadUMeshFromGroups(const char *fileName, const char *meshName, int meshDimRelToMax, const std::vector<std::string>& grps);
- ParaMEDMEM::MEDCouplingUMesh *ReadUMeshFromFile(const char *fileName, const char *meshName=0, int meshDimRelToMax=0) throw(INTERP_KERNEL::Exception);
- ParaMEDMEM::MEDCouplingFieldDouble *ReadFieldDoubleCell(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order);
- ParaMEDMEM::MEDCouplingFieldDouble *ReadFieldDoubleNode(const char *fileName, const char *meshName, int meshDimRelToMax, const char *fieldName, int iteration, int order);
- void writeUMesh(const char *fileName, ParaMEDMEM::MEDCouplingUMesh *mesh);
- void writeParaMesh(const char *fileName, ParaMEDMEM::ParaMESH *mesh);
- void writeParaField(const char *fileName, const char *meshName, ParaMEDMEM::ParaFIELD *f);
-}
-
-#endif
+++ /dev/null
-# Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-#
-include $(top_srcdir)/adm_local/unix/make_common_starter.am
-
-lib_LTLIBRARIES= libparamedmemmedloader.la
-
-salomeinclude_HEADERS= \
-MEDLoader.hxx
-
-dist_libparamedmemmedloader_la_SOURCES= \
-MEDLoader.cxx
-
-#libmedmem_la_LDFLAGS= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome
-libparamedmemmedloader_la_CPPFLAGS= $(MPI_INCLUDES) $(MED2_INCLUDES) $(HDF5_INCLUDES) @CXXTMPDPTHFLAGS@ \
- -I$(srcdir)/../../INTERP_KERNEL \
- -I$(srcdir)/../../INTERP_KERNEL/Geometric2D \
- -I$(srcdir)/../../INTERP_KERNEL/Bases \
- -I$(srcdir)/../../MEDCoupling \
- -I$(srcdir)/../
-
-# change motivated by the bug KERNEL4778.
-libparamedmemmedloader_la_LDFLAGS= ../../MEDCoupling/libmedcoupling.la \
-../../INTERP_KERNEL/libinterpkernel.la $(MPI_LIBS) $(MED2_LIBS) $(HDF5_LIBS)
#
include $(top_srcdir)/adm_local/unix/make_common_starter.am
-###
-# MEDLoader and Test sub-folders are available only if MED2 is detected ok
-###
-
-if MED_ENABLE_MED2
- SUBDIRS = . MEDLoader
-
-if CPPUNIT_IS_OK
- SUBDIRS += Test
-endif
-
-endif
-
-DIST_SUBDIRS = MEDLoader Test
-
lib_LTLIBRARIES= libparamedmem.la
salomeinclude_HEADERS= \
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include <sstream>
-#include <cmath>
-
-using namespace std;
-
-
-
-/*!
- * Tool to remove temporary files.
- * Allows automatique removal of temporary files in case of test failure.
- */
-MPIAccessDECTest_TmpFilesRemover::~MPIAccessDECTest_TmpFilesRemover()
-{
- set<string>::iterator it = myTmpFiles.begin();
- for (; it != myTmpFiles.end(); it++) {
- if (access((*it).data(), F_OK) == 0)
- remove((*it).data());
- }
- myTmpFiles.clear();
- //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
-}
-
-bool MPIAccessDECTest_TmpFilesRemover::Register(const string theTmpFile)
-{
- return (myTmpFiles.insert(theTmpFile)).second;
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#ifndef _MPIACCESSDECTEST_HXX_
-#define _MPIACCESSDECTEST_HXX_
-
-#include <cppunit/extensions/HelperMacros.h>
-
-#include <set>
-#include <string>
-#include <iostream>
-#include "mpi.h"
-
-
-class MPIAccessDECTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( MPIAccessDECTest );
- // CPPUNIT_TEST( test_AllToAllDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllDECAsynchronousPointToPoint ) ;
- //CPPUNIT_TEST( test_AllToAllvDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvDECAsynchronousPointToPoint ) ;
- //CPPUNIT_TEST( test_AllToAllTimeDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllTimeDECAsynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousNative ) ;
- //CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvTimeDECAsynchronousPointToPoint ) ;
- //CPPUNIT_TEST( test_AllToAllvTimeDoubleDECSynchronousPointToPoint ) ;
- CPPUNIT_TEST( test_AllToAllvTimeDoubleDECAsynchronousPointToPoint ) ;
- CPPUNIT_TEST_SUITE_END();
-
-
-public:
-
- MPIAccessDECTest():CppUnit::TestFixture(){}
- ~MPIAccessDECTest(){}
- void setUp(){}
- void tearDown(){}
- void test_AllToAllDECSynchronousPointToPoint() ;
- void test_AllToAllDECAsynchronousPointToPoint() ;
- void test_AllToAllvDECSynchronousPointToPoint() ;
- void test_AllToAllvDECAsynchronousPointToPoint() ;
- void test_AllToAllTimeDECSynchronousPointToPoint() ;
- void test_AllToAllTimeDECAsynchronousPointToPoint() ;
- void test_AllToAllvTimeDECSynchronousNative() ;
- void test_AllToAllvTimeDECSynchronousPointToPoint() ;
- void test_AllToAllvTimeDECAsynchronousPointToPoint() ;
- void test_AllToAllvTimeDoubleDECSynchronousPointToPoint() ;
- void test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() ;
-
-private:
- void test_AllToAllDEC( bool Asynchronous ) ;
- void test_AllToAllvDEC( bool Asynchronous ) ;
- void test_AllToAllTimeDEC( bool Asynchronous ) ;
- void test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) ;
- void test_AllToAllvTimeDoubleDEC( bool Asynchronous ) ;
- };
-
-// to automatically remove temporary files from disk
-class MPIAccessDECTest_TmpFilesRemover
-{
-public:
- MPIAccessDECTest_TmpFilesRemover() {}
- ~MPIAccessDECTest_TmpFilesRemover();
- bool Register(const std::string theTmpFile);
-
-private:
- std::set<std::string> myTmpFiles;
-};
-
-/*!
- * Tool to print array to stream.
- */
-template<class T>
-void MPIAccessDECTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
-{
- stream << text << ": {";
- if (length > 0) {
- stream << array[0];
- for (int i = 1; i < length; i++) {
- stream << ", " << array[i];
- }
- }
- stream << "}" << std::endl;
-};
-
-#endif
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include <sstream>
-#include <cmath>
-
-using namespace std;
-
-
-
-/*!
- * Tool to remove temporary files.
- * Allows automatique removal of temporary files in case of test failure.
- */
-MPIAccessTest_TmpFilesRemover::~MPIAccessTest_TmpFilesRemover()
-{
- set<string>::iterator it = myTmpFiles.begin();
- for (; it != myTmpFiles.end(); it++) {
- if (access((*it).data(), F_OK) == 0)
- remove((*it).data());
- }
- myTmpFiles.clear();
- //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
-}
-
-bool MPIAccessTest_TmpFilesRemover::Register(const string theTmpFile)
-{
- return (myTmpFiles.insert(theTmpFile)).second;
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#ifndef _MPIACCESSTEST_HXX_
-#define _MPIACCESSTEST_HXX_
-
-#include <cppunit/extensions/HelperMacros.h>
-
-#include <set>
-#include <string>
-#include <iostream>
-#include "mpi.h"
-
-
-class MPIAccessTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( MPIAccessTest );
- CPPUNIT_TEST( test_MPI_Access_Send_Recv ) ;
- CPPUNIT_TEST( test_MPI_Access_Cyclic_Send_Recv ) ;
- CPPUNIT_TEST( test_MPI_Access_SendRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_Cyclic_ISend_IRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_ISendRecv ) ;
- CPPUNIT_TEST( test_MPI_Access_Probe ) ;
- CPPUNIT_TEST( test_MPI_Access_IProbe ) ;
- CPPUNIT_TEST( test_MPI_Access_Cancel ) ;
- CPPUNIT_TEST( test_MPI_Access_Send_Recv_Length ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length_1 ) ;
- CPPUNIT_TEST( test_MPI_Access_Time ) ;
- CPPUNIT_TEST( test_MPI_Access_Time_0 ) ;
- CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_BottleNeck ) ;
- CPPUNIT_TEST_SUITE_END();
-
-
-public:
-
- MPIAccessTest():CppUnit::TestFixture(){}
- ~MPIAccessTest(){}
- void setUp(){}
- void tearDown(){}
- void test_MPI_Access_Send_Recv() ;
- void test_MPI_Access_Cyclic_Send_Recv() ;
- void test_MPI_Access_SendRecv() ;
- void test_MPI_Access_ISend_IRecv() ;
- void test_MPI_Access_Cyclic_ISend_IRecv() ;
- void test_MPI_Access_ISendRecv() ;
- void test_MPI_Access_Probe() ;
- void test_MPI_Access_IProbe() ;
- void test_MPI_Access_Cancel() ;
- void test_MPI_Access_Send_Recv_Length() ;
- void test_MPI_Access_ISend_IRecv_Length() ;
- void test_MPI_Access_ISend_IRecv_Length_1() ;
- void test_MPI_Access_Time() ;
- void test_MPI_Access_Time_0() ;
- void test_MPI_Access_ISend_IRecv_BottleNeck() ;
-
-private:
- };
-
-// to automatically remove temporary files from disk
-class MPIAccessTest_TmpFilesRemover
-{
-public:
- MPIAccessTest_TmpFilesRemover() {}
- ~MPIAccessTest_TmpFilesRemover();
- bool Register(const std::string theTmpFile);
-
-private:
- std::set<std::string> myTmpFiles;
-};
-
-/*!
- * Tool to print array to stream.
- */
-template<class T>
-void MPIAccessTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
-{
- stream << text << ": {";
- if (length > 0) {
- stream << array[0];
- for (int i = 1; i < length; i++) {
- stream << ", " << array[i];
- }
- }
- stream << "}" << std::endl;
-};
-
-#endif
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#ifndef _MPIMAINTEST_HXX_
-#define _MPIMAINTEST_HXX_
-
-#include <cppunit/CompilerOutputter.h>
-#include <cppunit/TestResult.h>
-#include <cppunit/TestResultCollector.h>
-#include <cppunit/TextTestProgressListener.h>
-#include <cppunit/BriefTestProgressListener.h>
-#include <cppunit/extensions/TestFactoryRegistry.h>
-#include <cppunit/TestRunner.h>
-#include <stdexcept>
-
-#include <mpi.h>
-
-#include <iostream>
-#include <fstream>
-#ifndef WIN32
-#include <fpu_control.h>
-#endif
-
-// ============================================================================
-/*!
- * Main program source for Unit Tests with cppunit package does not depend
- * on actual tests, so we use the same for all partial unit tests.
- */
-// ============================================================================
-
-int main(int argc, char* argv[])
-{
-#ifndef WIN32
- fpu_control_t cw = _FPU_DEFAULT & ~(_FPU_MASK_IM | _FPU_MASK_ZM | _FPU_MASK_OM);
- _FPU_SETCW(cw);
-#endif
- MPI_Init(&argc,&argv);
- int rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- // --- Create the event manager and test controller
- CPPUNIT_NS::TestResult controller;
-
- // --- Add a listener that colllects test result
- CPPUNIT_NS::TestResultCollector result;
- controller.addListener( &result );
-
- // --- Add a listener that print dots as test run.
-#ifdef WIN32
- CPPUNIT_NS::TextTestProgressListener progress;
-#else
- CPPUNIT_NS::BriefTestProgressListener progress;
-#endif
- controller.addListener( &progress );
-
- // --- Get the top level suite from the registry
-
- CPPUNIT_NS::Test *suite =
- CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest();
-
- // --- Adds the test to the list of test to run
-
- CPPUNIT_NS::TestRunner runner;
- runner.addTest( suite );
- runner.run( controller);
-
- // --- Print test in a compiler compatible format.
-
- std::ostringstream testFileName;
- testFileName<<"UnitTestResult"<<rank;
- std::ofstream testFile;
- testFile.open(testFileName.str().c_str(), std::ios::out | std::ios::trunc);
- //CPPUNIT_NS::CompilerOutputter outputter( &result, std::cerr );
- CPPUNIT_NS::CompilerOutputter outputter( &result, testFile );
- outputter.write();
-
- // --- Run the tests.
-
- bool wasSucessful = result.wasSuccessful();
- testFile.close();
-
- // --- Return error code 1 if the one of test failed.
-
- MPI_Finalize();
-
- return wasSucessful ? 0 : 1;
-}
-
-#endif
+++ /dev/null
-# Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-#
-
-include $(top_srcdir)/adm_local/unix/make_common_starter.am
-
-lib_LTLIBRARIES = libParaMEDMEMTest.la
-
-salomeinclude_HEADERS = \
- MPIMainTest.hxx \
- MPIAccessDECTest.hxx \
- MPIAccessTest.hxx \
- ParaMEDMEMTest.hxx
-
-EXTRA_DIST += MPIMainTest.hxx ParaMEDMEMTest_NonCoincidentDEC.cxx
-
-dist_libParaMEDMEMTest_la_SOURCES = \
- ParaMEDMEMTest.cxx \
- ParaMEDMEMTest_MPIProcessorGroup.cxx \
- ParaMEDMEMTest_BlockTopology.cxx \
- ParaMEDMEMTest_InterpKernelDEC.cxx \
- ParaMEDMEMTest_StructuredCoincidentDEC.cxx \
- ParaMEDMEMTest_MEDLoader.cxx \
- ParaMEDMEMTest_ICocoTrio.cxx \
- ParaMEDMEMTest_Gauthier1.cxx \
- MPIAccessDECTest.cxx \
- test_AllToAllDEC.cxx \
- test_AllToAllvDEC.cxx \
- test_AllToAllTimeDEC.cxx \
- test_AllToAllvTimeDEC.cxx \
- test_AllToAllvTimeDoubleDEC.cxx \
- MPIAccessTest.cxx \
- test_MPI_Access_Send_Recv.cxx \
- test_MPI_Access_Cyclic_Send_Recv.cxx \
- test_MPI_Access_SendRecv.cxx \
- test_MPI_Access_ISend_IRecv.cxx \
- test_MPI_Access_Cyclic_ISend_IRecv.cxx \
- test_MPI_Access_ISendRecv.cxx \
- test_MPI_Access_Probe.cxx \
- test_MPI_Access_IProbe.cxx \
- test_MPI_Access_Cancel.cxx \
- test_MPI_Access_Send_Recv_Length.cxx \
- test_MPI_Access_ISend_IRecv_Length.cxx \
- test_MPI_Access_ISend_IRecv_Length_1.cxx \
- test_MPI_Access_Time.cxx \
- test_MPI_Access_Time_0.cxx \
- test_MPI_Access_ISend_IRecv_BottleNeck.cxx
-
-
-libParaMEDMEMTest_la_CPPFLAGS = \
- @CPPUNIT_INCLUDES@ \
- $(MPI_INCLUDES) \
- -I$(srcdir)/../../INTERP_KERNEL \
- -I$(srcdir)/../../INTERP_KERNEL/Bases \
- -I$(srcdir)/../../INTERP_KERNEL/Geometric2D \
- -I$(srcdir)/../../ParaMEDMEM \
- -I$(srcdir)/../../MEDCoupling \
- -I$(srcdir)/../MEDLoader
-
-libParaMEDMEMTest_la_LDFLAGS = \
- ../libparamedmem.la \
- ../MEDLoader/libparamedmemmedloader.la \
- @CPPUNIT_LIBS@ $(MPI_LIBS)
-
-# Executables targets
-bin_PROGRAMS = \
- TestParaMEDMEM \
- TestMPIAccessDEC \
- TestMPIAccess \
- test_perf
-
-dist_TestParaMEDMEM_SOURCES = TestParaMEDMEM.cxx
-dist_TestMPIAccessDEC_SOURCES = TestMPIAccessDEC.cxx
-dist_TestMPIAccess_SOURCES = TestMPIAccess.cxx
-dist_test_perf_SOURCES = test_perf.cxx
-
-LDADD = $(MED2_LIBS) $(libMEDMEMTest_la_LDFLAGS) -lm $(MPI_LIBS) \
- libParaMEDMEMTest.la \
- ../../INTERP_KERNEL/libinterpkernel.la
-
-if MED_ENABLE_FVM
- LDADD += $(FVM_LIBS)
- dist_libParaMEDMEMTest_la_SOURCES += ParaMEDMEMTest_NonCoincidentDEC.cxx
- libParaMEDMEMTest_la_CPPFLAGS += -DMED_ENABLE_FVM $(FVM_INCLUDES)
- libParaMEDMEMTest_la_LDFLAGS += $(FVM_LIBS)
-endif
-
-AM_CPPFLAGS += $(libParaMEDMEMTest_la_CPPFLAGS)
-
-UNIT_TEST_PROG = TestParaMEDMEM
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include <sstream>
-#include <cmath>
-#include <list>
-#include <stdexcept>
-#include <stdlib.h>
-
-//================================================================================
-/*!
- * \brief Get path to the resources file.
- *
- * When running 'make test' source file is taken from MED_SRC/resources folder.
- * Otherwise, file is searched in ${MED_ROOT_DIR}/share/salome/resources/med folder.
- *
- * \param filename name of the resource file (should not include a path)
- * \return full path to the resource file
- */
-//================================================================================
-
-std::string ParaMEDMEMTest::getResourceFile( const std::string& filename )
-{
- std::string resourceFile = "";
-
- if ( getenv("top_srcdir") ) {
- // we are in 'make check' step
- resourceFile = getenv("top_srcdir");
- resourceFile += "/resources/";
- }
- else if ( getenv("MED_ROOT_DIR") ) {
- // use MED_ROOT_DIR env.var
- resourceFile = getenv("MED_ROOT_DIR");
- resourceFile += "/share/salome/resources/med/";
- }
- resourceFile += filename;
- return resourceFile;
-}
-
-
-//================================================================================
-/*!
- * \brief Returns writable temporary directory
- * \return full path to the temporary directory
- */
-//================================================================================
-
-std::string ParaMEDMEMTest::getTmpDirectory()
-{
- std::string path;
-
- std::list<std::string> dirs;
- if ( getenv("TMP") ) dirs.push_back( getenv("TMP" ));
- if ( getenv("TMPDIR") ) dirs.push_back( getenv("TMPDIR" ));
- dirs.push_back( "/tmp" );
-
- std::string tmpd = "";
- for ( std::list<std::string>::iterator dir = dirs.begin(); dir != dirs.end() && tmpd == "" ; ++dir ) {
- if ( access( dir->data(), W_OK ) == 0 ) {
- tmpd = dir->data();
- }
- }
-
- if ( tmpd == "" )
- throw std::runtime_error("Can't find writable temporary directory. Set TMP environment variable");
-
- return tmpd;
-}
-
-//================================================================================
-/*!
- * \brief Creates a copy of source file (if source file is specified)
- * in the temporary directory and returns a path to the tmp file
- *
- * \param tmpfile name of the temporary file (without path)
- * \param srcfile source file
- * \return path to the temporary file
- */
-//================================================================================
-std::string ParaMEDMEMTest::makeTmpFile( const std::string& tmpfile, const std::string& srcfile )
-{
- std::string tmpf = getTmpDirectory() + "/" + tmpfile;
- if ( srcfile != "" ) {
- std::string cmd = "cp " + srcfile + " " + tmpf + " ; chmod +w " + tmpf;
- system( cmd.c_str() );
- }
- return tmpf;
-}
-
-
-/*!
- * Tool to remove temporary files.
- * Allows automatique removal of temporary files in case of test failure.
- */
-ParaMEDMEMTest_TmpFilesRemover::~ParaMEDMEMTest_TmpFilesRemover()
-{
- std::set<std::string>::iterator it = myTmpFiles.begin();
- for (; it != myTmpFiles.end(); it++) {
- if (access((*it).data(), F_OK) == 0)
- remove((*it).data());
- }
- myTmpFiles.clear();
- //cout << "~ParaMEDMEMTest_TmpFilesRemover()" << endl;
-}
-
-bool ParaMEDMEMTest_TmpFilesRemover::Register(const std::string theTmpFile)
-{
- return (myTmpFiles.insert(theTmpFile)).second;
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#ifndef _ParaMEDMEMTEST_HXX_
-#define _ParaMEDMEMTEST_HXX_
-
-#include <cppunit/extensions/HelperMacros.h>
-
-#include <set>
-#include <string>
-#include <iostream>
-#include "mpi.h"
-
-
-class ParaMEDMEMTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( ParaMEDMEMTest );
- CPPUNIT_TEST(testMPIProcessorGroup_constructor);
- CPPUNIT_TEST(testMPIProcessorGroup_boolean);
- CPPUNIT_TEST(testMPIProcessorGroup_rank);
- CPPUNIT_TEST(testBlockTopology_constructor);
- CPPUNIT_TEST(testBlockTopology_serialize);
- CPPUNIT_TEST(testInterpKernelDEC_2D);
- CPPUNIT_TEST(testInterpKernelDEC2_2D);
- CPPUNIT_TEST(testInterpKernelDEC_2DP0P1);
- CPPUNIT_TEST(testInterpKernelDEC_3D);
- CPPUNIT_TEST(testInterpKernelDECNonOverlapp_2D_P0P0);
- CPPUNIT_TEST(testInterpKernelDECNonOverlapp_2D_P0P1P1P0);
-
- CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D);
- CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D);
- CPPUNIT_TEST(testSynchronousEqualInterpKernelDEC_2D);
- CPPUNIT_TEST(testSynchronousFasterSourceInterpKernelDEC_2D);
- CPPUNIT_TEST(testSynchronousSlowerSourceInterpKernelDEC_2D);
- CPPUNIT_TEST(testSynchronousSlowSourceInterpKernelDEC_2D);
- CPPUNIT_TEST(testSynchronousFastSourceInterpKernelDEC_2D);
- CPPUNIT_TEST(testAsynchronousEqualInterpKernelDEC_2D);
- CPPUNIT_TEST(testAsynchronousFasterSourceInterpKernelDEC_2D);
- CPPUNIT_TEST(testAsynchronousSlowerSourceInterpKernelDEC_2D);
- CPPUNIT_TEST(testAsynchronousSlowSourceInterpKernelDEC_2D);
- CPPUNIT_TEST(testAsynchronousFastSourceInterpKernelDEC_2D);
-#ifdef MED_ENABLE_FVM
- //can be added again after FVM correction for 2D
- // CPPUNIT_TEST(testNonCoincidentDEC_2D);
- CPPUNIT_TEST(testNonCoincidentDEC_3D);
-#endif
- CPPUNIT_TEST(testStructuredCoincidentDEC);
- CPPUNIT_TEST(testStructuredCoincidentDEC);
- CPPUNIT_TEST(testICocoTrio1);
- CPPUNIT_TEST(testGauthier1);
- CPPUNIT_TEST(testGauthier2);
- CPPUNIT_TEST(testMEDLoaderRead1);
- CPPUNIT_TEST(testMEDLoaderPolygonRead);
- CPPUNIT_TEST(testMEDLoaderPolyhedronRead);
- //CPPUNIT_TEST(testMEDLoaderWrite1);
- //CPPUNIT_TEST(testMEDLoaderPolygonWrite);
- CPPUNIT_TEST_SUITE_END();
-
-
-public:
-
- ParaMEDMEMTest():CppUnit::TestFixture(){}
- ~ParaMEDMEMTest(){}
- void setUp(){}
- void tearDown(){}
- void testMPIProcessorGroup_constructor();
- void testMPIProcessorGroup_boolean();
- void testMPIProcessorGroup_rank();
- void testBlockTopology_constructor();
- void testBlockTopology_serialize();
- void testInterpKernelDEC_2D();
- void testInterpKernelDEC2_2D();
- void testInterpKernelDEC_2DP0P1();
- void testInterpKernelDEC_3D();
- void testInterpKernelDECNonOverlapp_2D_P0P0();
- void testInterpKernelDECNonOverlapp_2D_P0P1P1P0();
-#ifdef MED_ENABLE_FVM
- void testNonCoincidentDEC_2D();
- void testNonCoincidentDEC_3D();
-#endif
- void testStructuredCoincidentDEC();
- void testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D();
- void testSynchronousEqualInterpKernelWithoutInterpDEC_2D();
- void testSynchronousEqualInterpKernelDEC_2D();
- void testSynchronousFasterSourceInterpKernelDEC_2D();
- void testSynchronousSlowerSourceInterpKernelDEC_2D();
- void testSynchronousSlowSourceInterpKernelDEC_2D();
- void testSynchronousFastSourceInterpKernelDEC_2D();
-
- void testAsynchronousEqualInterpKernelDEC_2D();
- void testAsynchronousFasterSourceInterpKernelDEC_2D();
- void testAsynchronousSlowerSourceInterpKernelDEC_2D();
- void testAsynchronousSlowSourceInterpKernelDEC_2D();
- void testAsynchronousFastSourceInterpKernelDEC_2D();
- //
- void testICocoTrio1();
- void testGauthier1();
- void testGauthier2();
- //
- void testMEDLoaderRead1();
- void testMEDLoaderPolygonRead();
- void testMEDLoaderPolyhedronRead();
- void testMEDLoaderWrite1();
- void testMEDLoaderPolygonWrite();
-
- std::string getResourceFile( const std::string& );
- std::string getTmpDirectory();
- std::string makeTmpFile( const std::string&, const std::string& = "" );
-
-private:
- void testNonCoincidentDEC(const std::string& filename1,
- const std::string& meshname1,
- const std::string& filename2,
- const std::string& meshname2,
- int nbprocsource, double epsilon);
- void testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA,
- double dtB, double tmaxB,
- bool WithPointToPoint, bool Asynchronous, bool WithInterp, const char *srcMeth, const char *targetMeth);
- void testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth);
- void testInterpKernelDEC2_2D_(const char *srcMeth, const char *targetMeth);
- void testInterpKernelDEC_3D_(const char *srcMeth, const char *targetMeth);
-};
-
-// to automatically remove temporary files from disk
-class ParaMEDMEMTest_TmpFilesRemover
-{
-public:
- ParaMEDMEMTest_TmpFilesRemover() {}
- ~ParaMEDMEMTest_TmpFilesRemover();
- bool Register(const std::string theTmpFile);
-
-private:
- std::set<std::string> myTmpFiles;
-};
-
-/*!
- * Tool to print array to stream.
- */
-template<class T>
-void ParaMEDMEMTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
-{
- stream << text << ": {";
- if (length > 0) {
- stream << array[0];
- for (int i = 1; i < length; i++) {
- stream << ", " << array[i];
- }
- }
- stream << "}" << std::endl;
-};
-
-#endif
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include "InterpolationUtils.hxx"
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "Topology.hxx"
-#include "BlockTopology.hxx"
-
-#include <string>
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-/*
- * Check methods defined in BlockTopology.hxx
- *
- BlockTopology(){};
- BlockTopology(const ProcessorGroup& group, const MEDMEM::GRID& grid);
- BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo);
- (+) BlockTopology(const ProcessorGroup& group, int nb_elem);
- virtual ~BlockTopology();
- (+) inline int getNbElements()const;
- (+) inline int getNbLocalElements() const;
- const ProcessorGroup* getProcGroup()const {return _proc_group;};
- (+) inline std::pair<int,int> globalToLocal (const int) const ;
- (+) inline int localToGlobal (const std::pair<int,int>) const;
- (+) std::vector<std::pair<int,int> > getLocalArrayMinMax() const ;
- (+) int getDimension() const {return _dimension;};
- (+) void serialize(int* & serializer, int& size) const ;
- (+) void unserialize(const int* serializer, const CommInterface& comm_interface);
-
- */
-
-void ParaMEDMEMTest::testBlockTopology_constructor()
-{
- //test constructor
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- int rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- CommInterface interface;
- MPIProcessorGroup group(interface);
- BlockTopology blocktopo(group,1);
- CPPUNIT_ASSERT_EQUAL(1,blocktopo.getNbLocalElements());
- CPPUNIT_ASSERT_EQUAL(size,blocktopo.getNbElements());
- CPPUNIT_ASSERT_EQUAL(1,blocktopo.getDimension());
-
- //checking access methods
- BlockTopology blocktopo2(group,2);
- std::pair<int,int> local= blocktopo2.globalToLocal(0);
- CPPUNIT_ASSERT_EQUAL(local.first,0);
- CPPUNIT_ASSERT_EQUAL(local.second,0);
- int global=blocktopo2.localToGlobal(local);
- CPPUNIT_ASSERT_EQUAL(global,0);
-
- local = blocktopo2.globalToLocal(1);
- CPPUNIT_ASSERT_EQUAL(local.first,0);
- CPPUNIT_ASSERT_EQUAL(local.second,1);
- global=blocktopo2.localToGlobal(local);
- CPPUNIT_ASSERT_EQUAL(global,1);
-
- local = blocktopo2.globalToLocal(2*size-1);
- CPPUNIT_ASSERT_EQUAL(local.first,size-1);
- CPPUNIT_ASSERT_EQUAL(local.second,1);
- global=blocktopo2.localToGlobal(local);
- CPPUNIT_ASSERT_EQUAL(global,2*size-1);
-
- std::vector<std::pair<int,int> > bounds = blocktopo2.getLocalArrayMinMax();
- int vecsize = bounds.size();
- CPPUNIT_ASSERT_EQUAL(1,vecsize);
- CPPUNIT_ASSERT_EQUAL(2*rank, (bounds[0]).first);
- CPPUNIT_ASSERT_EQUAL(2*rank+2, (bounds[0]).second);
- }
-
-void ParaMEDMEMTest::testBlockTopology_serialize()
-{
-
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- int rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- CommInterface interface;
- MPIProcessorGroup group(interface);
- BlockTopology blocktopo(group,3);
-
-//testing the serialization process that is used to transfer a
-//block topology via a MPI_Send/Recv comm
- BlockTopology blocktopo_recv;
- int* serializer;
- int sersize;
- blocktopo.serialize(serializer,sersize);
- blocktopo_recv.unserialize(serializer,interface);
- CPPUNIT_ASSERT_EQUAL(blocktopo.getNbElements(),blocktopo_recv.getNbElements());
- delete [] serializer;
-}
+++ /dev/null
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include <string>
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "DEC.hxx"
-#include "InterpKernelDEC.hxx"
-#include <set>
-#include <time.h>
-#include "ICoCoTrioField.hxx"
-#include <iostream>
-#include <assert.h>
-#include <math.h>
-
-using namespace std;
-using namespace ParaMEDMEM;
-using namespace ICoCo;
-
-void afficheGauthier1( const TrioField& field, const double *vals, int lgth)
-{
- CPPUNIT_ASSERT_EQUAL(lgth,field._nb_elems);
- for (int ele=0;ele<field._nb_elems;ele++)
- CPPUNIT_ASSERT_DOUBLES_EQUAL(vals[ele],field._field[ele],1e-12);
-}
-
-void remplit_coordGauthier1(double* coords)
-{
- double angle,epaisseur;
- angle=0*45*(asin(1)/90);
- epaisseur=1e-0;
- coords[0*3+0]=0.;
- coords[0*3+1]=0.;
- coords[0*3+2]=0.;
-
- coords[1*3+0]=cos(angle);
- coords[1*3+1]=0.;
- coords[1*3+2]=sin(angle);
-
-
- coords[2*3+0]=-sin(angle);
- coords[2*3+1]=0.;
- coords[2*3+2]=cos(angle);
-
- for (int d=0;d<3;d++)
- coords[3*3+d]=coords[1*3+d]+ coords[2*3+d];
-
- for (int i=4;i<8;i++)
- {
- for (int d=0;d<3;d++)
- coords[i*3+d]=coords[(i-4)*3+d];
- coords[i*3+1]+=epaisseur;
- }
-
-}
-
-void init_quadGauthier1(TrioField& champ_quad,int is_master)
-{
-
- champ_quad.setName("champ_quad");
- champ_quad._space_dim=3;
- champ_quad._mesh_dim=2;
- champ_quad._nodes_per_elem=4;
- champ_quad._itnumber=0;
- champ_quad._time1=0;
- champ_quad._time2=1;
- champ_quad._nb_field_components=1;
-
- if (is_master)
- {
- champ_quad._nbnodes=8;
- champ_quad._nb_elems=2;
-
- champ_quad._coords=new double[champ_quad._nbnodes*champ_quad._space_dim];
- //memcpy(afield._coords,sommets.addr(),champ_quad._nbnodes*champ_quad._space_dim*sizeof(double));
-
- remplit_coordGauthier1(champ_quad._coords);
-
-
- champ_quad._connectivity=new int[champ_quad._nb_elems*champ_quad._nodes_per_elem];
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+0]=0;
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+1]=1;
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+2]=3;
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+3]=2;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+0]=4;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+1]=5;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+2]=7;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+3]=6;
-
- }
- else
- {
- champ_quad._nbnodes=0;
- champ_quad._nb_elems=0;
- champ_quad._coords=new double[champ_quad._nbnodes*champ_quad._space_dim];
-
- }
- champ_quad._has_field_ownership=false;
- champ_quad._field=0;
- //champ_quad._field=new double[champ_quad._nb_elems];
- // assert(champ_quad._nb_field_components==1);
-}
-void init_triangleGauthier1(TrioField& champ_triangle,int is_master)
-{
-
- champ_triangle.setName("champ_triangle");
- champ_triangle._space_dim=3;
- champ_triangle._mesh_dim=2;
- champ_triangle._nodes_per_elem=3;
- champ_triangle._itnumber=0;
- champ_triangle._time1=0;
- champ_triangle._time2=1;
- champ_triangle._nb_field_components=1;
-
- if (is_master)
- {
- champ_triangle._nb_elems=4;
- champ_triangle._nbnodes=8;
-
- champ_triangle._coords=new double[champ_triangle._nbnodes*champ_triangle._space_dim];
- //memcpy(afield._coords,sommets.addr(),champ_triangle._nbnodes*champ_triangle._space_dim*sizeof(double));
- remplit_coordGauthier1(champ_triangle._coords);
-
- champ_triangle._connectivity=new int[champ_triangle._nb_elems*champ_triangle._nodes_per_elem];
- champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+0]=0;
- champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+1]=1;
- champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+2]=2;
- champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+0]=1;
- champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+1]=2;
- champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+2]=3;
-
- champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+0]=4;
- champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+1]=5;
- champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+2]=7;
- champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+0]=4;
- champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+1]=6;
- champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+2]=7;
- }
- else
- {
- champ_triangle._nb_elems=0;
- champ_triangle._nbnodes=0;
- champ_triangle._coords=new double[champ_triangle._nbnodes*champ_triangle._space_dim];
-
- }
- champ_triangle._has_field_ownership=false;
- // champ_triangle._field=new double[champ_triangle._nb_elems];
- champ_triangle._field=0;
-
-}
-
-
-void ParaMEDMEMTest::testGauthier1()
-{
- int num_cas=0;
- int rank, size;
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- MPI_Comm_size(MPI_COMM_WORLD,&size);
-
- int is_master=0;
-
- CommInterface comm;
- set<int> emetteur_ids;
- set<int> recepteur_ids;
- emetteur_ids.insert(0);
- if(size!=4)
- return;
- recepteur_ids.insert(1);
- if (size >2)
- recepteur_ids.insert(2);
- if (size >2)
- emetteur_ids.insert(3);
- if ((rank==0)||(rank==1))
- is_master=1;
-
- MPIProcessorGroup recepteur_group(comm,recepteur_ids);
- MPIProcessorGroup emetteur_group(comm,emetteur_ids);
-
-
- string cas;
- if (recepteur_group.containsMyRank())
- {
- cas="recepteur";
- //freopen("recpeteur.out","w",stdout);
- //freopen("recepteur.err","w",stderr);
-
- }
- else
- {
- cas="emetteur";
- // freopen("emetteur.out","w",stdout);
- //freopen("emetteur.err","w",stderr);
- }
- double expected[8][4]={
- {1.,1.,1.,1.},
- {40., 40., 1., 1.},
- {1.,1.,1e200,1e200},
- {40.,1.,1e200,1e200},
- {1.,1.,1.,1.},
- {40.,1.,1.,1.},
- {1.,1.,1e200,1e200},
- {20.5,1.,1e200,1e200}
- };
-
- int expectedLgth[8]={4,4,2,2,4,4,2,2};
-
- for (int send=0;send<2;send++)
- for (int rec=0;rec<2;rec++)
- {
- InterpKernelDEC dec_emetteur(emetteur_group, recepteur_group);
- dec_emetteur.setOrientation(2);
- TrioField champ_emetteur, champ_recepteur;
-
- if (send==0)
- init_quadGauthier1(champ_emetteur,is_master);
- else
- init_triangleGauthier1(champ_emetteur,is_master);
- if (rec==0)
- init_triangleGauthier1(champ_recepteur,is_master);
- else
- init_quadGauthier1(champ_recepteur,is_master);
-
- if (cas=="emetteur")
- {
- champ_emetteur._field=new double[champ_emetteur._nb_elems];
- for (int ele=0;ele<champ_emetteur._nb_elems;ele++)
- champ_emetteur._field[ele]=1;
-
- champ_emetteur._has_field_ownership=true;
- }
-
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- clock_t clock0= clock ();
- int compti=0;
-
- bool init=true; // first time step ??
- bool stop=false;
- //boucle sur les pas de quads
- while (!stop) {
-
- compti++;
- clock_t clocki= clock ();
- //cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl;
- for (int non_unif=0;non_unif<2;non_unif++)
- {
- // if (champ_recepteur._field)
- // delete [] champ_recepteur._field;
- champ_recepteur._field=0;
- // champ_recepteur._has_field_ownership=false;
-
-
-
- if (cas=="emetteur")
- {
- if (non_unif)
- if(rank!=3)
- champ_emetteur._field[0]=40;
- }
- bool ok=false; // Is the time interval successfully solved ?
-
- // Loop on the time interval tries
- if(1) {
-
-
- if (cas=="emetteur")
- dec_emetteur.attachLocalField((ICoCo::Field*) &champ_emetteur);
- else
- dec_emetteur.attachLocalField((ICoCo::Field*) &champ_recepteur);
-
-
- if(init) dec_emetteur.synchronize();
- init=false;
-
- if (cas=="emetteur") {
- // affiche(champ_emetteur);
- dec_emetteur.sendData();
- }
- else if (cas=="recepteur")
- {
- dec_emetteur.recvData();
- if (is_master)
- afficheGauthier1(champ_recepteur,expected[num_cas],expectedLgth[num_cas]);
- }
- else
- throw 0;
- MPI_Barrier(MPI_COMM_WORLD);
- }
- stop=true;
- num_cas++;
- }
- // destruction des champs, des DEC, et des tableaux associés
- }
- }
-}
-
-void ParaMEDMEMTest::testGauthier2()
-{
- const char save_vit_in_2[]="VITESSE_P1_OUT\n1\n2\n3\n63\n3\n80\n0\n 0 1 2\n 3 4 5\n 6 7 8\n 9 10 11\n 12 13 14\n 15 16 17\n 18 19 20\n 21 22 23\n 24 25 26\n 27 28 29\n 30 2 1\n 31 5 4\n 32 8 7\n 33 11 10\n 34 14 13\n 35 17 16\n 36 20 19\n 37 23 22\n 38 26 25\n 39 29 28\n 30 40 2\n 31 41 5\n 32 42 8\n 33 43 11\n 34 44 14\n 35 45 17\n 36 46 20\n 37 47 23\n 38 48 26\n 39 49 29\n 31 2 40\n 32 5 41\n 33 8 42\n 34 11 43\n 35 14 44\n 36 17 45\n 37 20 46\n 38 23 47\n 39 26 48\n 50 29 49\n 3 2 4\n 6 5 7\n 9 8 10\n 12 11 13\n 15 14 16\n 18 17 19\n 21 20 22\n 24 23 25\n 27 26 28\n 51 29 52\n 31 4 2\n 32 7 5\n 33 10 8\n 34 13 11\n 35 16 14\n 36 19 17\n 37 22 20\n 38 25 23\n 39 28 26\n 50 52 29\n 0 2 53\n 3 5 54\n 6 8 55\n 9 11 56\n 12 14 57\n 15 17 58\n 18 20 59\n 21 23 60\n 24 26 61\n 27 29 62\n 3 53 2\n 6 54 5\n 9 55 8\n 12 56 11\n 15 57 14\n 18 58 17\n 21 59 20\n 24 60 23\n 27 61 26\n 51 62 29\n 0 0 0\n 0.5 0 0\n 0.5 0.05 0\n 0 0.1 0\n 0.5 0.1 0\n 0.5 0.15 0\n 0 0.2 0\n 0.5 0.2 0\n 0.5 0.25 0\n 0 0.3 0\n 0.5 0.3 0\n 0.5 0.35 0\n 0 0.4 0\n 0.5 0.4 0\n 0.5 0.45 0\n 0 0.5 0\n 0.5 0.5 0\n 0.5 0.55 0\n 0 0.6 0\n 0.5 0.6 0\n 0.5 0.65 0\n 0 0.7 0\n 0.5 0.7 0\n 0.5 0.75 0\n 0 0.8 0\n 0.5 0.8 0\n 0.5 0.85 0\n 0 0.9 0\n 0.5 0.9 0\n 0.5 0.95 0\n 1 0 0\n 1 0.1 0\n 1 0.2 0\n 1 0.3 0\n 1 0.4 0\n 1 0.5 0\n 1 0.6 0\n 1 0.7 0\n 1 0.8 0\n 1 0.9 0\n 1 0.05 0\n 1 0.15 0\n 1 0.25 0\n 1 0.35 0\n 1 0.45 0\n 1 0.55 0\n 1 0.65 0\n 1 0.75 0\n 1 0.85 0\n 1 0.95 0\n 1 1 0\n 0 1 0\n 0.5 1 0\n 0 0.05 0\n 0 0.15 0\n 0 0.25 0\n 0 0.35 0\n 0 0.45 0\n 0 0.55 0\n 0 0.65 0\n 0 0.75 0\n 0 0.85 0\n 0 0.95 0\n2.9268\n3.1707\n3\n1\n 0 0 0\n 0 0 0\n 0 0 0.05\n 0 0 0.1\n 0 0 0.1\n 0 0 0.15\n 0 0 0.2\n 0 0 0.2\n 0 0 0.25\n 0 0 0.3\n 0 0 0.3\n 0 0 0.35\n 0 0 0.4\n 0 0 0.4\n 0 0 0.45\n 0 0 0.5\n 0 0 0.5\n 0 0 0.55\n 0 0 0.6\n 0 0 0.6\n 0 0 0.65\n 0 0 0.7\n 0 0 0.7\n 0 0 0.75\n 0 0 0.8\n 0 0 0.8\n 0 0 0.85\n 0 0 0.9\n 0 0 0.9\n 0 0 0.95\n 0 0 0\n 0 0 0.1\n 0 0 0.2\n 0 0 0.3\n 0 0 0.4\n 0 0 0.5\n 0 0 0.6\n 0 0 0.7\n 0 
0 0.8\n 0 0 0.9\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n 0 0 1\n 0 0 1\n 0 0 1\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n1\n";
-
- const char save_vit_out_0_2[]="vitesse_in_chaude\n0\n2\n3\n22\n4\n10\n-1081737852\n 0 1 3 2\n 2 3 5 4\n 4 5 7 6\n 6 7 9 8\n 8 9 11 10\n 10 11 13 12\n 12 13 15 14\n 14 15 17 16\n 16 17 19 18\n 18 19 21 20\n 0 0 0\n 1 0 0\n 0 0.1 0\n 1 0.1 0\n 0 0.2 0\n 1 0.2 0\n 0 0.3 0\n 1 0.3 0\n 0 0.4 0\n 1 0.4 0\n 0 0.5 0\n 1 0.5 0\n 0 0.6 0\n 1 0.6 0\n 0 0.7 0\n 1 0.7 0\n 0 0.8 0\n 1 0.8 0\n 0 0.9 0\n 1 0.9 0\n 0 1 0\n 1 1 0\n2.9268\n3.1707\n3\n1\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n0\n";
- const char save_vit_out_1_2[]="vitesse_in_chaude\n1\n2\n3\n22\n4\n10\n-1081737852\n 0 1 3 2\n 2 3 5 4\n 4 5 7 6\n 6 7 9 8\n 8 9 11 10\n 10 11 13 12\n 12 13 15 14\n 14 15 17 16\n 16 17 19 18\n 18 19 21 20\n 0 0 0\n 1 0 0\n 0 0.1 0\n 1 0.1 0\n 0 0.2 0\n 1 0.2 0\n 0 0.3 0\n 1 0.3 0\n 0 0.4 0\n 1 0.4 0\n 0 0.5 0\n 1 0.5 0\n 0 0.6 0\n 1 0.6 0\n 0 0.7 0\n 1 0.7 0\n 0 0.8 0\n 1 0.8 0\n 0 0.9 0\n 1 0.9 0\n 0 1 0\n 1 1 0\n2.9268\n3.1707\n3\n1\n 0 0 0.029375\n 0 0 0.029375\n 0 0 0.1\n 0 0 0.1\n 0 0 0.2\n 0 0 0.2\n 0 0 0.3\n 0 0 0.3\n 0 0 0.4\n 0 0 0.4\n 0 0 0.5\n 0 0 0.5\n 0 0 0.6\n 0 0 0.6\n 0 0 0.7\n 0 0 0.7\n 0 0 0.8\n 0 0 0.8\n 0 0 0.9\n 0 0 0.9\n 0 0 0.970625\n 0 0 0.970625\n0\n";
-
- const char *save_vit_outs[2]={save_vit_out_1_2,save_vit_out_0_2};
-
- const char save_vit_out_1_0[]="vitesse_in_chaude\n1\n2\n3\n22\n4\n10\n-1081737852\n 0 1 3 2\n 2 3 5 4\n 4 5 7 6\n 6 7 9 8\n 8 9 11 10\n 10 11 13 12\n 12 13 15 14\n 14 15 17 16\n 16 17 19 18\n 18 19 21 20\n 0 0 0\n 1 0 0\n 0 0.1 0\n 1 0.1 0\n 0 0.2 0\n 1 0.2 0\n 0 0.3 0\n 1 0.3 0\n 0 0.4 0\n 1 0.4 0\n 0 0.5 0\n 1 0.5 0\n 0 0.6 0\n 1 0.6 0\n 0 0.7 0\n 1 0.7 0\n 0 0.8 0\n 1 0.8 0\n 0 0.9 0\n 1 0.9 0\n 0 1 0\n 1 1 0\n2.9268\n3.1707\n3\n1\n 0 0 0.029375\n 0 0 0.029375\n 0 0 0.1\n 0 0 0.1\n 0 0 0.2\n 0 0 0.2\n 0 0 0.3\n 0 0 0.3\n 0 0 0.4\n 0 0 0.4\n 0 0 0.5\n 0 0 0.5\n 0 0 0.6\n 0 0 0.6\n 0 0 0.7\n 0 0 0.7\n 0 0 0.8\n 0 0 0.8\n 0 0 0.9\n 0 0 0.9\n 0 0 0.970625\n 0 0 0.970625\n0\n";
-
- const char save_vit_in[]="VITESSE_P1_OUT\n1\n2\n3\n63\n3\n80\n0\n 0 1 2\n 3 4 5\n 6 7 8\n 9 10 11\n 12 13 14\n 15 16 17\n 18 19 20\n 21 22 23\n 24 25 26\n 27 28 29\n 30 2 1\n 31 5 4\n 32 8 7\n 33 11 10\n 34 14 13\n 35 17 16\n 36 20 19\n 37 23 22\n 38 26 25\n 39 29 28\n 30 40 2\n 31 41 5\n 32 42 8\n 33 43 11\n 34 44 14\n 35 45 17\n 36 46 20\n 37 47 23\n 38 48 26\n 39 49 29\n 31 2 40\n 32 5 41\n 33 8 42\n 34 11 43\n 35 14 44\n 36 17 45\n 37 20 46\n 38 23 47\n 39 26 48\n 50 29 49\n 3 2 4\n 6 5 7\n 9 8 10\n 12 11 13\n 15 14 16\n 18 17 19\n 21 20 22\n 24 23 25\n 27 26 28\n 51 29 52\n 31 4 2\n 32 7 5\n 33 10 8\n 34 13 11\n 35 16 14\n 36 19 17\n 37 22 20\n 38 25 23\n 39 28 26\n 50 52 29\n 0 2 53\n 3 5 54\n 6 8 55\n 9 11 56\n 12 14 57\n 15 17 58\n 18 20 59\n 21 23 60\n 24 26 61\n 27 29 62\n 3 53 2\n 6 54 5\n 9 55 8\n 12 56 11\n 15 57 14\n 18 58 17\n 21 59 20\n 24 60 23\n 27 61 26\n 51 62 29\n 0 0 0\n 0.5 0 0\n 0.5 0.05 0\n 0 0.1 0\n 0.5 0.1 0\n 0.5 0.15 0\n 0 0.2 0\n 0.5 0.2 0\n 0.5 0.25 0\n 0 0.3 0\n 0.5 0.3 0\n 0.5 0.35 0\n 0 0.4 0\n 0.5 0.4 0\n 0.5 0.45 0\n 0 0.5 0\n 0.5 0.5 0\n 0.5 0.55 0\n 0 0.6 0\n 0.5 0.6 0\n 0.5 0.65 0\n 0 0.7 0\n 0.5 0.7 0\n 0.5 0.75 0\n 0 0.8 0\n 0.5 0.8 0\n 0.5 0.85 0\n 0 0.9 0\n 0.5 0.9 0\n 0.5 0.95 0\n 1 0 0\n 1 0.1 0\n 1 0.2 0\n 1 0.3 0\n 1 0.4 0\n 1 0.5 0\n 1 0.6 0\n 1 0.7 0\n 1 0.8 0\n 1 0.9 0\n 1 0.05 0\n 1 0.15 0\n 1 0.25 0\n 1 0.35 0\n 1 0.45 0\n 1 0.55 0\n 1 0.65 0\n 1 0.75 0\n 1 0.85 0\n 1 0.95 0\n 1 1 0\n 0 1 0\n 0.5 1 0\n 0 0.05 0\n 0 0.15 0\n 0 0.25 0\n 0 0.35 0\n 0 0.45 0\n 0 0.55 0\n 0 0.65 0\n 0 0.75 0\n 0 0.85 0\n 0 0.95 0\n2.9268\n3.1707\n3\n1\n 0 0 0\n 0 0 0\n 0 0 0.05\n 0 0 0.1\n 0 0 0.1\n 0 0 0.15\n 0 0 0.2\n 0 0 0.2\n 0 0 0.25\n 0 0 0.3\n 0 0 0.3\n 0 0 0.35\n 0 0 0.4\n 0 0 0.4\n 0 0 0.45\n 0 0 0.5\n 0 0 0.5\n 0 0 0.55\n 0 0 0.6\n 0 0 0.6\n 0 0 0.65\n 0 0 0.7\n 0 0 0.7\n 0 0 0.75\n 0 0 0.8\n 0 0 0.8\n 0 0 0.85\n 0 0 0.9\n 0 0 0.9\n 0 0 0.95\n 0 0 0\n 0 0 0.1\n 0 0 0.2\n 0 0 0.3\n 0 0 0.4\n 0 0 0.5\n 0 0 0.6\n 0 0 0.7\n 0 0 
0.8\n 0 0 0.9\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n 0 0 1\n 0 0 1\n 0 0 1\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n1\n";
-
- double valuesExpected1[2]={0.,0.};
- double valuesExpected2[2]={0.95,0.970625};
-
- double valuesExpected30[]={0., 0., 0.05, 0., 0., 0.15, 0., 0., 0.25, 0., 0., 0.35, 0., 0., 0.45, 0., 0., 0.55, 0., 0., 0.65, 0., 0., 0.75, 0., 0., 0.85, 0., 0., 0.95};
- double valuesExpected31[]={0., 0., 0.029375, 0., 0., 0.029375, 0., 0., 0.1, 0., 0., 0.1, 0., 0., 0.2, 0., 0., 0.2, 0., 0., 0.3, 0., 0., 0.3, 0., 0., 0.4, 0., 0., 0.4, 0., 0., 0.5, 0., 0., 0.5, 0., 0., 0.6, 0., 0., 0.6, 0., 0., 0.7, 0., 0., 0.7, 0., 0., 0.8, 0., 0., 0.8, 0., 0., 0.9, 0., 0., 0.9, 0., 0., 0.970625, 0., 0., 0.970625 };
-
- double *valuesExpected3[2]={valuesExpected30,valuesExpected31};
-
- int rank, size;
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- if (size <2)
- return ;
- CommInterface comm;
- set<int> Genepi_ids;
- set<int> entree_chaude_ids;
- Genepi_ids.insert(0);
- for (int i=1;i<size;i++)
- entree_chaude_ids.insert(i);
- for (int type=0;type<2;type++)
- {
- MPIProcessorGroup entree_chaude_group(comm,entree_chaude_ids);
- MPIProcessorGroup Genepi_group(comm,Genepi_ids);
-
- TrioField vitesse;
- InterpKernelDEC dec_vit_in_chaude(entree_chaude_group, Genepi_group);
-
- if ( entree_chaude_group.containsMyRank())
- {
- istringstream save_vit(save_vit_in);
- vitesse.restore(save_vit);
- }
- else
- {
- istringstream save_vit(save_vit_out_1_0);
- vitesse.restore(save_vit);
- vitesse._has_field_ownership=false;
-
- if (vitesse._field)
- {
- delete [] vitesse._field;
- // cette ligne est super importante sinon c'est tout faux !!!!!!!
- vitesse._field=0;
- }
- // pour tester P1->P0
- vitesse._type=type;
- }
-
- if (vitesse._type==1)
- dec_vit_in_chaude.setMethod("P1");
-
-
-
- dec_vit_in_chaude.attachLocalField((ICoCo::Field*) &vitesse);
-
- dec_vit_in_chaude.synchronize();
-
-
- // Envois - receptions
- if (entree_chaude_group.containsMyRank())
- {
- dec_vit_in_chaude.sendData();
- }
- else
- {
- dec_vit_in_chaude.recvData();
- }
- if (entree_chaude_group.containsMyRank() )
- {
- if (1)
- {
- ostringstream save_vit(save_vit_in_2);
- vitesse.save(save_vit);
- }
- }
- else
- {
-
- double pmin=1e38, pmax=-1e38;
-
- for(int i=0;i<vitesse.nb_values()*vitesse._nb_field_components;i++)
- {
- double p=*(vitesse._field+i);
- if (p<pmin) pmin=p;
- if (p>pmax) pmax=p;
- }
- CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected1[type],pmin,1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected2[type],pmax,1e-12);
-
- ostringstream save_vit(save_vit_outs[type]);
- vitesse.save(save_vit);
-
- for(int i=0;i<vitesse.nb_values();i++)
- {
- for(int c=0;c<vitesse._nb_field_components;c++)
- {
- double p=vitesse._field[i*vitesse._nb_field_components+c];
- CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected3[type][i*vitesse._nb_field_components+c],p,1e-12);
- }
- }
-
- }
- }
-}
+++ /dev/null
-#include "ParaMEDMEMTest.hxx"
-#include <string>
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "DEC.hxx"
-#include "InterpKernelDEC.hxx"
-#include <set>
-#include <time.h>
-#include "ICoCoTrioField.hxx"
-#include <iostream>
-#include <assert.h>
-
-using namespace std;
-using namespace ParaMEDMEM;
-using namespace ICoCo;
-
-typedef enum {sync_and,sync_or} synctype;
-void synchronize_bool(bool& stop, synctype s)
-{
- int my_stop;
- int my_stop_temp = stop?1:0;
- if (s==sync_and)
- MPI_Allreduce(&my_stop_temp,&my_stop,1,MPI_INTEGER,MPI_MIN,MPI_COMM_WORLD);
- else if (s==sync_or)
- MPI_Allreduce(&my_stop_temp,&my_stop,1,MPI_INTEGER,MPI_MAX,MPI_COMM_WORLD);
- stop =(my_stop==1);
-}
-
-void synchronize_dt(double& dt)
-{
- double dttemp=dt;
- MPI_Allreduce(&dttemp,&dt,1,MPI_DOUBLE,MPI_MIN,MPI_COMM_WORLD);
-}
-
-
-void affiche( const TrioField& field)
-{
- cout <<field.getName()<<endl;
- for (int ele=0;ele<field._nb_elems;ele++)
- cout <<ele <<": "<<field._field[ele]<<endl;;
-
-}
-
-void remplit_coord(double* coords)
-{
- coords[0*3+0]=0.;
- coords[0*3+1]=0.;
- coords[0*3+2]=0.;
-
- coords[1*3+0]=1.;
- coords[1*3+1]=0.;
- coords[1*3+2]=0.;
-
-
- coords[2*3+0]=0.;
- coords[2*3+1]=0.;
- coords[2*3+2]=1.;
-
- coords[3*3+0]=1.;
- coords[3*3+1]=0.;
- coords[3*3+2]=1.;
-
- for (int i=4;i<8;i++)
- {
- for (int d=0;d<3;d++)
- coords[i*3+d]=coords[(i-4)*3+d];
- coords[i*3+1]+=1e-5;
- }
-
-}
-
-void init_quad(TrioField& champ_quad)
-{
-
- champ_quad.setName("champ_quad");
- champ_quad._space_dim=3;
- champ_quad._mesh_dim=2;
- champ_quad._nbnodes=8;
- champ_quad._nodes_per_elem=4;
- champ_quad._nb_elems=2;
- champ_quad._itnumber=0;
- champ_quad._time1=0;
- champ_quad._time2=1;
- champ_quad._nb_field_components=1;
-
- champ_quad._coords=new double[champ_quad._nbnodes*champ_quad._space_dim];
- //memcpy(afield._coords,sommets.addr(),champ_quad._nbnodes*champ_quad._space_dim*sizeof(double));
-
- remplit_coord(champ_quad._coords);
-
-
- champ_quad._connectivity=new int[champ_quad._nb_elems*champ_quad._nodes_per_elem];
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+0]=0;
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+1]=1;
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+2]=3;
- champ_quad._connectivity[0*champ_quad._nodes_per_elem+3]=2;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+0]=4;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+1]=5;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+2]=7;
- champ_quad._connectivity[1*champ_quad._nodes_per_elem+3]=6;
-
-
- champ_quad._has_field_ownership=false;
- champ_quad._field=0;
- //champ_quad._field=new double[champ_quad._nb_elems];
- // assert(champ_quad._nb_field_components==1);
-}
-void init_triangle(TrioField& champ_triangle)
-{
-
- champ_triangle.setName("champ_triangle");
- champ_triangle._space_dim=3;
- champ_triangle._mesh_dim=2;
- champ_triangle._nbnodes=8;
- champ_triangle._nodes_per_elem=3;
- champ_triangle._nb_elems=4;
- champ_triangle._itnumber=0;
- champ_triangle._time1=0;
- champ_triangle._time2=1;
- champ_triangle._nb_field_components=1;
-
- champ_triangle._coords=new double[champ_triangle._nbnodes*champ_triangle._space_dim];
- //memcpy(afield._coords,sommets.addr(),champ_triangle._nbnodes*champ_triangle._space_dim*sizeof(double));
- remplit_coord(champ_triangle._coords);
-
- champ_triangle._connectivity=new int[champ_triangle._nb_elems*champ_triangle._nodes_per_elem];
- champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+0]=0;
- champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+1]=1;
- champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+2]=2;
- champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+0]=1;
- champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+1]=3;
- champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+2]=2;
-
- champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+0]=4;
- champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+1]=5;
- champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+2]=7;
- champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+0]=4;
- champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+1]=7;
- champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+2]=6;
-
- champ_triangle._has_field_ownership=false;
- // champ_triangle._field=new double[champ_triangle._nb_elems];
- champ_triangle._field=0;
-}
-
-void ParaMEDMEMTest::testICocoTrio1()
-{
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- //the test is meant to run on five processors
- if (size !=2) return ;
-
- CommInterface comm;
- set<int> emetteur_ids;
- set<int> recepteur_ids;
- emetteur_ids.insert(0);
- recepteur_ids.insert(1);
-
- MPIProcessorGroup recepteur_group(comm,recepteur_ids);
- MPIProcessorGroup emetteur_group(comm,emetteur_ids);
-
-
- string cas;
- if (recepteur_group.containsMyRank())
- {
- cas="recepteur";
-
- }
- else
- cas="emetteur";
-
- InterpKernelDEC dec_emetteur(emetteur_group, recepteur_group);
-
- TrioField champ_emetteur, champ_recepteur;
-
- init_triangle(champ_emetteur);
- //init_triangle(champ_emetteur);
- init_quad(champ_recepteur);
- //init_emetteur(champ_recepteur);
-
- if (cas=="emetteur")
- {
- champ_emetteur._field=new double[champ_emetteur._nb_elems];
- for (int ele=0;ele<champ_emetteur._nb_elems;ele++)
- champ_emetteur._field[ele]=1;
-
- champ_emetteur._has_field_ownership=true;
- }
-
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- clock_t clock0= clock ();
- int compti=0;
-
- bool init=true; // first time step ??
- bool stop=false;
- //boucle sur les pas de quads
- while (!stop) {
-
- compti++;
- clock_t clocki= clock ();
- cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl;
- for (int non_unif=0;non_unif<2;non_unif++)
- {
- // if (champ_recepteur._field)
- // delete [] champ_recepteur._field;
- champ_recepteur._field=0;
- // champ_recepteur._has_field_ownership=false;
-
-
-
- if (cas=="emetteur")
- if (non_unif)
- champ_emetteur._field[0]=40;
- bool ok=false; // Is the time interval successfully solved ?
-
- // Loop on the time interval tries
- if(1)
- {
- if (cas=="emetteur")
- dec_emetteur.attachLocalField((ICoCo::Field*) &champ_emetteur);
- else
- dec_emetteur.attachLocalField((ICoCo::Field*) &champ_recepteur);
-
- dec_emetteur.setNature(ConservativeVolumic);
-
- if(init)
- dec_emetteur.synchronize();
- init=false;
-
- if (cas=="emetteur")
- {
- dec_emetteur.sendData();
- affiche(champ_emetteur);
- }
- else if (cas=="recepteur")
- {
- dec_emetteur.recvData();
- affiche(champ_recepteur);
- }
- else
- throw 0;
- }
- stop=true;
- }
- }
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "Topology.hxx"
-#include "DEC.hxx"
-#include "MxN_Mapping.hxx"
-#include "InterpKernelDEC.hxx"
-#include "ParaMESH.hxx"
-#include "ParaFIELD.hxx"
-#include "ComponentTopology.hxx"
-#include "ICoCoMEDField.hxx"
-#include "MEDLoader.hxx"
-
-#include <string>
-#include <iterator>
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void ParaMEDMEMTest::testInterpKernelDEC_2D()
-{
- testInterpKernelDEC_2D_("P0","P0");
-}
-
-void ParaMEDMEMTest::testInterpKernelDEC2_2D()
-{
- testInterpKernelDEC2_2D_("P0","P0");
-}
-
-void ParaMEDMEMTest::testInterpKernelDEC_3D()
-{
- testInterpKernelDEC_3D_("P0","P0");
-}
-
-void ParaMEDMEMTest::testInterpKernelDEC_2DP0P1()
-{
- //testInterpKernelDEC_2D_("P0","P1");
-}
-
-/*
- * Check methods defined in InterpKernelDEC.hxx
- *
- InterpKernelDEC();
- InterpKernelDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group);
- virtual ~InterpKernelDEC();
- void synchronize();
- void recvData();
- void sendData();
-*/
-
-void ParaMEDMEMTest::testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth)
-{
- std::string srcM(srcMeth);
- std::string targetM(targetMeth);
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- //the test is meant to run on five processors
- if (size !=5) return ;
-
- int nproc_source = 3;
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
-
- //loading the geometry for the source group
-
- ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
-
- ParaMEDMEM::MEDCouplingUMesh* mesh;
- ParaMEDMEM::ParaMESH* paramesh;
- ParaMEDMEM::ParaFIELD* parafield;
- ICoCo::Field* icocofield ;
-
- string filename_xml1 = getResourceFile("square1_split");
- string filename_xml2 = getResourceFile("square2_split");
- //string filename_seq_wr = makeTmpFile("");
- //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
-
- // To remove tmp files from disk
- ParaMEDMEMTest_TmpFilesRemover aRemover;
-
- MPI_Barrier(MPI_COMM_WORLD);
- if (source_group->containsMyRank())
- {
- string master = filename_xml1;
-
- ostringstream strstream;
- strstream <<master<<rank+1<<".med";
- ostringstream meshname ;
- meshname<< "Mesh_2_"<< rank+1;
-
- mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
-
-
- paramesh=new ParaMESH (mesh,*source_group,"source mesh");
-
- // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT( support,*source_group);
- ParaMEDMEM::ComponentTopology comptopo;
- if(srcM=="P0")
- {
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafield->getField()->setNature(ConservativeVolumic);
- }
- else
- parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
- int nb_local;
- if(srcM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
- // double * value= new double[nb_local];
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=1.0;
-
- // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
- dec.setMethod(srcMeth);
- dec.attachLocalField(icocofield);
- }
-
- //loading the geometry for the target group
- if (target_group->containsMyRank())
- {
- string master= filename_xml2;
- ostringstream strstream;
- strstream << master<<(rank-nproc_source+1)<<".med";
- ostringstream meshname ;
- meshname<< "Mesh_3_"<<rank-nproc_source+1;
- mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
-
- paramesh=new ParaMESH (mesh,*target_group,"target mesh");
- // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group);
- ParaMEDMEM::ComponentTopology comptopo;
- if(targetM=="P0")
- {
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafield->getField()->setNature(ConservativeVolumic);
- }
- else
- parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
- int nb_local;
- if(targetM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
- // double * value= new double[nb_local];
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=0.0;
- // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
- dec.setMethod(targetMeth);
- dec.attachLocalField(icocofield);
- }
-
-
- //attaching a DEC to the source group
- double field_before_int;
- double field_after_int;
-
- if (source_group->containsMyRank())
- {
- field_before_int = parafield->getVolumeIntegral(0,true);
- dec.synchronize();
- cout<<"DEC usage"<<endl;
- dec.setForcedRenormalization(false);
-
- dec.sendData();
- MEDLoader::writeParaMesh("./sourcesquareb",paramesh);
- if (source_group->myRank()==0)
- aRemover.Register("./sourcesquareb");
- ostringstream filename;
- filename<<"./sourcesquareb_"<<source_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- MEDLoader::writeParaField("./sourcesquareb","boundary",parafield);
-
- dec.recvData();
- cout <<"writing"<<endl;
- MEDLoader::writeParaMesh("./sourcesquare",paramesh);
- if (source_group->myRank()==0)
- aRemover.Register("./sourcesquare");
- MEDLoader::writeParaField("./sourcesquare","boundary",parafield);
-
-
- filename<<"./sourcesquare_"<<source_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- field_after_int = parafield->getVolumeIntegral(0,true);
-
-
- // MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
- // MPI_Bcast(&field_after_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
-
- CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6);
-
- }
-
- //attaching a DEC to the target group
- if (target_group->containsMyRank())
- {
- dec.synchronize();
- dec.setForcedRenormalization(false);
-
- dec.recvData();
- MEDLoader::writeParaMesh("./targetsquareb",paramesh);
- MEDLoader::writeParaField("./targetsquareb", "boundary",parafield);
- if (target_group->myRank()==0)
- aRemover.Register("./targetsquareb");
- ostringstream filename;
- filename<<"./targetsquareb_"<<target_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- dec.sendData();
- MEDLoader::writeParaMesh("./targetsquare",paramesh);
- MEDLoader::writeParaField("./targetsquare", "boundary",parafield);
-
- if (target_group->myRank()==0)
- aRemover.Register("./targetsquareb");
-
- filename<<"./targetsquareb_"<<target_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- // double field_before_int, field_after_int;
- // MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
- // MPI_Bcast(&field_after_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
-
- // CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6);
-
- }
-
- delete source_group;
- delete target_group;
- delete self_group;
- delete parafield;
- delete paramesh;
- mesh->decrRef();
-
- delete icocofield;
-
- MPI_Barrier(MPI_COMM_WORLD);
- cout << "end of InterpKernelDEC_2D test"<<endl;
-}
-
-void ParaMEDMEMTest::testInterpKernelDEC2_2D_(const char *srcMeth, const char *targetMeth)
-{
- std::string srcM(srcMeth);
- std::string targetM(targetMeth);
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- //the test is meant to run on five processors
- if (size !=5) return ;
-
- int nproc_source = 3;
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
-
- //loading the geometry for the source group
-
- ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
-
- ParaMEDMEM::MEDCouplingUMesh* mesh;
- ParaMEDMEM::MEDCouplingFieldDouble* mcfield;
-
- string filename_xml1 = getResourceFile("square1_split");
- string filename_xml2 = getResourceFile("square2_split");
-
- // To remove tmp files from disk
- ParaMEDMEMTest_TmpFilesRemover aRemover;
-
- MPI_Barrier(MPI_COMM_WORLD);
- if (source_group->containsMyRank())
- {
- string master = filename_xml1;
-
- ostringstream strstream;
- strstream <<master<<rank+1<<".med";
- ostringstream meshname ;
- meshname<< "Mesh_2_"<< rank+1;
-
- mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
- ParaMEDMEM::ComponentTopology comptopo;
- if(srcM=="P0")
- {
- mcfield = MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME);
- mcfield->setMesh(mesh);
- DataArrayDouble *array=DataArrayDouble::New();
- array->alloc(mcfield->getNumberOfTuples(),1);
- mcfield->setArray(array);
- array->decrRef();
- mcfield->setNature(ConservativeVolumic);
- }
- else
- {
- mcfield = MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME);
- mcfield->setMesh(mesh);
- DataArrayDouble *array=DataArrayDouble::New();
- array->alloc(mcfield->getNumberOfTuples(),1);
- mcfield->setArray(array);
- array->decrRef();
- }
- int nb_local;
- if(srcM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
- double *value=mcfield->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=1.0;
- dec.setMethod(srcMeth);
- dec.attachLocalField(mcfield);
- }
-
- //loading the geometry for the target group
- if (target_group->containsMyRank())
- {
- string master= filename_xml2;
- ostringstream strstream;
- strstream << master<<(rank-nproc_source+1)<<".med";
- ostringstream meshname ;
- meshname<< "Mesh_3_"<<rank-nproc_source+1;
- mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
- ParaMEDMEM::ComponentTopology comptopo;
- if(targetM=="P0")
- {
- mcfield = MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME);
- mcfield->setMesh(mesh);
- DataArrayDouble *array=DataArrayDouble::New();
- array->alloc(mcfield->getNumberOfTuples(),1);
- mcfield->setArray(array);
- array->decrRef();
- mcfield->setNature(ConservativeVolumic);
- }
- else
- {
- mcfield = MEDCouplingFieldDouble::New(ON_NODES,NO_TIME);
- mcfield->setMesh(mesh);
- DataArrayDouble *array=DataArrayDouble::New();
- array->alloc(mcfield->getNumberOfTuples(),1);
- mcfield->setArray(array);
- array->decrRef();
- }
- int nb_local;
- if(targetM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
- double *value=mcfield->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=0.0;
- dec.setMethod(targetMeth);
- dec.attachLocalField(mcfield);
- }
-
-
- //attaching a DEC to the source group
-
- if (source_group->containsMyRank())
- {
- dec.synchronize();
- dec.setForcedRenormalization(false);
- dec.sendData();
- dec.recvData();
- }
-
- //attaching a DEC to the target group
- if (target_group->containsMyRank())
- {
- dec.synchronize();
- dec.setForcedRenormalization(false);
- dec.recvData();
- dec.sendData();
- }
- delete source_group;
- delete target_group;
- delete self_group;
- mcfield->decrRef();
- mesh->decrRef();
-
- MPI_Barrier(MPI_COMM_WORLD);
- cout << "end of InterpKernelDEC2_2D test"<<endl;
-}
-
-void ParaMEDMEMTest::testInterpKernelDEC_3D_(const char *srcMeth, const char *targetMeth)
-{
- std::string srcM(srcMeth);
- std::string targetM(targetMeth);
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- //the test is meant to run on five processors
- if (size !=3) return ;
-
- int nproc_source = 2;
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
-
- //loading the geometry for the source group
-
- ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
-
- ParaMEDMEM::MEDCouplingUMesh* mesh;
- ParaMEDMEM::ParaMESH* paramesh;
- ParaMEDMEM::ParaFIELD* parafield;
- ICoCo::Field* icocofield ;
-
- string tmp_dir = getenv("TMP");
- if (tmp_dir == "")
- tmp_dir = "/tmp";
- string filename_xml1 = getResourceFile("Mesh3D_10_2d");
- string filename_xml2 = getResourceFile("Mesh3D_11");
- //string filename_seq_wr = makeTmpFile("");
- //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
-
- // To remove tmp files from disk
- ParaMEDMEMTest_TmpFilesRemover aRemover;
-
- MPI_Barrier(MPI_COMM_WORLD);
- if (source_group->containsMyRank())
- {
- string master = filename_xml1;
-
- ostringstream strstream;
- strstream <<master<<rank+1<<".med";
- ostringstream meshname ;
- meshname<< "Mesh_3_"<< rank+1;
-
- mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
-
-
- paramesh=new ParaMESH (mesh,*source_group,"source mesh");
-
- // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT( support,*source_group);
- ParaMEDMEM::ComponentTopology comptopo;
- if(srcM=="P0")
- {
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafield->getField()->setNature(ConservativeVolumic);
- }
- else
- parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
- int nb_local;
- if(srcM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
- // double * value= new double[nb_local];
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=1.0;
-
- // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
- dec.setMethod(srcMeth);
- dec.attachLocalField(icocofield);
- }
-
- //loading the geometry for the target group
- if (target_group->containsMyRank())
- {
- string master= filename_xml2;
- ostringstream strstream;
- strstream << master << ".med";
- ostringstream meshname ;
- meshname<< "Mesh_6";
- mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
-
- paramesh=new ParaMESH (mesh,*target_group,"target mesh");
- // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group);
- ParaMEDMEM::ComponentTopology comptopo;
- if(targetM=="P0")
- {
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafield->getField()->setNature(ConservativeVolumic);
- }
- else
- parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
- int nb_local;
- if(targetM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
- // double * value= new double[nb_local];
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=0.0;
- // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
- dec.setMethod(targetMeth);
- dec.attachLocalField(icocofield);
- }
- //attaching a DEC to the source group
- double field_before_int;
- double field_after_int;
-
- if (source_group->containsMyRank())
- {
- field_before_int = parafield->getVolumeIntegral(0,true);
- dec.synchronize();
- cout<<"DEC usage"<<endl;
- dec.setForcedRenormalization(false);
-
- dec.sendData();
- MEDLoader::writeParaMesh("./sourcesquareb",paramesh);
- if (source_group->myRank()==0)
- aRemover.Register("./sourcesquareb");
- ostringstream filename;
- filename<<"./sourcesquareb_"<<source_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- MEDLoader::writeParaField("./sourcesquareb","boundary",parafield);
-
- dec.recvData();
- cout <<"writing"<<endl;
- MEDLoader::writeParaMesh("./sourcesquare",paramesh);
- if (source_group->myRank()==0)
- aRemover.Register("./sourcesquare");
- MEDLoader::writeParaField("./sourcesquare","boundary",parafield);
-
-
- filename<<"./sourcesquare_"<<source_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- field_after_int = parafield->getVolumeIntegral(0,true);
-
- CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6);
-
- }
-
- //attaching a DEC to the target group
- if (target_group->containsMyRank())
- {
- dec.synchronize();
- dec.setForcedRenormalization(false);
-
- dec.recvData();
- MEDLoader::writeParaMesh("./targetsquareb",paramesh);
- MEDLoader::writeParaField("./targetsquareb", "boundary",parafield);
- if (target_group->myRank()==0)
- aRemover.Register("./targetsquareb");
- ostringstream filename;
- filename<<"./targetsquareb_"<<target_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- dec.sendData();
- MEDLoader::writeParaMesh("./targetsquare",paramesh);
- MEDLoader::writeParaField("./targetsquare", "boundary",parafield);
-
- if (target_group->myRank()==0)
- aRemover.Register("./targetsquareb");
-
- filename<<"./targetsquareb_"<<target_group->myRank()+1;
- aRemover.Register(filename.str().c_str());
- }
- delete source_group;
- delete target_group;
- delete self_group;
- delete parafield;
- delete paramesh;
- mesh->decrRef();
-
- delete icocofield;
-
- MPI_Barrier(MPI_COMM_WORLD);
- cout << "end of InterpKernelDEC_3D test"<<endl;
-}
-
-//Synchronous tests without interpolation with native mode (AllToAll(v) from lam/MPI:
-void ParaMEDMEMTest::testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,false,false,false,"P0","P0");
-}
-
-//Synchronous tests without interpolation :
-void ParaMEDMEMTest::testSynchronousEqualInterpKernelWithoutInterpDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,true,false,false,"P0","P0");
-}
-
-//Synchronous tests with interpolation :
-void ParaMEDMEMTest::testSynchronousEqualInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,true,false,true,"P0","P0");
-}
-void ParaMEDMEMTest::testSynchronousFasterSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.09,1,0.1,1,true,false,true,"P0","P0");
-}
-void ParaMEDMEMTest::testSynchronousSlowerSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.11,1,0.1,1,true,false,true,"P0","P0");
-}
-void ParaMEDMEMTest::testSynchronousSlowSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.11,1,0.01,1,true,false,true,"P0","P0");
-}
-void ParaMEDMEMTest::testSynchronousFastSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.01,1,0.11,1,true,false,true,"P0","P0");
-}
-
-//Asynchronous tests with interpolation :
-void ParaMEDMEMTest::testAsynchronousEqualInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,true,true,true,"P0","P0");
-}
-void ParaMEDMEMTest::testAsynchronousFasterSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.09,1,0.1,1,true,true,true,"P0","P0");
-}
-void ParaMEDMEMTest::testAsynchronousSlowerSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.11,1,0.1,1,true,true,true,"P0","P0");
-}
-void ParaMEDMEMTest::testAsynchronousSlowSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.11,1,0.01,1,true,true,true,"P0","P0");
-}
-void ParaMEDMEMTest::testAsynchronousFastSourceInterpKernelDEC_2D()
-{
- testAsynchronousInterpKernelDEC_2D(0.01,1,0.11,1,true,true,true,"P0","P0");
-}
-
-void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P0()
-{
- //
- const double sourceCoordsAll[2][8]={{0.4,0.5,0.4,1.5,1.6,1.5,1.6,0.5},
- {0.3,-0.5,1.6,-0.5,1.6,-1.5,0.3,-1.5}};
- const double targetCoordsAll[3][16]={{0.7,1.45,0.7,1.65,0.9,1.65,0.9,1.45, 1.1,1.4,1.1,1.6,1.3,1.6,1.3,1.4},
- {0.7,-0.6,0.7,0.7,0.9,0.7,0.9,-0.6, 1.1,-0.7,1.1,0.6,1.3,0.6,1.3,-0.7},
- {0.7,-1.55,0.7,-1.35,0.9,-1.35,0.9,-1.55, 1.1,-1.65,1.1,-1.45,1.3,-1.45,1.3,-1.65}};
- int conn4All[8]={0,1,2,3,4,5,6,7};
- double targetResults[3][2]={{34.,34.},{38.333333333333336,42.666666666666664},{47.,47.}};
- double targetResults2[3][2]={{0.28333333333333344,0.56666666666666687},{1.8564102564102569,2.0128205128205132},{1.0846153846153845,0.36153846153846159}};
- double targetResults3[3][2]={{3.7777777777777781,7.5555555555555562},{24.511111111111113,26.355555555555558},{14.1,4.7}};
- //
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- //
- if(size!=5)
- return ;
- int nproc_source = 2;
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
- //
- ParaMEDMEM::MEDCouplingUMesh *mesh=0;
- ParaMEDMEM::ParaMESH *paramesh=0;
- ParaMEDMEM::ParaFIELD* parafield=0;
- //
- ParaMEDMEM::CommInterface interface;
- //
- ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
- //
- MPI_Barrier(MPI_COMM_WORLD);
- if(source_group->containsMyRank())
- {
- std::ostringstream stream; stream << "sourcemesh2D proc " << rank;
- mesh=MEDCouplingUMesh::New(stream.str().c_str(),2);
- mesh->allocateCells(2);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(4,2);
- const double *sourceCoords=sourceCoordsAll[rank];
- std::copy(sourceCoords,sourceCoords+8,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- paramesh=new ParaMESH(mesh,*source_group,"source mesh");
- ParaMEDMEM::ComponentTopology comptopo;
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- double *value=parafield->getField()->getArray()->getPointer();
- value[0]=34+13*((double)rank);
- }
- else
- {
- std::ostringstream stream; stream << "targetmesh2D proc " << rank-nproc_source;
- mesh=MEDCouplingUMesh::New(stream.str().c_str(),2);
- mesh->allocateCells(2);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All+4);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(8,2);
- const double *targetCoords=targetCoordsAll[rank-nproc_source];
- std::copy(targetCoords,targetCoords+16,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- paramesh=new ParaMESH (mesh,*target_group,"target mesh");
- ParaMEDMEM::ComponentTopology comptopo;
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- }
- //test 1 - Conservative volumic
- ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group);
- parafield->getField()->setNature(ConservativeVolumic);
- if (source_group->containsMyRank())
- {
- dec.setMethod("P0");
- dec.attachLocalField(parafield);
- dec.synchronize();
- dec.setForcedRenormalization(false);
- dec.sendData();
- }
- else
- {
- dec.setMethod("P0");
- dec.attachLocalField(parafield);
- dec.synchronize();
- dec.setForcedRenormalization(false);
- dec.recvData();
- const double *res=parafield->getField()->getArray()->getConstPointer();
- const double *expected=targetResults[rank-nproc_source];
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);
- }
- //test 2 - Integral
- ParaMEDMEM::InterpKernelDEC dec2(*source_group,*target_group);
- parafield->getField()->setNature(Integral);
- if (source_group->containsMyRank())
- {
- dec2.setMethod("P0");
- dec2.attachLocalField(parafield);
- dec2.synchronize();
- dec2.setForcedRenormalization(false);
- dec2.sendData();
- }
- else
- {
- dec2.setMethod("P0");
- dec2.attachLocalField(parafield);
- dec2.synchronize();
- dec2.setForcedRenormalization(false);
- dec2.recvData();
- const double *res=parafield->getField()->getArray()->getConstPointer();
- const double *expected=targetResults2[rank-nproc_source];
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);
- }
- //test 3 - Integral with global constraint
- ParaMEDMEM::InterpKernelDEC dec3(*source_group,*target_group);
- parafield->getField()->setNature(IntegralGlobConstraint);
- if (source_group->containsMyRank())
- {
- dec3.setMethod("P0");
- dec3.attachLocalField(parafield);
- dec3.synchronize();
- dec3.setForcedRenormalization(false);
- dec3.sendData();
- }
- else
- {
- dec3.setMethod("P0");
- dec3.attachLocalField(parafield);
- dec3.synchronize();
- dec3.setForcedRenormalization(false);
- dec3.recvData();
- const double *res=parafield->getField()->getArray()->getConstPointer();
- const double *expected=targetResults3[rank-nproc_source];
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);
- }
- //test 4 - Conservative volumic reversed
- ParaMEDMEM::InterpKernelDEC dec4(*source_group,*target_group);
- parafield->getField()->setNature(ConservativeVolumic);
- if (source_group->containsMyRank())
- {
- dec4.setMethod("P0");
- dec4.attachLocalField(parafield);
- dec4.synchronize();
- dec4.setForcedRenormalization(false);
- dec4.recvData();
- const double *res=parafield->getField()->getArray()->getConstPointer();
- CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples());
- const double expected[]={37.8518518518519,43.5333333333333};
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13);
- }
- else
- {
- dec4.setMethod("P0");
- dec4.attachLocalField(parafield);
- dec4.synchronize();
- dec4.setForcedRenormalization(false);
- double *res=parafield->getField()->getArray()->getPointer();
- const double *toSet=targetResults[rank-nproc_source];
- res[0]=toSet[0];
- res[1]=toSet[1];
- dec4.sendData();
- }
- //test 5 - Integral reversed
- ParaMEDMEM::InterpKernelDEC dec5(*source_group,*target_group);
- parafield->getField()->setNature(Integral);
- if (source_group->containsMyRank())
- {
- dec5.setMethod("P0");
- dec5.attachLocalField(parafield);
- dec5.synchronize();
- dec5.setForcedRenormalization(false);
- dec5.recvData();
- const double *res=parafield->getField()->getArray()->getConstPointer();
- CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples());
- const double expected[]={0.794600591715977,1.35631163708087};
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13);
- }
- else
- {
- dec5.setMethod("P0");
- dec5.attachLocalField(parafield);
- dec5.synchronize();
- dec5.setForcedRenormalization(false);
- double *res=parafield->getField()->getArray()->getPointer();
- const double *toSet=targetResults2[rank-nproc_source];
- res[0]=toSet[0];
- res[1]=toSet[1];
- dec5.sendData();
- }
- //test 6 - Integral with global constraint reversed
- ParaMEDMEM::InterpKernelDEC dec6(*source_group,*target_group);
- parafield->getField()->setNature(IntegralGlobConstraint);
- if (source_group->containsMyRank())
- {
- dec6.setMethod("P0");
- dec6.attachLocalField(parafield);
- dec6.synchronize();
- dec6.setForcedRenormalization(false);
- dec6.recvData();
- const double *res=parafield->getField()->getArray()->getConstPointer();
- CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples());
- const double expected[]={36.4592592592593,44.5407407407407};
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13);
- }
- else
- {
- dec6.setMethod("P0");
- dec6.attachLocalField(parafield);
- dec6.synchronize();
- dec6.setForcedRenormalization(false);
- double *res=parafield->getField()->getArray()->getPointer();
- const double *toSet=targetResults3[rank-nproc_source];
- res[0]=toSet[0];
- res[1]=toSet[1];
- dec6.sendData();
- }
- //
- delete parafield;
- mesh->decrRef();
- delete paramesh;
- delete self_group;
- delete target_group;
- delete source_group;
- //
- MPI_Barrier(MPI_COMM_WORLD);
-}
-
-void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P1P1P0()
-{
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- //
- if(size!=5)
- return ;
- int nproc_source = 2;
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
- //
- ParaMEDMEM::MEDCouplingUMesh *mesh=0;
- ParaMEDMEM::ParaMESH *paramesh=0;
- ParaMEDMEM::ParaFIELD *parafieldP0=0,*parafieldP1=0;
- //
- ParaMEDMEM::CommInterface interface;
- //
- ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
- //
- MPI_Barrier(MPI_COMM_WORLD);
- if(source_group->containsMyRank())
- {
- if(rank==0)
- {
- double coords[6]={-0.3,-0.3, 0.7,0.7, 0.7,-0.3};
- int conn[3]={0,1,2};
- //int globalNode[3]={1,2,0};
- mesh=MEDCouplingUMesh::New("Source mesh Proc0",2);
- mesh->allocateCells(1);
- mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(3,2);
- std::copy(coords,coords+6,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- }
- if(rank==1)
- {
- double coords[6]={-0.3,-0.3, -0.3,0.7, 0.7,0.7};
- int conn[3]={0,1,2};
- //int globalNode[3]={1,3,2};
- mesh=MEDCouplingUMesh::New("Source mesh Proc1",2);
- mesh->allocateCells(1);
- mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(3,2);
- std::copy(coords,coords+6,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- }
- paramesh=new ParaMESH(mesh,*source_group,"source mesh");
- ParaMEDMEM::ComponentTopology comptopo;
- parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
- double *valueP0=parafieldP0->getField()->getArray()->getPointer();
- double *valueP1=parafieldP1->getField()->getArray()->getPointer();
- parafieldP0->getField()->setNature(ConservativeVolumic);
- parafieldP1->getField()->setNature(ConservativeVolumic);
- if(rank==0)
- {
- valueP0[0]=31.;
- valueP1[0]=34.; valueP1[1]=77.; valueP1[2]=53.;
- }
- if(rank==1)
- {
- valueP0[0]=47.;
- valueP1[0]=34.; valueP1[1]=57.; valueP1[2]=77.;
- }
- }
- else
- {
- const char targetMeshName[]="target mesh";
- if(rank==2)
- {
- double coords[10]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2 };
- int conn[7]={0,3,4,1, 1,4,2};
- //int globalNode[5]={4,3,0,2,1};
- mesh=MEDCouplingUMesh::New("Target mesh Proc2",2);
- mesh->allocateCells(2);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn);
- mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn+4);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(5,2);
- std::copy(coords,coords+10,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
- DataArrayInt *da=DataArrayInt::New();
- const int globalNumberingP2[5]={0,1,2,3,4};
- da->useArray(globalNumberingP2,false,CPP_DEALLOC,5,1);
- paramesh->setNodeGlobal(da);
- da->decrRef();
- }
- if(rank==3)
- {
- double coords[6]={0.2,0.2, 0.7,-0.3, 0.7,0.2};
- int conn[3]={0,2,1};
- //int globalNode[3]={1,0,5};
- mesh=MEDCouplingUMesh::New("Target mesh Proc3",2);
- mesh->allocateCells(1);
- mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(3,2);
- std::copy(coords,coords+6,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
- DataArrayInt *da=DataArrayInt::New();
- const int globalNumberingP3[3]={4,2,5};
- da->useArray(globalNumberingP3,false,CPP_DEALLOC,3,1);
- paramesh->setNodeGlobal(da);
- da->decrRef();
- }
- if(rank==4)
- {
- double coords[12]={-0.3,0.2, -0.3,0.7, 0.2,0.7, 0.2,0.2, 0.7,0.7, 0.7,0.2};
- int conn[8]={0,1,2,3, 3,2,4,5};
- //int globalNode[6]={2,6,7,1,8,5};
- mesh=MEDCouplingUMesh::New("Target mesh Proc4",2);
- mesh->allocateCells(2);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn+4);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(6,2);
- std::copy(coords,coords+12,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
- DataArrayInt *da=DataArrayInt::New();
- const int globalNumberingP4[6]={3,6,7,4,8,5};
- da->useArray(globalNumberingP4,false,CPP_DEALLOC,6,1);
- paramesh->setNodeGlobal(da);
- da->decrRef();
- }
- ParaMEDMEM::ComponentTopology comptopo;
- parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
- parafieldP0->getField()->setNature(ConservativeVolumic);
- parafieldP1->getField()->setNature(ConservativeVolumic);
- }
- // test 1 - P0 P1
- ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group);
- if (source_group->containsMyRank())
- {
- dec.setMethod("P0");
- dec.attachLocalField(parafieldP0);
- dec.synchronize();
- dec.setForcedRenormalization(false);
- dec.sendData();
- dec.recvData();
- const double *valueP0=parafieldP0->getField()->getArray()->getPointer();
- if(rank==0)
- {
- CPPUNIT_ASSERT_DOUBLES_EQUAL(34.42857143,valueP0[0],1e-7);
- }
- if(rank==1)
- {
- CPPUNIT_ASSERT_DOUBLES_EQUAL(44.,valueP0[0],1e-7);
- }
- }
- else
- {
- dec.setMethod("P1");
- dec.attachLocalField(parafieldP1);
- dec.synchronize();
- dec.setForcedRenormalization(false);
- dec.recvData();
- const double *res=parafieldP1->getField()->getArray()->getConstPointer();
- if(rank==2)
- {
- const double expectP2[5]={39.0, 31.0, 31.0, 47.0, 39.0};
- CPPUNIT_ASSERT_EQUAL(5,parafieldP1->getField()->getNumberOfTuples());
- CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
- for(int kk=0;kk<5;kk++)
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP2[kk],res[kk],1e-12);
- }
- if(rank==3)
- {
- const double expectP3[3]={39.0, 31.0, 31.0};
- CPPUNIT_ASSERT_EQUAL(3,parafieldP1->getField()->getNumberOfTuples());
- CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
- for(int kk=0;kk<3;kk++)
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP3[kk],res[kk],1e-12);
- }
- if(rank==4)
- {
- const double expectP4[6]={47.0, 47.0, 47.0, 39.0, 39.0, 31.0};
- CPPUNIT_ASSERT_EQUAL(6,parafieldP1->getField()->getNumberOfTuples());
- CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
- for(int kk=0;kk<6;kk++)
- CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP4[kk],res[kk],1e-12);
- }
- dec.sendData();
- }
- //
- delete parafieldP0;
- delete parafieldP1;
- mesh->decrRef();
- delete paramesh;
- delete self_group;
- delete target_group;
- delete source_group;
- //
- MPI_Barrier(MPI_COMM_WORLD);
-}
-
-/*!
- * Tests an asynchronous exchange between two codes
- * one sends data with dtA as an interval, the max time being tmaxA
- * the other one receives with dtB as an interval, the max time being tmaxB
- */
-void ParaMEDMEMTest::testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA,
- double dtB, double tmaxB, bool WithPointToPoint, bool Asynchronous,
- bool WithInterp, const char *srcMeth, const char *targetMeth)
-{
- std::string srcM(srcMeth);
- std::string targetM(targetMeth);
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- //the test is meant to run on five processors
- if (size !=5) return ;
-
- int nproc_source = 3;
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
-
- //loading the geometry for the source group
-
- ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
-
- ParaMEDMEM::MEDCouplingUMesh* mesh;
- ParaMEDMEM::ParaMESH* paramesh;
- ParaMEDMEM::ParaFIELD* parafield;
-
- ICoCo::Field* icocofield ;
-
- string tmp_dir = getenv("TMP");
- if (tmp_dir == "")
- tmp_dir = "/tmp";
- string filename_xml1 = getResourceFile("square1_split");
- string filename_xml2 = getResourceFile("square2_split");
- //string filename_seq_wr = makeTmpFile("");
- //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
-
- // To remove tmp files from disk
- ParaMEDMEMTest_TmpFilesRemover aRemover;
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- if (source_group->containsMyRank())
- {
- string master = filename_xml1;
-
- ostringstream strstream;
- strstream <<master<<rank+1<<".med";
- ostringstream meshname ;
- meshname<< "Mesh_2_"<< rank+1;
-
- mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
-
- paramesh=new ParaMESH (mesh,*source_group,"source mesh");
-
- // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT( support,*source_group);
- ParaMEDMEM::ComponentTopology comptopo;
- if(srcM=="P0")
- {
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafield->getField()->setNature(ConservativeVolumic);//InvertIntegral);//ConservativeVolumic);
- }
- else
- parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
-
- int nb_local;
- if(srcM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
- // double * value= new double[nb_local];
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=0.0;
-
- // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
-
- dec.attachLocalField(icocofield);
-
-
- }
-
- //loading the geometry for the target group
- if (target_group->containsMyRank())
- {
- string master= filename_xml2;
- ostringstream strstream;
- strstream << master<<(rank-nproc_source+1)<<".med";
- ostringstream meshname ;
- meshname<< "Mesh_3_"<<rank-nproc_source+1;
-
- mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
-
- paramesh=new ParaMESH (mesh,*target_group,"target mesh");
- // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group);
- ParaMEDMEM::ComponentTopology comptopo;
- if(targetM=="P0")
- {
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
- parafield->getField()->setNature(ConservativeVolumic);//InvertIntegral);//ConservativeVolumic);
- }
- else
- parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
-
- int nb_local;
- if(targetM=="P0")
- nb_local=mesh->getNumberOfCells();
- else
- nb_local=mesh->getNumberOfNodes();
-
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=0.0;
- // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
-
- dec.attachLocalField(icocofield);
- }
-
-
- //attaching a DEC to the source group
-
- if (source_group->containsMyRank())
- {
- cout<<"DEC usage"<<endl;
- dec.setAsynchronous(Asynchronous);
- if ( WithInterp ) {
- dec.setTimeInterpolationMethod(LinearTimeInterp);
- }
- if ( WithPointToPoint ) {
- dec.setAllToAllMethod(PointToPoint);
- }
- else {
- dec.setAllToAllMethod(Native);
- }
- dec.synchronize();
- dec.setForcedRenormalization(false);
- for (double time=0; time<tmaxA+1e-10; time+=dtA)
- {
- cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time
- << " dtA " << dtA << " tmaxA " << tmaxA << endl ;
- if ( time+dtA < tmaxA+1e-7 ) {
- dec.sendData( time , dtA );
- }
- else {
- dec.sendData( time , 0 );
- }
- double* value = parafield->getField()->getArray()->getPointer();
- int nb_local=parafield->getField()->getMesh()->getNumberOfCells();
- for (int i=0; i<nb_local;i++)
- value[i]= time+dtA;
-
-
- }
- }
-
- //attaching a DEC to the target group
- if (target_group->containsMyRank())
- {
- cout<<"DEC usage"<<endl;
- dec.setAsynchronous(Asynchronous);
- if ( WithInterp ) {
- dec.setTimeInterpolationMethod(LinearTimeInterp);
- }
- if ( WithPointToPoint ) {
- dec.setAllToAllMethod(PointToPoint);
- }
- else {
- dec.setAllToAllMethod(Native);
- }
- dec.synchronize();
- dec.setForcedRenormalization(false);
- vector<double> times;
- for (double time=0; time<tmaxB+1e-10; time+=dtB)
- {
- cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time
- << " dtB " << dtB << " tmaxB " << tmaxB << endl ;
- dec.recvData( time );
- double vi = parafield->getVolumeIntegral(0,true);
- cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time
- << " VolumeIntegral " << vi
- << " time*10000 " << time*10000 << endl ;
-
- CPPUNIT_ASSERT_DOUBLES_EQUAL(vi,time*10000,0.001);
- }
-
- }
-
- delete source_group;
- delete target_group;
- delete self_group;
- delete parafield ;
- delete paramesh ;
- mesh->decrRef() ;
- delete icocofield ;
-
- cout << "testAsynchronousInterpKernelDEC_2D" << rank << " MPI_Barrier " << endl ;
-
- if (Asynchronous) MPI_Barrier(MPI_COMM_WORLD);
- cout << "end of InterpKernelDEC_2D test"<<endl;
-}
+++ /dev/null
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-#include "MEDLoader.hxx"
-#include "MEDCouplingUMesh.hxx"
-#include "MEDCouplingFieldDouble.hxx"
-
-#include <algorithm>
-#include <numeric>
-#include <iostream>
-#include <iterator>
-
-using namespace std;
-using namespace INTERP_KERNEL;
-using namespace ParaMEDMEM;
-
-void ParaMEDMEMTest::testMEDLoaderRead1()
-{
- string fileName=getResourceFile("pointe_import22.med");
- vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str());
- CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
- MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0);
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(16,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(3,(int)mesh->getAllTypes().size());
- for(int i=0;i<12;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(i));
- for(int i=12;i<14;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,mesh->getTypeOfCell(i));
- for(int i=14;i<16;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_EQUAL(90,mesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(701,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+90,0));
- CPPUNIT_ASSERT_EQUAL(711,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+17,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
- mesh->decrRef();
- //
- vector<string> families=MEDLoader::GetMeshFamilyNames(fileName.c_str(),meshNames[0].c_str());
- CPPUNIT_ASSERT_EQUAL(8,(int)families.size());
- CPPUNIT_ASSERT(families[2]=="FAMILLE_ELEMENT_3");
- //
- vector<string> families2;
- families2.push_back(families[2]);
- mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),0,families2);
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(2,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(0));
- CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(1));
- CPPUNIT_ASSERT_EQUAL(11,mesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(132,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+11,0));
- CPPUNIT_ASSERT_EQUAL(16,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+3,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
- mesh->decrRef();
- //
- vector<string> groups=MEDLoader::GetMeshGroupsNames(fileName.c_str(),meshNames[0].c_str());
- CPPUNIT_ASSERT_EQUAL(5,(int)groups.size());
- CPPUNIT_ASSERT(groups[0]=="groupe1");
- CPPUNIT_ASSERT(groups[1]=="groupe2");
- CPPUNIT_ASSERT(groups[2]=="groupe3");
- CPPUNIT_ASSERT(groups[3]=="groupe4");
- CPPUNIT_ASSERT(groups[4]=="groupe5");
- vector<string> groups2;
- groups2.push_back(groups[0]);
- mesh=MEDLoader::ReadUMeshFromGroups(fileName.c_str(),meshNames[0].c_str(),0,groups2);
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(7,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
- for(int i=0;i<6;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(6));
- CPPUNIT_ASSERT_EQUAL(36,mesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(254,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+36,0));
- CPPUNIT_ASSERT_EQUAL(141,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+8,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
- mesh->decrRef();
- //
- std::vector<std::string> fieldsName=MEDLoader::GetCellFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str());
- CPPUNIT_ASSERT_EQUAL(2,(int)fieldsName.size());
- CPPUNIT_ASSERT(fieldsName[0]=="fieldcelldoublescalar");
- CPPUNIT_ASSERT(fieldsName[1]=="fieldcelldoublevector");
- std::vector<std::pair<int,int> > its0=MEDLoader::GetCellFieldIterations(fileName.c_str(),fieldsName[0].c_str());
- CPPUNIT_ASSERT_EQUAL(1,(int)its0.size());
- CPPUNIT_ASSERT_EQUAL(-1,its0[0].first);
- CPPUNIT_ASSERT_EQUAL(-1,its0[0].second);
- std::vector<std::pair<int,int> > its1=MEDLoader::GetCellFieldIterations(fileName.c_str(),fieldsName[1].c_str());
- CPPUNIT_ASSERT_EQUAL(1,(int)its1.size());
- CPPUNIT_ASSERT_EQUAL(-1,its1[0].first);
- CPPUNIT_ASSERT_EQUAL(-1,its1[0].second);
- //
- MEDCouplingFieldDouble *field0=MEDLoader::ReadFieldDoubleCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[0].c_str(),its0[0].first,its0[0].second);
- field0->checkCoherency();
- CPPUNIT_ASSERT(field0->getName()==fieldsName[0]);
- CPPUNIT_ASSERT_EQUAL(1,field0->getNumberOfComponents());
- CPPUNIT_ASSERT_EQUAL(16,field0->getNumberOfTuples());
- const double expectedValues[16]={1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,3.,3.,2.,2.};
- double diffValue[16];
- std::transform(field0->getArray()->getPointer(),field0->getArray()->getPointer()+16,expectedValues,diffValue,std::minus<double>());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue,diffValue+16),1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue,diffValue+16),1e-12);
- const MEDCouplingUMesh *constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0->getMesh());
- CPPUNIT_ASSERT(constMesh);
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
- for(int i=0;i<12;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
- for(int i=12;i<14;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
- for(int i=14;i<16;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
- CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
- field0->decrRef();
- //
- MEDCouplingFieldDouble *field1=MEDLoader::ReadFieldDoubleCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[1].c_str(),its1[0].first,its1[0].second);
- field1->checkCoherency();
- CPPUNIT_ASSERT(field1->getName()==fieldsName[1]);
- CPPUNIT_ASSERT_EQUAL(3,field1->getNumberOfComponents());
- CPPUNIT_ASSERT_EQUAL(16,field1->getNumberOfTuples());
- const double expectedValues2[48]={1.,0.,1.,1.,0.,1.,1.,0.,1.,2.,1.,0.,2.,1.,0.,2.,1.,0.,3.,0.,1.,3.,0.,1.,3.,0.,1.,4.,1.,0.,4.,1.,0.,4.,1.,0.,6.,1.,1.,6.,0.,0.,5.,0.,0.,5.,1.,1.};
- double diffValue2[48];
- std::transform(field1->getArray()->getPointer(),field1->getArray()->getPointer()+48,expectedValues2,diffValue2,std::minus<double>());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue2,diffValue2+48),1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue2,diffValue2+48),1e-12);
- constMesh=dynamic_cast<const MEDCouplingUMesh *>(field1->getMesh());
- CPPUNIT_ASSERT(constMesh);
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
- for(int i=0;i<12;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
- for(int i=12;i<14;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
- for(int i=14;i<16;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
- CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
- field1->decrRef();
- //fields on nodes
- std::vector<std::string> fieldsNameNode=MEDLoader::GetNodeFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str());
- CPPUNIT_ASSERT_EQUAL(2,(int)fieldsNameNode.size());
- CPPUNIT_ASSERT(fieldsNameNode[0]=="fieldnodedouble");
- CPPUNIT_ASSERT(fieldsNameNode[1]=="fieldnodeint");
- std::vector<std::pair<int,int> > its0Node=MEDLoader::GetNodeFieldIterations(fileName.c_str(),fieldsNameNode[0].c_str());
- CPPUNIT_ASSERT_EQUAL(3,(int)its0Node.size());
- CPPUNIT_ASSERT_EQUAL(1,its0Node[0].first);
- CPPUNIT_ASSERT_EQUAL(-1,its0Node[0].second);
- CPPUNIT_ASSERT_EQUAL(2,its0Node[1].first);
- CPPUNIT_ASSERT_EQUAL(-1,its0Node[1].second);
- CPPUNIT_ASSERT_EQUAL(-1,its0Node[2].first);//strange but like that
- CPPUNIT_ASSERT_EQUAL(-1,its0Node[2].second);
- MEDCouplingFieldDouble *field0Nodes=MEDLoader::ReadFieldDoubleNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[0].first,its0Node[0].second);
- field0Nodes->checkCoherency();
- CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]);
- CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents());
- CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples());
- const double expectedValues3[19]={1.,1.,1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.};
- double diffValue3[19];
- std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues3,diffValue3,std::minus<double>());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12);
- constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0Nodes->getMesh());
- CPPUNIT_ASSERT(constMesh);
- field0Nodes->decrRef();
- //
- field0Nodes=MEDLoader::ReadFieldDoubleNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[1].first,its0Node[1].second);
- field0Nodes->checkCoherency();
- CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]);
- CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents());
- CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples());
- const double expectedValues4[19]={1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.,7.,7.};
- std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues4,diffValue3,std::minus<double>());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12);
- constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0Nodes->getMesh());
- CPPUNIT_ASSERT(constMesh);
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
- for(int i=0;i<12;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
- for(int i=12;i<14;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
- for(int i=14;i<16;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
- CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
- field0Nodes->decrRef();
- //
- field0Nodes=MEDLoader::ReadFieldDoubleNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[2].first,its0Node[2].second);
- field0Nodes->checkCoherency();
- CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]);
- CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents());
- CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples());
- const double expectedValues5[19]={1.,1.,1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.};
- std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues5,diffValue3,std::minus<double>());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12);
- constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0Nodes->getMesh());
- CPPUNIT_ASSERT(constMesh);
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
- for(int i=0;i<12;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
- for(int i=12;i<14;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
- for(int i=14;i<16;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
- CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
- field0Nodes->decrRef();
-}
-
-void ParaMEDMEMTest::testMEDLoaderPolygonRead()
-{
- string fileName=getResourceFile("polygones.med");
- vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str());
- CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
- CPPUNIT_ASSERT(meshNames[0]=="Bord");
- MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0);
- mesh->checkCoherency();
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(538,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(579,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
- for(int i=0;i<514;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(i));
- for(int i=514;i<538;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+1737,0),1e-12);
- const double expectedVals1[12]={1.4851585216522212,-0.5,0.,1.4851585216522212,-0.4,0.,1.4851585216522212,-0.3,0., 1.5741585216522211, -0.5, 0. };
- double diffValue1[12];
- std::transform(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+12,expectedVals1,diffValue1,std::minus<double>());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue1,diffValue1+12),1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue1,diffValue1+12),1e-12);
- CPPUNIT_ASSERT_EQUAL(2768,mesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(651050,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+2768,0));
- CPPUNIT_ASSERT_EQUAL(725943,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+539,0));
- mesh->decrRef();
- //
- std::vector<std::string> fieldsName=MEDLoader::GetCellFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str());
- CPPUNIT_ASSERT_EQUAL(3,(int)fieldsName.size());
- CPPUNIT_ASSERT(fieldsName[0]=="bord_:_distorsion");
- CPPUNIT_ASSERT(fieldsName[1]=="bord_:_familles");
- CPPUNIT_ASSERT(fieldsName[2]=="bord_:_non-ortho");
- std::vector<std::pair<int,int> > its0=MEDLoader::GetCellFieldIterations(fileName.c_str(),fieldsName[0].c_str());
- CPPUNIT_ASSERT_EQUAL(1,(int)its0.size());
- MEDCouplingFieldDouble *field=MEDLoader::ReadFieldDoubleCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[0].c_str(),its0[0].first,its0[0].second);
- field->checkCoherency();
- CPPUNIT_ASSERT(field->getName()==fieldsName[0]);
- CPPUNIT_ASSERT_EQUAL(1,field->getNumberOfComponents());
- CPPUNIT_ASSERT_EQUAL(538,field->getNumberOfTuples());
- const MEDCouplingUMesh *constMesh=dynamic_cast<const MEDCouplingUMesh *>(field->getMesh());
- CPPUNIT_ASSERT(constMesh);
- CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(2,constMesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(538,constMesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(579,constMesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(2,(int)constMesh->getAllTypes().size());
- for(int i=0;i<514;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,constMesh->getTypeOfCell(i));
- for(int i=514;i<538;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,constMesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+1737,0),1e-12);
- std::transform(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+12,expectedVals1,diffValue1,std::minus<double>());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue1,diffValue1+12),1e-12);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue1,diffValue1+12),1e-12);
- CPPUNIT_ASSERT_EQUAL(2768,constMesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(651050,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+2768,0));
- CPPUNIT_ASSERT_EQUAL(725943,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+539,0));
- const double *values=field->getArray()->getPointer();
- CPPUNIT_ASSERT_DOUBLES_EQUAL(2.87214203182918,std::accumulate(values,values+538,0.),1e-12);
- field->decrRef();
-}
-
-void ParaMEDMEMTest::testMEDLoaderPolyhedronRead()
-{
- string fileName=getResourceFile("poly3D.med");
- vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str());
- CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
- CPPUNIT_ASSERT(meshNames[0]=="poly3D");
- MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0);
- mesh->checkCoherency();
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(3,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
- CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(0));
- CPPUNIT_ASSERT_EQUAL(NORM_POLYHED,mesh->getTypeOfCell(1));
- CPPUNIT_ASSERT_EQUAL(NORM_POLYHED,mesh->getTypeOfCell(2));
- CPPUNIT_ASSERT_EQUAL(98,mesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(725,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+98,0));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(110.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
- CPPUNIT_ASSERT_EQUAL(155,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+4,0));
- mesh->decrRef();
- //
- mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),-1);
- mesh->checkCoherency();
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(17,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(3,(int)mesh->getAllTypes().size());
- for(int i=0;i<6;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(i));
- for(int i=6;i<14;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(i));
- for(int i=14;i<17;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(110.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
- CPPUNIT_ASSERT_EQUAL(83,mesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(619,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+83,0));
- mesh->decrRef();
- //
- vector<string> families=MEDLoader::GetMeshFamilyNames(fileName.c_str(),meshNames[0].c_str());
- CPPUNIT_ASSERT_EQUAL(4,(int)families.size());
- CPPUNIT_ASSERT(families[0]=="FAMILLE_FACE_POLYGONS3");
- CPPUNIT_ASSERT(families[1]=="FAMILLE_FACE_QUAD41");
- CPPUNIT_ASSERT(families[2]=="FAMILLE_FACE_TRIA32");
- CPPUNIT_ASSERT(families[3]=="FAMILLE_ZERO");
- vector<string> families2;
- families2.push_back(families[0]);
- mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),-1,families2);
- mesh->checkCoherency();
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(3,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(1,(int)mesh->getAllTypes().size());
- for(int i=0;i<3;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNodalConnectivity()->getNbOfElems());
- CPPUNIT_ASSERT_EQUAL(117,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+19,0));
- mesh->decrRef();
- //
- mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),0,families2);
- CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(0,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(0,(int)mesh->getAllTypes().size());
- mesh->decrRef();
-}
-
-void ParaMEDMEMTest::testMEDLoaderWrite1()
-{
- const char meshName[]="MEDLoaderWrite1";
- string outFileName=makeTmpFile("toto22137.med");
- double targetCoords[18]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2, 0.7,0.2, -0.3,0.7, 0.2,0.7, 0.7,0.7 };
- int targetConn[18]={0,3,4,1, 1,4,2, 4,5,2, 6,7,4,3, 7,8,5,4};
- MEDCouplingUMesh *mesh=MEDCouplingUMesh::New();
- mesh->setMeshDimension(2);
- mesh->allocateCells(5);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn);
- mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+4);
- mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+7);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn+10);
- mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn+14);
- mesh->finishInsertingCells();
- DataArrayDouble *myCoords=DataArrayDouble::New();
- myCoords->alloc(9,2);
- std::copy(targetCoords,targetCoords+18,myCoords->getPointer());
- mesh->setCoords(myCoords);
- myCoords->decrRef();
- mesh->checkCoherency();
- CPPUNIT_ASSERT_EQUAL(2,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(5,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(9,mesh->getNumberOfNodes());
- bool normalThrow=false;
- try
- {
- MEDLoader::writeUMesh(outFileName.c_str(),mesh);
- }
- catch(INTERP_KERNEL::Exception& e)
- {
- normalThrow=true;
- }
- CPPUNIT_ASSERT(normalThrow);
- mesh->setName(meshName);
- MEDLoader::writeUMesh(outFileName.c_str(),mesh);
- mesh->decrRef();
- //
- mesh=MEDLoader::ReadUMeshFromFile(outFileName.c_str(),meshName,0);
- CPPUNIT_ASSERT_EQUAL(2,mesh->getSpaceDimension());
- CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
- CPPUNIT_ASSERT_EQUAL(5,mesh->getNumberOfCells());
- CPPUNIT_ASSERT_EQUAL(9,mesh->getNumberOfNodes());
- CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
- for(int i=0;i<2;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(i));
- for(int i=2;i<5;i++)
- CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(i));
- CPPUNIT_ASSERT_DOUBLES_EQUAL(3.6,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+18,0.),1.e-12);
- mesh->decrRef();
-}
-
-void ParaMEDMEMTest::testMEDLoaderPolygonWrite()
-{
- string fileName=getResourceFile("polygones.med");
- vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str());
- CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
- CPPUNIT_ASSERT(meshNames[0]=="Bord");
- MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0);
- mesh->checkCoherency();
- string outFileName=makeTmpFile("toto22138.med");
- MEDLoader::writeUMesh(outFileName.c_str(),mesh);
- //
- MEDCouplingUMesh *mesh2=MEDLoader::ReadUMeshFromFile(outFileName.c_str(),meshNames[0].c_str(),0);
- //
- mesh2->decrRef();
- mesh->decrRef();
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "InterpolationUtils.hxx"
-
-#include <string>
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-/*
- * Check methods defined in MPPIProcessorGroup.hxx
- *
- (+) MPIProcessorGroup(const CommInterface& interface);
- (+) MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids);
- (u) MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids);
- (+) MPIProcessorGroup(const CommInterface& interface,int pstart, int pend);
- (+) virtual ~MPIProcessorGroup();
- (+) virtual ProcessorGroup* fuse (const ProcessorGroup&) const;
- (u) void intersect (ProcessorGroup&){};
- (+) int myRank() const {int rank; MPI_Comm_rank(_comm,&rank); return rank;}
- (+) bool containsMyRank() const { int rank; MPI_Group_rank(_group, &rank); return (rank!=MPI_UNDEFINED);}
- (+) int translateRank(const ProcessorGroup* group, int rank) const;
- (+) const MPI_Comm* getComm() const {return &_comm;}
- (+) ProcessorGroup* createComplementProcGroup() const;
- (o) ProcessorGroup* createProcGroup() const;
-
-*/
-
-void ParaMEDMEMTest::testMPIProcessorGroup_constructor()
-{
- CommInterface comm_interface;
- MPIProcessorGroup* group= new MPIProcessorGroup(comm_interface);
- int size;
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- CPPUNIT_ASSERT_EQUAL(size,group->size());
- int size2;
- const MPI_Comm* communicator=group->getComm();
- MPI_Comm_size(*communicator, &size2);
- CPPUNIT_ASSERT_EQUAL(size,size2);
- delete group;
-
- set <int> procs;
-
- procs.insert(0);
- procs.insert(1);
- if (size==1)
- CPPUNIT_ASSERT_THROW(group=new MPIProcessorGroup(comm_interface,procs),INTERP_KERNEL::Exception);
- else
- {
- CPPUNIT_ASSERT_NO_THROW( group=new MPIProcessorGroup(comm_interface,procs));
- CPPUNIT_ASSERT_EQUAL (group->size(),2);
- delete group;
- }
-
-
- //throws because plast<pfirst
- CPPUNIT_ASSERT_THROW(group=new MPIProcessorGroup(comm_interface,1,0),INTERP_KERNEL::Exception);
- //throws because plast is beyond size-1
- CPPUNIT_ASSERT_THROW(group=new MPIProcessorGroup(comm_interface,0,size),INTERP_KERNEL::Exception);
- if (size>1)
- {
- group=new MPIProcessorGroup(comm_interface,0,size-2);
- CPPUNIT_ASSERT_EQUAL(group->size(),size-1);
- delete group;
- }
-
-}
-
-void ParaMEDMEMTest::testMPIProcessorGroup_boolean()
-{
- int size;
- MPI_Comm_size(MPI_COMM_WORLD, &size);
-
- CommInterface comm_interface;
- MPIProcessorGroup group(comm_interface,0,0);
- MPIProcessorGroup group2(comm_interface,size-1,size-1);
- ProcessorGroup* group_fuse=group.fuse(group2);
- int group_fuse_size=(size==1)?1:2;
- CPPUNIT_ASSERT_EQUAL(group_fuse_size,group_fuse->size());
-
- ProcessorGroup* group_complement=((MPIProcessorGroup*)group_fuse)->createComplementProcGroup();
- CPPUNIT_ASSERT_EQUAL(group_complement->size(),size-group_fuse_size);
-
- delete group_fuse;
- delete group_complement;
-
- //intersect not implemented yet
- // if (size>1)
- // {
- // MPIProcessorGroup group3(comm_interface,0,size-2);
- // MPIProcessorGroup group4(comm_interface,1,size-1);
- // group3.intersect(group4);
- // CPPUNIT_ASSERT_EQUAL(group3.size(),size-2);
- // }
-}
-
-void ParaMEDMEMTest::testMPIProcessorGroup_rank()
-{
- int size;
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- int rank;
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
- CommInterface comm_interface;
- MPIProcessorGroup group(comm_interface,0,0);
- MPIProcessorGroup group2(comm_interface,size-1,size-1);
- ProcessorGroup* group_fuse=group2.fuse(group);
-
- if (group.containsMyRank())
- CPPUNIT_ASSERT_EQUAL (group.myRank(), rank);
-
- if (group2.containsMyRank())
- {
- int trank=group_fuse->translateRank(&group2,0);
- if (size==1)
- CPPUNIT_ASSERT_EQUAL(trank,0);
- else
- CPPUNIT_ASSERT_EQUAL(trank,1);
- }
- delete group_fuse;
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include "MEDMEM_Exception.hxx"
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "Topology.hxx"
-#include "DEC.hxx"
-#include "NonCoincidentDEC.hxx"
-#include "ParaMESH.hxx"
-#include "ParaFIELD.hxx"
-#include "UnstructuredParaSUPPORT.hxx"
-#include "ICoCoMEDField.hxx"
-
-#include <string>
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-
-using namespace std;
-using namespace ParaMEDMEM;
-using namespace MEDMEM;
-
-/*
- * Check methods defined in InterpKernelDEC.hxx
- *
- InterpKernelDEC();
- InterpKernelDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group);
- virtual ~InterpKernelDEC();
- void synchronize();
- void recvData();
- void sendData();
-*/
-
-void ParaMEDMEMTest::testNonCoincidentDEC_2D()
-{
-
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
-
- //the test is meant to run on five processors
- if (size !=5) return ;
-
- testNonCoincidentDEC( "/share/salome/resources/med/square1_split",
- "Mesh_2",
- "/share/salome/resources/med/square2_split",
- "Mesh_3",
- 3,
- 1e-6);
-}
-
-void ParaMEDMEMTest::testNonCoincidentDEC_3D()
-{
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
-
- //the test is meant to run on five processors
- if (size !=4) return ;
-
- testNonCoincidentDEC( "/share/salome/resources/med/blade_12000_split2",
- "Mesh_1",
- "/share/salome/resources/med/blade_3000_split2",
- "Mesh_1",
- 2,
- 1e4);
-}
-
-void ParaMEDMEMTest::testNonCoincidentDEC(const string& filename1,
- const string& meshname1,
- const string& filename2,
- const string& meshname2,
- int nproc_source,
- double epsilon)
-{
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
-
- ParaMEDMEM::ParaMESH* source_mesh=0;
- ParaMEDMEM::ParaMESH* target_mesh=0;
- ParaMEDMEM::ParaSUPPORT* parasupport=0;
- //loading the geometry for the source group
-
- ParaMEDMEM::NonCoincidentDEC dec (*source_group,*target_group);
-
- MEDMEM::MESH* mesh;
- MEDMEM::SUPPORT* support;
- MEDMEM::FIELD<double>* field;
- ParaMEDMEM::ParaMESH* paramesh;
- ParaMEDMEM::ParaFIELD* parafield;
-
- string filename_xml1 = getResourceFile(filename1);
- string filename_xml2 = getResourceFile(filename2);
- //string filename_seq_wr = makeTmpFile("");
- //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
-
- // To remove tmp files from disk
- ParaMEDMEMTest_TmpFilesRemover aRemover;
- //aRemover.Register(filename_seq_wr);
- //aRemover.Register(filename_seq_med);
- MPI_Barrier(MPI_COMM_WORLD);
- ICoCo::Field* icocofield;
- if (source_group->containsMyRank())
- {
- string master = filename_xml1;
-
- ostringstream strstream;
- strstream <<master<<rank+1<<".med";
- ostringstream meshname ;
- meshname<< meshname1<<"_"<< rank+1;
-
- CPPUNIT_ASSERT_NO_THROW(mesh = new MESH(MED_DRIVER,strstream.str(),meshname.str()));
- support=new MEDMEM::SUPPORT(mesh,"all elements",MED_EN::MED_CELL);
-
- paramesh=new ParaMESH (*mesh,*source_group,"source mesh");
-
- parasupport=new UnstructuredParaSUPPORT( support,*source_group);
- ParaMEDMEM::ComponentTopology comptopo;
- parafield = new ParaFIELD(parasupport, comptopo);
-
-
- int nb_local=support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
- double * value= new double[nb_local];
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=1.0;
- parafield->getField()->setValue(value);
-
- icocofield=new ICoCo::MEDField(paramesh,parafield);
-
- dec.attachLocalField(icocofield);
- delete [] value;
- }
-
- //loading the geometry for the target group
- if (target_group->containsMyRank())
- {
- string master= filename_xml2;
- ostringstream strstream;
- strstream << master<<(rank-nproc_source+1)<<".med";
- ostringstream meshname ;
- meshname<< meshname2<<"_"<<rank-nproc_source+1;
-
- CPPUNIT_ASSERT_NO_THROW(mesh = new MESH(MED_DRIVER,strstream.str(),meshname.str()));
- support=new MEDMEM::SUPPORT(mesh,"all elements",MED_EN::MED_CELL);
-
- paramesh=new ParaMESH (*mesh,*target_group,"target mesh");
- parasupport=new UnstructuredParaSUPPORT(support,*target_group);
- ParaMEDMEM::ComponentTopology comptopo;
- parafield = new ParaFIELD(parasupport, comptopo);
-
-
- int nb_local=support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
- double * value= new double[nb_local];
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=0.0;
- parafield->getField()->setValue(value);
- icocofield=new ICoCo::MEDField(paramesh,parafield);
-
- dec.attachLocalField(icocofield);
- delete [] value;
- }
-
-
- //attaching a DEC to the source group
- double field_before_int;
- double field_after_int;
-
- if (source_group->containsMyRank())
- {
- field_before_int = parafield->getVolumeIntegral(1);
- MPI_Bcast(&field_before_int, 1,MPI_DOUBLE, 0,MPI_COMM_WORLD);
- dec.synchronize();
- cout<<"DEC usage"<<endl;
- dec.setOption("ForcedRenormalization",false);
-
- dec.sendData();
- // paramesh->write(MED_DRIVER,"./sourcesquarenc");
- //parafield->write(MED_DRIVER,"./sourcesquarenc","boundary");
-
-
- }
-
- //attaching a DEC to the target group
- if (target_group->containsMyRank())
- {
- MPI_Bcast(&field_before_int, 1,MPI_DOUBLE, 0,MPI_COMM_WORLD);
-
- dec.synchronize();
- dec.setOption("ForcedRenormalization",false);
- dec.recvData();
- //paramesh->write(MED_DRIVER, "./targetsquarenc");
- //parafield->write(MED_DRIVER, "./targetsquarenc", "boundary");
- field_after_int = parafield->getVolumeIntegral(1);
-
- }
- MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
- MPI_Bcast(&field_after_int, 1,MPI_DOUBLE, size-1,MPI_COMM_WORLD);
-
- CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, epsilon);
-
- delete source_group;
- delete target_group;
- delete self_group;
- delete icocofield;
- delete paramesh;
- delete parafield;
- delete support;
- delete parasupport;
- delete mesh;
- MPI_Barrier(MPI_COMM_WORLD);
-
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "Topology.hxx"
-#include "DEC.hxx"
-#include "StructuredCoincidentDEC.hxx"
-#include "ParaMESH.hxx"
-#include "ParaFIELD.hxx"
-#include "ComponentTopology.hxx"
-#include "ICoCoMEDField.hxx"
-#include "MEDLoader.hxx"
-
-#include <string>
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-/*
- * Check methods defined in StructuredCoincidentDEC.hxx
- *
- StructuredCoincidentDEC();
- StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group);
- virtual ~StructuredCoincidentDEC();
- void synchronize();
- void recvData();
- void sendData();
-*/
-
-void ParaMEDMEMTest::testStructuredCoincidentDEC() {
- string testname="ParaMEDMEM - testStructured CoincidentDEC";
- // MPI_Init(&argc, &argv);
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- if (size<4) {
- return;
- }
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::MPIProcessorGroup self_group (interface,rank,rank);
- ParaMEDMEM::MPIProcessorGroup target_group(interface,3,size-1);
- ParaMEDMEM::MPIProcessorGroup source_group (interface,0,2);
-
- ParaMEDMEM::MEDCouplingUMesh* mesh;
- ParaMEDMEM::ParaMESH* paramesh;
- ParaMEDMEM::ParaFIELD* parafield;
-
- string filename_xml1 = getResourceFile("square1_split");
- string filename_2 = getResourceFile("square1.med");
- //string filename_seq_wr = makeTmpFile("");
- //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
-
- // To remove tmp files from disk
- ParaMEDMEMTest_TmpFilesRemover aRemover;
-
- //loading the geometry for the source group
-
- ParaMEDMEM::StructuredCoincidentDEC dec(source_group, target_group);
-
- MPI_Barrier(MPI_COMM_WORLD);
- if (source_group.containsMyRank()) {
- string master = filename_xml1;
-
- ostringstream strstream;
- strstream <<master<<rank+1<<".med";
- ostringstream meshname;
- meshname<< "Mesh_2_"<< rank+1;
-
- mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
-
-
- paramesh=new ParaMESH (mesh,source_group,"source mesh");
-
- ParaMEDMEM::ComponentTopology comptopo(6);
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
-
- int nb_local=mesh->getNumberOfCells();
- const int* global_numbering = paramesh->getGlobalNumberingCell();
-
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- for (int icomp=0; icomp<6; icomp++)
- value[ielem*6+icomp]=global_numbering[ielem]*6+icomp;
-
- //ICoCo::Field* icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
-
- dec.attachLocalField(parafield);
- dec.synchronize();
- dec.sendData();
- //delete icocofield;
- }
-
- //loading the geometry for the target group
- if (target_group.containsMyRank()) {
-
- string meshname2("Mesh_2");
- mesh = MEDLoader::ReadUMeshFromFile(filename_2.c_str(),meshname2.c_str(),0);
-
- paramesh=new ParaMESH (mesh,self_group,"target mesh");
- ParaMEDMEM::ComponentTopology comptopo(6, &target_group);
-
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
-
- int nb_local=mesh->getNumberOfCells();
- double *value=parafield->getField()->getArray()->getPointer();
- for (int ielem=0; ielem<nb_local; ielem++)
- for (int icomp=0; icomp<comptopo.nbLocalComponents(); icomp++)
- value[ielem*comptopo.nbLocalComponents()+icomp]=0.0;
- //ICoCo::Field* icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
-
- dec.attachLocalField(parafield);
- dec.synchronize();
- dec.recvData();
-
- //checking validity of field
- const double* recv_value = parafield->getField()->getArray()->getPointer();
- for (int i=0; i< nb_local; i++) {
- int first = comptopo.firstLocalComponent();
- for (int icomp = 0; icomp < comptopo.nbLocalComponents(); icomp++)
- CPPUNIT_ASSERT_DOUBLES_EQUAL(recv_value[i*comptopo.nbLocalComponents()+icomp],(double)(i*6+icomp+first),1e-12);
- }
- //delete icocofield;
- }
- delete parafield;
- delete paramesh;
- mesh->decrRef();
-
- // MPI_Barrier(MPI_COMM_WORLD);
-
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-// --- include all MPIAccess Test
-
-#include "MPIAccessTest.hxx"
-
-// --- Registers the fixture into the 'registry'
-
-CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessTest );
-
-// --- generic Main program from KERNEL_SRC/src/Basics/Test
-
-#include "MPIMainTest.hxx"
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-// --- include all MPIAccessDEC Test
-
-#include "MPIAccessDECTest.hxx"
-
-// --- Registers the fixture into the 'registry'
-
-CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessDECTest );
-
-// --- generic Main program from KERNEL_SRC/src/Basics/Test
-
-#include "MPIMainTest.hxx"
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-
-// --- include all MEDMEM Test
-
-#include "ParaMEDMEMTest.hxx"
-
-// --- Registers the fixture into the 'registry'
-
-CPPUNIT_TEST_SUITE_REGISTRATION( ParaMEDMEMTest );
-
-// --- generic Main program from KERNEL_SRC/src/Basics/Test
-
-#include "MPIMainTest.hxx"
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-#include "MPIAccessDEC.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessDECTest::test_AllToAllDECSynchronousPointToPoint() {
- test_AllToAllDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllDECAsynchronousPointToPoint() {
- test_AllToAllDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "===========================================================" << endl
- << "test_AllToAllDEC" << myrank << " KO" << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) {
-
- cout << "test_AllToAllDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be runned with more than 1 proc and less than 12 procs"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_AllToAllDEC" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
- ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
-
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
-
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
-#define maxreq 100
-#define datamsglength 10
-
- // int sts ;
- int sendcount = datamsglength ;
- int recvcount = datamsglength ;
- int * recvbuf = new int[datamsglength*size] ;
-
- int ireq ;
- for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
- int * sendbuf = new int[datamsglength*size] ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- MyMPIAccessDEC->allToAll( sendbuf, sendcount , MPI_INT ,
- recvbuf, recvcount , MPI_INT ) ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- }
-
- int nSendReq = mpi_access->sendRequestIdsSize() ;
- cout << "test_AllToAllDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
- << endl ;
- if ( nSendReq ) {
- int *ArrayOfSendRequests = new int[nSendReq] ;
- int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
- delete [] ArrayOfSendRequests ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- mpi_access->barrier() ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
- delete [] recvbuf ;
-
- // MPI_Finalize();
-
- cout << "test_AllToAllDEC" << myrank << " OK" << endl ;
-
- return ;
-}
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-#include "LinearTimeInterpolator.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessDECTest::test_AllToAllTimeDECSynchronousPointToPoint() {
- test_AllToAllTimeDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllTimeDECAsynchronousPointToPoint() {
- test_AllToAllTimeDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- cout << "test_AllToAllTimeDEC" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test_AllToAllTimeDEC" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
-
- cout << "test_AllToAllTimeDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be runned with more than 1 proc and less than 12 procs"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // int Asynchronous = atoi(argv[1]);
-
- cout << "test_AllToAllTimeDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
-
- ParaMEDMEM::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
- ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
-
- // LinearTimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
- // Asynchronous , LinearInterp , 0.5 ) ;
- MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
- cout << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- cout << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
-
-#define maxproc 11
-#define maxreq 10000
-#define datamsglength 10
-
- int sts ;
- int sendcount = datamsglength ;
- int recvcount = datamsglength ;
-
- double time = 0 ;
- // double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
- double deltatime[maxproc] = {1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.} ;
- double maxtime = maxreq ;
- double nextdeltatime = deltatime[myrank] ;
- // MyMPIAccessDEC->InitTime( time , deltatime[myrank] , maxtime ) ;
- // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
- for ( time = 0 ; time <= maxtime && nextdeltatime != 0 ; time+=nextdeltatime ) {
- if ( time != 0 ) {
- nextdeltatime = deltatime[myrank] ;
- if ( time+nextdeltatime > maxtime ) {
- nextdeltatime = 0 ;
- }
- // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
- }
- MyMPIAccessDEC->setTime( time , nextdeltatime ) ;
- cout << "test_AllToAllTimeDEC" << myrank << "=====TIME " << time << "=====DELTATIME "
- << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
- int * sendbuf = new int[datamsglength*size] ;
- // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
- int * recvbuf = new int[datamsglength*size] ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- int sts = MyMPIAccessDEC->allToAllTime( sendbuf, sendcount , MPI_INT ,
- recvbuf, recvcount , MPI_INT ) ;
- chksts( sts , myrank , mpi_access ) ;
-
- // cout << "test_AllToAllTimeDEC" << myrank << " recvbuf before CheckSent" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // cout << " " << recvbuf[i] ;
- // }
- // cout << endl ;
-
- // cout << "test_AllToAllTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq != 0 ) {
- ostringstream strstream ;
- strstream << "=============================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
- << endl << "============================================================="
- << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // cout << "test_AllToAllTimeDEC" << myrank << " recvbuf" << endl ;
- bool badrecvbuf = false ;
- for ( i = 0 ; i < datamsglength*size ; i++ ) {
- if ( recvbuf[i] != (i/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(i%datamsglength) ) {
- badrecvbuf = true ;
- cout << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
- << recvbuf[i] << " # " << (i/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(i%datamsglength) << endl ;
- }
- else if ( badrecvbuf ) {
- cout << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
- << recvbuf[i] << " == " << (i/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(i%datamsglength) << endl ;
- }
- }
- if ( badrecvbuf ) {
- ostringstream strstream ;
- strstream << "==============================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " badrecvbuf"
- << endl << "============================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- delete [] recvbuf ;
- }
-
- cout << "test_AllToAllTimeDEC" << myrank << " final CheckSent" << endl ;
- sts = MyMPIAccessDEC->checkSent() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "================================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " final CheckSent ERROR"
- << endl << "================================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- int nSendReq = mpi_access->sendRequestIdsSize() ;
- cout << "test_AllToAllTimeDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
- << endl ;
- if ( nSendReq ) {
- int *ArrayOfSendRequests = new int[nSendReq] ;
- int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
- delete [] ArrayOfSendRequests ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "===============================================================" << endl
- << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error"
- << endl << "==============================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- cout << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- cout << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
-
- delete sourcegroup ;
- delete targetgroup ;
- // delete aLinearInterpDEC ;
- delete MyMPIAccessDEC ;
-
- // MPI_Finalize();
-
- cout << "test_AllToAllTimeDEC" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessDECTest::test_AllToAllvDECSynchronousPointToPoint() {
- test_AllToAllvDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllvDECAsynchronousPointToPoint() {
- test_AllToAllvDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test_AllToAllvDEC" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test_AllToAllvDEC" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) {
-
- cout << "test_AllToAllvDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllvDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be runned with more than 1 proc and less than 12 procs"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // int Asynchronous = atoi(argv[1]);
-
- cout << "test_AllToAllvDEC" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
- ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
-
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
-
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
-#define maxreq 100
-#define datamsglength 10
-
- // int sts ;
- int *sendcounts = new int[size] ;
- int *sdispls = new int[size] ;
- int *recvcounts = new int[size] ;
- int *rdispls = new int[size] ;
- for ( i = 0 ; i < size ; i++ ) {
- sendcounts[i] = datamsglength-i;
- sdispls[i] = i*datamsglength ;
- recvcounts[i] = datamsglength-myrank;
- rdispls[i] = i*datamsglength ;
- }
- int * recvbuf = new int[datamsglength*size] ;
-
- int ireq ;
- for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
- int * sendbuf = new int[datamsglength*size] ;
- // int * sendbuf = (int *) malloc( sizeof(int)*datamsglength*size) ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- MyMPIAccessDEC->allToAllv( sendbuf, sendcounts , sdispls , MPI_INT ,
- recvbuf, recvcounts , rdispls , MPI_INT ) ;
-
- // cout << "test_AllToAllvDEC" << myrank << " recvbuf before CheckSent" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // cout << " " << recvbuf[i] ;
- // }
- // cout << endl ;
-
- // cout << "test_AllToAllvDEC" << myrank << " sendbuf " << sendbuf << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- // cout << "test_AllToAllvDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests" << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
-
- // cout << "test_AllToAllvDEC" << myrank << " recvbuf" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // cout << " " << recvbuf[i] ;
- // }
- // cout << endl ;
- }
-
- // cout << "test_AllToAllvDEC" << myrank << " final CheckSent" << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nSendReq = mpi_access->sendRequestIdsSize() ;
- cout << "test_AllToAllvDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
- << endl ;
- if ( nSendReq ) {
- int *ArrayOfSendRequests = new int[nSendReq] ;
- int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
- delete [] ArrayOfSendRequests ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- mpi_access->barrier() ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
- delete [] sendcounts ;
- delete [] sdispls ;
- delete [] recvcounts ;
- delete [] rdispls ;
- delete [] recvbuf ;
-
- // MPI_Finalize();
-
- cout << "test_AllToAllvDEC" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-#include <time.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-#include "LinearTimeInterpolator.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousNative() {
- test_AllToAllvTimeDEC( false , true ) ;
-}
-void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousPointToPoint() {
- test_AllToAllvTimeDEC( false , false ) ;
-}
-void MPIAccessDECTest::test_AllToAllvTimeDECAsynchronousPointToPoint() {
- test_AllToAllvTimeDEC( true , false ) ;
-}
-
-static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- cout << "test_AllToAllvTimeDEC" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test_AllToAllvTimeDEC" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) {
-
- cout << "test_AllToAllvTimeDEC" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be runned with more than 1 proc and less than 12 procs"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // int Asynchronous = atoi(argv[1]) ;
- int UseMPI_Alltoallv = UseMPINative ;
- // if ( argc == 3 ) {
- // UseMPI_Alltoallv = atoi(argv[2]) ;
- // }
-
- cout << "test_AllToAllvTimeDEC" << myrank << " Asynchronous " << Asynchronous
- << " UseMPI_Alltoallv " << UseMPI_Alltoallv << endl ;
-
- ParaMEDMEM::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
- ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
-
- // TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
- // Asynchronous , LinearInterp , 0.5 ) ;
- MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp , 0.5 ) ;
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
- cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
-
-#define maxproc 11
-#define maxreq 10000
-#define datamsglength 10
-
- int sts ;
- int *sendcounts = new int[size] ;
- int *sdispls = new int[size] ;
- int *recvcounts = new int[size] ;
- int *rdispls = new int[size] ;
- int *sendtimecounts = new int[size] ;
- int *stimedispls = new int[size] ;
- int *recvtimecounts = new int[size] ;
- int *rtimedispls = new int[size] ;
- for ( i = 0 ; i < size ; i++ ) {
- sendcounts[i] = datamsglength-i ;
- sdispls[i] = i*datamsglength ;
- recvcounts[i] = datamsglength-myrank ;
- rdispls[i] = i*datamsglength ;
- sendtimecounts[i] = 1 ;
- stimedispls[i] = 0 ;
- recvtimecounts[i] = 1 ;
- rtimedispls[i] = i ;
- //rtimedispls[i] = i*mpi_access->TimeExtent() ;
- }
-
- double timeLoc = 0 ;
- double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
- double maxtime ;
- double nextdeltatime = deltatime[myrank] ;
- if ( UseMPI_Alltoallv ) {
- maxtime = maxreq*nextdeltatime - 0.1 ;
- }
- else {
- maxtime = maxreq ;
- // MyMPIAccessDEC->InitTime( time , nextdeltatime , maxtime ) ;
- }
- time_t begintime = time(NULL) ;
- // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
- for ( timeLoc = 0 ; timeLoc <= maxtime && nextdeltatime != 0 ; timeLoc+=nextdeltatime ) {
- nextdeltatime = deltatime[myrank] ;
- if ( timeLoc != 0 ) {
- nextdeltatime = deltatime[myrank] ;
- if ( timeLoc+nextdeltatime > maxtime ) {
- nextdeltatime = 0 ;
- }
- // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
- }
- MyMPIAccessDEC->setTime( timeLoc , nextdeltatime ) ;
- cout << "test_AllToAllvTimeDEC" << myrank << "=====TIME " << time << "=====DELTATIME "
- << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
- int * sendbuf = new int[datamsglength*size] ;
- // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
- int * recvbuf = new int[datamsglength*size] ;
- int j ;
- for ( j = 0 ; j < datamsglength*size ; j++ ) {
- sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
- recvbuf[j] = -1 ;
- }
-
- if ( UseMPI_Alltoallv ) {
- const MPI_Comm* comm = MyMPIAccessDEC->getComm();
- TimeMessage * aSendTimeMessage = new TimeMessage ;
- aSendTimeMessage->time = timeLoc ;
- // aSendTimeMessage->deltatime = deltatime[myrank] ;
- aSendTimeMessage->deltatime = nextdeltatime ;
- // aSendTimeMessage->maxtime = maxtime ;
- aSendTimeMessage->tag = (int ) (timeLoc/deltatime[myrank]) ;
- TimeMessage * aRecvTimeMessage = new TimeMessage[size] ;
- interface.allToAllV(aSendTimeMessage, sendtimecounts , stimedispls ,
- mpi_access->timeType() ,
- aRecvTimeMessage, recvtimecounts , rtimedispls ,
- mpi_access->timeType() , *comm ) ;
- // for ( j = 0 ; j < size ; j++ ) {
- // cout << "test_AllToAllvTimeDEC" << myrank << " TimeMessage received " << j << " "
- // << aRecvTimeMessage[j] << endl ;
- // }
- delete aSendTimeMessage ;
- delete [] aRecvTimeMessage ;
- interface.allToAllV(sendbuf, sendcounts , sdispls , MPI_INT ,
- recvbuf, recvcounts , rdispls , MPI_INT , *comm ) ;
- // free(sendbuf) ;
- delete [] sendbuf ;
- }
- else {
- int sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_INT ,
- recvbuf, recvcounts , rdispls , MPI_INT ) ;
- chksts( sts , myrank , mpi_access ) ;
- }
-
- // cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf before CheckSent" ;
- // for ( i = 0 ; i < datamsglength*size ; i++ ) {
- // cout << " " << recvbuf[i] ;
- // }
- // cout << endl ;
-
- // cout << "test_AllToAllvTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
- // MyMPIAccessDEC->CheckSent() ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq != 0 ) {
- ostringstream strstream ;
- strstream << "=============================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
- << endl << "============================================================="
- << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- // cout << "test_AllToAllvTimeDEC" << myrank << " check of recvbuf" << endl ;
- bool badrecvbuf = false ;
- for ( i = 0 ; i < size ; i++ ) {
- int j ;
- for ( j = 0 ; j < datamsglength ; j++ ) {
- int index = i*datamsglength+j ;
- if ( j < recvcounts[i] ) {
- if ( recvbuf[index] != (index/datamsglength)*1000000 + myrank*1000 +
- myrank*datamsglength+(index%datamsglength) ) {
- badrecvbuf = true ;
- cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " # " << (index/datamsglength)*1000000 +
- myrank*1000 +
- myrank*datamsglength+(index%datamsglength) << endl ;
- }
- else if ( badrecvbuf ) {
- cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " == " << (index/datamsglength)*1000000 +
- myrank*1000 +
- myrank*datamsglength+(index%datamsglength) << endl ;
- }
- }
- else if ( recvbuf[index] != -1 ) {
- badrecvbuf = true ;
- cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " # -1" << endl ;
- }
- }
- }
- if ( badrecvbuf ) {
- ostringstream strstream ;
- strstream << "==============================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " badrecvbuf"
- << endl << "============================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- delete [] recvbuf ;
- }
-
- cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
-
- cout << "test_AllToAllvTimeDEC" << myrank << " CheckFinalSent" << endl ;
- sts = MyMPIAccessDEC->checkFinalSent() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "================================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " final CheckSent ERROR"
- << endl << "================================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv" << endl ;
- sts = MyMPIAccessDEC->checkFinalRecv() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "================================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv ERROR"
- << endl << "================================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "===============================================================" << endl
- << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error"
- << endl << "==============================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- time_t endtime = time(NULL) ;
- cout << "test_AllToAllvTimeDEC" << myrank << " begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
- cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
- // delete aLinearInterpDEC ;
-
- delete [] sendcounts ;
- delete [] sdispls ;
- delete [] recvcounts ;
- delete [] rdispls ;
- delete [] sendtimecounts ;
- delete [] stimedispls ;
- delete [] recvtimecounts ;
- delete [] rtimedispls ;
-
- // MPI_Finalize();
-
- endtime = time(NULL) ;
-
- cout << "test_AllToAllvTimeDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <math.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-#include <time.h>
-
-#include "MPIAccessDECTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccessDEC.hxx"
-#include "LinearTimeInterpolator.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessDECTest::test_AllToAllvTimeDoubleDECSynchronousPointToPoint() {
- test_AllToAllvTimeDoubleDEC( false ) ;
-}
-void MPIAccessDECTest::test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() {
- test_AllToAllvTimeDoubleDEC( true ) ;
-}
-
-static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- return ;
-}
-
-void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
-
- cout << "test_AllToAllvTimeDoubleDEC" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 || size > 11 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
- << " (nbprocs >=2)" << endl
- << "test must be runned with more than 1 proc and less than 12 procs"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
-// int Asynchronous = atoi(argv[1]) ;
-
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
-
- ParaMEDMEM::CommInterface interface ;
- std::set<int> sourceprocs;
- std::set<int> targetprocs;
- int i ;
- for ( i = 0 ; i < size/2 ; i++ ) {
- sourceprocs.insert(i);
- }
- for ( i = size/2 ; i < size ; i++ ) {
- targetprocs.insert(i);
- }
-
- ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
- ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
-
-// TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0 ) ;
- MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
- Asynchronous ) ;
-// Asynchronous , LinearInterp , 0.5 ) ;
- MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
- MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
-
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
-#define maxproc 11
-#define maxreq 100
-#define datamsglength 10
-
- int sts ;
- int *sendcounts = new int[size] ;
- int *sdispls = new int[size] ;
- int *recvcounts = new int[size] ;
- int *rdispls = new int[size] ;
- int *sendtimecounts = new int[size] ;
- int *stimedispls = new int[size] ;
- int *recvtimecounts = new int[size] ;
- int *rtimedispls = new int[size] ;
- for ( i = 0 ; i < size ; i++ ) {
- sendcounts[i] = datamsglength-i ;
- sdispls[i] = i*datamsglength ;
- recvcounts[i] = datamsglength-myrank ;
- rdispls[i] = i*datamsglength ;
- sendtimecounts[i] = 1 ;
- stimedispls[i] = 0 ;
- recvtimecounts[i] = 1 ;
- rtimedispls[i] = i ;
- }
-
- double timeLoc[maxproc] ;
- double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
- double maxtime[maxproc] ;
- double nextdeltatime[maxproc] ;
- for ( i = 0 ; i < size ; i++ ) {
- timeLoc[i] = 0 ;
- maxtime[i] = maxreq ;
- nextdeltatime[i] = deltatime[i] ;
- }
- time_t begintime = time(NULL) ;
- for ( timeLoc[myrank] = 0 ; timeLoc[myrank] <= maxtime[myrank] && nextdeltatime[myrank] != 0 ;
- timeLoc[myrank]+=nextdeltatime[myrank] ) {
-//local and target times
- int target ;
- for ( target = 0 ; target < size ; target++ ) {
- nextdeltatime[target] = deltatime[target] ;
- if ( timeLoc[target] != 0 ) {
- if ( timeLoc[target]+nextdeltatime[target] > maxtime[target] ) {
- nextdeltatime[target] = 0 ;
- }
- }
- if ( target != myrank ) {
- while ( timeLoc[myrank] >= timeLoc[target] ) {
- timeLoc[target] += deltatime[target] ;
- }
- }
- }
- MyMPIAccessDEC->setTime( timeLoc[myrank] , nextdeltatime[myrank] ) ;
- cout << "test" << myrank << "=====TIME " << timeLoc[myrank] << "=====DELTATIME "
- << nextdeltatime[myrank] << "=====MAXTIME " << maxtime[myrank] << " ======"
- << endl ;
- double * sendbuf = new double[datamsglength*size] ;
-// double * sendbuf = (double *) malloc(sizeof(double)*datamsglength*size) ;
- double * recvbuf = new double[datamsglength*size] ;
- int j ;
- //cout << "test_AllToAllvTimeDoubleDEC" << myrank << " sendbuf" ;
- for ( target = 0 ; target < size ; target++ ) {
- for ( j = 0 ; j < datamsglength ; j++ ) {
- //sendbuf[j] = myrank*10000 + (j/datamsglength)*100 + j ;
- sendbuf[target*datamsglength+j] = myrank*1000000 + target*10000 +
- (timeLoc[myrank]/deltatime[myrank])*100 + j ;
- //cout << " " << (int ) sendbuf[target*datamsglength+j] ;
- recvbuf[target*datamsglength+j] = -1 ;
- }
- //cout << endl ;
- }
-
- int sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_DOUBLE ,
- recvbuf, recvcounts , rdispls , MPI_DOUBLE ) ;
- chksts( sts , myrank , mpi_access ) ;
-
-// cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf before CheckSent" ;
-// for ( i = 0 ; i < datamsglength*size ; i++ ) {
-// cout << " " << recvbuf[i] ;
-// }
-// cout << endl ;
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq != 0 ) {
- ostringstream strstream ;
- strstream << "=============================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " WaitAllRecv "
- << nRecvReq << " Requests # 0 ERROR"
- << endl << "============================================================"
- << endl ;
- int *ArrayOfRecvRequests = new int[nRecvReq] ;
- int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
- mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
- delete [] ArrayOfRecvRequests ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
-// cout << "test_AllToAllvTimeDoubleDEC" << myrank << " check of recvbuf" << endl ;
- bool badrecvbuf = false ;
- for ( target = 0 ; target < size ; target++ ) {
- int j ;
- for ( j = 0 ; j < datamsglength ; j++ ) {
- int index = target*datamsglength+j ;
- if ( j < recvcounts[target] ) {
- if ( fabs(recvbuf[index] - (target*1000000 + myrank*10000 +
- (timeLoc[target]/deltatime[target])*100 + j)) > 101) {
- badrecvbuf = true ;
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " target " << target << " timeLoc[target] "
- << timeLoc[target] << " recvbuf[" << index << "] " << (int ) recvbuf[index]
- << " # " << (int ) (target*1000000 +
- myrank*10000 + (timeLoc[target]/deltatime[target])*100 + j)
- << endl ;
- }
- else if ( badrecvbuf ) {
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " ~= " << (int ) (target*1000000 +
- myrank*10000 + (timeLoc[target]/deltatime[target])*100 + j) << endl ;
- }
- }
- else if ( recvbuf[index] != -1 ) {
- badrecvbuf = true ;
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
- << recvbuf[index] << " # -1" << endl ;
- }
- }
- }
- if ( badrecvbuf ) {
- ostringstream strstream ;
- strstream << "==================================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " badrecvbuf"
- << endl << "=================================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- delete [] recvbuf ;
- }
-
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent" << endl ;
- sts = MyMPIAccessDEC->checkFinalSent() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "=================================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent ERROR"
- << endl << "================================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv" << endl ;
- sts = MyMPIAccessDEC->checkFinalRecv() ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "=================================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv ERROR"
- << endl << "================================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- int nRecvReq = mpi_access->recvRequestIdsSize() ;
- if ( nRecvReq ) {
- ostringstream strstream ;
- strstream << "===============================================================" << endl
- << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests # 0 Error"
- << endl << "==============================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
- << " RecvRequests = 0 OK" << endl ;
- }
-
- time_t endtime = time(NULL) ;
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- cout << "test" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
- delete sourcegroup ;
- delete targetgroup ;
- delete MyMPIAccessDEC ;
-// delete aLinearInterpDEC ;
-
- delete [] sendcounts ;
- delete [] sdispls ;
- delete [] recvcounts ;
- delete [] rdispls ;
- delete [] sendtimecounts ;
- delete [] stimedispls ;
- delete [] recvtimecounts ;
- delete [] rtimedispls ;
-
-// MPI_Finalize();
-
- endtime = time(NULL) ;
-
- cout << "test_AllToAllvTimeDoubleDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
- << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
- << " calls to AllToAll" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <time.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_Cancel() {
-
- cout << "test_MPI_Access_Cancel" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_Cancel must be runned with 2 procs" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_MPI_Access_Cancel" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int intsendbuf[5] ;
- double doublesendbuf[10] ;
- int RequestId[10] ;
- int sts ;
- int i , j ;
- for ( j = 0 ; j < 3 ; j++ ) {
- for ( i = 0 ; i < 10 ; i++ ) {
- cout << "test" << myrank << " ============================ i " << i
- << "============================" << endl ;
- if ( myrank == 0 ) {
- if ( i < 5 ) {
- intsendbuf[i] = i ;
- sts = mpi_access.ISend(&intsendbuf[i],1,MPI_INT,target, RequestId[i]) ;
- cout << "test" << myrank << " Send MPI_INT RequestId " << RequestId[i]
- << endl ;
- }
- else {
- doublesendbuf[i] = i ;
- sts = mpi_access.ISend(&doublesendbuf[i],1,MPI_DOUBLE,target,
- RequestId[i]) ;
- cout << "test" << myrank << " Send MPI_DOUBLE RequestId " << RequestId[i]
- << endl ;
- }
- }
- else {
- int flag = false ;
- while ( !flag ) {
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access.IProbe(target, source, tag, datatype, outcount,
- flag ) ;
- if ( flag ) {
- cout << "test" << myrank << " " << i << " IProbe target " << target
- << " source " << source << " tag " << tag
- << " outcount " << outcount << " flag " << flag << endl ;
- }
- else {
- cout << "test" << myrank << " flag " << flag << endl ;
- sleep( 1 ) ;
- }
- if ( flag ) {
- int recvbuf ;
- sts = mpi_access.IRecv(&recvbuf,outcount,MPI_INT,source,
- RequestId[i] ) ;
- if ( datatype == MPI_INT ) {
- int source, tag, error, outcount ;
- mpi_access.wait( RequestId[i] ) ;
- mpi_access.status( RequestId[i], source, tag, error, outcount,
- true ) ;
- if ( (outcount != 1) | (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "======================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO" << endl
- << "======================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- cout << "========================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " OK" << endl
- << "========================================================"
- << endl ;
- }
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
-
- if ( myrank != 0 ) {
- int iprobe ;
- for ( iprobe = 5 ; iprobe < 10 ; iprobe++ ) {
- cout << "test" << myrank << " ============================ iprobe "
- << iprobe << "============================" << endl ;
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- int probeflag = false ;
- while ( !probeflag ) {
- sts = mpi_access.IProbe( target, source, tag, datatype, outcount,
- probeflag ) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " IProbe iprobe " << iprobe
- << " target " << target << " probeflag " << probeflag
- << " tag " << tag << " outcount " << outcount << " datatype "
- << datatype << " lenerr " << lenerr << " " << msgerr << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "=========================================================="
- << endl << "test" << myrank << " IProbe KO iprobe " << iprobe
- << endl
- << "=========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if ( !probeflag ) {
- //cout << "========================================================"
- // << endl << "test" << myrank << " IProbe KO(OK) iprobe " << iprobe
- // << " probeflag " << probeflag << endl
- // << "========================================================"
- // << endl ;
- }
- else {
- cout << "test" << myrank << " " << iprobe << " IProbe target "
- << target << " source " << source << " tag " << tag
- << " outcount " << outcount << " probeflag " << probeflag
- << endl ;
- if ( datatype != MPI_DOUBLE ) {
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " MPI_DOUBLE KO" << endl
- << "========================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- int flag ;
- sts = mpi_access.cancel( source, tag, datatype, outcount, flag ) ;
- if ( sts != MPI_SUCCESS || !flag ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "======================================================"
- << endl << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl << "test" << myrank
- << " Cancel PendingIrecv KO flag " << flag << " iprobe "
- << iprobe << " Irecv completed" << endl
- << "======================================================"
- << endl ;
- //return 1 ;
- }
- else {
- cout << "======================================================"
- << endl << "test" << myrank
- << " Cancel PendingIrecv OK RequestId " << " flag "
- << flag << " iprobe " << iprobe << endl
- << "======================================================"
- << endl ;
- }
- }
- int Reqtarget, Reqtag, Reqerror, Reqoutcount ;
- mpi_access.status( RequestId[iprobe], Reqtarget, Reqtag, Reqerror,
- Reqoutcount, true ) ;
- cout << "test" << myrank << " Status Reqtarget "<< Reqtarget
- << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
- << endl ;
- int Reqflag ;
- sts = mpi_access.cancel( RequestId[iprobe] , Reqflag ) ;
- cout << "test" << myrank << " " << iprobe
- << " Cancel Irecv done Reqtarget " << Reqtarget
- << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
- << " Reqflag " << Reqflag << endl ;
- if ( sts != MPI_SUCCESS || !Reqflag ) {
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl << "test" << myrank
- << " Cancel Irecv KO Reqflag " << Reqflag << " iprobe "
- << iprobe << endl
- << "========================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "========================================================"
- << endl << "test" << myrank
- << " Cancel Irecv OK RequestId " << RequestId[iprobe]
- << " Reqflag " << Reqflag << " iprobe " << iprobe << endl
- << "========================================================"
- << endl ;
- probeflag = Reqflag ;
- }
- }
- }
- }
- }
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.deleteRequests(10,RequestId) ;
- }
-
- int source, tag, outcount, flag ;
- MPI_Datatype datatype ;
- sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- if ( sts != MPI_SUCCESS || flag ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " IProbe KO flag " << flag
- << " remaining unread/cancelled message :" << endl
- << " source " << source << " tag " << tag << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.testAll(10,RequestId,flag) ;
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.deleteRequests(10,RequestId) ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_Cyclic_ISend_IRecv() {
-
- cout << "test_MPI_Access_Cyclic_ISend_IRecv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 3 ) {
- cout << "test_MPI_Access_Cyclic_ISend_IRecv must be runned with 3 procs" << endl ;
- CPPUNIT_FAIL("test_MPI_Access_Cyclic_ISend_IRecv must be runned with 3 procs") ;
- }
-
- cout << "test_MPI_Access_Cyclic_ISend_IRecv" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
-#define maxsend 100
-
- if ( myrank >= 3 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int alltarget[3] = {1 , 2 , 0 } ;
- int allsource[3] = {2 , 0 , 1 } ;
- int SendRequestId[maxsend] ;
- int RecvRequestId[maxsend] ;
- int sendbuf[maxsend] ;
- int recvbuf[maxsend] ;
- int sts ;
- int i = 0 ;
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
- SendRequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- for ( i = 0 ; i < maxsend ; i++ ) {
- recvbuf[i] = -1 ;
- sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,allsource[myrank],
- RecvRequestId[i]) ;
- cout << "test" << myrank << " Recv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(allsource[myrank]) << endl ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( j < i ) {
- cout << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
- << ")" << endl ;
- mpi_access.test( SendRequestId[j], flag ) ;
- if ( flag ) {
- int target, tag, error, outcount ;
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Send RequestId " << SendRequestId[j]
- << " target " << target << " tag " << tag << " error " << error
- << endl ;
- mpi_access.deleteRequest( SendRequestId[j] ) ;
- }
- }
- cout << "test" << myrank << " " << j << " -> Test-Recv("<< SendRequestId[j]
- << ")" << endl ;
- mpi_access.test( RecvRequestId[j], flag ) ;
- if ( flag ) {
- int source, tag, error, outcount ;
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Recv RequestId" << j << " "
- << RecvRequestId[j] << " source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount << endl ;
- if ( (outcount != 1) | (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "====================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
- << endl << "====================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- }
- if ( myrank == 0 ) {
- if ( i != maxsend-1 ) {
- sendbuf[i+1] = i + 1 ;
- sts = mpi_access.ISend(&sendbuf[i+1],1,MPI_INT,alltarget[myrank],
- SendRequestId[i+1]) ;
- cout << "test" << myrank << " Send RequestId " << SendRequestId[i+1]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- }
- else {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
- SendRequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(maxsend,SendRequestId,flag) ;
- mpi_access.testAll(maxsend,RecvRequestId,flag) ;
- mpi_access.waitAll(maxsend,SendRequestId) ;
- mpi_access.deleteRequests(maxsend,SendRequestId) ;
- mpi_access.waitAll(maxsend,RecvRequestId) ;
- mpi_access.deleteRequests(maxsend,RecvRequestId) ;
- mpi_access.check() ;
- mpi_access.testAll(maxsend,SendRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " TestAllSendflag " << flag << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " TestAllSendflag " << flag << " OK" << endl
- << "=========================================================" << endl ;
- }
- mpi_access.testAll(maxsend,RecvRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " TestAllRecvflag " << flag << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " TestAllRecvflag " << flag << " OK" << endl
- << "=========================================================" << endl ;
- }
-
- int sendrequests[maxsend] ;
- int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , maxsend ,
- sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- int source, tag, error, outcount ;
- mpi_access.status(sendrequests[0], source, tag, error, outcount, true) ;
- cout << "test" << myrank << " RequestId " << sendrequests[0]
- << " source " << source << " tag " << tag << " error " << error
- << " outcount " << outcount << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- int recvrequests[maxsend] ;
- int recvreqsize = mpi_access.sendRequestIds( allsource[myrank] , maxsend ,
- recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
-
- cout << "test_MPI_Access_Cyclic_Send_Recv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 3 ) {
- cout << "test_MPI_Access_Send_Recv must be runned with 3 procs" << endl ;
- CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be runned with 3 procs") ;
- }
-
- cout << "test_MPI_Access_Cyclic_Send_Recv" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 3 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int alltarget[3] = {1 , 2 , 0 } ;
- int allsource[3] = {2 , 0 , 1 } ;
- int RequestId[10] ;
- int sts ;
- int i = 0 ;
- if ( myrank == 0 ) {
- sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- for ( i = 0 ; i < 10 ; i++ ) {
- int recvbuf ;
- int outcount ;
- if ( i & 1 ) {
- outcount = 0 ;
- sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i],
- &outcount) ;
- }
- else {
- sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i]) ;
- outcount = 1 ;
- }
- //int source, tag, error, outcount ;
- //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
- cout << "test" << myrank << " Recv RequestId " << RequestId[i]
- << " tag " << mpi_access.recvMPITag(allsource[myrank])
- << " outcount " << outcount << endl ;
- if ( (outcount != 1) | (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- if ( myrank == 0 ) {
- if ( i != 9 ) {
- int ii = i + 1 ;
- sts = mpi_access.send(&ii,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- }
- else {
- sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.check() ;
-
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , 10 ,
- sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int recvrequests[10] ;
- int recvreqsize = mpi_access.sendRequestIds( allsource[myrank] , 10 ,
- recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <time.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_IProbe() {
-
- cout << "test_MPI_Access_IProbe" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_IProbe must be runned with 2 procs" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_MPI_Access_IProbe" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int sendbuf[10] ;
- int RequestId[10] ;
- int sts ;
- int i ;
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, RequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << RequestId[i]
- << endl ;
- }
- else {
- int flag = false ;
- while ( !flag ) {
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
- if ( flag ) {
- cout << "test" << myrank << " " << i << " IProbe target " << target
- << " source " << source << " tag " << tag
- << " outcount " << outcount << " flag " << flag << endl ;
- }
- else {
- cout << "test" << myrank << " IProbe flag " << flag << endl ;
- sleep( 1 ) ;
- }
- if ( flag ) {
- int recvbuf ;
- sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
- &outcount) ;
- if ( (outcount != 1) | (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO" << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- cout << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.deleteRequests(10,RequestId) ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_ISendRecv() {
-
- cout << "test_MPI_Access_ISendRecv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cout << "test_MPI_Access_ISendRecv must be runned with 2 procs" << endl ;
- CPPUNIT_FAIL("test_MPI_Access_ISendRecv must be runned with 2 procs") ;
- }
-
- cout << "test_MPI_Access_ISendRecv" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[10] ;
- int RecvRequestId[10] ;
- int sendbuf[10] ;
- int recvbuf[10] ;
- int sts ;
- int i ;
- for ( i = 0 ; i < 10 ; i++ ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISendRecv(&sendbuf[i],1,MPI_INT,target, SendRequestId[i],
- &recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
- cout << "test" << myrank << " Send sendRequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target)
- << " recvRequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr
- << " " << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( j < i ) {
- cout << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
- << ")" << endl ;
- mpi_access.test( SendRequestId[j], flag ) ;
- if ( flag ) {
- int target, tag, error, outcount ;
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Send RequestId " << SendRequestId[j]
- << " target " << target << " tag " << tag << " error " << error
- << endl ;
- mpi_access.deleteRequest( SendRequestId[j] ) ;
- }
- }
- cout << "test" << myrank << " " << j << " -> Test-Recv("<< SendRequestId[j]
- << ")" << endl ;
- mpi_access.test( RecvRequestId[j], flag ) ;
- if ( flag ) {
- int source, tag, error, outcount ;
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Recv RequestId" << j << " "
- << RecvRequestId[j] << " source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount << endl ;
- if ( (outcount != 1) | (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount "
- << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- }
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(10,SendRequestId,flag) ;
- mpi_access.waitAll(10,SendRequestId) ;
- mpi_access.deleteRequests(10,SendRequestId) ;
- mpi_access.testAll(10,SendRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.testAll(10,RecvRequestId,flag) ;
- mpi_access.waitAll(10,RecvRequestId) ;
- mpi_access.deleteRequests(10,RecvRequestId) ;
- mpi_access.testAll(10,RecvRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
-
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- int recvrequests[10] ;
- int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv() {
-
- cout << "test_MPI_Access_ISend_IRecv" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cout << "test_MPI_Access_ISend_IRecv must be runned with 2 procs" << endl ;
- CPPUNIT_FAIL("test_MPI_Access_ISend_IRecv must be runned with 2 procs") ;
- }
-
- cout << "test_MPI_Access_ISend_IRecv" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
-#define maxreq 100
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- int i ;
- for ( i = 0 ; i < maxreq ; i++ ) {
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
- cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
- cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvRequestId[j], flag ) ;
- }
- if ( flag ) {
- int target,source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Recv RequestId "
- << RecvRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << endl ;
- if ( (outcount != 1) | (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- //else {
- // cout << "==========================================================="
- // << endl << "test" << myrank << " outcount " << outcount
- // << " RequestId " << RecvRequestId[j] << " recvbuf "
- // << recvbuf[j] << " OK" << endl
- // << "==========================================================="
- // << endl ;
- //}
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.check() ;
- if ( myrank == 0 ) {
- mpi_access.waitAll(maxreq, SendRequestId) ;
- mpi_access.deleteRequests(maxreq, SendRequestId) ;
- }
- else {
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvRequestId) ;
- }
- mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- int i ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- for ( i = 0 ; i < sendreqsize ; i++ ) {
- cout << "test" << myrank << " sendrequests[ " << i << " ] = "
- << sendrequests[i] << endl ;
- }
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
- int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
- // MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <time.h>
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {
-
- cout << "test_MPI_Access_ISend_IRecv_BottleNeck" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_ISend_IRecv_BottleNeck must be runned with 2 procs"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_MPI_Access_ISend_IRecv_BottleNeck" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10000
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- int i ;
- for ( i = 0 ; i < maxreq ; i++ ) {
- if ( myrank == 0 ) {
- sendbuf[i] = i ;
- sts = mpi_access.ISend(sendbuf,i,MPI_INT,target, SendRequestId[i]) ;
- cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- //sleep( 1 ) ;
- sts = mpi_access.IRecv(recvbuf,i,MPI_INT,target, RecvRequestId[i]) ;
- cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- int recvreqsize = mpi_access.recvRequestIdsSize() ;
- int * recvrequests = new int[ recvreqsize ] ;
- recvreqsize = mpi_access.recvRequestIds( target , recvreqsize , recvrequests ) ;
- int j ;
- for (j = 0 ; j < recvreqsize ; j++) {
- int flag ;
- mpi_access.test( recvrequests[j], flag ) ;
- if ( flag ) {
- int source, tag, error, outcount ;
- mpi_access.status( recvrequests[j], source, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Recv RequestId "
- << recvrequests[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << " : DeleteRequest" << endl ;
- mpi_access.deleteRequest( recvrequests[j] ) ;
- }
- else {
-// cout << "test" << myrank << " Test(Recv RequestId "
-// << recvrequests[j] << ") flag " << flag << endl ;
- }
- }
- delete [] recvrequests ;
- }
- if ( sts != MPI_SUCCESS ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- }
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.check() ;
- if ( myrank == 0 ) {
- int size = mpi_access.sendRequestIdsSize() ;
- cout << "test" << myrank << " before WaitAll sendreqsize " << size << endl ;
- mpi_access.waitAll(maxreq, SendRequestId) ;
- size = mpi_access.sendRequestIdsSize() ;
- cout << "test" << myrank << " after WaitAll sendreqsize " << size << endl ;
- int * ArrayOfSendRequests = new int[ size ] ;
- int nSendRequest = mpi_access.sendRequestIds( size , ArrayOfSendRequests ) ;
- int i ;
- for ( i = 0 ; i < nSendRequest ; i++ ) {
- mpi_access.deleteRequest( ArrayOfSendRequests[i] ) ;
- }
- delete [] ArrayOfSendRequests ;
- }
- else {
- int size = mpi_access.recvRequestIdsSize() ;
- cout << "test" << myrank << " before WaitAll recvreqsize " << size << endl ;
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- size = mpi_access.recvRequestIdsSize() ;
- cout << "test" << myrank << " after WaitAll recvreqsize " << size << endl ;
- int * ArrayOfRecvRequests = new int[ size ] ;
- int nRecvRequest = mpi_access.recvRequestIds( size , ArrayOfRecvRequests ) ;
- int i ;
- for ( i = 0 ; i < nRecvRequest ; i++ ) {
- mpi_access.deleteRequest( ArrayOfRecvRequests[i] ) ;
- }
- delete [] ArrayOfRecvRequests ;
- }
- mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- int i ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- for ( i = 0 ; i < sendreqsize ; i++ ) {
- cout << "test" << myrank << " sendrequests[ " << i << " ] = "
- << sendrequests[i] << endl ;
- }
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
- int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length() {
-
- cout << "test_MPI_Access_ISend_IRecv_Length" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_ISend_IRecv_Length must be runned with 2 procs" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_MPI_Access_ISend_IRecv_Length" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[1000*(maxreq-1)] ;
- int recvbuf[maxreq-1][1000*(maxreq-1)] ;
- int i ;
- for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
- sendbuf[i] = i ;
- }
- for ( i = 0 ; i < maxreq ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
- cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- sts = mpi_access.IRecv( recvbuf[i], 1000*i, MPI_INT, target,
- RecvRequestId[i] ) ;
- cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvRequestId[j], flag ) ;
- }
- if ( flag ) {
- int target,source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Recv RequestId "
- << RecvRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << endl ;
- if ( outcount != 0 ) {
- if ( (outcount != 1000*j) |
- (recvbuf[j][outcount-1] != (outcount-1)) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf[j][outcount-1] << " KO"
- << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RequestId " << RecvRequestId[j] << " recvbuf "
- << recvbuf[j][outcount-1] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- else {
- cout << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RequestId " << RecvRequestId[j] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.check() ;
- cout << "test" << myrank << " WaitAll" << endl ;
- if ( myrank == 0 ) {
- mpi_access.waitAll(maxreq, SendRequestId) ;
- mpi_access.deleteRequests(maxreq, SendRequestId) ;
- }
- else {
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvRequestId) ;
- }
- mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
- int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
- // MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length_1() {
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_ISend_IRecv_Length_1 must be runned with 2 procs" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_MPI_Access_ISend_IRecv_Length_1" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[1000*(maxreq-1)] ;
- int recvbuf[maxreq-1][1000*(maxreq-1)] ;
- int maxirecv = 1 ;
- int i ;
- RecvRequestId[0] = -1 ;
- for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
- sendbuf[i] = i ;
- }
- for ( i = 0 ; i < maxreq ; i++ ) {
- sts = MPI_SUCCESS ;
- if ( myrank == 0 ) {
- sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
- cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- int j ;
- for (j = 1 ; j <= i ; j++) {
- int source ;
- MPI_Datatype datatype ;
- int outcount ;
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- int MPITag ;
- sts = mpi_access.IProbe( target , source, MPITag, datatype,
- outcount, flag) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " IProbe lenerr " << lenerr << " "
- << msgerr << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " IProbe KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- cout << "test" << myrank << " IProbe i/j " << i << "/" << j
- << " MPITag " << MPITag << " datatype " << datatype
- << " outcount " << outcount << " flag " << flag << endl ;
- }
- if ( flag ) {
- if ( myrank == 0 ) {
- int target, tag, error, outcount ;
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- sts = mpi_access.IRecv( recvbuf[maxirecv], outcount, datatype, source,
- RecvRequestId[maxirecv] ) ;
- cout << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
- << RecvRequestId[maxirecv] << " source " << source
- << " outcount " << outcount << " tag "
- << mpi_access.recvMPITag(target) << endl ;
- maxirecv = maxirecv + 1 ;
- }
- }
- else if ( myrank == 1 && i == maxreq-1 && j >= maxirecv ) {
- sts = mpi_access.IRecv( recvbuf[j], 1000*j, MPI_INT, target,
- RecvRequestId[j] ) ;
- cout << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
- << RecvRequestId[j] << " target " << target << " length " << 1000*j
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- maxirecv = maxirecv + 1 ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " KO" << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.check() ;
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.testAll( maxreq, SendRequestId, flag ) ;
- cout << "test" << myrank << " TestAll SendRequest flag " << flag << endl ;
- }
- else {
- int i ;
- int source ;
- int outcount ;
- int flag ;
- if ( maxirecv != maxreq ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " KO" << " maxirecv " << maxirecv
- << " != maxreq " << maxreq << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- while ( maxirecv > 0 ) {
- for ( i = 1 ; i < maxreq ; i++ ) {
- cout << "test" << myrank << " IProbe : " << endl ;
- sts = mpi_access.test( RecvRequestId[i] , flag ) ;
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " flag " << flag << " lenerr "
- << lenerr << " " << msgerr << " maxirecv " << maxirecv << endl ;
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- cout << "test" << myrank << " Test flag " << flag << endl ;
- if ( flag ) {
- int tag, error ;
- mpi_access.status( RecvRequestId[i] , source , tag , error ,
- outcount ) ;
- if ( i != 0 ) {
- if ( outcount != 1000*i |
- (recvbuf[i][outcount-1] != (outcount-1)) ) {
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " KO" << " i " << i
- << " recvbuf " << recvbuf[i][outcount-1] << endl
- << "========================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- else if ( outcount != 0 ) {
- ostringstream strstream ;
- strstream << "========================================================"
- << endl << "test" << myrank << " outcount " << outcount
- << " KO" << " i " << i << endl
- << "========================================================"
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- maxirecv = maxirecv - 1 ;
- }
- }
- }
- mpi_access.testAll( maxreq, RecvRequestId, flag ) ;
- cout << "test" << myrank << " TestAll RecvRequest flag " << flag << endl ;
- }
- mpi_access.check() ;
- cout << "test" << myrank << " WaitAll :" << endl ;
- if ( myrank == 0 ) {
- mpi_access.waitAll( maxreq, SendRequestId ) ;
- mpi_access.deleteRequests( maxreq, SendRequestId ) ;
- }
- else {
- mpi_access.waitAll( maxreq, RecvRequestId ) ;
- mpi_access.deleteRequests( maxreq, RecvRequestId ) ;
- }
-
- if ( myrank == 0 ) {
- int sendrequests[maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[maxreq] ;
- int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
- // MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_Probe() {
-
- cout << "test_MPI_Access_Probe" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cout << "test_MPI_Access_Probe must be runned with 2 procs" << endl ;
- CPPUNIT_FAIL("test_MPI_Access_Probe must be runned with 2 procs") ;
- }
-
- cout << "test_MPI_Access_Probe" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int RequestId[10] ;
- int sts ;
- int i ;
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << RequestId[i]
- << endl ;
- }
- else {
- int source, tag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access.probe(target, source, tag, datatype, outcount ) ;
- cout << "test" << myrank << " Probe target " << target << " source " << source
- << " tag " << tag << " outcount " << outcount << endl ;
- int recvbuf ;
- sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
- &outcount) ;
- if ( (outcount != 1) | (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.check() ;
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_SendRecv() {
-
- cout << "MPIAccessTest::test_MPI_Access_SendRecv" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cout << "MPIAccessTest::test_MPI_Access_SendRecv must be runned with 2 procs" << endl ;
- CPPUNIT_FAIL("test_MPI_Access_SendRecv must be runned with 2 procs") ;
- }
-
- cout << "MPIAccessTest::test_MPI_Access_SendRecv" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int sendRequestId[10] ;
- int recvRequestId[10] ;
- int sts ;
- int i ;
- for ( i = 0 ; i < 10 ; i++ ) {
- int recvbuf ;
- int outcount ;
- if ( i & 1 ) {
- outcount = -1 ;
- sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
- &recvbuf,1,MPI_INT,target, recvRequestId[i],
- &outcount) ;
- }
- else {
- sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
- &recvbuf,1,MPI_INT,target, recvRequestId[i]) ;
-// outcount = mpi_access.MPIOutCount( recvRequestId[i] ) ;
- outcount = 1 ;
- }
- cout << "test" << myrank << " Send sendRequestId " << sendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target)
- << " recvRequestId " << recvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target)
- << " outcount " << outcount << " MPIOutCount "
- << mpi_access.MPIOutCount( recvRequestId[i] ) << endl ;
- if ( (outcount != 1) | (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
-
- int flag ;
- mpi_access.testAll(10,sendRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,sendRequestId) ;
- mpi_access.testAll(10,recvRequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,recvRequestId) ;
- mpi_access.check() ;
-
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- int recvrequests[10] ;
- int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_Send_Recv() {
-
- cout << "test_MPI_Access_Send_Recv" << endl ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- cout << "test_MPI_Access_Send_Recv must be runned with 2 procs" << endl ;
- CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be runned with 2 procs") ;
- }
-
- cout << "test_MPI_Access_Send_Recv" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int RequestId[10] ;
- int sts ;
- int i ;
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
- cout << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- int recvbuf ;
- int outcount ;
- sts = mpi_access.recv(&recvbuf,1,MPI_INT,target, RequestId[i],&outcount) ;
- //int source, tag, error, outcount ;
- //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
- cout << "test" << myrank << " Recv RequestId " << RequestId[i]
- << " tag " << mpi_access.recvMPITag(target)
- << " outcount " << outcount << endl ;
- if ( (outcount != 1) | (recvbuf != i) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- else {
- int recvrequests[10] ;
- int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_Send_Recv_Length() {
-
- cout << "test_MPI_Access_Send_Recv_Length" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_Send_Recv_Length must be runned with 2 procs" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_MPI_Access_Send_Recv_Length" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
- if ( myrank >= 2 ) {
- mpi_access.barrier() ;
- delete group ;
- return ;
- }
-
- int target = 1 - myrank ;
- int RequestId[10] ;
- int sendbuf[9000] ;
- int recvbuf[9000] ;
- bool recvbufok ;
- int sts ;
- int i , j ;
- for ( i = 0 ; i < 9000 ; i++ ) {
- sendbuf[i] = i ;
- }
- for ( i = 0 ; i < 10 ; i++ ) {
- if ( myrank == 0 ) {
- sts = mpi_access.send( sendbuf, 1000*i, MPI_INT, target, RequestId[i] ) ;
- cout << "test" << myrank << " Send RequestId " << RequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- sts = MPI_SUCCESS ;
- RequestId[i] = -1 ;
- int outcount = 0 ;
- if ( i != 0 ) {
- sts = mpi_access.recv( recvbuf,1000*i+1,MPI_INT,target, RequestId[i],
- &outcount ) ;
- }
- //int source, tag, error, outcount ;
- //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
- cout << "test" << myrank << " Recv RequestId " << RequestId[i]
- << " tag " << mpi_access.recvMPITag(target)
- << " outcount " << outcount << endl ;
- recvbufok = true ;
- for ( j = 0 ; j < outcount ; j++ ) {
- if ( recvbuf[j] != j ) {
- cout << "test" << myrank << " recvbuf[ " << j << " ] = " << recvbuf[j]
- << endl ;
- recvbufok = false ;
- break ;
- }
- }
- if ( (outcount != 1000*i) | !recvbufok ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " recvbuf " << recvbuf << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.check() ;
- }
- int flag ;
- mpi_access.testAll(10,RequestId,flag) ;
- if ( !flag ) {
- ostringstream strstream ;
- strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- mpi_access.waitAll(10,RequestId) ;
- mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[10] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
- else {
- int recvrequests[10] ;
- int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- }
-
- mpi_access.barrier() ;
-
- delete group ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void MPIAccessTest::test_MPI_Access_Time() {
-
- cout << "test_MPI_Access_Time" << endl ;
-
- // MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "test_MPI_Access_Time must be runned with 2 procs" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
- cout << "test_MPI_Access_Time" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess mpi_access( group ) ;
-
-#define maxreq 10
-
- if ( myrank >= 2 ) {
- cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
- mpi_access.barrier() ;
- cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
- delete group ;
- cout << "test_MPI_Access_Time" << myrank << " OK" << endl ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendTimeRequestId[maxreq] ;
- int RecvTimeRequestId[maxreq] ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- int i = 0 ;
- ParaMEDMEM::TimeMessage aSendTimeMsg[maxreq] ;
- ParaMEDMEM::TimeMessage aRecvTimeMsg[maxreq] ;
- double t ;
- double dt = 1. ;
- double maxt = 10. ;
- for ( t = 0 ; t < maxt ; t = t+dt ) {
- if ( myrank == 0 ) {
- aSendTimeMsg[i].time = t ;
- aSendTimeMsg[i].deltatime = dt ;
- //aSendTimeMsg[i].maxtime = maxt ;
- //sts = mpi_access.ISend( &aSendTimeMsg , mpi_access.timeExtent() ,
- sts = mpi_access.ISend( &aSendTimeMsg[i] , 1 ,
- mpi_access.timeType() , target ,
- SendTimeRequestId[i]) ;
- cout << "test" << myrank << " ISend RequestId " << SendTimeRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- sendbuf[i] = i ;
- sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
- cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
- << " tag " << mpi_access.sendMPITag(target) << endl ;
- }
- else {
- //sts = mpi_access.IRecv( &aRecvTimeMsg , mpi_access.timeExtent() ,
- sts = mpi_access.IRecv( &aRecvTimeMsg[i] , 1 ,
- mpi_access.timeType() , target ,
- RecvTimeRequestId[i]) ;
- cout << "test" << myrank << " IRecv RequestId " << RecvTimeRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
- cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
- << " tag " << mpi_access.recvMPITag(target) << endl ;
- }
- int j ;
- for (j = 0 ; j <= i ; j++) {
- int flag ;
- if ( myrank == 0 ) {
- mpi_access.test( SendTimeRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvTimeRequestId[j], flag ) ;
- }
- if ( flag ) {
- int target,source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendTimeRequestId[j], target, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Send TimeRequestId " << SendTimeRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << aSendTimeMsg[j] << endl ;
- }
- else {
- mpi_access.status( RecvTimeRequestId[j], source, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Recv TimeRequestId "
- << RecvTimeRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << aRecvTimeMsg[j] << endl ;
- if ( (outcount != 1) | (aRecvTimeMsg[j].time != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount << " KO"
- << " RecvTimeRequestId " << RecvTimeRequestId[j] << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RecvTimeRequestId " << RecvTimeRequestId[j] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- if ( myrank == 0 ) {
- mpi_access.test( SendRequestId[j], flag ) ;
- }
- else {
- mpi_access.test( RecvRequestId[j], flag ) ;
- }
- if ( flag ) {
- int target,source, tag, error, outcount ;
- if ( myrank == 0 ) {
- mpi_access.status( SendRequestId[j], target, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
- << ") : target " << target << " tag " << tag << " error " << error
- << " flag " << flag << endl ;
- }
- else {
- mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
- true ) ;
- cout << "test" << myrank << " Test(Recv RequestId "
- << RecvRequestId[j] << ") : source " << source << " tag " << tag
- << " error " << error << " outcount " << outcount
- << " flag " << flag << endl ;
- if ( (outcount != 1) | (recvbuf[j] != j) ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " outcount "
- << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "==========================================================="
- << endl << "test" << myrank << " outcount " << outcount
- << " RequestId " << RecvRequestId[j] << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- }
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- mpi_access.errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
-
- if ( sts != MPI_SUCCESS ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- i = i + 1 ;
- }
-
- mpi_access.check() ;
- if ( myrank == 0 ) {
- mpi_access.waitAll(maxreq, SendTimeRequestId) ;
- mpi_access.deleteRequests(maxreq, SendTimeRequestId) ;
- mpi_access.waitAll(maxreq, SendRequestId) ;
- mpi_access.deleteRequests(maxreq, SendRequestId) ;
- }
- else {
- mpi_access.waitAll(maxreq, RecvTimeRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvTimeRequestId) ;
- mpi_access.waitAll(maxreq, RecvRequestId) ;
- mpi_access.deleteRequests(maxreq, RecvRequestId) ;
- }
- mpi_access.check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access.sendRequestIds( target , 2*maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[2*maxreq] ;
- int recvreqsize = mpi_access.sendRequestIds( target , 2*maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
- mpi_access.barrier() ;
- cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
-
- delete group ;
-
- // MPI_Finalize();
-
- cout << "test_MPI_Access_Time" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
-//
-// This library is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 2.1 of the License.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-//
-// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-//
-#include <string>
-#include <vector>
-#include <map>
-#include <iostream>
-#include <mpi.h>
-
-#include "MPIAccessTest.hxx"
-#include <cppunit/TestAssert.h>
-
-//#include "CommInterface.hxx"
-//#include "ProcessorGroup.hxx"
-//#include "MPIProcessorGroup.hxx"
-#include "MPIAccess.hxx"
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
- char msgerr[MPI_MAX_ERROR_STRING] ;
- int lenerr ;
- if ( sts != MPI_SUCCESS ) {
- mpi_access->errorString(sts, msgerr, &lenerr) ;
- cout << "test" << myrank << " lenerr " << lenerr << " "
- << msgerr << endl ;
- ostringstream strstream ;
- strstream << "==========================================================="
- << "test" << myrank << " KO"
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-return ;
-}
-
-void MPIAccessTest::test_MPI_Access_Time_0() {
-
- cout << "test_MPI_Access_Time_0" << endl ;
-
-// MPI_Init(&argc, &argv) ;
-
- int size ;
- int myrank ;
- MPI_Comm_size(MPI_COMM_WORLD,&size) ;
- MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
-
- if ( size < 2 ) {
- ostringstream strstream ;
- strstream << "usage :" << endl
- << "mpirun -np <nbprocs> test_MPI_Access_Time_0" <<endl
- << " nbprocs =2" << endl
- << "test must be runned with 2 procs" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
-
-#define maxreq 100
-
- double t ;
- double dt[2] = {2., 1.} ;
- double maxt = maxreq/dt[myrank] ;
-
- cout << "test_MPI_Access_Time_0 rank" << myrank << endl ;
-
- ParaMEDMEM::CommInterface interface ;
-
- ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
-
- ParaMEDMEM::MPIAccess * mpi_access = new ParaMEDMEM::MPIAccess( group ) ;
-
- if ( myrank >= 2 ) {
- cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
- mpi_access->barrier() ;
- cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
- cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
- mpi_access->barrier() ;
- cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
- delete group ;
- delete mpi_access ;
- cout << "test_MPI_Access_Time" << myrank << " OK" << endl ;
- return ;
- }
-
- int target = 1 - myrank ;
- int SendTimeRequestId[maxreq] ;
- int RecvTimeRequestId[maxreq] ;
- int SendRequestId[maxreq] ;
- int RecvRequestId[maxreq] ;
- int sts ;
- int sendbuf[maxreq] ;
- int recvbuf[maxreq] ;
- ParaMEDMEM::TimeMessage aSendTimeMsg[maxreq] ;
- int lasttime = -1 ;
- ParaMEDMEM::TimeMessage RecvTimeMessages[maxreq+1] ;
- ParaMEDMEM::TimeMessage *aRecvTimeMsg = &RecvTimeMessages[1] ;
-// mpi_access->Trace() ;
- int istep = 0 ;
- for ( t = 0 ; t < maxt ; t = t+dt[myrank] ) {
- cout << "test" << myrank << " ==========================TIME " << t
- << " ==========================" << endl ;
- if ( myrank == 0 ) {
- aSendTimeMsg[istep].time = t ;
- aSendTimeMsg[istep].deltatime = dt[myrank] ;
- //aSendTimeMsg[istep].maxtime = maxt ;
- if ( t+dt[myrank] >= maxt ) {
- aSendTimeMsg[istep].deltatime = 0 ;
- }
- sts = mpi_access->ISend( &aSendTimeMsg[istep] , 1 ,
- mpi_access->timeType() , target ,
- SendTimeRequestId[istep]) ;
- cout << "test" << myrank << " ISend TimeRequestId " << SendTimeRequestId[istep]
- << " tag " << mpi_access->MPITag(SendTimeRequestId[istep]) << endl ;
- chksts( sts , myrank , mpi_access ) ;
- sendbuf[istep] = istep ;
- sts = mpi_access->ISend(&sendbuf[istep],1,MPI_INT,target, SendRequestId[istep]) ;
- cout << "test" << myrank << " ISend Data RequestId " << SendRequestId[istep]
- << " tag " << mpi_access->MPITag(SendRequestId[istep]) << endl ;
- chksts( sts , myrank , mpi_access ) ;
-//CheckSent
-//=========
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq ,
- sendrequests ) ;
- int j , flag ;
- for ( j = 0 ; j < sendreqsize ; j++ ) {
- sts = mpi_access->test( sendrequests[j] , flag ) ;
- chksts( sts , myrank , mpi_access ) ;
- if ( flag ) {
- mpi_access->deleteRequest( sendrequests[j] ) ;
- cout << "test" << myrank << " " << j << ". " << sendrequests[j]
- << " sendrequest deleted" << endl ;
- }
- }
- }
- else {
-//InitRecv
-//========
- if ( t == 0 ) {
- aRecvTimeMsg[lasttime].time = 0 ;
- sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
- mpi_access->timeType() ,
- target , RecvTimeRequestId[lasttime+1]) ;
- cout << "test" << myrank << " t == 0 IRecv TimeRequestId "
- << RecvTimeRequestId[lasttime+1]
- << " MPITag " << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
- << " MPICompleted "
- << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] ) << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- else {
- cout << "test" << myrank << " t # 0 lasttime " << lasttime << endl ;
-//InitialOutTime
-//==============
- bool outtime = false ;
- if ( lasttime != -1 ) {
- if ( t <= aRecvTimeMsg[lasttime-1].time ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " t " << t << " <= "
- << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
- << aRecvTimeMsg[lasttime-1].time << " KO" << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "==========================================================="
- << endl << "test" << myrank << " t " << t << " > "
- << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
- << aRecvTimeMsg[lasttime-1].time << " OK" << endl
- << "==========================================================="
- << endl ;
- }
- //outtime = ((aRecvTimeMsg[lasttime].time +
- // aRecvTimeMsg[lasttime].deltatime) >=
- // aRecvTimeMsg[lasttime].maxtime) ;
- outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
- }
-// CheckRecv - CheckTime
-// On a lasttime tel que :
-// aRecvTimeMsg[ lasttime-1 ].time < T(i-1) <= aRecvTimeMsg[ lasttime ].time
-// On cherche lasttime tel que :
-// aRecvTimeMsg[ lasttime-1 ].time < T(i) <= aRecvTimeMsg[ lasttime ].time
- if ( t <= aRecvTimeMsg[lasttime].time ) {
- outtime = false ;
- }
- cout << "test" << myrank << " while outtime( " << outtime << " && t " << t
- << " > aRecvTimeMsg[ " << lasttime << " ] "
- << aRecvTimeMsg[lasttime].time << " )" << endl ;
- while ( !outtime && (t > aRecvTimeMsg[lasttime].time) ) {
- lasttime += 1 ;
-//TimeMessage
-//===========
- sts = mpi_access->wait( RecvTimeRequestId[lasttime] ) ;
- chksts( sts , myrank , mpi_access ) ;
- cout << "test" << myrank << " Wait done RecvTimeRequestId "
- << RecvTimeRequestId[lasttime] << " lasttime " << lasttime
- << " tag " << mpi_access->MPITag(RecvTimeRequestId[lasttime])
- << aRecvTimeMsg[lasttime] << endl ;
- if ( lasttime == 0 ) {
- aRecvTimeMsg[lasttime-1] = aRecvTimeMsg[lasttime] ;
- }
- mpi_access->deleteRequest( RecvTimeRequestId[lasttime] ) ;
-
- double deltatime = aRecvTimeMsg[lasttime].deltatime ;
- //double maxtime = aRecvTimeMsg[lasttime].maxtime ;
- double nexttime = aRecvTimeMsg[lasttime].time + deltatime ;
- cout << "test" << myrank << " t " << t << " lasttime " << lasttime
- << " deltatime " << deltatime
- << " nexttime " << nexttime << endl ;
- //if ( nexttime < maxtime && t > nexttime ) {
- if ( deltatime != 0 && t > nexttime ) {
-//CheckRecv :
-//=========
- //while ( nexttime < maxtime && t > nexttime ) {
- while ( deltatime != 0 && t > nexttime ) {
- int source, MPITag, outcount ;
- MPI_Datatype datatype ;
- sts = mpi_access->probe( target , source, MPITag, datatype,
- outcount ) ;
- chksts( sts , myrank , mpi_access ) ;
-// Cancel DataMessages jusqu'a un TimeMessage
- int cancelflag ;
- while ( !mpi_access->isTimeMessage( MPITag ) ) {
- sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
- //sts = mpi_access->cancel( source, datatype, outcount ,
- //RecvRequestId[lasttime] ,
- cancelflag ) ;
- cout << "test" << myrank << " Recv TO CANCEL RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << " cancelflag " << cancelflag << endl ;
- chksts( sts , myrank , mpi_access ) ;
- sts = mpi_access->probe( target , source, MPITag, datatype,
- outcount ) ;
- chksts( sts , myrank , mpi_access ) ;
- }
-//On peut avancer en temps
- nexttime += deltatime ;
- //if ( nexttime < maxtime && t > nexttime ) {
- if ( deltatime != 0 && t > nexttime ) {
-// Cancel du TimeMessage
- sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
- //sts = mpi_access->cancel( source, datatype, outcount ,
- //RecvRequestId[lasttime] ,
- cancelflag ) ;
- cout << "test" << myrank << " Time TO CANCEL RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << " cancelflag " << cancelflag << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- }
- }
- else {
-//DoRecv
-//======
- cout << "test" << myrank << " Recv target " << target
- << " lasttime " << lasttime
- << " lasttime-1 " << aRecvTimeMsg[lasttime-1]
- << " lasttime " << aRecvTimeMsg[lasttime]
- << endl ;
- sts = mpi_access->recv(&recvbuf[lasttime],1,MPI_INT,target,
- RecvRequestId[lasttime]) ;
- cout << "test" << myrank << " Recv RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- //outtime = ((aRecvTimeMsg[lasttime].time +
- // aRecvTimeMsg[lasttime].deltatime) >=
- // aRecvTimeMsg[lasttime].maxtime) ;
- outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
- if ( !outtime ) {
-// Une lecture asynchrone d'un message temps a l'avance
- sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
- mpi_access->timeType() , target ,
- RecvTimeRequestId[lasttime+1]) ;
- cout << "test" << myrank << " IRecv TimeRequestId "
- << RecvTimeRequestId[lasttime+1] << " MPITag "
- << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
- << " MPICompleted "
- << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] )
- << endl ;
- chksts( sts , myrank , mpi_access ) ;
- }
- else if ( t <= aRecvTimeMsg[lasttime].time ) {
- outtime = false ;
- }
- }
-
- //printf("DEBUG t %.15f Msg[lasttime-1] %.15f Msg[lasttime] %.15f \n",t,
- // aRecvTimeMsg[lasttime-1].time,aRecvTimeMsg[lasttime].time) ;
- if ( ((t <= aRecvTimeMsg[lasttime-1].time) ||
- (t > aRecvTimeMsg[lasttime].time)) && !outtime ) {
- ostringstream strstream ;
- strstream << "==========================================================="
- << endl << "test" << myrank << " t " << t << " <= "
- << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
- << aRecvTimeMsg[lasttime-1].time << " ou t " << t << " > "
- << "aRecvTimeMsg[ " << lasttime << " ].time "
- << aRecvTimeMsg[lasttime].time << endl
- << " ou bien outtime " << outtime << " KO RequestTimeIds "
- << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
- << " RequestIds "
- << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
- << "==========================================================="
- << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "==========================================================="
- << endl << "test" << myrank
- << " aRecvTimeMsg[ " << lasttime << "-1 ].time "
- << aRecvTimeMsg[lasttime-1].time << " < t " << t << " <= "
- << "aRecvTimeMsg[ " << lasttime << " ].time "
- << aRecvTimeMsg[lasttime].time << endl
- << " ou bien outtime " << outtime << " OK RequestTimeIds "
- << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
- << " RequestIds "
- << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
- << "==========================================================="
- << endl ;
- }
- }
- }
- chksts( sts , myrank , mpi_access ) ;
- istep = istep + 1 ;
- }
-
- cout << "test" << myrank << " Barrier :" << endl ;
- mpi_access->barrier() ;
-
- mpi_access->check() ;
-
- if ( myrank == 0 ) {
-//CheckFinalSent
-//==============
- cout << "test" << myrank << " CheckFinalSent :" << endl ;
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
- int j ;
- for ( j = 0 ; j < sendreqsize ; j++ ) {
- sts = mpi_access->wait( sendrequests[j] ) ;
- chksts( sts , myrank , mpi_access ) ;
- mpi_access->deleteRequest( sendrequests[j] ) ;
- cout << "test" << myrank << " " << j << ". " << sendrequests[j] << " deleted"
- << endl ;
- }
- }
- else {
- cout << "test" << myrank << " CheckFinalRecv :" << endl ;
- int recvrequests[2*maxreq] ;
- int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
- int cancelflag ;
- int j ;
- for ( j = 0 ; j < recvreqsize ; j++ ) {
- sts = mpi_access->cancel( recvrequests[j] , cancelflag ) ;
- chksts( sts , myrank , mpi_access ) ;
- mpi_access->deleteRequest( recvrequests[j] ) ;
- cout << "test" << myrank << " " << j << ". " << recvrequests[j] << " deleted"
- << " cancelflag " << cancelflag << endl ;
- }
- int source, MPITag, outcount , flag ;
- MPI_Datatype datatype ;
- sts = mpi_access->IProbe( target , source, MPITag, datatype,
- outcount , flag ) ;
- chksts( sts , myrank , mpi_access ) ;
- while ( flag ) {
- sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
- //sts = mpi_access->cancel( source, datatype, outcount ,
- //RecvRequestId[lasttime] ,
- cancelflag ) ;
- cout << "test" << myrank << " TO CANCEL RequestId "
- << RecvRequestId[lasttime]
- << " tag " << mpi_access->recvMPITag( target )
- << " cancelflag " << cancelflag << endl ;
- chksts( sts , myrank , mpi_access ) ;
- sts = mpi_access->IProbe( target , source, MPITag, datatype,
- outcount , flag ) ;
- chksts( sts , myrank , mpi_access ) ;
- }
- }
- mpi_access->check() ;
-
- if ( myrank == 0 ) {
- int sendrequests[2*maxreq] ;
- int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
- if ( sendreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
- else {
- int recvrequests[2*maxreq] ;
- int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
- if ( recvreqsize != 0 ) {
- ostringstream strstream ;
- strstream << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
- << "=========================================================" << endl ;
- cout << strstream.str() << endl ;
- CPPUNIT_FAIL( strstream.str() ) ;
- }
- else {
- cout << "=========================================================" << endl
- << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
- << "=========================================================" << endl ;
- }
- }
-
- int i ;
- for ( i = 0 ; i <= lasttime ; i++ ) {
- cout << "test" << myrank << " " << i << ". RecvTimeMsg "
- << aRecvTimeMsg[i].time << " recvbuf " << recvbuf[i] << endl ;
- }
-
- cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
- mpi_access->barrier() ;
- cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
-
- delete group ;
- delete mpi_access ;
-
-// MPI_Finalize();
-
- cout << "test" << myrank << " OK" << endl ;
-
- return ;
-}
-
-
-
-
+++ /dev/null
-#include <time.h>
-#include <sys/times.h>
-#include <sys/time.h>
-#include "ParaMEDMEMTest.hxx"
-#include <cppunit/TestAssert.h>
-
-#include "CommInterface.hxx"
-#include "ProcessorGroup.hxx"
-#include "MPIProcessorGroup.hxx"
-#include "Topology.hxx"
-#include "DEC.hxx"
-#include "MxN_Mapping.hxx"
-#include "InterpKernelDEC.hxx"
-#include "ParaMESH.hxx"
-#include "ParaFIELD.hxx"
-#include "ComponentTopology.hxx"
-#include "ICoCoMEDField.hxx"
-#include "MEDLoader.hxx"
-
-#include <string>
-#include <cstring>
-
-// use this define to enable lines, execution of which leads to Segmentation Fault
-#define ENABLE_FAULTS
-
-// use this define to enable CPPUNIT asserts and fails, showing bugs
-#define ENABLE_FORCED_FAILURES
-
-#ifndef CLK_TCK
-#include <unistd.h>
-#define CLK_TCK sysconf(_SC_CLK_TCK);
-#endif
-
-using namespace std;
-using namespace ParaMEDMEM;
-
-void testInterpKernelDEC_2D(const string& filename1, const string& meshname1,
- const string& filename2, const string& meshname2,
- int nproc_source, double epsilon, bool tri, bool all);
-void get_time( float *telps, float *tuser, float *tsys, float *tcpu );
-
-int main(int argc, char *argv[])
-{
- string filename1, filename2;
- string meshname1, meshname2;
- int nproc_source=1, rank;
- double epsilon=1.e-6;
- int count=0;
- bool tri=false;
- bool all=false;
-
- MPI_Init(&argc,&argv);
-
- for(int i=1;i<argc;i++){
- if( strcmp(argv[i],"-f1") == 0 ){
- filename1 = argv[++i];
- count++;
- }
- else if( strcmp(argv[i],"-f2") == 0 ){
- filename2 = argv[++i];
- count++;
- }
- else if( strcmp(argv[i],"-m1") == 0 ){
- meshname1 = argv[++i];
- count++;
- }
- else if( strcmp(argv[i],"-m2") == 0 ){
- meshname2 = argv[++i];
- count++;
- }
- else if( strcmp(argv[i],"-ns") == 0 ){
- nproc_source = atoi(argv[++i]);
- }
- else if( strcmp(argv[i],"-eps") == 0 ){
- epsilon = atof(argv[++i]);
- }
- else if( strcmp(argv[i],"-tri") == 0 ){
- tri = true;
- }
- else if( strcmp(argv[i],"-all") == 0 ){
- all = true;
- }
- }
-
- if( count != 4 ){
- cout << "usage test_perf -f1 filename1 -m1 meshname1 -f2 filename2 -m2 meshname2 (-ns nproc_source -eps epsilon -tri -all)" << endl;
- exit(0);
- }
-
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- testInterpKernelDEC_2D(filename1,meshname1,filename2,meshname2,nproc_source,epsilon,tri,all);
-
- MPI_Finalize();
-}
-
-void testInterpKernelDEC_2D(const string& filename_xml1, const string& meshname1,
- const string& filename_xml2, const string& meshname2,
- int nproc_source, double epsilon, bool tri, bool all)
-{
- float tcpu, tcpu_u, tcpu_s, telps;
- int size;
- int rank;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-
- set<int> self_procs;
- set<int> procs_source;
- set<int> procs_target;
-
- for (int i=0; i<nproc_source; i++)
- procs_source.insert(i);
- for (int i=nproc_source; i<size; i++)
- procs_target.insert(i);
- self_procs.insert(rank);
-
- ParaMEDMEM::CommInterface interface;
-
- ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
- ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
- ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
-
- //loading the geometry for the source group
-
- ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
- if(tri)
- dec.setIntersectionType(INTERP_KERNEL::Triangulation);
- else
- dec.setIntersectionType(INTERP_KERNEL::Convex);
-
- ParaMEDMEM::MEDCouplingUMesh* mesh;
- ParaMEDMEM::ParaMESH* paramesh;
- ParaMEDMEM::ParaFIELD* parafield;
- ICoCo::Field* icocofield ;
-
- // To remove tmp files from disk
- ParaMEDMEMTest_TmpFilesRemover aRemover;
-
- MPI_Barrier(MPI_COMM_WORLD);
- if (source_group->containsMyRank()){
- string master = filename_xml1;
-
- ostringstream strstream;
- if( nproc_source == 1 )
- strstream <<master<<".med";
- else
- strstream <<master<<rank+1<<".med";
-
- ostringstream meshname ;
- if( nproc_source == 1 )
- meshname<< meshname1;
- else
- meshname<< meshname1<<"_"<< rank+1;
-
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- if( rank == 0 )
- cout << "IO : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
- mesh->incrRef();
-
- paramesh=new ParaMESH (mesh,*source_group,"source mesh");
-
- ParaMEDMEM::ComponentTopology comptopo;
- parafield = new ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo);
-
- int nb_local=mesh->getNumberOfCells();
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=1.0;
-
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
-
- dec.attachLocalField(icocofield);
- }
-
- //loading the geometry for the target group
- if (target_group->containsMyRank()){
- string master= filename_xml2;
- ostringstream strstream;
- if( (size-nproc_source) == 1 )
- strstream << master<<".med";
- else
- strstream << master<<(rank-nproc_source+1)<<".med";
- ostringstream meshname ;
- if( (size-nproc_source) == 1 )
- meshname<< meshname2;
- else
- meshname<< meshname2<<"_"<<rank-nproc_source+1;
-
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- mesh->incrRef();
-
- paramesh=new ParaMESH (mesh,*target_group,"target mesh");
- ParaMEDMEM::ComponentTopology comptopo;
- parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
-
- int nb_local=mesh->getNumberOfCells();
- double *value=parafield->getField()->getArray()->getPointer();
- for(int ielem=0; ielem<nb_local;ielem++)
- value[ielem]=0.0;
- icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
-
- dec.attachLocalField(icocofield);
- }
-
-
- //attaching a DEC to the source group
- double field_before_int;
- double field_after_int;
-
- if (source_group->containsMyRank()){
- field_before_int = parafield->getVolumeIntegral(0,true);
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- dec.synchronize();
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- if( rank == 0 )
- cout << "SYNCHRONIZE : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
- cout<<"DEC usage"<<endl;
- dec.setForcedRenormalization(false);
- if(all)
- dec.setAllToAllMethod(PointToPoint);
-
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- dec.sendData();
-
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- if( rank == 0 )
- cout << "SEND DATA : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
- dec.recvData();
-
- field_after_int = parafield->getVolumeIntegral(0,true);
-// CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, epsilon);
-
- }
-
- //attaching a DEC to the target group
- if (target_group->containsMyRank()){
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- dec.synchronize();
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- dec.setForcedRenormalization(false);
- if(all)
- dec.setAllToAllMethod(PointToPoint);
-
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- dec.recvData();
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- dec.sendData();
- }
-
- get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
- if( rank == 0 )
- cout << "RECV DATA : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
-
- delete source_group;
- delete target_group;
- delete self_group;
- delete paramesh;
- delete parafield;
- mesh->decrRef() ;
- delete icocofield;
-
- MPI_Barrier(MPI_COMM_WORLD);
- cout << "end of InterpKernelDEC_2D test"<<endl;
-}
-
-void get_time( float *telps, float *tuser, float *tsys, float *tcpu )
-{
-
- /* Variables declaration */
- static time_t zsec = 0;
- static long zusec = 0;
- time_t nsec;
- long nusec;
- static clock_t zclock = 0;
- clock_t nclock;
- static clock_t zuser = 0;
- static clock_t zsys = 0;
- clock_t nuser, nsys;
-
- struct timeval tp;
- struct timezone tzp;
- struct tms local;
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- /* Elapsed time reading */
-
- gettimeofday(&tp,&tzp);
- nsec = tp.tv_sec;
- nusec = tp.tv_usec;
- *telps = (float)(nsec-zsec) + (float)(nusec-zusec)/(float)CLOCKS_PER_SEC;
-
- zsec = nsec;
- zusec = nusec;
-
- /* User and system CPU time reading */
-
- times(&local);
- nuser = local.tms_utime;
- nsys = local.tms_stime;
- *tuser = (float)(nuser-zuser) / (float)CLK_TCK;
- *tsys = (float)(nsys-zsys) / (float)CLK_TCK;
-
- zuser = nuser;
- zsys = nsys;
-
- /* CPU time reading */
-
- nclock = clock();
- *tcpu = (float)(nclock-zclock) / (float)CLOCKS_PER_SEC;
- zclock = nclock;
-
-}
-
-
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include <sstream>
+#include <cmath>
+
+using namespace std;
+
+
+
+/*!
+ * Tool to remove temporary files.
+ * Allows automatique removal of temporary files in case of test failure.
+ */
+MPIAccessDECTest_TmpFilesRemover::~MPIAccessDECTest_TmpFilesRemover()
+{
+ set<string>::iterator it = myTmpFiles.begin();
+ for (; it != myTmpFiles.end(); it++) {
+ if (access((*it).data(), F_OK) == 0)
+ remove((*it).data());
+ }
+ myTmpFiles.clear();
+ //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
+}
+
+bool MPIAccessDECTest_TmpFilesRemover::Register(const string theTmpFile)
+{
+ return (myTmpFiles.insert(theTmpFile)).second;
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#ifndef _MPIACCESSDECTEST_HXX_
+#define _MPIACCESSDECTEST_HXX_
+
+#include <cppunit/extensions/HelperMacros.h>
+
+#include <set>
+#include <string>
+#include <iostream>
+#include "mpi.h"
+
+
+/*!
+ * CppUnit fixture grouping the MPIAccessDEC unit tests (AllToAll /
+ * AllToAllv / AllToAllTime / AllToAllvTime / AllToAllvTimeDouble exchanges).
+ * Each public test_* entry point forwards to a private parameterized helper
+ * taking an Asynchronous flag (and, for AllToAllvTime, a UseMPINative flag).
+ * NOTE(review): the synchronous point-to-point variants are commented out of
+ * the registered suite — presumably disabled on purpose; confirm before
+ * re-enabling.
+ */
+class MPIAccessDECTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE( MPIAccessDECTest );
+ // CPPUNIT_TEST( test_AllToAllDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllDECAsynchronousPointToPoint ) ;
+ //CPPUNIT_TEST( test_AllToAllvDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvDECAsynchronousPointToPoint ) ;
+ //CPPUNIT_TEST( test_AllToAllTimeDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllTimeDECAsynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousNative ) ;
+ //CPPUNIT_TEST( test_AllToAllvTimeDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvTimeDECAsynchronousPointToPoint ) ;
+ //CPPUNIT_TEST( test_AllToAllvTimeDoubleDECSynchronousPointToPoint ) ;
+ CPPUNIT_TEST( test_AllToAllvTimeDoubleDECAsynchronousPointToPoint ) ;
+ CPPUNIT_TEST_SUITE_END();
+
+
+public:
+
+ MPIAccessDECTest():CppUnit::TestFixture(){}
+ ~MPIAccessDECTest(){}
+ void setUp(){}
+ void tearDown(){}
+ void test_AllToAllDECSynchronousPointToPoint() ;
+ void test_AllToAllDECAsynchronousPointToPoint() ;
+ void test_AllToAllvDECSynchronousPointToPoint() ;
+ void test_AllToAllvDECAsynchronousPointToPoint() ;
+ void test_AllToAllTimeDECSynchronousPointToPoint() ;
+ void test_AllToAllTimeDECAsynchronousPointToPoint() ;
+ void test_AllToAllvTimeDECSynchronousNative() ;
+ void test_AllToAllvTimeDECSynchronousPointToPoint() ;
+ void test_AllToAllvTimeDECAsynchronousPointToPoint() ;
+ void test_AllToAllvTimeDoubleDECSynchronousPointToPoint() ;
+ void test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() ;
+
+private:
+ // Parameterized implementations shared by the public entry points above.
+ void test_AllToAllDEC( bool Asynchronous ) ;
+ void test_AllToAllvDEC( bool Asynchronous ) ;
+ void test_AllToAllTimeDEC( bool Asynchronous ) ;
+ void test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) ;
+ void test_AllToAllvTimeDoubleDEC( bool Asynchronous ) ;
+ };
+
+// to automatically remove temporary files from disk
+// (RAII: files registered via Register() are deleted in the destructor,
+// so they are cleaned up even when a test assertion fails)
+class MPIAccessDECTest_TmpFilesRemover
+{
+public:
+ MPIAccessDECTest_TmpFilesRemover() {}
+ ~MPIAccessDECTest_TmpFilesRemover();
+ bool Register(const std::string theTmpFile);
+
+private:
+ std::set<std::string> myTmpFiles;
+};
+
+/*!
+ * Tool to print array to stream.
+ * Output format: "text: {a0, a1, ...}" followed by std::endl;
+ * a length <= 0 array prints "text: {}".
+ * \param stream output stream
+ * \param array  pointer to at least \a length elements
+ * \param length number of elements to print
+ * \param text   label printed before the values
+ */
+template<class T>
+void MPIAccessDECTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
+{
+ stream << text << ": {";
+ if (length > 0) {
+ stream << array[0];
+ for (int i = 1; i < length; i++) {
+ stream << ", " << array[i];
+ }
+ }
+ stream << "}" << std::endl;
+}; // NOTE(review): stray ';' after function body — remove when convenient
+
+#endif
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include <sstream>
+#include <cmath>
+
+using namespace std;
+
+
+
+/*!
+ * Tool to remove temporary files.
+ * Allows automatic removal of temporary files in case of test failure:
+ * every registered file still present on disk is deleted.
+ * NOTE(review): uses access()/F_OK and remove() without an explicit
+ * <unistd.h>/<cstdio> include — presumably transitively included; confirm.
+ */
+MPIAccessTest_TmpFilesRemover::~MPIAccessTest_TmpFilesRemover()
+{
+ set<string>::iterator it = myTmpFiles.begin();
+ for (; it != myTmpFiles.end(); it++) {
+ // Only attempt removal if the file actually exists.
+ if (access((*it).data(), F_OK) == 0)
+ remove((*it).data());
+ }
+ myTmpFiles.clear();
+ //cout << "~MPIAccessTest_TmpFilesRemover()" << endl;
+}
+
+// Registers \a theTmpFile for deletion in the destructor.
+// Returns true if the path was not already registered (set insertion result).
+bool MPIAccessTest_TmpFilesRemover::Register(const string theTmpFile)
+{
+ return (myTmpFiles.insert(theTmpFile)).second;
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#ifndef _MPIACCESSTEST_HXX_
+#define _MPIACCESSTEST_HXX_
+
+#include <cppunit/extensions/HelperMacros.h>
+
+#include <set>
+#include <string>
+#include <iostream>
+#include "mpi.h"
+
+
+/*!
+ * CppUnit fixture grouping the MPI_Access unit tests: blocking and
+ * non-blocking send/receive, probe, cancel, length-checked transfers,
+ * time-stamped exchanges and the ISend/IRecv bottleneck scenario.
+ * All listed tests are registered in the suite.
+ */
+class MPIAccessTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE( MPIAccessTest );
+ CPPUNIT_TEST( test_MPI_Access_Send_Recv ) ;
+ CPPUNIT_TEST( test_MPI_Access_Cyclic_Send_Recv ) ;
+ CPPUNIT_TEST( test_MPI_Access_SendRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_Cyclic_ISend_IRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISendRecv ) ;
+ CPPUNIT_TEST( test_MPI_Access_Probe ) ;
+ CPPUNIT_TEST( test_MPI_Access_IProbe ) ;
+ CPPUNIT_TEST( test_MPI_Access_Cancel ) ;
+ CPPUNIT_TEST( test_MPI_Access_Send_Recv_Length ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_Length_1 ) ;
+ CPPUNIT_TEST( test_MPI_Access_Time ) ;
+ CPPUNIT_TEST( test_MPI_Access_Time_0 ) ;
+ CPPUNIT_TEST( test_MPI_Access_ISend_IRecv_BottleNeck ) ;
+ CPPUNIT_TEST_SUITE_END();
+
+
+public:
+
+ MPIAccessTest():CppUnit::TestFixture(){}
+ ~MPIAccessTest(){}
+ void setUp(){}
+ void tearDown(){}
+ void test_MPI_Access_Send_Recv() ;
+ void test_MPI_Access_Cyclic_Send_Recv() ;
+ void test_MPI_Access_SendRecv() ;
+ void test_MPI_Access_ISend_IRecv() ;
+ void test_MPI_Access_Cyclic_ISend_IRecv() ;
+ void test_MPI_Access_ISendRecv() ;
+ void test_MPI_Access_Probe() ;
+ void test_MPI_Access_IProbe() ;
+ void test_MPI_Access_Cancel() ;
+ void test_MPI_Access_Send_Recv_Length() ;
+ void test_MPI_Access_ISend_IRecv_Length() ;
+ void test_MPI_Access_ISend_IRecv_Length_1() ;
+ void test_MPI_Access_Time() ;
+ void test_MPI_Access_Time_0() ;
+ void test_MPI_Access_ISend_IRecv_BottleNeck() ;
+
+private:
+ // no private helpers (section kept for symmetry with the other fixtures)
+ };
+
+// to automatically remove temporary files from disk
+// (RAII: files registered via Register() are deleted in the destructor,
+// so they are cleaned up even when a test assertion fails)
+class MPIAccessTest_TmpFilesRemover
+{
+public:
+ MPIAccessTest_TmpFilesRemover() {}
+ ~MPIAccessTest_TmpFilesRemover();
+ bool Register(const std::string theTmpFile);
+
+private:
+ std::set<std::string> myTmpFiles;
+};
+
+/*!
+ * Tool to print array to stream.
+ * Output format: "text: {a0, a1, ...}" followed by std::endl;
+ * a length <= 0 array prints "text: {}".
+ * \param stream output stream
+ * \param array  pointer to at least \a length elements
+ * \param length number of elements to print
+ * \param text   label printed before the values
+ */
+template<class T>
+void MPIAccessTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
+{
+ stream << text << ": {";
+ if (length > 0) {
+ stream << array[0];
+ for (int i = 1; i < length; i++) {
+ stream << ", " << array[i];
+ }
+ }
+ stream << "}" << std::endl;
+}; // NOTE(review): stray ';' after function body — remove when convenient
+
+#endif
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#ifndef _MPIMAINTEST_HXX_
+#define _MPIMAINTEST_HXX_
+
+#include <cppunit/CompilerOutputter.h>
+#include <cppunit/TestResult.h>
+#include <cppunit/TestResultCollector.h>
+#include <cppunit/TextTestProgressListener.h>
+#include <cppunit/BriefTestProgressListener.h>
+#include <cppunit/extensions/TestFactoryRegistry.h>
+#include <cppunit/TestRunner.h>
+#include <stdexcept>
+
+#include <mpi.h>
+
+#include <iostream>
+#include <fstream>
+#ifndef WIN32
+#include <fpu_control.h>
+#endif
+
+// ============================================================================
+/*!
+ * Main program source for Unit Tests with cppunit package. It does not depend
+ * on the actual tests, so the same main() is used for all partial unit tests.
+ *
+ * Runs the full registered CppUnit suite under MPI and writes a
+ * compiler-format report to a per-rank file "UnitTestResult<rank>".
+ *
+ * NOTE(review): main() is defined in a header (.hxx); it must be included by
+ * exactly one translation unit per test executable.
+ */
+// ============================================================================
+
+int main(int argc, char* argv[])
+{
+#ifndef WIN32
+ // Unmask FPU exceptions (invalid operand, zero divide, overflow) so that
+ // floating-point errors abort the tests instead of propagating silently.
+ fpu_control_t cw = _FPU_DEFAULT & ~(_FPU_MASK_IM | _FPU_MASK_ZM | _FPU_MASK_OM);
+ _FPU_SETCW(cw);
+#endif
+ MPI_Init(&argc,&argv);
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ // --- Create the event manager and test controller
+ CPPUNIT_NS::TestResult controller;
+
+ // --- Add a listener that collects test results
+ CPPUNIT_NS::TestResultCollector result;
+ controller.addListener( &result );
+
+ // --- Add a listener that prints progress as the tests run.
+#ifdef WIN32
+ CPPUNIT_NS::TextTestProgressListener progress;
+#else
+ CPPUNIT_NS::BriefTestProgressListener progress;
+#endif
+ controller.addListener( &progress );
+
+ // --- Get the top level suite from the registry
+
+ CPPUNIT_NS::Test *suite =
+ CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest();
+
+ // --- Add the suite to the runner and run the tests
+
+ CPPUNIT_NS::TestRunner runner;
+ runner.addTest( suite );
+ runner.run( controller);
+
+ // --- Print the results in a compiler compatible format, one file per rank.
+
+ std::ostringstream testFileName;
+ testFileName<<"UnitTestResult"<<rank;
+ std::ofstream testFile;
+ testFile.open(testFileName.str().c_str(), std::ios::out | std::ios::trunc);
+ //CPPUNIT_NS::CompilerOutputter outputter( &result, std::cerr );
+ CPPUNIT_NS::CompilerOutputter outputter( &result, testFile );
+ outputter.write();
+
+ bool wasSuccessful = result.wasSuccessful();
+ testFile.close();
+
+ MPI_Finalize();
+
+ // --- Return error code 1 if one of the tests failed.
+
+ return wasSuccessful ? 0 : 1;
+}
+
+#endif
--- /dev/null
+# Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+include $(top_srcdir)/adm_local/unix/make_common_starter.am
+
+# Library gathering the ParaMEDMEM / MPIAccess unit-test fixtures and cases.
+lib_LTLIBRARIES = libParaMEDMEMTest.la
+
+salomeinclude_HEADERS = \
+ MPIMainTest.hxx \
+ MPIAccessDECTest.hxx \
+ MPIAccessTest.hxx \
+ ParaMEDMEMTest.hxx
+
+EXTRA_DIST += MPIMainTest.hxx ParaMEDMEMTest_NonCoincidentDEC.cxx
+
+dist_libParaMEDMEMTest_la_SOURCES = \
+ ParaMEDMEMTest.cxx \
+ ParaMEDMEMTest_MPIProcessorGroup.cxx \
+ ParaMEDMEMTest_BlockTopology.cxx \
+ ParaMEDMEMTest_InterpKernelDEC.cxx \
+ ParaMEDMEMTest_StructuredCoincidentDEC.cxx \
+ ParaMEDMEMTest_MEDLoader.cxx \
+ ParaMEDMEMTest_ICocoTrio.cxx \
+ ParaMEDMEMTest_Gauthier1.cxx \
+ MPIAccessDECTest.cxx \
+ test_AllToAllDEC.cxx \
+ test_AllToAllvDEC.cxx \
+ test_AllToAllTimeDEC.cxx \
+ test_AllToAllvTimeDEC.cxx \
+ test_AllToAllvTimeDoubleDEC.cxx \
+ MPIAccessTest.cxx \
+ test_MPI_Access_Send_Recv.cxx \
+ test_MPI_Access_Cyclic_Send_Recv.cxx \
+ test_MPI_Access_SendRecv.cxx \
+ test_MPI_Access_ISend_IRecv.cxx \
+ test_MPI_Access_Cyclic_ISend_IRecv.cxx \
+ test_MPI_Access_ISendRecv.cxx \
+ test_MPI_Access_Probe.cxx \
+ test_MPI_Access_IProbe.cxx \
+ test_MPI_Access_Cancel.cxx \
+ test_MPI_Access_Send_Recv_Length.cxx \
+ test_MPI_Access_ISend_IRecv_Length.cxx \
+ test_MPI_Access_ISend_IRecv_Length_1.cxx \
+ test_MPI_Access_Time.cxx \
+ test_MPI_Access_Time_0.cxx \
+ test_MPI_Access_ISend_IRecv_BottleNeck.cxx
+
+
+libParaMEDMEMTest_la_CPPFLAGS = \
+ @CPPUNIT_INCLUDES@ \
+ $(MPI_INCLUDES) \
+ -I$(srcdir)/../INTERP_KERNEL \
+ -I$(srcdir)/../INTERP_KERNEL/Bases \
+ -I$(srcdir)/../INTERP_KERNEL/Geometric2D \
+ -I$(srcdir)/../ParaMEDMEM \
+ -I$(srcdir)/../MEDCoupling \
+ -I$(srcdir)/../MEDLoader \
+ -I$(srcdir)/../ParaMEDLoader
+
+# NOTE(review): libraries are passed through _LDFLAGS; automake convention is
+# to list them in libParaMEDMEMTest_la_LIBADD — consider moving them there.
+libParaMEDMEMTest_la_LDFLAGS = \
+ ../ParaMEDMEM/libparamedmem.la \
+ ../ParaMEDLoader/libparamedloader.la \
+ @CPPUNIT_LIBS@ $(MPI_LIBS)
+
+# Executables targets
+bin_PROGRAMS = \
+ TestParaMEDMEM \
+ TestMPIAccessDEC \
+ TestMPIAccess \
+ test_perf
+
+dist_TestParaMEDMEM_SOURCES = TestParaMEDMEM.cxx
+dist_TestMPIAccessDEC_SOURCES = TestMPIAccessDEC.cxx
+dist_TestMPIAccess_SOURCES = TestMPIAccess.cxx
+dist_test_perf_SOURCES = test_perf.cxx
+
+# NOTE(review): $(libMEDMEMTest_la_LDFLAGS) looks copy-pasted from the
+# MEDMEMTest Makefile (no such variable is defined here) — confirm and drop.
+LDADD = $(MED2_LIBS) $(libMEDMEMTest_la_LDFLAGS) -lm $(MPI_LIBS) \
+ libParaMEDMEMTest.la \
+ ../INTERP_KERNEL/libinterpkernel.la
+
+if MED_ENABLE_FVM
+ LDADD += $(FVM_LIBS)
+ dist_libParaMEDMEMTest_la_SOURCES += ParaMEDMEMTest_NonCoincidentDEC.cxx
+ libParaMEDMEMTest_la_CPPFLAGS += -DMED_ENABLE_FVM $(FVM_INCLUDES)
+ libParaMEDMEMTest_la_LDFLAGS += $(FVM_LIBS)
+endif
+
+AM_CPPFLAGS += $(libParaMEDMEMTest_la_CPPFLAGS)
+
+UNIT_TEST_PROG = TestParaMEDMEM
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include <sstream>
+#include <cmath>
+#include <list>
+#include <stdexcept>
+#include <stdlib.h>
+
+//================================================================================
+/*!
+ * \brief Get path to the resources file.
+ *
+ * When running 'make check' (top_srcdir env.var is set) the file is taken
+ * from the ${top_srcdir}/resources folder.
+ * Otherwise, file is searched in ${MED_ROOT_DIR}/share/salome/resources/med folder.
+ *
+ * \param filename name of the resource file (should not include a path)
+ * \return full path to the resource file (bare \a filename if neither
+ *         environment variable is set)
+ */
+//================================================================================
+
+std::string ParaMEDMEMTest::getResourceFile( const std::string& filename )
+{
+ std::string resourceFile = "";
+
+ if ( getenv("top_srcdir") ) {
+ // we are in 'make check' step
+ resourceFile = getenv("top_srcdir");
+ resourceFile += "/resources/";
+ }
+ else if ( getenv("MED_ROOT_DIR") ) {
+ // use MED_ROOT_DIR env.var
+ resourceFile = getenv("MED_ROOT_DIR");
+ resourceFile += "/share/salome/resources/med/";
+ }
+ resourceFile += filename;
+ return resourceFile;
+}
+
+
+//================================================================================
+/*!
+ * \brief Returns writable temporary directory
+ *
+ * Candidates are tried in order: $TMP, $TMPDIR, then /tmp; the first one
+ * writable by the current user (access(..., W_OK)) wins.
+ *
+ * \return full path to the temporary directory
+ * \throw std::runtime_error if none of the candidates is writable
+ */
+//================================================================================
+
+std::string ParaMEDMEMTest::getTmpDirectory()
+{
+ std::string path;
+
+ std::list<std::string> dirs;
+ if ( getenv("TMP") ) dirs.push_back( getenv("TMP" ));
+ if ( getenv("TMPDIR") ) dirs.push_back( getenv("TMPDIR" ));
+ dirs.push_back( "/tmp" );
+
+ std::string tmpd = "";
+ for ( std::list<std::string>::iterator dir = dirs.begin(); dir != dirs.end() && tmpd == "" ; ++dir ) {
+ if ( access( dir->data(), W_OK ) == 0 ) {
+ tmpd = dir->data();
+ }
+ }
+
+ if ( tmpd == "" )
+ throw std::runtime_error("Can't find writable temporary directory. Set TMP environment variable");
+
+ return tmpd;
+}
+
+//================================================================================
+/*!
+ * \brief Creates a copy of source file (if source file is specified)
+ *        in the temporary directory and returns a path to the tmp file
+ *
+ * \param tmpfile name of the temporary file (without path)
+ * \param srcfile source file; when empty, no copy is made and only the
+ *                target path is returned
+ * \return path to the temporary file
+ *
+ * NOTE(review): the copy is done via system("cp ... ; chmod +w ...") — the
+ * return code is not checked, and paths containing spaces or shell
+ * metacharacters would break the command. Acceptable for test fixtures,
+ * but do not reuse for untrusted paths.
+ */
+//================================================================================
+std::string ParaMEDMEMTest::makeTmpFile( const std::string& tmpfile, const std::string& srcfile )
+{
+ std::string tmpf = getTmpDirectory() + "/" + tmpfile;
+ if ( srcfile != "" ) {
+ std::string cmd = "cp " + srcfile + " " + tmpf + " ; chmod +w " + tmpf;
+ system( cmd.c_str() );
+ }
+ return tmpf;
+}
+
+
+/*!
+ * Tool to remove temporary files.
+ * Allows automatic removal of temporary files in case of test failure:
+ * every file registered via Register() that still exists on disk is deleted.
+ */
+ParaMEDMEMTest_TmpFilesRemover::~ParaMEDMEMTest_TmpFilesRemover()
+{
+ std::set<std::string>::iterator it = myTmpFiles.begin();
+ for (; it != myTmpFiles.end(); it++) {
+ // Only attempt removal if the file actually exists.
+ if (access((*it).data(), F_OK) == 0)
+ remove((*it).data());
+ }
+ myTmpFiles.clear();
+ //cout << "~ParaMEDMEMTest_TmpFilesRemover()" << endl;
+}
+
+// Registers \a theTmpFile for deletion in the destructor.
+// Returns true if the path was not already registered (set insertion result).
+bool ParaMEDMEMTest_TmpFilesRemover::Register(const std::string theTmpFile)
+{
+ return (myTmpFiles.insert(theTmpFile)).second;
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#ifndef _ParaMEDMEMTEST_HXX_
+#define _ParaMEDMEMTEST_HXX_
+
+#include <cppunit/extensions/HelperMacros.h>
+
+#include <set>
+#include <string>
+#include <iostream>
+#include "mpi.h"
+
+
+/*!
+ * CppUnit fixture gathering the ParaMEDMEM unit tests: processor groups,
+ * block topologies, the InterpKernel / NonCoincident / StructuredCoincident
+ * DECs (synchronous and asynchronous variants), ICoCo coupling scenarios
+ * and the parallel MEDLoader read tests.
+ * FIX(review): testStructuredCoincidentDEC was registered twice in the
+ * suite, making it run twice per session; the duplicate line was removed.
+ */
+class ParaMEDMEMTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE( ParaMEDMEMTest );
+ CPPUNIT_TEST(testMPIProcessorGroup_constructor);
+ CPPUNIT_TEST(testMPIProcessorGroup_boolean);
+ CPPUNIT_TEST(testMPIProcessorGroup_rank);
+ CPPUNIT_TEST(testBlockTopology_constructor);
+ CPPUNIT_TEST(testBlockTopology_serialize);
+ CPPUNIT_TEST(testInterpKernelDEC_2D);
+ CPPUNIT_TEST(testInterpKernelDEC2_2D);
+ CPPUNIT_TEST(testInterpKernelDEC_2DP0P1);
+ CPPUNIT_TEST(testInterpKernelDEC_3D);
+ CPPUNIT_TEST(testInterpKernelDECNonOverlapp_2D_P0P0);
+ CPPUNIT_TEST(testInterpKernelDECNonOverlapp_2D_P0P1P1P0);
+
+ CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D);
+ CPPUNIT_TEST(testSynchronousEqualInterpKernelWithoutInterpDEC_2D);
+ CPPUNIT_TEST(testSynchronousEqualInterpKernelDEC_2D);
+ CPPUNIT_TEST(testSynchronousFasterSourceInterpKernelDEC_2D);
+ CPPUNIT_TEST(testSynchronousSlowerSourceInterpKernelDEC_2D);
+ CPPUNIT_TEST(testSynchronousSlowSourceInterpKernelDEC_2D);
+ CPPUNIT_TEST(testSynchronousFastSourceInterpKernelDEC_2D);
+ CPPUNIT_TEST(testAsynchronousEqualInterpKernelDEC_2D);
+ CPPUNIT_TEST(testAsynchronousFasterSourceInterpKernelDEC_2D);
+ CPPUNIT_TEST(testAsynchronousSlowerSourceInterpKernelDEC_2D);
+ CPPUNIT_TEST(testAsynchronousSlowSourceInterpKernelDEC_2D);
+ CPPUNIT_TEST(testAsynchronousFastSourceInterpKernelDEC_2D);
+#ifdef MED_ENABLE_FVM
+ //can be added again after FVM correction for 2D
+ // CPPUNIT_TEST(testNonCoincidentDEC_2D);
+ CPPUNIT_TEST(testNonCoincidentDEC_3D);
+#endif
+ CPPUNIT_TEST(testStructuredCoincidentDEC);
+ CPPUNIT_TEST(testICocoTrio1);
+ CPPUNIT_TEST(testGauthier1);
+ CPPUNIT_TEST(testGauthier2);
+ CPPUNIT_TEST(testMEDLoaderRead1);
+ CPPUNIT_TEST(testMEDLoaderPolygonRead);
+ CPPUNIT_TEST(testMEDLoaderPolyhedronRead);
+ //CPPUNIT_TEST(testMEDLoaderWrite1);
+ //CPPUNIT_TEST(testMEDLoaderPolygonWrite);
+ CPPUNIT_TEST_SUITE_END();
+
+
+public:
+
+ ParaMEDMEMTest():CppUnit::TestFixture(){}
+ ~ParaMEDMEMTest(){}
+ void setUp(){}
+ void tearDown(){}
+ void testMPIProcessorGroup_constructor();
+ void testMPIProcessorGroup_boolean();
+ void testMPIProcessorGroup_rank();
+ void testBlockTopology_constructor();
+ void testBlockTopology_serialize();
+ void testInterpKernelDEC_2D();
+ void testInterpKernelDEC2_2D();
+ void testInterpKernelDEC_2DP0P1();
+ void testInterpKernelDEC_3D();
+ void testInterpKernelDECNonOverlapp_2D_P0P0();
+ void testInterpKernelDECNonOverlapp_2D_P0P1P1P0();
+#ifdef MED_ENABLE_FVM
+ void testNonCoincidentDEC_2D();
+ void testNonCoincidentDEC_3D();
+#endif
+ void testStructuredCoincidentDEC();
+ void testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D();
+ void testSynchronousEqualInterpKernelWithoutInterpDEC_2D();
+ void testSynchronousEqualInterpKernelDEC_2D();
+ void testSynchronousFasterSourceInterpKernelDEC_2D();
+ void testSynchronousSlowerSourceInterpKernelDEC_2D();
+ void testSynchronousSlowSourceInterpKernelDEC_2D();
+ void testSynchronousFastSourceInterpKernelDEC_2D();
+
+ void testAsynchronousEqualInterpKernelDEC_2D();
+ void testAsynchronousFasterSourceInterpKernelDEC_2D();
+ void testAsynchronousSlowerSourceInterpKernelDEC_2D();
+ void testAsynchronousSlowSourceInterpKernelDEC_2D();
+ void testAsynchronousFastSourceInterpKernelDEC_2D();
+ //
+ void testICocoTrio1();
+ void testGauthier1();
+ void testGauthier2();
+ //
+ void testMEDLoaderRead1();
+ void testMEDLoaderPolygonRead();
+ void testMEDLoaderPolyhedronRead();
+ void testMEDLoaderWrite1();
+ void testMEDLoaderPolygonWrite();
+
+ // Test-support helpers (implemented in ParaMEDMEMTest.cxx).
+ std::string getResourceFile( const std::string& );
+ std::string getTmpDirectory();
+ std::string makeTmpFile( const std::string&, const std::string& = "" );
+
+private:
+ // Parameterized implementations shared by the public entry points.
+ void testNonCoincidentDEC(const std::string& filename1,
+ const std::string& meshname1,
+ const std::string& filename2,
+ const std::string& meshname2,
+ int nbprocsource, double epsilon);
+ void testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA,
+ double dtB, double tmaxB,
+ bool WithPointToPoint, bool Asynchronous, bool WithInterp, const char *srcMeth, const char *targetMeth);
+ void testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth);
+ void testInterpKernelDEC2_2D_(const char *srcMeth, const char *targetMeth);
+ void testInterpKernelDEC_3D_(const char *srcMeth, const char *targetMeth);
+};
+
+// to automatically remove temporary files from disk
+// (RAII: files registered via Register() are deleted in the destructor,
+// so they are cleaned up even when a test assertion fails)
+class ParaMEDMEMTest_TmpFilesRemover
+{
+public:
+ ParaMEDMEMTest_TmpFilesRemover() {}
+ ~ParaMEDMEMTest_TmpFilesRemover();
+ bool Register(const std::string theTmpFile);
+
+private:
+ std::set<std::string> myTmpFiles;
+};
+
+/*!
+ * Tool to print array to stream.
+ * Output format: "text: {a0, a1, ...}" followed by std::endl;
+ * a length <= 0 array prints "text: {}".
+ * \param stream output stream
+ * \param array  pointer to at least \a length elements
+ * \param length number of elements to print
+ * \param text   label printed before the values
+ */
+template<class T>
+void ParaMEDMEMTest_DumpArray (std::ostream & stream, const T* array, const int length, const std::string text)
+{
+ stream << text << ": {";
+ if (length > 0) {
+ stream << array[0];
+ for (int i = 1; i < length; i++) {
+ stream << ", " << array[i];
+ }
+ }
+ stream << "}" << std::endl;
+}; // NOTE(review): stray ';' after function body — remove when convenient
+
+#endif
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "InterpolationUtils.hxx"
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "BlockTopology.hxx"
+
+#include <string>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+/*
+ * Check methods defined in BlockTopology.hxx
+ *
+ BlockTopology(){};
+ BlockTopology(const ProcessorGroup& group, const MEDMEM::GRID& grid);
+ BlockTopology(const BlockTopology& geom_topo, const ComponentTopology& comp_topo);
+ (+) BlockTopology(const ProcessorGroup& group, int nb_elem);
+ virtual ~BlockTopology();
+ (+) inline int getNbElements()const;
+ (+) inline int getNbLocalElements() const;
+ const ProcessorGroup* getProcGroup()const {return _proc_group;};
+ (+) inline std::pair<int,int> globalToLocal (const int) const ;
+ (+) inline int localToGlobal (const std::pair<int,int>) const;
+ (+) std::vector<std::pair<int,int> > getLocalArrayMinMax() const ;
+ (+) int getDimension() const {return _dimension;};
+ (+) void serialize(int* & serializer, int& size) const ;
+ (+) void unserialize(const int* serializer, const CommInterface& comm_interface);
+
+ */
+
+// Checks BlockTopology(group, nb_elem) over the whole MPI_COMM_WORLD group:
+// element counts, dimension, globalToLocal/localToGlobal round trips and
+// the per-process [min,max) bounds returned by getLocalArrayMinMax().
+void ParaMEDMEMTest::testBlockTopology_constructor()
+{
+ //test constructor
+ int size;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ CommInterface interface;
+ MPIProcessorGroup group(interface);
+ // 1 element per process: local count 1, global count = nb procs, dim 1.
+ BlockTopology blocktopo(group,1);
+ CPPUNIT_ASSERT_EQUAL(1,blocktopo.getNbLocalElements());
+ CPPUNIT_ASSERT_EQUAL(size,blocktopo.getNbElements());
+ CPPUNIT_ASSERT_EQUAL(1,blocktopo.getDimension());
+
+ //checking access methods
+ // 2 elements per process: global index g maps to (proc g/2, local g%2).
+ BlockTopology blocktopo2(group,2);
+ std::pair<int,int> local= blocktopo2.globalToLocal(0);
+ CPPUNIT_ASSERT_EQUAL(local.first,0);
+ CPPUNIT_ASSERT_EQUAL(local.second,0);
+ int global=blocktopo2.localToGlobal(local);
+ CPPUNIT_ASSERT_EQUAL(global,0);
+
+ local = blocktopo2.globalToLocal(1);
+ CPPUNIT_ASSERT_EQUAL(local.first,0);
+ CPPUNIT_ASSERT_EQUAL(local.second,1);
+ global=blocktopo2.localToGlobal(local);
+ CPPUNIT_ASSERT_EQUAL(global,1);
+
+ // Last global element belongs to the last process, local index 1.
+ local = blocktopo2.globalToLocal(2*size-1);
+ CPPUNIT_ASSERT_EQUAL(local.first,size-1);
+ CPPUNIT_ASSERT_EQUAL(local.second,1);
+ global=blocktopo2.localToGlobal(local);
+ CPPUNIT_ASSERT_EQUAL(global,2*size-1);
+
+ // This process owns the half-open global range [2*rank, 2*rank+2).
+ std::vector<std::pair<int,int> > bounds = blocktopo2.getLocalArrayMinMax();
+ int vecsize = bounds.size();
+ CPPUNIT_ASSERT_EQUAL(1,vecsize);
+ CPPUNIT_ASSERT_EQUAL(2*rank, (bounds[0]).first);
+ CPPUNIT_ASSERT_EQUAL(2*rank+2, (bounds[0]).second);
+ }
+
+// Checks the serialize()/unserialize() round trip used to transfer a
+// BlockTopology through an MPI_Send/Recv pair: the reconstructed topology
+// must report the same global element count.
+void ParaMEDMEMTest::testBlockTopology_serialize()
+{
+
+ int size;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ CommInterface interface;
+ MPIProcessorGroup group(interface);
+ BlockTopology blocktopo(group,3);
+
+//testing the serialization process that is used to transfer a
+//block topology via a MPI_Send/Recv comm
+ BlockTopology blocktopo_recv;
+ int* serializer;
+ int sersize;
+ blocktopo.serialize(serializer,sersize);
+ blocktopo_recv.unserialize(serializer,interface);
+ CPPUNIT_ASSERT_EQUAL(blocktopo.getNbElements(),blocktopo_recv.getNbElements());
+ // NOTE(review): delete[] assumes serialize() allocates with new[] — confirm
+ // against BlockTopology::serialize.
+ delete [] serializer;
+}
--- /dev/null
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include <string>
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "DEC.hxx"
+#include "InterpKernelDEC.hxx"
+#include <set>
+#include <time.h>
+#include "ICoCoTrioField.hxx"
+#include <iostream>
+#include <assert.h>
+#include <math.h>
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace ICoCo;
+
+// Asserts that \a field holds exactly \a lgth elements and that each field
+// value matches the expected \a vals entry within an absolute tolerance 1e-12.
+void afficheGauthier1( const TrioField& field, const double *vals, int lgth)
+{
+ CPPUNIT_ASSERT_EQUAL(lgth,field._nb_elems);
+ for (int ele=0;ele<field._nb_elems;ele++)
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(vals[ele],field._field[ele],1e-12);
+}
+
+// Fills \a coords with the 3D coordinates of 8 nodes (coords must hold at
+// least 24 doubles): nodes 0-3 form a quad in the xz-plane, nodes 4-7 are
+// the same points offset by 'epaisseur' along y.
+// NOTE(review): the angle expression has a leading factor 0, so it always
+// evaluates to 0 (no rotation) — presumably kept for experimentation; confirm.
+void remplit_coordGauthier1(double* coords)
+{
+ double angle,epaisseur;
+ angle=0*45*(asin(1)/90);
+ epaisseur=1e-0;
+ coords[0*3+0]=0.;
+ coords[0*3+1]=0.;
+ coords[0*3+2]=0.;
+
+ coords[1*3+0]=cos(angle);
+ coords[1*3+1]=0.;
+ coords[1*3+2]=sin(angle);
+
+
+ coords[2*3+0]=-sin(angle);
+ coords[2*3+1]=0.;
+ coords[2*3+2]=cos(angle);
+
+ // Node 3 closes the quad: vector sum of nodes 1 and 2.
+ for (int d=0;d<3;d++)
+ coords[3*3+d]=coords[1*3+d]+ coords[2*3+d];
+
+ // Nodes 4-7 duplicate nodes 0-3, shifted by 'epaisseur' along y.
+ for (int i=4;i<8;i++)
+ {
+ for (int d=0;d<3;d++)
+ coords[i*3+d]=coords[(i-4)*3+d];
+ coords[i*3+1]+=epaisseur;
+ }
+
+}
+
+// Initializes \a champ_quad as a 2D-in-3D quadrangle field support.
+// When \a is_master is non-zero the field gets the full geometry (8 nodes,
+// 2 quads built on the coordinates from remplit_coordGauthier1); otherwise
+// it gets an empty mesh (0 nodes, 0 elements). No field values are
+// allocated in either case (_field = 0, no ownership).
+void init_quadGauthier1(TrioField& champ_quad,int is_master)
+{
+
+ champ_quad.setName("champ_quad");
+ champ_quad._space_dim=3;
+ champ_quad._mesh_dim=2;
+ champ_quad._nodes_per_elem=4;
+ champ_quad._itnumber=0;
+ champ_quad._time1=0;
+ champ_quad._time2=1;
+ champ_quad._nb_field_components=1;
+
+ if (is_master)
+ {
+ champ_quad._nbnodes=8;
+ champ_quad._nb_elems=2;
+
+ champ_quad._coords=new double[champ_quad._nbnodes*champ_quad._space_dim];
+ //memcpy(afield._coords,sommets.addr(),champ_quad._nbnodes*champ_quad._space_dim*sizeof(double));
+
+ remplit_coordGauthier1(champ_quad._coords);
+
+
+ // Quad 0 = nodes (0,1,3,2); quad 1 = nodes (4,5,7,6).
+ champ_quad._connectivity=new int[champ_quad._nb_elems*champ_quad._nodes_per_elem];
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+0]=0;
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+1]=1;
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+2]=3;
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+3]=2;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+0]=4;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+1]=5;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+2]=7;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+3]=6;
+
+ }
+ else
+ {
+ // Non-master ranks carry an empty (zero-sized) mesh.
+ champ_quad._nbnodes=0;
+ champ_quad._nb_elems=0;
+ champ_quad._coords=new double[champ_quad._nbnodes*champ_quad._space_dim];
+
+ }
+ champ_quad._has_field_ownership=false;
+ champ_quad._field=0;
+ //champ_quad._field=new double[champ_quad._nb_elems];
+ // assert(champ_quad._nb_field_components==1);
+}
+// Initializes \a champ_triangle as a 2D-in-3D triangle field support on the
+// same 8-node geometry as init_quadGauthier1, split into 4 triangles.
+// When \a is_master is zero the mesh is empty. No field values are
+// allocated (_field = 0, no ownership).
+void init_triangleGauthier1(TrioField& champ_triangle,int is_master)
+{
+
+ champ_triangle.setName("champ_triangle");
+ champ_triangle._space_dim=3;
+ champ_triangle._mesh_dim=2;
+ champ_triangle._nodes_per_elem=3;
+ champ_triangle._itnumber=0;
+ champ_triangle._time1=0;
+ champ_triangle._time2=1;
+ champ_triangle._nb_field_components=1;
+
+ if (is_master)
+ {
+ champ_triangle._nb_elems=4;
+ champ_triangle._nbnodes=8;
+
+ champ_triangle._coords=new double[champ_triangle._nbnodes*champ_triangle._space_dim];
+ //memcpy(afield._coords,sommets.addr(),champ_triangle._nbnodes*champ_triangle._space_dim*sizeof(double));
+ remplit_coordGauthier1(champ_triangle._coords);
+
+ // Triangles (0,1,2) and (1,2,3) split the first quad;
+ // (4,5,7) and (4,6,7) split the second.
+ // NOTE(review): the two splits use different diagonal patterns — presumably
+ // intentional; confirm against the quad connectivity in init_quadGauthier1.
+ champ_triangle._connectivity=new int[champ_triangle._nb_elems*champ_triangle._nodes_per_elem];
+ champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+0]=0;
+ champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+1]=1;
+ champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+2]=2;
+ champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+0]=1;
+ champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+1]=2;
+ champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+2]=3;
+
+ champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+0]=4;
+ champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+1]=5;
+ champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+2]=7;
+ champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+0]=4;
+ champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+1]=6;
+ champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+2]=7;
+ }
+ else
+ {
+ // Non-master ranks carry an empty (zero-sized) mesh.
+ champ_triangle._nb_elems=0;
+ champ_triangle._nbnodes=0;
+ champ_triangle._coords=new double[champ_triangle._nbnodes*champ_triangle._space_dim];
+
+ }
+ champ_triangle._has_field_ownership=false;
+ // champ_triangle._field=new double[champ_triangle._nb_elems];
+ champ_triangle._field=0;
+
+}
+
+
//! Regression test for InterpKernelDEC coupling between a sender ("emetteur")
//! group {0,3} and a receiver ("recepteur") group {1,2}. Runs only with
//! exactly 4 processes. All four combinations of quad/triangle source and
//! target meshes are exercised, each with a uniform and a non-uniform field;
//! the received values are checked by afficheGauthier1 against expected[].
void ParaMEDMEMTest::testGauthier1()
{
  int num_cas=0;   // index of the current case in expected[]/expectedLgth[]
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);

  int is_master=0;

  CommInterface comm;
  set<int> emetteur_ids;
  set<int> recepteur_ids;
  emetteur_ids.insert(0);
  if(size!=4)
    return;
  recepteur_ids.insert(1);
  if (size >2)
    recepteur_ids.insert(2);
  if (size >2)
    emetteur_ids.insert(3);
  // Ranks 0 and 1 are "master" ranks: they carry the non-empty meshes;
  // ranks 2 and 3 participate with empty meshes.
  if ((rank==0)||(rank==1))
    is_master=1;

  MPIProcessorGroup recepteur_group(comm,recepteur_ids);
  MPIProcessorGroup emetteur_group(comm,emetteur_ids);

  string cas;
  if (recepteur_group.containsMyRank())
    {
      cas="recepteur";
      //freopen("recpeteur.out","w",stdout);
      //freopen("recepteur.err","w",stderr);
    }
  else
    {
      cas="emetteur";
      // freopen("emetteur.out","w",stdout);
      //freopen("emetteur.err","w",stderr);
    }
  // Expected received values per case; the 1e200 entries are padding beyond
  // expectedLgth[num_cas] and are never compared.
  double expected[8][4]={
    {1.,1.,1.,1.},
    {40., 40., 1., 1.},
    {1.,1.,1e200,1e200},
    {40.,1.,1e200,1e200},
    {1.,1.,1.,1.},
    {40.,1.,1.,1.},
    {1.,1.,1e200,1e200},
    {20.5,1.,1e200,1e200}
  };

  int expectedLgth[8]={4,4,2,2,4,4,2,2};

  for (int send=0;send<2;send++)
    for (int rec=0;rec<2;rec++)
      {
        InterpKernelDEC dec_emetteur(emetteur_group, recepteur_group);
        dec_emetteur.setOrientation(2);
        TrioField champ_emetteur, champ_recepteur;

        if (send==0)
          init_quadGauthier1(champ_emetteur,is_master);
        else
          init_triangleGauthier1(champ_emetteur,is_master);
        if (rec==0)
          init_triangleGauthier1(champ_recepteur,is_master);
        else
          init_quadGauthier1(champ_recepteur,is_master);

        if (cas=="emetteur")
          {
            // The sender allocates and owns its field values (uniform 1.0).
            champ_emetteur._field=new double[champ_emetteur._nb_elems];
            for (int ele=0;ele<champ_emetteur._nb_elems;ele++)
              champ_emetteur._field[ele]=1;

            champ_emetteur._has_field_ownership=true;
          }


        MPI_Barrier(MPI_COMM_WORLD);

        clock_t clock0= clock ();
        int compti=0;

        bool init=true; // first time step ??
        bool stop=false;
        // loop over the quad time steps
        while (!stop) {

          compti++;
          clock_t clocki= clock ();
          //cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl;
          // second pass sends a non-uniform field (first element set to 40)
          for (int non_unif=0;non_unif<2;non_unif++)
            {
              // if (champ_recepteur._field)
              // delete [] champ_recepteur._field;
              champ_recepteur._field=0;
              // champ_recepteur._has_field_ownership=false;



              if (cas=="emetteur")
                {
                  if (non_unif)
                    if(rank!=3)
                      champ_emetteur._field[0]=40;
                }
              bool ok=false; // Is the time interval successfully solved ?

              // Loop on the time interval tries
              if(1) {


                if (cas=="emetteur")
                  dec_emetteur.attachLocalField((ICoCo::Field*) &champ_emetteur);
                else
                  dec_emetteur.attachLocalField((ICoCo::Field*) &champ_recepteur);


                // synchronize() builds the interpolation matrix; done once per DEC.
                if(init) dec_emetteur.synchronize();
                init=false;

                if (cas=="emetteur") {
                  // affiche(champ_emetteur);
                  dec_emetteur.sendData();
                }
                else if (cas=="recepteur")
                  {
                    dec_emetteur.recvData();
                    if (is_master)
                      afficheGauthier1(champ_recepteur,expected[num_cas],expectedLgth[num_cas]);
                  }
                else
                  throw 0;
                MPI_Barrier(MPI_COMM_WORLD);
              }
              stop=true;
              num_cas++;
            }
          // destruction of the fields, the DECs, and the associated arrays
        }
      }
}
+
//! Tests InterpKernelDEC transfer of a velocity field from an "entree_chaude"
//! group (ranks 1..size-1, source restored from a saved P1 TrioField dump) to
//! a "Genepi" group (rank 0). Both reception types are exercised (type==0:
//! P0, type==1: P1) and the received values are checked against hard-coded
//! min/max bounds and per-value expected arrays. Requires at least 2 ranks.
void ParaMEDMEMTest::testGauthier2()
{
  // Saved TrioField dumps in the format read by TrioField::restore():
  // name, itnumber, mesh dim, space dim, nb nodes, nodes per elem, nb elems,
  // connectivity, coordinates, times, type, components, values, ownership.
  const char save_vit_in_2[]="VITESSE_P1_OUT\n1\n2\n3\n63\n3\n80\n0\n 0 1 2\n 3 4 5\n 6 7 8\n 9 10 11\n 12 13 14\n 15 16 17\n 18 19 20\n 21 22 23\n 24 25 26\n 27 28 29\n 30 2 1\n 31 5 4\n 32 8 7\n 33 11 10\n 34 14 13\n 35 17 16\n 36 20 19\n 37 23 22\n 38 26 25\n 39 29 28\n 30 40 2\n 31 41 5\n 32 42 8\n 33 43 11\n 34 44 14\n 35 45 17\n 36 46 20\n 37 47 23\n 38 48 26\n 39 49 29\n 31 2 40\n 32 5 41\n 33 8 42\n 34 11 43\n 35 14 44\n 36 17 45\n 37 20 46\n 38 23 47\n 39 26 48\n 50 29 49\n 3 2 4\n 6 5 7\n 9 8 10\n 12 11 13\n 15 14 16\n 18 17 19\n 21 20 22\n 24 23 25\n 27 26 28\n 51 29 52\n 31 4 2\n 32 7 5\n 33 10 8\n 34 13 11\n 35 16 14\n 36 19 17\n 37 22 20\n 38 25 23\n 39 28 26\n 50 52 29\n 0 2 53\n 3 5 54\n 6 8 55\n 9 11 56\n 12 14 57\n 15 17 58\n 18 20 59\n 21 23 60\n 24 26 61\n 27 29 62\n 3 53 2\n 6 54 5\n 9 55 8\n 12 56 11\n 15 57 14\n 18 58 17\n 21 59 20\n 24 60 23\n 27 61 26\n 51 62 29\n 0 0 0\n 0.5 0 0\n 0.5 0.05 0\n 0 0.1 0\n 0.5 0.1 0\n 0.5 0.15 0\n 0 0.2 0\n 0.5 0.2 0\n 0.5 0.25 0\n 0 0.3 0\n 0.5 0.3 0\n 0.5 0.35 0\n 0 0.4 0\n 0.5 0.4 0\n 0.5 0.45 0\n 0 0.5 0\n 0.5 0.5 0\n 0.5 0.55 0\n 0 0.6 0\n 0.5 0.6 0\n 0.5 0.65 0\n 0 0.7 0\n 0.5 0.7 0\n 0.5 0.75 0\n 0 0.8 0\n 0.5 0.8 0\n 0.5 0.85 0\n 0 0.9 0\n 0.5 0.9 0\n 0.5 0.95 0\n 1 0 0\n 1 0.1 0\n 1 0.2 0\n 1 0.3 0\n 1 0.4 0\n 1 0.5 0\n 1 0.6 0\n 1 0.7 0\n 1 0.8 0\n 1 0.9 0\n 1 0.05 0\n 1 0.15 0\n 1 0.25 0\n 1 0.35 0\n 1 0.45 0\n 1 0.55 0\n 1 0.65 0\n 1 0.75 0\n 1 0.85 0\n 1 0.95 0\n 1 1 0\n 0 1 0\n 0.5 1 0\n 0 0.05 0\n 0 0.15 0\n 0 0.25 0\n 0 0.35 0\n 0 0.45 0\n 0 0.55 0\n 0 0.65 0\n 0 0.75 0\n 0 0.85 0\n 0 0.95 0\n2.9268\n3.1707\n3\n1\n 0 0 0\n 0 0 0\n 0 0 0.05\n 0 0 0.1\n 0 0 0.1\n 0 0 0.15\n 0 0 0.2\n 0 0 0.2\n 0 0 0.25\n 0 0 0.3\n 0 0 0.3\n 0 0 0.35\n 0 0 0.4\n 0 0 0.4\n 0 0 0.45\n 0 0 0.5\n 0 0 0.5\n 0 0 0.55\n 0 0 0.6\n 0 0 0.6\n 0 0 0.65\n 0 0 0.7\n 0 0 0.7\n 0 0 0.75\n 0 0 0.8\n 0 0 0.8\n 0 0 0.85\n 0 0 0.9\n 0 0 0.9\n 0 0 0.95\n 0 0 0\n 0 0 0.1\n 0 0 0.2\n 0 0 0.3\n 0 0 0.4\n 0 0 0.5\n 0 0 0.6\n 0 0 0.7\n 0 0 0.8\n 0 0 0.9\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n 0 0 1\n 0 0 1\n 0 0 1\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n1\n";

  const char save_vit_out_0_2[]="vitesse_in_chaude\n0\n2\n3\n22\n4\n10\n-1081737852\n 0 1 3 2\n 2 3 5 4\n 4 5 7 6\n 6 7 9 8\n 8 9 11 10\n 10 11 13 12\n 12 13 15 14\n 14 15 17 16\n 16 17 19 18\n 18 19 21 20\n 0 0 0\n 1 0 0\n 0 0.1 0\n 1 0.1 0\n 0 0.2 0\n 1 0.2 0\n 0 0.3 0\n 1 0.3 0\n 0 0.4 0\n 1 0.4 0\n 0 0.5 0\n 1 0.5 0\n 0 0.6 0\n 1 0.6 0\n 0 0.7 0\n 1 0.7 0\n 0 0.8 0\n 1 0.8 0\n 0 0.9 0\n 1 0.9 0\n 0 1 0\n 1 1 0\n2.9268\n3.1707\n3\n1\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n0\n";
  const char save_vit_out_1_2[]="vitesse_in_chaude\n1\n2\n3\n22\n4\n10\n-1081737852\n 0 1 3 2\n 2 3 5 4\n 4 5 7 6\n 6 7 9 8\n 8 9 11 10\n 10 11 13 12\n 12 13 15 14\n 14 15 17 16\n 16 17 19 18\n 18 19 21 20\n 0 0 0\n 1 0 0\n 0 0.1 0\n 1 0.1 0\n 0 0.2 0\n 1 0.2 0\n 0 0.3 0\n 1 0.3 0\n 0 0.4 0\n 1 0.4 0\n 0 0.5 0\n 1 0.5 0\n 0 0.6 0\n 1 0.6 0\n 0 0.7 0\n 1 0.7 0\n 0 0.8 0\n 1 0.8 0\n 0 0.9 0\n 1 0.9 0\n 0 1 0\n 1 1 0\n2.9268\n3.1707\n3\n1\n 0 0 0.029375\n 0 0 0.029375\n 0 0 0.1\n 0 0 0.1\n 0 0 0.2\n 0 0 0.2\n 0 0 0.3\n 0 0 0.3\n 0 0 0.4\n 0 0 0.4\n 0 0 0.5\n 0 0 0.5\n 0 0 0.6\n 0 0 0.6\n 0 0 0.7\n 0 0 0.7\n 0 0 0.8\n 0 0 0.8\n 0 0 0.9\n 0 0 0.9\n 0 0 0.970625\n 0 0 0.970625\n0\n";

  // Index 0 is the P1 dump, index 1 the P0 dump (selected by "type" below).
  const char *save_vit_outs[2]={save_vit_out_1_2,save_vit_out_0_2};

  const char save_vit_out_1_0[]="vitesse_in_chaude\n1\n2\n3\n22\n4\n10\n-1081737852\n 0 1 3 2\n 2 3 5 4\n 4 5 7 6\n 6 7 9 8\n 8 9 11 10\n 10 11 13 12\n 12 13 15 14\n 14 15 17 16\n 16 17 19 18\n 18 19 21 20\n 0 0 0\n 1 0 0\n 0 0.1 0\n 1 0.1 0\n 0 0.2 0\n 1 0.2 0\n 0 0.3 0\n 1 0.3 0\n 0 0.4 0\n 1 0.4 0\n 0 0.5 0\n 1 0.5 0\n 0 0.6 0\n 1 0.6 0\n 0 0.7 0\n 1 0.7 0\n 0 0.8 0\n 1 0.8 0\n 0 0.9 0\n 1 0.9 0\n 0 1 0\n 1 1 0\n2.9268\n3.1707\n3\n1\n 0 0 0.029375\n 0 0 0.029375\n 0 0 0.1\n 0 0 0.1\n 0 0 0.2\n 0 0 0.2\n 0 0 0.3\n 0 0 0.3\n 0 0 0.4\n 0 0 0.4\n 0 0 0.5\n 0 0 0.5\n 0 0 0.6\n 0 0 0.6\n 0 0 0.7\n 0 0 0.7\n 0 0 0.8\n 0 0 0.8\n 0 0 0.9\n 0 0 0.9\n 0 0 0.970625\n 0 0 0.970625\n0\n";

  const char save_vit_in[]="VITESSE_P1_OUT\n1\n2\n3\n63\n3\n80\n0\n 0 1 2\n 3 4 5\n 6 7 8\n 9 10 11\n 12 13 14\n 15 16 17\n 18 19 20\n 21 22 23\n 24 25 26\n 27 28 29\n 30 2 1\n 31 5 4\n 32 8 7\n 33 11 10\n 34 14 13\n 35 17 16\n 36 20 19\n 37 23 22\n 38 26 25\n 39 29 28\n 30 40 2\n 31 41 5\n 32 42 8\n 33 43 11\n 34 44 14\n 35 45 17\n 36 46 20\n 37 47 23\n 38 48 26\n 39 49 29\n 31 2 40\n 32 5 41\n 33 8 42\n 34 11 43\n 35 14 44\n 36 17 45\n 37 20 46\n 38 23 47\n 39 26 48\n 50 29 49\n 3 2 4\n 6 5 7\n 9 8 10\n 12 11 13\n 15 14 16\n 18 17 19\n 21 20 22\n 24 23 25\n 27 26 28\n 51 29 52\n 31 4 2\n 32 7 5\n 33 10 8\n 34 13 11\n 35 16 14\n 36 19 17\n 37 22 20\n 38 25 23\n 39 28 26\n 50 52 29\n 0 2 53\n 3 5 54\n 6 8 55\n 9 11 56\n 12 14 57\n 15 17 58\n 18 20 59\n 21 23 60\n 24 26 61\n 27 29 62\n 3 53 2\n 6 54 5\n 9 55 8\n 12 56 11\n 15 57 14\n 18 58 17\n 21 59 20\n 24 60 23\n 27 61 26\n 51 62 29\n 0 0 0\n 0.5 0 0\n 0.5 0.05 0\n 0 0.1 0\n 0.5 0.1 0\n 0.5 0.15 0\n 0 0.2 0\n 0.5 0.2 0\n 0.5 0.25 0\n 0 0.3 0\n 0.5 0.3 0\n 0.5 0.35 0\n 0 0.4 0\n 0.5 0.4 0\n 0.5 0.45 0\n 0 0.5 0\n 0.5 0.5 0\n 0.5 0.55 0\n 0 0.6 0\n 0.5 0.6 0\n 0.5 0.65 0\n 0 0.7 0\n 0.5 0.7 0\n 0.5 0.75 0\n 0 0.8 0\n 0.5 0.8 0\n 0.5 0.85 0\n 0 0.9 0\n 0.5 0.9 0\n 0.5 0.95 0\n 1 0 0\n 1 0.1 0\n 1 0.2 0\n 1 0.3 0\n 1 0.4 0\n 1 0.5 0\n 1 0.6 0\n 1 0.7 0\n 1 0.8 0\n 1 0.9 0\n 1 0.05 0\n 1 0.15 0\n 1 0.25 0\n 1 0.35 0\n 1 0.45 0\n 1 0.55 0\n 1 0.65 0\n 1 0.75 0\n 1 0.85 0\n 1 0.95 0\n 1 1 0\n 0 1 0\n 0.5 1 0\n 0 0.05 0\n 0 0.15 0\n 0 0.25 0\n 0 0.35 0\n 0 0.45 0\n 0 0.55 0\n 0 0.65 0\n 0 0.75 0\n 0 0.85 0\n 0 0.95 0\n2.9268\n3.1707\n3\n1\n 0 0 0\n 0 0 0\n 0 0 0.05\n 0 0 0.1\n 0 0 0.1\n 0 0 0.15\n 0 0 0.2\n 0 0 0.2\n 0 0 0.25\n 0 0 0.3\n 0 0 0.3\n 0 0 0.35\n 0 0 0.4\n 0 0 0.4\n 0 0 0.45\n 0 0 0.5\n 0 0 0.5\n 0 0 0.55\n 0 0 0.6\n 0 0 0.6\n 0 0 0.65\n 0 0 0.7\n 0 0 0.7\n 0 0 0.75\n 0 0 0.8\n 0 0 0.8\n 0 0 0.85\n 0 0 0.9\n 0 0 0.9\n 0 0 0.95\n 0 0 0\n 0 0 0.1\n 0 0 0.2\n 0 0 0.3\n 0 0 0.4\n 0 0 0.5\n 0 0 0.6\n 0 0 0.7\n 0 0 0.8\n 0 0 0.9\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n 0 0 1\n 0 0 1\n 0 0 1\n 0 0 0.05\n 0 0 0.15\n 0 0 0.25\n 0 0 0.35\n 0 0 0.45\n 0 0 0.55\n 0 0 0.65\n 0 0 0.75\n 0 0 0.85\n 0 0 0.95\n1\n";

  // Expected min/max of the received field per type (0: P0, 1: P1).
  double valuesExpected1[2]={0.,0.};
  double valuesExpected2[2]={0.95,0.970625};

  double valuesExpected30[]={0., 0., 0.05, 0., 0., 0.15, 0., 0., 0.25, 0., 0., 0.35, 0., 0., 0.45, 0., 0., 0.55, 0., 0., 0.65, 0., 0., 0.75, 0., 0., 0.85, 0., 0., 0.95};
  double valuesExpected31[]={0., 0., 0.029375, 0., 0., 0.029375, 0., 0., 0.1, 0., 0., 0.1, 0., 0., 0.2, 0., 0., 0.2, 0., 0., 0.3, 0., 0., 0.3, 0., 0., 0.4, 0., 0., 0.4, 0., 0., 0.5, 0., 0., 0.5, 0., 0., 0.6, 0., 0., 0.6, 0., 0., 0.7, 0., 0., 0.7, 0., 0., 0.8, 0., 0., 0.8, 0., 0., 0.9, 0., 0., 0.9, 0., 0., 0.970625, 0., 0., 0.970625 };

  double *valuesExpected3[2]={valuesExpected30,valuesExpected31};

  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  if (size <2)
    return ;
  CommInterface comm;
  set<int> Genepi_ids;
  set<int> entree_chaude_ids;
  Genepi_ids.insert(0);
  for (int i=1;i<size;i++)
    entree_chaude_ids.insert(i);
  for (int type=0;type<2;type++)
    {
      MPIProcessorGroup entree_chaude_group(comm,entree_chaude_ids);
      MPIProcessorGroup Genepi_group(comm,Genepi_ids);

      TrioField vitesse;
      InterpKernelDEC dec_vit_in_chaude(entree_chaude_group, Genepi_group);

      if ( entree_chaude_group.containsMyRank())
        {
          // Source side: restore the P1 velocity field from its dump.
          istringstream save_vit(save_vit_in);
          vitesse.restore(save_vit);
        }
      else
        {
          // Target side: restore the mesh, then drop the restored values so
          // the DEC fills the field itself.
          istringstream save_vit(save_vit_out_1_0);
          vitesse.restore(save_vit);
          vitesse._has_field_ownership=false;

          if (vitesse._field)
            {
              delete [] vitesse._field;
              // this line is essential: without it the received values are all wrong
              vitesse._field=0;
            }
          // to test P1->P0 as well as P1->P1
          vitesse._type=type;
        }

      if (vitesse._type==1)
        dec_vit_in_chaude.setMethod("P1");



      dec_vit_in_chaude.attachLocalField((ICoCo::Field*) &vitesse);

      dec_vit_in_chaude.synchronize();


      // Sends - receives
      if (entree_chaude_group.containsMyRank())
        {
          dec_vit_in_chaude.sendData();
        }
      else
        {
          dec_vit_in_chaude.recvData();
        }
      if (entree_chaude_group.containsMyRank() )
        {
          if (1)
            {
              // NOTE(review): the dump is written into a scratch stream whose
              // content is never compared against save_vit_in_2 — confirm
              // whether a comparison was intended here.
              ostringstream save_vit(save_vit_in_2);
              vitesse.save(save_vit);
            }
        }
      else
        {

          double pmin=1e38, pmax=-1e38;

          // Check the extrema of the received values.
          for(int i=0;i<vitesse.nb_values()*vitesse._nb_field_components;i++)
            {
              double p=*(vitesse._field+i);
              if (p<pmin) pmin=p;
              if (p>pmax) pmax=p;
            }
          CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected1[type],pmin,1e-12);
          CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected2[type],pmax,1e-12);

          ostringstream save_vit(save_vit_outs[type]);
          vitesse.save(save_vit);

          // Check every received component against the expected table.
          for(int i=0;i<vitesse.nb_values();i++)
            {
              for(int c=0;c<vitesse._nb_field_components;c++)
                {
                  double p=vitesse._field[i*vitesse._nb_field_components+c];
                  CPPUNIT_ASSERT_DOUBLES_EQUAL(valuesExpected3[type][i*vitesse._nb_field_components+c],p,1e-12);
                }
            }

        }
    }
}
--- /dev/null
+#include "ParaMEDMEMTest.hxx"
+#include <string>
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "DEC.hxx"
+#include "InterpKernelDEC.hxx"
+#include <set>
+#include <time.h>
+#include "ICoCoTrioField.hxx"
+#include <iostream>
+#include <assert.h>
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace ICoCo;
+
+typedef enum {sync_and,sync_or} synctype;
+void synchronize_bool(bool& stop, synctype s)
+{
+ int my_stop;
+ int my_stop_temp = stop?1:0;
+ if (s==sync_and)
+ MPI_Allreduce(&my_stop_temp,&my_stop,1,MPI_INTEGER,MPI_MIN,MPI_COMM_WORLD);
+ else if (s==sync_or)
+ MPI_Allreduce(&my_stop_temp,&my_stop,1,MPI_INTEGER,MPI_MAX,MPI_COMM_WORLD);
+ stop =(my_stop==1);
+}
+
+void synchronize_dt(double& dt)
+{
+ double dttemp=dt;
+ MPI_Allreduce(&dttemp,&dt,1,MPI_DOUBLE,MPI_MIN,MPI_COMM_WORLD);
+}
+
+
+void affiche( const TrioField& field)
+{
+ cout <<field.getName()<<endl;
+ for (int ele=0;ele<field._nb_elems;ele++)
+ cout <<ele <<": "<<field._field[ele]<<endl;;
+
+}
+
//! Fills coords (8 nodes x 3 components, caller-allocated) with the vertices
//! of a unit square lying in the y=0 plane (nodes 0-3), followed by a copy of
//! those four nodes shifted by 1e-5 along y (nodes 4-7).
void remplit_coord(double* coords)
{
  static const double base[4][3]={{0.,0.,0.},
                                  {1.,0.,0.},
                                  {0.,0.,1.},
                                  {1.,0.,1.}};
  for (int node=0; node<4; node++)
    for (int d=0; d<3; d++)
      coords[node*3+d]=base[node][d];

  // Nodes 4-7 duplicate nodes 0-3 with a tiny offset in y.
  for (int node=4; node<8; node++)
    {
      for (int d=0; d<3; d++)
        coords[node*3+d]=coords[(node-4)*3+d];
      coords[node*3+1]+=1e-5;
    }
}
+
+void init_quad(TrioField& champ_quad)
+{
+
+ champ_quad.setName("champ_quad");
+ champ_quad._space_dim=3;
+ champ_quad._mesh_dim=2;
+ champ_quad._nbnodes=8;
+ champ_quad._nodes_per_elem=4;
+ champ_quad._nb_elems=2;
+ champ_quad._itnumber=0;
+ champ_quad._time1=0;
+ champ_quad._time2=1;
+ champ_quad._nb_field_components=1;
+
+ champ_quad._coords=new double[champ_quad._nbnodes*champ_quad._space_dim];
+ //memcpy(afield._coords,sommets.addr(),champ_quad._nbnodes*champ_quad._space_dim*sizeof(double));
+
+ remplit_coord(champ_quad._coords);
+
+
+ champ_quad._connectivity=new int[champ_quad._nb_elems*champ_quad._nodes_per_elem];
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+0]=0;
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+1]=1;
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+2]=3;
+ champ_quad._connectivity[0*champ_quad._nodes_per_elem+3]=2;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+0]=4;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+1]=5;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+2]=7;
+ champ_quad._connectivity[1*champ_quad._nodes_per_elem+3]=6;
+
+
+ champ_quad._has_field_ownership=false;
+ champ_quad._field=0;
+ //champ_quad._field=new double[champ_quad._nb_elems];
+ // assert(champ_quad._nb_field_components==1);
+}
+void init_triangle(TrioField& champ_triangle)
+{
+
+ champ_triangle.setName("champ_triangle");
+ champ_triangle._space_dim=3;
+ champ_triangle._mesh_dim=2;
+ champ_triangle._nbnodes=8;
+ champ_triangle._nodes_per_elem=3;
+ champ_triangle._nb_elems=4;
+ champ_triangle._itnumber=0;
+ champ_triangle._time1=0;
+ champ_triangle._time2=1;
+ champ_triangle._nb_field_components=1;
+
+ champ_triangle._coords=new double[champ_triangle._nbnodes*champ_triangle._space_dim];
+ //memcpy(afield._coords,sommets.addr(),champ_triangle._nbnodes*champ_triangle._space_dim*sizeof(double));
+ remplit_coord(champ_triangle._coords);
+
+ champ_triangle._connectivity=new int[champ_triangle._nb_elems*champ_triangle._nodes_per_elem];
+ champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+0]=0;
+ champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+1]=1;
+ champ_triangle._connectivity[0*champ_triangle._nodes_per_elem+2]=2;
+ champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+0]=1;
+ champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+1]=3;
+ champ_triangle._connectivity[1*champ_triangle._nodes_per_elem+2]=2;
+
+ champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+0]=4;
+ champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+1]=5;
+ champ_triangle._connectivity[2*champ_triangle._nodes_per_elem+2]=7;
+ champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+0]=4;
+ champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+1]=7;
+ champ_triangle._connectivity[3*champ_triangle._nodes_per_elem+2]=6;
+
+ champ_triangle._has_field_ownership=false;
+ // champ_triangle._field=new double[champ_triangle._nb_elems];
+ champ_triangle._field=0;
+}
+
//! Basic ICoCo TrioField coupling test: rank 0 ("emetteur") sends a
//! triangle-mesh field, rank 1 ("recepteur") receives it on a quad mesh;
//! both sides print the field with affiche(). Runs only with 2 processes.
void ParaMEDMEMTest::testICocoTrio1()
{
  int size;
  int rank;
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  // the test is meant to run on two processors
  if (size !=2) return ;

  CommInterface comm;
  set<int> emetteur_ids;
  set<int> recepteur_ids;
  emetteur_ids.insert(0);
  recepteur_ids.insert(1);

  MPIProcessorGroup recepteur_group(comm,recepteur_ids);
  MPIProcessorGroup emetteur_group(comm,emetteur_ids);


  string cas;
  if (recepteur_group.containsMyRank())
    {
      cas="recepteur";

    }
  else
    cas="emetteur";

  InterpKernelDEC dec_emetteur(emetteur_group, recepteur_group);

  TrioField champ_emetteur, champ_recepteur;

  init_triangle(champ_emetteur);
  //init_triangle(champ_emetteur);
  init_quad(champ_recepteur);
  //init_emetteur(champ_recepteur);

  if (cas=="emetteur")
    {
      // The sender allocates and owns its field values (uniform 1.0).
      champ_emetteur._field=new double[champ_emetteur._nb_elems];
      for (int ele=0;ele<champ_emetteur._nb_elems;ele++)
        champ_emetteur._field[ele]=1;

      champ_emetteur._has_field_ownership=true;
    }


  MPI_Barrier(MPI_COMM_WORLD);

  clock_t clock0= clock ();
  int compti=0;

  bool init=true; // first time step ??
  bool stop=false;
  // loop over the quad time steps
  while (!stop) {

    compti++;
    clock_t clocki= clock ();
    cout << compti << " CLOCK " << (clocki-clock0)*1.e-6 << endl;
    // second pass sends a non-uniform field (first element set to 40)
    for (int non_unif=0;non_unif<2;non_unif++)
      {
        // if (champ_recepteur._field)
        // delete [] champ_recepteur._field;
        champ_recepteur._field=0;
        // champ_recepteur._has_field_ownership=false;



        if (cas=="emetteur")
          if (non_unif)
            champ_emetteur._field[0]=40;
        bool ok=false; // Is the time interval successfully solved ?

        // Loop on the time interval tries
        if(1)
          {
            if (cas=="emetteur")
              dec_emetteur.attachLocalField((ICoCo::Field*) &champ_emetteur);
            else
              dec_emetteur.attachLocalField((ICoCo::Field*) &champ_recepteur);

            dec_emetteur.setNature(ConservativeVolumic);

            // synchronize() builds the interpolation matrix; done once per DEC.
            if(init)
              dec_emetteur.synchronize();
            init=false;

            if (cas=="emetteur")
              {
                dec_emetteur.sendData();
                affiche(champ_emetteur);
              }
            else if (cas=="recepteur")
              {
                dec_emetteur.recvData();
                affiche(champ_recepteur);
              }
            else
              throw 0;
          }
        stop=true;
      }
  }
}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "DEC.hxx"
+#include "MxN_Mapping.hxx"
+#include "InterpKernelDEC.hxx"
+#include "ParaMESH.hxx"
+#include "ParaFIELD.hxx"
+#include "ComponentTopology.hxx"
+#include "ICoCoMEDField.hxx"
+#include "ParaMEDLoader.hxx"
+#include "MEDLoader.hxx"
+
+
+#include <string>
+#include <iterator>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+
//! Entry point: 2D InterpKernelDEC test with P0 source and P0 target.
void ParaMEDMEMTest::testInterpKernelDEC_2D()
{
  testInterpKernelDEC_2D_("P0","P0");
}
+
//! Entry point: 2D test of the MEDCouplingFieldDouble-based DEC attachment,
//! P0 source and P0 target.
void ParaMEDMEMTest::testInterpKernelDEC2_2D()
{
  testInterpKernelDEC2_2D_("P0","P0");
}
+
//! Entry point: 3D InterpKernelDEC test with P0 source and P0 target.
void ParaMEDMEMTest::testInterpKernelDEC_3D()
{
  testInterpKernelDEC_3D_("P0","P0");
}
+
//! Entry point: 2D P0->P1 test — currently disabled (body commented out).
void ParaMEDMEMTest::testInterpKernelDEC_2DP0P1()
{
  //testInterpKernelDEC_2D_("P0","P1");
}
+
+/*
+ * Check methods defined in InterpKernelDEC.hxx
+ *
+ InterpKernelDEC();
+ InterpKernelDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group);
+ virtual ~InterpKernelDEC();
+ void synchronize();
+ void recvData();
+ void sendData();
+*/
+
/*!
 * Parallel projection test between a 3-process source group (ranks 0-2,
 * meshes "square1_split*") and a 2-process target group (ranks 3-4, meshes
 * "square2_split*"). Runs only with exactly 5 processes. The source field is
 * initialized to 1.0, sent, received back, and the volume integral before and
 * after the round trip is asserted equal (conservation check). Intermediate
 * meshes/fields are written to disk and registered for removal.
 * \param srcMeth    source field support, "P0" (cells) or otherwise nodes.
 * \param targetMeth target field support, same convention.
 */
void ParaMEDMEMTest::testInterpKernelDEC_2D_(const char *srcMeth, const char *targetMeth)
{
  std::string srcM(srcMeth);
  std::string targetM(targetMeth);
  int size;
  int rank;
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  //the test is meant to run on five processors
  if (size !=5) return ;

  int nproc_source = 3;
  set<int> self_procs;
  set<int> procs_source;
  set<int> procs_target;

  for (int i=0; i<nproc_source; i++)
    procs_source.insert(i);
  for (int i=nproc_source; i<size; i++)
    procs_target.insert(i);
  self_procs.insert(rank);

  ParaMEDMEM::CommInterface interface;

  ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
  ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
  ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);

  //loading the geometry for the source group

  ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);

  ParaMEDMEM::MEDCouplingUMesh* mesh;
  ParaMEDMEM::ParaMESH* paramesh;
  ParaMEDMEM::ParaFIELD* parafield;
  ICoCo::Field* icocofield ;

  string filename_xml1 = getResourceFile("square1_split");
  string filename_xml2 = getResourceFile("square2_split");
  //string filename_seq_wr = makeTmpFile("");
  //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");

  // To remove tmp files from disk
  ParaMEDMEMTest_TmpFilesRemover aRemover;

  MPI_Barrier(MPI_COMM_WORLD);
  if (source_group->containsMyRank())
    {
      string master = filename_xml1;

      // Each source rank reads its own split: square1_split<rank+1>.med
      ostringstream strstream;
      strstream <<master<<rank+1<<".med";
      ostringstream meshname ;
      meshname<< "Mesh_2_"<< rank+1;

      mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);


      paramesh=new ParaMESH (mesh,*source_group,"source mesh");

      // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT( support,*source_group);
      ParaMEDMEM::ComponentTopology comptopo;
      if(srcM=="P0")
        {
          parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
          parafield->getField()->setNature(ConservativeVolumic);
        }
      else
        parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
      int nb_local;
      if(srcM=="P0")
        nb_local=mesh->getNumberOfCells();
      else
        nb_local=mesh->getNumberOfNodes();
      // double * value= new double[nb_local];
      // Source field is uniformly 1.0.
      double *value=parafield->getField()->getArray()->getPointer();
      for(int ielem=0; ielem<nb_local;ielem++)
        value[ielem]=1.0;

      // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
      icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
      dec.setMethod(srcMeth);
      dec.attachLocalField(icocofield);
    }

  //loading the geometry for the target group
  if (target_group->containsMyRank())
    {
      string master= filename_xml2;
      // Each target rank reads its own split: square2_split<local rank+1>.med
      ostringstream strstream;
      strstream << master<<(rank-nproc_source+1)<<".med";
      ostringstream meshname ;
      meshname<< "Mesh_3_"<<rank-nproc_source+1;
      mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);

      paramesh=new ParaMESH (mesh,*target_group,"target mesh");
      // ParaMEDMEM::ParaSUPPORT* parasupport=new UnstructuredParaSUPPORT(support,*target_group);
      ParaMEDMEM::ComponentTopology comptopo;
      if(targetM=="P0")
        {
          parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
          parafield->getField()->setNature(ConservativeVolumic);
        }
      else
        parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
      int nb_local;
      if(targetM=="P0")
        nb_local=mesh->getNumberOfCells();
      else
        nb_local=mesh->getNumberOfNodes();
      // double * value= new double[nb_local];
      // Target field starts at 0.0 and is overwritten by recvData().
      double *value=parafield->getField()->getArray()->getPointer();
      for(int ielem=0; ielem<nb_local;ielem++)
        value[ielem]=0.0;
      // ICoCo::Field* icocofield=new ICoCo::MEDField(paramesh,parafield);
      icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
      dec.setMethod(targetMeth);
      dec.attachLocalField(icocofield);
    }


  //attaching a DEC to the source group
  double field_before_int;
  double field_after_int;

  if (source_group->containsMyRank())
    {
      field_before_int = parafield->getVolumeIntegral(0,true);
      dec.synchronize();
      cout<<"DEC usage"<<endl;
      dec.setForcedRenormalization(false);

      dec.sendData();
      MEDLoader::writeParaMesh("./sourcesquareb",paramesh);
      if (source_group->myRank()==0)
        aRemover.Register("./sourcesquareb");
      ostringstream filename;
      filename<<"./sourcesquareb_"<<source_group->myRank()+1;
      aRemover.Register(filename.str().c_str());
      MEDLoader::writeField("./sourcesquareb","boundary",parafield->getField());

      dec.recvData();
      cout <<"writing"<<endl;
      MEDLoader::writeParaMesh("./sourcesquare",paramesh);
      if (source_group->myRank()==0)
        aRemover.Register("./sourcesquare");
      MEDLoader::writeField("./sourcesquare","boundary",parafield->getField());


      // NOTE(review): this appends to the same 'filename' stream that already
      // holds "./sourcesquareb_<n>", so the registered name is the two paths
      // concatenated — confirm whether a fresh stream was intended.
      filename<<"./sourcesquare_"<<source_group->myRank()+1;
      aRemover.Register(filename.str().c_str());
      field_after_int = parafield->getVolumeIntegral(0,true);


      // MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
      // MPI_Bcast(&field_after_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);

      // Conservation check: the send/recv round trip must preserve the
      // volume integral of the field.
      CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6);

    }

  //attaching a DEC to the target group
  if (target_group->containsMyRank())
    {
      dec.synchronize();
      dec.setForcedRenormalization(false);

      dec.recvData();
      MEDLoader::writeParaMesh("./targetsquareb",paramesh);
      MEDLoader::writeField("./targetsquareb", "boundary",parafield->getField());
      if (target_group->myRank()==0)
        aRemover.Register("./targetsquareb");
      ostringstream filename;
      filename<<"./targetsquareb_"<<target_group->myRank()+1;
      aRemover.Register(filename.str().c_str());
      dec.sendData();
      MEDLoader::writeParaMesh("./targetsquare",paramesh);
      MEDLoader::writeField("./targetsquare", "boundary",parafield->getField());

      // NOTE(review): "./targetsquareb" is registered a second time here and
      // the next line appends "targetsquareb_" although "./targetsquare" was
      // just written — looks like a copy-paste slip; confirm intended names.
      if (target_group->myRank()==0)
        aRemover.Register("./targetsquareb");

      filename<<"./targetsquareb_"<<target_group->myRank()+1;
      aRemover.Register(filename.str().c_str());
      // double field_before_int, field_after_int;
      // MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
      // MPI_Bcast(&field_after_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);

      // CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6);

    }

  delete source_group;
  delete target_group;
  delete self_group;
  delete parafield;
  delete paramesh;
  mesh->decrRef();

  delete icocofield;

  MPI_Barrier(MPI_COMM_WORLD);
  cout << "end of InterpKernelDEC_2D test"<<endl;
}
+
+void ParaMEDMEMTest::testInterpKernelDEC2_2D_(const char *srcMeth, const char *targetMeth)
+{
+ std::string srcM(srcMeth);
+ std::string targetM(targetMeth);
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ //the test is meant to run on five processors
+ if (size !=5) return ;
+
+ int nproc_source = 3;
+ set<int> self_procs;
+ set<int> procs_source;
+ set<int> procs_target;
+
+ for (int i=0; i<nproc_source; i++)
+ procs_source.insert(i);
+ for (int i=nproc_source; i<size; i++)
+ procs_target.insert(i);
+ self_procs.insert(rank);
+
+ ParaMEDMEM::CommInterface interface;
+
+ ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+ ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+ ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+
+ //loading the geometry for the source group
+
+ ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
+
+ ParaMEDMEM::MEDCouplingUMesh* mesh;
+ ParaMEDMEM::MEDCouplingFieldDouble* mcfield;
+
+ string filename_xml1 = getResourceFile("square1_split");
+ string filename_xml2 = getResourceFile("square2_split");
+
+ // To remove tmp files from disk
+ ParaMEDMEMTest_TmpFilesRemover aRemover;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (source_group->containsMyRank())
+ {
+ string master = filename_xml1;
+
+ ostringstream strstream;
+ strstream <<master<<rank+1<<".med";
+ ostringstream meshname ;
+ meshname<< "Mesh_2_"<< rank+1;
+
+ mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+ ParaMEDMEM::ComponentTopology comptopo;
+ if(srcM=="P0")
+ {
+ mcfield = MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME);
+ mcfield->setMesh(mesh);
+ DataArrayDouble *array=DataArrayDouble::New();
+ array->alloc(mcfield->getNumberOfTuples(),1);
+ mcfield->setArray(array);
+ array->decrRef();
+ mcfield->setNature(ConservativeVolumic);
+ }
+ else
+ {
+ mcfield = MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME);
+ mcfield->setMesh(mesh);
+ DataArrayDouble *array=DataArrayDouble::New();
+ array->alloc(mcfield->getNumberOfTuples(),1);
+ mcfield->setArray(array);
+ array->decrRef();
+ }
+ int nb_local;
+ if(srcM=="P0")
+ nb_local=mesh->getNumberOfCells();
+ else
+ nb_local=mesh->getNumberOfNodes();
+ double *value=mcfield->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=1.0;
+ dec.setMethod(srcMeth);
+ dec.attachLocalField(mcfield);
+ }
+
+ //loading the geometry for the target group
+ if (target_group->containsMyRank())
+ {
+ string master= filename_xml2;
+ ostringstream strstream;
+ strstream << master<<(rank-nproc_source+1)<<".med";
+ ostringstream meshname ;
+ meshname<< "Mesh_3_"<<rank-nproc_source+1;
+ mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+ ParaMEDMEM::ComponentTopology comptopo;
+ if(targetM=="P0")
+ {
+ mcfield = MEDCouplingFieldDouble::New(ON_CELLS,NO_TIME);
+ mcfield->setMesh(mesh);
+ DataArrayDouble *array=DataArrayDouble::New();
+ array->alloc(mcfield->getNumberOfTuples(),1);
+ mcfield->setArray(array);
+ array->decrRef();
+ mcfield->setNature(ConservativeVolumic);
+ }
+ else
+ {
+ mcfield = MEDCouplingFieldDouble::New(ON_NODES,NO_TIME);
+ mcfield->setMesh(mesh);
+ DataArrayDouble *array=DataArrayDouble::New();
+ array->alloc(mcfield->getNumberOfTuples(),1);
+ mcfield->setArray(array);
+ array->decrRef();
+ }
+ int nb_local;
+ if(targetM=="P0")
+ nb_local=mesh->getNumberOfCells();
+ else
+ nb_local=mesh->getNumberOfNodes();
+ double *value=mcfield->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=0.0;
+ dec.setMethod(targetMeth);
+ dec.attachLocalField(mcfield);
+ }
+
+
+ //attaching a DEC to the source group
+
+ if (source_group->containsMyRank())
+ {
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ dec.sendData();
+ dec.recvData();
+ }
+
+ //attaching a DEC to the target group
+ if (target_group->containsMyRank())
+ {
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ dec.recvData();
+ dec.sendData();
+ }
+ delete source_group;
+ delete target_group;
+ delete self_group;
+ mcfield->decrRef();
+ mesh->decrRef();
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ cout << "end of InterpKernelDEC2_2D test"<<endl;
+}
+
+/*!
+ * 3-proc test (2 source procs + 1 target proc) of InterpKernelDEC in 3D.
+ * The source field is uniformly 1; after a send/recv round trip the volume
+ * integral of the source field must be conserved.
+ * \param srcMeth    source discretization: "P0" (cells) or anything else (nodes)
+ * \param targetMeth target discretization: "P0" (cells) or anything else (nodes)
+ */
+void ParaMEDMEMTest::testInterpKernelDEC_3D_(const char *srcMeth, const char *targetMeth)
+{
+ std::string srcM(srcMeth);
+ std::string targetM(targetMeth);
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ //the test is meant to run on three processors
+ if (size !=3) return ;
+
+ int nproc_source = 2;
+ set<int> self_procs;
+ set<int> procs_source;
+ set<int> procs_target;
+
+ // ranks [0,nproc_source) form the source group, the remaining rank the target group
+ for (int i=0; i<nproc_source; i++)
+ procs_source.insert(i);
+ for (int i=nproc_source; i<size; i++)
+ procs_target.insert(i);
+ self_procs.insert(rank);
+
+ ParaMEDMEM::CommInterface interface;
+
+ ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+ ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+ ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+
+ //loading the geometry for the source group
+
+ ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
+
+ ParaMEDMEM::MEDCouplingUMesh* mesh;
+ ParaMEDMEM::ParaMESH* paramesh;
+ ParaMEDMEM::ParaFIELD* parafield;
+ ICoCo::Field* icocofield ;
+
+ // NOTE(review): the former 'tmp_dir = getenv("TMP")' block was removed: the
+ // variable was never used, and constructing a std::string from the null
+ // pointer getenv() returns when TMP is unset is undefined behavior.
+ string filename_xml1 = getResourceFile("Mesh3D_10_2d");
+ string filename_xml2 = getResourceFile("Mesh3D_11");
+
+ // To remove tmp files from disk
+ ParaMEDMEMTest_TmpFilesRemover aRemover;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (source_group->containsMyRank())
+ {
+ // each source proc reads its own split file Mesh3D_10_2d<rank+1>.med
+ string master = filename_xml1;
+
+ ostringstream strstream;
+ strstream <<master<<rank+1<<".med";
+ ostringstream meshname ;
+ meshname<< "Mesh_3_"<< rank+1;
+
+ mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+
+ paramesh=new ParaMESH (mesh,*source_group,"source mesh");
+
+ ParaMEDMEM::ComponentTopology comptopo;
+ if(srcM=="P0")
+ {
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ parafield->getField()->setNature(ConservativeVolumic);
+ }
+ else
+ parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
+ int nb_local;
+ if(srcM=="P0")
+ nb_local=mesh->getNumberOfCells();
+ else
+ nb_local=mesh->getNumberOfNodes();
+ // uniform field value 1: its volume integral equals the domain measure
+ double *value=parafield->getField()->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=1.0;
+
+ icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+ dec.setMethod(srcMeth);
+ dec.attachLocalField(icocofield);
+ }
+
+ //loading the geometry for the target group
+ if (target_group->containsMyRank())
+ {
+ // single target proc: reads the whole Mesh3D_11.med file
+ string master= filename_xml2;
+ ostringstream strstream;
+ strstream << master << ".med";
+ ostringstream meshname ;
+ meshname<< "Mesh_6";
+ mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+
+ paramesh=new ParaMESH (mesh,*target_group,"target mesh");
+ ParaMEDMEM::ComponentTopology comptopo;
+ if(targetM=="P0")
+ {
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ parafield->getField()->setNature(ConservativeVolumic);
+ }
+ else
+ parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
+ int nb_local;
+ if(targetM=="P0")
+ nb_local=mesh->getNumberOfCells();
+ else
+ nb_local=mesh->getNumberOfNodes();
+ double *value=parafield->getField()->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=0.0;
+ icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+ dec.setMethod(targetMeth);
+ dec.attachLocalField(icocofield);
+ }
+ //attaching a DEC to the source group
+ double field_before_int;
+ double field_after_int;
+
+ if (source_group->containsMyRank())
+ {
+ field_before_int = parafield->getVolumeIntegral(0,true);
+ dec.synchronize();
+ cout<<"DEC usage"<<endl;
+ dec.setForcedRenormalization(false);
+
+ dec.sendData();
+ MEDLoader::writeParaMesh("./sourcesquareb",paramesh);
+ if (source_group->myRank()==0)
+ aRemover.Register("./sourcesquareb");
+ ostringstream filename;
+ filename<<"./sourcesquareb_"<<source_group->myRank()+1;
+ aRemover.Register(filename.str().c_str());
+ MEDLoader::writeField("./sourcesquareb","boundary",parafield->getField());
+
+ dec.recvData();
+ cout <<"writing"<<endl;
+ MEDLoader::writeParaMesh("./sourcesquare",paramesh);
+ if (source_group->myRank()==0)
+ aRemover.Register("./sourcesquare");
+ MEDLoader::writeField("./sourcesquare","boundary",parafield->getField());
+
+ // BUGFIX: use a fresh stream; appending to 'filename' used to register the
+ // garbled path "./sourcesquareb_N./sourcesquare_N", leaving the real file behind
+ ostringstream filename2;
+ filename2<<"./sourcesquare_"<<source_group->myRank()+1;
+ aRemover.Register(filename2.str().c_str());
+ field_after_int = parafield->getVolumeIntegral(0,true);
+
+ // conservation check: send+recv must preserve the volume integral
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, 1e-6);
+
+ }
+
+ //attaching a DEC to the target group
+ if (target_group->containsMyRank())
+ {
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+
+ dec.recvData();
+ MEDLoader::writeParaMesh("./targetsquareb",paramesh);
+ MEDLoader::writeField("./targetsquareb", "boundary",parafield->getField());
+ if (target_group->myRank()==0)
+ aRemover.Register("./targetsquareb");
+ ostringstream filename;
+ filename<<"./targetsquareb_"<<target_group->myRank()+1;
+ aRemover.Register(filename.str().c_str());
+ dec.sendData();
+ MEDLoader::writeParaMesh("./targetsquare",paramesh);
+ MEDLoader::writeField("./targetsquare", "boundary",parafield->getField());
+
+ // BUGFIX: was "./targetsquareb" (copy-paste), so "./targetsquare" was never removed
+ if (target_group->myRank()==0)
+ aRemover.Register("./targetsquare");
+
+ // BUGFIX: fresh stream + "targetsquare_" (was appending "targetsquareb_" to 'filename')
+ ostringstream filename2;
+ filename2<<"./targetsquare_"<<target_group->myRank()+1;
+ aRemover.Register(filename2.str().c_str());
+ }
+ delete source_group;
+ delete target_group;
+ delete self_group;
+ delete parafield;
+ delete paramesh;
+ mesh->decrRef();
+
+ delete icocofield;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ cout << "end of InterpKernelDEC_3D test"<<endl;
+}
+
+//Synchronous tests without interpolation with native mode (AllToAll(v) from lam/MPI):
+// args: (dtA,tmaxA, dtB,tmaxB, pointToPoint, asynchronous, timeInterp, srcMeth, targetMeth)
+// equal time steps dtA=dtB=0.1, Native all-to-all, no time interpolation
+void ParaMEDMEMTest::testSynchronousEqualInterpKernelWithoutInterpNativeDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,false,false,false,"P0","P0");
+}
+
+//Synchronous tests without interpolation :
+// equal time steps dtA=dtB=0.1, point-to-point all-to-all, no time interpolation
+void ParaMEDMEMTest::testSynchronousEqualInterpKernelWithoutInterpDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,true,false,false,"P0","P0");
+}
+
+//Synchronous tests with interpolation :
+// equal time steps dtA=dtB=0.1, time interpolation enabled
+void ParaMEDMEMTest::testSynchronousEqualInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,true,false,true,"P0","P0");
+}
+// source ticks slightly faster than target (dtA=0.09 < dtB=0.1)
+void ParaMEDMEMTest::testSynchronousFasterSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.09,1,0.1,1,true,false,true,"P0","P0");
+}
+// source ticks slightly slower than target (dtA=0.11 > dtB=0.1)
+void ParaMEDMEMTest::testSynchronousSlowerSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.11,1,0.1,1,true,false,true,"P0","P0");
+}
+// source much slower than target (dtA=0.11 vs dtB=0.01)
+void ParaMEDMEMTest::testSynchronousSlowSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.11,1,0.01,1,true,false,true,"P0","P0");
+}
+// source much faster than target (dtA=0.01 vs dtB=0.11)
+void ParaMEDMEMTest::testSynchronousFastSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.01,1,0.11,1,true,false,true,"P0","P0");
+}
+
+//Asynchronous tests with interpolation :
+// same scenarios as the synchronous family above, but with the DEC in asynchronous mode
+// equal time steps dtA=dtB=0.1
+void ParaMEDMEMTest::testAsynchronousEqualInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.1,1,0.1,1,true,true,true,"P0","P0");
+}
+// asynchronous, source slightly faster (dtA=0.09 < dtB=0.1)
+void ParaMEDMEMTest::testAsynchronousFasterSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.09,1,0.1,1,true,true,true,"P0","P0");
+}
+// asynchronous, source slightly slower (dtA=0.11 > dtB=0.1)
+void ParaMEDMEMTest::testAsynchronousSlowerSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.11,1,0.1,1,true,true,true,"P0","P0");
+}
+// asynchronous, source much slower (dtA=0.11 vs dtB=0.01)
+void ParaMEDMEMTest::testAsynchronousSlowSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.11,1,0.01,1,true,true,true,"P0","P0");
+}
+// asynchronous, source much faster (dtA=0.01 vs dtB=0.11)
+void ParaMEDMEMTest::testAsynchronousFastSourceInterpKernelDEC_2D()
+{
+ testAsynchronousInterpKernelDEC_2D(0.01,1,0.11,1,true,true,true,"P0","P0");
+}
+
+/*!
+ * 5-proc test (2 source procs + 3 target procs) of InterpKernelDEC with P0-P0
+ * interpolation on hand-built non-overlapping 2D quad meshes.  Six exchanges
+ * are run, covering the three field natures (ConservativeVolumic, Integral,
+ * IntegralGlobConstraint) in both directions; received values are checked
+ * against the precomputed targetResults* tables.
+ */
+void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P0()
+{
+ //
+ // one QUAD4 per source proc, two QUAD4 per target proc (one coord row per rank)
+ const double sourceCoordsAll[2][8]={{0.4,0.5,0.4,1.5,1.6,1.5,1.6,0.5},
+ {0.3,-0.5,1.6,-0.5,1.6,-1.5,0.3,-1.5}};
+ const double targetCoordsAll[3][16]={{0.7,1.45,0.7,1.65,0.9,1.65,0.9,1.45, 1.1,1.4,1.1,1.6,1.3,1.6,1.3,1.4},
+ {0.7,-0.6,0.7,0.7,0.9,0.7,0.9,-0.6, 1.1,-0.7,1.1,0.6,1.3,0.6,1.3,-0.7},
+ {0.7,-1.55,0.7,-1.35,0.9,-1.35,0.9,-1.55, 1.1,-1.65,1.1,-1.45,1.3,-1.45,1.3,-1.65}};
+ int conn4All[8]={0,1,2,3,4,5,6,7};
+ // expected values per target rank for natures ConservativeVolumic / Integral / IntegralGlobConstraint
+ double targetResults[3][2]={{34.,34.},{38.333333333333336,42.666666666666664},{47.,47.}};
+ double targetResults2[3][2]={{0.28333333333333344,0.56666666666666687},{1.8564102564102569,2.0128205128205132},{1.0846153846153845,0.36153846153846159}};
+ double targetResults3[3][2]={{3.7777777777777781,7.5555555555555562},{24.511111111111113,26.355555555555558},{14.1,4.7}};
+ //
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ //
+ if(size!=5)
+ return ;
+ int nproc_source = 2;
+ set<int> self_procs;
+ set<int> procs_source;
+ set<int> procs_target;
+
+ for (int i=0; i<nproc_source; i++)
+ procs_source.insert(i);
+ for (int i=nproc_source; i<size; i++)
+ procs_target.insert(i);
+ self_procs.insert(rank);
+ //
+ ParaMEDMEM::MEDCouplingUMesh *mesh=0;
+ ParaMEDMEM::ParaMESH *paramesh=0;
+ ParaMEDMEM::ParaFIELD* parafield=0;
+ //
+ ParaMEDMEM::CommInterface interface;
+ //
+ ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+ ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+ ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+ //
+ MPI_Barrier(MPI_COMM_WORLD);
+ // source procs: one-cell mesh + one-value P0 field (34 on rank 0, 47 on rank 1)
+ if(source_group->containsMyRank())
+ {
+ std::ostringstream stream; stream << "sourcemesh2D proc " << rank;
+ mesh=MEDCouplingUMesh::New(stream.str().c_str(),2);
+ mesh->allocateCells(2);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(4,2);
+ const double *sourceCoords=sourceCoordsAll[rank];
+ std::copy(sourceCoords,sourceCoords+8,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ paramesh=new ParaMESH(mesh,*source_group,"source mesh");
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ double *value=parafield->getField()->getArray()->getPointer();
+ value[0]=34+13*((double)rank);
+ }
+ // target procs: two-cell mesh; field values are filled by the exchanges below
+ else
+ {
+ std::ostringstream stream; stream << "targetmesh2D proc " << rank-nproc_source;
+ mesh=MEDCouplingUMesh::New(stream.str().c_str(),2);
+ mesh->allocateCells(2);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn4All+4);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(8,2);
+ const double *targetCoords=targetCoordsAll[rank-nproc_source];
+ std::copy(targetCoords,targetCoords+16,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ paramesh=new ParaMESH (mesh,*target_group,"target mesh");
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ }
+ //test 1 - Conservative volumic
+ ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group);
+ parafield->getField()->setNature(ConservativeVolumic);
+ if (source_group->containsMyRank())
+ {
+ dec.setMethod("P0");
+ dec.attachLocalField(parafield);
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ dec.sendData();
+ }
+ else
+ {
+ dec.setMethod("P0");
+ dec.attachLocalField(parafield);
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ dec.recvData();
+ const double *res=parafield->getField()->getArray()->getConstPointer();
+ const double *expected=targetResults[rank-nproc_source];
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);
+ }
+ //test 2 - Integral
+ ParaMEDMEM::InterpKernelDEC dec2(*source_group,*target_group);
+ parafield->getField()->setNature(Integral);
+ if (source_group->containsMyRank())
+ {
+ dec2.setMethod("P0");
+ dec2.attachLocalField(parafield);
+ dec2.synchronize();
+ dec2.setForcedRenormalization(false);
+ dec2.sendData();
+ }
+ else
+ {
+ dec2.setMethod("P0");
+ dec2.attachLocalField(parafield);
+ dec2.synchronize();
+ dec2.setForcedRenormalization(false);
+ dec2.recvData();
+ const double *res=parafield->getField()->getArray()->getConstPointer();
+ const double *expected=targetResults2[rank-nproc_source];
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);
+ }
+ //test 3 - Integral with global constraint
+ ParaMEDMEM::InterpKernelDEC dec3(*source_group,*target_group);
+ parafield->getField()->setNature(IntegralGlobConstraint);
+ if (source_group->containsMyRank())
+ {
+ dec3.setMethod("P0");
+ dec3.attachLocalField(parafield);
+ dec3.synchronize();
+ dec3.setForcedRenormalization(false);
+ dec3.sendData();
+ }
+ else
+ {
+ dec3.setMethod("P0");
+ dec3.attachLocalField(parafield);
+ dec3.synchronize();
+ dec3.setForcedRenormalization(false);
+ dec3.recvData();
+ const double *res=parafield->getField()->getArray()->getConstPointer();
+ const double *expected=targetResults3[rank-nproc_source];
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[0],res[0],1e-13);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[1],res[1],1e-13);
+ }
+ //test 4 - Conservative volumic reversed
+ // reversed direction: targets preload the reference values and send; sources receive and check
+ ParaMEDMEM::InterpKernelDEC dec4(*source_group,*target_group);
+ parafield->getField()->setNature(ConservativeVolumic);
+ if (source_group->containsMyRank())
+ {
+ dec4.setMethod("P0");
+ dec4.attachLocalField(parafield);
+ dec4.synchronize();
+ dec4.setForcedRenormalization(false);
+ dec4.recvData();
+ const double *res=parafield->getField()->getArray()->getConstPointer();
+ CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples());
+ const double expected[]={37.8518518518519,43.5333333333333};
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13);
+ }
+ else
+ {
+ dec4.setMethod("P0");
+ dec4.attachLocalField(parafield);
+ dec4.synchronize();
+ dec4.setForcedRenormalization(false);
+ double *res=parafield->getField()->getArray()->getPointer();
+ const double *toSet=targetResults[rank-nproc_source];
+ res[0]=toSet[0];
+ res[1]=toSet[1];
+ dec4.sendData();
+ }
+ //test 5 - Integral reversed
+ ParaMEDMEM::InterpKernelDEC dec5(*source_group,*target_group);
+ parafield->getField()->setNature(Integral);
+ if (source_group->containsMyRank())
+ {
+ dec5.setMethod("P0");
+ dec5.attachLocalField(parafield);
+ dec5.synchronize();
+ dec5.setForcedRenormalization(false);
+ dec5.recvData();
+ const double *res=parafield->getField()->getArray()->getConstPointer();
+ CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples());
+ const double expected[]={0.794600591715977,1.35631163708087};
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13);
+ }
+ else
+ {
+ dec5.setMethod("P0");
+ dec5.attachLocalField(parafield);
+ dec5.synchronize();
+ dec5.setForcedRenormalization(false);
+ double *res=parafield->getField()->getArray()->getPointer();
+ const double *toSet=targetResults2[rank-nproc_source];
+ res[0]=toSet[0];
+ res[1]=toSet[1];
+ dec5.sendData();
+ }
+ //test 6 - Integral with global constraint reversed
+ ParaMEDMEM::InterpKernelDEC dec6(*source_group,*target_group);
+ parafield->getField()->setNature(IntegralGlobConstraint);
+ if (source_group->containsMyRank())
+ {
+ dec6.setMethod("P0");
+ dec6.attachLocalField(parafield);
+ dec6.synchronize();
+ dec6.setForcedRenormalization(false);
+ dec6.recvData();
+ const double *res=parafield->getField()->getArray()->getConstPointer();
+ CPPUNIT_ASSERT_EQUAL(1,parafield->getField()->getNumberOfTuples());
+ const double expected[]={36.4592592592593,44.5407407407407};
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expected[rank],res[0],1e-13);
+ }
+ else
+ {
+ dec6.setMethod("P0");
+ dec6.attachLocalField(parafield);
+ dec6.synchronize();
+ dec6.setForcedRenormalization(false);
+ double *res=parafield->getField()->getArray()->getPointer();
+ const double *toSet=targetResults3[rank-nproc_source];
+ res[0]=toSet[0];
+ res[1]=toSet[1];
+ dec6.sendData();
+ }
+ //
+ delete parafield;
+ mesh->decrRef();
+ delete paramesh;
+ delete self_group;
+ delete target_group;
+ delete source_group;
+ //
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*!
+ * 5-proc test (2 source procs + 3 target procs) mixing discretizations:
+ * the source side attaches a P0 (cell) field and the target side a P1 (node)
+ * field.  Target meshes carry an explicit global node numbering
+ * (setNodeGlobal) so that shared nodes between target procs are identified.
+ * After a send/recv round trip, both sides check expected values.
+ */
+void ParaMEDMEMTest::testInterpKernelDECNonOverlapp_2D_P0P1P1P0()
+{
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ //
+ if(size!=5)
+ return ;
+ int nproc_source = 2;
+ set<int> self_procs;
+ set<int> procs_source;
+ set<int> procs_target;
+
+ for (int i=0; i<nproc_source; i++)
+ procs_source.insert(i);
+ for (int i=nproc_source; i<size; i++)
+ procs_target.insert(i);
+ self_procs.insert(rank);
+ //
+ ParaMEDMEM::MEDCouplingUMesh *mesh=0;
+ ParaMEDMEM::ParaMESH *paramesh=0;
+ ParaMEDMEM::ParaFIELD *parafieldP0=0,*parafieldP1=0;
+ //
+ ParaMEDMEM::CommInterface interface;
+ //
+ ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+ ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+ ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+ //
+ MPI_Barrier(MPI_COMM_WORLD);
+ // source procs: each holds one TRI3 plus both a P0 and a P1 field with fixed values
+ if(source_group->containsMyRank())
+ {
+ if(rank==0)
+ {
+ double coords[6]={-0.3,-0.3, 0.7,0.7, 0.7,-0.3};
+ int conn[3]={0,1,2};
+ //int globalNode[3]={1,2,0};
+ mesh=MEDCouplingUMesh::New("Source mesh Proc0",2);
+ mesh->allocateCells(1);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(3,2);
+ std::copy(coords,coords+6,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ }
+ if(rank==1)
+ {
+ double coords[6]={-0.3,-0.3, -0.3,0.7, 0.7,0.7};
+ int conn[3]={0,1,2};
+ //int globalNode[3]={1,3,2};
+ mesh=MEDCouplingUMesh::New("Source mesh Proc1",2);
+ mesh->allocateCells(1);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(3,2);
+ std::copy(coords,coords+6,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ }
+ paramesh=new ParaMESH(mesh,*source_group,"source mesh");
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
+ double *valueP0=parafieldP0->getField()->getArray()->getPointer();
+ double *valueP1=parafieldP1->getField()->getArray()->getPointer();
+ parafieldP0->getField()->setNature(ConservativeVolumic);
+ parafieldP1->getField()->setNature(ConservativeVolumic);
+ if(rank==0)
+ {
+ valueP0[0]=31.;
+ valueP1[0]=34.; valueP1[1]=77.; valueP1[2]=53.;
+ }
+ if(rank==1)
+ {
+ valueP0[0]=47.;
+ valueP1[0]=34.; valueP1[1]=57.; valueP1[2]=77.;
+ }
+ }
+ // target procs: quad/tri meshes with a global node numbering attached via setNodeGlobal
+ else
+ {
+ const char targetMeshName[]="target mesh";
+ if(rank==2)
+ {
+ double coords[10]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2 };
+ int conn[7]={0,3,4,1, 1,4,2};
+ //int globalNode[5]={4,3,0,2,1};
+ mesh=MEDCouplingUMesh::New("Target mesh Proc2",2);
+ mesh->allocateCells(2);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn+4);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(5,2);
+ std::copy(coords,coords+10,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+ // NOTE(review): second useArray argument looks like an ownership flag
+ // (false => 'da' does not own the stack array) -- confirm against
+ // DataArrayInt::useArray before relying on it
+ DataArrayInt *da=DataArrayInt::New();
+ const int globalNumberingP2[5]={0,1,2,3,4};
+ da->useArray(globalNumberingP2,false,CPP_DEALLOC,5,1);
+ paramesh->setNodeGlobal(da);
+ da->decrRef();
+ }
+ if(rank==3)
+ {
+ double coords[6]={0.2,0.2, 0.7,-0.3, 0.7,0.2};
+ int conn[3]={0,2,1};
+ //int globalNode[3]={1,0,5};
+ mesh=MEDCouplingUMesh::New("Target mesh Proc3",2);
+ mesh->allocateCells(1);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,conn);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(3,2);
+ std::copy(coords,coords+6,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+ DataArrayInt *da=DataArrayInt::New();
+ const int globalNumberingP3[3]={4,2,5};
+ da->useArray(globalNumberingP3,false,CPP_DEALLOC,3,1);
+ paramesh->setNodeGlobal(da);
+ da->decrRef();
+ }
+ if(rank==4)
+ {
+ double coords[12]={-0.3,0.2, -0.3,0.7, 0.2,0.7, 0.2,0.2, 0.7,0.7, 0.7,0.2};
+ int conn[8]={0,1,2,3, 3,2,4,5};
+ //int globalNode[6]={2,6,7,1,8,5};
+ mesh=MEDCouplingUMesh::New("Target mesh Proc4",2);
+ mesh->allocateCells(2);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,conn+4);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(6,2);
+ std::copy(coords,coords+12,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ paramesh=new ParaMESH(mesh,*target_group,targetMeshName);
+ DataArrayInt *da=DataArrayInt::New();
+ const int globalNumberingP4[6]={3,6,7,4,8,5};
+ da->useArray(globalNumberingP4,false,CPP_DEALLOC,6,1);
+ paramesh->setNodeGlobal(da);
+ da->decrRef();
+ }
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafieldP0 = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ parafieldP1 = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
+ parafieldP0->getField()->setNature(ConservativeVolumic);
+ parafieldP1->getField()->setNature(ConservativeVolumic);
+ }
+ // test 1 - P0 P1
+ // source attaches P0 and does send+recv; target attaches P1 and does recv+send
+ ParaMEDMEM::InterpKernelDEC dec(*source_group,*target_group);
+ if (source_group->containsMyRank())
+ {
+ dec.setMethod("P0");
+ dec.attachLocalField(parafieldP0);
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ dec.sendData();
+ dec.recvData();
+ const double *valueP0=parafieldP0->getField()->getArray()->getPointer();
+ if(rank==0)
+ {
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(34.42857143,valueP0[0],1e-7);
+ }
+ if(rank==1)
+ {
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(44.,valueP0[0],1e-7);
+ }
+ }
+ else
+ {
+ dec.setMethod("P1");
+ dec.attachLocalField(parafieldP1);
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ dec.recvData();
+ const double *res=parafieldP1->getField()->getArray()->getConstPointer();
+ if(rank==2)
+ {
+ const double expectP2[5]={39.0, 31.0, 31.0, 47.0, 39.0};
+ CPPUNIT_ASSERT_EQUAL(5,parafieldP1->getField()->getNumberOfTuples());
+ CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+ for(int kk=0;kk<5;kk++)
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP2[kk],res[kk],1e-12);
+ }
+ if(rank==3)
+ {
+ const double expectP3[3]={39.0, 31.0, 31.0};
+ CPPUNIT_ASSERT_EQUAL(3,parafieldP1->getField()->getNumberOfTuples());
+ CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+ for(int kk=0;kk<3;kk++)
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP3[kk],res[kk],1e-12);
+ }
+ if(rank==4)
+ {
+ const double expectP4[6]={47.0, 47.0, 47.0, 39.0, 39.0, 31.0};
+ CPPUNIT_ASSERT_EQUAL(6,parafieldP1->getField()->getNumberOfTuples());
+ CPPUNIT_ASSERT_EQUAL(1,parafieldP1->getField()->getNumberOfComponents());
+ for(int kk=0;kk<6;kk++)
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(expectP4[kk],res[kk],1e-12);
+ }
+ dec.sendData();
+ }
+ //
+ delete parafieldP0;
+ delete parafieldP1;
+ mesh->decrRef();
+ delete paramesh;
+ delete self_group;
+ delete target_group;
+ delete source_group;
+ //
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*!
+ * Tests an asynchronous exchange between two codes
+ * one sends data with dtA as an interval, the max time being tmaxA
+ * the other one receives with dtB as an interval, the max time being tmaxB
+ *
+ * Requires 5 procs (3 source + 2 target).  After each send the source field
+ * is advanced to time+dtA; after each receive the target checks that the
+ * volume integral equals time*10000.
+ * \param WithPointToPoint use the PointToPoint all-to-all method instead of Native
+ * \param Asynchronous     run the DEC in asynchronous mode
+ * \param WithInterp       enable linear time interpolation in the DEC
+ */
+void ParaMEDMEMTest::testAsynchronousInterpKernelDEC_2D(double dtA, double tmaxA,
+ double dtB, double tmaxB, bool WithPointToPoint, bool Asynchronous,
+ bool WithInterp, const char *srcMeth, const char *targetMeth)
+{
+ std::string srcM(srcMeth);
+ std::string targetM(targetMeth);
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ //the test is meant to run on five processors
+ if (size !=5) return ;
+
+ int nproc_source = 3;
+ set<int> self_procs;
+ set<int> procs_source;
+ set<int> procs_target;
+
+ for (int i=0; i<nproc_source; i++)
+ procs_source.insert(i);
+ for (int i=nproc_source; i<size; i++)
+ procs_target.insert(i);
+ self_procs.insert(rank);
+
+ ParaMEDMEM::CommInterface interface;
+
+ ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+ ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+ ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+
+ //loading the geometry for the source group
+
+ ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
+
+ ParaMEDMEM::MEDCouplingUMesh* mesh;
+ ParaMEDMEM::ParaMESH* paramesh;
+ ParaMEDMEM::ParaFIELD* parafield;
+
+ ICoCo::Field* icocofield ;
+
+ // NOTE(review): the former 'tmp_dir = getenv("TMP")' block was removed: the
+ // variable was never used, and constructing a std::string from the null
+ // pointer getenv() returns when TMP is unset is undefined behavior.
+ string filename_xml1 = getResourceFile("square1_split");
+ string filename_xml2 = getResourceFile("square2_split");
+
+ // To remove tmp files from disk
+ ParaMEDMEMTest_TmpFilesRemover aRemover;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (source_group->containsMyRank())
+ {
+ // each source proc reads its own split file square1_split<rank+1>.med
+ string master = filename_xml1;
+
+ ostringstream strstream;
+ strstream <<master<<rank+1<<".med";
+ ostringstream meshname ;
+ meshname<< "Mesh_2_"<< rank+1;
+
+ mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+
+ paramesh=new ParaMESH (mesh,*source_group,"source mesh");
+
+ ParaMEDMEM::ComponentTopology comptopo;
+ if(srcM=="P0")
+ {
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ parafield->getField()->setNature(ConservativeVolumic);
+ }
+ else
+ parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
+
+ int nb_local;
+ if(srcM=="P0")
+ nb_local=mesh->getNumberOfCells();
+ else
+ nb_local=mesh->getNumberOfNodes();
+ double *value=parafield->getField()->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=0.0;
+
+ icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+
+ dec.attachLocalField(icocofield);
+ }
+
+ //loading the geometry for the target group
+ if (target_group->containsMyRank())
+ {
+ string master= filename_xml2;
+ ostringstream strstream;
+ strstream << master<<(rank-nproc_source+1)<<".med";
+ ostringstream meshname ;
+ meshname<< "Mesh_3_"<<rank-nproc_source+1;
+
+ mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+
+ paramesh=new ParaMESH (mesh,*target_group,"target mesh");
+ ParaMEDMEM::ComponentTopology comptopo;
+ if(targetM=="P0")
+ {
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+ parafield->getField()->setNature(ConservativeVolumic);
+ }
+ else
+ parafield = new ParaFIELD(ON_NODES,NO_TIME,paramesh, comptopo);
+
+ int nb_local;
+ if(targetM=="P0")
+ nb_local=mesh->getNumberOfCells();
+ else
+ nb_local=mesh->getNumberOfNodes();
+
+ double *value=parafield->getField()->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=0.0;
+ icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+
+ dec.attachLocalField(icocofield);
+ }
+
+
+ //attaching a DEC to the source group
+
+ if (source_group->containsMyRank())
+ {
+ cout<<"DEC usage"<<endl;
+ dec.setAsynchronous(Asynchronous);
+ if ( WithInterp ) {
+ dec.setTimeInterpolationMethod(LinearTimeInterp);
+ }
+ if ( WithPointToPoint ) {
+ dec.setAllToAllMethod(PointToPoint);
+ }
+ else {
+ dec.setAllToAllMethod(Native);
+ }
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ for (double time=0; time<tmaxA+1e-10; time+=dtA)
+ {
+ cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time
+ << " dtA " << dtA << " tmaxA " << tmaxA << endl ;
+ // last step of the time window is sent with a zero interval
+ if ( time+dtA < tmaxA+1e-7 ) {
+ dec.sendData( time , dtA );
+ }
+ else {
+ dec.sendData( time , 0 );
+ }
+ // advance the source field to the value of the next time step
+ double* value = parafield->getField()->getArray()->getPointer();
+ int nb_local=parafield->getField()->getMesh()->getNumberOfCells();
+ for (int i=0; i<nb_local;i++)
+ value[i]= time+dtA;
+ }
+ }
+
+ //attaching a DEC to the target group
+ if (target_group->containsMyRank())
+ {
+ cout<<"DEC usage"<<endl;
+ dec.setAsynchronous(Asynchronous);
+ if ( WithInterp ) {
+ dec.setTimeInterpolationMethod(LinearTimeInterp);
+ }
+ if ( WithPointToPoint ) {
+ dec.setAllToAllMethod(PointToPoint);
+ }
+ else {
+ dec.setAllToAllMethod(Native);
+ }
+ dec.synchronize();
+ dec.setForcedRenormalization(false);
+ vector<double> times;
+ for (double time=0; time<tmaxB+1e-10; time+=dtB)
+ {
+ cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time
+ << " dtB " << dtB << " tmaxB " << tmaxB << endl ;
+ dec.recvData( time );
+ double vi = parafield->getVolumeIntegral(0,true);
+ cout << "testAsynchronousInterpKernelDEC_2D" << rank << " time " << time
+ << " VolumeIntegral " << vi
+ << " time*10000 " << time*10000 << endl ;
+
+ // received integral must track the (time-interpolated) source value
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(vi,time*10000,0.001);
+ }
+
+ }
+
+ delete source_group;
+ delete target_group;
+ delete self_group;
+ delete parafield ;
+ delete paramesh ;
+ mesh->decrRef() ;
+ delete icocofield ;
+
+ cout << "testAsynchronousInterpKernelDEC_2D" << rank << " MPI_Barrier " << endl ;
+
+ if (Asynchronous) MPI_Barrier(MPI_COMM_WORLD);
+ cout << "end of InterpKernelDEC_2D test"<<endl;
+}
--- /dev/null
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+#include "MEDLoader.hxx"
+#include "MEDCouplingUMesh.hxx"
+#include "MEDCouplingFieldDouble.hxx"
+
+#include <algorithm>
+#include <numeric>
+#include <iostream>
+#include <iterator>
+
+using namespace std;
+using namespace INTERP_KERNEL;
+using namespace ParaMEDMEM;
+
+/*!
+ * Reads the reference file pointe_import22.med and validates MEDLoader read
+ * entry points: whole-mesh read, read restricted to a family, read restricted
+ * to a group, then cell fields and node fields (all three time steps of the
+ * node field). All expected counts, cell types and connectivity/coordinate
+ * checksums are hard-coded against the content of that resource file.
+ */
+void ParaMEDMEMTest::testMEDLoaderRead1()
+{
+ string fileName=getResourceFile("pointe_import22.med");
+ vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str());
+ CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
+ // Whole mesh at level 0: 12 TETRA4 + 2 HEXA8 + 2 PYRA5 on 19 nodes.
+ MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0);
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(16,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(3,(int)mesh->getAllTypes().size());
+ for(int i=0;i<12;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(i));
+ for(int i=12;i<14;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,mesh->getTypeOfCell(i));
+ for(int i=14;i<16;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(i));
+ // Checksums over connectivity and coordinates.
+ // NOTE(review): the coords accumulate below is seeded with int 0, so each
+ // partial sum is truncated to int; the expected value 46. matches that
+ // truncated sum — confirm this is intentional before "fixing" the seed.
+ CPPUNIT_ASSERT_EQUAL(90,mesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(701,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+90,0));
+ CPPUNIT_ASSERT_EQUAL(711,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+17,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
+ mesh->decrRef();
+ //
+ // Read restricted to one family: 1 TETRA4 + 1 PYRA5, nodes untouched.
+ vector<string> families=MEDLoader::GetMeshFamilyNames(fileName.c_str(),meshNames[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(8,(int)families.size());
+ CPPUNIT_ASSERT(families[2]=="FAMILLE_ELEMENT_3");
+ //
+ vector<string> families2;
+ families2.push_back(families[2]);
+ mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),0,families2);
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(0));
+ CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(1));
+ CPPUNIT_ASSERT_EQUAL(11,mesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(132,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+11,0));
+ CPPUNIT_ASSERT_EQUAL(16,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+3,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
+ mesh->decrRef();
+ //
+ // Read restricted to one group: 6 TETRA4 + 1 PYRA5.
+ vector<string> groups=MEDLoader::GetMeshGroupsNames(fileName.c_str(),meshNames[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(5,(int)groups.size());
+ CPPUNIT_ASSERT(groups[0]=="groupe1");
+ CPPUNIT_ASSERT(groups[1]=="groupe2");
+ CPPUNIT_ASSERT(groups[2]=="groupe3");
+ CPPUNIT_ASSERT(groups[3]=="groupe4");
+ CPPUNIT_ASSERT(groups[4]=="groupe5");
+ vector<string> groups2;
+ groups2.push_back(groups[0]);
+ mesh=MEDLoader::ReadUMeshFromGroups(fileName.c_str(),meshNames[0].c_str(),0,groups2);
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(7,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
+ for(int i=0;i<6;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,mesh->getTypeOfCell(6));
+ CPPUNIT_ASSERT_EQUAL(36,mesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(254,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+36,0));
+ CPPUNIT_ASSERT_EQUAL(141,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+8,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
+ mesh->decrRef();
+ //
+ // Cell fields: two fields, each with a single (-1,-1) iteration.
+ std::vector<std::string> fieldsName=MEDLoader::GetCellFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(2,(int)fieldsName.size());
+ CPPUNIT_ASSERT(fieldsName[0]=="fieldcelldoublescalar");
+ CPPUNIT_ASSERT(fieldsName[1]=="fieldcelldoublevector");
+ std::vector<std::pair<int,int> > its0=MEDLoader::GetCellFieldIterations(fileName.c_str(),fieldsName[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(1,(int)its0.size());
+ CPPUNIT_ASSERT_EQUAL(-1,its0[0].first);
+ CPPUNIT_ASSERT_EQUAL(-1,its0[0].second);
+ std::vector<std::pair<int,int> > its1=MEDLoader::GetCellFieldIterations(fileName.c_str(),fieldsName[1].c_str());
+ CPPUNIT_ASSERT_EQUAL(1,(int)its1.size());
+ CPPUNIT_ASSERT_EQUAL(-1,its1[0].first);
+ CPPUNIT_ASSERT_EQUAL(-1,its1[0].second);
+ //
+ // Scalar cell field: 16 tuples x 1 component; values checked by difference
+ // against the expected array (max and min of the diff must both be ~0).
+ MEDCouplingFieldDouble *field0=MEDLoader::ReadFieldDoubleCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[0].c_str(),its0[0].first,its0[0].second);
+ field0->checkCoherency();
+ CPPUNIT_ASSERT(field0->getName()==fieldsName[0]);
+ CPPUNIT_ASSERT_EQUAL(1,field0->getNumberOfComponents());
+ CPPUNIT_ASSERT_EQUAL(16,field0->getNumberOfTuples());
+ const double expectedValues[16]={1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,3.,3.,2.,2.};
+ double diffValue[16];
+ std::transform(field0->getArray()->getPointer(),field0->getArray()->getPointer()+16,expectedValues,diffValue,std::minus<double>());
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue,diffValue+16),1e-12);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue,diffValue+16),1e-12);
+ // The support mesh carried by the field must match the whole mesh read above.
+ const MEDCouplingUMesh *constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0->getMesh());
+ CPPUNIT_ASSERT(constMesh);
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
+ for(int i=0;i<12;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
+ for(int i=12;i<14;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
+ for(int i=14;i<16;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
+ CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
+ field0->decrRef();
+ //
+ // Vector cell field: 16 tuples x 3 components (48 values).
+ MEDCouplingFieldDouble *field1=MEDLoader::ReadFieldDoubleCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[1].c_str(),its1[0].first,its1[0].second);
+ field1->checkCoherency();
+ CPPUNIT_ASSERT(field1->getName()==fieldsName[1]);
+ CPPUNIT_ASSERT_EQUAL(3,field1->getNumberOfComponents());
+ CPPUNIT_ASSERT_EQUAL(16,field1->getNumberOfTuples());
+ const double expectedValues2[48]={1.,0.,1.,1.,0.,1.,1.,0.,1.,2.,1.,0.,2.,1.,0.,2.,1.,0.,3.,0.,1.,3.,0.,1.,3.,0.,1.,4.,1.,0.,4.,1.,0.,4.,1.,0.,6.,1.,1.,6.,0.,0.,5.,0.,0.,5.,1.,1.};
+ double diffValue2[48];
+ std::transform(field1->getArray()->getPointer(),field1->getArray()->getPointer()+48,expectedValues2,diffValue2,std::minus<double>());
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue2,diffValue2+48),1e-12);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue2,diffValue2+48),1e-12);
+ constMesh=dynamic_cast<const MEDCouplingUMesh *>(field1->getMesh());
+ CPPUNIT_ASSERT(constMesh);
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
+ for(int i=0;i<12;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
+ for(int i=12;i<14;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
+ for(int i=14;i<16;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
+ CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
+ field1->decrRef();
+ //fields on nodes
+ // Node field "fieldnodedouble" has three iterations: (1,-1), (2,-1), (-1,-1).
+ std::vector<std::string> fieldsNameNode=MEDLoader::GetNodeFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(2,(int)fieldsNameNode.size());
+ CPPUNIT_ASSERT(fieldsNameNode[0]=="fieldnodedouble");
+ CPPUNIT_ASSERT(fieldsNameNode[1]=="fieldnodeint");
+ std::vector<std::pair<int,int> > its0Node=MEDLoader::GetNodeFieldIterations(fileName.c_str(),fieldsNameNode[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(3,(int)its0Node.size());
+ CPPUNIT_ASSERT_EQUAL(1,its0Node[0].first);
+ CPPUNIT_ASSERT_EQUAL(-1,its0Node[0].second);
+ CPPUNIT_ASSERT_EQUAL(2,its0Node[1].first);
+ CPPUNIT_ASSERT_EQUAL(-1,its0Node[1].second);
+ CPPUNIT_ASSERT_EQUAL(-1,its0Node[2].first);//strange but like that
+ CPPUNIT_ASSERT_EQUAL(-1,its0Node[2].second);
+ // First iteration of the node field: 19 tuples x 1 component.
+ MEDCouplingFieldDouble *field0Nodes=MEDLoader::ReadFieldDoubleNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[0].first,its0Node[0].second);
+ field0Nodes->checkCoherency();
+ CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]);
+ CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents());
+ CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples());
+ const double expectedValues3[19]={1.,1.,1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.};
+ double diffValue3[19];
+ std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues3,diffValue3,std::minus<double>());
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12);
+ constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0Nodes->getMesh());
+ CPPUNIT_ASSERT(constMesh);
+ field0Nodes->decrRef();
+ //
+ // Second iteration: same shape, shifted values; mesh re-checked in full.
+ field0Nodes=MEDLoader::ReadFieldDoubleNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[1].first,its0Node[1].second);
+ field0Nodes->checkCoherency();
+ CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]);
+ CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents());
+ CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples());
+ const double expectedValues4[19]={1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.,7.,7.};
+ std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues4,diffValue3,std::minus<double>());
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12);
+ constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0Nodes->getMesh());
+ CPPUNIT_ASSERT(constMesh);
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
+ for(int i=0;i<12;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
+ for(int i=12;i<14;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
+ for(int i=14;i<16;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
+ CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
+ field0Nodes->decrRef();
+ //
+ // Third iteration (-1,-1): values identical to the first iteration.
+ field0Nodes=MEDLoader::ReadFieldDoubleNode(fileName.c_str(),meshNames[0].c_str(),0,fieldsNameNode[0].c_str(),its0Node[2].first,its0Node[2].second);
+ field0Nodes->checkCoherency();
+ CPPUNIT_ASSERT(field0Nodes->getName()==fieldsNameNode[0]);
+ CPPUNIT_ASSERT_EQUAL(1,field0Nodes->getNumberOfComponents());
+ CPPUNIT_ASSERT_EQUAL(19,field0Nodes->getNumberOfTuples());
+ const double expectedValues5[19]={1.,1.,1.,2.,2.,2.,3.,3.,3.,4.,4.,4.,5.,5.,5.,6.,6.,6.,7.};
+ std::transform(field0Nodes->getArray()->getPointer(),field0Nodes->getArray()->getPointer()+19,expectedValues5,diffValue3,std::minus<double>());
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue3,diffValue3+19),1e-12);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue3,diffValue3+19),1e-12);
+ constMesh=dynamic_cast<const MEDCouplingUMesh *>(field0Nodes->getMesh());
+ CPPUNIT_ASSERT(constMesh);
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(16,constMesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,constMesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(3,(int)constMesh->getAllTypes().size());
+ for(int i=0;i<12;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,constMesh->getTypeOfCell(i));
+ for(int i=12;i<14;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_HEXA8,constMesh->getTypeOfCell(i));
+ for(int i=14;i<16;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_PYRA5,constMesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_EQUAL(90,constMesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(701,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+90,0));
+ CPPUNIT_ASSERT_EQUAL(711,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+17,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(46.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+57,0),1e-12);
+ field0Nodes->decrRef();
+}
+
+/*!
+ * Reads polygones.med (mesh "Bord": 514 QUAD4 + 24 polygons in 3D space,
+ * mesh dimension 2) and checks the mesh, then reads the cell field
+ * "bord_:_distorsion" and checks its support mesh and value sum.
+ * Expected counts and checksums are hard-coded against the resource file.
+ */
+void ParaMEDMEMTest::testMEDLoaderPolygonRead()
+{
+ string fileName=getResourceFile("polygones.med");
+ vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str());
+ CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
+ CPPUNIT_ASSERT(meshNames[0]=="Bord");
+ MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0);
+ mesh->checkCoherency();
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(538,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(579,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
+ for(int i=0;i<514;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(i));
+ for(int i=514;i<538;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i));
+ // NOTE(review): this accumulate over doubles is seeded with int 0, so each
+ // partial sum truncates to int; it only works because the expected total is
+ // 0. — confirm intent before changing the seed to 0.
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+1737,0),1e-12);
+ // Spot-check the first 4 nodes (12 coordinates) by difference.
+ const double expectedVals1[12]={1.4851585216522212,-0.5,0.,1.4851585216522212,-0.4,0.,1.4851585216522212,-0.3,0., 1.5741585216522211, -0.5, 0. };
+ double diffValue1[12];
+ std::transform(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+12,expectedVals1,diffValue1,std::minus<double>());
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue1,diffValue1+12),1e-12);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue1,diffValue1+12),1e-12);
+ CPPUNIT_ASSERT_EQUAL(2768,mesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(651050,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+2768,0));
+ CPPUNIT_ASSERT_EQUAL(725943,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+539,0));
+ mesh->decrRef();
+ //
+ // Cell field read: three fields exist; only the first one is loaded.
+ std::vector<std::string> fieldsName=MEDLoader::GetCellFieldNamesOnMesh(fileName.c_str(),meshNames[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(3,(int)fieldsName.size());
+ CPPUNIT_ASSERT(fieldsName[0]=="bord_:_distorsion");
+ CPPUNIT_ASSERT(fieldsName[1]=="bord_:_familles");
+ CPPUNIT_ASSERT(fieldsName[2]=="bord_:_non-ortho");
+ std::vector<std::pair<int,int> > its0=MEDLoader::GetCellFieldIterations(fileName.c_str(),fieldsName[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(1,(int)its0.size());
+ MEDCouplingFieldDouble *field=MEDLoader::ReadFieldDoubleCell(fileName.c_str(),meshNames[0].c_str(),0,fieldsName[0].c_str(),its0[0].first,its0[0].second);
+ field->checkCoherency();
+ CPPUNIT_ASSERT(field->getName()==fieldsName[0]);
+ CPPUNIT_ASSERT_EQUAL(1,field->getNumberOfComponents());
+ CPPUNIT_ASSERT_EQUAL(538,field->getNumberOfTuples());
+ // The support mesh attached to the field must match the mesh read above.
+ const MEDCouplingUMesh *constMesh=dynamic_cast<const MEDCouplingUMesh *>(field->getMesh());
+ CPPUNIT_ASSERT(constMesh);
+ CPPUNIT_ASSERT_EQUAL(3,constMesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(2,constMesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(538,constMesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(579,constMesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(2,(int)constMesh->getAllTypes().size());
+ for(int i=0;i<514;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,constMesh->getTypeOfCell(i));
+ for(int i=514;i<538;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,constMesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,std::accumulate(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+1737,0),1e-12);
+ std::transform(constMesh->getCoords()->getPointer(),constMesh->getCoords()->getPointer()+12,expectedVals1,diffValue1,std::minus<double>());
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::max_element(diffValue1,diffValue1+12),1e-12);
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(0.,*std::min_element(diffValue1,diffValue1+12),1e-12);
+ CPPUNIT_ASSERT_EQUAL(2768,constMesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(651050,std::accumulate(constMesh->getNodalConnectivity()->getPointer(),constMesh->getNodalConnectivity()->getPointer()+2768,0));
+ CPPUNIT_ASSERT_EQUAL(725943,std::accumulate(constMesh->getNodalConnectivityIndex()->getPointer(),constMesh->getNodalConnectivityIndex()->getPointer()+539,0));
+ // Field values are only checked through their (double-seeded) total sum.
+ const double *values=field->getArray()->getPointer();
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(2.87214203182918,std::accumulate(values,values+538,0.),1e-12);
+ field->decrRef();
+}
+
+/*!
+ * Reads poly3D.med and checks polyhedron support: the volumic mesh (level 0:
+ * 1 TETRA4 + 2 POLYHED), the face mesh (level -1: TRI3/QUAD4/POLYGON), a
+ * family-restricted read at level -1, and the degenerate case of reading a
+ * face-only family at level 0 (yields an empty-cell mesh).
+ */
+void ParaMEDMEMTest::testMEDLoaderPolyhedronRead()
+{
+ string fileName=getResourceFile("poly3D.med");
+ vector<string> meshNames=MEDLoader::GetMeshNames(fileName.c_str());
+ CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
+ CPPUNIT_ASSERT(meshNames[0]=="poly3D");
+ // Level 0: the 3D cells.
+ MEDCouplingUMesh *mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),0);
+ mesh->checkCoherency();
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
+ CPPUNIT_ASSERT_EQUAL(NORM_TETRA4,mesh->getTypeOfCell(0));
+ CPPUNIT_ASSERT_EQUAL(NORM_POLYHED,mesh->getTypeOfCell(1));
+ CPPUNIT_ASSERT_EQUAL(NORM_POLYHED,mesh->getTypeOfCell(2));
+ // NOTE(review): the coords accumulate is seeded with int 0 (truncating
+ // partial sums); the expected 110. matches that sum — confirm intent.
+ CPPUNIT_ASSERT_EQUAL(98,mesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(725,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+98,0));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(110.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
+ CPPUNIT_ASSERT_EQUAL(155,std::accumulate(mesh->getNodalConnectivityIndex()->getPointer(),mesh->getNodalConnectivityIndex()->getPointer()+4,0));
+ mesh->decrRef();
+ //
+ // Level -1: the faces (6 TRI3 + 8 QUAD4 + 3 POLYGON).
+ mesh=MEDLoader::ReadUMeshFromFile(fileName.c_str(),meshNames[0].c_str(),-1);
+ mesh->checkCoherency();
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(17,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(3,(int)mesh->getAllTypes().size());
+ for(int i=0;i<6;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(i));
+ for(int i=6;i<14;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(i));
+ for(int i=14;i<17;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(110.,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+57,0),1e-12);
+ CPPUNIT_ASSERT_EQUAL(83,mesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(619,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+83,0));
+ mesh->decrRef();
+ //
+ // Family-restricted read at level -1: only the 3 polygon faces remain.
+ vector<string> families=MEDLoader::GetMeshFamilyNames(fileName.c_str(),meshNames[0].c_str());
+ CPPUNIT_ASSERT_EQUAL(4,(int)families.size());
+ CPPUNIT_ASSERT(families[0]=="FAMILLE_FACE_POLYGONS3");
+ CPPUNIT_ASSERT(families[1]=="FAMILLE_FACE_QUAD41");
+ CPPUNIT_ASSERT(families[2]=="FAMILLE_FACE_TRIA32");
+ CPPUNIT_ASSERT(families[3]=="FAMILLE_ZERO");
+ vector<string> families2;
+ families2.push_back(families[0]);
+ mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),-1,families2);
+ mesh->checkCoherency();
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(1,(int)mesh->getAllTypes().size());
+ for(int i=0;i<3;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_POLYGON,mesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNodalConnectivity()->getNbOfElems());
+ CPPUNIT_ASSERT_EQUAL(117,std::accumulate(mesh->getNodalConnectivity()->getPointer(),mesh->getNodalConnectivity()->getPointer()+19,0));
+ mesh->decrRef();
+ //
+ // Same family requested at level 0: it holds only faces, so the returned
+ // mesh has zero cells (nodes are still present).
+ mesh=MEDLoader::ReadUMeshFromFamilies(fileName.c_str(),meshNames[0].c_str(),0,families2);
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(0,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(19,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(3,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(0,(int)mesh->getAllTypes().size());
+ mesh->decrRef();
+}
+
+/*!
+ * Builds a small 2D mesh by hand (inserted order: QUAD4, TRI3, TRI3, QUAD4,
+ * QUAD4 on a 3x3 grid of 9 nodes), checks that MEDLoader refuses to write a
+ * mesh that has no name, then writes the named mesh to a temporary MED file
+ * and reads it back. On re-read the cells come back grouped by geometric
+ * type (the 2 TRI3 first, then the 3 QUAD4), hence the type checks below.
+ */
+void ParaMEDMEMTest::testMEDLoaderWrite1()
+{
+ const char meshName[]="MEDLoaderWrite1";
+ string outFileName=makeTmpFile("toto22137.med");
+ double targetCoords[18]={-0.3,-0.3, 0.2,-0.3, 0.7,-0.3, -0.3,0.2, 0.2,0.2, 0.7,0.2, -0.3,0.7, 0.2,0.7, 0.7,0.7 };
+ int targetConn[18]={0,3,4,1, 1,4,2, 4,5,2, 6,7,4,3, 7,8,5,4};
+ MEDCouplingUMesh *mesh=MEDCouplingUMesh::New();
+ mesh->setMeshDimension(2);
+ mesh->allocateCells(5);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+4);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_TRI3,3,targetConn+7);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn+10);
+ mesh->insertNextCell(INTERP_KERNEL::NORM_QUAD4,4,targetConn+14);
+ mesh->finishInsertingCells();
+ DataArrayDouble *myCoords=DataArrayDouble::New();
+ myCoords->alloc(9,2);
+ std::copy(targetCoords,targetCoords+18,myCoords->getPointer());
+ mesh->setCoords(myCoords);
+ myCoords->decrRef();
+ mesh->checkCoherency();
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(5,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(9,mesh->getNumberOfNodes());
+ // Writing a nameless mesh must be rejected by MEDLoader.
+ CPPUNIT_ASSERT_THROW(MEDLoader::writeUMesh(outFileName.c_str(),mesh),INTERP_KERNEL::Exception);
+ mesh->setName(meshName);
+ MEDLoader::writeUMesh(outFileName.c_str(),mesh);
+ mesh->decrRef();
+ //
+ // Round trip: read the file back and check the mesh survived intact.
+ mesh=MEDLoader::ReadUMeshFromFile(outFileName.c_str(),meshName,0);
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getSpaceDimension());
+ CPPUNIT_ASSERT_EQUAL(2,mesh->getMeshDimension());
+ CPPUNIT_ASSERT_EQUAL(5,mesh->getNumberOfCells());
+ CPPUNIT_ASSERT_EQUAL(9,mesh->getNumberOfNodes());
+ CPPUNIT_ASSERT_EQUAL(2,(int)mesh->getAllTypes().size());
+ for(int i=0;i<2;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_TRI3,mesh->getTypeOfCell(i));
+ for(int i=2;i<5;i++)
+ CPPUNIT_ASSERT_EQUAL(NORM_QUAD4,mesh->getTypeOfCell(i));
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(3.6,std::accumulate(mesh->getCoords()->getPointer(),mesh->getCoords()->getPointer()+18,0.),1.e-12);
+ mesh->decrRef();
+}
+
+/*!
+ * Round-trip smoke test for polygon cells: reads the reference mesh "Bord"
+ * from polygones.med, writes it unchanged to a temporary MED file and reads
+ * that file back. Only coherency of the source mesh is asserted.
+ */
+void ParaMEDMEMTest::testMEDLoaderPolygonWrite()
+{
+ string inputFile=getResourceFile("polygones.med");
+ vector<string> meshNames=MEDLoader::GetMeshNames(inputFile.c_str());
+ CPPUNIT_ASSERT_EQUAL(1,(int)meshNames.size());
+ CPPUNIT_ASSERT(meshNames[0]=="Bord");
+ // Load the source mesh and make sure it is well-formed before writing.
+ MEDCouplingUMesh *sourceMesh=MEDLoader::ReadUMeshFromFile(inputFile.c_str(),meshNames[0].c_str(),0);
+ sourceMesh->checkCoherency();
+ // Write it out, then read the freshly written file back in.
+ string tmpFile=makeTmpFile("toto22138.med");
+ MEDLoader::writeUMesh(tmpFile.c_str(),sourceMesh);
+ MEDCouplingUMesh *rereadMesh=MEDLoader::ReadUMeshFromFile(tmpFile.c_str(),meshNames[0].c_str(),0);
+ rereadMesh->decrRef();
+ sourceMesh->decrRef();
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "InterpolationUtils.hxx"
+
+#include <string>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+/*
+ * Check methods defined in MPIProcessorGroup.hxx
+ *
+ (+) MPIProcessorGroup(const CommInterface& interface);
+ (+) MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids);
+ (u) MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids);
+ (+) MPIProcessorGroup(const CommInterface& interface,int pstart, int pend);
+ (+) virtual ~MPIProcessorGroup();
+ (+) virtual ProcessorGroup* fuse (const ProcessorGroup&) const;
+ (u) void intersect (ProcessorGroup&){};
+ (+) int myRank() const {int rank; MPI_Comm_rank(_comm,&rank); return rank;}
+ (+) bool containsMyRank() const { int rank; MPI_Group_rank(_group, &rank); return (rank!=MPI_UNDEFINED);}
+ (+) int translateRank(const ProcessorGroup* group, int rank) const;
+ (+) const MPI_Comm* getComm() const {return &_comm;}
+ (+) ProcessorGroup* createComplementProcGroup() const;
+ (o) ProcessorGroup* createProcGroup() const;
+
+*/
+
+/*!
+ * Exercises the three MPIProcessorGroup constructors: whole-world, explicit
+ * rank set, and [pfirst,plast] range. Invalid inputs (set larger than the
+ * world, inverted range, plast beyond size-1) must raise
+ * INTERP_KERNEL::Exception.
+ */
+void ParaMEDMEMTest::testMPIProcessorGroup_constructor()
+{
+ CommInterface comm_interface;
+ int world_size;
+ MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+ // Default constructor: the group spans the whole world communicator.
+ MPIProcessorGroup* grp=new MPIProcessorGroup(comm_interface);
+ CPPUNIT_ASSERT_EQUAL(world_size,grp->size());
+ int comm_size;
+ MPI_Comm_size(*grp->getComm(), &comm_size);
+ CPPUNIT_ASSERT_EQUAL(world_size,comm_size);
+ delete grp;
+
+ // Constructor from an explicit rank set {0,1}: throws when the set is
+ // larger than the world (mono-process run), succeeds otherwise.
+ set<int> proc_ids;
+ proc_ids.insert(0);
+ proc_ids.insert(1);
+ if (world_size==1)
+ CPPUNIT_ASSERT_THROW(grp=new MPIProcessorGroup(comm_interface,proc_ids),INTERP_KERNEL::Exception);
+ else
+ {
+ CPPUNIT_ASSERT_NO_THROW(grp=new MPIProcessorGroup(comm_interface,proc_ids));
+ CPPUNIT_ASSERT_EQUAL(2,grp->size());
+ delete grp;
+ }
+
+ // Range constructor: plast<pfirst is rejected...
+ CPPUNIT_ASSERT_THROW(grp=new MPIProcessorGroup(comm_interface,1,0),INTERP_KERNEL::Exception);
+ // ...and so is plast beyond size-1.
+ CPPUNIT_ASSERT_THROW(grp=new MPIProcessorGroup(comm_interface,0,world_size),INTERP_KERNEL::Exception);
+ // A valid range [0,size-2] yields a group of size-1 processors.
+ if (world_size>1)
+ {
+ grp=new MPIProcessorGroup(comm_interface,0,world_size-2);
+ CPPUNIT_ASSERT_EQUAL(world_size-1,grp->size());
+ delete grp;
+ }
+}
+
+/*!
+ * Checks fuse() and createComplementProcGroup(): fusing the group {0} with
+ * {size-1} yields 1 processor when size==1 and 2 otherwise, and the
+ * complement of the fused group holds the remaining size-group_fuse_size
+ * processors.
+ */
+void ParaMEDMEMTest::testMPIProcessorGroup_boolean()
+{
+ int size;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ CommInterface comm_interface;
+ MPIProcessorGroup group(comm_interface,0,0);
+ MPIProcessorGroup group2(comm_interface,size-1,size-1);
+ ProcessorGroup* group_fuse=group.fuse(group2);
+ int group_fuse_size=(size==1)?1:2;
+ CPPUNIT_ASSERT_EQUAL(group_fuse_size,group_fuse->size());
+
+ // static_cast instead of a C-style cast: group_fuse was produced by
+ // MPIProcessorGroup::fuse, so the downcast is known to be valid.
+ ProcessorGroup* group_complement=static_cast<MPIProcessorGroup*>(group_fuse)->createComplementProcGroup();
+ CPPUNIT_ASSERT_EQUAL(group_complement->size(),size-group_fuse_size);
+
+ delete group_fuse;
+ delete group_complement;
+
+ //intersect not implemented yet
+ // if (size>1)
+ // {
+ // MPIProcessorGroup group3(comm_interface,0,size-2);
+ // MPIProcessorGroup group4(comm_interface,1,size-1);
+ // group3.intersect(group4);
+ // CPPUNIT_ASSERT_EQUAL(group3.size(),size-2);
+ // }
+}
+
+/*!
+ * Checks myRank() against MPI_Comm_rank and translateRank() on a group
+ * fused from {size-1} and {0}: inside the fused group, rank 0 of the
+ * last-processor group maps to 0 when size==1 and to 1 otherwise.
+ */
+void ParaMEDMEMTest::testMPIProcessorGroup_rank()
+{
+ int nb_procs;
+ MPI_Comm_size(MPI_COMM_WORLD, &nb_procs);
+ int world_rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+
+ CommInterface comm_interface;
+ MPIProcessorGroup first_grp(comm_interface,0,0);
+ MPIProcessorGroup last_grp(comm_interface,nb_procs-1,nb_procs-1);
+ ProcessorGroup* fused=last_grp.fuse(first_grp);
+
+ // On the process belonging to {0}, the group-local rank equals the
+ // world rank (both are 0 there).
+ if (first_grp.containsMyRank())
+ CPPUNIT_ASSERT_EQUAL(world_rank,first_grp.myRank());
+
+ // Translate rank 0 of the last-processor group into the fused group.
+ if (last_grp.containsMyRank())
+ {
+ int translated=fused->translateRank(&last_grp,0);
+ int expected=(nb_procs==1)?0:1;
+ CPPUNIT_ASSERT_EQUAL(expected,translated);
+ }
+ delete fused;
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "MEDMEM_Exception.hxx"
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "DEC.hxx"
+#include "NonCoincidentDEC.hxx"
+#include "ParaMESH.hxx"
+#include "ParaFIELD.hxx"
+#include "UnstructuredParaSUPPORT.hxx"
+#include "ICoCoMEDField.hxx"
+
+#include <string>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+
+using namespace std;
+using namespace ParaMEDMEM;
+using namespace MEDMEM;
+
+/*
+ * Check methods defined in NonCoincidentDEC.hxx
+ *
+ NonCoincidentDEC();
+ NonCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group);
+ virtual ~NonCoincidentDEC();
+ void synchronize();
+ void recvData();
+ void sendData();
+*/
+
+// 2D driver: runs the generic NonCoincidentDEC test on two split square
+// meshes (3 source procs + 2 target procs) with a tight 1e-6 tolerance.
+void ParaMEDMEMTest::testNonCoincidentDEC_2D()
+{
+
+ int size;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+ //the test is meant to run on five processors
+ if (size !=5) return ;
+
+ testNonCoincidentDEC( "/share/salome/resources/med/square1_split",
+ "Mesh_2",
+ "/share/salome/resources/med/square2_split",
+ "Mesh_3",
+ 3,
+ 1e-6);
+}
+
+// 3D driver: runs the generic NonCoincidentDEC test on two split blade
+// meshes (2 source procs + 2 target procs) with a loose 1e4 tolerance.
+void ParaMEDMEMTest::testNonCoincidentDEC_3D()
+{
+ int size;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+ //the test is meant to run on four processors
+ if (size !=4) return ;
+
+ testNonCoincidentDEC( "/share/salome/resources/med/blade_12000_split2",
+ "Mesh_1",
+ "/share/salome/resources/med/blade_3000_split2",
+ "Mesh_1",
+ 2,
+ 1e4);
+}
+
+// Generic NonCoincidentDEC test. The world is split into a source group
+// (ranks [0, nproc_source)) and a target group (remaining ranks). Each side
+// loads its partition of a MED mesh, builds a cell field (source = 1.0,
+// target = 0.0), and exchanges it through the DEC. The test passes if the
+// volume integral of the field is conserved across the transfer to within
+// 'epsilon'.
+// @param filename1/meshname1  source mesh resource prefix and mesh name
+// @param filename2/meshname2  target mesh resource prefix and mesh name
+// @param nproc_source         number of processes in the source group
+// @param epsilon              tolerance on the integral conservation check
+void ParaMEDMEMTest::testNonCoincidentDEC(const string& filename1,
+ const string& meshname1,
+ const string& filename2,
+ const string& meshname2,
+ int nproc_source,
+ double epsilon)
+{
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ // Partition the world: [0, nproc_source) -> source, the rest -> target.
+ set<int> self_procs;
+ set<int> procs_source;
+ set<int> procs_target;
+
+ for (int i=0; i<nproc_source; i++)
+ procs_source.insert(i);
+ for (int i=nproc_source; i<size; i++)
+ procs_target.insert(i);
+ self_procs.insert(rank);
+
+ ParaMEDMEM::CommInterface interface;
+
+ ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+ ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+ ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+
+ // NOTE(review): source_mesh/target_mesh are declared but never used below;
+ // 'field' (further down) is unused as well.
+ ParaMEDMEM::ParaMESH* source_mesh=0;
+ ParaMEDMEM::ParaMESH* target_mesh=0;
+ ParaMEDMEM::ParaSUPPORT* parasupport=0;
+ //loading the geometry for the source group
+
+ ParaMEDMEM::NonCoincidentDEC dec (*source_group,*target_group);
+
+ MEDMEM::MESH* mesh;
+ MEDMEM::SUPPORT* support;
+ MEDMEM::FIELD<double>* field;
+ ParaMEDMEM::ParaMESH* paramesh;
+ ParaMEDMEM::ParaFIELD* parafield;
+
+ string filename_xml1 = getResourceFile(filename1);
+ string filename_xml2 = getResourceFile(filename2);
+ //string filename_seq_wr = makeTmpFile("");
+ //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
+
+ // To remove tmp files from disk
+ ParaMEDMEMTest_TmpFilesRemover aRemover;
+ //aRemover.Register(filename_seq_wr);
+ //aRemover.Register(filename_seq_med);
+ MPI_Barrier(MPI_COMM_WORLD);
+ ICoCo::Field* icocofield;
+ // --- Source side: read partition <prefix><rank+1>.med, build a cell field
+ // of constant value 1.0 and attach it to the DEC.
+ if (source_group->containsMyRank())
+ {
+ string master = filename_xml1;
+
+ ostringstream strstream;
+ strstream <<master<<rank+1<<".med";
+ ostringstream meshname ;
+ meshname<< meshname1<<"_"<< rank+1;
+
+ CPPUNIT_ASSERT_NO_THROW(mesh = new MESH(MED_DRIVER,strstream.str(),meshname.str()));
+ support=new MEDMEM::SUPPORT(mesh,"all elements",MED_EN::MED_CELL);
+
+ paramesh=new ParaMESH (*mesh,*source_group,"source mesh");
+
+ parasupport=new UnstructuredParaSUPPORT( support,*source_group);
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafield = new ParaFIELD(parasupport, comptopo);
+
+
+ int nb_local=support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
+ double * value= new double[nb_local];
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=1.0;
+ // setValue copies the data, so the local buffer can be freed afterwards.
+ parafield->getField()->setValue(value);
+
+ icocofield=new ICoCo::MEDField(paramesh,parafield);
+
+ dec.attachLocalField(icocofield);
+ delete [] value;
+ }
+
+ //loading the geometry for the target group
+ // --- Target side: same construction, but the field starts at 0.0 and the
+ // partition index is shifted by nproc_source.
+ if (target_group->containsMyRank())
+ {
+ string master= filename_xml2;
+ ostringstream strstream;
+ strstream << master<<(rank-nproc_source+1)<<".med";
+ ostringstream meshname ;
+ meshname<< meshname2<<"_"<<rank-nproc_source+1;
+
+ CPPUNIT_ASSERT_NO_THROW(mesh = new MESH(MED_DRIVER,strstream.str(),meshname.str()));
+ support=new MEDMEM::SUPPORT(mesh,"all elements",MED_EN::MED_CELL);
+
+ paramesh=new ParaMESH (*mesh,*target_group,"target mesh");
+ parasupport=new UnstructuredParaSUPPORT(support,*target_group);
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafield = new ParaFIELD(parasupport, comptopo);
+
+
+ int nb_local=support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
+ double * value= new double[nb_local];
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=0.0;
+ parafield->getField()->setValue(value);
+ icocofield=new ICoCo::MEDField(paramesh,parafield);
+
+ dec.attachLocalField(icocofield);
+ delete [] value;
+ }
+
+
+ //attaching a DEC to the source group
+ double field_before_int;
+ double field_after_int;
+
+ // Source side: integrate before sending, then synchronize and send.
+ if (source_group->containsMyRank())
+ {
+ field_before_int = parafield->getVolumeIntegral(1);
+ MPI_Bcast(&field_before_int, 1,MPI_DOUBLE, 0,MPI_COMM_WORLD);
+ dec.synchronize();
+ cout<<"DEC usage"<<endl;
+ dec.setOption("ForcedRenormalization",false);
+
+ dec.sendData();
+ // paramesh->write(MED_DRIVER,"./sourcesquarenc");
+ //parafield->write(MED_DRIVER,"./sourcesquarenc","boundary");
+
+
+ }
+
+ //attaching a DEC to the target group
+ // Target side: participate in the broadcast, receive, then integrate.
+ if (target_group->containsMyRank())
+ {
+ MPI_Bcast(&field_before_int, 1,MPI_DOUBLE, 0,MPI_COMM_WORLD);
+
+ dec.synchronize();
+ dec.setOption("ForcedRenormalization",false);
+ dec.recvData();
+ //paramesh->write(MED_DRIVER, "./targetsquarenc");
+ //parafield->write(MED_DRIVER, "./targetsquarenc", "boundary");
+ field_after_int = parafield->getVolumeIntegral(1);
+
+ }
+ // Make both integrals known everywhere: rank 0 holds the 'before' value,
+ // the last rank (a target proc) holds the 'after' value.
+ MPI_Bcast(&field_before_int,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ MPI_Bcast(&field_after_int, 1,MPI_DOUBLE, size-1,MPI_COMM_WORLD);
+
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, epsilon);
+
+ delete source_group;
+ delete target_group;
+ delete self_group;
+ delete icocofield;
+ delete paramesh;
+ delete parafield;
+ delete support;
+ delete parasupport;
+ delete mesh;
+ MPI_Barrier(MPI_COMM_WORLD);
+
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "DEC.hxx"
+#include "StructuredCoincidentDEC.hxx"
+#include "ParaMESH.hxx"
+#include "ParaFIELD.hxx"
+#include "ComponentTopology.hxx"
+#include "ICoCoMEDField.hxx"
+#include "MEDLoader.hxx"
+
+#include <string>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+/*
+ * Check methods defined in StructuredCoincidentDEC.hxx
+ *
+ StructuredCoincidentDEC();
+ StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group);
+ virtual ~StructuredCoincidentDEC();
+ void synchronize();
+ void recvData();
+ void sendData();
+*/
+
+// StructuredCoincidentDEC test (needs >= 4 procs). Source group (ranks 0-2)
+// reads a split mesh and fills a 6-component cell field from the global cell
+// numbering; target group (ranks 3..size-1) reads the whole mesh with the
+// components distributed over the group. After send/recv, each target proc
+// checks that its component slice matches the globally numbered values.
+void ParaMEDMEMTest::testStructuredCoincidentDEC() {
+ string testname="ParaMEDMEM - testStructured CoincidentDEC";
+ // MPI_Init(&argc, &argv);
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (size<4) {
+ return;
+ }
+
+ ParaMEDMEM::CommInterface interface;
+
+ // Every rank belongs to exactly one of source (0-2) / target (3..size-1);
+ // self_group is the singleton {rank}, used as the target's mesh group.
+ ParaMEDMEM::MPIProcessorGroup self_group (interface,rank,rank);
+ ParaMEDMEM::MPIProcessorGroup target_group(interface,3,size-1);
+ ParaMEDMEM::MPIProcessorGroup source_group (interface,0,2);
+
+ ParaMEDMEM::MEDCouplingUMesh* mesh;
+ ParaMEDMEM::ParaMESH* paramesh;
+ ParaMEDMEM::ParaFIELD* parafield;
+
+ string filename_xml1 = getResourceFile("square1_split");
+ string filename_2 = getResourceFile("square1.med");
+ //string filename_seq_wr = makeTmpFile("");
+ //string filename_seq_med = makeTmpFile("myWrField_seq_pointe221.med");
+
+ // To remove tmp files from disk
+ ParaMEDMEMTest_TmpFilesRemover aRemover;
+
+ //loading the geometry for the source group
+
+ ParaMEDMEM::StructuredCoincidentDEC dec(source_group, target_group);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (source_group.containsMyRank()) {
+ string master = filename_xml1;
+
+ // Each source proc reads its own partition: <prefix><rank+1>.med.
+ ostringstream strstream;
+ strstream <<master<<rank+1<<".med";
+ ostringstream meshname;
+ meshname<< "Mesh_2_"<< rank+1;
+
+ mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+
+
+ paramesh=new ParaMESH (mesh,source_group,"source mesh");
+
+ // 6 components, all local on the source side.
+ ParaMEDMEM::ComponentTopology comptopo(6);
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+
+ int nb_local=mesh->getNumberOfCells();
+ const int* global_numbering = paramesh->getGlobalNumberingCell();
+
+ // value[cell][comp] = global_cell_id*6 + comp, so the target side can
+ // verify reception independently of the source partitioning.
+ double *value=parafield->getField()->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ for (int icomp=0; icomp<6; icomp++)
+ value[ielem*6+icomp]=global_numbering[ielem]*6+icomp;
+
+ //ICoCo::Field* icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+
+ dec.attachLocalField(parafield);
+ dec.synchronize();
+ dec.sendData();
+ //delete icocofield;
+ }
+
+ //loading the geometry for the target group
+ if (target_group.containsMyRank()) {
+
+ // Each target proc reads the complete (unsplit) mesh.
+ string meshname2("Mesh_2");
+ mesh = MEDLoader::ReadUMeshFromFile(filename_2.c_str(),meshname2.c_str(),0);
+
+ paramesh=new ParaMESH (mesh,self_group,"target mesh");
+ // 6 components spread across the target group: each proc owns a slice.
+ ParaMEDMEM::ComponentTopology comptopo(6, &target_group);
+
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+
+ int nb_local=mesh->getNumberOfCells();
+ double *value=parafield->getField()->getArray()->getPointer();
+ for (int ielem=0; ielem<nb_local; ielem++)
+ for (int icomp=0; icomp<comptopo.nbLocalComponents(); icomp++)
+ value[ielem*comptopo.nbLocalComponents()+icomp]=0.0;
+ //ICoCo::Field* icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+
+ dec.attachLocalField(parafield);
+ dec.synchronize();
+ dec.recvData();
+
+ //checking validity of field
+ // Received values must equal global_cell_id*6 + (first + local comp).
+ const double* recv_value = parafield->getField()->getArray()->getPointer();
+ for (int i=0; i< nb_local; i++) {
+ int first = comptopo.firstLocalComponent();
+ for (int icomp = 0; icomp < comptopo.nbLocalComponents(); icomp++)
+ CPPUNIT_ASSERT_DOUBLES_EQUAL(recv_value[i*comptopo.nbLocalComponents()+icomp],(double)(i*6+icomp+first),1e-12);
+ }
+ //delete icocofield;
+ }
+ delete parafield;
+ delete paramesh;
+ mesh->decrRef();
+
+ // MPI_Barrier(MPI_COMM_WORLD);
+
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+// --- include all MPIAccess Test
+
+#include "MPIAccessTest.hxx"
+
+// --- Registers the fixture into the 'registry'
+
+CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessTest );
+
+// --- generic Main program from KERNEL_SRC/src/Basics/Test
+
+#include "MPIMainTest.hxx"
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+// --- include all MPIAccessDEC Test
+
+#include "MPIAccessDECTest.hxx"
+
+// --- Registers the fixture into the 'registry'
+
+CPPUNIT_TEST_SUITE_REGISTRATION( MPIAccessDECTest );
+
+// --- generic Main program from KERNEL_SRC/src/Basics/Test
+
+#include "MPIMainTest.hxx"
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+
+// --- include all MEDMEM Test
+
+#include "ParaMEDMEMTest.hxx"
+
+// --- Registers the fixture into the 'registry'
+
+CPPUNIT_TEST_SUITE_REGISTRATION( ParaMEDMEMTest );
+
+// --- generic Main program from KERNEL_SRC/src/Basics/Test
+
+#include "MPIMainTest.hxx"
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+#include "MPIAccessDEC.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Run the AllToAll DEC test with synchronous (blocking) point-to-point exchanges.
+void MPIAccessDECTest::test_AllToAllDECSynchronousPointToPoint() {
+ test_AllToAllDEC( false ) ;
+}
+// Run the AllToAll DEC test with asynchronous (non-blocking) point-to-point exchanges.
+void MPIAccessDECTest::test_AllToAllDECAsynchronousPointToPoint() {
+ test_AllToAllDEC( true ) ;
+}
+
+// Check an MPI status code: if 'sts' is not MPI_SUCCESS, decode it through
+// MPIAccess::errorString, log the message, and fail the test via CPPUNIT_FAIL.
+// @param sts        MPI return code to check
+// @param myrank     calling process rank (only used in messages)
+// @param mpi_access accessor used to decode the error string; taken by
+//                   reference — the original passed the whole MPIAccess object
+//                   by value, needlessly copying it on every call (the sibling
+//                   chksts in the TimeDEC test already avoids the copy).
+static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess & mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "===========================================================" << endl
+ << "test_AllToAllDEC" << myrank << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+// Stress-test MPIAccessDEC::allToAll: the world is split in two halves
+// (sources and targets); maxreq all-to-all exchanges of datamsglength ints
+// per proc are performed, draining receive requests after each exchange and
+// send requests at the end. 'Asynchronous' selects non-blocking transfers.
+void MPIAccessDECTest::test_AllToAllDEC( bool Asynchronous ) {
+
+ cout << "test_AllToAllDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ // deltatime-style tables elsewhere cap the proc count at 11.
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be runned with more than 1 proc and less than 12 procs"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ cout << "test_AllToAllDEC" << myrank << endl ;
+
+ // First half of the ranks are sources, second half targets.
+ ParaMEDMEM::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
+ ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
+
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+#define maxreq 100
+#define datamsglength 10
+
+ // int sts ;
+ int sendcount = datamsglength ;
+ int recvcount = datamsglength ;
+ int * recvbuf = new int[datamsglength*size] ;
+
+ int ireq ;
+ for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
+ // NOTE(review): sendbuf is never deleted here — presumably the DEC takes
+ // ownership of the send buffer and frees it once the (possibly
+ // asynchronous) sends complete; confirm against MPIAccessDEC::allToAll,
+ // otherwise this loop leaks maxreq buffers per process.
+ int * sendbuf = new int[datamsglength*size] ;
+ int j ;
+ // Encode sender rank / iteration / offset in each value for traceability.
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
+
+ MyMPIAccessDEC->allToAll( sendbuf, sendcount , MPI_INT ,
+ recvbuf, recvcount , MPI_INT ) ;
+
+ // Drain and delete all pending receive requests before the next round.
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ }
+
+ // Wait out any send requests still in flight (asynchronous mode).
+ int nSendReq = mpi_access->sendRequestIdsSize() ;
+ cout << "test_AllToAllDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
+ << endl ;
+ if ( nSendReq ) {
+ int *ArrayOfSendRequests = new int[nSendReq] ;
+ int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
+ delete [] ArrayOfSendRequests ;
+ }
+
+ // After draining, no receive request may remain.
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "test_AllToAllDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ mpi_access->barrier() ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+ delete [] recvbuf ;
+
+ // MPI_Finalize();
+
+ cout << "test_AllToAllDEC" << myrank << " OK" << endl ;
+
+ return ;
+}
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccessDEC.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Run the time-interpolated AllToAll DEC test with synchronous point-to-point exchanges.
+void MPIAccessDECTest::test_AllToAllTimeDECSynchronousPointToPoint() {
+ test_AllToAllTimeDEC( false ) ;
+}
+// Run the time-interpolated AllToAll DEC test with asynchronous point-to-point exchanges.
+void MPIAccessDECTest::test_AllToAllTimeDECAsynchronousPointToPoint() {
+ test_AllToAllTimeDEC( true ) ;
+}
+
+static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access->errorString(sts, msgerr, &lenerr) ;
+ cout << "test_AllToAllTimeDEC" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test_AllToAllTimeDEC" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+// Stress-test MPIAccessDEC::allToAllTime with a linear time interpolator.
+// Each rank advances its own clock by a rank-specific deltatime up to
+// maxtime, performing a time-stamped all-to-all at every step, then verifies
+// the interpolated receive buffers and that no requests are left pending.
+void MPIAccessDECTest::test_AllToAllTimeDEC( bool Asynchronous ) {
+
+ cout << "test_AllToAllTimeDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ // The deltatime table below has maxproc=11 entries, hence the upper bound.
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be runned with more than 1 proc and less than 12 procs"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // int Asynchronous = atoi(argv[1]);
+
+ cout << "test_AllToAllTimeDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
+
+ // First half of the ranks are sources, second half targets.
+ ParaMEDMEM::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
+ ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
+
+ // LinearTimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+ // Asynchronous , LinearInterp , 0.5 ) ;
+ MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+ cout << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
+
+#define maxproc 11
+#define maxreq 10000
+#define datamsglength 10
+
+ // NOTE(review): this 'sts' is shadowed by the inner 'int sts' declared in
+ // the time loop below; only the final checkSent() result is stored here.
+ int sts ;
+ int sendcount = datamsglength ;
+ int recvcount = datamsglength ;
+
+ double time = 0 ;
+ // double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+ double deltatime[maxproc] = {1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.} ;
+ double maxtime = maxreq ;
+ double nextdeltatime = deltatime[myrank] ;
+ // MyMPIAccessDEC->InitTime( time , deltatime[myrank] , maxtime ) ;
+ // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
+ // Advance local time; a nextdeltatime of 0 signals the last step.
+ for ( time = 0 ; time <= maxtime && nextdeltatime != 0 ; time+=nextdeltatime ) {
+ if ( time != 0 ) {
+ nextdeltatime = deltatime[myrank] ;
+ if ( time+nextdeltatime > maxtime ) {
+ nextdeltatime = 0 ;
+ }
+ // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
+ }
+ MyMPIAccessDEC->setTime( time , nextdeltatime ) ;
+ cout << "test_AllToAllTimeDEC" << myrank << "=====TIME " << time << "=====DELTATIME "
+ << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
+ // NOTE(review): sendbuf is not deleted in this loop — presumably the DEC
+ // takes ownership for time interpolation; confirm against
+ // MPIAccessDEC::allToAllTime, otherwise this leaks one buffer per step.
+ int * sendbuf = new int[datamsglength*size] ;
+ // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
+ int * recvbuf = new int[datamsglength*size] ;
+ int j ;
+ // Values encode sender rank and destination slot, independent of time.
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
+
+ int sts = MyMPIAccessDEC->allToAllTime( sendbuf, sendcount , MPI_INT ,
+ recvbuf, recvcount , MPI_INT ) ;
+ chksts( sts , myrank , mpi_access ) ;
+
+ // cout << "test_AllToAllTimeDEC" << myrank << " recvbuf before CheckSent" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // cout << " " << recvbuf[i] ;
+ // }
+ // cout << endl ;
+
+ // cout << "test_AllToAllTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ // allToAllTime must leave no pending receive request behind.
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq != 0 ) {
+ ostringstream strstream ;
+ strstream << "=============================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
+ << endl << "============================================================="
+ << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // cout << "test_AllToAllTimeDEC" << myrank << " recvbuf" << endl ;
+ // Validate the interpolated values; keep scanning after the first mismatch
+ // so the log shows where the corruption starts and ends.
+ bool badrecvbuf = false ;
+ for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ if ( recvbuf[i] != (i/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(i%datamsglength) ) {
+ badrecvbuf = true ;
+ cout << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
+ << recvbuf[i] << " # " << (i/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(i%datamsglength) << endl ;
+ }
+ else if ( badrecvbuf ) {
+ cout << "test_AllToAllTimeDEC" << myrank << " recvbuf[" << i << "] "
+ << recvbuf[i] << " == " << (i/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(i%datamsglength) << endl ;
+ }
+ }
+ if ( badrecvbuf ) {
+ ostringstream strstream ;
+ strstream << "==============================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " badrecvbuf"
+ << endl << "============================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ delete [] recvbuf ;
+ }
+
+ // Final bookkeeping: all sends flushed, no requests left on either side.
+ cout << "test_AllToAllTimeDEC" << myrank << " final CheckSent" << endl ;
+ sts = MyMPIAccessDEC->checkSent() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "================================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " final CheckSent ERROR"
+ << endl << "================================================================"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ int nSendReq = mpi_access->sendRequestIdsSize() ;
+ cout << "test_AllToAllTimeDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
+ << endl ;
+ if ( nSendReq ) {
+ int *ArrayOfSendRequests = new int[nSendReq] ;
+ int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
+ delete [] ArrayOfSendRequests ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "===============================================================" << endl
+ << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error"
+ << endl << "==============================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "test_AllToAllTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ cout << "test_AllToAllTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_AllToAllTimeDEC" << myrank << " Barrier done" << endl ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ // delete aLinearInterpDEC ;
+ delete MyMPIAccessDEC ;
+
+ // MPI_Finalize();
+
+ cout << "test_AllToAllTimeDEC" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccessDEC.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Run the vector-variant AllToAllv DEC test with synchronous point-to-point exchanges.
+void MPIAccessDECTest::test_AllToAllvDECSynchronousPointToPoint() {
+ test_AllToAllvDEC( false ) ;
+}
+// Entry point: run the AllToAllv DEC exchange test in asynchronous mode.
+void MPIAccessDECTest::test_AllToAllvDECAsynchronousPointToPoint() {
+  test_AllToAllvDEC(true);
+}
+
+// Check an MPI status code: when sts is not MPI_SUCCESS, print the decoded
+// MPI error string and abort the test through CPPUNIT_FAIL.
+// FIX: mpi_access is now taken by reference; the original signature passed
+// ParaMEDMEM::MPIAccess by value, copying the whole access object on every
+// call (the sibling test files pass it by pointer for the same reason).
+static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess & mpi_access ) {
+  char msgerr[MPI_MAX_ERROR_STRING] ;
+  int lenerr ;
+  if ( sts != MPI_SUCCESS ) {
+    // Translate the numeric MPI status into a human-readable message.
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test_AllToAllvDEC" << myrank << " lenerr " << lenerr << " "
+         << msgerr << endl ;
+    ostringstream strstream ;
+    strstream << "==========================================================="
+              << "test_AllToAllvDEC" << myrank << " KO"
+              << "==========================================================="
+              << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  return ;
+}
+
+// Test of MPIAccessDEC::allToAllv() (variable-length all-to-all exchange).
+// Ranks are split into a source group ([0,size/2[) and a target group
+// ([size/2,size[); maxreq exchanges of MPI_INT buffers are performed,
+// synchronously or asynchronously depending on the Asynchronous flag.
+// Fails through CPPUNIT_FAIL if receive requests are left pending at the end.
+void MPIAccessDECTest::test_AllToAllvDEC( bool Asynchronous ) {
+
+ cout << "test_AllToAllvDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ // The test only supports between 2 and 11 processes.
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllvDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be runned with more than 1 proc and less than 12 procs"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // int Asynchronous = atoi(argv[1]);
+
+ cout << "test_AllToAllvDEC" << myrank << endl ;
+
+ // Lower half of the ranks forms the source group, upper half the target group.
+ ParaMEDMEM::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
+ ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
+
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+#define maxreq 100
+#define datamsglength 10
+
+ // Variable-length layout: this rank sends datamsglength-i ints to process i
+ // and expects datamsglength-myrank ints from each peer; displacements are
+ // regular slots of datamsglength ints.
+ // int sts ;
+ int *sendcounts = new int[size] ;
+ int *sdispls = new int[size] ;
+ int *recvcounts = new int[size] ;
+ int *rdispls = new int[size] ;
+ for ( i = 0 ; i < size ; i++ ) {
+ sendcounts[i] = datamsglength-i;
+ sdispls[i] = i*datamsglength ;
+ recvcounts[i] = datamsglength-myrank;
+ rdispls[i] = i*datamsglength ;
+ }
+ int * recvbuf = new int[datamsglength*size] ;
+
+ int ireq ;
+ for ( ireq = 0 ; ireq < maxreq ; ireq++ ) {
+ // NOTE(review): sendbuf is allocated each iteration and never deleted in
+ // this function — presumably MPIAccessDEC takes ownership of the buffer
+ // in asynchronous mode; confirm against MPIAccessDEC::allToAllv.
+ int * sendbuf = new int[datamsglength*size] ;
+ // int * sendbuf = (int *) malloc( sizeof(int)*datamsglength*size) ;
+ int j ;
+ // Encode rank/iteration/index into sendbuf; preset recvbuf to -1 so
+ // slots that should stay untouched can be detected.
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + ireq*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
+
+ MyMPIAccessDEC->allToAllv( sendbuf, sendcounts , sdispls , MPI_INT ,
+ recvbuf, recvcounts , rdispls , MPI_INT ) ;
+
+ // cout << "test_AllToAllvDEC" << myrank << " recvbuf before CheckSent" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // cout << " " << recvbuf[i] ;
+ // }
+ // cout << endl ;
+
+ // cout << "test_AllToAllvDEC" << myrank << " sendbuf " << sendbuf << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ // Wait for (and release) all receive requests pending for this iteration.
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ // cout << "test_AllToAllvDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests" << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ mpi_access->deleteRequests( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+
+ // cout << "test_AllToAllvDEC" << myrank << " recvbuf" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // cout << " " << recvbuf[i] ;
+ // }
+ // cout << endl ;
+ }
+
+ // cout << "test_AllToAllvDEC" << myrank << " final CheckSent" << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ // Drain any send requests still pending after the exchange loop.
+ int nSendReq = mpi_access->sendRequestIdsSize() ;
+ cout << "test_AllToAllvDEC" << myrank << " final SendRequestIds " << nSendReq << " SendRequests"
+ << endl ;
+ if ( nSendReq ) {
+ int *ArrayOfSendRequests = new int[nSendReq] ;
+ int nReq = mpi_access->sendRequestIds( nSendReq, ArrayOfSendRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfSendRequests ) ;
+ delete [] ArrayOfSendRequests ;
+ }
+
+ // A receive request still pending at this point is a test failure.
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "test_AllToAllvDEC" << myrank << " final RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ mpi_access->barrier() ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+ delete [] sendcounts ;
+ delete [] sdispls ;
+ delete [] recvcounts ;
+ delete [] rdispls ;
+ delete [] recvbuf ;
+
+ // MPI_Finalize();
+
+ cout << "test_AllToAllvDEC" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+#include <time.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccessDEC.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Synchronous run that delegates the exchange to native MPI_Alltoallv.
+void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousNative() {
+  test_AllToAllvTimeDEC(false, true);
+}
+// Synchronous run using the MPIAccessDEC point-to-point protocol.
+void MPIAccessDECTest::test_AllToAllvTimeDECSynchronousPointToPoint() {
+  test_AllToAllvTimeDEC(false, false);
+}
+// Asynchronous run using the MPIAccessDEC point-to-point protocol.
+void MPIAccessDECTest::test_AllToAllvTimeDECAsynchronousPointToPoint() {
+  test_AllToAllvTimeDEC(true, false);
+}
+
+static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ mpi_access->errorString(sts, msgerr, &lenerr) ;
+ cout << "test_AllToAllvTimeDEC" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test_AllToAllvTimeDEC" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+// Test of MPIAccessDEC::allToAllvTime() (time-stamped variable-length
+// all-to-all): each process advances a simulated clock by its own deltatime
+// and exchanges MPI_INT buffers, either through the DEC's point-to-point
+// time protocol or through native MPI_Alltoallv when UseMPINative is true.
+// Received buffers are checked against the sender encoding; any mismatch or
+// pending receive request fails the test through CPPUNIT_FAIL.
+void MPIAccessDECTest::test_AllToAllvTimeDEC( bool Asynchronous , bool UseMPINative ) {
+
+ cout << "test_AllToAllvTimeDEC" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ // The test only supports between 2 and 11 processes.
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be runned with more than 1 proc and less than 12 procs"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // int Asynchronous = atoi(argv[1]) ;
+ int UseMPI_Alltoallv = UseMPINative ;
+ // if ( argc == 3 ) {
+ // UseMPI_Alltoallv = atoi(argv[2]) ;
+ // }
+
+ cout << "test_AllToAllvTimeDEC" << myrank << " Asynchronous " << Asynchronous
+ << " UseMPI_Alltoallv " << UseMPI_Alltoallv << endl ;
+
+ // Lower half of the ranks forms the source group, upper half the target group.
+ ParaMEDMEM::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
+ ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
+
+ // TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0.5 ) ;
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+ // Asynchronous , LinearInterp , 0.5 ) ;
+ MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp , 0.5 ) ;
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+ cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
+
+#define maxproc 11
+#define maxreq 10000
+#define datamsglength 10
+
+ int sts ;
+ // Counts/displacements of the data exchange plus one TimeMessage per peer.
+ int *sendcounts = new int[size] ;
+ int *sdispls = new int[size] ;
+ int *recvcounts = new int[size] ;
+ int *rdispls = new int[size] ;
+ int *sendtimecounts = new int[size] ;
+ int *stimedispls = new int[size] ;
+ int *recvtimecounts = new int[size] ;
+ int *rtimedispls = new int[size] ;
+ for ( i = 0 ; i < size ; i++ ) {
+ sendcounts[i] = datamsglength-i ;
+ sdispls[i] = i*datamsglength ;
+ recvcounts[i] = datamsglength-myrank ;
+ rdispls[i] = i*datamsglength ;
+ sendtimecounts[i] = 1 ;
+ stimedispls[i] = 0 ;
+ recvtimecounts[i] = 1 ;
+ rtimedispls[i] = i ;
+ //rtimedispls[i] = i*mpi_access->TimeExtent() ;
+ }
+
+ // Simulated clock: advance by this rank's deltatime up to maxtime, with a
+ // final step of 0 signalling the end of the exchange.
+ double timeLoc = 0 ;
+ double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+ double maxtime ;
+ double nextdeltatime = deltatime[myrank] ;
+ if ( UseMPI_Alltoallv ) {
+ maxtime = maxreq*nextdeltatime - 0.1 ;
+ }
+ else {
+ maxtime = maxreq ;
+ // MyMPIAccessDEC->InitTime( time , nextdeltatime , maxtime ) ;
+ }
+ time_t begintime = time(NULL) ;
+ // for ( time = 0 ; time <= maxtime ; time+=deltatime[myrank] ) {
+ for ( timeLoc = 0 ; timeLoc <= maxtime && nextdeltatime != 0 ; timeLoc+=nextdeltatime ) {
+ nextdeltatime = deltatime[myrank] ;
+ if ( timeLoc != 0 ) {
+ nextdeltatime = deltatime[myrank] ;
+ if ( timeLoc+nextdeltatime > maxtime ) {
+ nextdeltatime = 0 ;
+ }
+ // MyMPIAccessDEC->NextTime( nextdeltatime ) ;
+ }
+ MyMPIAccessDEC->setTime( timeLoc , nextdeltatime ) ;
+ // FIX: stream the loop variable timeLoc; the original streamed "time",
+ // which after the rename to timeLoc names the <time.h> function, so the
+ // trace printed a function pointer instead of the simulated time.
+ cout << "test_AllToAllvTimeDEC" << myrank << "=====TIME " << timeLoc << "=====DELTATIME "
+ << nextdeltatime << "=====MAXTIME " << maxtime << " ======" << endl ;
+ // NOTE(review): sendbuf is deleted below only in the MPI_Alltoallv branch
+ // — presumably MPIAccessDEC takes ownership of the buffer in the
+ // allToAllvTime branch; confirm against MPIAccessDEC.
+ int * sendbuf = new int[datamsglength*size] ;
+ // int * sendbuf = (int *) malloc(sizeof(int)*datamsglength*size) ;
+ int * recvbuf = new int[datamsglength*size] ;
+ int j ;
+ for ( j = 0 ; j < datamsglength*size ; j++ ) {
+ sendbuf[j] = myrank*1000000 + (j/datamsglength)*1000 + j ;
+ recvbuf[j] = -1 ;
+ }
+
+ if ( UseMPI_Alltoallv ) {
+ // Native path: exchange TimeMessages and data with plain MPI_Alltoallv
+ // through the CommInterface.
+ const MPI_Comm* comm = MyMPIAccessDEC->getComm();
+ TimeMessage * aSendTimeMessage = new TimeMessage ;
+ aSendTimeMessage->time = timeLoc ;
+ // aSendTimeMessage->deltatime = deltatime[myrank] ;
+ aSendTimeMessage->deltatime = nextdeltatime ;
+ // aSendTimeMessage->maxtime = maxtime ;
+ aSendTimeMessage->tag = (int ) (timeLoc/deltatime[myrank]) ;
+ TimeMessage * aRecvTimeMessage = new TimeMessage[size] ;
+ interface.allToAllV(aSendTimeMessage, sendtimecounts , stimedispls ,
+ mpi_access->timeType() ,
+ aRecvTimeMessage, recvtimecounts , rtimedispls ,
+ mpi_access->timeType() , *comm ) ;
+ // for ( j = 0 ; j < size ; j++ ) {
+ // cout << "test_AllToAllvTimeDEC" << myrank << " TimeMessage received " << j << " "
+ // << aRecvTimeMessage[j] << endl ;
+ // }
+ delete aSendTimeMessage ;
+ delete [] aRecvTimeMessage ;
+ interface.allToAllV(sendbuf, sendcounts , sdispls , MPI_INT ,
+ recvbuf, recvcounts , rdispls , MPI_INT , *comm ) ;
+ // free(sendbuf) ;
+ delete [] sendbuf ;
+ }
+ else {
+ // DEC path; reuse the outer sts (the original re-declared a shadowing
+ // local here).
+ sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_INT ,
+ recvbuf, recvcounts , rdispls , MPI_INT ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+
+ // cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf before CheckSent" ;
+ // for ( i = 0 ; i < datamsglength*size ; i++ ) {
+ // cout << " " << recvbuf[i] ;
+ // }
+ // cout << endl ;
+
+ // cout << "test_AllToAllvTimeDEC" << myrank << " sendbuf " << sendbuf << endl ;
+ // MyMPIAccessDEC->CheckSent() ;
+
+ // After each step no receive request may still be pending.
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq != 0 ) {
+ ostringstream strstream ;
+ strstream << "=============================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " WaitAllRecv " << nRecvReq << " Requests # 0 ERROR"
+ << endl << "============================================================="
+ << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ // cout << "test_AllToAllvTimeDEC" << myrank << " check of recvbuf" << endl ;
+ // Verify received values against the sender encoding; slots beyond
+ // recvcounts[i] must keep their -1 preset.
+ bool badrecvbuf = false ;
+ for ( i = 0 ; i < size ; i++ ) {
+ int j ;
+ for ( j = 0 ; j < datamsglength ; j++ ) {
+ int index = i*datamsglength+j ;
+ if ( j < recvcounts[i] ) {
+ if ( recvbuf[index] != (index/datamsglength)*1000000 + myrank*1000 +
+ myrank*datamsglength+(index%datamsglength) ) {
+ badrecvbuf = true ;
+ cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " # " << (index/datamsglength)*1000000 +
+ myrank*1000 +
+ myrank*datamsglength+(index%datamsglength) << endl ;
+ }
+ else if ( badrecvbuf ) {
+ cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " == " << (index/datamsglength)*1000000 +
+ myrank*1000 +
+ myrank*datamsglength+(index%datamsglength) << endl ;
+ }
+ }
+ else if ( recvbuf[index] != -1 ) {
+ badrecvbuf = true ;
+ cout << "test_AllToAllvTimeDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " # -1" << endl ;
+ }
+ }
+ }
+ if ( badrecvbuf ) {
+ ostringstream strstream ;
+ strstream << "==============================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " badrecvbuf"
+ << endl << "============================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ delete [] recvbuf ;
+ }
+
+ cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
+
+ // Final checks: all sends and receives of the DEC must be complete.
+ cout << "test_AllToAllvTimeDEC" << myrank << " CheckFinalSent" << endl ;
+ sts = MyMPIAccessDEC->checkFinalSent() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "================================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " final CheckSent ERROR"
+ << endl << "================================================================"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ cout << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv" << endl ;
+ sts = MyMPIAccessDEC->checkFinalRecv() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "================================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " CheckFinalRecv ERROR"
+ << endl << "================================================================"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "===============================================================" << endl
+ << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error"
+ << endl << "==============================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "test_AllToAllvTimeDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ time_t endtime = time(NULL) ;
+ cout << "test_AllToAllvTimeDEC" << myrank << " begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ cout << "test_AllToAllvTimeDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_AllToAllvTimeDEC" << myrank << " Barrier done" << endl ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+ // delete aLinearInterpDEC ;
+
+ delete [] sendcounts ;
+ delete [] sdispls ;
+ delete [] recvcounts ;
+ delete [] rdispls ;
+ delete [] sendtimecounts ;
+ delete [] stimedispls ;
+ delete [] recvtimecounts ;
+ delete [] rtimedispls ;
+
+ // MPI_Finalize();
+
+ endtime = time(NULL) ;
+
+ cout << "test_AllToAllvTimeDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <math.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+#include <time.h>
+
+#include "MPIAccessDECTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccessDEC.hxx"
+#include "LinearTimeInterpolator.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Synchronous run of the double-precision time-interpolated exchange test.
+void MPIAccessDECTest::test_AllToAllvTimeDoubleDECSynchronousPointToPoint() {
+  test_AllToAllvTimeDoubleDEC(false);
+}
+// Asynchronous run of the double-precision time-interpolated exchange test.
+void MPIAccessDECTest::test_AllToAllvTimeDoubleDECAsynchronousPointToPoint() {
+  test_AllToAllvTimeDoubleDEC(true);
+}
+
+// Check the MPI status code returned by an MPIAccess call: when sts is not
+// MPI_SUCCESS, print the decoded MPI error message and fail the CppUnit test.
+static void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ if ( sts != MPI_SUCCESS ) {
+ // Translate the numeric status into a readable string.
+ mpi_access->errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ return ;
+}
+
+// Test of MPIAccessDEC::allToAllvTime() with MPI_DOUBLE data and per-process
+// clocks: each rank advances its own simulated time by its own deltatime and
+// the DEC time-interpolates the exchanged buffers (LinearTimeInterp).
+// Runs with 2 to 11 processes; source group = ranks [0,size/2[, target
+// group = ranks [size/2,size[. Fails through CPPUNIT_FAIL on bad data or
+// requests left pending.
+void MPIAccessDECTest::test_AllToAllvTimeDoubleDEC( bool Asynchronous ) {
+
+ cout << "test_AllToAllvTimeDoubleDEC" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ // The test only supports between 2 and 11 processes.
+ if ( size < 2 || size > 11 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_AllToAllTimeDEC" << endl
+ << " (nbprocs >=2)" << endl
+ << "test must be runned with more than 1 proc and less than 12 procs"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+// int Asynchronous = atoi(argv[1]) ;
+
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Asynchronous " << Asynchronous << endl ;
+
+ // Lower half of the ranks forms the source group, upper half the target group.
+ ParaMEDMEM::CommInterface interface ;
+ std::set<int> sourceprocs;
+ std::set<int> targetprocs;
+ int i ;
+ for ( i = 0 ; i < size/2 ; i++ ) {
+ sourceprocs.insert(i);
+ }
+ for ( i = size/2 ; i < size ; i++ ) {
+ targetprocs.insert(i);
+ }
+
+ ParaMEDMEM::MPIProcessorGroup* sourcegroup = new ParaMEDMEM::MPIProcessorGroup(interface,sourceprocs) ;
+ ParaMEDMEM::MPIProcessorGroup* targetgroup = new ParaMEDMEM::MPIProcessorGroup(interface,targetprocs) ;
+
+// TimeInterpolator * aLinearInterpDEC = new LinearTimeInterpolator( 0 ) ;
+ MPIAccessDEC * MyMPIAccessDEC = new MPIAccessDEC( *sourcegroup , *targetgroup ,
+ Asynchronous ) ;
+// Asynchronous , LinearInterp , 0.5 ) ;
+ MyMPIAccessDEC->setTimeInterpolator( LinearTimeInterp ) ;
+ MPIAccess * mpi_access = MyMPIAccessDEC->getMPIAccess() ;
+
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+#define maxproc 11
+#define maxreq 100
+#define datamsglength 10
+
+ int sts ;
+ // Variable-length layout: send datamsglength-i doubles to process i,
+ // expect datamsglength-myrank doubles from each peer; one TimeMessage per
+ // peer carries the time metadata.
+ int *sendcounts = new int[size] ;
+ int *sdispls = new int[size] ;
+ int *recvcounts = new int[size] ;
+ int *rdispls = new int[size] ;
+ int *sendtimecounts = new int[size] ;
+ int *stimedispls = new int[size] ;
+ int *recvtimecounts = new int[size] ;
+ int *rtimedispls = new int[size] ;
+ for ( i = 0 ; i < size ; i++ ) {
+ sendcounts[i] = datamsglength-i ;
+ sdispls[i] = i*datamsglength ;
+ recvcounts[i] = datamsglength-myrank ;
+ rdispls[i] = i*datamsglength ;
+ sendtimecounts[i] = 1 ;
+ stimedispls[i] = 0 ;
+ recvtimecounts[i] = 1 ;
+ rtimedispls[i] = i ;
+ }
+
+ // Per-process clocks: each rank advances by its own deltatime up to maxtime.
+ double timeLoc[maxproc] ;
+ double deltatime[maxproc] = {1.,2.1,3.2,4.3,5.4,6.5,7.6,8.7,9.8,10.9,11.} ;
+ double maxtime[maxproc] ;
+ double nextdeltatime[maxproc] ;
+ for ( i = 0 ; i < size ; i++ ) {
+ timeLoc[i] = 0 ;
+ maxtime[i] = maxreq ;
+ nextdeltatime[i] = deltatime[i] ;
+ }
+ time_t begintime = time(NULL) ;
+ for ( timeLoc[myrank] = 0 ; timeLoc[myrank] <= maxtime[myrank] && nextdeltatime[myrank] != 0 ;
+ timeLoc[myrank]+=nextdeltatime[myrank] ) {
+//local and target times
+ int target ;
+ // Mirror each target's clock locally so the expected value of the
+ // received (interpolated) data can be computed below.
+ for ( target = 0 ; target < size ; target++ ) {
+ nextdeltatime[target] = deltatime[target] ;
+ if ( timeLoc[target] != 0 ) {
+ if ( timeLoc[target]+nextdeltatime[target] > maxtime[target] ) {
+ nextdeltatime[target] = 0 ;
+ }
+ }
+ if ( target != myrank ) {
+ // Advance the mirrored target clock until it passes the local time.
+ while ( timeLoc[myrank] >= timeLoc[target] ) {
+ timeLoc[target] += deltatime[target] ;
+ }
+ }
+ }
+ MyMPIAccessDEC->setTime( timeLoc[myrank] , nextdeltatime[myrank] ) ;
+ cout << "test" << myrank << "=====TIME " << timeLoc[myrank] << "=====DELTATIME "
+ << nextdeltatime[myrank] << "=====MAXTIME " << maxtime[myrank] << " ======"
+ << endl ;
+ // NOTE(review): sendbuf is not deleted in this loop — presumably
+ // MPIAccessDEC takes ownership of it in allToAllvTime; confirm.
+ double * sendbuf = new double[datamsglength*size] ;
+// double * sendbuf = (double *) malloc(sizeof(double)*datamsglength*size) ;
+ double * recvbuf = new double[datamsglength*size] ;
+ int j ;
+ //cout << "test_AllToAllvTimeDoubleDEC" << myrank << " sendbuf" ;
+ // Encode rank/target/time-step/index into sendbuf; preset recvbuf to -1.
+ for ( target = 0 ; target < size ; target++ ) {
+ for ( j = 0 ; j < datamsglength ; j++ ) {
+ //sendbuf[j] = myrank*10000 + (j/datamsglength)*100 + j ;
+ sendbuf[target*datamsglength+j] = myrank*1000000 + target*10000 +
+ (timeLoc[myrank]/deltatime[myrank])*100 + j ;
+ //cout << " " << (int ) sendbuf[target*datamsglength+j] ;
+ recvbuf[target*datamsglength+j] = -1 ;
+ }
+ //cout << endl ;
+ }
+
+ // NOTE(review): this local sts shadows the outer sts declared above.
+ int sts = MyMPIAccessDEC->allToAllvTime( sendbuf, sendcounts , sdispls , MPI_DOUBLE ,
+ recvbuf, recvcounts , rdispls , MPI_DOUBLE ) ;
+ chksts( sts , myrank , mpi_access ) ;
+
+// cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf before CheckSent" ;
+// for ( i = 0 ; i < datamsglength*size ; i++ ) {
+// cout << " " << recvbuf[i] ;
+// }
+// cout << endl ;
+
+ // After each step no receive request may still be pending.
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq != 0 ) {
+ ostringstream strstream ;
+ strstream << "=============================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " WaitAllRecv "
+ << nRecvReq << " Requests # 0 ERROR"
+ << endl << "============================================================"
+ << endl ;
+ int *ArrayOfRecvRequests = new int[nRecvReq] ;
+ int nReq = mpi_access->recvRequestIds( nRecvReq, ArrayOfRecvRequests ) ;
+ mpi_access->waitAll( nReq , ArrayOfRecvRequests ) ;
+ delete [] ArrayOfRecvRequests ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+// cout << "test_AllToAllvTimeDoubleDEC" << myrank << " check of recvbuf" << endl ;
+ // Compare received values with the sender encoding at the mirrored target
+ // time; the tolerance of 101 presumably absorbs the time-interpolation
+ // offset — confirm against the interpolator. Slots beyond
+ // recvcounts[target] must keep their -1 preset.
+ bool badrecvbuf = false ;
+ for ( target = 0 ; target < size ; target++ ) {
+ int j ;
+ for ( j = 0 ; j < datamsglength ; j++ ) {
+ int index = target*datamsglength+j ;
+ if ( j < recvcounts[target] ) {
+ if ( fabs(recvbuf[index] - (target*1000000 + myrank*10000 +
+ (timeLoc[target]/deltatime[target])*100 + j)) > 101) {
+ badrecvbuf = true ;
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " target " << target << " timeLoc[target] "
+ << timeLoc[target] << " recvbuf[" << index << "] " << (int ) recvbuf[index]
+ << " # " << (int ) (target*1000000 +
+ myrank*10000 + (timeLoc[target]/deltatime[target])*100 + j)
+ << endl ;
+ }
+ else if ( badrecvbuf ) {
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " ~= " << (int ) (target*1000000 +
+ myrank*10000 + (timeLoc[target]/deltatime[target])*100 + j) << endl ;
+ }
+ }
+ else if ( recvbuf[index] != -1 ) {
+ badrecvbuf = true ;
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " recvbuf[" << index << "] "
+ << recvbuf[index] << " # -1" << endl ;
+ }
+ }
+ }
+ if ( badrecvbuf ) {
+ ostringstream strstream ;
+ strstream << "==================================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " badrecvbuf"
+ << endl << "=================================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ delete [] recvbuf ;
+ }
+
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+ // Final checks: all sends and receives of the DEC must be complete.
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent" << endl ;
+ sts = MyMPIAccessDEC->checkFinalSent() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "=================================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalSent ERROR"
+ << endl << "================================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv" << endl ;
+ sts = MyMPIAccessDEC->checkFinalRecv() ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "=================================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " CheckFinalRecv ERROR"
+ << endl << "================================================================"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ int nRecvReq = mpi_access->recvRequestIdsSize() ;
+ if ( nRecvReq ) {
+ ostringstream strstream ;
+ strstream << "===============================================================" << endl
+ << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests # 0 Error"
+ << endl << "==============================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " RecvRequestIds " << nRecvReq
+ << " RecvRequests = 0 OK" << endl ;
+ }
+
+ time_t endtime = time(NULL) ;
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ cout << "test" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+ delete sourcegroup ;
+ delete targetgroup ;
+ delete MyMPIAccessDEC ;
+// delete aLinearInterpDEC ;
+
+ delete [] sendcounts ;
+ delete [] sdispls ;
+ delete [] recvcounts ;
+ delete [] rdispls ;
+ delete [] sendtimecounts ;
+ delete [] stimedispls ;
+ delete [] recvtimecounts ;
+ delete [] rtimedispls ;
+
+// MPI_Finalize();
+
+ endtime = time(NULL) ;
+
+ cout << "test_AllToAllvTimeDoubleDEC" << myrank << " OK begintime " << begintime << " endtime " << endtime
+ << " elapse " << endtime-begintime << " " << maxtime[myrank]/deltatime[myrank]
+ << " calls to AllToAll" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <time.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Test MPIAccess::cancel() : cancellation of probed messages and of pending
+// receive requests.
+// Rank 0 ISends 5 MPI_INT values then 5 MPI_DOUBLE values to rank 1 ;
+// rank 1 receives the ints normally, then IProbes each double, posts a
+// deliberately mismatched MPI_INT IRecv for it and cancels both the probed
+// message and the pending request.  The whole exchange is repeated 3 times.
+// Must be run with at least 2 procs ; extra ranks only join the final barrier.
+void MPIAccessTest::test_MPI_Access_Cancel() {
+
+  cout << "test_MPI_Access_Cancel" << endl ;
+
+//  MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 2 ) {
+    ostringstream strstream ;
+    strstream << "test_MPI_Access_Cancel must be runned with 2 procs" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+
+  cout << "test_MPI_Access_Cancel" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Only ranks 0 and 1 take part in the exchange ; others just synchronise.
+  if ( myrank >= 2 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  int target = 1 - myrank ;
+  int intsendbuf[5] ;
+  double doublesendbuf[10] ;
+  int RequestId[10] ;
+  int sts ;
+  int i , j ;
+  for ( j = 0 ; j < 3 ; j++ ) {
+    for ( i = 0 ; i < 10 ; i++ ) {
+      cout << "test" << myrank << " ============================ i " << i
+           << "============================" << endl ;
+      if ( myrank == 0 ) {
+        // Sender side : 5 MPI_INT messages followed by 5 MPI_DOUBLE messages.
+        if ( i < 5 ) {
+          intsendbuf[i] = i ;
+          sts = mpi_access.ISend(&intsendbuf[i],1,MPI_INT,target, RequestId[i]) ;
+          cout << "test" << myrank << " Send MPI_INT RequestId " << RequestId[i]
+               << endl ;
+        }
+        else {
+          doublesendbuf[i] = i ;
+          sts = mpi_access.ISend(&doublesendbuf[i],1,MPI_DOUBLE,target,
+                                 RequestId[i]) ;
+          cout << "test" << myrank << " Send MPI_DOUBLE RequestId " << RequestId[i]
+               << endl ;
+        }
+      }
+      else {
+        // Receiver side : poll with IProbe until a message is available.
+        int flag = false ;
+        while ( !flag ) {
+          int source, tag, outcount ;
+          MPI_Datatype datatype ;
+          sts = mpi_access.IProbe(target, source, tag, datatype, outcount,
+                                  flag ) ;
+          if ( flag ) {
+            cout << "test" << myrank << " " << i << " IProbe target " << target
+                 << " source " << source << " tag " << tag
+                 << " outcount " << outcount << " flag " << flag << endl ;
+          }
+          else {
+            cout << "test" << myrank << " flag " << flag << endl ;
+            sleep( 1 ) ;
+          }
+          if ( flag ) {
+            int recvbuf ;
+            // An MPI_INT IRecv is posted even for the MPI_DOUBLE messages :
+            // those mismatched requests stay pending on purpose and are
+            // cancelled in the iprobe loop below.
+            sts = mpi_access.IRecv(&recvbuf,outcount,MPI_INT,source,
+                                   RequestId[i] ) ;
+            if ( datatype == MPI_INT ) {
+              int source, tag, error, outcount ;
+              mpi_access.wait( RequestId[i] ) ;
+              mpi_access.status( RequestId[i], source, tag, error, outcount,
+                                 true ) ;
+              if ( (outcount != 1) || (recvbuf != i) ) {
+                ostringstream strstream ;
+                strstream << "======================================================"
+                          << endl << "test" << myrank << " outcount " << outcount
+                          << " recvbuf " << recvbuf << " KO" << endl
+                          << "======================================================"
+                          << endl ;
+                cout << strstream.str() << endl ;
+                CPPUNIT_FAIL( strstream.str() ) ;
+              }
+              cout << "========================================================"
+                   << endl << "test" << myrank << " outcount " << outcount
+                   << " recvbuf " << recvbuf << " OK" << endl
+                   << "========================================================"
+                   << endl ;
+            }
+          }
+        }
+      }
+      char msgerr[MPI_MAX_ERROR_STRING] ;
+      int lenerr ;
+      mpi_access.errorString(sts, msgerr, &lenerr) ;
+      cout << "test" << myrank << " lenerr " << lenerr << " "
+           << msgerr << endl ;
+      if ( sts != MPI_SUCCESS ) {
+        ostringstream strstream ;
+        strstream << "==========================================================="
+                  << endl << "test" << myrank << " KO"
+                  << "==========================================================="
+                  << endl ;
+        cout << strstream.str() << endl ;
+        CPPUNIT_FAIL( strstream.str() ) ;
+      }
+      mpi_access.check() ;
+    }
+
+    // Receiver side : the 5 MPI_DOUBLE messages are still undelivered (the
+    // posted IRecvs were MPI_INT).  Probe each one, cancel the probed
+    // message, then cancel the pending mismatched request itself.
+    if ( myrank != 0 ) {
+      int iprobe ;
+      for ( iprobe = 5 ; iprobe < 10 ; iprobe++ ) {
+        cout << "test" << myrank << " ============================ iprobe "
+             << iprobe << "============================" << endl ;
+        int source, tag, outcount ;
+        MPI_Datatype datatype ;
+        int probeflag = false ;
+        while ( !probeflag ) {
+          sts = mpi_access.IProbe( target, source, tag, datatype, outcount,
+                                   probeflag ) ;
+          char msgerr[MPI_MAX_ERROR_STRING] ;
+          int lenerr ;
+          mpi_access.errorString(sts, msgerr, &lenerr) ;
+          cout << "test" << myrank << " IProbe iprobe " << iprobe
+               << " target " << target << " probeflag " << probeflag
+               << " tag " << tag << " outcount " << outcount << " datatype "
+               << datatype << " lenerr " << lenerr << " " << msgerr << endl ;
+          if ( sts != MPI_SUCCESS ) {
+            ostringstream strstream ;
+            strstream << "=========================================================="
+                      << endl << "test" << myrank << " IProbe KO iprobe " << iprobe
+                      << endl
+                      << "=========================================================="
+                      << endl ;
+            cout << strstream.str() << endl ;
+            CPPUNIT_FAIL( strstream.str() ) ;
+          }
+          if ( !probeflag ) {
+            //cout << "========================================================"
+            //     << endl << "test" << myrank << " IProbe KO(OK) iprobe " << iprobe
+            //     << " probeflag " << probeflag << endl
+            //     << "========================================================"
+            //     << endl ;
+          }
+          else {
+            cout << "test" << myrank << " " << iprobe << " IProbe target "
+                 << target << " source " << source << " tag " << tag
+                 << " outcount " << outcount << " probeflag " << probeflag
+                 << endl ;
+            // Only MPI_DOUBLE messages are expected to remain at this point.
+            if ( datatype != MPI_DOUBLE ) {
+              ostringstream strstream ;
+              strstream << "========================================================"
+                        << endl << "test" << myrank << " MPI_DOUBLE KO" << endl
+                        << "========================================================"
+                        << endl ;
+              cout << strstream.str() << endl ;
+              CPPUNIT_FAIL( strstream.str() ) ;
+            }
+            else {
+              // Cancel the probed (still undelivered) message.
+              int flag ;
+              sts = mpi_access.cancel( source, tag, datatype, outcount, flag ) ;
+              if ( sts != MPI_SUCCESS || !flag ) {
+                mpi_access.errorString(sts, msgerr, &lenerr) ;
+                cout << "======================================================"
+                     << endl << "test" << myrank << " lenerr " << lenerr << " "
+                     << msgerr << endl << "test" << myrank
+                     << " Cancel PendingIrecv KO flag " << flag << " iprobe "
+                     << iprobe << " Irecv completed" << endl
+                     << "======================================================"
+                     << endl ;
+                //return 1 ;
+              }
+              else {
+                cout << "======================================================"
+                     << endl << "test" << myrank
+                     << " Cancel PendingIrecv OK RequestId " << " flag "
+                     << flag << " iprobe " << iprobe << endl
+                     << "======================================================"
+                     << endl ;
+              }
+            }
+            // Now cancel the pending mismatched MPI_INT IRecv request.
+            int Reqtarget, Reqtag, Reqerror, Reqoutcount ;
+            mpi_access.status( RequestId[iprobe], Reqtarget, Reqtag, Reqerror,
+                               Reqoutcount, true ) ;
+            cout << "test" << myrank << " Status Reqtarget "<< Reqtarget
+                 << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
+                 << endl ;
+            int Reqflag ;
+            sts = mpi_access.cancel( RequestId[iprobe] , Reqflag ) ;
+            cout << "test" << myrank << " " << iprobe
+                 << " Cancel Irecv done Reqtarget " << Reqtarget
+                 << " Reqtag " << Reqtag << " Reqoutcount " << Reqoutcount
+                 << " Reqflag " << Reqflag << endl ;
+            if ( sts != MPI_SUCCESS || !Reqflag ) {
+              mpi_access.errorString(sts, msgerr, &lenerr) ;
+              ostringstream strstream ;
+              strstream << "========================================================"
+                        << endl << "test" << myrank << " lenerr " << lenerr << " "
+                        << msgerr << endl << "test" << myrank
+                        << " Cancel Irecv KO Reqflag " << Reqflag << " iprobe "
+                        << iprobe << endl
+                        << "========================================================"
+                        << endl ;
+              cout << strstream.str() << endl ;
+              CPPUNIT_FAIL( strstream.str() ) ;
+            }
+            else {
+              cout << "========================================================"
+                   << endl << "test" << myrank
+                   << " Cancel Irecv OK RequestId " << RequestId[iprobe]
+                   << " Reqflag " << Reqflag << " iprobe " << iprobe << endl
+                   << "========================================================"
+                   << endl ;
+              probeflag = Reqflag ;
+            }
+          }
+        }
+      }
+    }
+    mpi_access.waitAll(10,RequestId) ;
+    mpi_access.deleteRequests(10,RequestId) ;
+  }
+
+  // After cancellation no unread message may remain on either side.
+  int source, tag, outcount, flag ;
+  MPI_Datatype datatype ;
+  sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
+  char msgerr[MPI_MAX_ERROR_STRING] ;
+  int lenerr ;
+  mpi_access.errorString(sts, msgerr, &lenerr) ;
+  cout << "test" << myrank << " lenerr " << lenerr << " "
+       << msgerr << endl ;
+  if ( sts != MPI_SUCCESS || flag ) {
+    ostringstream strstream ;
+    strstream << "==========================================================="
+              << endl << "test" << myrank << " IProbe KO flag " << flag
+              << " remaining unread/cancelled message :" << endl
+              << " source " << source << " tag " << tag << endl
+              << "==========================================================="
+              << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+
+  mpi_access.testAll(10,RequestId,flag) ;
+  mpi_access.waitAll(10,RequestId) ;
+  mpi_access.deleteRequests(10,RequestId) ;
+  mpi_access.testAll(10,RequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.check() ;
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+//  MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Test non-blocking ISend/IRecv on a 3-proc ring (0 -> 1 -> 2 -> 0).
+// Each rank relays maxsend integer values to its ring successor, testing
+// and checking every outstanding request after each step, then verifies
+// that no send or receive request remains registered in MPIAccess.
+// Must be run with at least 3 procs ; extra ranks only join the barrier.
+void MPIAccessTest::test_MPI_Access_Cyclic_ISend_IRecv() {
+
+  cout << "test_MPI_Access_Cyclic_ISend_IRecv" << endl ;
+
+//  MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 3 ) {
+    cout << "test_MPI_Access_Cyclic_ISend_IRecv must be runned with 3 procs" << endl ;
+    CPPUNIT_FAIL("test_MPI_Access_Cyclic_ISend_IRecv must be runned with 3 procs") ;
+  }
+
+  cout << "test_MPI_Access_Cyclic_ISend_IRecv" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+#define maxsend 100
+
+  // Only ranks 0..2 form the ring ; other ranks just synchronise.
+  if ( myrank >= 3 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  // Ring topology : alltarget[r] is the successor, allsource[r] the predecessor.
+  int alltarget[3] = {1 , 2 , 0 } ;
+  int allsource[3] = {2 , 0 , 1 } ;
+  int SendRequestId[maxsend] ;
+  int RecvRequestId[maxsend] ;
+  int sendbuf[maxsend] ;
+  int recvbuf[maxsend] ;
+  int sts ;
+  int i = 0 ;
+  // Rank 0 bootstraps the ring with the first value.
+  if ( myrank == 0 ) {
+    sendbuf[i] = i ;
+    sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
+                           SendRequestId[i]) ;
+    cout << "test" << myrank << " Send RequestId " << SendRequestId[i]
+         << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+  }
+  for ( i = 0 ; i < maxsend ; i++ ) {
+    recvbuf[i] = -1 ;
+    sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,allsource[myrank],
+                           RecvRequestId[i]) ;
+    cout << "test" << myrank << " Recv RequestId " << RecvRequestId[i]
+         << " tag " << mpi_access.recvMPITag(allsource[myrank]) << endl ;
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr
+         << " " << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    // Test every request posted so far and check completed receives.
+    int j ;
+    for (j = 0 ; j <= i ; j++) {
+      int flag ;
+      if ( j < i ) {
+        cout << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
+             << ")" << endl ;
+        mpi_access.test( SendRequestId[j], flag ) ;
+        if ( flag ) {
+          int target, tag, error, outcount ;
+          mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+                             true ) ;
+          cout << "test" << myrank << " Send RequestId " << SendRequestId[j]
+               << " target " << target << " tag " << tag << " error " << error
+               << endl ;
+          mpi_access.deleteRequest( SendRequestId[j] ) ;
+        }
+      }
+      // Note : the receive request id is printed here (the original logged
+      // SendRequestId[j] by mistake).
+      cout << "test" << myrank << " " << j << " -> Test-Recv("<< RecvRequestId[j]
+           << ")" << endl ;
+      mpi_access.test( RecvRequestId[j], flag ) ;
+      if ( flag ) {
+        int source, tag, error, outcount ;
+        mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+                           true ) ;
+        cout << "test" << myrank << " Recv RequestId" << j << " "
+             << RecvRequestId[j] << " source " << source << " tag " << tag
+             << " error " << error << " outcount " << outcount << endl ;
+        if ( (outcount != 1) || (recvbuf[j] != j) ) {
+          ostringstream strstream ;
+          strstream << "====================================================="
+                    << endl << "test" << myrank << " outcount "
+                    << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
+                    << endl << "====================================================="
+                    << endl ;
+          cout << strstream.str() << endl ;
+          CPPUNIT_FAIL( strstream.str() ) ;
+        }
+      }
+    }
+    // Forward the next value around the ring : rank 0 originates value i+1,
+    // the other ranks relay value i.
+    if ( myrank == 0 ) {
+      if ( i != maxsend-1 ) {
+        sendbuf[i+1] = i + 1 ;
+        sts = mpi_access.ISend(&sendbuf[i+1],1,MPI_INT,alltarget[myrank],
+                               SendRequestId[i+1]) ;
+        cout << "test" << myrank << " Send RequestId " << SendRequestId[i+1]
+             << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+      }
+    }
+    else {
+      sendbuf[i] = i ;
+      sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,alltarget[myrank],
+                             SendRequestId[i]) ;
+      cout << "test" << myrank << " Send RequestId " << SendRequestId[i]
+           << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+    }
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr
+         << " " << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    mpi_access.check() ;
+  }
+
+  // Drain and delete all requests, then check that testAll reports done.
+  int flag ;
+  mpi_access.testAll(maxsend,SendRequestId,flag) ;
+  mpi_access.testAll(maxsend,RecvRequestId,flag) ;
+  mpi_access.waitAll(maxsend,SendRequestId) ;
+  mpi_access.deleteRequests(maxsend,SendRequestId) ;
+  mpi_access.waitAll(maxsend,RecvRequestId) ;
+  mpi_access.deleteRequests(maxsend,RecvRequestId) ;
+  mpi_access.check() ;
+  mpi_access.testAll(maxsend,SendRequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " TestAllSendflag " << flag << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  else {
+    cout << "=========================================================" << endl
+         << "test" << myrank << " TestAllSendflag " << flag << " OK" << endl
+         << "=========================================================" << endl ;
+  }
+  mpi_access.testAll(maxsend,RecvRequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " TestAllRecvflag " << flag << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  else {
+    cout << "=========================================================" << endl
+         << "test" << myrank << " TestAllRecvflag " << flag << " OK" << endl
+         << "=========================================================" << endl ;
+  }
+
+  // No send request towards the successor may remain registered.
+  int sendrequests[maxsend] ;
+  int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , maxsend ,
+                                               sendrequests ) ;
+  if ( sendreqsize != 0 ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    int source, tag, error, outcount ;
+    mpi_access.status(sendrequests[0], source, tag, error, outcount, true) ;
+    cout << "test" << myrank << " RequestId " << sendrequests[0]
+         << " source " << source << " tag " << tag << " error " << error
+         << " outcount " << outcount << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  else {
+    cout << "=========================================================" << endl
+         << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+         << "=========================================================" << endl ;
+  }
+  // No receive request from the predecessor may remain registered.
+  // (The original called sendRequestIds here by copy-paste mistake.)
+  int recvrequests[maxsend] ;
+  int recvreqsize = mpi_access.recvRequestIds( allsource[myrank] , maxsend ,
+                                               recvrequests ) ;
+  if ( recvreqsize != 0 ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  else {
+    cout << "=========================================================" << endl
+         << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+         << "=========================================================" << endl ;
+  }
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+//  MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Test blocking send/recv on a 3-proc ring (0 -> 1 -> 2 -> 0).
+// Rank 0 originates 10 integer values ; each rank receives from its
+// predecessor and relays to its successor, alternating the recv overload
+// with and without the outcount output argument.  Finally checks that no
+// send or receive request remains registered in MPIAccess.
+// Must be run with at least 3 procs ; extra ranks only join the barrier.
+void MPIAccessTest::test_MPI_Access_Cyclic_Send_Recv() {
+
+  cout << "test_MPI_Access_Cyclic_Send_Recv" << endl ;
+
+//  MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 3 ) {
+    cout << "test_MPI_Access_Send_Recv must be runned with 3 procs" << endl ;
+    CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be runned with 3 procs") ;
+  }
+
+  cout << "test_MPI_Access_Cyclic_Send_Recv" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Only ranks 0..2 form the ring ; other ranks just synchronise.
+  if ( myrank >= 3 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  // Ring topology : alltarget[r] is the successor, allsource[r] the predecessor.
+  int alltarget[3] = {1 , 2 , 0 } ;
+  int allsource[3] = {2 , 0 , 1 } ;
+  int RequestId[10] ;
+  int sts ;
+  int i = 0 ;
+  // Rank 0 bootstraps the ring with the first value.
+  if ( myrank == 0 ) {
+    sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
+    cout << "test" << myrank << " Send RequestId " << RequestId[i]
+         << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+  }
+  for ( i = 0 ; i < 10 ; i++ ) {
+    int recvbuf ;
+    int outcount ;
+    // Alternate between the recv overload returning outcount (odd i)
+    // and the plain overload (even i, outcount assumed 1).
+    if ( i & 1 ) {
+      outcount = 0 ;
+      sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i],
+                            &outcount) ;
+    }
+    else {
+      sts = mpi_access.recv(&recvbuf,1,MPI_INT,allsource[myrank], RequestId[i]) ;
+      outcount = 1 ;
+    }
+    //int source, tag, error, outcount ;
+    //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
+    cout << "test" << myrank << " Recv RequestId " << RequestId[i]
+         << " tag " << mpi_access.recvMPITag(allsource[myrank])
+         << " outcount " << outcount << endl ;
+    if ( (outcount != 1) || (recvbuf != i) ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " outcount "
+                << outcount << " recvbuf " << recvbuf << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    // Forward the next value around the ring : rank 0 originates value i+1,
+    // the other ranks relay value i.
+    if ( myrank == 0 ) {
+      if ( i != 9 ) {
+        int ii = i + 1 ;
+        sts = mpi_access.send(&ii,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
+        cout << "test" << myrank << " Send RequestId " << RequestId[i]
+             << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+      }
+    }
+    else {
+      sts = mpi_access.send(&i,1,MPI_INT,alltarget[myrank], RequestId[i]) ;
+      cout << "test" << myrank << " Send RequestId " << RequestId[i]
+           << " tag " << mpi_access.sendMPITag(alltarget[myrank]) << endl ;
+    }
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr
+         << " " << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    mpi_access.check() ;
+  }
+
+  // All blocking requests must already be complete.
+  int flag ;
+  mpi_access.testAll(10,RequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.waitAll(10,RequestId) ;
+  mpi_access.check() ;
+
+  // No send request towards the successor may remain registered.
+  int sendrequests[10] ;
+  int sendreqsize = mpi_access.sendRequestIds( alltarget[myrank] , 10 ,
+                                               sendrequests ) ;
+  if ( sendreqsize != 0 ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  // No receive request from the predecessor may remain registered.
+  // (The original called sendRequestIds here by copy-paste mistake.)
+  int recvrequests[10] ;
+  int recvreqsize = mpi_access.recvRequestIds( allsource[myrank] , 10 ,
+                                               recvrequests ) ;
+  if ( recvreqsize != 0 ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+//  MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <time.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Test MPIAccess::IProbe() : rank 0 ISends 10 MPI_INT values to rank 1 ;
+// rank 1 polls with IProbe until each message is announced, then receives
+// it with the datatype/outcount reported by the probe and checks the value.
+// Must be run with at least 2 procs ; extra ranks only join the barrier.
+void MPIAccessTest::test_MPI_Access_IProbe() {
+
+  cout << "test_MPI_Access_IProbe" << endl ;
+
+//  MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 2 ) {
+    ostringstream strstream ;
+    strstream << "test_MPI_Access_IProbe must be runned with 2 procs" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+
+  cout << "test_MPI_Access_IProbe" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Only ranks 0 and 1 take part in the exchange ; others just synchronise.
+  if ( myrank >= 2 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  int target = 1 - myrank ;
+  int sendbuf[10] ;
+  int RequestId[10] ;
+  int sts ;
+  int i ;
+  for ( i = 0 ; i < 10 ; i++ ) {
+    if ( myrank == 0 ) {
+      sendbuf[i] = i ;
+      sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, RequestId[i]) ;
+      cout << "test" << myrank << " Send RequestId " << RequestId[i]
+           << endl ;
+    }
+    else {
+      // Receiver side : poll with IProbe until the message is available,
+      // then receive it with the probed datatype and outcount.
+      int flag = false ;
+      while ( !flag ) {
+        int source, tag, outcount ;
+        MPI_Datatype datatype ;
+        sts = mpi_access.IProbe(target, source, tag, datatype, outcount, flag ) ;
+        if ( flag ) {
+          cout << "test" << myrank << " " << i << " IProbe target " << target
+               << " source " << source << " tag " << tag
+               << " outcount " << outcount << " flag " << flag << endl ;
+        }
+        else {
+          cout << "test" << myrank << " IProbe flag " << flag << endl ;
+          sleep( 1 ) ;
+        }
+        if ( flag ) {
+          int recvbuf ;
+          sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
+                                &outcount) ;
+          if ( (outcount != 1) || (recvbuf != i) ) {
+            ostringstream strstream ;
+            strstream << "==========================================================="
+                      << endl << "test" << myrank << " outcount " << outcount
+                      << " recvbuf " << recvbuf << " KO" << endl
+                      << "==========================================================="
+                      << endl ;
+            cout << strstream.str() << endl ;
+            CPPUNIT_FAIL( strstream.str() ) ;
+          }
+          cout << "==========================================================="
+               << endl << "test" << myrank << " outcount " << outcount
+               << " recvbuf " << recvbuf << " OK" << endl
+               << "==========================================================="
+               << endl ;
+        }
+      }
+    }
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr << " "
+         << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    mpi_access.check() ;
+  }
+  // Drain and delete all requests, then check that testAll reports done.
+  int flag ;
+  mpi_access.testAll(10,RequestId,flag) ;
+  mpi_access.waitAll(10,RequestId) ;
+  mpi_access.deleteRequests(10,RequestId) ;
+  mpi_access.testAll(10,RequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.check() ;
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+//  MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+void MPIAccessTest::test_MPI_Access_ISendRecv() {
+
+ cout << "test_MPI_Access_ISendRecv" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ cout << "test_MPI_Access_ISendRecv must be runned with 2 procs" << endl ;
+ CPPUNIT_FAIL("test_MPI_Access_ISendRecv must be runned with 2 procs") ;
+ }
+
+ cout << "test_MPI_Access_ISendRecv" << myrank << endl ;
+
+ ParaMEDMEM::CommInterface interface ;
+
+ ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+ ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[10] ;
+ int RecvRequestId[10] ;
+ int sendbuf[10] ;
+ int recvbuf[10] ;
+ int sts ;
+ int i ;
+ for ( i = 0 ; i < 10 ; i++ ) {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISendRecv(&sendbuf[i],1,MPI_INT,target, SendRequestId[i],
+ &recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
+ cout << "test" << myrank << " Send sendRequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target)
+ << " recvRequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " lenerr " << lenerr
+ << " " << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
+ int flag ;
+ if ( j < i ) {
+ cout << "test" << myrank << " " << j << " -> Test-Send("<< SendRequestId[j]
+ << ")" << endl ;
+ mpi_access.test( SendRequestId[j], flag ) ;
+ if ( flag ) {
+ int target, tag, error, outcount ;
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ cout << "test" << myrank << " Send RequestId " << SendRequestId[j]
+ << " target " << target << " tag " << tag << " error " << error
+ << endl ;
+ mpi_access.deleteRequest( SendRequestId[j] ) ;
+ }
+ }
+ cout << "test" << myrank << " " << j << " -> Test-Recv("<< SendRequestId[j]
+ << ")" << endl ;
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ if ( flag ) {
+ int source, tag, error, outcount ;
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ cout << "test" << myrank << " Recv RequestId" << j << " "
+ << RecvRequestId[j] << " source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount << endl ;
+ if ( (outcount != 1) | (recvbuf[j] != j) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " outcount "
+ << outcount << " recvbuf[ " << j << " ] " << recvbuf[j] << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ }
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+ mpi_access.check() ;
+ }
+
+ int flag ;
+ mpi_access.testAll(10,SendRequestId,flag) ;
+ mpi_access.waitAll(10,SendRequestId) ;
+ mpi_access.deleteRequests(10,SendRequestId) ;
+ mpi_access.testAll(10,SendRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ mpi_access.testAll(10,RecvRequestId,flag) ;
+ mpi_access.waitAll(10,RecvRequestId) ;
+ mpi_access.deleteRequests(10,RecvRequestId) ;
+ mpi_access.testAll(10,RecvRequestId,flag) ;
+ if ( !flag ) {
+ ostringstream strstream ;
+ strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ mpi_access.check() ;
+
+ int sendrequests[10] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ int recvrequests[10] ;
+ int recvreqsize = mpi_access.sendRequestIds( target , 10 , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+// MPI_Finalize();
+
+ cout << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+void MPIAccessTest::test_MPI_Access_ISend_IRecv() {
+
+ cout << "test_MPI_Access_ISend_IRecv" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ cout << "test_MPI_Access_ISend_IRecv must be runned with 2 procs" << endl ;
+ CPPUNIT_FAIL("test_MPI_Access_ISend_IRecv must be runned with 2 procs") ;
+ }
+
+ cout << "test_MPI_Access_ISend_IRecv" << myrank << endl ;
+
+ ParaMEDMEM::CommInterface interface ;
+
+ ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+ ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+#define maxreq 100
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[maxreq] ;
+ int recvbuf[maxreq] ;
+ int i ;
+ for ( i = 0 ; i < maxreq ; i++ ) {
+ if ( myrank == 0 ) {
+ sendbuf[i] = i ;
+ sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
+ cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
+ cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.test( SendRequestId[j], flag ) ;
+ }
+ else {
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ }
+ if ( flag ) {
+ int target,source, tag, error, outcount ;
+ if ( myrank == 0 ) {
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+ << ") : target " << target << " tag " << tag << " error " << error
+ << " flag " << flag << endl ;
+ }
+ else {
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ cout << "test" << myrank << " Test(Recv RequestId "
+ << RecvRequestId[j] << ") : source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount
+ << " flag " << flag << endl ;
+ if ( (outcount != 1) | (recvbuf[j] != j) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount "
+ << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ //else {
+ // cout << "==========================================================="
+ // << endl << "test" << myrank << " outcount " << outcount
+ // << " RequestId " << RecvRequestId[j] << " recvbuf "
+ // << recvbuf[j] << " OK" << endl
+ // << "==========================================================="
+ // << endl ;
+ //}
+ }
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ mpi_access.check() ;
+ if ( myrank == 0 ) {
+ mpi_access.waitAll(maxreq, SendRequestId) ;
+ mpi_access.deleteRequests(maxreq, SendRequestId) ;
+ }
+ else {
+ mpi_access.waitAll(maxreq, RecvRequestId) ;
+ mpi_access.deleteRequests(maxreq, RecvRequestId) ;
+ }
+ mpi_access.check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ int i ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ for ( i = 0 ; i < sendreqsize ; i++ ) {
+ cout << "test" << myrank << " sendrequests[ " << i << " ] = "
+ << sendrequests[i] << endl ;
+ }
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[maxreq] ;
+ int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+ // MPI_Finalize();
+
+ cout << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <time.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
// Stress ("bottleneck") test: rank 0 floods rank 1 with maxreq (=10000)
// ISend requests of growing length while rank 1 posts matching IRecv
// requests and opportunistically tests/deletes the ones already completed,
// so the receiver constantly lags behind the sender.  Afterwards both
// sides wait for all remaining requests, delete them, and verify that the
// per-target request lists are empty.
// NOTE(review): message i is sent/received with length i from the START of
// sendbuf/recvbuf (not &sendbuf[i]); only elements 0..i-1 are transferred.
void MPIAccessTest::test_MPI_Access_ISend_IRecv_BottleNeck() {

  cout << "test_MPI_Access_ISend_IRecv_BottleNeck" << endl ;

// MPI_Init(&argc, &argv) ;  // done once by the test driver

  int size ;
  int myrank ;
  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;

  // The exchange needs two active ranks.
  if ( size < 2 ) {
    ostringstream strstream ;
    strstream << "test_MPI_Access_ISend_IRecv_BottleNeck must be runned with 2 procs"
              << endl ;
    cout << strstream.str() << endl ;
    CPPUNIT_FAIL( strstream.str() ) ;
  }

  cout << "test_MPI_Access_ISend_IRecv_BottleNeck" << myrank << endl ;

  ParaMEDMEM::CommInterface interface ;

  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;

  ParaMEDMEM::MPIAccess mpi_access( group ) ;

#define maxreq 10000

  // Ranks other than 0 and 1 only synchronize and leave.
  if ( myrank >= 2 ) {
    mpi_access.barrier() ;
    delete group ;
    return ;
  }

  int target = 1 - myrank ;
  int SendRequestId[maxreq] ;
  int RecvRequestId[maxreq] ;
  int sts ;
  int sendbuf[maxreq] ;
  int recvbuf[maxreq] ;
  int i ;
  for ( i = 0 ; i < maxreq ; i++ ) {
    if ( myrank == 0 ) {
      // Sender: post ISend number i with message length i (ints).
      sendbuf[i] = i ;
      sts = mpi_access.ISend(sendbuf,i,MPI_INT,target, SendRequestId[i]) ;
      cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
           << " tag " << mpi_access.sendMPITag(target) << endl ;
    }
    else {
      //sleep( 1 ) ;
      // Receiver: post IRecv number i, then sweep the currently pending
      // receive requests and delete those that have already completed, so
      // the request table does not grow unboundedly while lagging behind.
      sts = mpi_access.IRecv(recvbuf,i,MPI_INT,target, RecvRequestId[i]) ;
      cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
           << " tag " << mpi_access.recvMPITag(target) << endl ;
      int recvreqsize = mpi_access.recvRequestIdsSize() ;
      int * recvrequests = new int[ recvreqsize ] ;
      recvreqsize = mpi_access.recvRequestIds( target , recvreqsize , recvrequests ) ;
      int j ;
      for (j = 0 ; j < recvreqsize ; j++) {
        int flag ;
        mpi_access.test( recvrequests[j], flag ) ;
        if ( flag ) {
          int source, tag, error, outcount ;
          mpi_access.status( recvrequests[j], source, tag, error, outcount,
                             true ) ;
          cout << "test" << myrank << " Test(Recv RequestId "
               << recvrequests[j] << ") : source " << source << " tag " << tag
               << " error " << error << " outcount " << outcount
               << " flag " << flag << " : DeleteRequest" << endl ;
          mpi_access.deleteRequest( recvrequests[j] ) ;
        }
        else {
//          cout << "test" << myrank << " Test(Recv RequestId "
//               << recvrequests[j] << ") flag " << flag << endl ;
        }
      }
      delete [] recvrequests ;
    }
    // Report and fail on any MPI error from the last ISend/IRecv.
    if ( sts != MPI_SUCCESS ) {
      char msgerr[MPI_MAX_ERROR_STRING] ;
      int lenerr ;
      mpi_access.errorString(sts, msgerr, &lenerr) ;
      cout << "test" << myrank << " lenerr " << lenerr << " "
           << msgerr << endl ;
    }

    if ( sts != MPI_SUCCESS ) {
      ostringstream strstream ;
      strstream << "==========================================================="
                << "test" << myrank << " KO"
                << "==========================================================="
                << endl ;
      cout << strstream.str() << endl ;
      CPPUNIT_FAIL( strstream.str() ) ;
    }
  }

  // Drain everything still pending, then delete whatever requests remain
  // registered (the receiver already deleted many of them on the fly).
  mpi_access.check() ;
  if ( myrank == 0 ) {
    int size = mpi_access.sendRequestIdsSize() ;
    cout << "test" << myrank << " before WaitAll sendreqsize " << size << endl ;
    mpi_access.waitAll(maxreq, SendRequestId) ;
    size = mpi_access.sendRequestIdsSize() ;
    cout << "test" << myrank << " after WaitAll sendreqsize " << size << endl ;
    int * ArrayOfSendRequests = new int[ size ] ;
    int nSendRequest = mpi_access.sendRequestIds( size , ArrayOfSendRequests ) ;
    int i ;
    for ( i = 0 ; i < nSendRequest ; i++ ) {
      mpi_access.deleteRequest( ArrayOfSendRequests[i] ) ;
    }
    delete [] ArrayOfSendRequests ;
  }
  else {
    int size = mpi_access.recvRequestIdsSize() ;
    cout << "test" << myrank << " before WaitAll recvreqsize " << size << endl ;
    mpi_access.waitAll(maxreq, RecvRequestId) ;
    size = mpi_access.recvRequestIdsSize() ;
    cout << "test" << myrank << " after WaitAll recvreqsize " << size << endl ;
    int * ArrayOfRecvRequests = new int[ size ] ;
    int nRecvRequest = mpi_access.recvRequestIds( size , ArrayOfRecvRequests ) ;
    int i ;
    for ( i = 0 ; i < nRecvRequest ; i++ ) {
      mpi_access.deleteRequest( ArrayOfRecvRequests[i] ) ;
    }
    delete [] ArrayOfRecvRequests ;
  }
  mpi_access.check() ;

  // After deletion the per-target request lists must be empty.
  if ( myrank == 0 ) {
    int sendrequests[maxreq] ;
    int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
    int i ;
    if ( sendreqsize != 0 ) {
      ostringstream strstream ;
      strstream << "=========================================================" << endl
                << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
                << "=========================================================" << endl ;
      cout << strstream.str() << endl ;
      for ( i = 0 ; i < sendreqsize ; i++ ) {
        cout << "test" << myrank << " sendrequests[ " << i << " ] = "
             << sendrequests[i] << endl ;
      }
      CPPUNIT_FAIL( strstream.str() ) ;
    }
    else {
      cout << "=========================================================" << endl
           << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
           << "=========================================================" << endl ;
    }
  }
  else {
    int recvrequests[maxreq] ;
    int recvreqsize = mpi_access.recvRequestIds( target , maxreq , recvrequests ) ;
    if ( recvreqsize != 0 ) {
      ostringstream strstream ;
      strstream << "=========================================================" << endl
                << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
                << "=========================================================" << endl ;
      cout << strstream.str() << endl ;
      CPPUNIT_FAIL( strstream.str() ) ;
    }
    else {
      cout << "=========================================================" << endl
           << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
           << "=========================================================" << endl ;
    }
  }

  mpi_access.barrier() ;

  delete group ;

// MPI_Finalize();  // done once by the test driver

  cout << "test" << myrank << " OK" << endl ;

  return ;
}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length() {
+
+ cout << "test_MPI_Access_ISend_IRecv_Length" << endl ;
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_ISend_IRecv_Length must be runned with 2 procs" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ cout << "test_MPI_Access_ISend_IRecv_Length" << myrank << endl ;
+
+ ParaMEDMEM::CommInterface interface ;
+
+ ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+ ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+#define maxreq 10
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[1000*(maxreq-1)] ;
+ int recvbuf[maxreq-1][1000*(maxreq-1)] ;
+ int i ;
+ for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
+ sendbuf[i] = i ;
+ }
+ for ( i = 0 ; i < maxreq ; i++ ) {
+ if ( myrank == 0 ) {
+ sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
+ cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ else {
+ sts = mpi_access.IRecv( recvbuf[i], 1000*i, MPI_INT, target,
+ RecvRequestId[i] ) ;
+ cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ }
+ int j ;
+ for (j = 0 ; j <= i ; j++) {
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.test( SendRequestId[j], flag ) ;
+ }
+ else {
+ mpi_access.test( RecvRequestId[j], flag ) ;
+ }
+ if ( flag ) {
+ int target,source, tag, error, outcount ;
+ if ( myrank == 0 ) {
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+ << ") : target " << target << " tag " << tag << " error " << error
+ << " flag " << flag << endl ;
+ }
+ else {
+ mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+ true ) ;
+ cout << "test" << myrank << " Test(Recv RequestId "
+ << RecvRequestId[j] << ") : source " << source << " tag " << tag
+ << " error " << error << " outcount " << outcount
+ << " flag " << flag << endl ;
+ if ( outcount != 0 ) {
+ if ( (outcount != 1000*j) |
+ (recvbuf[j][outcount-1] != (outcount-1)) ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " outcount "
+ << outcount << " recvbuf " << recvbuf[j][outcount-1] << " KO"
+ << endl
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " RequestId " << RecvRequestId[j] << " recvbuf "
+ << recvbuf[j][outcount-1] << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ else {
+ cout << "==========================================================="
+ << endl << "test" << myrank << " outcount " << outcount
+ << " RequestId " << RecvRequestId[j] << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ mpi_access.check() ;
+ cout << "test" << myrank << " WaitAll" << endl ;
+ if ( myrank == 0 ) {
+ mpi_access.waitAll(maxreq, SendRequestId) ;
+ mpi_access.deleteRequests(maxreq, SendRequestId) ;
+ }
+ else {
+ mpi_access.waitAll(maxreq, RecvRequestId) ;
+ mpi_access.deleteRequests(maxreq, RecvRequestId) ;
+ }
+ mpi_access.check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[maxreq] ;
+ int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+ // MPI_Finalize();
+
+ cout << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+void MPIAccessTest::test_MPI_Access_ISend_IRecv_Length_1() {
+
+ // MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "test_MPI_Access_ISend_IRecv_Length_1 must be runned with 2 procs" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+ cout << "test_MPI_Access_ISend_IRecv_Length_1" << myrank << endl ;
+
+ ParaMEDMEM::CommInterface interface ;
+
+ ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+ ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+#define maxreq 10
+
+ if ( myrank >= 2 ) {
+ mpi_access.barrier() ;
+ delete group ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[1000*(maxreq-1)] ;
+ int recvbuf[maxreq-1][1000*(maxreq-1)] ;
+ int maxirecv = 1 ;
+ int i ;
+ RecvRequestId[0] = -1 ;
+ for ( i = 0 ; i < 1000*(maxreq-1) ; i++ ) {
+ sendbuf[i] = i ;
+ }
+ for ( i = 0 ; i < maxreq ; i++ ) {
+ sts = MPI_SUCCESS ;
+ if ( myrank == 0 ) {
+ sts = mpi_access.ISend( sendbuf, 1000*i, MPI_INT, target, SendRequestId[i] ) ;
+ cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+ << " tag " << mpi_access.sendMPITag(target) << endl ;
+ }
+ int j ;
+ for (j = 1 ; j <= i ; j++) {
+ int source ;
+ MPI_Datatype datatype ;
+ int outcount ;
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.test( SendRequestId[j], flag ) ;
+ }
+ else {
+ int MPITag ;
+ sts = mpi_access.IProbe( target , source, MPITag, datatype,
+ outcount, flag) ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " IProbe lenerr " << lenerr << " "
+ << msgerr << endl ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " IProbe KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ cout << "test" << myrank << " IProbe i/j " << i << "/" << j
+ << " MPITag " << MPITag << " datatype " << datatype
+ << " outcount " << outcount << " flag " << flag << endl ;
+ }
+ if ( flag ) {
+ if ( myrank == 0 ) {
+ int target, tag, error, outcount ;
+ mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+ true ) ;
+ cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+ << ") : target " << target << " tag " << tag << " error " << error
+ << " flag " << flag << endl ;
+ }
+ else {
+ sts = mpi_access.IRecv( recvbuf[maxirecv], outcount, datatype, source,
+ RecvRequestId[maxirecv] ) ;
+ cout << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
+ << RecvRequestId[maxirecv] << " source " << source
+ << " outcount " << outcount << " tag "
+ << mpi_access.recvMPITag(target) << endl ;
+ maxirecv = maxirecv + 1 ;
+ }
+ }
+ else if ( myrank == 1 && i == maxreq-1 && j >= maxirecv ) {
+ sts = mpi_access.IRecv( recvbuf[j], 1000*j, MPI_INT, target,
+ RecvRequestId[j] ) ;
+ cout << "test" << myrank << " maxirecv " << maxirecv << " IRecv RequestId "
+ << RecvRequestId[j] << " target " << target << " length " << 1000*j
+ << " tag " << mpi_access.recvMPITag(target) << endl ;
+ maxirecv = maxirecv + 1 ;
+ }
+ }
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " lenerr " << lenerr << " "
+ << msgerr << endl ;
+
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+
+ mpi_access.check() ;
+ int flag ;
+ if ( myrank == 0 ) {
+ mpi_access.testAll( maxreq, SendRequestId, flag ) ;
+ cout << "test" << myrank << " TestAll SendRequest flag " << flag << endl ;
+ }
+ else {
+ int i ;
+ int source ;
+ int outcount ;
+ int flag ;
+ if ( maxirecv != maxreq ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " KO" << " maxirecv " << maxirecv
+ << " != maxreq " << maxreq << endl
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ while ( maxirecv > 0 ) {
+ for ( i = 1 ; i < maxreq ; i++ ) {
+ cout << "test" << myrank << " IProbe : " << endl ;
+ sts = mpi_access.test( RecvRequestId[i] , flag ) ;
+ char msgerr[MPI_MAX_ERROR_STRING] ;
+ int lenerr ;
+ mpi_access.errorString(sts, msgerr, &lenerr) ;
+ cout << "test" << myrank << " flag " << flag << " lenerr "
+ << lenerr << " " << msgerr << " maxirecv " << maxirecv << endl ;
+ if ( sts != MPI_SUCCESS ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ cout << "test" << myrank << " Test flag " << flag << endl ;
+ if ( flag ) {
+ int tag, error ;
+ mpi_access.status( RecvRequestId[i] , source , tag , error ,
+ outcount ) ;
+ if ( i != 0 ) {
+ if ( outcount != 1000*i |
+ (recvbuf[i][outcount-1] != (outcount-1)) ) {
+ ostringstream strstream ;
+ strstream << "========================================================"
+ << endl << "test" << myrank << " outcount " << outcount
+ << " KO" << " i " << i
+ << " recvbuf " << recvbuf[i][outcount-1] << endl
+ << "========================================================"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ }
+ else if ( outcount != 0 ) {
+ ostringstream strstream ;
+ strstream << "========================================================"
+ << endl << "test" << myrank << " outcount " << outcount
+ << " KO" << " i " << i << endl
+ << "========================================================"
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ maxirecv = maxirecv - 1 ;
+ }
+ }
+ }
+ mpi_access.testAll( maxreq, RecvRequestId, flag ) ;
+ cout << "test" << myrank << " TestAll RecvRequest flag " << flag << endl ;
+ }
+ mpi_access.check() ;
+ cout << "test" << myrank << " WaitAll :" << endl ;
+ if ( myrank == 0 ) {
+ mpi_access.waitAll( maxreq, SendRequestId ) ;
+ mpi_access.deleteRequests( maxreq, SendRequestId ) ;
+ }
+ else {
+ mpi_access.waitAll( maxreq, RecvRequestId ) ;
+ mpi_access.deleteRequests( maxreq, RecvRequestId ) ;
+ }
+
+ if ( myrank == 0 ) {
+ int sendrequests[maxreq] ;
+ int sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ sendreqsize = mpi_access.sendRequestIds( target , maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[maxreq] ;
+ int recvreqsize = mpi_access.sendRequestIds( target , maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ mpi_access.barrier() ;
+
+ delete group ;
+
+ // MPI_Finalize();
+
+ cout << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Test of MPIAccess::probe() : rank 0 sends 10 integers one at a time to
+// rank 1 ; rank 1 probes each incoming message first (learning its source,
+// tag, datatype and length) and only then posts the matching receive.
+// Must be run with at least 2 procs ; extra ranks only hit the barrier.
+void MPIAccessTest::test_MPI_Access_Probe() {
+
+  cout << "test_MPI_Access_Probe" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 2 ) {
+    cout << "test_MPI_Access_Probe must be runned with 2 procs" << endl ;
+    CPPUNIT_FAIL("test_MPI_Access_Probe must be runned with 2 procs") ;
+  }
+
+  cout << "test_MPI_Access_Probe" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Only ranks 0 and 1 take part in the exchange.
+  if ( myrank >= 2 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  int target = 1 - myrank ;
+  int RequestId[10] ;
+  int sts ;
+  int i ;
+  for ( i = 0 ; i < 10 ; i++ ) {
+    if ( myrank == 0 ) {
+      sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
+      cout << "test" << myrank << " Send RequestId " << RequestId[i]
+           << endl ;
+    }
+    else {
+      int source, tag, outcount ;
+      MPI_Datatype datatype ;
+      // Blocking probe : reports the characteristics of the pending
+      // message without consuming it, so the recv below can use them.
+      sts = mpi_access.probe(target, source, tag, datatype, outcount ) ;
+      cout << "test" << myrank << " Probe target " << target << " source " << source
+           << " tag " << tag << " outcount " << outcount << endl ;
+      int recvbuf ;
+      sts = mpi_access.recv(&recvbuf,outcount,datatype,source, RequestId[i],
+                            &outcount) ;
+      // Logical || (the original used bitwise |) : exactly one int equal
+      // to the loop index is expected.
+      if ( (outcount != 1) || (recvbuf != i) ) {
+        ostringstream strstream ;
+        strstream << "==========================================================="
+                  << "test" << myrank << " outcount " << outcount
+                  << " recvbuf " << recvbuf << " KO"
+                  << "==========================================================="
+                  << endl ;
+        cout << strstream.str() << endl ;
+        CPPUNIT_FAIL( strstream.str() ) ;
+      }
+    }
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr << " "
+         << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    mpi_access.check() ;
+  }
+  // All ten requests (sends on rank 0, recvs on rank 1) must be complete.
+  int flag ;
+  mpi_access.testAll(10,RequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.waitAll(10,RequestId) ;
+  mpi_access.check() ;
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+// MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Test of MPIAccess::sendRecv() : both ranks 0 and 1 exchange the loop
+// index 10 times with the combined send/recv primitive, alternating the
+// variant that returns the received length (&outcount) with the one that
+// does not. Must be run with at least 2 procs.
+void MPIAccessTest::test_MPI_Access_SendRecv() {
+
+  cout << "MPIAccessTest::test_MPI_Access_SendRecv" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 2 ) {
+    cout << "MPIAccessTest::test_MPI_Access_SendRecv must be runned with 2 procs" << endl ;
+    CPPUNIT_FAIL("test_MPI_Access_SendRecv must be runned with 2 procs") ;
+  }
+
+  cout << "MPIAccessTest::test_MPI_Access_SendRecv" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Only ranks 0 and 1 take part in the exchange.
+  if ( myrank >= 2 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  int target = 1 - myrank ;
+  int sendRequestId[10] ;
+  int recvRequestId[10] ;
+  int sts ;
+  int i ;
+  for ( i = 0 ; i < 10 ; i++ ) {
+    int recvbuf ;
+    int outcount ;
+    if ( i & 1 ) {
+      // Odd iterations : ask sendRecv for the received length.
+      outcount = -1 ;
+      sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
+                                &recvbuf,1,MPI_INT,target, recvRequestId[i],
+                                &outcount) ;
+    }
+    else {
+      // Even iterations : no outcount argument ; one int is expected.
+      sts = mpi_access.sendRecv(&i,1,MPI_INT,target, sendRequestId[i],
+                                &recvbuf,1,MPI_INT,target, recvRequestId[i]) ;
+//      outcount = mpi_access.MPIOutCount( recvRequestId[i] ) ;
+      outcount = 1 ;
+    }
+    cout << "test" << myrank << " Send sendRequestId " << sendRequestId[i]
+         << " tag " << mpi_access.sendMPITag(target)
+         << " recvRequestId " << recvRequestId[i]
+         << " tag " << mpi_access.recvMPITag(target)
+         << " outcount " << outcount << " MPIOutCount "
+         << mpi_access.MPIOutCount( recvRequestId[i] ) << endl ;
+    // Logical || (the original used bitwise |).
+    if ( (outcount != 1) || (recvbuf != i) ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " outcount " << outcount
+                << " recvbuf " << recvbuf << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr << " "
+         << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    mpi_access.check() ;
+  }
+
+  // Both the send side and the recv side must be fully completed.
+  int flag ;
+  mpi_access.testAll(10,sendRequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.waitAll(10,sendRequestId) ;
+  mpi_access.testAll(10,recvRequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.waitAll(10,recvRequestId) ;
+  mpi_access.check() ;
+
+  // No send request may be left pending toward the target...
+  int sendrequests[10] ;
+  int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+  if ( sendreqsize != 0 ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  // ...nor any recv request (the original mistakenly queried
+  // sendRequestIds here, so recv requests were never really checked).
+  int recvrequests[10] ;
+  int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
+  if ( recvreqsize != 0 ) {
+    ostringstream strstream ;
+    strstream << "=========================================================" << endl
+              << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+              << "=========================================================" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+// MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Basic point-to-point test : rank 0 send()s the loop index 10 times,
+// rank 1 recv()s it and checks value and length. Must be run with at
+// least 2 procs ; extra ranks only hit the barrier.
+void MPIAccessTest::test_MPI_Access_Send_Recv() {
+
+  cout << "test_MPI_Access_Send_Recv" << endl ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 2 ) {
+    cout << "test_MPI_Access_Send_Recv must be runned with 2 procs" << endl ;
+    CPPUNIT_FAIL("test_MPI_Access_Send_Recv must be runned with 2 procs") ;
+  }
+
+  cout << "test_MPI_Access_Send_Recv" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Only ranks 0 and 1 take part in the exchange.
+  if ( myrank >= 2 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  int target = 1 - myrank ;
+  int RequestId[10] ;
+  int sts ;
+  int i ;
+  for ( i = 0 ; i < 10 ; i++ ) {
+    if ( myrank == 0 ) {
+      sts = mpi_access.send(&i,1,MPI_INT,target, RequestId[i]) ;
+      cout << "test" << myrank << " Send RequestId " << RequestId[i]
+           << " tag " << mpi_access.sendMPITag(target) << endl ;
+    }
+    else {
+      int recvbuf ;
+      int outcount ;
+      sts = mpi_access.recv(&recvbuf,1,MPI_INT,target, RequestId[i],&outcount) ;
+      //int source, tag, error, outcount ;
+      //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
+      cout << "test" << myrank << " Recv RequestId " << RequestId[i]
+           << " tag " << mpi_access.recvMPITag(target)
+           << " outcount " << outcount << endl ;
+      // Logical || (the original used bitwise |).
+      if ( (outcount != 1) || (recvbuf != i) ) {
+        ostringstream strstream ;
+        strstream << "==========================================================="
+                  << "test" << myrank << " outcount " << outcount
+                  << " recvbuf " << recvbuf << " KO"
+                  << "==========================================================="
+                  << endl ;
+        cout << strstream.str() << endl ;
+        CPPUNIT_FAIL( strstream.str() ) ;
+      }
+    }
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr << " "
+         << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      // Echo the failure before aborting, like every other error branch.
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    mpi_access.check() ;
+  }
+  int flag ;
+  mpi_access.testAll(10,RequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.waitAll(10,RequestId) ;
+  mpi_access.check() ;
+
+  // After waitAll no request may be left pending on either side.
+  if ( myrank == 0 ) {
+    int sendrequests[10] ;
+    int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+    if ( sendreqsize != 0 ) {
+      ostringstream strstream ;
+      strstream << "=========================================================" << endl
+                << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+                << "=========================================================" << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+  }
+  else {
+    // Query the recv side (the original mistakenly called sendRequestIds
+    // here, so pending recv requests were never really checked).
+    int recvrequests[10] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
+    if ( recvreqsize != 0 ) {
+      ostringstream strstream ;
+      strstream << "=========================================================" << endl
+                << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+                << "=========================================================" << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+  }
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+// MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Variable-length point-to-point test : at iteration i rank 0 sends
+// 1000*i ints (0..9000) and rank 1 receives into a buffer one element
+// larger, checking the reported outcount and the received contents.
+// Must be run with at least 2 procs ; extra ranks only hit the barrier.
+void MPIAccessTest::test_MPI_Access_Send_Recv_Length() {
+
+  cout << "test_MPI_Access_Send_Recv_Length" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 2 ) {
+    ostringstream strstream ;
+    strstream << "test_MPI_Access_Send_Recv_Length must be runned with 2 procs" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+
+  cout << "test_MPI_Access_Send_Recv_Length" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Only ranks 0 and 1 take part in the exchange.
+  if ( myrank >= 2 ) {
+    mpi_access.barrier() ;
+    delete group ;
+    return ;
+  }
+
+  int target = 1 - myrank ;
+  int RequestId[10] ;
+  int sendbuf[9000] ;
+  int recvbuf[9000] ;
+  bool recvbufok ;
+  int sts ;
+  int i , j ;
+  for ( i = 0 ; i < 9000 ; i++ ) {
+    sendbuf[i] = i ;
+  }
+  for ( i = 0 ; i < 10 ; i++ ) {
+    if ( myrank == 0 ) {
+      // NOTE(review): the i==0 send has length 0 while rank 1 posts no
+      // matching recv for it — assumed MPIAccess drops zero-length sends ;
+      // confirm against MPIAccess::send.
+      sts = mpi_access.send( sendbuf, 1000*i, MPI_INT, target, RequestId[i] ) ;
+      cout << "test" << myrank << " Send RequestId " << RequestId[i]
+           << " tag " << mpi_access.sendMPITag(target) << endl ;
+    }
+    else {
+      sts = MPI_SUCCESS ;
+      RequestId[i] = -1 ;
+      int outcount = 0 ;
+      if ( i != 0 ) {
+        // Recv length is 1000*i+1 on purpose : outcount must still come
+        // back as the 1000*i actually sent.
+        sts = mpi_access.recv( recvbuf,1000*i+1,MPI_INT,target, RequestId[i],
+                               &outcount ) ;
+      }
+      //int source, tag, error, outcount ;
+      //mpi_access.Status( RequestId[i], source, tag, error, outcount, true) ;
+      cout << "test" << myrank << " Recv RequestId " << RequestId[i]
+           << " tag " << mpi_access.recvMPITag(target)
+           << " outcount " << outcount << endl ;
+      recvbufok = true ;
+      for ( j = 0 ; j < outcount ; j++ ) {
+        if ( recvbuf[j] != j ) {
+          cout << "test" << myrank << " recvbuf[ " << j << " ] = " << recvbuf[j]
+               << endl ;
+          recvbufok = false ;
+          break ;
+        }
+      }
+      // Logical || / ! (the original used bitwise | on booleans).
+      if ( (outcount != 1000*i) || !recvbufok ) {
+        ostringstream strstream ;
+        strstream << "==========================================================="
+                  << endl << "test" << myrank << " outcount " << outcount
+                  << " recvbuf " << recvbuf << " KO"
+                  << "==========================================================="
+                  << endl ;
+        cout << strstream.str() << endl ;
+        CPPUNIT_FAIL( strstream.str() ) ;
+      }
+    }
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr << " "
+         << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    mpi_access.check() ;
+  }
+  int flag ;
+  mpi_access.testAll(10,RequestId,flag) ;
+  if ( !flag ) {
+    ostringstream strstream ;
+    strstream << "test" << myrank << " flag " << flag << " KO" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+  mpi_access.waitAll(10,RequestId) ;
+  mpi_access.check() ;
+
+  // After waitAll no request may be left pending on either side.
+  if ( myrank == 0 ) {
+    int sendrequests[10] ;
+    int sendreqsize = mpi_access.sendRequestIds( target , 10 , sendrequests ) ;
+    if ( sendreqsize != 0 ) {
+      ostringstream strstream ;
+      strstream << "=========================================================" << endl
+                << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+                << "=========================================================" << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+  }
+  else {
+    // Query the recv side (the original mistakenly called sendRequestIds
+    // here, so pending recv requests were never really checked).
+    int recvrequests[10] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , 10 , recvrequests ) ;
+    if ( recvreqsize != 0 ) {
+      ostringstream strstream ;
+      strstream << "=========================================================" << endl
+                << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+                << "=========================================================" << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+  }
+
+  mpi_access.barrier() ;
+
+  delete group ;
+
+// MPI_Finalize();
+
+  cout << "test" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Test of the MPIAccess "time message" machinery : at each of 10 pseudo
+// time steps rank 0 ISend()s a TimeMessage (time/deltatime) plus an int
+// payload to rank 1, which IRecv()s both ; all requests posted so far are
+// polled with test() and their status is verified. Must be run with at
+// least 2 procs ; extra ranks only hit the barrier.
+void MPIAccessTest::test_MPI_Access_Time() {
+
+  cout << "test_MPI_Access_Time" << endl ;
+
+  // MPI_Init(&argc, &argv) ;
+
+  int size ;
+  int myrank ;
+  MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+  MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+  if ( size < 2 ) {
+    ostringstream strstream ;
+    strstream << "test_MPI_Access_Time must be runned with 2 procs" << endl ;
+    cout << strstream.str() << endl ;
+    CPPUNIT_FAIL( strstream.str() ) ;
+  }
+
+  cout << "test_MPI_Access_Time" << myrank << endl ;
+
+  ParaMEDMEM::CommInterface interface ;
+
+  ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+  ParaMEDMEM::MPIAccess mpi_access( group ) ;
+
+  // Number of time steps (was "#define maxreq 10" ; a typed constant is
+  // scoped to this function and does not leak into the translation unit).
+  const int maxreq = 10 ;
+
+  if ( myrank >= 2 ) {
+    cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
+    mpi_access.barrier() ;
+    cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
+    delete group ;
+    cout << "test_MPI_Access_Time" << myrank << " OK" << endl ;
+    return ;
+  }
+
+  int target = 1 - myrank ;
+  int SendTimeRequestId[maxreq] ;
+  int RecvTimeRequestId[maxreq] ;
+  int SendRequestId[maxreq] ;
+  int RecvRequestId[maxreq] ;
+  int sts ;
+  int sendbuf[maxreq] ;
+  int recvbuf[maxreq] ;
+  int i = 0 ;
+  ParaMEDMEM::TimeMessage aSendTimeMsg[maxreq] ;
+  ParaMEDMEM::TimeMessage aRecvTimeMsg[maxreq] ;
+  double t ;
+  double dt = 1. ;
+  double maxt = 10. ;
+  for ( t = 0 ; t < maxt ; t = t+dt ) {
+    if ( myrank == 0 ) {
+      // Post the time message, then the int payload of this step.
+      aSendTimeMsg[i].time = t ;
+      aSendTimeMsg[i].deltatime = dt ;
+      //aSendTimeMsg[i].maxtime = maxt ;
+      //sts = mpi_access.ISend( &aSendTimeMsg , mpi_access.timeExtent() ,
+      sts = mpi_access.ISend( &aSendTimeMsg[i] , 1 ,
+                              mpi_access.timeType() , target ,
+                              SendTimeRequestId[i]) ;
+      cout << "test" << myrank << " ISend RequestId " << SendTimeRequestId[i]
+           << " tag " << mpi_access.sendMPITag(target) << endl ;
+      sendbuf[i] = i ;
+      sts = mpi_access.ISend(&sendbuf[i],1,MPI_INT,target, SendRequestId[i]) ;
+      cout << "test" << myrank << " ISend RequestId " << SendRequestId[i]
+           << " tag " << mpi_access.sendMPITag(target) << endl ;
+    }
+    else {
+      //sts = mpi_access.IRecv( &aRecvTimeMsg , mpi_access.timeExtent() ,
+      sts = mpi_access.IRecv( &aRecvTimeMsg[i] , 1 ,
+                              mpi_access.timeType() , target ,
+                              RecvTimeRequestId[i]) ;
+      cout << "test" << myrank << " IRecv RequestId " << RecvTimeRequestId[i]
+           << " tag " << mpi_access.recvMPITag(target) << endl ;
+      sts = mpi_access.IRecv(&recvbuf[i],1,MPI_INT,target, RecvRequestId[i]) ;
+      cout << "test" << myrank << " IRecv RequestId " << RecvRequestId[i]
+           << " tag " << mpi_access.recvMPITag(target) << endl ;
+    }
+    // Poll every request posted so far and check the completed ones.
+    int j ;
+    for (j = 0 ; j <= i ; j++) {
+      int flag ;
+      if ( myrank == 0 ) {
+        mpi_access.test( SendTimeRequestId[j], flag ) ;
+      }
+      else {
+        mpi_access.test( RecvTimeRequestId[j], flag ) ;
+      }
+      if ( flag ) {
+        int target,source, tag, error, outcount ;
+        if ( myrank == 0 ) {
+          mpi_access.status( SendTimeRequestId[j], target, tag, error, outcount,
+                             true ) ;
+          cout << "test" << myrank << " Test(Send TimeRequestId " << SendTimeRequestId[j]
+               << ") : target " << target << " tag " << tag << " error " << error
+               << " flag " << flag << aSendTimeMsg[j] << endl ;
+        }
+        else {
+          mpi_access.status( RecvTimeRequestId[j], source, tag, error, outcount,
+                             true ) ;
+          cout << "test" << myrank << " Test(Recv TimeRequestId "
+               << RecvTimeRequestId[j] << ") : source " << source << " tag " << tag
+               << " error " << error << " outcount " << outcount
+               << " flag " << flag << aRecvTimeMsg[j] << endl ;
+          // Logical || (the original used bitwise |) ; times are the
+          // exact doubles 0..9, so the comparison with j is exact.
+          if ( (outcount != 1) || (aRecvTimeMsg[j].time != j) ) {
+            ostringstream strstream ;
+            strstream << "==========================================================="
+                      << endl << "test" << myrank << " outcount " << outcount << " KO"
+                      << " RecvTimeRequestId " << RecvTimeRequestId[j] << endl
+                      << "==========================================================="
+                      << endl ;
+            cout << strstream.str() << endl ;
+            CPPUNIT_FAIL( strstream.str() ) ;
+          }
+          else {
+            cout << "==========================================================="
+                 << endl << "test" << myrank << " outcount " << outcount
+                 << " RecvTimeRequestId " << RecvTimeRequestId[j] << " OK" << endl
+                 << "==========================================================="
+                 << endl ;
+          }
+        }
+      }
+      if ( myrank == 0 ) {
+        mpi_access.test( SendRequestId[j], flag ) ;
+      }
+      else {
+        mpi_access.test( RecvRequestId[j], flag ) ;
+      }
+      if ( flag ) {
+        int target,source, tag, error, outcount ;
+        if ( myrank == 0 ) {
+          mpi_access.status( SendRequestId[j], target, tag, error, outcount,
+                             true ) ;
+          cout << "test" << myrank << " Test(Send RequestId " << SendRequestId[j]
+               << ") : target " << target << " tag " << tag << " error " << error
+               << " flag " << flag << endl ;
+        }
+        else {
+          mpi_access.status( RecvRequestId[j], source, tag, error, outcount,
+                             true ) ;
+          cout << "test" << myrank << " Test(Recv RequestId "
+               << RecvRequestId[j] << ") : source " << source << " tag " << tag
+               << " error " << error << " outcount " << outcount
+               << " flag " << flag << endl ;
+          // Logical || (the original used bitwise |).
+          if ( (outcount != 1) || (recvbuf[j] != j) ) {
+            ostringstream strstream ;
+            strstream << "==========================================================="
+                      << endl << "test" << myrank << " outcount "
+                      << outcount << " recvbuf " << recvbuf[j] << " KO" << endl
+                      << "==========================================================="
+                      << endl ;
+            cout << strstream.str() << endl ;
+            CPPUNIT_FAIL( strstream.str() ) ;
+          }
+          else {
+            cout << "==========================================================="
+                 << endl << "test" << myrank << " outcount " << outcount
+                 << " RequestId " << RecvRequestId[j] << " OK" << endl
+                 << "==========================================================="
+                 << endl ;
+          }
+        }
+      }
+    }
+    char msgerr[MPI_MAX_ERROR_STRING] ;
+    int lenerr ;
+    mpi_access.errorString(sts, msgerr, &lenerr) ;
+    cout << "test" << myrank << " lenerr " << lenerr << " "
+         << msgerr << endl ;
+
+    if ( sts != MPI_SUCCESS ) {
+      ostringstream strstream ;
+      strstream << "==========================================================="
+                << "test" << myrank << " KO"
+                << "==========================================================="
+                << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    i = i + 1 ;
+  }
+
+  mpi_access.check() ;
+  if ( myrank == 0 ) {
+    mpi_access.waitAll(maxreq, SendTimeRequestId) ;
+    mpi_access.deleteRequests(maxreq, SendTimeRequestId) ;
+    mpi_access.waitAll(maxreq, SendRequestId) ;
+    mpi_access.deleteRequests(maxreq, SendRequestId) ;
+  }
+  else {
+    mpi_access.waitAll(maxreq, RecvTimeRequestId) ;
+    mpi_access.deleteRequests(maxreq, RecvTimeRequestId) ;
+    mpi_access.waitAll(maxreq, RecvRequestId) ;
+    mpi_access.deleteRequests(maxreq, RecvRequestId) ;
+  }
+  mpi_access.check() ;
+
+  // After waitAll/deleteRequests no request may remain on either side.
+  if ( myrank == 0 ) {
+    int sendrequests[2*maxreq] ;
+    int sendreqsize = mpi_access.sendRequestIds( target , 2*maxreq , sendrequests ) ;
+    if ( sendreqsize != 0 ) {
+      ostringstream strstream ;
+      strstream << "=========================================================" << endl
+                << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+                << "=========================================================" << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    else {
+      cout << "=========================================================" << endl
+           << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+           << "=========================================================" << endl ;
+    }
+  }
+  else {
+    // Query the recv side (the original mistakenly called sendRequestIds
+    // here, so pending recv requests were never really checked).
+    int recvrequests[2*maxreq] ;
+    int recvreqsize = mpi_access.recvRequestIds( target , 2*maxreq , recvrequests ) ;
+    if ( recvreqsize != 0 ) {
+      ostringstream strstream ;
+      strstream << "=========================================================" << endl
+                << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+                << "=========================================================" << endl ;
+      cout << strstream.str() << endl ;
+      CPPUNIT_FAIL( strstream.str() ) ;
+    }
+    else {
+      cout << "=========================================================" << endl
+           << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+           << "=========================================================" << endl ;
+    }
+  }
+
+  cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->Barrier" << endl ;
+  mpi_access.barrier() ;
+  cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->Barrier" << endl ;
+
+  delete group ;
+
+  // MPI_Finalize();
+
+  cout << "test_MPI_Access_Time" << myrank << " OK" << endl ;
+
+  return ;
+}
+
+
+
+
--- /dev/null
+// Copyright (C) 2007-2008 CEA/DEN, EDF R&D
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+//
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
+#include <mpi.h>
+
+#include "MPIAccessTest.hxx"
+#include <cppunit/TestAssert.h>
+
+//#include "CommInterface.hxx"
+//#include "ProcessorGroup.hxx"
+//#include "MPIProcessorGroup.hxx"
+#include "MPIAccess.hxx"
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+// Abort the running test (via CPPUNIT_FAIL) when an MPIAccess call did not
+// return MPI_SUCCESS; prints the MPI error text first. No-op on success.
+void chksts( int sts , int myrank , ParaMEDMEM::MPIAccess * mpi_access ) {
+ if ( sts == MPI_SUCCESS )
+ return ;
+ char errtext[MPI_MAX_ERROR_STRING] ;
+ int errlen ;
+ mpi_access->errorString(sts, errtext, &errlen) ;
+ cout << "test" << myrank << " lenerr " << errlen << " " << errtext << endl ;
+ ostringstream diag ;
+ diag << "==========================================================="
+ << "test" << myrank << " KO"
+ << "==========================================================="
+ << endl ;
+ cout << diag.str() << endl ;
+ CPPUNIT_FAIL( diag.str() ) ;
+}
+
+// Two-rank time-stepping protocol test for MPIAccess.
+// Rank 0 advances with dt=2 and at every step ISends a TimeMessage
+// (time, deltatime) followed by one MPI_INT of data; deltatime==0 marks the
+// final step. Rank 1 advances with dt=1 and consumes the stream
+// asynchronously: it keeps one IRecv of a TimeMessage in flight, cancels
+// messages that are already out of date, and receives the data value that
+// matches its current time. Ranks >= 2 only take part in the two barriers.
+void MPIAccessTest::test_MPI_Access_Time_0() {
+
+ cout << "test_MPI_Access_Time_0" << endl ;
+
+// MPI_Init(&argc, &argv) ;
+
+ int size ;
+ int myrank ;
+ MPI_Comm_size(MPI_COMM_WORLD,&size) ;
+ MPI_Comm_rank(MPI_COMM_WORLD,&myrank) ;
+
+ if ( size < 2 ) {
+ ostringstream strstream ;
+ strstream << "usage :" << endl
+ << "mpirun -np <nbprocs> test_MPI_Access_Time_0" <<endl
+ << " nbprocs =2" << endl
+ << "test must be runned with 2 procs" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+
+// Number of time steps posted by rank 0. Note this is a plain #define, so
+// the macro stays visible for the rest of the translation unit.
+#define maxreq 100
+
+ double t ;
+ double dt[2] = {2., 1.} ;
+ double maxt = maxreq/dt[myrank] ;
+
+ cout << "test_MPI_Access_Time_0 rank" << myrank << endl ;
+
+ ParaMEDMEM::CommInterface interface ;
+
+ ParaMEDMEM::MPIProcessorGroup* group = new ParaMEDMEM::MPIProcessorGroup(interface) ;
+
+ ParaMEDMEM::MPIAccess * mpi_access = new ParaMEDMEM::MPIAccess( group ) ;
+
+ if ( myrank >= 2 ) {
+ cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
+ cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
+ delete group ;
+ delete mpi_access ;
+ cout << "test_MPI_Access_Time" << myrank << " OK" << endl ;
+ return ;
+ }
+
+ int target = 1 - myrank ;
+ int SendTimeRequestId[maxreq] ;
+ int RecvTimeRequestId[maxreq] ;
+ int SendRequestId[maxreq] ;
+ int RecvRequestId[maxreq] ;
+ int sts ;
+ int sendbuf[maxreq] ;
+ int recvbuf[maxreq] ;
+ ParaMEDMEM::TimeMessage aSendTimeMsg[maxreq] ;
+ int lasttime = -1 ;
+ ParaMEDMEM::TimeMessage RecvTimeMessages[maxreq+1] ;
+ ParaMEDMEM::TimeMessage *aRecvTimeMsg = &RecvTimeMessages[1] ;
+// aRecvTimeMsg points one past RecvTimeMessages[0], so index -1 is a valid
+// sentinel slot used while lasttime == -1 (before the first time message).
+// mpi_access->Trace() ;
+ int istep = 0 ;
+ for ( t = 0 ; t < maxt ; t = t+dt[myrank] ) {
+ cout << "test" << myrank << " ==========================TIME " << t
+ << " ==========================" << endl ;
+// Sender side (rank 0): post the time message then the data for this step.
+ if ( myrank == 0 ) {
+ aSendTimeMsg[istep].time = t ;
+ aSendTimeMsg[istep].deltatime = dt[myrank] ;
+ //aSendTimeMsg[istep].maxtime = maxt ;
+ if ( t+dt[myrank] >= maxt ) {
+ aSendTimeMsg[istep].deltatime = 0 ;
+ }
+ sts = mpi_access->ISend( &aSendTimeMsg[istep] , 1 ,
+ mpi_access->timeType() , target ,
+ SendTimeRequestId[istep]) ;
+ cout << "test" << myrank << " ISend TimeRequestId " << SendTimeRequestId[istep]
+ << " tag " << mpi_access->MPITag(SendTimeRequestId[istep]) << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ sendbuf[istep] = istep ;
+ sts = mpi_access->ISend(&sendbuf[istep],1,MPI_INT,target, SendRequestId[istep]) ;
+ cout << "test" << myrank << " ISend Data RequestId " << SendRequestId[istep]
+ << " tag " << mpi_access->MPITag(SendRequestId[istep]) << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+//CheckSent
+//=========
+ int sendrequests[2*maxreq] ;
+ int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq ,
+ sendrequests ) ;
+ int j , flag ;
+ for ( j = 0 ; j < sendreqsize ; j++ ) {
+ sts = mpi_access->test( sendrequests[j] , flag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ if ( flag ) {
+ mpi_access->deleteRequest( sendrequests[j] ) ;
+ cout << "test" << myrank << " " << j << ". " << sendrequests[j]
+ << " sendrequest deleted" << endl ;
+ }
+ }
+ }
+ else {
+//InitRecv
+//========
+ if ( t == 0 ) {
+ aRecvTimeMsg[lasttime].time = 0 ;
+ sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
+ mpi_access->timeType() ,
+ target , RecvTimeRequestId[lasttime+1]) ;
+ cout << "test" << myrank << " t == 0 IRecv TimeRequestId "
+ << RecvTimeRequestId[lasttime+1]
+ << " MPITag " << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
+ << " MPICompleted "
+ << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] ) << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ else {
+ cout << "test" << myrank << " t # 0 lasttime " << lasttime << endl ;
+//InitialOutTime
+//==============
+ bool outtime = false ;
+ if ( lasttime != -1 ) {
+ if ( t <= aRecvTimeMsg[lasttime-1].time ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " t " << t << " <= "
+ << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " KO" << endl
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "==========================================================="
+ << endl << "test" << myrank << " t " << t << " > "
+ << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " OK" << endl
+ << "==========================================================="
+ << endl ;
+ }
+ //outtime = ((aRecvTimeMsg[lasttime].time +
+ // aRecvTimeMsg[lasttime].deltatime) >=
+ // aRecvTimeMsg[lasttime].maxtime) ;
+ outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
+ }
+// CheckRecv - CheckTime
+// We have lasttime such that :
+// aRecvTimeMsg[ lasttime-1 ].time < T(i-1) <= aRecvTimeMsg[ lasttime ].time
+// We look for lasttime such that :
+// aRecvTimeMsg[ lasttime-1 ].time < T(i) <= aRecvTimeMsg[ lasttime ].time
+ if ( t <= aRecvTimeMsg[lasttime].time ) {
+ outtime = false ;
+ }
+ cout << "test" << myrank << " while outtime( " << outtime << " && t " << t
+ << " > aRecvTimeMsg[ " << lasttime << " ] "
+ << aRecvTimeMsg[lasttime].time << " )" << endl ;
+ while ( !outtime && (t > aRecvTimeMsg[lasttime].time) ) {
+ lasttime += 1 ;
+//TimeMessage
+//===========
+ sts = mpi_access->wait( RecvTimeRequestId[lasttime] ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ cout << "test" << myrank << " Wait done RecvTimeRequestId "
+ << RecvTimeRequestId[lasttime] << " lasttime " << lasttime
+ << " tag " << mpi_access->MPITag(RecvTimeRequestId[lasttime])
+ << aRecvTimeMsg[lasttime] << endl ;
+ if ( lasttime == 0 ) {
+ aRecvTimeMsg[lasttime-1] = aRecvTimeMsg[lasttime] ;
+ }
+ mpi_access->deleteRequest( RecvTimeRequestId[lasttime] ) ;
+
+ double deltatime = aRecvTimeMsg[lasttime].deltatime ;
+ //double maxtime = aRecvTimeMsg[lasttime].maxtime ;
+ double nexttime = aRecvTimeMsg[lasttime].time + deltatime ;
+ cout << "test" << myrank << " t " << t << " lasttime " << lasttime
+ << " deltatime " << deltatime
+ << " nexttime " << nexttime << endl ;
+ //if ( nexttime < maxtime && t > nexttime ) {
+ if ( deltatime != 0 && t > nexttime ) {
+//CheckRecv :
+//=========
+ //while ( nexttime < maxtime && t > nexttime ) {
+ while ( deltatime != 0 && t > nexttime ) {
+ int source, MPITag, outcount ;
+ MPI_Datatype datatype ;
+ sts = mpi_access->probe( target , source, MPITag, datatype,
+ outcount ) ;
+ chksts( sts , myrank , mpi_access ) ;
+// Cancel DataMessages until a TimeMessage is found
+ int cancelflag ;
+ while ( !mpi_access->isTimeMessage( MPITag ) ) {
+ sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
+ //sts = mpi_access->cancel( source, datatype, outcount ,
+ //RecvRequestId[lasttime] ,
+ cancelflag ) ;
+ cout << "test" << myrank << " Recv TO CANCEL RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << " cancelflag " << cancelflag << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ sts = mpi_access->probe( target , source, MPITag, datatype,
+ outcount ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+//We can advance in time
+ nexttime += deltatime ;
+ //if ( nexttime < maxtime && t > nexttime ) {
+ if ( deltatime != 0 && t > nexttime ) {
+// Cancel the TimeMessage
+ sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
+ //sts = mpi_access->cancel( source, datatype, outcount ,
+ //RecvRequestId[lasttime] ,
+ cancelflag ) ;
+ cout << "test" << myrank << " Time TO CANCEL RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << " cancelflag " << cancelflag << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ }
+ }
+ else {
+//DoRecv
+//======
+ cout << "test" << myrank << " Recv target " << target
+ << " lasttime " << lasttime
+ << " lasttime-1 " << aRecvTimeMsg[lasttime-1]
+ << " lasttime " << aRecvTimeMsg[lasttime]
+ << endl ;
+ sts = mpi_access->recv(&recvbuf[lasttime],1,MPI_INT,target,
+ RecvRequestId[lasttime]) ;
+ cout << "test" << myrank << " Recv RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ //outtime = ((aRecvTimeMsg[lasttime].time +
+ // aRecvTimeMsg[lasttime].deltatime) >=
+ // aRecvTimeMsg[lasttime].maxtime) ;
+ outtime = aRecvTimeMsg[lasttime].deltatime == 0 ;
+ if ( !outtime ) {
+// Asynchronously read one time message ahead
+ sts = mpi_access->IRecv( &aRecvTimeMsg[lasttime+1] , 1 ,
+ mpi_access->timeType() , target ,
+ RecvTimeRequestId[lasttime+1]) ;
+ cout << "test" << myrank << " IRecv TimeRequestId "
+ << RecvTimeRequestId[lasttime+1] << " MPITag "
+ << mpi_access->MPITag( RecvTimeRequestId[lasttime+1] )
+ << " MPICompleted "
+ << mpi_access->MPICompleted( RecvTimeRequestId[lasttime+1] )
+ << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ else if ( t <= aRecvTimeMsg[lasttime].time ) {
+ outtime = false ;
+ }
+ }
+
+ //printf("DEBUG t %.15f Msg[lasttime-1] %.15f Msg[lasttime] %.15f \n",t,
+ // aRecvTimeMsg[lasttime-1].time,aRecvTimeMsg[lasttime].time) ;
+ if ( ((t <= aRecvTimeMsg[lasttime-1].time) ||
+ (t > aRecvTimeMsg[lasttime].time)) && !outtime ) {
+ ostringstream strstream ;
+ strstream << "==========================================================="
+ << endl << "test" << myrank << " t " << t << " <= "
+ << "aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " ou t " << t << " > "
+ << "aRecvTimeMsg[ " << lasttime << " ].time "
+ << aRecvTimeMsg[lasttime].time << endl
+ << " ou bien outtime " << outtime << " KO RequestTimeIds "
+ << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
+ << " RequestIds "
+ << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
+ << "==========================================================="
+ << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "==========================================================="
+ << endl << "test" << myrank
+ << " aRecvTimeMsg[ " << lasttime << "-1 ].time "
+ << aRecvTimeMsg[lasttime-1].time << " < t " << t << " <= "
+ << "aRecvTimeMsg[ " << lasttime << " ].time "
+ << aRecvTimeMsg[lasttime].time << endl
+ << " ou bien outtime " << outtime << " OK RequestTimeIds "
+ << RecvTimeRequestId[lasttime-1] << " " << RecvTimeRequestId[lasttime]
+ << " RequestIds "
+ << RecvRequestId[lasttime-1] << " " << RecvRequestId[lasttime] << endl
+ << "==========================================================="
+ << endl ;
+ }
+ }
+ }
+ chksts( sts , myrank , mpi_access ) ;
+ istep = istep + 1 ;
+ }
+
+// All steps done: synchronize, then drain (rank 0) or cancel (rank 1)
+// whatever is still pending before checking that the queues are empty.
+ cout << "test" << myrank << " Barrier :" << endl ;
+ mpi_access->barrier() ;
+
+ mpi_access->check() ;
+
+ if ( myrank == 0 ) {
+//CheckFinalSent
+//==============
+ cout << "test" << myrank << " CheckFinalSent :" << endl ;
+ int sendrequests[2*maxreq] ;
+ int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
+ int j ;
+ for ( j = 0 ; j < sendreqsize ; j++ ) {
+ sts = mpi_access->wait( sendrequests[j] ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ mpi_access->deleteRequest( sendrequests[j] ) ;
+ cout << "test" << myrank << " " << j << ". " << sendrequests[j] << " deleted"
+ << endl ;
+ }
+ }
+ else {
+ cout << "test" << myrank << " CheckFinalRecv :" << endl ;
+ int recvrequests[2*maxreq] ;
+ int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
+ int cancelflag ;
+ int j ;
+ for ( j = 0 ; j < recvreqsize ; j++ ) {
+ sts = mpi_access->cancel( recvrequests[j] , cancelflag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ mpi_access->deleteRequest( recvrequests[j] ) ;
+ cout << "test" << myrank << " " << j << ". " << recvrequests[j] << " deleted"
+ << " cancelflag " << cancelflag << endl ;
+ }
+ int source, MPITag, outcount , flag ;
+ MPI_Datatype datatype ;
+ sts = mpi_access->IProbe( target , source, MPITag, datatype,
+ outcount , flag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ while ( flag ) {
+ sts = mpi_access->cancel( source, MPITag, datatype, outcount ,
+ //sts = mpi_access->cancel( source, datatype, outcount ,
+ //RecvRequestId[lasttime] ,
+ cancelflag ) ;
+ cout << "test" << myrank << " TO CANCEL RequestId "
+ << RecvRequestId[lasttime]
+ << " tag " << mpi_access->recvMPITag( target )
+ << " cancelflag " << cancelflag << endl ;
+ chksts( sts , myrank , mpi_access ) ;
+ sts = mpi_access->IProbe( target , source, MPITag, datatype,
+ outcount , flag ) ;
+ chksts( sts , myrank , mpi_access ) ;
+ }
+ }
+ mpi_access->check() ;
+
+ if ( myrank == 0 ) {
+ int sendrequests[2*maxreq] ;
+ int sendreqsize = mpi_access->sendRequestIds( target , 2*maxreq , sendrequests ) ;
+ if ( sendreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " sendreqsize " << sendreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+ else {
+ int recvrequests[2*maxreq] ;
+ int recvreqsize = mpi_access->recvRequestIds( target , 2*maxreq , recvrequests ) ;
+ if ( recvreqsize != 0 ) {
+ ostringstream strstream ;
+ strstream << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " KO" << endl
+ << "=========================================================" << endl ;
+ cout << strstream.str() << endl ;
+ CPPUNIT_FAIL( strstream.str() ) ;
+ }
+ else {
+ cout << "=========================================================" << endl
+ << "test" << myrank << " recvreqsize " << recvreqsize << " OK" << endl
+ << "=========================================================" << endl ;
+ }
+ }
+
+ int i ;
+ for ( i = 0 ; i <= lasttime ; i++ ) {
+ cout << "test" << myrank << " " << i << ". RecvTimeMsg "
+ << aRecvTimeMsg[i].time << " recvbuf " << recvbuf[i] << endl ;
+ }
+
+ cout << "test_MPI_Access_Time_0 rank" << myrank << " --> mpi_access->barrier" << endl ;
+ mpi_access->barrier() ;
+ cout << "test_MPI_Access_Time_0 rank" << myrank << " <-- mpi_access->barrier" << endl ;
+
+ delete group ;
+ delete mpi_access ;
+
+// MPI_Finalize();
+
+ cout << "test" << myrank << " OK" << endl ;
+
+ return ;
+}
+
+
+
+
--- /dev/null
+#include <time.h>
+#include <sys/times.h>
+#include <sys/time.h>
+#include "ParaMEDMEMTest.hxx"
+#include <cppunit/TestAssert.h>
+
+#include "CommInterface.hxx"
+#include "ProcessorGroup.hxx"
+#include "MPIProcessorGroup.hxx"
+#include "Topology.hxx"
+#include "DEC.hxx"
+#include "MxN_Mapping.hxx"
+#include "InterpKernelDEC.hxx"
+#include "ParaMESH.hxx"
+#include "ParaFIELD.hxx"
+#include "ComponentTopology.hxx"
+#include "ICoCoMEDField.hxx"
+#include "MEDLoader.hxx"
+
+#include <string>
+#include <cstring>
+
+// use this define to enable lines, execution of which leads to Segmentation Fault
+#define ENABLE_FAULTS
+
+// use this define to enable CPPUNIT asserts and fails, showing bugs
+#define ENABLE_FORCED_FAILURES
+
+#ifndef CLK_TCK
+#include <unistd.h>
+// CLK_TCK fallback: clock ticks per second for times() accounting.
+// Fix: the original definition carried a stray trailing ';' which only
+// compiled because every use site sits at the end of a statement; the ';'
+// is removed and the expansion parenthesized so the macro is safe inside
+// any expression.
+#define CLK_TCK (sysconf(_SC_CLK_TCK))
+#endif
+
+using namespace std;
+using namespace ParaMEDMEM;
+
+void testInterpKernelDEC_2D(const string& filename1, const string& meshname1,
+ const string& filename2, const string& meshname2,
+ int nproc_source, double epsilon, bool tri, bool all);
+void get_time( float *telps, float *tuser, float *tsys, float *tcpu );
+
+// Command-line driver for the InterpKernelDEC 2D performance test.
+// Options: -f1/-m1 source med file/mesh name, -f2/-m2 target med file/mesh
+// name (all four mandatory), -ns <n> number of source procs (default 1),
+// -eps <e> tolerance (default 1.e-6), -tri use the Triangulation
+// intersector, -all use the PointToPoint all-to-all method.
+int main(int argc, char *argv[])
+{
+ string filename1, filename2;
+ string meshname1, meshname2;
+ int nproc_source=1, rank;
+ double epsilon=1.e-6;
+ int count=0;
+ bool tri=false;
+ bool all=false;
+
+ MPI_Init(&argc,&argv);
+
+ for(int i=1;i<argc;i++){
+ if( strcmp(argv[i],"-f1") == 0 ){
+ filename1 = argv[++i];
+ count++;
+ }
+ else if( strcmp(argv[i],"-f2") == 0 ){
+ filename2 = argv[++i];
+ count++;
+ }
+ else if( strcmp(argv[i],"-m1") == 0 ){
+ meshname1 = argv[++i];
+ count++;
+ }
+ else if( strcmp(argv[i],"-m2") == 0 ){
+ meshname2 = argv[++i];
+ count++;
+ }
+ else if( strcmp(argv[i],"-ns") == 0 ){
+ nproc_source = atoi(argv[++i]);
+ }
+ else if( strcmp(argv[i],"-eps") == 0 ){
+ epsilon = atof(argv[++i]);
+ }
+ else if( strcmp(argv[i],"-tri") == 0 ){
+ tri = true;
+ }
+ else if( strcmp(argv[i],"-all") == 0 ){
+ all = true;
+ }
+ }
+
+ if( count != 4 ){
+ cout << "usage test_perf -f1 filename1 -m1 meshname1 -f2 filename2 -m2 meshname2 (-ns nproc_source -eps epsilon -tri -all)" << endl;
+ // Fix: MPI_Init succeeded above, so MPI_Finalize must be called on every
+ // exit path -- leaving an MPI program without it is erroneous per the
+ // MPI standard and hangs some implementations.
+ MPI_Finalize();
+ exit(0);
+ }
+
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ testInterpKernelDEC_2D(filename1,meshname1,filename2,meshname2,nproc_source,epsilon,tri,all);
+
+ MPI_Finalize();
+ return 0;
+}
+
+// Couples a source mesh group (ranks [0,nproc_source)) with a target mesh
+// group (the remaining ranks) through an InterpKernelDEC and reports, via
+// get_time(), the wall/CPU time spent in mesh I/O, synchronize() and the
+// sendData()/recvData() exchanges. File and mesh names are suffixed with
+// the rank when a group has more than one process.
+// NOTE(review): 'epsilon' is currently unused -- the CPPUNIT double
+// comparison that consumed it is commented out below.
+void testInterpKernelDEC_2D(const string& filename_xml1, const string& meshname1,
+ const string& filename_xml2, const string& meshname2,
+ int nproc_source, double epsilon, bool tri, bool all)
+{
+ float tcpu, tcpu_u, tcpu_s, telps;
+ int size;
+ int rank;
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ set<int> self_procs;
+ set<int> procs_source;
+ set<int> procs_target;
+
+ // The two groups partition all ranks, so every process enters exactly one
+ // of the containsMyRank() branches below.
+ for (int i=0; i<nproc_source; i++)
+ procs_source.insert(i);
+ for (int i=nproc_source; i<size; i++)
+ procs_target.insert(i);
+ self_procs.insert(rank);
+
+ ParaMEDMEM::CommInterface interface;
+
+ ParaMEDMEM::ProcessorGroup* self_group = new ParaMEDMEM::MPIProcessorGroup(interface,self_procs);
+ ParaMEDMEM::ProcessorGroup* target_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_target);
+ ParaMEDMEM::ProcessorGroup* source_group = new ParaMEDMEM::MPIProcessorGroup(interface,procs_source);
+
+ //loading the geometry for the source group
+
+ ParaMEDMEM::InterpKernelDEC dec (*source_group,*target_group);
+ if(tri)
+ dec.setIntersectionType(INTERP_KERNEL::Triangulation);
+ else
+ dec.setIntersectionType(INTERP_KERNEL::Convex);
+
+ ParaMEDMEM::MEDCouplingUMesh* mesh;
+ ParaMEDMEM::ParaMESH* paramesh;
+ ParaMEDMEM::ParaFIELD* parafield;
+ ICoCo::Field* icocofield ;
+
+ // To remove tmp files from disk
+ ParaMEDMEMTest_TmpFilesRemover aRemover;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (source_group->containsMyRank()){
+ string master = filename_xml1;
+
+ ostringstream strstream;
+ if( nproc_source == 1 )
+ strstream <<master<<".med";
+ else
+ strstream <<master<<rank+1<<".med";
+
+ ostringstream meshname ;
+ if( nproc_source == 1 )
+ meshname<< meshname1;
+ else
+ meshname<< meshname1<<"_"<< rank+1;
+
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ mesh=MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ if( rank == 0 )
+ cout << "IO : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
+ // NOTE(review): ReadUMeshFromFile hands back a mesh the caller owns;
+ // this extra incrRef() is matched by only one decrRef() at the end of
+ // the function, so one reference appears to leak -- verify against
+ // MEDLoader's ownership contract (same pattern in the target branch).
+ mesh->incrRef();
+
+ paramesh=new ParaMESH (mesh,*source_group,"source mesh");
+
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafield = new ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo);
+
+ // Source field is a constant 1.0 per cell.
+ int nb_local=mesh->getNumberOfCells();
+ double *value=parafield->getField()->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=1.0;
+
+ icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+
+ dec.attachLocalField(icocofield);
+ }
+
+ //loading the geometry for the target group
+ if (target_group->containsMyRank()){
+ string master= filename_xml2;
+ ostringstream strstream;
+ if( (size-nproc_source) == 1 )
+ strstream << master<<".med";
+ else
+ strstream << master<<(rank-nproc_source+1)<<".med";
+ ostringstream meshname ;
+ if( (size-nproc_source) == 1 )
+ meshname<< meshname2;
+ else
+ meshname<< meshname2<<"_"<<rank-nproc_source+1;
+
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ mesh = MEDLoader::ReadUMeshFromFile(strstream.str().c_str(),meshname.str().c_str(),0);
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ mesh->incrRef();
+
+ paramesh=new ParaMESH (mesh,*target_group,"target mesh");
+ ParaMEDMEM::ComponentTopology comptopo;
+ parafield = new ParaFIELD(ON_CELLS,NO_TIME,paramesh, comptopo);
+
+ // Target field starts at 0.0 and is overwritten by the exchange.
+ int nb_local=mesh->getNumberOfCells();
+ double *value=parafield->getField()->getArray()->getPointer();
+ for(int ielem=0; ielem<nb_local;ielem++)
+ value[ielem]=0.0;
+ icocofield=new ICoCo::MEDField((MEDCouplingUMesh *)paramesh->getCellMesh(),parafield->getField());
+
+ dec.attachLocalField(icocofield);
+ }
+
+
+ //attaching a DEC to the source group
+ double field_before_int;
+ double field_after_int;
+
+ if (source_group->containsMyRank()){
+ field_before_int = parafield->getVolumeIntegral(0,true);
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ dec.synchronize();
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ if( rank == 0 )
+ cout << "SYNCHRONIZE : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
+ cout<<"DEC usage"<<endl;
+ dec.setForcedRenormalization(false);
+ if(all)
+ dec.setAllToAllMethod(PointToPoint);
+
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ dec.sendData();
+
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ if( rank == 0 )
+ cout << "SEND DATA : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
+ dec.recvData();
+
+ field_after_int = parafield->getVolumeIntegral(0,true);
+// CPPUNIT_ASSERT_DOUBLES_EQUAL(field_before_int, field_after_int, epsilon);
+
+ }
+
+ //attaching a DEC to the target group
+ if (target_group->containsMyRank()){
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ dec.synchronize();
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ dec.setForcedRenormalization(false);
+ if(all)
+ dec.setAllToAllMethod(PointToPoint);
+
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ dec.recvData();
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ dec.sendData();
+ }
+
+ get_time( &telps, &tcpu_u, &tcpu_s, &tcpu );
+ if( rank == 0 )
+ cout << "RECV DATA : Telapse = " << telps << " TuserCPU = " << tcpu_u << " TsysCPU = " << tcpu_s << " TCPU = " << tcpu << endl;
+
+ delete source_group;
+ delete target_group;
+ delete self_group;
+ delete paramesh;
+ delete parafield;
+ mesh->decrRef() ;
+ delete icocofield;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ cout << "end of InterpKernelDEC_2D test"<<endl;
+}
+
+/* Interval timers. Each call returns the time elapsed since the previous
+ * call (all baselines are function-static and zero-initialized, so the very
+ * first call measures "since the Epoch / process start" and is meant to be
+ * discarded -- callers always use it in before/after pairs):
+ *   *telps : wall-clock seconds (gettimeofday)
+ *   *tuser : user CPU seconds   (times)
+ *   *tsys  : system CPU seconds (times)
+ *   *tcpu  : total CPU seconds  (clock)
+ * An MPI_Barrier synchronizes all ranks before sampling, so the elapsed
+ * time is comparable across processes.
+ */
+void get_time( float *telps, float *tuser, float *tsys, float *tcpu )
+{
+
+ /* Variables declaration */
+ static time_t zsec = 0;
+ static long zusec = 0;
+ time_t nsec;
+ long nusec;
+ static clock_t zclock = 0;
+ clock_t nclock;
+ static clock_t zuser = 0;
+ static clock_t zsys = 0;
+ clock_t nuser, nsys;
+
+ struct timeval tp;
+ struct timezone tzp;
+ struct tms local;
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Elapsed time reading */
+
+ gettimeofday(&tp,&tzp);
+ nsec = tp.tv_sec;
+ nusec = tp.tv_usec;
+ /* Fix: tv_usec counts microseconds, so the fractional part must be
+ divided by 1e6. The original divided by CLOCKS_PER_SEC, which is
+ only coincidentally 1000000 on POSIX and is unrelated to
+ gettimeofday()'s resolution. */
+ *telps = (float)(nsec-zsec) + (float)(nusec-zusec)/1.0e6f;
+
+ zsec = nsec;
+ zusec = nusec;
+
+ /* User and system CPU time reading */
+
+ times(&local);
+ nuser = local.tms_utime;
+ nsys = local.tms_stime;
+ *tuser = (float)(nuser-zuser) / (float)CLK_TCK;
+ *tsys = (float)(nsys-zsys) / (float)CLK_TCK;
+
+ zuser = nuser;
+ zsys = nsys;
+
+ /* CPU time reading */
+
+ nclock = clock();
+ *tcpu = (float)(nclock-zclock) / (float)CLOCKS_PER_SEC;
+ zclock = nclock;
+
+}
+
+
SWIG_DEF = libParaMEDMEM_Swig.i libParaMEDMEM_Swig.typemap
SWIG_FLAGS = @SWIG_FLAGS@ -I$(srcdir) $(MPI_INCLUDES) -I$(srcdir)/../ParaMEDMEM -I$(srcdir)/../MEDCoupling -I$(srcdir)/../MEDCoupling_Swig \
- -I$(srcdir)/../INTERP_KERNEL -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../ParaMEDMEM/MEDLoader
+ -I$(srcdir)/../INTERP_KERNEL -I$(srcdir)/../INTERP_KERNEL/Bases -I$(srcdir)/../ParaMEDLoader -I$(srcdir)/../MEDLoader
dist__libParaMEDMEM_Swig_la_SOURCES = $(SWIG_DEF)
nodist__libParaMEDMEM_Swig_la_SOURCES = libParaMEDMEM_Swig_wrap.cxx
$(MED2_INCLUDES) $(HDF5_INCLUDES) @CXXTMPDPTHFLAGS@ \
-I$(srcdir)/../INTERP_KERNEL \
$(MPI_INCLUDES) -I$(srcdir)/../ParaMEDMEM -I$(srcdir)/../MEDCoupling_Swig -I$(srcdir)/../INTERP_KERNEL/Bases \
- -I$(srcdir)/../MEDCoupling -I$(srcdir)/../ParaMEDMEM/MEDLoader
+ -I$(srcdir)/../MEDCoupling -I$(srcdir)/../ParaMEDLoader -I$(srcdir)/../MEDLoader
_libParaMEDMEM_Swig_la_LDFLAGS = -module $(MED2_LIBS) $(HDF5_LIBS) $(PYTHON_LIBS) $(MPI_LIBS) \
../MEDCoupling/libmedcoupling.la ../INTERP_KERNEL/libinterpkernel.la \
- ../ParaMEDMEM/libparamedmem.la ../ParaMEDMEM/MEDLoader/libparamedmemmedloader.la
+ ../ParaMEDMEM/libparamedmem.la ../ParaMEDLoader/libparamedloader.la
if MED_ENABLE_KERNEL
_libParaMEDMEM_Swig_la_CPPFLAGS += ${KERNEL_CXXFLAGS}