SALOME platform Git repositories - tools/medcoupling.git/commitdiff
Salome HOME
*** empty log message ***
authorvbd <vbd>
Tue, 13 Feb 2007 15:06:56 +0000 (15:06 +0000)
committervbd <vbd>
Tue, 13 Feb 2007 15:06:56 +0000 (15:06 +0000)
20 files changed:
src/ParaMEDMEM/CommInterface.hxx
src/ParaMEDMEM/DEC.cxx
src/ParaMEDMEM/ExplicitCoincidentDEC.cxx
src/ParaMEDMEM/ExplicitCoincidentDEC.hxx
src/ParaMEDMEM/ExplicitTopology.cxx
src/ParaMEDMEM/ExplicitTopology.hxx
src/ParaMEDMEM/MPIProcessorGroup.cxx
src/ParaMEDMEM/MPIProcessorGroup.hxx
src/ParaMEDMEM/Makefile.in
src/ParaMEDMEM/ParaFIELD.cxx
src/ParaMEDMEM/ParaFIELD.hxx
src/ParaMEDMEM/ParaMESH.cxx
src/ParaMEDMEM/ParaMESH.hxx
src/ParaMEDMEM/ParaSUPPORT.hxx
src/ParaMEDMEM/ProcessorGroup.hxx
src/ParaMEDMEM/Topology.hxx
src/ParaMEDMEM/UnstructuredParaSUPPORT.cxx
src/ParaMEDMEM/UnstructuredParaSUPPORT.hxx
src/ParaMEDMEM/test_ExplicitDEC.cxx
src/ParaMEDMEM/test_ParaField.cxx

index 6972b54ee6ff8808d42ed8627dcc364fe1d31d0e..1c4c3c822adf0558de8e514ce34b26e028c18d57 100644 (file)
@@ -32,12 +32,19 @@ public:
        {return MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype,
                                                  recvbuf, recvcounts, recvdispls, recvtype,
                                                  comm);}
+  int allToAll(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+              void* recvbuf, int recvcount, MPI_Datatype recvtype,
+              MPI_Comm comm) const
+  {
+    return MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+  }
        int allGather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
                                  void* recvbuf, int recvcount, MPI_Datatype recvtype,
                                        MPI_Comm comm) const
        {return MPI_Allgather(sendbuf,sendcount, sendtype, recvbuf, recvcount, recvtype, comm);  
        }
-       
+
+  int worldSize() const {int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size;}
 };
 
 }
index 773e2618c32c2aead9892b0d80a61197b895e24e..27c93d0b02fe11fc75a6d305496bedf386cf1b52 100644 (file)
@@ -15,18 +15,18 @@ namespace ParaMEDMEM
 void DEC::attachTargetField(const ParaFIELD* field) 
 {
        _target_field=field;
-       if (field!=0)
-       {
-               BlockTopology* topo=dynamic_cast<BlockTopology*>(field->getTopology());
-               _comm_interface=&(topo->getProcGroup()->getCommInterface());
-       }
+       //if (field!=0)
+       //{
+       //BlockTopology* topo=dynamic_cast<BlockTopology*>(field->getTopology());
+               _comm_interface=&(field->getTopology()->getProcGroup()->getCommInterface());
+               //}
 }
 void DEC::attachSourceField(const ParaFIELD* field) 
 {_source_field=field;
-       if (field!=0)
-       {
-               BlockTopology* topo=dynamic_cast<BlockTopology*>(field->getTopology());
-               _comm_interface=&(topo->getProcGroup()->getCommInterface());
-       }
+//if (field!=0)
+//{
+//     BlockTopology* topo=dynamic_cast<BlockTopology*>(field->getTopology());
+               _comm_interface=&(field->getTopology()->getProcGroup()->getCommInterface());
+               //}
 }
 }
index aa930902921fb9c1f92ecf36b36155addcc02455..e9ca495ef50d54946fe34a3240062c12fc1b3d8f 100644 (file)
@@ -6,6 +6,8 @@
 #include "ParaFIELD.hxx"
 #include "MPIProcessorGroup.hxx"
 #include "ExplicitCoincidentDEC.hxx"
+#include "ExplicitMapping.hxx"
+
 
 namespace ParaMEDMEM
 {
@@ -22,16 +24,28 @@ ExplicitCoincidentDEC::~ExplicitCoincidentDEC()
  */
 void ExplicitCoincidentDEC::synchronize()
 {
-       if (_source_field!=0)
-               _toposource = dynamic_cast<ExplicitTopology*>(_source_field->getTopology());
-       if (_target_field!=0)
-               _topotarget = dynamic_cast<ExplicitTopology*>(_target_field->getTopology());
-       
-       // Transmitting source topology to target code 
-       broadcastTopology(*_toposource,*_topotarget,1000);
-       // Transmitting target topology to source code
-       //broadcastTopology(_topotarget,2000);
-       //checkCompatibility(_toposource,_topotarget);
+  if (_source_field!=0)
+    {
+      _toposource = dynamic_cast<ExplicitTopology*>(_source_field->getTopology());
+      _sourcegroup= _toposource->getProcGroup()->createProcGroup();
+      _targetgroup=_toposource->getProcGroup()->createComplementProcGroup();
+    }
+  if (_target_field!=0)
+    {
+      _topotarget = dynamic_cast<ExplicitTopology*>(_target_field->getTopology());
+      _sourcegroup= _topotarget->getProcGroup()->createComplementProcGroup();
+      _targetgroup=_topotarget->getProcGroup()->createProcGroup();
+    }
+  
+  // Exchanging
+  
+  // Transmitting source topology to target code 
+  broadcastTopology(_toposource,_topotarget,1000);
+  
+  transferMappingToSource();
+  // Transmitting target topology to source code
+  //           broadcastTopology(_topotarget,_toposource,2000);
+  //checkCompatibility(_toposource,_topotarget);
 }
 
 /*! Creates the arrays necessary for the data transfer
@@ -40,76 +54,73 @@ void ExplicitCoincidentDEC::synchronize()
  *  */
 void ExplicitCoincidentDEC::prepareSourceDE()
 {
-       ////////////////////////////////////
-       //Step 1 : buffer array creation 
-       
-       if (!_toposource->getProcGroup()->containsMyRank())
-               return;
-       MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface());
-       
-       int myranksource = _toposource->getProcGroup()->myRank();
-       
-       vector <int>* target_arrays=new vector<int>[_topotarget->getProcGroup()->size()];
-       
-       //cout<<" topotarget size"<<    _topotarget->getProcGroup()->size()<<endl;
-       
-       int nb_local = _toposource-> getNbLocalElements();
-       for (int ielem=0; ielem< nb_local ; ielem++)
-       {
-               pair<int,int> target_local =_distant_elems[ielem];
-               target_arrays[target_local.first].push_back(target_local.second); 
-       }       
-       
-       int union_size=group->size();
-       
-       _sendcounts=new int[union_size];
-       _senddispls=new int[union_size];
-       _recvcounts=new int[union_size];
-       _recvdispls=new int[union_size];
-       
-       for (int i=0; i< union_size; i++)
-       {
-               _sendcounts[i]=0;
-               _recvcounts[i]=0;
-               _recvdispls[i]=0;
-       }
-       _senddispls[0]=0;
-       
-       for (int iproc=0; iproc < _topotarget->getProcGroup()->size(); iproc++)
-       {
-               //converts the rank in target to the rank in union communicator
-               int unionrank=group->translateRank(_topotarget->getProcGroup(),iproc);
-               _sendcounts[unionrank]=target_arrays[iproc].size();
-       }
-       
-       for (int iproc=1; iproc<group->size();iproc++)
-               _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];
-       
-       _sendbuffer = new double [nb_local ];
-
-       /////////////////////////////////////////////////////////////
-       //Step 2 : filling the buffers with the source field values 
+  ////////////////////////////////////
+  //Step 1 : buffer array creation 
+  
+  if (!_toposource->getProcGroup()->containsMyRank())
+    return;
+  MPIProcessorGroup* group=new MPIProcessorGroup(_sourcegroup->getCommInterface());
+  
+  int myranksource = _sourcegroup->myRank();
+  
+  // Warning : the size of the target side is implicitly deduced
+  //from the size of MPI_COMM_WORLD
+  int target_size = _toposource->getProcGroup()->getCommInterface().worldSize()- _toposource->getProcGroup()->size()  ;
+  //   vector <int>* target_arrays=new vector<int>[_topotarget->getProcGroup()->size()];
+  
+  vector <int>* target_arrays=new vector<int>[target_size];
+  //cout<<" topotarget size"<< _topotarget->getProcGroup()->size()<<endl;
+  
+  int nb_local = _toposource-> getNbLocalElements();
 
-       int* counter=new int [_topotarget->getProcGroup()->size()];
-       counter[0]=0;   
-       for (int i=1; i<_topotarget->getProcGroup()->size(); i++)
-               counter[i]=counter[i-1]+target_arrays[i-1].size();
-               
-                       
-       const double* value = _source_field->getField()->getValue();
-       //cout << "Nb local " << nb_local<<endl;
-       for (int ielem=0; ielem<nb_local ; ielem++)
+  int union_size=group->size();
+  
+  _sendcounts=new int[union_size];
+  _senddispls=new int[union_size];
+  _recvcounts=new int[union_size];
+  _recvdispls=new int[union_size];
+  
+  for (int i=0; i< union_size; i++)
+    {
+      _sendcounts[i]=0;
+      _recvcounts[i]=0;
+      _recvdispls[i]=0;
+    }
+  _senddispls[0]=0;
+  int* counts=_explicit_mapping.getCounts();
+  for (int i=0; i<group->size(); i++)
+    _sendcounts[i]=counts[i];
+  
+  for (int iproc=1; iproc<group->size();iproc++)
+    _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];
+  
+  _sendbuffer = new double [nb_local * _toposource->getNbComponents()];
+  
+  /////////////////////////////////////////////////////////////
+  //Step 2 : filling the buffers with the source field values 
+  
+  int* counter=new int [target_size];
+  counter[0]=0;        
+  for (int i=1; i<target_size; i++)
+    counter[i]=counter[i-1]+target_arrays[i-1].size();
+  
+  
+  const double* value = _source_field->getField()->getValue();
+  
+  int* bufferindex= _explicit_mapping.getBufferIndex();
+  
+  for (int ielem=0; ielem<nb_local; ielem++)
+    {
+      int ncomp = _toposource->getNbComponents();
+      for (int icomp=0; icomp<ncomp; icomp++)
        {
-         int global = _toposource->localToGlobal(make_pair(myranksource, ielem));
-         //int global=_toposource->localToGlobal(ielem);
-               int target_local =_topotarget->globalToLocal(global);
-               //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
-               //cout <<"counter[]"<<counter[target_local.first]<<endl;
-               _sendbuffer[counter[target_local]++]=value[ielem];
-               
+         _sendbuffer[ielem*ncomp+icomp]=value[bufferindex[ielem]*ncomp+icomp];
        }
-       delete[] target_arrays;
-       delete[] counter;
+     
+    }
+  delete[] target_arrays;
+  delete[] counter;
 }
 
 /*!
@@ -119,15 +130,16 @@ void ExplicitCoincidentDEC::prepareTargetDE()
 {
        if (!_topotarget->getProcGroup()->containsMyRank())
                return;
-       MPIProcessorGroup* group=new MPIProcessorGroup(_toposource->getProcGroup()->getCommInterface());
+       MPIProcessorGroup* group=new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface());
        
        //int myranktarget = _topotarget->getProcGroup()->myRank();
        
-       vector < vector <int> > source_arrays(_toposource->getProcGroup()->size());
+       vector < vector <int> > source_arrays(_sourcegroup->size());
        int nb_local = _topotarget-> getNbLocalElements();
        for (int ielem=0; ielem< nb_local ; ielem++)
        {
-               pair<int,int> source_local =_distant_elems[ielem];
+         //pair<int,int> source_local =_distant_elems[ielem];
+         pair <int,int> source_local=_explicit_mapping.getDistantNumbering(ielem);
                source_arrays[source_local.first].push_back(source_local.second); 
        }       
        int union_size=group->size();
@@ -142,15 +154,15 @@ void ExplicitCoincidentDEC::prepareTargetDE()
                        _recvcounts[i]=0;
                        _recvdispls[i]=0;
                }
-       for (int iproc=0; iproc < _toposource->getProcGroup()->size(); iproc++)
+       for (int iproc=0; iproc < _sourcegroup->size(); iproc++)
        {
                //converts the rank in target to the rank in union communicator
-               int unionrank=group->translateRank(_toposource->getProcGroup(),iproc);
-               _recvcounts[unionrank]=source_arrays[iproc].size();
+               int unionrank=group->translateRank(_sourcegroup,iproc);
+               _recvcounts[unionrank]=source_arrays[iproc].size()*_topotarget->getNbComponents();
        }
        for (int i=1; i<union_size; i++)
                _recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];
-       _recvbuffer=new double[nb_local];
+       _recvbuffer=new double[nb_local*_topotarget->getNbComponents()];
                
 }
 
@@ -162,7 +174,7 @@ void ExplicitCoincidentDEC::prepareTargetDE()
  * \param topo Topology that is transmitted. It is read on processes where it already exists, and it is created and filled on others.
  * \param tag Communication tag associated with this operation.
  */
-void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology& toposend, ExplicitTopology& toporecv, int tag)
+void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag)
 {
        MPI_Status status;
        
@@ -173,46 +185,50 @@ void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology& toposend,
        
        // The send processors serialize the send topology
        // and send the buffers to the recv procs
-       if (toposend.getProcGroup()->containsMyRank())
+       if (toposend !=0 && toposend->getProcGroup()->containsMyRank())
        {
-               toposend.serialize(serializer, size);
+               toposend->serialize(serializer, size);
                for (int iproc=0; iproc< group->size(); iproc++)
                {
-                       int itarget=(iproc+toposend.getProcGroup()->myRank())%group->size();
-                       if (!toposend.getProcGroup()->contains(itarget))
+                 //                    int itarget=(iproc+toposend->getProcGroup()->myRank())%group->size();
+                 int itarget=iproc;
+                       if (!toposend->getProcGroup()->contains(itarget))
                        {
-                               int nbelem = toposend.getNbLocalElements();
-                               _comm_interface->send(&nbelem,1,MPI_INTEGER, itarget,tag+itarget,*(group->getComm()));
-                               _comm_interface->send(&serializer, size, MPI_INTEGER, itarget, tag+itarget,*(group->getComm()));                                        
+                         //                            int nbelem = toposend->getNbLocalElements();
+                               _comm_interface->send(&size,1,MPI_INTEGER, itarget,tag+itarget,*(group->getComm()));
+                               _comm_interface->send(serializer, size, MPI_INTEGER, itarget, tag+itarget,*(group->getComm()));                                 
                        }
                }
        }
        else
        {
                vector <int> size (group->size());
-               int myrank=toporecv.getProcGroup()->myRank();
+               int myrank=toporecv->getProcGroup()->myRank();
+               int myworldrank=group->myRank();
                for (int iproc=0; iproc<group->size();iproc++)
                {
                        int isource = iproc;
-                       if (!toporecv.getProcGroup()->contains(isource))
+                       if (!toporecv->getProcGroup()->contains(isource))
                        {
                                int nbelem;
-                               _comm_interface->recv(&nbelem, 1, MPI_INTEGER, isource, tag+myrank, *(group->getComm()), &status);
+                               _comm_interface->recv(&nbelem, 1, MPI_INTEGER, isource, tag+myworldrank, *(group->getComm()), &status);
                                int* buffer = new int[nbelem];
-                               _comm_interface->recv(buffer, nbelem, MPI_INTEGER, isource,tag+myrank, *(group->getComm()), &status);                           
+                               _comm_interface->recv(buffer, nbelem, MPI_INTEGER, isource,tag+myworldrank, *(group->getComm()), &status);                              
                        
                                ExplicitTopology* topotemp=new ExplicitTopology();
                                topotemp->unserialize(buffer, *_comm_interface);
                                delete[] buffer;
                                
-                               for (int ielem=0; ielem<toporecv.getNbLocalElements(); ielem++)
+                               for (int ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
                                {
-                                       int global=toporecv.localToGlobal(make_pair(iproc,ielem));
+                                 int global = toporecv->localToGlobal(ielem);
+                                 //int global=toporecv->localToGlobal(make_pair(iproc,ielem));
                                        int sendlocal=topotemp->globalToLocal(global);
                                        if (sendlocal!=-1)
                                        {
                                                size[iproc]++;
-                                               _distant_elems.insert(make_pair(ielem, make_pair(iproc,sendlocal)));
+                                               _explicit_mapping.pushBackElem(make_pair(iproc,sendlocal));
+                                               //_distant_elems.insert(make_pair(ielem, make_pair(iproc,sendlocal)));
                                        }
                                }
                                delete topotemp;
@@ -222,42 +238,129 @@ void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology& toposend,
        MESSAGE (" rank "<<group->myRank()<< " broadcastTopology is over");
 }
 
+void ExplicitCoincidentDEC::transferMappingToSource()
+{
+
+  MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
+  
+  // sending source->target mapping which is stored by target
+  //in _distant_elems from target to source
+  if (_topotarget!=0 && _topotarget->getProcGroup()->containsMyRank())
+    {
+      int world_size = _topotarget->getProcGroup()->getCommInterface().worldSize()  ;
+      int* nb_transfer_union=new int[world_size];
+      int* dummy_recv=new int[world_size];
+      for (int i=0; i<world_size; i++)
+       nb_transfer_union[i]=0;
+      //converts the rank in target to the rank in union communicator
+    
+      for (int i=0; i<  _explicit_mapping.nbDistantDomains(); i++)
+       {
+         //      ProcessorGroup* sourcegroup = _topotarget->getProcGroup()->createComplementProcGroup();
+         int unionrank=group->translateRank(_sourcegroup,_explicit_mapping.getDistantDomain(i));
+         nb_transfer_union[unionrank]=_explicit_mapping.getNbDistantElems(i);
+         //      delete sourcegroup;
+       }
+      _comm_interface->allToAll(nb_transfer_union, 1, MPI_INTEGER, dummy_recv, 1, MPI_INTEGER, MPI_COMM_WORLD);
+      
+      int* sendbuffer= _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());
+      
+      int* sendcounts= new int [world_size];
+      int* senddispls = new int [world_size];
+      for (int i=0; i< world_size; i++)
+       {
+         sendcounts[i]=2*nb_transfer_union[i];
+         if (i==0)
+           senddispls[i]=0;
+         else
+           senddispls[i]=senddispls[i-1]+sendcounts[i-1];
+       }
+      int* recvcounts=new int[world_size];
+      int* recvdispls=new int[world_size];
+      int *dummyrecv;
+      for (int i=0; i <world_size; i++)
+       {
+         recvcounts[i]=0;
+         recvdispls[i]=0;
+       }
+      _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_INTEGER, dummyrecv, recvcounts, senddispls, MPI_INTEGER, MPI_COMM_WORLD);
+      
+    }
+  //receiving in the source subdomains the mapping sent by targets
+  else
+    {
+       int world_size = _toposource->getProcGroup()->getCommInterface().worldSize()  ;
+      int* nb_transfer_union=new int[world_size];
+      int* dummy_send=new int[world_size];
+      for (int i=0; i<world_size; i++)
+       dummy_send[i]=0;
+      _comm_interface->allToAll(dummy_send, 1, MPI_INTEGER, nb_transfer_union, 1, MPI_INTEGER, MPI_COMM_WORLD);
+      
+      int total_size=0;
+      for (int i=0; i< world_size; i++)
+       total_size+=nb_transfer_union[i];
+      int nbtarget = _targetgroup->size();
+      int* targetranks = new int[ nbtarget];
+      for (int i=0; i<nbtarget; i++)
+       targetranks[i]=group->translateRank(_targetgroup,i);
+      int* mappingbuffer= new int [total_size*2];
+      int* sendcounts= new int [world_size];
+      int* senddispls = new int [world_size];
+      int* recvcounts=new int[world_size];
+      int* recvdispls=new int[world_size];
+      for (int i=0; i< world_size; i++)
+       {
+         recvcounts[i]=2*nb_transfer_union[i];
+         if (i==0)
+           recvdispls[i]=0;
+         else
+           recvdispls[i]=recvdispls[i-1]+recvcounts[i-1];
+       }
+
+      int *dummysend;
+      for (int i=0; i <world_size; i++)
+       {
+         sendcounts[i]=0;
+         senddispls[i]=0;
+       }
+      _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_INTEGER, mappingbuffer, recvcounts, recvdispls, MPI_INTEGER, MPI_COMM_WORLD);
+      
+      _explicit_mapping.unserialize(world_size,nb_transfer_union,nbtarget, targetranks, mappingbuffer);
+    }
+}
+
 void ExplicitCoincidentDEC::recvData()
 {
        //MPI_COMM_WORLD is used instead of group because there is no
        //mechanism for creating the union group yet
        MESSAGE("recvData");
-       for (int i=0; i< 4; i++)
-               cout << _recvcounts[i]<<" ";
-       cout <<endl;
-       for (int i=0; i< 4; i++)
-               cout << _recvdispls[i]<<" ";
-       cout <<endl;
+
        
        cout<<"start AllToAll"<<endl;
        _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE, 
                        _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
        cout<<"end AllToAll"<<endl;
        int nb_local = _topotarget->getNbLocalElements();
-       double* value=new double[nb_local];
+       double* value=new double[nb_local*_topotarget->getNbComponents()];
        int myranktarget=_topotarget->getProcGroup()->myRank();
-       vector<int> counters(_toposource->getProcGroup()->size());
+       vector<int> counters(_sourcegroup->size());
        counters[0]=0;
-       for (int i=0; i<_toposource->getProcGroup()->size()-1; i++)
+       for (int i=0; i<_sourcegroup->size()-1; i++)
                {
                        MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
-                       int worldrank=group->translateRank(_toposource->getProcGroup(),i);
+                       int worldrank=group->translateRank(_sourcegroup,i);
                        counters[i+1]=counters[i]+_recvcounts[worldrank];
                }
        
        for (int ielem=0; ielem<nb_local ; ielem++)
        {
-               int global = _topotarget->localToGlobal(make_pair(myranktarget, ielem));
-               int source_local =_toposource->globalToLocal(global);
-               value[ielem]=_recvbuffer[counters[source_local]++];
-       }
-       
-       
+         pair<int,int> distant_numbering=_explicit_mapping.getDistantNumbering(ielem);
+         int iproc=distant_numbering.first; 
+         int ncomp =  _topotarget->getNbComponents();
+         for (int icomp=0; icomp< ncomp; icomp++)
+           value[ielem*ncomp+icomp]=_recvbuffer[counters[iproc]*ncomp+icomp];
+         counters[iproc]++;
+       }       
        _target_field->getField()->setValue(value);
 }
 
index 58fc1548b0cc7ead4866d85219320cf99f722549..43e3204f117ecaf097ff12a0d79c4c81c03c6c85 100644 (file)
@@ -2,39 +2,44 @@
 #define ExplicitCOINCIDENTDEC_HXX_
 
 #include "DEC.hxx"
+#include "ExplicitMapping.hxx"
 #include "ExplicitTopology.hxx"
 #include <map>
 
 namespace ParaMEDMEM
 {
-class DEC;
-class BlockTopology;
-class ExplicitCoincidentDEC: public DEC
-{
-public:
-       ExplicitCoincidentDEC();
-       virtual ~ExplicitCoincidentDEC();
-       void synchronize();
-       void broadcastTopology(BlockTopology*&, int tag);
-       void broadcastTopology(const ExplicitTopology& toposend, ExplicitTopology& toporecv, int tag);
-       
-       void prepareSourceDE();
-       void prepareTargetDE();
-       void recvData();
-       void sendData();
-private :
-       
-       ExplicitTopology* _toposource;
-       ExplicitTopology* _topotarget;
-       int* _sendcounts;
-       int* _recvcounts;
-       int* _senddispls;
-       int* _recvdispls;
-       double* _recvbuffer;
-       double* _sendbuffer;
-       std::map<int,std::pair<int,int> > _distant_elems;
-};
-
+  class DEC;
+  class BlockTopology;
+  //  class ExplicitMapping;
+  class ExplicitCoincidentDEC: public DEC
+  {
+  public:
+    ExplicitCoincidentDEC();
+    virtual ~ExplicitCoincidentDEC();
+    void synchronize();
+    void broadcastTopology(BlockTopology*&, int tag);
+    void broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag);
+    void transferMappingToSource();
+    void prepareSourceDE();
+    void prepareTargetDE();
+    void recvData();
+    void sendData();
+  private :
+    
+    ExplicitTopology* _toposource;
+    ExplicitTopology* _topotarget;
+    ProcessorGroup* _targetgroup;
+    ProcessorGroup* _sourcegroup;
+    int* _sendcounts;
+    int* _recvcounts;
+    int* _senddispls;
+    int* _recvdispls;
+    double* _recvbuffer;
+    double* _sendbuffer;
+    std::map<int,std::pair<int,int> > _distant_elems;
+    ExplicitMapping _explicit_mapping;
+  };
+  
 }
 
 #endif /*ExplicitCOINCIDENTDEC_HXX_*/
index e953bfe9421687071bf4c0587aed91e5d78b3f83..3ba799f734e0be3e54ba37dafc5f8e9e001fae43 100644 (file)
@@ -19,7 +19,9 @@ namespace ParaMEDMEM
 {
 
 ExplicitTopology::ExplicitTopology(const ParaSUPPORT& parasupport ):
-_proc_group(parasupport.getMesh()->getBlockTopology()->getProcGroup()){
+_proc_group(parasupport.getMesh()->getBlockTopology()->getProcGroup()),
+_nb_components(1)
+{
        _nb_elems=parasupport.getSupport()->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS);
        MED_EN::medEntityMesh entity= parasupport.getSupport()->getEntity();
        const int* global=parasupport.getMesh()->getGlobalNumbering(entity);
@@ -45,6 +47,19 @@ _proc_group(parasupport.getMesh()->getBlockTopology()->getProcGroup()){
        }
 }
 
+ExplicitTopology::ExplicitTopology(const ExplicitTopology& topo, int nb_components)
+{
+  _proc_group = topo._proc_group;
+  _nb_elems = topo._nb_elems;
+  _nb_components = nb_components;
+  _loc2glob=new int[2*_nb_elems];
+  for (int i=0; i<2*_nb_elems; i++)
+    {
+      _loc2glob[i]=topo._loc2glob[i];
+    }
+  _glob2loc=topo._glob2loc;
+}
+
 
 ExplicitTopology::~ExplicitTopology()
 {
@@ -76,13 +91,14 @@ void ExplicitTopology::unserialize(const int* serializer,const CommInterface& co
 {
        const int* ptr_serializer=serializer;
        cout << "unserialize..."<<endl;
-       _nb_elems=*(ptr_serializer++);
+       _nb_elems=*ptr_serializer++;
        cout << "nbelems "<<_nb_elems<<endl;
        _loc2glob=new int[_nb_elems];
        for (int i=0; i<_nb_elems; i++)
        {
-         _loc2glob[i]=*(ptr_serializer++);
+         _loc2glob[i]=*ptr_serializer;
          _glob2loc[*ptr_serializer]=i;
+         ptr_serializer++;
          
        }
 
index 47a497bcd741beb6340f536b29822ae3ba6f1ae8..e8c207f5629df515f1a00e362f0d5b3698473608 100644 (file)
@@ -18,11 +18,13 @@ namespace ParaMEDMEM
 {
 class Topology;
 class ComponentTopology;
+  class ParaSUPPORT;
 
 class ExplicitTopology: public Topology
 {
 public:
        ExplicitTopology(){};
+  ExplicitTopology( const ExplicitTopology& topo, int nbcomponents);
        ExplicitTopology(const ParaSUPPORT&);
        virtual ~ExplicitTopology();
        
@@ -38,15 +40,18 @@ public:
        inline int globalToLocal(int) const;
        void serialize(int* & serializer, int& size) const ;
        void unserialize(const int* serializer, const CommInterface& comm_interface);
+  int getNbComponents () const {return _nb_components;}
 private:
-       //Processor group
-       const ProcessorGroup* _proc_group;
-       //nb of elements
-       int _nb_elems;
-       //mapping local to global
-       int* _loc2glob;
-       //mapping global to local
-       hash_map<int,int> _glob2loc;
+  //Processor group
+  const ProcessorGroup* _proc_group;
+  //nb of elements
+  int _nb_elems;
+  //nb of components
+  int _nb_components;
+  //mapping local to global
+  int* _loc2glob;
+  //mapping global to local
+  hash_map<int,int> _glob2loc;
 };
 
 //!converts a pair <subdomainid,local> to a global number 
index 34a84019e00e411094f34c0e31a89ec35a5afe05..3f6330753414f8cdd69207dfad87bd83e3a1f521 100644 (file)
@@ -5,7 +5,7 @@
 #include <iostream>
 #include <set>
 #include <algorithm>
-#include "/export/home/vb144235/mpich2_install/include/mpi.h"
+#include "mpi.h"
 
 using namespace std;
 
@@ -71,5 +71,26 @@ int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) cons
 }
 
 
+ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const
+{
+  set <int> procs;
+  int world_size=_comm_interface.worldSize();
+  for (int i=0; i<world_size; i++)
+    procs.insert(i);
+  for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
+    procs.erase(*iter);
+  
+  return new MPIProcessorGroup(_comm_interface, procs);
+
+}
+ProcessorGroup* MPIProcessorGroup::createProcGroup() const
+{
+  set <int> procs;
+  for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
+    procs.insert(*iter);
+  
+  return new MPIProcessorGroup(_comm_interface, procs);
+
+}
        
 }
index 9a5e7043bf47a1ccd483c37f45dab37dd306d3e6..aa0d49ea41bc9f45a55d2ee5b549e4ab8d1cce1e 100644 (file)
@@ -23,6 +23,9 @@ public:
        bool containsMyRank() const { int rank; MPI_Group_rank(_group, &rank); return (rank!=MPI_UNDEFINED);}
        int translateRank(const ProcessorGroup* group, int rank) const;
        const MPI_Comm* getComm() const {return &_comm;}
+  ProcessorGroup* createComplementProcGroup() const;
+  ProcessorGroup* createProcGroup() const;
 private:
        MPI_Group _group;
        MPI_Comm _comm;
index 5daf5c465a22e184b27564d945ac9e21fe510b5c..3cd41cd22c5981927d901ec59723a043e8d51e62 100644 (file)
@@ -53,7 +53,9 @@ ExplicitTopology.hxx\
 ParaFIELD.hxx\
 DEC.hxx\
 StructuredCoincidentDEC.hxx\
-ExplicitCoincidentDEC.hxx
+UnstructuredParaSUPPORT.hxx\
+ExplicitCoincidentDEC.hxx\
+ExplicitMapping.hxx
 
 # Libraries targets
 
@@ -71,6 +73,7 @@ ParaFIELD.cxx\
 DEC.cxx\
 StructuredCoincidentDEC.cxx\
 ExplicitCoincidentDEC.cxx\
+UnstructuredParaSUPPORT.cxx\
 ExplicitTopology.cxx
 
 
@@ -82,22 +85,22 @@ BIN_SERVER_IDL =
 BIN_CLIENT_IDL = 
 
 TEST_PROGS = test_ProcessorGroup test_BlockTopology test_ParaStructuredSupport \
-test_ParaField test_DEC test_UnstructuredDEC
+test_ParaField test_DEC test_UnstructuredDEC test_ExplicitDEC
 
 LDFLAGS+= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome 
 LDFLAGSFORBIN+= -L$(top_builddir)/lib@LIB_LOCATION_SUFFIX@/salome
 
-CPPFLAGS+=$(MED2_INCLUDES) $(HDF5_INCLUDES) $(MPI_INCLUDES)
+CPPFLAGS+=$(MED2_INCLUDES) $(MPI_INCLUDES) $(LAM_INCLUDES) -I/data/tmpawa/vb144235/lam_install/include
 
-CXXFLAGS+=@CXXTMPDPTHFLAGS@ $(MPI_INCLUDES)
+CXXFLAGS+=@CXXTMPDPTHFLAGS@ 
 CPPFLAGS+=$(BOOST_CPPFLAGS)
 #LDFLAGS+=$(MED2_LIBS) $(HDF5_LIBS) 
 # change motivated by the bug KERNEL4778.
-LDFLAGS+=$(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 $(STDLIB) -lmedmem $(MPI_LIBS)
+LDFLAGS+=$(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 $(STDLIB) -lmedmem  $(MPI_LIBS) $(LAM_LIBS) -L/data/tmpawa/vb144235/lam_install/lib -lmpi -llam -lutil
 
 #LDFLAGSFORBIN+=$(MED2_LIBS) $(HDF5_LIBS)
 # change motivated by the bug KERNEL4778.
-LDFLAGSFORBIN+=-lm $(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 -lmedmem $(MPI_LIBS)  $(BOOST_LIBS)
+LDFLAGSFORBIN+= -lm $(MED2_LIBS) $(HDF5_LIBS) -lmed_V2_1 -lmedmem   $(BOOST_LIBS) $(MPI_LIBS) $(LAM_LIBS) -L/data/tmpawa/vb144235/lam_install/lib -lmpi -llam -lutil
 
 ifeq ($(MED_WITH_KERNEL),yes)
   CPPFLAGS+= ${KERNEL_CXXFLAGS}
index f003519a6a7a6a497a34335d09799df67e3e1c84..3251a61db0dd7f89824a1675f097c1dc8893b1eb 100644 (file)
@@ -4,7 +4,7 @@
 #include "ComponentTopology.hxx"
 #include "ParaSUPPORT.hxx"
 #include "StructuredParaSUPPORT.hxx"
-#include "ExplicitCoincidentDEC.hxx";
+#include "ExplicitCoincidentDEC.hxx"
 #include "StructuredCoincidentDEC.hxx"
 #include "ParaFIELD.hxx"
 #include "ParaMESH.hxx"
@@ -14,19 +14,27 @@ using namespace MEDMEM;
 namespace ParaMEDMEM
 {
 
-ParaFIELD::ParaFIELD(ParaSUPPORT* para_support, const ComponentTopology& component_topology)
+ParaFIELD::ParaFIELD(const ParaSUPPORT* para_support, const ComponentTopology& component_topology)
 :_support(para_support),
 _component_topology(component_topology) 
 {
-       if (dynamic_cast<StructuredParaSUPPORT*>(para_support)!=0)
+       if (dynamic_cast<const StructuredParaSUPPORT*>(para_support)!=0)
        {const BlockTopology* source_topo = dynamic_cast<const BlockTopology*>(para_support->getTopology());
                _topology=new BlockTopology(*source_topo,component_topology);
        }
        else
-               throw MEDEXCEPTION(LOCALIZED(
+       {
+         if (component_topology.nbBlocks()!=1)
+           throw MEDEXCEPTION(LOCALIZED(
                "ParaFIELD constructor : Unstructured Support not taken into account with component topology yet"));
-               
-       
+         else 
+           {
+             const ExplicitTopology* source_topo=
+               dynamic_cast<const ExplicitTopology*> (para_support->getTopology());
+             _topology=new ExplicitTopology(*source_topo,component_topology.nbLocalComponents());
+                                            
+           }
+       }
 //     int nb_components=0;
 //     if (component_topology.getProcGroup()!=0)
        int nb_components = component_topology.nbLocalComponents();
@@ -61,8 +69,8 @@ ParaFIELD::ParaFIELD(MEDMEM::driverTypes driver_type, const string& file_name,
 ParaFIELD::~ParaFIELD(){}
 
 void ParaFIELD::write(MEDMEM::driverTypes driverType, const string& fileName, const string& meshName){
-       BlockTopology* topo = dynamic_cast<BlockTopology*> (_topology);
-       int myrank = topo->getProcGroup()->myRank();
+  //   Topology* topo = dynamic_cast<BlockTopology*> (_topology);
+       int myrank = _topology->getProcGroup()->myRank();
        ostringstream name;
        name <<fileName<<myrank+1<<".med";
        cout << name <<endl;
@@ -83,7 +91,7 @@ void ParaFIELD::synchronizeTarget(ParaFIELD* source_field){
        data_channel->attachTargetField(this);
        data_channel->synchronize();
        data_channel->prepareTargetDE();
-       data_channel->sendData();
+       data_channel->recvData();
        
        delete data_channel;
 }
index 88348e59da5dfdbf22de6088f0393bcbc867d8b5..7e5fac29db876a04ce2ff6571e41d8450e27a20f 100644 (file)
@@ -18,7 +18,9 @@ class ParaSUPPORT;
 class ParaFIELD
 {
 public:
-       ParaFIELD(ParaSUPPORT* support, const ComponentTopology& component_topology); 
+
+       ParaFIELD(const ParaSUPPORT* support, const ComponentTopology& component_topology); 
+
        ParaFIELD(MEDMEM::driverTypes driver_type, const string& file_name, 
                const string& driver_name, const ComponentTopology& component_topology) 
                throw (MEDMEM::MEDEXCEPTION);
@@ -33,7 +35,7 @@ private:
        const ComponentTopology& _component_topology;
        Topology* _topology; 
        MEDMEM::FIELD<double>* _field;
-       ParaSUPPORT* _support;
+       const ParaSUPPORT* _support;
 };
 
 }
index c0914d09812819ad3ca4a8f990584d3f141f9b60..45d1579a4c4c544670cc9a089136b7bec4cddaf0 100644 (file)
@@ -64,6 +64,7 @@ throw (MEDMEM::MEDEXCEPTION){
                strcpy(meshname,meshstring.c_str());
                strcpy(file,_medfilename.c_str());
       }
+    _name=meshstring;
        ///////////////////////////////////////////
        // treatment of the domain that corresponds
        // to the local id
@@ -214,8 +215,7 @@ throw (MEDMEM::MEDEXCEPTION){
        
        int nbdomains= _block_topology->getProcGroup()->size(); 
        vector<string> filename(nbdomains);
-               
-               
+                               
        //loop on the domains
        for (int i=0; i<nbdomains;i++)
        {
@@ -242,7 +242,7 @@ throw (MEDMEM::MEDEXCEPTION){
                for (int i=0; i<nbdomains;i++)
                {
                        //updating the ascii description file
-                       file << _name <<" "<< i+1 << " "<< _name << " localhost " << filename[i] << " "<<endl;
+                 file << _name <<" "<< i+1 << " "<< _name <<"_"<<i+1<< " localhost " << filename[i] << " "<<endl;
                }
                
        }
index a99612ab145aaf74c29a6173844f64f0ce83ed31..a217d0cac432e77a58f5c03b959ff5d0f4944ccc 100644 (file)
@@ -31,7 +31,7 @@ private:
        //mesh object underlying the ParaMESH object
        MEDMEM::MESH* _mesh;
        //name of the mesh
-       const string _name;
+       string _name;
        //connect zone
        std::vector<MEDMEM::CONNECTZONE*> _connect_zone;
        //id of the local grid
index 73a22cb191d234bf040f6042e4196ab94393e2e2..116664b76cb0b74601e474567e36e1309e646d6c 100644 (file)
@@ -13,6 +13,8 @@ namespace ParaMEDMEM
   {
   public:
     ParaSUPPORT();
+    ParaSUPPORT(const ParaMESH* mesh, const MEDMEM::SUPPORT* support):
+      _support(support), _mesh(mesh){};
     ParaSUPPORT(const MEDMEM::SUPPORT&);
     virtual ~ParaSUPPORT();
     virtual const Topology* getTopology() const {};
index d58eae0ee292080eb5397ea4b10a7f49340dea9d..e605bfaf81f39d5ce5c3e7703dcc2bdfe3fecc84 100644 (file)
@@ -26,7 +26,8 @@ public:
        const CommInterface& getCommInterface()const {return _comm_interface;};
        virtual int myRank() const =0;
        virtual int translateRank(const ProcessorGroup*, int) const =0;
-       
+  virtual ProcessorGroup* createComplementProcGroup() const =0;
+  virtual ProcessorGroup* createProcGroup() const=0;
 protected:
        const CommInterface& _comm_interface;
        std::set<int> _proc_ids;
index 1ab9434da74cf446638e0b4f16ce93dde1f81146..87842b7501b3e216578acdf8de9ab32be3205505 100644 (file)
@@ -6,6 +6,7 @@
 using namespace std;
 namespace ParaMEDMEM
 {
+  class ProcessorGroup;
 
 class Topology
 {
@@ -16,6 +17,7 @@ public:
 //     virtual int localToGlobal (const std::pair<int,int>) const =0;
        virtual int getNbElements() const=0;
        virtual int getNbLocalElements() const =0;
+       virtual const ProcessorGroup* getProcGroup()const =0;
 };
 
 }
index 49c28ba7509878b8c1722f5f0f858794cef74d52..f793bc72f16bfa6462b919f5d02cb20fe0736487 100644 (file)
@@ -1,6 +1,6 @@
 #include "Topology.hxx"
-#include "BlockTopology.hxx"
-#include "ParaGRID.hxx"
+#include "ExplicitTopology.hxx"
+#include "ParaMESH.hxx"
 #include "UnstructuredParaSUPPORT.hxx"
 #include "MEDMEM_Support.hxx"
 
@@ -8,15 +8,25 @@ namespace ParaMEDMEM
 {
        
 /*! Constructor on all elements from a MESH */
-UnstructuredParaSUPPORT::UnstructuredParaSUPPORT(const ParaMESH* const mesh, const SUPPORT* support):
-_mesh(mesh), 
+UnstructuredParaSUPPORT::UnstructuredParaSUPPORT(const ParaMESH* const mesh, const MEDMEM::SUPPORT* support):
 _entity(support->getEntity()),
-_support(support),
-_block_topology(mesh->getBlockTopology()->getProcGroup(), support->getNumberOfElements(MED_EN::MED_ALL_ELEMENTS))
+_explicit_topology(new ExplicitTopology(*support))
 {
+  _mesh=mesh;
+  _support=support;
 }
 
-StructuredParaSUPPORT::~StructuredParaSUPPORT()
+UnstructuredParaSUPPORT::UnstructuredParaSUPPORT(const ParaMESH* const mesh, const MED_EN::medEntityMesh entity):
+  ParaSUPPORT(mesh, new MEDMEM::SUPPORT(mesh->getMesh(), "support on all entities", entity)),
+  _entity(entity),
+  _explicit_topology(new ExplicitTopology(*this))
+{
+  //_mesh=mesh;
+  //  _support=new SUPPORT(_mesh->getMesh(), "support on all entities", entity);
+  //_explicit_topology(new ExplicitTopology(*support));
+}
+
+UnstructuredParaSUPPORT::~UnstructuredParaSUPPORT()
 {
        delete _support;
 }
index 7d30c5de4dc92360bb22d6647b40ec8ff77d7eb7..cc2ad70b8e6bb4b01890667ccde4f72ee0622333 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "ParaSUPPORT.hxx"
 #include "MEDMEM_define.hxx"
+#include "ExplicitTopology.hxx"
 
 using namespace MED_EN;
 namespace MEDMEM
@@ -12,23 +13,25 @@ namespace MEDMEM
 
 namespace ParaMEDMEM
 {
-class BlockTopology;
-class ParaMESH;
+  class Topology;
+  class ExplicitTopology;
+  class ParaMESH;
+  
 
-class UnstructuredParaSUPPORT:public ParaSUPPORT
-{
-public:
-       
-       UnstructuredParaSUPPORT(const ParaMESH* const mesh, SUPPORT* support );
-       virtual ~UnstructuredParaSUPPORT();
-       const Topology* getTopology() const {return _block_topology;}
-   
-private:
-       const BlockTopology* const  _block_topology;
-       const MED_EN::medEntityMesh _entity;
+  class UnstructuredParaSUPPORT:public ParaSUPPORT
+  {
+  public:
+    
+    UnstructuredParaSUPPORT(const ParaMESH* const mesh, const MEDMEM::SUPPORT* support );
+    UnstructuredParaSUPPORT(const ParaMESH* const mesh, const MED_EN::medEntityMesh entity);
+    virtual ~UnstructuredParaSUPPORT();
+    const Topology* getTopology() const {return _explicit_topology;}
+    
+  private:
+    const ExplicitTopology* const  _explicit_topology;
+    const MED_EN::medEntityMesh _entity;
        
-       
-};
-
+  };
+  
 }
 #endif /*STRUCTUREDPARASUPPORT_HXX_*/
index 70fc5f82360fdf065ca39f5786b55c14050353da..3237b7b8d76c6c56c8f16b6cd341603f27179b34 100644 (file)
@@ -13,7 +13,7 @@
 #include "MPIProcessorGroup.hxx"
 #include "MEDMEM_Mesh.hxx"
 #include "ParaMESH.hxx"
-#include "StructuredParaSUPPORT.hxx"
+#include "UnstructuredParaSUPPORT.hxx"
 #include "ComponentTopology.hxx"
 #include "ParaFIELD.hxx"
 
@@ -58,17 +58,17 @@ int main(int argc, char** argv)
        Topology* topo_target;
        if (source_group->containsMyRank())
        {
-               source_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/pointe_nosplit",*self_group);
+               source_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_nosplit",*source_group);
                topo_source=source_mesh->getBlockTopology();
        }
        if (target_group->containsMyRank())
        {
-               target_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/pointe_split",*target_group);
+               target_mesh=new ParaMESH(MED_DRIVER,"../../share/salome/resources/med/pointe_split",*target_group);
                topo_target=target_mesh->getBlockTopology();
        }
                
-       StructuredParaSUPPORT* target_support;
-       StructuredParaSUPPORT* source_support;
+       UnstructuredParaSUPPORT* target_support;
+       UnstructuredParaSUPPORT* source_support;
        ComponentTopology* target_comp;
        ComponentTopology* source_comp;
        ParaFIELD* target_field=0;
@@ -76,9 +76,9 @@ int main(int argc, char** argv)
        
        if (source_group->containsMyRank())
        {
-               source_support=new StructuredParaSUPPORT(source_mesh,MED_EN::MED_CELL);
-               source_comp=new ComponentTopology (6, source_group);
-               source_field = new ParaFIELD(source_support, *source_comp);
+               source_support=new UnstructuredParaSUPPORT(source_mesh,MED_EN::MED_CELL);
+               source_comp=new ComponentTopology (1);
+               source_field = new ParaFIELD(source_support,*source_comp);
                int nb_local = source_field->getTopology()->getNbLocalElements();
                cout << "Source field nb elems on rank : "<<rank<<" : "<<nb_local<<endl;
                double * value= new double[nb_local];
@@ -88,18 +88,18 @@ int main(int argc, char** argv)
                source_field->synchronizeSource(target_field);
                if (source_group->myRank()==0)
                {
-                       source_mesh->write(MED_DRIVER,"/home/vb144235/tmp/source");
-                       source_field->write(MED_DRIVER,"/home/vb144235/tmp/source","maa1");
+                       source_mesh->write(MED_DRIVER,"/home/vb144235/tmp/sourceexp");
+                       source_field->write(MED_DRIVER,"/home/vb144235/tmp/sourceexp","maa1");
                }
        }
        if (target_group->containsMyRank())
        {
-               target_support=new StructuredParaSUPPORT(target_mesh,MED_EN::MED_CELL); 
-               target_comp= new ComponentTopology (6);
-               target_field = new ParaFIELD(target_support, *target_comp);
+               target_support=new UnstructuredParaSUPPORT(target_mesh,MED_EN::MED_CELL);       
+               target_comp=new ComponentTopology(1);
+               target_field = new ParaFIELD(target_support,*target_comp);
                target_field->synchronizeTarget(source_field);
-               target_mesh->write(MED_DRIVER, "/home/vb144235/tmp/target");
-               target_field->write(MED_DRIVER, "/home/vb144235/tmp/target", "maa1");
+               target_mesh->write(MED_DRIVER, "/home/vb144235/tmp/targetexp");
+               target_field->write(MED_DRIVER, "/home/vb144235/tmp/targetexp", "maa1_1");
        }
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Finalize();
index 2c4be32b8ac8ad193bff92e1013b278d654be4b6..5bb26f235997515c73474b01cb9b7bef46e806d3 100644 (file)
@@ -23,6 +23,7 @@ using namespace ParaMEDMEM;
 using namespace MEDMEM;
 int main(int argc, char** argv)
 {
+
   string testname="ParaMEDMEM - test #1 -";
   MPI_Init(&argc, &argv); 
   int size;
@@ -74,6 +75,7 @@ int main(int argc, char** argv)
     }  
   MPI_Finalize();
   return 0;
+
 }