throw INTERP_KERNEL::Exception("No mesh dimension specified !");
for(std::set<INTERP_KERNEL::NormalizedCellType>::const_iterator iter=_types.begin();iter!=_types.end();iter++)
{
- if(INTERP_KERNEL::CellModel::getCellModel(*iter).getDimension()!=_mesh_dim)
+ if((int)INTERP_KERNEL::CellModel::getCellModel(*iter).getDimension()!=_mesh_dim)
{
std::ostringstream message;
message << "Mesh invalid because dimension is " << _mesh_dim << " and there is presence of cell(s) with type " << (*iter);
unsigned nbOfFaces=cm.getNumberOfSons2(&connNew[pos+1],lgthOld);
int *tmp=new int[nbOfFaces*lgthOld];
int *work=tmp;
- for(int j=0;j<nbOfFaces;j++)
+ for(int j=0;j<(int)nbOfFaces;j++)
{
INTERP_KERNEL::NormalizedCellType type;
unsigned offset=cm.fillSonCellNodalConnectivity2(j,&connNew[pos+1],lgthOld,work,type);
std::set<int> locMerge;
std::insert_iterator< std::set<int> > it(locMerge,locMerge.begin());
std::set_intersection(connOfCell.begin(),connOfCell.end(),fastFinder.begin(),fastFinder.end(),it);
- if(locMerge.size()==refLgth && fullyIn || locMerge.size()!=0 && !fullyIn)
+ if(((int)locMerge.size()==refLgth && fullyIn) || (locMerge.size()!=0 && !fullyIn))
cellIdsKept.push_back(i);
}
return buildPartOfMySelf(&cellIdsKept[0],&cellIdsKept[0]+cellIdsKept.size(),true);
* values of the different axes.
*/
BlockTopology::BlockTopology(const ProcessorGroup& group, MEDCouplingCMesh *grid):
- _proc_group(&group), _dimension(grid->getSpaceDimension()), _owns_processor_group(false)
+ _dimension(grid->getSpaceDimension()), _proc_group(&group), _owns_processor_group(false)
{
vector <int> axis_length(_dimension);
_nb_elems=1;
* to \a group will cause an MPI error, while calling from a subset
* of \a group will result in a deadlock.
*/
- BlockTopology::BlockTopology(const ProcessorGroup& group, int nb_elem):_proc_group(&group),_dimension(1),_owns_processor_group(false)
+ BlockTopology::BlockTopology(const ProcessorGroup& group, int nb_elem):_dimension(1),_proc_group(&group),_owns_processor_group(false)
{
int* nbelems_per_proc = new int[group.size()];
const MPIProcessorGroup* mpi_group=dynamic_cast<const MPIProcessorGroup*>(_proc_group);
buffer.push_back(_nb_procs_per_dim[i]);
buffer.push_back(_cycle_type[i]);
buffer.push_back(_local_array_indices[i].size());
- for (int j=0; j<_local_array_indices[i].size(); j++)
+ for (int j=0; j<(int)_local_array_indices[i].size(); j++)
buffer.push_back(_local_array_indices[i][j]);
}
_nb_procs_per_dim[i]=*(ptr_serializer++);
_cycle_type[i]=(CYCLE_TYPE)*(ptr_serializer++);
_local_array_indices[i].resize(*(ptr_serializer++));
- for (int j=0; j<_local_array_indices[i].size(); j++)
+ for (int j=0; j<(int)_local_array_indices[i].size(); j++)
_local_array_indices[i][j]=*(ptr_serializer++);
}
set<int> procs;
: _local_para_field(sourceField),
_local_cell_mesh(sourceField.getSupport()->getCellMesh()),
_local_face_mesh(sourceField.getSupport()->getFaceMesh()),
- _local_group(local_group),
- _distant_group(distant_group)
+ _distant_group(distant_group),
+ _local_group(local_group)
{
_union_group = _local_group.fuse(distant_group);
_computeBoundingBoxes();
_comm_buffer=new int[_mapping.size()*2];
std::vector<int> offsets(_distant_domains.size());
offsets[0]=0;
- for (int i=1; i<_distant_domains.size();i++)
+ for (int i=1; i<(int)_distant_domains.size();i++)
offsets[i]=offsets[i-1]+_numbers[i-1];
- for (int i=0; i< _mapping.size(); i++)
+ for (int i=0; i<(int)_mapping.size(); i++)
{
int offset= offsets[_mapping[i].first];
_comm_buffer[offset*2]=idproc;
{
_numbers=new int[nbDistantDomains()];
_domains=new int[nbDistantDomains()];
- for (int i=0; i< _mapping.size(); i++)
+ for (int i=0; i<(int)_mapping.size(); i++)
{
if ( counts.find(_mapping[i].first) == counts.end())
counts.insert(std::make_pair(_mapping[i].first,1));
ParaMEDMEM::ParaMESH* _support;
ParaMEDMEM::ComponentTopology* _comp_topology;
};
-};
+}
#endif
const ProcessorGroup& target_group,
const DECOptions& dec_options,
const INTERP_KERNEL::InterpolationOptions& interp_options):
+ INTERP_KERNEL::InterpolationOptions(interp_options),
+ DECOptions(dec_options),
_source_field(source_field),
_source_support(source_field->getSupport()->getCellMesh()),
_mapping(source_group, target_group, dec_options),
_source_group(source_group),
- _target_group(target_group),
- DECOptions(dec_options),
- INTERP_KERNEL::InterpolationOptions(interp_options)
+ _target_group(target_group)
{
int nbelems = source_field->getField()->getNumberOfTuples();
_row_offsets.resize(nbelems+1);
{
MxN_Mapping::MxN_Mapping(const ProcessorGroup& source_group, const ProcessorGroup& target_group,const DECOptions& dec_options)
- : _union_group(source_group.fuse(target_group)),
- DECOptions(dec_options)
+ : DECOptions(dec_options),_union_group(source_group.fuse(target_group))
{
_access_DEC = new MPIAccessDEC(source_group,target_group,getAsynchronous());
_access_DEC->setTimeInterpolator(getTimeInterpolationMethod());
recvdispls[i]=_recv_proc_offsets[i];
}
vector<int> offsets = _send_proc_offsets;
- for (int i=0; i<_sending_ids.size();i++)
+ for (int i=0; i<(int)_sending_ids.size();i++)
{
int iproc = _sending_ids[i].first;
isendbuf[offsets[iproc]]=_sending_ids[i].second;
//building the buffer of the elements to be sent
vector<int> offsets = _send_proc_offsets;
- for (int i=0; i<_sending_ids.size();i++)
+ for (int i=0; i<(int)_sending_ids.size();i++)
{
int iproc = _sending_ids[i].first;
for (int icomp=0; icomp<nbcomp; icomp++)
*/
ParaFIELD::ParaFIELD(TypeOfField type, TypeOfTimeDiscretization td, ParaMESH* para_support, const ComponentTopology& component_topology)
- :_support(para_support),
+ :_field(0),
_component_topology(component_topology),_topology(0),
- _field(0),
+ _support(para_support),
_has_field_ownership(true),
_has_support_ownership(false)
{
*/
ParaFIELD::ParaFIELD(MEDCouplingFieldDouble* subdomain_field, const ProcessorGroup& proc_group):
_field(subdomain_field),
- _support(),
_component_topology(ComponentTopology(_field->getNumberOfComponents())),_topology(0),
+ _support(),
_has_field_ownership(false),
_has_support_ownership(true)
{
StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0),
- _recv_buffer(0),_send_buffer(0),
- _recv_counts(0),_send_counts(0),
- _recv_displs(0),_send_displs(0)
+ _send_counts(0),_recv_counts(0),
+ _send_displs(0),_recv_displs(0),
+ _recv_buffer(0),_send_buffer(0)
{
}
\addtogroup structuredcoincidentdec
@{
*/
- StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):DEC(local_group,distant_group),_topo_source(0),_topo_target(0),_recv_buffer(0),_send_buffer(0),
- _recv_counts(0),_send_counts(0),_recv_displs(0),_send_displs(0)
+ StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):DEC(local_group,distant_group),
+ _topo_source(0),_topo_target(0),
+ _send_counts(0),_recv_counts(0),
+ _send_displs(0),_recv_displs(0),
+ _recv_buffer(0),_send_buffer(0)
{
}