-#! /usr/bin/env python
-# -*- coding: iso-8859-1 -*-
+#! /usr/bin/env python3
# Copyright (C) 2007-2016 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
-# Doxyfile 1.8.5
+# Doxyfile 1.8.8
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
PROJECT_NAME = "ParaViS Module v.@SALOMEPARAVIS_VERSION@"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
OUTPUT_DIRECTORY = doc_api
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
CREATE_SUBDIRS = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
REPEAT_BRIEF = NO
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
ALWAYS_DETAILED_SEC = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
INLINE_INHERITED_MEMB = YES
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
STRIP_FROM_PATH = @CMAKE_SOURCE_DIR@ \
@CMAKE_BINARY_DIR@
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
TAB_SIZE = 5
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
OPTIMIZE_OUTPUT_JAVA = YES
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
BUILTIN_STL_SUPPORT = @DOXYGEN_SUPPORT_STL@
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
LOOKUP_CACHE_SIZE = 0
+
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
INTERNAL_DOCS = YES
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
SORT_MEMBER_DOCS = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
MAX_INITIALIZER_LINES = 25
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
SHOW_USED_FILES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
CITE_BIB_FILES =
+
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
WARN_LOGFILE =
+
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
INPUT = @CMAKE_SOURCE_DIR@/src \
@CMAKE_SOURCE_DIR@/bin \
@CMAKE_BINARY_DIR@/bin
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
FILE_PATTERNS = *.hxx \
*.cxx \
*.h \
*.hh \
*.cc \
@DOXYGEN_PYTHON_EXTENSION@
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
IMAGE_PATH = @CMAKE_CURRENT_SOURCE_DIR@/images
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
FILTER_SOURCE_FILES = YES
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
USE_MDFILE_AS_MAINPAGE =
+
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
VERBATIM_HEADERS = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS =
+
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
COLS_IN_ALPHA_INDEX = 3
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
IGNORE_PREFIX =
+
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_HEADER = @CMAKE_CURRENT_BINARY_DIR@/static/header.html
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_FOOTER = @CMAKE_CURRENT_SOURCE_DIR@/static/footer.html
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra stylesheet files is of importance (e.g. the last
+# stylesheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_EXTRA_STYLESHEET = @CMAKE_CURRENT_SOURCE_DIR@/static/salome_extra.css
-HTML_EXTRA_FILES =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_TIMESTAMP = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
GENERATE_CHI = YES
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
TOC_EXPAND = YES
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of a
+# project to a relative location where the documentation can be found. The
+# format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
EXTRA_SEARCH_MAPPINGS =
+
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
+# for the replacement values of the other commands the user is referred to
+# HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
LATEX_BIB_STYLE = plain
+
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
RTF_EXTENSIONS_FILE =
+
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
MAN_EXTENSION = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
MAN_LINKS = NO
+
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
XML_OUTPUT = xml
-XML_SCHEMA =
-XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
XML_PROGRAMLISTING = YES
+
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
DOCBOOK_OUTPUT = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
GENERATE_AUTOGEN_DEF = NO
+
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
PERLMOD_MAKEVAR_PREFIX =
+
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
SKIP_FUNCTION_MACROS = NO
+
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
PERL_PATH = /usr/bin/perl
+
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
MSCGEN_PATH =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
HIDE_UNDOC_RELATIONS = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: YES.
+
HAVE_DOT = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_FONTNAME = Arial
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
COLLABORATION_GRAPH = NO
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
GROUP_GRAPHS = NO
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
TEMPLATE_RELATIONS = YES
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
INCLUDED_BY_GRAPH = NO
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
+# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
+# gif:cairo:gd, gif:gd, gif:gd:gd and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_IMAGE_FORMAT = jpg
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+PLANTUML_JAR_PATH =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
GENERATE_LEGEND = NO
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
DOT_CLEANUP = YES
import GEOM
import salome
from salome.geom import geomBuilder
- geompy = geomBuilder.New(salome.myStudy)
+ geompy = geomBuilder.New()
# create a cloud of points
points = [
import salome
from salome.smesh import smeshBuilder
- smesh = smeshBuilder.New(salome.myStudy)
+ smesh = smeshBuilder.New()
Mesh_tri = smesh.Mesh(geometry)
Regular_1D = Mesh_tri.Segment()
pvsimple.SetActiveSource(element)
pvsimple.Render(view=self.renderView)
if pause:
- raw_input("Press Enter key to continue")
+ input("Press Enter key to continue")
#
def display_mesh(self, element, pause=False):
<component-author>vsv</component-author>
<component-version>@SALOMEPARAVIS_VERSION@</component-version>
<component-comment>PVServer control service</component-comment>
- <component-multistudy>1</component-multistudy>
<component-impltype>1</component-impltype>
<component-icone>pqAppIcon22.png</component-icone>
<constraint></constraint>
try:
import GEOM
from salome.geom import geomBuilder
- geompy = geomBuilder.New(salome.myStudy)
+ geompy = geomBuilder.New()
go = sobj.GetObject()._narrow(GEOM.GEOM_Object)
if go:
tmpf = tempfile.NamedTemporaryFile(suffix='.vtk')
try:
import SMESH
from salome.smesh import smeshBuilder
- mesh = smeshBuilder.New(salome.myStudy)
+ mesh = smeshBuilder.New()
mo = sobj.GetObject()._narrow(SMESH.SMESH_Mesh)
if mo:
tmpf = tempfile.NamedTemporaryFile(suffix='.med')
mapGroups[groupName] = familiesOnGroups
# Establish the relations between families and groups
- for family_name, family_id in mapFamilies.iteritems():
+ for family_name, family_id in mapFamilies.items():
groupNames = []
- for group_name, family_name_on_group in mapGroups.iteritems():
+ for group_name, family_name_on_group in mapGroups.items():
if family_name in family_name_on_group:
groupNames.append(group_name)
if len(groupNames) > 0:
# Check that PointSprite plugin is available
if hasattr(representation, "MaxPixelSize"):
- activeRepresentation = 'Point Sprite'
- if representation.MaxPixelSize == 64:
- representation.MaxPixelSize = 8
+ activeRepresentation = 'Point Sprite'
+ if representation.MaxPixelSize == 64:
+ representation.MaxPixelSize = 8
else:
activeRepresentation = 'Points'
raise RuntimeError("Extraction SIL graph failed.")
# Sort families array by ID
-sortedArray = mapFamilies.values()
-sortedArray.sort()
+sortedArray = sorted(list(mapFamilies.values()))
# Prepare 'Annotation' list for lookup-table
numberValues = 0
numberValues += 1
# Iterate over all families to get group(s) by family name
- for famName, famID in mapFamilies.iteritems():
+ for famName, famID in mapFamilies.items():
if idFamily == famID:
- if mapRelations.has_key(famName):
+ if famName in mapRelations:
annotationList.append(str(', ').join(mapRelations.get(famName)))
else:
annotationList.append(str('No group'))
if (isMultiFile)
{
QStringList abuffer;
- abuffer.push_back(QString("def RebuildData( theStudy ):"));
+ abuffer.push_back(QString("def RebuildData():"));
QStringList lst(trace.split("\n"));
foreach(QString elem, lst)
{
#include <pqServerManagerModel.h>
#include <pqAnimationTimeToolbar.h>
+#if PY_VERSION_HEX < 0x03050000
+static char*
+Py_EncodeLocale(const wchar_t *arg, size_t *size)
+{
+ return _Py_wchar2char(arg, size);
+}
+static wchar_t*
+Py_DecodeLocale(const char *arg, size_t *size)
+{
+ return _Py_char2wchar(arg, size);
+}
+#endif
+
//----------------------------------------------------------------------------
PVGUI_Module* ParavisModule = 0;
// PyObject * elem = PyList_GetItem(lst, i);
// if (PyString_Check(elem))
// {
-// std::cout << "At pos:" << i << ", " << PyString_AsString(elem) << std::endl;
+// std::cout << "At pos:" << i << ", " << Py_EncodeLocale(PyUnicode_AS_UNICODE(elem), NULL) << std::endl;
// }
// else
// std::cout << "At pos:" << i << ", not a string!" << std::endl;
vtkSmartPyObject save_cam(PyObject_GetAttrString(trace_mod, const_cast<char*>("SaveCameras")));
vtkSmartPyObject camera_trace(PyObject_CallMethod(save_cam, const_cast<char*>("get_trace"), NULL));
// Convert to a single string
- vtkSmartPyObject ret(PyString_FromString(end_line.toStdString().c_str()));
+ vtkSmartPyObject ret(PyUnicode_FromUnicode(Py_DecodeLocale(end_line.toStdString().c_str(), NULL), end_line.size()));
vtkSmartPyObject final_string(PyObject_CallMethod(ret, const_cast<char*>("join"),
const_cast<char*>("O"), (PyObject*)camera_trace));
- if (PyString_CheckExact(final_string))
+ if (PyUnicode_CheckExact(final_string))
{
- QString camera_qs(PyString_AsString(final_string)); // deep copy
+ QString camera_qs(Py_EncodeLocale(PyUnicode_AS_UNICODE(final_string.GetPointer()), NULL)); // deep copy
traceString = traceString + end_line + end_line + QString("#### saving camera placements for all active views")
+ end_line + end_line + camera_qs + end_line;
}
-# Copyright (C) 2010-2016 CEA/DEN, EDF R&D
+# Copyright (C) 2010-2017 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
IF(SALOME_LIGHT_ONLY)
LIST(APPEND _PYFILES_TO_INSTALL
pvsimple.py
- presentations.py
)
ELSE(SALOME_LIGHT_ONLY)
- # Use a set of Python files redirecting the API directly
+ # Use a set of Python files redirecting the API directly
# to the original ParaView modules.
LIST(APPEND _PYFILES_TO_INSTALL
pvserver.py
pvsimple.py
- presentations.py
paravisSM.py
)
ENDIF(SALOME_LIGHT_ONLY)
+++ /dev/null
-# Copyright (C) 2010-2017 CEA/DEN, EDF R&D
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-#
-
-"""
-This module is intended to provide Python API for building presentations
-typical for Post-Pro module (Scalar Map, Deformed Shape, Vectors, etc.)
-"""
-
-from __future__ import division
-##from __future__ import print_function
-
-import warnings
-warnings.simplefilter('always', DeprecationWarning)
-warnings.warn("the presentations.py module is deprecated", DeprecationWarning, stacklevel=2)
-
-import os
-import re
-import warnings
-from math import sqrt, sin, cos, radians
-from string import upper
-
-# Do not use pv as a short name.
-# It is a name of function from numpy and may be redefined implicitly by 'from numpy import *' call.
-# import pvsimple as pv
-import pvsimple as pvs
-#try:
-# # TODO(MZN): to be removed (issue with Point Sprite texture)
-# #import paravisSM as sm
-#except:
-# import paraview.simple as pvs
-# import paraview.servermanager as sm
-
-
-# Constants
-EPS = 1E-3
-FLT_MIN = 1E-37
-VTK_LARGE_FLOAT = 1E+38
-GAP_COEFFICIENT = 0.0001
-
-
-# Globals
-_current_bar = None
-_med_field_sep = '@@][@@'
-
-
-# Enumerations
-class PrsTypeEnum:
- """
- Post-Pro presentation types.
- """
- MESH = 0
- SCALARMAP = 1
- ISOSURFACES = 2
- CUTPLANES = 3
- CUTLINES = 4
- DEFORMEDSHAPE = 5
- DEFORMEDSHAPESCALARMAP = 6
- VECTORS = 7
- PLOT3D = 8
- STREAMLINES = 9
- GAUSSPOINTS = 10
-
- _type2name = {MESH: 'Mesh',
- SCALARMAP: 'Scalar Map',
- ISOSURFACES: 'Iso Surfaces',
- CUTPLANES: 'Cut Planes',
- CUTLINES: 'Cut Lines',
- DEFORMEDSHAPE: 'Deformed Shape',
- DEFORMEDSHAPESCALARMAP: 'Deformed Shape And Scalar Map',
- VECTORS: 'Vectors',
- PLOT3D: 'Plot3D',
- STREAMLINES: 'Stream Lines',
- GAUSSPOINTS: 'Gauss Points'}
-
- @classmethod
- def get_name(cls, type):
- """Return presentaion name by its type."""
- return cls._type2name[type]
-
-
-class EntityType:
- """
- Entity types.
- """
- NODE = 0
- CELL = 1
-
- _type2name = {NODE: 'P1',
- CELL: 'P0'}
-
- _name2type = {'P1': NODE,
- 'P0': CELL}
-
- _type2pvtype = {NODE: 'POINT_DATA',
- CELL: 'CELL_DATA'}
-
- @classmethod
- def get_name(cls, type):
- """Return entity name (used in full group names) by its type."""
- return cls._type2name[type]
-
- @classmethod
- def get_type(cls, name):
- """Return entity type by its name (used in full group names)."""
- return cls._name2type[name]
-
- @classmethod
- def get_pvtype(cls, type):
- """Return entity type from ['CELL_DATA', 'POINT_DATA']"""
- return cls._type2pvtype[type]
-
-
-class Orientation:
- """Orientation types.
-
- Defines a set of plane orientation possibilities:
- AUTO: plane orientation should be calculated.
- XY: plane formed by X and Y axis.
- YZ: plane formed by Y and Z axis.
- ZX: plane formed by Z and X axis
-
- """
- AUTO = 0
- XY = 1
- YZ = 2
- ZX = 3
-
-
-class GlyphPos:
- """Glyph positions.
-
- Set of elements defining the position of the vector head:
- CENTER: in the center of the vector
- TAIL: in the tail of the vector
- HEAD: in the head of the vector
-
- """
- CENTER = 0
- TAIL = 1
- HEAD = 2
-
-
-class GaussType:
- """
- Gauss Points primitive types.
- """
- SPRITE = 0
- POINT = 1
- SPHERE = 2
-
- _type2mode = {SPRITE: 'Texture',
- POINT: 'SimplePoint',
- SPHERE: 'Sphere (Texture)'}
-
- @classmethod
- def get_mode(cls, type):
- """Return paraview point sprite mode by the primitive type."""
- return cls._type2mode[type]
-
-
-# Auxiliary functions
-
-def get_field_mesh_name(full_field_name):
- """Return mesh name of the field by its full name."""
- aList = full_field_name.split('/')
- if len(aList) >= 2 :
- field_name = full_field_name.split('/')[1]
- return field_name
-
-
-def get_field_entity(full_field_name):
- """Return entity type of the field by its full name."""
- aList = full_field_name.split(_med_field_sep)
- if len(aList) == 2 :
- entity_name = full_field_name.split(_med_field_sep)[-1]
- entity = EntityType.get_type(entity_name)
- return entity
-
-
-def get_field_short_name(full_field_name):
- """Return short name of the field by its full name."""
- aList = full_field_name.split('/')
- if len(aList) == 4 :
- short_name_with_type = full_field_name.split('/')[-1]
- short_name = short_name_with_type.split(_med_field_sep)[0]
- return short_name
-
-
-def find_mesh_full_name(proxy, short_mesh_name):
- """Return full mesh path by short mesh name, if found"""
- proxy.UpdatePipeline()
- all_mesh_names = get_mesh_full_names(proxy)
- for name in all_mesh_names:
- if short_mesh_name == get_field_short_name(name):
- return name
-
-
-def process_prs_for_test(prs, view, picture_name, show_bar=True):
- """Show presentation and record snapshot image.
-
- Arguments:
- prs: the presentation to show
- view: the render view
- picture_name: the full name of the graphics file to save
- show_bar: to show scalar bar or not
-
- """
- # Show the presentation only
- display_only(prs, view)
-
- # Show scalar bar
- global _current_bar
- if show_bar and _current_bar:
- _current_bar.Visibility = 1
-
- # Reset the view
- reset_view(view)
-
- # Create a directory for screenshot if necessary
- file_name = re.sub("\s+", "_", picture_name)
- pic_dir = os.path.dirname(picture_name)
- if not os.path.exists(pic_dir):
- os.makedirs(pic_dir)
-
- # Save picture
- print "Write image:", file_name
- pvs.WriteImage(file_name, view=view, Magnification=1)
-
-
-def reset_view(view=None):
- """Reset the view.
-
- Set predefined (taken from Post-Pro) camera settings.
- If the view is not passed, the active view is used.
-
- """
- if not view:
- view = pvs.GetRenderView()
-
- # Camera preferences
- view.CameraFocalPoint = [0.0, 0.0, 0.0]
- view.CameraViewUp = [0.0, 0.0, 1.0]
- view.CameraPosition = [738.946, -738.946, 738.946]
-
- # Turn on the headligth
- view.LightSwitch = 1
- view.LightIntensity = 0.5
-
- # Use parallel projection
- view.CameraParallelProjection = 1
-
- view.ResetCamera()
- pvs.Render(view=view)
-
-
-def hide_all(view, to_remove=False):
- """Hide all representations in the view."""
- if not view:
- view = pvs.GetRenderView()
-
- rep_list = view.Representations
- for rep in rep_list:
- if hasattr(rep, 'Visibility') and rep.Visibility != 0:
- rep.Visibility = 0
- if to_remove:
- view.Representations.remove(rep)
- pvs.Render(view=view)
-
-
-def display_only(prs, view=None):
- """Display only the given presentation in the view."""
- if not view:
- view = pvs.GetRenderView()
-
- rep_list = view.Representations
- for rep in rep_list:
- if hasattr(rep, 'Visibility'):
- rep.Visibility = (rep == prs)
- pvs.Render(view=view)
-
-
-def set_visible_lines(xy_prs, lines):
- """Set visible only the given lines for XYChartRepresentation."""
- sv = xy_prs.GetProperty("SeriesVisibility").GetData()
- visible = '0'
-
- for i in xrange(0, len(sv)):
- if i % 2 == 0:
- line_name = sv[i]
- if line_name in lines:
- visible = '1'
- else:
- visible = '0'
- else:
- sv[i] = visible
-
- xy_prs.SeriesVisibility = sv
-
-
-def check_vector_mode(vector_mode, nb_components):
- """Check vector mode.
-
- Check if vector mode is correct for the data array with the
- given number of components.
-
- Arguments:
- vector_mode: 'Magnitude', 'X', 'Y' or 'Z'
- nb_components: number of component in the data array
-
- Raises:
- ValueError: in case of the vector mode is unexistent
- or nonapplicable.
-
- """
- if vector_mode not in ('Magnitude', 'X', 'Y', 'Z'):
- raise ValueError("Unexistent vector mode: " + vector_mode)
-
- if ((nb_components == 1 and (vector_mode == 'Y' or vector_mode == 'Z')) or
- (nb_components == 2 and vector_mode == 'Z')):
- raise ValueError("Incorrect vector mode " + vector_mode + " for " +
- nb_components + "-component field")
-
-
-def get_vector_component(vector_mode):
- """Get vector component as ineger.
-
- Translate vector component notation from string
- to integer:
- 'Magnitude': -1
- 'X': 0
- 'Y': 1
- 'Z': 2
-
- """
- vcomponent = -1
-
- if vector_mode == 'X':
- vcomponent = 0
- elif vector_mode == 'Y':
- vcomponent = 1
- elif vector_mode == 'Z':
- vcomponent = 2
-
- return vcomponent
-
-
-def get_data_range(proxy, entity, field_name, vector_mode='Magnitude',
- cut_off=False):
- """Get data range for the field.
-
- Arguments:
- proxy: the pipeline object, containig data array for the field
- entity: the field entity
- field_name: the field name
- vector_mode: the vector mode ('Magnitude', 'X', 'Y' or 'Z')
-
- Returns:
- Data range as [min, max]
-
- """
- proxy.UpdatePipeline()
- entity_data_info = None
- field_data = proxy.GetFieldDataInformation()
-
- if field_name in field_data.keys():
- entity_data_info = field_data
- elif entity == EntityType.CELL:
- entity_data_info = proxy.GetCellDataInformation()
- elif entity == EntityType.NODE:
- entity_data_info = proxy.GetPointDataInformation()
-
- data_range = []
-
- if field_name in entity_data_info.keys():
- vcomp = get_vector_component(vector_mode)
- data_range = entity_data_info[field_name].GetComponentRange(vcomp)
- else:
- pv_entity = EntityType.get_pvtype(entity)
- warnings.warn("Field " + field_name +
- " is unknown for " + pv_entity + "!")
-
- # Cut off the range
- if cut_off and (data_range[0] <= data_range[1]):
- data_range = list(data_range)
- delta = abs(data_range[1] - data_range[0]) * GAP_COEFFICIENT
- data_range[0] += delta
- data_range[1] -= delta
-
- return data_range
-
-
-def get_bounds(proxy):
- """Get bounds of the proxy in 3D."""
- proxy.UpdatePipeline()
- dataInfo = proxy.GetDataInformation()
- bounds_info = dataInfo.GetBounds()
- return bounds_info
-
-
-def get_x_range(proxy):
- """Get X range of the proxy bounds in 3D."""
- proxy.UpdatePipeline()
- bounds_info = get_bounds(proxy)
- return bounds_info[0:2]
-
-
-def get_y_range(proxy):
- """Get Y range of the proxy bounds in 3D."""
- proxy.UpdatePipeline()
- bounds_info = get_bounds(proxy)
- return bounds_info[2:4]
-
-
-def get_z_range(proxy):
- """Get Z range of the proxy bounds in 3D."""
- proxy.UpdatePipeline()
- bounds_info = get_bounds(proxy)
- return bounds_info[4:6]
-
-
-def is_planar_input(proxy):
- """Check if the given input is planar."""
- proxy.UpdatePipeline()
- bounds_info = get_bounds(proxy)
-
- if (abs(bounds_info[0] - bounds_info[1]) <= FLT_MIN or
- abs(bounds_info[2] - bounds_info[3]) <= FLT_MIN or
- abs(bounds_info[4] - bounds_info[5]) <= FLT_MIN):
- return True
-
- return False
-
-
-def is_data_on_cells(proxy, field_name):
- """Check the existence of a field on cells with the given name."""
- proxy.UpdatePipeline()
- cell_data_info = proxy.GetCellDataInformation()
- return (field_name in cell_data_info.keys())
-
-
-def is_empty(proxy):
- """Check if the object contains any points or cells.
-
- Returns:
- True: if the given proxy doesn't contain any points or cells
- False: otherwise
-
- """
- proxy.UpdatePipeline()
- data_info = proxy.GetDataInformation()
-
- nb_cells = data_info.GetNumberOfCells()
- nb_points = data_info.GetNumberOfPoints()
-
- return not(nb_cells + nb_points)
-
-
-def get_orientation(proxy):
- """Get the optimum cutting plane orientation for Plot 3D."""
- proxy.UpdatePipeline()
- orientation = Orientation.XY
-
- bounds = get_bounds(proxy)
- delta = [bounds[1] - bounds[0],
- bounds[3] - bounds[2],
- bounds[5] - bounds[4]]
-
- if (delta[0] >= delta[1] and delta[0] >= delta[2]):
- if (delta[1] >= delta[2]):
- orientation = Orientation.XY
- else:
- orientation = Orientation.ZX
- elif (delta[1] >= delta[0] and delta[1] >= delta[2]):
- if (delta[0] >= delta[2]):
- orientation = Orientation.XY
- else:
- orientation = Orientation.YZ
- elif (delta[2] >= delta[0] and delta[2] >= delta[1]):
- if (delta[0] >= delta[1]):
- orientation = Orientation.ZX
- else:
- orientation = Orientation.YZ
-
- return orientation
-
-
-def dot_product(a, b):
- """Dot product of two 3-vectors."""
- dot = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
- return dot
-
-
-def multiply3x3(a, b):
- """Mutltiply one 3x3 matrix by another."""
- c = [[0, 0, 0],
- [0, 0, 0],
- [0, 0, 0]]
-
- for i in xrange(3):
- c[0][i] = a[0][0] * b[0][i] + a[0][1] * b[1][i] + a[0][2] * b[2][i]
- c[1][i] = a[1][0] * b[0][i] + a[1][1] * b[1][i] + a[1][2] * b[2][i]
- c[2][i] = a[2][0] * b[0][i] + a[2][1] * b[1][i] + a[2][2] * b[2][i]
-
- return c
-
-
-def get_rx(ang):
- """Get X rotation matrix by angle."""
- rx = [[1.0, 0.0, 0.0],
- [0.0, cos(ang), -sin(ang)],
- [0.0, sin(ang), cos(ang)]]
-
- return rx
-
-
-def get_ry(ang):
- """Get Y rotation matrix by angle."""
- ry = [[cos(ang), 0.0, sin(ang)],
- [0.0, 1.0, 0.0],
- [-sin(ang), 0.0, cos(ang)]]
-
- return ry
-
-
-def get_rz(ang):
- """Get Z rotation matrix by angle."""
- rz = [[cos(ang), -sin(ang), 0.0],
- [sin(ang), cos(ang), 0.0],
- [0.0, 0.0, 1.0]]
-
- return rz
-
-
-def get_normal_by_orientation(orientation, ang1=0, ang2=0):
- """Get normal for the plane by its orientation."""
- i_plane = 0
- rotation = [[], [], []]
- rx = ry = rz = [[1.0, 0.0, 0.0],
- [0.0, 1.0, 0.0],
- [0.0, 0.0, 1.0]]
-
- normal = [0.0, 0.0, 0.0]
- if orientation == Orientation.XY:
- if abs(ang1) > EPS:
- rx = get_rx(ang1)
- if abs(ang2) > EPS:
- ry = get_ry(ang2)
- rotation = multiply3x3(rx, ry)
- i_plane = 2
- elif orientation == Orientation.ZX:
- if abs(ang1) > EPS:
- rz = get_rz(ang1)
- if abs(ang2) > EPS:
- rx = get_rx(ang2)
- rotation = multiply3x3(rz, rx)
- i_plane = 1
- elif orientation == Orientation.YZ:
- if abs(ang1) > EPS:
- ry = get_ry(ang1)
- if abs(ang2) > EPS:
- rz = get_rz(ang2)
- rotation = multiply3x3(ry, rz)
- i_plane = 0
-
- for i in xrange(0, 3):
- normal[i] = rotation[i][i_plane]
-
- return normal
-
-
-def get_bound_project(bound_box, dir):
- """Get bounds projection"""
- bound_points = [[bound_box[0], bound_box[2], bound_box[4]],
- [bound_box[1], bound_box[2], bound_box[4]],
- [bound_box[0], bound_box[3], bound_box[4]],
- [bound_box[1], bound_box[3], bound_box[4]],
- [bound_box[0], bound_box[2], bound_box[5]],
- [bound_box[1], bound_box[2], bound_box[5]],
- [bound_box[0], bound_box[3], bound_box[5]],
- [bound_box[1], bound_box[3], bound_box[5]]]
-
- bound_prj = [0, 0, 0]
- bound_prj[0] = dot_product(dir, bound_points[0])
- bound_prj[1] = bound_prj[0]
-
- for i in xrange(1, 8):
- tmp = dot_product(dir, bound_points[i])
- if bound_prj[1] < tmp:
- bound_prj[1] = tmp
- if bound_prj[0] > tmp:
- bound_prj[0] = tmp
-
- bound_prj[2] = bound_prj[1] - bound_prj[0]
- bound_prj[1] = bound_prj[0] + (1.0 - EPS) * bound_prj[2]
- bound_prj[0] = bound_prj[0] + EPS * bound_prj[2]
- bound_prj[2] = bound_prj[1] - bound_prj[0]
-
- return bound_prj
-
-
-def get_positions(nb_planes, dir, bounds, displacement):
- """Compute plane positions."""
- positions = []
- bound_prj = get_bound_project(bounds, dir)
- if nb_planes > 1:
- step = bound_prj[2] / (nb_planes - 1)
- abs_displacement = step * displacement
- start_pos = bound_prj[0] - 0.5 * step + abs_displacement
- for i in xrange(nb_planes):
- pos = start_pos + i * step
- positions.append(pos)
- else:
- pos = bound_prj[0] + bound_prj[2] * displacement
- positions.append(pos)
-
- return positions
-
-
-def get_contours(scalar_range, nb_contours):
- """Generate contour values."""
- contours = []
- for i in xrange(nb_contours):
- pos = scalar_range[0] + i * (
- scalar_range[1] - scalar_range[0]) / (nb_contours - 1)
- contours.append(pos)
-
- return contours
-
-
-def get_nb_components(proxy, entity, field_name):
- """Return number of components for the field."""
- proxy.UpdatePipeline()
- entity_data_info = None
- field_data = proxy.GetFieldDataInformation()
-
- if field_name in field_data.keys():
- entity_data_info = field_data
- elif entity == EntityType.CELL:
- select_cells_with_data(proxy, on_cells=[field_name])
- entity_data_info = proxy.GetCellDataInformation()
- elif entity == EntityType.NODE:
- select_cells_with_data(proxy, on_points=[field_name])
- entity_data_info = proxy.GetPointDataInformation()
-
- nb_comp = None
- if field_name in entity_data_info.keys():
- nb_comp = entity_data_info[field_name].GetNumberOfComponents()
- else:
- pv_entity = EntityType.get_pvtype(entity)
- raise ValueError("Field " + field_name +
- " is unknown for " + pv_entity + "!")
-
- return nb_comp
-
-
-def get_scale_factor(proxy):
- """Compute scale factor."""
- if not proxy:
- return 0.0
-
- proxy.UpdatePipeline()
- data_info = proxy.GetDataInformation()
-
- nb_cells = data_info.GetNumberOfCells()
- nb_points = data_info.GetNumberOfPoints()
- nb_elements = nb_cells if nb_cells > 0 else nb_points
- bounds = get_bounds(proxy)
-
- volume = 1
- vol = dim = 0
-
- for i in xrange(0, 6, 2):
- vol = abs(bounds[i + 1] - bounds[i])
- if vol > 0:
- dim += 1
- volume *= vol
-
- if nb_elements == 0 or dim < 1 / VTK_LARGE_FLOAT:
- return 0
-
- volume /= nb_elements
-
- return pow(volume, 1 / dim)
-
-
-def get_default_scale(prs_type, proxy, entity, field_name):
- """Get default scale factor."""
- proxy.UpdatePipeline()
- data_range = get_data_range(proxy, entity, field_name)
-
- if prs_type == PrsTypeEnum.DEFORMEDSHAPE:
- EPS = 1.0 / VTK_LARGE_FLOAT
- if abs(data_range[1]) > EPS:
- scale_factor = get_scale_factor(proxy)
- return scale_factor / data_range[1]
- elif prs_type == PrsTypeEnum.PLOT3D:
- bounds = get_bounds(proxy)
- length = sqrt((bounds[1] - bounds[0]) ** 2 +
- (bounds[3] - bounds[2]) ** 2 +
- (bounds[5] - bounds[4]) ** 2)
-
- EPS = 0.3
- if data_range[1] > 0:
- return length / data_range[1] * EPS
-
- return 0
-
-
-def get_calc_magnitude(proxy, array_entity, array_name):
- """Compute magnitude for the given vector array via Calculator.
-
- Returns:
- the calculator object.
-
- """
- proxy.UpdatePipeline()
- calculator = None
-
- # Transform vector array to scalar array if possible
- nb_components = get_nb_components(proxy, array_entity, array_name)
- if (nb_components > 1):
- calculator = pvs.Calculator(proxy)
- attribute_mode = "Point Data"
- if array_entity != EntityType.NODE:
- attribute_mode = "Cell Data"
- calculator.AttributeMode = attribute_mode
- if (nb_components == 2):
- # Workaroud: calculator unable to compute magnitude
- # if number of components equal to 2
- func = "sqrt(" + array_name + "_X^2+" + array_name + "_Y^2)"
- calculator.Function = func
- else:
- calculator.Function = "mag(" + array_name + ")"
- calculator.ResultArrayName = array_name + "_magnitude"
- calculator.UpdatePipeline()
-
- return calculator
-
-
-def get_add_component_calc(proxy, array_entity, array_name):
- """Creates 3-component array from 2-component.
-
- The first two components is from the original array. The 3rd component
- is zero.
- If the number of components is not equal to 2 - return original array name.
-
- Returns:
- the calculator object.
-
- """
- proxy.UpdatePipeline()
- calculator = None
-
- nb_components = get_nb_components(proxy, array_entity, array_name)
- if nb_components == 2:
- calculator = pvs.Calculator(proxy)
- attribute_mode = "Point Data"
- if array_entity != EntityType.NODE:
- attribute_mode = "Cell Data"
- calculator.AttributeMode = attribute_mode
- expression = "iHat * " + array_name + "_X + jHat * " + array_name + "_Y + kHat * 0"
- calculator.Function = expression
- calculator.ResultArrayName = array_name + "_3c"
- calculator.UpdatePipeline()
-
- return calculator
-
-
def select_all_cells(proxy):
    """Select all cell types.

    Used in creation of mesh/submesh presentation.
    """
    proxy.UpdatePipeline()
    # NOTE(review): the filter is created without an explicit input, so
    # it presumably attaches to the current active source (expected to
    # be `proxy`) — confirm against paraview.simple semantics.
    extractCT = pvs.ExtractCellType()
    # "GeoTypesInfo" is a flat [name, flag, name, flag, ...] list; every
    # other item yields the geometry type names only.
    extractCT.AllGeoTypes = extractCT.GetProperty("GeoTypesInfo")[::2]
    extractCT.UpdatePipelineInformation()
-
-
def select_cells_with_data(proxy, on_points=None, on_cells=None, on_gauss=None):
    """Select cell types with data.

    Only cell types with data for the given fields will be selected.
    If no fields defined (neither on points nor on cells) only cell
    types with data for even one field (from available) will be selected.

    Arguments:
      proxy: the pipeline object containing the data
      on_points: names of fields defined on points
      on_cells: names of fields defined on cells
      on_gauss: names of fields defined on gauss points

    Returns:
      For MED-style proxies (no 'Entity' property): True when at least
      one requested field was found and selected, False otherwise.
      None for legacy proxies or when the proxy has no field tree.
    """
    # Normalize arguments: avoid mutable default lists (shared between
    # calls) while keeping the original calling convention.
    on_points = [] if on_points is None else on_points
    on_cells = [] if on_cells is None else on_cells
    on_gauss = [] if on_gauss is None else on_gauss

    if not proxy.GetProperty("FieldsTreeInfo"):
        return

    proxy.UpdatePipeline()
    if not hasattr(proxy, 'Entity'):
        # MED reader branch: match requested fields against the
        # "name<sep>discretization" entries of the field tree.
        fields_info = proxy.GetProperty("FieldsTreeInfo")[::2]
        arr_name_with_dis = [elt.split("/")[-1] for elt in fields_info]

        fields = []
        for name in on_gauss:
            fields.append(name + _med_field_sep + 'GAUSS')
        for name in on_cells:
            fields.append(name + _med_field_sep + 'P0')
        for name in on_points:
            fields.append(name + _med_field_sep + 'P1')

        field_list = []
        for name in fields:
            if name in arr_name_with_dis:
                index = arr_name_with_dis.index(name)
                field_list.append(fields_info[index])

        if field_list:
            proxy.AllArrays = field_list
            proxy.UpdatePipeline()
        return len(field_list) != 0

    # TODO: VTN. Looks like this code is out of date.

    all_cell_types = proxy.Entity.Available
    all_arrays = list(proxy.CellArrays.GetData())
    all_arrays.extend(proxy.PointArrays.GetData())

    if not all_arrays:
        file_name = proxy.FileName.split(os.sep)[-1]
        # Python 3: print is a function.
        print("Warning: " + file_name + " doesn't contain any data array.")

    # List of cell types to be selected
    cell_types_on = []

    for cell_type in all_cell_types:
        # Probe each cell type individually to see which arrays it carries.
        proxy.Entity = [cell_type]
        proxy.UpdatePipeline()

        cell_arrays = proxy.GetCellDataInformation().keys()
        point_arrays = proxy.GetPointDataInformation().keys()

        if on_points or on_cells:
            # Keep the cell type only if it has ALL requested fields.
            if (all(array in cell_arrays for array in on_cells) and
                    all(array in point_arrays for array in on_points)):
                cell_types_on.append(cell_type)
        else:
            # No specific fields requested: keep the cell type if it has
            # data for at least one available array.
            if any(array in cell_arrays or array in point_arrays
                   for array in all_arrays):
                cell_types_on.append(cell_type)

    # Select cell types
    proxy.Entity = cell_types_on
    proxy.UpdatePipeline()
-
def if_possible(proxy, field_name, entity, prs_type, extrGrps=None):
    """Check if the presentation creation is possible on the given field."""
    proxy.UpdatePipeline()

    vector_prs_types = (PrsTypeEnum.DEFORMEDSHAPE,
                        PrsTypeEnum.DEFORMEDSHAPESCALARMAP,
                        PrsTypeEnum.VECTORS,
                        PrsTypeEnum.STREAMLINES)

    if prs_type in vector_prs_types:
        # Vector-based presentations require a multi-component field.
        return get_nb_components(proxy, entity, field_name) > 1
    if prs_type == PrsTypeEnum.GAUSSPOINTS:
        # Gauss points need cell data or quadrature point arrays.
        return (entity == EntityType.CELL or
                field_name in proxy.QuadraturePointArrays.Available)
    if prs_type == PrsTypeEnum.MESH:
        # Mesh presentation requires at least one group to extract.
        return len(get_group_names(extrGrps)) > 0

    # All other presentation types are always possible.
    return True
-
-
def add_scalar_bar(field_name, nb_components,
                   vector_mode, lookup_table, time_value):
    """Add scalar bar with predefined properties.

    Creates a vertical scalar bar, attaches it to the current render
    view and remembers it as the module-level current bar.

    Returns:
      The newly created scalar bar object.
    """
    global _current_bar

    # Bar title: field name, time value and, for vectors, the mode.
    title_parts = [field_name, str(time_value)]
    if nb_components > 1:
        title_parts.append(vector_mode)

    bar = pvs.CreateScalarBar(Enabled=1)
    bar.Orientation = 'Vertical'
    bar.Title = "\n".join(title_parts)
    bar.LookupTable = lookup_table

    # Default properties chosen to mimic the Post-Pro module.
    bar.NumberOfLabels = 5
    bar.AutomaticLabelFormat = 0
    bar.LabelFormat = '%-#6.6g'
    # Title font
    bar.TitleFontFamily = 'Arial'
    bar.TitleFontSize = 8
    bar.TitleBold = 1
    bar.TitleItalic = 1
    bar.TitleShadow = 1
    # Label font
    bar.LabelFontFamily = 'Arial'
    bar.LabelFontSize = 8
    bar.LabelBold = 1
    bar.LabelItalic = 1
    bar.LabelShadow = 1

    # Make the bar visible in the current render view.
    pvs.GetRenderView().Representations.append(bar)

    # Remember the bar so get_bar() can return it later.
    _current_bar = bar
    return _current_bar
-
-
def get_bar():
    """Return the scalar bar most recently created by add_scalar_bar()."""
    return _current_bar
-
-
def get_lookup_table(field_name, nb_components, vector_mode='Magnitude'):
    """Get lookup table for the given field.

    Arguments:
      field_name: the field name
      nb_components: number of components of the field
      vector_mode: 'Magnitude', 'X', 'Y' or 'Z'

    Raises:
      ValueError: when vector_mode is not one of the accepted values.
      RuntimeError: when the lookup table exposes neither of the known
        range-lock properties.
    """
    lookup_table = pvs.GetLookupTableForArray(field_name, nb_components)

    # Translate the symbolic vector mode into lookup table settings.
    component_by_mode = {'X': 0, 'Y': 1, 'Z': 2}
    if vector_mode == 'Magnitude':
        lookup_table.VectorMode = 'Magnitude'
    elif vector_mode in component_by_mode:
        lookup_table.VectorMode = 'Component'
        lookup_table.VectorComponent = component_by_mode[vector_mode]
    else:
        raise ValueError("Incorrect vector mode: " + vector_mode)

    lookup_table.Discretize = 0
    lookup_table.ColorSpace = 'HSV'
    # The range-lock property name differs between ParaView versions.
    if hasattr(lookup_table, "LockDataRange"):
        lookup_table.LockDataRange = 0
    elif hasattr(lookup_table, "LockScalarRange"):
        lookup_table.LockScalarRange = 0
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    return lookup_table
-
-
def get_group_mesh_name(full_group_name):
    """Return the mesh name of a group given its full '/'-separated name.

    The mesh name is the second path component; None is returned when
    the name has fewer than two components.
    """
    parts = full_group_name.split('/')
    return parts[1] if len(parts) >= 2 else None
-
def get_group_entity(full_group_name):
    """Return the entity type of a group given its full '/'-separated name.

    The entity name is the third path component; None is returned when
    the name has fewer than three components.
    """
    parts = full_group_name.split('/')
    if len(parts) < 3:
        return None
    return EntityType.get_type(parts[2])
-
-
def get_group_short_name(full_group_name):
    """Return the short name of the group: its name without the leading
    'GRP_' prefix (removed once, only at the start of the string)."""
    prefix = 'GRP_'
    if full_group_name.startswith(prefix):
        return full_group_name[len(prefix):]
    return full_group_name
-
-
def get_mesh_full_names(proxy):
    """Return all mesh names in the given proxy as a set."""
    proxy.UpdatePipeline()
    fields = proxy.GetProperty("FieldsTreeInfo")[::2]
    # A field-tree entry denotes a mesh when its mesh name coincides
    # with its own short name.
    return {item for item in fields
            if get_field_mesh_name(item) == get_field_short_name(item)}
-
-
def get_group_names(extrGrps):
    """Return full names of all groups of the given 'ExtractGroup' filter object.

    Returns a real list (the original used filter(), which returns a
    lazy iterator under Python 3 and breaks callers taking len() of
    the result).
    """
    groups_info = list(extrGrps.GetProperty("GroupsFlagsInfo")[::2])
    return [name for name in groups_info if name[:4] == "GRP_"]
-
-
def get_time(proxy, timestamp_nb):
    """Get time value by timestamp number.

    Arguments:
      proxy: the pipeline object (or a filter whose Input) carrying
        the 'TimestepValues' property
      timestamp_nb: 1-based timestamp number; negative values count
        from the end (-1 is the last timestamp)

    Returns:
      The corresponding time value, or 0.0 when the proxy has no
      timestamps at all.

    Raises:
      ValueError: when timestamp_nb is out of range.
    """
    timestamps = []

    if hasattr(proxy, 'TimestepValues'):
        timestamps = proxy.TimestepValues.GetData()
    elif hasattr(proxy.Input, 'TimestepValues'):
        timestamps = proxy.Input.TimestepValues.GetData()

    # Bounds check (xrange in the original is Python-2-only; a plain
    # comparison is equivalent and works on Python 3).
    length = len(timestamps)
    if ((timestamp_nb > 0 and not 0 <= timestamp_nb - 1 < length) or
            (timestamp_nb < 0 and -timestamp_nb > length)):
        raise ValueError("Timestamp number is out of range: " + str(timestamp_nb))

    if not timestamps:
        return 0.0

    # Positive numbers are 1-based; negative ones index from the end.
    if timestamp_nb > 0:
        return timestamps[timestamp_nb - 1]
    return timestamps[timestamp_nb]
-
def create_prs(prs_type, proxy, field_entity, field_name, timestamp_nb):
    """Auxiliary function.

    Build presentation of the given type on the given field and
    timestamp number.
    Set the presentation properties like visu.CreatePrsForResult() do.

    Raises:
      ValueError: when prs_type is not a known presentation type.
    """
    proxy.UpdatePipeline()

    # Dispatch table: presentation type -> builder call.
    builders = {
        PrsTypeEnum.SCALARMAP:
            lambda: ScalarMapOnField(proxy, field_entity,
                                     field_name, timestamp_nb),
        PrsTypeEnum.CUTPLANES:
            lambda: CutPlanesOnField(proxy, field_entity,
                                     field_name, timestamp_nb,
                                     orientation=Orientation.ZX),
        PrsTypeEnum.CUTLINES:
            lambda: CutLinesOnField(proxy, field_entity,
                                    field_name, timestamp_nb,
                                    orientation1=Orientation.XY,
                                    orientation2=Orientation.ZX),
        PrsTypeEnum.DEFORMEDSHAPE:
            lambda: DeformedShapeOnField(proxy, field_entity,
                                         field_name, timestamp_nb),
        PrsTypeEnum.DEFORMEDSHAPESCALARMAP:
            lambda: DeformedShapeAndScalarMapOnField(proxy, field_entity,
                                                     field_name, timestamp_nb),
        PrsTypeEnum.VECTORS:
            lambda: VectorsOnField(proxy, field_entity,
                                   field_name, timestamp_nb),
        PrsTypeEnum.PLOT3D:
            lambda: Plot3DOnField(proxy, field_entity,
                                  field_name, timestamp_nb),
        PrsTypeEnum.ISOSURFACES:
            lambda: IsoSurfacesOnField(proxy, field_entity,
                                       field_name, timestamp_nb),
        PrsTypeEnum.GAUSSPOINTS:
            lambda: GaussPointsOnField(proxy, field_entity,
                                       field_name, timestamp_nb),
        PrsTypeEnum.STREAMLINES:
            lambda: StreamLinesOnField(proxy, field_entity,
                                       field_name, timestamp_nb),
    }

    if prs_type not in builders:
        raise ValueError("Unexistent presentation type.")

    return builders[prs_type]()
-
-
-# Functions for building Post-Pro presentations
def ScalarMapOnField(proxy, entity, field_name, timestamp_nb,
                     vector_mode='Magnitude'):
    """Creates Scalar Map presentation on the given field.

    Arguments:
      proxy: the pipeline object, containing data
      entity: the entity type from PrsTypeEnum
      field_name: the field name
      timestamp_nb: the number of time step (1, 2, ...)
      vector_mode: the mode of transformation of vector values
        into scalar values, applicable only if the field contains vector values.
        Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.

    Returns:
      Scalar Map as representation object.

    """
    proxy.UpdatePipeline()
    # We don't need mesh parts with no data on them
    if entity == EntityType.NODE:
        select_cells_with_data(proxy, on_points=[field_name])
    else:
        select_cells_with_data(proxy, on_cells=[field_name])

    # Check vector mode
    nb_components = get_nb_components(proxy, entity, field_name)
    check_vector_mode(vector_mode, nb_components)

    # Get time value
    time_value = get_time(proxy, timestamp_nb)

    # Set timestamp
    pvs.GetRenderView().ViewTime = time_value
    pvs.UpdatePipeline(time_value, proxy)

    # Get Scalar Map representation object
    scalarmap = pvs.GetRepresentation(proxy)

    # Get lookup table
    lookup_table = get_lookup_table(field_name, nb_components, vector_mode)

    # Lock the lookup table to the field's data range (the lock
    # property name differs between ParaView versions).
    data_range = get_data_range(proxy, entity,
                                field_name, vector_mode)
    if hasattr(lookup_table, "LockDataRange"):
        lookup_table.LockDataRange = 1
    elif hasattr(lookup_table, "LockScalarRange"):
        lookup_table.LockScalarRange = 1
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    # Blue-to-red ramp over the data range.
    lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]
    # Color the representation by the field
    pvs.ColorBy(scalarmap, (EntityType.get_pvtype(entity), field_name))
    scalarmap.LookupTable = lookup_table

    # Add scalar bar (it builds its own title from the field name,
    # time value and vector mode; the original computed an unused
    # local bar_title here — dead code, removed).
    add_scalar_bar(field_name, nb_components, vector_mode,
                   lookup_table, time_value)

    return scalarmap
-
-
def CutPlanesOnField(proxy, entity, field_name, timestamp_nb,
                     nb_planes=10, orientation=Orientation.YZ,
                     angle1=0, angle2=0,
                     displacement=0.5, vector_mode='Magnitude'):
    """Creates Cut Planes presentation on the given field.

    Arguments:
      proxy: the pipeline object, containing data
      entity: the entity type from PrsTypeEnum
      field_name: the field name
      timestamp_nb: the number of time step (1, 2, ...)
      nb_planes: number of cutting planes
      orientation: cutting planes orientation in 3D space
      angle1: rotation of the planes in 3d space around the first axis of the
        selected orientation (X axis for XY, Y axis for YZ, Z axis for ZX).
        The angle of rotation is set in degrees. Acceptable range: [-45, 45].
      angle2: rotation of the planes in 3d space around the second axis of the
        selected orientation. Acceptable range: [-45, 45].
      displacement: the displacement of the planes into one or another side
      vector_mode: the mode of transformation of vector values
        into scalar values, applicable only if the field contains vector values.
        Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.

    Returns:
      Cut Planes as representation object.

    """
    proxy.UpdatePipeline()
    # We don't need mesh parts with no data on them
    if entity == EntityType.NODE:
        select_cells_with_data(proxy, on_points=[field_name])
    else:
        select_cells_with_data(proxy, on_cells=[field_name])

    # Check vector mode
    nb_components = get_nb_components(proxy, entity, field_name)
    check_vector_mode(vector_mode, nb_components)

    # Get time value
    time_value = get_time(proxy, timestamp_nb)

    # Set timestamp
    pvs.GetRenderView().ViewTime = time_value
    pvs.UpdatePipeline(time_value, proxy)

    # Create slice filter
    slice_filter = pvs.Slice(proxy)
    slice_filter.SliceType = "Plane"

    # Set cut planes normal (angles are given in degrees, converted
    # to radians here)
    normal = get_normal_by_orientation(orientation,
                                       radians(angle1), radians(angle2))
    slice_filter.SliceType.Normal = normal

    # Set cut planes positions along the normal, spread over the
    # object bounds
    positions = get_positions(nb_planes, normal,
                              get_bounds(proxy), displacement)
    slice_filter.SliceOffsetValues = positions

    # Get Cut Planes representation object
    cut_planes = pvs.GetRepresentation(slice_filter)

    # Get lookup table
    lookup_table = get_lookup_table(field_name, nb_components, vector_mode)

    # Lock the lookup table to the field's data range
    data_range = get_data_range(proxy, entity,
                                field_name, vector_mode)

    # The range-lock property name differs between ParaView versions.
    if hasattr(lookup_table,"LockDataRange"):
        lookup_table.LockDataRange = 1
    elif hasattr(lookup_table,"LockScalarRange"):
        lookup_table.LockScalarRange = 1
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    # Blue-to-red ramp over the data range.
    lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]

    # Color the representation by the field
    pvs.ColorBy(cut_planes, (EntityType.get_pvtype(entity), field_name))
    cut_planes.LookupTable = lookup_table

    # Add scalar bar
    add_scalar_bar(field_name, nb_components,
                   vector_mode, lookup_table, time_value)

    return cut_planes
-
-
def CutLinesOnField(proxy, entity, field_name, timestamp_nb,
                    nb_lines=10,
                    orientation1=Orientation.XY,
                    base_angle1=0, base_angle2=0,
                    orientation2=Orientation.YZ,
                    cut_angle1=0, cut_angle2=0,
                    displacement1=0.5, displacement2=0.5,
                    generate_curves=False,
                    vector_mode='Magnitude'):
    """Creates Cut Lines presentation on the given field.

    Arguments:
      proxy: the pipeline object, containing data
      entity: the entity type from PrsTypeEnum
      field_name: the field name
      timestamp_nb: the number of time step (1, 2, ...)
      nb_lines: number of lines
      orientation1: base plane orientation in 3D space
      base_angle1: rotation of the base plane in 3d space around the first
        axis of the orientation1 (X axis for XY, Y axis for YZ, Z axis for ZX).
        The angle of rotation is set in degrees. Acceptable range: [-45, 45].
      base_angle2: rotation of the base plane in 3d space around the second
        axis of the orientation1. Acceptable range: [-45, 45].
      orientation2: cutting planes orientation in 3D space
      cut_angle1: rotation of the cut planes in 3d space around the first
        axis of the orientation2. Acceptable range: [-45, 45].
      cut_angle2: rotation of the cutting planes in 3d space around the second
        axis of the orientation2. Acceptable range: [-45, 45].
      displacement1: base plane displacement
      displacement2: cutting planes displacement
      generate_curves: if true, 'PlotOverLine' filter will be created
        for each cut line
      vector_mode: the mode of transformation of vector values
        into scalar values, applicable only if the field contains vector values.
        Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.

    Returns:
      Cut Lines as representation object if generate_curves == False,
      (Cut Lines as representation object, list of 'PlotOverLine') otherwise

    """
    proxy.UpdatePipeline()
    # We don't need mesh parts with no data on them
    if entity == EntityType.NODE:
        select_cells_with_data(proxy, on_points=[field_name])
    else:
        select_cells_with_data(proxy, on_cells=[field_name])

    # Check vector mode
    nb_components = get_nb_components(proxy, entity, field_name)
    check_vector_mode(vector_mode, nb_components)

    # Get time value
    time_value = get_time(proxy, timestamp_nb)

    # Set timestamp
    pvs.GetRenderView().ViewTime = time_value
    pvs.UpdatePipeline(time_value, proxy)

    # Create base plane: a single slice of the object; the cut lines
    # are obtained by slicing this plane again in a second direction.
    base_plane = pvs.Slice(proxy)
    base_plane.SliceType = "Plane"

    # Set base plane normal (angles in degrees, converted to radians)
    base_normal = get_normal_by_orientation(orientation1,
                                            radians(base_angle1),
                                            radians(base_angle2))
    base_plane.SliceType.Normal = base_normal

    # Set base plane position
    base_position = get_positions(1, base_normal,
                                  get_bounds(proxy), displacement1)
    base_plane.SliceOffsetValues = base_position

    # Check base plane: if the slice produced no cells (e.g. the input
    # is already planar), cut the original object directly instead.
    base_plane.UpdatePipeline()
    if (base_plane.GetDataInformation().GetNumberOfCells() == 0):
        base_plane = proxy

    # Create cutting planes
    cut_planes = pvs.Slice(base_plane)
    cut_planes.SliceType = "Plane"

    # Set cutting planes normal and get positions
    cut_normal = get_normal_by_orientation(orientation2,
                                           radians(cut_angle1),
                                           radians(cut_angle2))
    cut_planes.SliceType.Normal = cut_normal

    # Set cutting planes position
    cut_positions = get_positions(nb_lines, cut_normal,
                                  get_bounds(base_plane), displacement2)

    # Generate curves: one PlotOverLine per cut position, spanning the
    # bounding box diagonal of that single-line slice.
    curves = []
    if generate_curves:
        index = 0
        for pos in cut_positions:
            # Get points for plot over line objects
            cut_planes.SliceOffsetValues = pos
            cut_planes.UpdatePipeline()
            bounds = get_bounds(cut_planes)
            point1 = [bounds[0], bounds[2], bounds[4]]
            point2 = [bounds[1], bounds[3], bounds[5]]

            # Create plot over line filter
            pol = pvs.PlotOverLine(cut_planes,
                                   Source="High Resolution Line Source")
            pvs.RenameSource('Y' + str(index), pol)
            pol.Source.Point1 = point1
            pol.Source.Point2 = point2
            pol.UpdatePipeline()
            curves.append(pol)

            index += 1

    # Restore all cut positions at once for the final representation.
    cut_planes.SliceOffsetValues = cut_positions
    cut_planes.UpdatePipeline()

    # Get Cut Lines representation object
    cut_lines = pvs.GetRepresentation(cut_planes)

    # Get lookup table
    lookup_table = get_lookup_table(field_name, nb_components, vector_mode)

    # Lock the lookup table to the field's data range (the lock
    # property name differs between ParaView versions).
    data_range = get_data_range(proxy, entity,
                                field_name, vector_mode)
    if hasattr(lookup_table,"LockDataRange"):
        lookup_table.LockDataRange = 1
    elif hasattr(lookup_table,"LockScalarRange"):
        lookup_table.LockScalarRange = 1
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    # Blue-to-red ramp over the data range.
    lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]

    # Color the representation by the field
    pvs.ColorBy(cut_lines, (EntityType.get_pvtype(entity), field_name))
    cut_lines.LookupTable = lookup_table

    # Set wireframe representation mode
    cut_lines.Representation = 'Wireframe'

    # Add scalar bar
    add_scalar_bar(field_name, nb_components,
                   vector_mode, lookup_table, time_value)

    result = cut_lines
    # If curves were generated return tuple (cut lines, list of curves)
    if curves:
        result = cut_lines, curves

    return result
-
-
def CutSegmentOnField(proxy, entity, field_name, timestamp_nb,
                      point1, point2, vector_mode='Magnitude'):
    """Creates Cut Segment presentation on the given field.

    Arguments:
      proxy: the pipeline object, containing data
      entity: the entity type from PrsTypeEnum
      field_name: the field name
      timestamp_nb: the number of time step (1, 2, ...)
      point1: set the first point of the segment (as [x, y, z])
      point2: set the second point of the segment (as [x, y, z])
      vector_mode: the mode of transformation of vector values
        into scalar values, applicable only if the field contains vector values.
        Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.

    Returns:
      Cut Segment as 3D representation object.

    """
    proxy.UpdatePipeline()
    # We don't need mesh parts with no data on them
    if entity == EntityType.NODE:
        select_cells_with_data(proxy, on_points=[field_name])
    else:
        select_cells_with_data(proxy, on_cells=[field_name])

    # Check vector mode
    nb_components = get_nb_components(proxy, entity, field_name)
    check_vector_mode(vector_mode, nb_components)

    # Get time value
    time_value = get_time(proxy, timestamp_nb)

    # Set timestamp
    pvs.GetRenderView().ViewTime = time_value
    pvs.UpdatePipeline(time_value, proxy)

    # Create plot over line filter sampling the field along the segment
    pol = pvs.PlotOverLine(proxy, Source="High Resolution Line Source")
    pol.Source.Point1 = point1
    pol.Source.Point2 = point2
    pol.UpdatePipeline()

    # Get Cut Segment representation object
    cut_segment = pvs.GetRepresentation(pol)

    # Get lookup table
    lookup_table = get_lookup_table(field_name, nb_components, vector_mode)

    # Lock the lookup table to the field's data range (the lock
    # property name differs between ParaView versions).
    data_range = get_data_range(proxy, entity,
                                field_name, vector_mode)
    if hasattr(lookup_table,"LockDataRange"):
        lookup_table.LockDataRange = 1
    elif hasattr(lookup_table,"LockScalarRange"):
        lookup_table.LockScalarRange = 1
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    # Blue-to-red ramp over the data range.
    lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]

    # Color the representation by the field
    pvs.ColorBy(cut_segment, (EntityType.get_pvtype(entity), field_name))
    cut_segment.LookupTable = lookup_table

    # Set wireframe representation mode
    cut_segment.Representation = 'Wireframe'

    # Add scalar bar
    add_scalar_bar(field_name, nb_components,
                   vector_mode, lookup_table, time_value)

    return cut_segment
-
-
def VectorsOnField(proxy, entity, field_name, timestamp_nb,
                   scale_factor=None,
                   glyph_pos=GlyphPos.TAIL, glyph_type='2D Glyph',
                   is_colored=False, vector_mode='Magnitude'):
    """Creates Vectors presentation on the given field.

    Arguments:
      proxy: the pipeline object, containing data
      entity: the entity type from PrsTypeEnum
      field_name: the field name
      timestamp_nb: the number of time step (1, 2, ...)
      scale_factor: scale factor
      glyph_pos: the position of glyphs
      glyph_type: the type of glyphs
      is_colored: this option allows to color the presentation according to
        the corresponding data array values
      vector_mode: the mode of transformation of vector values
        into scalar values, applicable only if the field contains vector values.
        Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.

    Returns:
      Vectors as representation object.

    """
    proxy.UpdatePipeline()
    # We don't need mesh parts with no data on them
    if entity == EntityType.NODE:
        select_cells_with_data(proxy, on_points=[field_name])
    else:
        select_cells_with_data(proxy, on_cells=[field_name])

    # Check vector mode
    nb_components = get_nb_components(proxy, entity, field_name)
    check_vector_mode(vector_mode, nb_components)

    # Get time value
    time_value = get_time(proxy, timestamp_nb)

    # Set timestamp
    pvs.GetRenderView().ViewTime = time_value
    pvs.UpdatePipeline(time_value, proxy)

    # Extract only groups with data for the field
    source = proxy

    # Cell data: convert cells to vertex points so glyphs can be
    # placed at cell centers.
    if is_data_on_cells(proxy, field_name):
        cell_centers = pvs.CellCenters(source)
        cell_centers.VertexCells = 1
        source = cell_centers

    vector_array = field_name
    # If the given vector array has only 2 components, add the third one
    # (the Glyph filter needs a 3-component vector).
    if nb_components == 2:
        calc = get_add_component_calc(source, EntityType.NODE, field_name)
        vector_array = calc.ResultArrayName
        source = calc

    # Glyph
    glyph = pvs.Glyph(source)
    glyph.Vectors = vector_array
    glyph.ScaleMode = 'vector'
    #glyph.MaskPoints = 0

    # Set glyph type
    glyph.GlyphType = glyph_type
    if glyph_type == '2D Glyph':
        glyph.GlyphType.GlyphType = 'Arrow'
    elif glyph_type == 'Cone':
        glyph.GlyphType.Resolution = 7
        glyph.GlyphType.Height = 2
        glyph.GlyphType.Radius = 0.2

    # Set glyph position if possible (not all glyph sources expose a
    # "Center" property).
    if glyph.GlyphType.GetProperty("Center"):
        if (glyph_pos == GlyphPos.TAIL):
            glyph.GlyphType.Center = [0.5, 0.0, 0.0]
        elif (glyph_pos == GlyphPos.HEAD):
            glyph.GlyphType.Center = [-0.5, 0.0, 0.0]
        elif (glyph_pos == GlyphPos.CENTER):
            glyph.GlyphType.Center = [0.0, 0.0, 0.0]

    # Use the caller's scale factor, or compute a default from the data.
    if scale_factor is not None:
        glyph.ScaleFactor = scale_factor
    else:
        def_scale = get_default_scale(PrsTypeEnum.DEFORMEDSHAPE,
                                      proxy, entity, field_name)
        glyph.ScaleFactor = def_scale

    glyph.UpdatePipeline()

    # Get Vectors representation object
    vectors = pvs.GetRepresentation(glyph)

    # Get lookup table
    lookup_table = get_lookup_table(field_name, nb_components, vector_mode)

    # Lock the lookup table to the field's data range (the lock
    # property name differs between ParaView versions).
    data_range = get_data_range(proxy, entity,
                                field_name, vector_mode)
    if hasattr(lookup_table,"LockDataRange"):
        lookup_table.LockDataRange = 1
    elif hasattr(lookup_table,"LockScalarRange"):
        lookup_table.LockScalarRange = 1
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    # Blue-to-red ramp over the data range.
    lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]

    # Color by the glyph vector when requested, otherwise use a solid color.
    if (is_colored):
        pvs.ColorBy(vectors, (EntityType.get_pvtype(entity), 'GlyphVector'))
    else:
        pvs.ColorBy(vectors, (EntityType.get_pvtype(entity), None))
    vectors.LookupTable = lookup_table

    vectors.LineWidth = 1.0

    # Set wireframe representation mode
    vectors.Representation = 'Wireframe'

    # Add scalar bar
    add_scalar_bar(field_name, nb_components,
                   vector_mode, lookup_table, time_value)

    return vectors
-
-
def DeformedShapeOnField(proxy, entity, field_name,
                         timestamp_nb,
                         scale_factor=None, is_colored=False,
                         vector_mode='Magnitude'):
    """Creates Deformed Shape presentation on the given field.

    Arguments:
      proxy: the pipeline object, containing data
      entity: the entity type from PrsTypeEnum
      field_name: the field name
      timestamp_nb: the number of time step (1, 2, ...)
      scale_factor: scale factor of the deformation
      is_colored: this option allows to color the presentation according to
        the corresponding data array values
      vector_mode: the mode of transformation of vector values
        into scalar values, applicable only if the field contains vector values.
        Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.

    Returns:
      Deformed Shape as representation object.

    """
    proxy.UpdatePipeline()
    # We don't need mesh parts with no data on them
    if entity == EntityType.NODE:
        select_cells_with_data(proxy, on_points=[field_name])
    else:
        select_cells_with_data(proxy, on_cells=[field_name])

    # Check vector mode
    nb_components = get_nb_components(proxy, entity, field_name)
    check_vector_mode(vector_mode, nb_components)

    # Get time value
    time_value = get_time(proxy, timestamp_nb)

    # Set timestamp
    pvs.GetRenderView().ViewTime = time_value
    pvs.UpdatePipeline(time_value, proxy)

    # Do merge
    source = pvs.MergeBlocks(proxy)
    pvs.UpdatePipeline()

    # Cell data to point data (WarpByVector needs point vectors).
    if is_data_on_cells(proxy, field_name):
        # Fix: connect the filter to its input explicitly instead of
        # relying on the active source, for consistency with
        # DeformedShapeAndScalarMapOnField.
        cell_to_point = pvs.CellDatatoPointData(source)
        cell_to_point.PassCellData = 1
        source = cell_to_point

    vector_array = field_name
    # If the given vector array has only 2 components, add the third one
    if nb_components == 2:
        calc = get_add_component_calc(source, EntityType.NODE, field_name)
        vector_array = calc.ResultArrayName
        source = calc

    # Warp by vector
    warp_vector = pvs.WarpByVector(source)
    warp_vector.Vectors = [vector_array]
    if scale_factor is not None:
        warp_vector.ScaleFactor = scale_factor
    else:
        def_scale = get_default_scale(PrsTypeEnum.DEFORMEDSHAPE,
                                      proxy, entity, field_name)
        warp_vector.ScaleFactor = def_scale

    # Get Deformed Shape representation object
    defshape = pvs.GetRepresentation(warp_vector)

    # Get lookup table
    lookup_table = get_lookup_table(field_name, nb_components, vector_mode)

    # Lock the lookup table to the field's data range (the lock
    # property name differs between ParaView versions).
    data_range = get_data_range(proxy, entity,
                                field_name, vector_mode)
    if hasattr(lookup_table, "LockDataRange"):
        lookup_table.LockDataRange = 1
    elif hasattr(lookup_table, "LockScalarRange"):
        lookup_table.LockScalarRange = 1
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    # Blue-to-red ramp over the data range.
    lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]

    # Color by the field when requested, otherwise use a solid color.
    if is_colored:
        pvs.ColorBy(defshape, (EntityType.get_pvtype(entity), field_name))
    else:
        pvs.ColorBy(defshape, (EntityType.get_pvtype(entity), None))
    defshape.LookupTable = lookup_table

    # Set wireframe representation mode
    defshape.Representation = 'Wireframe'

    # Add scalar bar
    add_scalar_bar(field_name, nb_components,
                   vector_mode, lookup_table, time_value)

    return defshape
-
-
def DeformedShapeAndScalarMapOnField(proxy, entity, field_name,
                                     timestamp_nb,
                                     scale_factor=None,
                                     scalar_entity=None,
                                     scalar_field_name=None,
                                     vector_mode='Magnitude'):
    """Creates Deformed Shape And Scalar Map presentation on the given field.

    Arguments:
      proxy: the pipeline object, containing data
      entity: the entity type from PrsTypeEnum
      field_name: the field name
      timestamp_nb: the number of time step (1, 2, ...)
      scale_factor: scale factor of the deformation
      scalar_entity: scalar field entity
      scalar_field_name: scalar field, i.e. the field for coloring
      vector_mode: the mode of transformation of vector values
        into scalar values, applicable only if the field contains vector values.
        Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.

    Returns:
      Deformed Shape And Scalar Map as representation object.

    """
    proxy.UpdatePipeline()
    # We don't need mesh parts with no data on them: collect the
    # deformation field and (optionally) the coloring field per entity.
    on_points = []
    on_cells = []

    if entity == EntityType.NODE:
        on_points.append(field_name)
    else:
        on_cells.append(field_name)

    if scalar_entity and scalar_field_name:
        if scalar_entity == EntityType.NODE:
            on_points.append(scalar_field_name)
        else:
            on_cells.append(scalar_field_name)

    nb_components = get_nb_components(proxy, entity, field_name)

    # Select fields
    select_cells_with_data(proxy, on_points, on_cells)

    # Check vector mode
    check_vector_mode(vector_mode, nb_components)

    # Get time value
    time_value = get_time(proxy, timestamp_nb)

    # Set timestamp
    pvs.GetRenderView().ViewTime = time_value
    pvs.UpdatePipeline(time_value, proxy)

    # Set scalar field by default: when no dedicated coloring field is
    # given, color by the deformation field itself.
    scalar_field_entity = scalar_entity
    scalar_field = scalar_field_name
    if (scalar_field_entity is None) or (scalar_field is None):
        scalar_field_entity = entity
        scalar_field = field_name

    # Do merge
    source = pvs.MergeBlocks(proxy)
    pvs.UpdatePipeline()

    # Cell data to point data (WarpByVector needs point vectors).
    if is_data_on_cells(proxy, field_name):
        cell_to_point = pvs.CellDatatoPointData(source)
        cell_to_point.PassCellData = 1
        source = cell_to_point

    vector_array = field_name
    # If the given vector array has only 2 components, add the third one
    if nb_components == 2:
        calc = get_add_component_calc(source, EntityType.NODE, field_name)
        vector_array = calc.ResultArrayName
        source = calc

    # Warp by vector
    warp_vector = pvs.WarpByVector(source)
    warp_vector.Vectors = [vector_array]
    if scale_factor is not None:
        warp_vector.ScaleFactor = scale_factor
    else:
        def_scale = get_default_scale(PrsTypeEnum.DEFORMEDSHAPE,
                                      proxy, entity, field_name)
        warp_vector.ScaleFactor = def_scale

    # Get Deformed Shape And Scalar Map representation object
    defshapemap = pvs.GetRepresentation(warp_vector)

    # Get lookup table for the coloring field
    lookup_table = get_lookup_table(scalar_field, nb_components, vector_mode)

    # Lock the lookup table to the coloring field's data range (the
    # lock property name differs between ParaView versions).
    data_range = get_data_range(proxy, scalar_field_entity,
                                scalar_field, vector_mode)
    if hasattr(lookup_table,"LockDataRange"):
        lookup_table.LockDataRange = 1
    elif hasattr(lookup_table,"LockScalarRange"):
        lookup_table.LockScalarRange = 1
    else:
        raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))

    # Blue-to-red ramp over the data range.
    lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]

    # Color the representation by the coloring field
    pvs.ColorBy(defshapemap, (EntityType.get_pvtype(scalar_field_entity), scalar_field))
    defshapemap.LookupTable = lookup_table

    # Add scalar bar
    add_scalar_bar(field_name, nb_components,
                   vector_mode, lookup_table, time_value)

    return defshapemap
-
-
-def Plot3DOnField(proxy, entity, field_name, timestamp_nb,
- orientation=Orientation.AUTO,
- angle1=0, angle2=0,
- position=0.5, is_relative=True,
- scale_factor=None,
- is_contour=False, nb_contours=32,
- vector_mode='Magnitude'):
- """Creates Plot 3D presentation on the given field.
-
- Arguments:
- proxy: the pipeline object, containig data
- entity: the entity type from PrsTypeEnum
- field_name: the field name
- timestamp_nb: the number of time step (1, 2, ...)
- orientation: the cut plane plane orientation in 3D space, if
- the input is planar - will not be taken into account
- angle1: rotation of the cut plane in 3d space around the first axis
- of the selected orientation (X axis for XY, Y axis for YZ,
- Z axis for ZX).
- The angle of rotation is set in degrees. Acceptable range: [-45, 45].
- angle2: rotation of the cut plane in 3d space around the second axis
- of the selected orientation. Acceptable range: [-45, 45].
- position: position of the cut plane in the object (ranging from 0 to 1).
- The value 0.5 corresponds to cutting by halves.
- is_relative: defines if the cut plane position is relative or absolute
- scale_factor: deformation scale factor
- is_contour: if True - Plot 3D will be represented with a set of contours,
- otherwise - Plot 3D will be represented with a smooth surface
- nb_contours: number of contours, applied if is_contour is True
- vector_mode: the mode of transformation of vector values
- into scalar values, applicable only if the field contains vector values.
- Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.
-
- Returns:
- Plot 3D as representation object.
-
- """
- proxy.UpdatePipeline()
- # We don't need mesh parts with no data on them
- if entity == EntityType.NODE:
- select_cells_with_data(proxy, on_points=[field_name])
- else:
- select_cells_with_data(proxy, on_cells=[field_name])
-
- # Check vector mode
- nb_components = get_nb_components(proxy, entity, field_name)
- check_vector_mode(vector_mode, nb_components)
-
- # Get time value
- time_value = get_time(proxy, timestamp_nb)
-
- # Set timestamp
- pvs.GetRenderView().ViewTime = time_value
- pvs.UpdatePipeline(time_value, proxy)
-
- # Do merge
- merge_blocks = pvs.MergeBlocks(proxy)
- merge_blocks.UpdatePipeline()
-
- poly_data = None
-
- # Cutting plane
-
- # Define orientation if necessary (auto mode)
- plane_orientation = orientation
- if (orientation == Orientation.AUTO):
- plane_orientation = get_orientation(proxy)
-
- # Get cutting plane normal
- normal = None
-
- if (not is_planar_input(proxy)):
- normal = get_normal_by_orientation(plane_orientation,
- radians(angle1), radians(angle2))
-
- # Create slice filter
- slice_filter = pvs.Slice(merge_blocks)
- slice_filter.SliceType = "Plane"
-
- # Set cutting plane normal
- slice_filter.SliceType.Normal = normal
-
- # Set cutting plane position
- if (is_relative):
- base_position = get_positions(1, normal,
- get_bounds(proxy), position)
- slice_filter.SliceOffsetValues = base_position
- else:
- slice_filter.SliceOffsetValues = position
-
- slice_filter.UpdatePipeline()
- poly_data = slice_filter
- else:
- normal = get_normal_by_orientation(plane_orientation, 0, 0)
-
- use_normal = 0
- # Geometry filter
- if not poly_data or poly_data.GetDataInformation().GetNumberOfCells() == 0:
- geometry_filter = pvs.GeometryFilter(merge_blocks)
- poly_data = geometry_filter
- use_normal = 1 # TODO(MZN): workaround
-
- warp_scalar = None
- plot3d = None
- source = poly_data
-
- if is_data_on_cells(poly_data, field_name):
- # Cell data to point data
- cell_to_point = pvs.CellDatatoPointData(poly_data)
- cell_to_point.PassCellData = 1
- source = cell_to_point
-
- scalars = ['POINTS', field_name]
-
- # Transform vector array to scalar array if necessary
- if (nb_components > 1):
- calc = get_calc_magnitude(source, EntityType.NODE, field_name)
- scalars = ['POINTS', calc.ResultArrayName]
- source = calc
-
- # Warp by scalar
- warp_scalar = pvs.WarpByScalar(source)
- warp_scalar.Scalars = scalars
- warp_scalar.Normal = normal
- warp_scalar.UseNormal = use_normal
- if scale_factor is not None:
- warp_scalar.ScaleFactor = scale_factor
- else:
- def_scale = get_default_scale(PrsTypeEnum.PLOT3D,
- proxy, entity, field_name)
- warp_scalar.ScaleFactor = def_scale
-
- warp_scalar.UpdatePipeline()
- source = warp_scalar
-
- if (is_contour):
- # Contours
- contour = pvs.Contour(warp_scalar)
- contour.PointMergeMethod = "Uniform Binning"
- contour.ContourBy = ['POINTS', field_name]
- scalar_range = get_data_range(proxy, entity,
- field_name, vector_mode)
- contour.Isosurfaces = get_contours(scalar_range, nb_contours)
- contour.UpdatePipeline()
- source = contour
-
- # Get Plot 3D representation object
- plot3d = pvs.GetRepresentation(source)
-
- # Get lookup table
- lookup_table = get_lookup_table(field_name, nb_components, vector_mode)
-
- # Set field range if necessary
- data_range = get_data_range(proxy, entity,
- field_name, vector_mode)
- if hasattr(lookup_table,"LockDataRange"):
- lookup_table.LockDataRange = 1
- elif hasattr(lookup_table,"LockScalarRange"):
- lookup_table.LockScalarRange = 1
- else:
- raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))
-
- lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]
-
- # Set properties
- pvs.ColorBy(plot3d, (EntityType.get_pvtype(entity), field_name))
- plot3d.LookupTable = lookup_table
-
- # Add scalar bar
- add_scalar_bar(field_name, nb_components,
- vector_mode, lookup_table, time_value)
-
- return plot3d
-
-
-def IsoSurfacesOnField(proxy, entity, field_name, timestamp_nb,
- custom_range=None, nb_surfaces=10,
- is_colored=True, color=None, vector_mode='Magnitude'):
- """Creates Iso Surfaces presentation on the given field.
-
- Arguments:
- proxy: the pipeline object, containig data
- entity: the entity type from PrsTypeEnum
- field_name: the field name
- timestamp_nb: the number of time step (1, 2, ...)
- custom_range: scalar range, if undefined the source range will be applied
- nb_surfaces: number of surfaces, which will be generated
- is_colored: this option allows to color the presentation according to
- the corresponding data array values. If False - the presentation will
- be one-coloured.
- color: defines the presentation color as [R, G, B] triple. Taken into
- account only if is_colored is False.
- vector_mode: the mode of transformation of vector values
- into scalar values, applicable only if the field contains vector values.
- Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.
-
- Returns:
- Iso Surfaces as representation object.
-
- """
- proxy.UpdatePipeline()
- # We don't need mesh parts with no data on them
- if entity == EntityType.NODE:
- select_cells_with_data(proxy, on_points=[field_name])
- else:
- select_cells_with_data(proxy, on_cells=[field_name])
-
- # Check vector mode
- nb_components = get_nb_components(proxy, entity, field_name)
- check_vector_mode(vector_mode, nb_components)
-
- # Get time value
- time_value = get_time(proxy, timestamp_nb)
-
- # Set timestamp
- pvs.GetRenderView().ViewTime = time_value
- pvs.UpdatePipeline(time_value, proxy)
-
- # Do merge
- source = pvs.MergeBlocks(proxy)
- pvs.UpdatePipeline()
-
- # Transform cell data into point data if necessary
- if is_data_on_cells(proxy, field_name):
- cell_to_point = pvs.CellDatatoPointData(source)
- cell_to_point.PassCellData = 1
- source = cell_to_point
-
- contour_by = ['POINTS', field_name]
-
- # Transform vector array to scalar array if necessary
- if (nb_components > 1):
- calc = get_calc_magnitude(source, EntityType.NODE, field_name)
- contour_by = ['POINTS', calc.ResultArrayName]
- source = calc
-
- # Contour filter settings
- contour = pvs.Contour(source)
- contour.ComputeScalars = 1
- contour.ContourBy = contour_by
-
- # Specify the range
- scalar_range = custom_range
- if (scalar_range is None):
- scalar_range = get_data_range(proxy, entity,
- field_name, cut_off=True)
-
- # Get contour values for the range
- surfaces = get_contours(scalar_range, nb_surfaces)
-
- # Set contour values
- contour.Isosurfaces = surfaces
-
- # Get Iso Surfaces representation object
- isosurfaces = pvs.GetRepresentation(contour)
-
- # Get lookup table
- lookup_table = get_lookup_table(field_name, nb_components, vector_mode)
-
- # Set field range if necessary
- data_range = get_data_range(proxy, entity,
- field_name, vector_mode)
- if hasattr(lookup_table,"LockDataRange"):
- lookup_table.LockDataRange = 1
- elif hasattr(lookup_table,"LockScalarRange"):
- lookup_table.LockScalarRange = 1
- else:
- raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))
-
- lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]
-
- # Set display properties
- if (is_colored):
- pvs.ColorBy(isosurfaces, (EntityType.get_pvtype(entity), field_name))
- else:
- pvs.ColorBy(isosurfaces, (EntityType.get_pvtype(entity), None))
- if color:
- isosurfaces.DiffuseColor = color
- isosurfaces.LookupTable = lookup_table
-
- # Add scalar bar
- add_scalar_bar(field_name, nb_components,
- vector_mode, lookup_table, time_value)
-
- return isosurfaces
-
-
-def GaussPointsOnField(proxy, entity, field_name,
- timestamp_nb,
- is_deformed=True, scale_factor=None,
- is_colored=True, color=None,
- primitive=GaussType.SPRITE,
- is_proportional=True,
- max_pixel_size=256,
- multiplier=None, vector_mode='Magnitude'):
- """Creates Gauss Points on the given field.
-
- Arguments:
-
- proxy: the pipeline object, containig data
- entity: the field entity type from PrsTypeEnum
- field_name: the field name
- timestamp_nb: the number of time step (1, 2, ...)
- is_deformed: defines whether the Gauss Points will be deformed or not
- scale_factor -- the scale factor for deformation. Will be taken into
- account only if is_deformed is True.
- If not passed by user, default scale will be computed.
- is_colored -- defines whether the Gauss Points will be multicolored,
- using the corresponding data values
- color: defines the presentation color as [R, G, B] triple. Taken into
- account only if is_colored is False.
- primitive: primitive type from GaussType
- is_proportional: if True, the size of primitives will depends on
- the gauss point value
- max_pixel_size: the maximum sizr of the Gauss Points primitive in pixels
- multiplier: coefficient between data values and the size of primitives
- If not passed by user, default scale will be computed.
- vector_mode: the mode of transformation of vector values into
- scalar values, applicable only if the field contains vector values.
- Possible modes: 'Magnitude' - vector module;
- 'X', 'Y', 'Z' - vector components.
-
- Returns:
- Gauss Points as representation object.
-
- """
- proxy.UpdatePipeline()
- # We don't need mesh parts with no data on them
- on_gauss = select_cells_with_data(proxy, on_gauss=[field_name])
- if not on_gauss:
- if entity == EntityType.NODE:
- select_cells_with_data(proxy, on_points=[field_name])
- else:
- select_cells_with_data(proxy, on_cells=[field_name])
-
- # Check vector mode
- nb_components = get_nb_components(proxy, entity, field_name)
- check_vector_mode(vector_mode, nb_components)
-
- # Get time value
- time_value = get_time(proxy, timestamp_nb)
-
- # Set timestamp
- pvs.GetRenderView().ViewTime = time_value
- pvs.UpdatePipeline(time_value, proxy)
-
- source = proxy
-
- # If no quadrature point array is passed, use cell centers
- if on_gauss:
- generate_qp = pvs.GenerateQuadraturePoints(source)
- generate_qp.QuadratureSchemeDef = ['CELLS', 'ELGA@0']
- source = generate_qp
- else:
- # Cell centers
- cell_centers = pvs.CellCenters(source)
- cell_centers.VertexCells = 1
- source = cell_centers
-
- source.UpdatePipeline()
-
- # Check if deformation enabled
- if is_deformed and nb_components > 1:
- vector_array = field_name
- # If the given vector array has only 2 components, add the third one
- if nb_components == 2:
- calc = get_add_component_calc(source, EntityType.NODE, field_name)
- vector_array = calc.ResultArrayName
- source = calc
-
- # Warp by vector
- warp_vector = pvs.WarpByVector(source)
- warp_vector.Vectors = [vector_array]
- if scale_factor is not None:
- warp_vector.ScaleFactor = scale_factor
- else:
- def_scale = get_default_scale(PrsTypeEnum.DEFORMEDSHAPE, proxy,
- entity, field_name)
- warp_vector.ScaleFactor = def_scale
- warp_vector.UpdatePipeline()
- source = warp_vector
-
- # Get Gauss Points representation object
- gausspnt = pvs.GetRepresentation(source)
-
- # Get lookup table
- lookup_table = get_lookup_table(field_name, nb_components, vector_mode)
-
- # Set field range if necessary
- data_range = get_data_range(proxy, entity,
- field_name, vector_mode)
- if hasattr(lookup_table,"LockDataRange"):
- lookup_table.LockDataRange = 1
- elif hasattr(lookup_table,"LockScalarRange"):
- lookup_table.LockScalarRange = 1
- else:
- raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))
-
- lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]
-
- # Set display properties
- if is_colored:
- pvs.ColorBy(gausspnt, (EntityType.get_pvtype(entity), field_name))
- else:
- pvs.ColorBy(gausspnt, (EntityType.get_pvtype(entity), None))
- if color:
- gausspnt.DiffuseColor = color
-
- gausspnt.LookupTable = lookup_table
-
- # Add scalar bar
- add_scalar_bar(field_name, nb_components,
- vector_mode, lookup_table, time_value)
-
- # Set point sprite representation
- gausspnt.Representation = 'Point Sprite'
-
- # Point sprite settings
- gausspnt.InterpolateScalarsBeforeMapping = 0
- gausspnt.MaxPixelSize = max_pixel_size
-
- # Render mode
- gausspnt.RenderMode = GaussType.get_mode(primitive)
-
- #if primitive == GaussType.SPRITE:
- # Set texture
- # TODO(MZN): replace with pvsimple high-level interface
- # texture = sm.CreateProxy("textures", "SpriteTexture")
- # alphamprop = texture.GetProperty("AlphaMethod")
- # alphamprop.SetElement(0, 2) # Clamp
- # alphatprop = texture.GetProperty("AlphaThreshold")
- # alphatprop.SetElement(0, 63)
- # maxprop = texture.GetProperty("Maximum")
- # maxprop.SetElement(0, 255)
- # texture.UpdateVTKObjects()
-
- # gausspnt.Texture = texture
- #gausspnt.Texture.AlphaMethod = 'Clamp'
- #gausspnt.Texture.AlphaThreshold = 63
- #gausspnt.Texture.Maximum= 255
-
- # Proportional radius
- gausspnt.RadiusUseScalarRange = 0
- gausspnt.RadiusIsProportional = 0
-
- if is_proportional:
- mult = multiplier
- if mult is None and data_range[1] != 0:
- mult = abs(0.1 / data_range[1])
-
- gausspnt.RadiusScalarRange = data_range
- gausspnt.RadiusTransferFunctionEnabled = 1
- gausspnt.RadiusMode = 'Scalar'
- gausspnt.RadiusArray = ['POINTS', field_name]
- if nb_components > 1:
- v_comp = get_vector_component(vector_mode)
- gausspnt.RadiusVectorComponent = v_comp
- gausspnt.RadiusTransferFunctionMode = 'Table'
- gausspnt.RadiusScalarRange = data_range
- gausspnt.RadiusUseScalarRange = 1
- if mult is not None:
- gausspnt.RadiusIsProportional = 1
- gausspnt.RadiusProportionalFactor = mult
- else:
- gausspnt.RadiusTransferFunctionEnabled = 0
- gausspnt.RadiusMode = 'Constant'
- gausspnt.RadiusArray = ['POINTS', 'Constant Radius']
-
- return gausspnt
-
-def GaussPointsOnField1(proxy, entity, field_name,
- timestamp_nb,
- is_colored=True, color=None,
- primitive=GaussType.SPHERE,
- is_proportional=True,
- max_pixel_size=256,
- multiplier=None,
- vector_mode='Magnitude'):
- """Creates Gauss Points on the given field. Use GaussPoints() Paraview interface.
-
- Arguments:
- proxy: the pipeline object, containig data
- entity: the field entity type from PrsTypeEnum
- field_name: the field name
- timestamp_nb: the number of time step (1, 2, ...)
- is_colored -- defines whether the Gauss Points will be multicolored,
- using the corresponding data values
- color: defines the presentation color as [R, G, B] triple. Taken into
- account only if is_colored is False.
- primitive: primitive type from GaussType
- is_proportional: if True, the size of primitives will depends on
- the gauss point value
- max_pixel_size: the maximum sizr of the Gauss Points primitive in pixels
- multiplier: coefficient between data values and the size of primitives
- If not passed by user, default scale will be computed.
- vector_mode: the mode of transformation of vector values into
- scalar values, applicable only if the field contains vector values.
- Possible modes: 'Magnitude' - vector module;
- 'X', 'Y', 'Z' - vector components.
-
- Returns:
- Gauss Points as representation object.
-
- """
- proxy.UpdatePipeline()
- select_cells_with_data(proxy, on_gauss=[field_name])
-
- nb_components = get_nb_components(proxy, entity, field_name)
-
- # Get time value
- time_value = get_time(proxy, timestamp_nb)
-
- # Set timestamp
- pvs.GetRenderView().ViewTime = time_value
- proxy.UpdatePipeline(time=time_value)
-
- # Create Gauss Points object
- source = pvs.ELGAfieldToPointSprite(proxy)
- source.UpdatePipeline()
-
- # Get Gauss Points representation object
- gausspnt = pvs.GetRepresentation(source)
-
- # Get lookup table
- lookup_table = get_lookup_table(field_name, nb_components, vector_mode)
-
- # Set field range if necessary
- data_range = get_data_range(proxy, entity,
- field_name, vector_mode)
- if hasattr(lookup_table,"LockDataRange"):
- lookup_table.LockDataRange = 1
- elif hasattr(lookup_table,"LockScalarRange"):
- lookup_table.LockScalarRange = 1
- else:
- raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))
-
- lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]
-
- # Set display properties
- if is_colored:
- pvs.ColorBy(gausspnt, (EntityType.get_pvtype(entity), field_name))
- else:
- pvs.ColorBy(gausspnt, (EntityType.get_pvtype(entity), None))
- if color:
- gausspnt.DiffuseColor = color
-
- gausspnt.LookupTable = lookup_table
-
- # Add scalar bar
- add_scalar_bar(field_name, nb_components,
- vector_mode, lookup_table, time_value)
-
- # Set point sprite representation
- gausspnt.Representation = 'Point Sprite'
-
- # Point sprite settings
- gausspnt.InterpolateScalarsBeforeMapping = 0
- gausspnt.MaxPixelSize = max_pixel_size
-
- # Render mode
- gausspnt.RenderMode = GaussType.get_mode(primitive)
-
- #if primitive == GaussType.SPRITE:
- # Set texture
- # TODO(MZN): replace with pvsimple high-level interface
- # texture = sm.CreateProxy("textures", "SpriteTexture")
- # alphamprop = texture.GetProperty("AlphaMethod")
- # alphamprop.SetElement(0, 2) # Clamp
- # alphatprop = texture.GetProperty("AlphaThreshold")
- # alphatprop.SetElement(0, 63)
- # maxprop = texture.GetProperty("Maximum")
- # maxprop.SetElement(0, 255)
- # texture.UpdateVTKObjects()
-
- # gausspnt.Texture = texture
- #gausspnt.Texture.AlphaMethod = 'Clamp'
- #gausspnt.Texture.AlphaThreshold = 63
- #gausspnt.Texture.Maximum= 255
-
- # Proportional radius
- gausspnt.RadiusUseScalarRange = 0
- gausspnt.RadiusIsProportional = 0
-
- if is_proportional:
- mult = multiplier
- if mult is None and data_range[1] != 0:
- mult = abs(0.1 / data_range[1])
-
- gausspnt.RadiusScalarRange = data_range
- gausspnt.RadiusTransferFunctionEnabled = 1
- gausspnt.RadiusMode = 'Scalar'
- gausspnt.RadiusArray = ['POINTS', field_name]
- if nb_components > 1:
- v_comp = get_vector_component(vector_mode)
- gausspnt.RadiusVectorComponent = v_comp
- gausspnt.RadiusTransferFunctionMode = 'Table'
- gausspnt.RadiusScalarRange = data_range
- gausspnt.RadiusUseScalarRange = 1
- if mult is not None:
- gausspnt.RadiusIsProportional = 1
- gausspnt.RadiusProportionalFactor = mult
- else:
- gausspnt.RadiusTransferFunctionEnabled = 0
- gausspnt.RadiusMode = 'Constant'
- gausspnt.RadiusArray = ['POINTS', 'Constant Radius']
-
- return gausspnt
-
-def StreamLinesOnField(proxy, entity, field_name, timestamp_nb,
- direction='BOTH', is_colored=False, color=None,
- vector_mode='Magnitude'):
- """Creates Stream Lines presentation on the given field.
-
- Arguments:
- proxy: the pipeline object, containig data
- entity: the entity type from PrsTypeEnum
- field_name: the field name
- timestamp_nb: the number of time step (1, 2, ...)
- direction: the stream lines direction ('FORWARD', 'BACKWARD' or 'BOTH')
- is_colored: this option allows to color the presentation according to
- the corresponding data values. If False - the presentation will
- be one-coloured.
- color: defines the presentation color as [R, G, B] triple. Taken into
- account only if is_colored is False.
- vector_mode: the mode of transformation of vector values
- into scalar values, applicable only if the field contains vector values.
- Possible modes: 'Magnitude', 'X', 'Y' or 'Z'.
-
- Returns:
- Stream Lines as representation object.
-
- """
- proxy.UpdatePipeline()
- # We don't need mesh parts with no data on them
- if entity == EntityType.NODE:
- select_cells_with_data(proxy, on_points=[field_name])
- else:
- select_cells_with_data(proxy, on_cells=[field_name])
-
- # Check vector mode
- nb_components = get_nb_components(proxy, entity, field_name)
- check_vector_mode(vector_mode, nb_components)
-
- # Get time value
- time_value = get_time(proxy, timestamp_nb)
-
- # Set timestamp
- pvs.GetRenderView().ViewTime = time_value
- pvs.UpdatePipeline(time_value, proxy)
-
- # Do merge
- source = pvs.MergeBlocks(proxy)
- pvs.UpdatePipeline()
-
- # Cell data to point data
- if is_data_on_cells(proxy, field_name):
- cell_to_point = pvs.CellDatatoPointData(source)
- cell_to_point.PassCellData = 1
- pvs.UpdatePipeline()
- source = cell_to_point
-
- vector_array = field_name
- # If the given vector array has only 2 components, add the third one
- if nb_components == 2:
- calc = get_add_component_calc(source, EntityType.NODE, field_name)
- vector_array = calc.ResultArrayName
- pvs.UpdatePipeline()
- source = calc
-
- # Stream Tracer
- stream = pvs.StreamTracer(source)
- stream.SeedType = "Point Source"
- stream.Vectors = ['POINTS', vector_array]
- stream.IntegrationDirection = direction
- stream.IntegratorType = 'Runge-Kutta 2'
- stream.SeedType = 'High Resolution Line Source'
- stream.UpdatePipeline()
-
- # Get Stream Lines representation object
- if is_empty(stream):
- return None
- streamlines = pvs.GetRepresentation(stream)
-
- # Get lookup table
- lookup_table = get_lookup_table(field_name, nb_components, vector_mode)
-
- # Set field range if necessary
- data_range = get_data_range(proxy, entity,
- field_name, vector_mode)
- if hasattr(lookup_table,"LockDataRange"):
- lookup_table.LockDataRange = 1
- elif hasattr(lookup_table,"LockScalarRange"):
- lookup_table.LockScalarRange = 1
- else:
- raise RuntimeError("Object %s has no 'LockDataRange' or 'LockScalarRange' attribute!"%(lookup_table))
-
- lookup_table.RGBPoints = [data_range[0], 0, 0, 1, data_range[1], 1, 0, 0]
-
- # Set properties
- if is_colored:
- pvs.ColorBy(streamlines, (EntityType.get_pvtype(entity), field_name))
- else:
- pvs.ColorBy(streamlines, (EntityType.get_pvtype(entity), None))
- if color:
- streamlines.DiffuseColor = color
-
- streamlines.LookupTable = lookup_table
-
- # Add scalar bar
- add_scalar_bar(field_name, nb_components,
- vector_mode, lookup_table, time_value)
-
- return streamlines
-
-
-def MeshOnEntity(proxy, mesh_name, entity):
- """Creates submesh of the entity type for the mesh.
-
- Arguments:
- proxy -- the pipeline object, containig data
- mesh_name -- the full or short name of mesh field
-
- Returns:
- Submesh as representation object of the given source.
-
- """
- proxy.UpdatePipeline()
- mesh_full_name = None
- aList = mesh_name.split('/')
- if len(aList) >= 2:
- mesh_full_name = mesh_name
- else:
- mesh_full_name = find_mesh_full_name(proxy, mesh_name)
- if not mesh_full_name:
- raise RuntimeError, "The given mesh name was not found"
- # Select only the given mesh
- proxy.AllArrays = [mesh_full_name]
- proxy.UpdatePipeline()
-
- # Get representation object if the submesh is not empty
- prs = None
- if (proxy.GetDataInformation().GetNumberOfPoints() or
- proxy.GetDataInformation().GetNumberOfCells()):
- my_view = pvs.GetRenderView()
- prs = pvs.GetRepresentation(proxy, view=my_view)
- prs.ColorArrayName = (None, '')
-
- return prs
-
-
-def MeshOnGroup(proxy, extrGroups, group_name):
- """Creates submesh on the group.
-
- Arguments:
- proxy -- the pipeline object, containig data
- group_name -- the full group name
- extrGroups -- all extracted groups object
-
- Returns:
- Representation object of the given source with single group
- selected.
-
- """
- proxy.UpdatePipeline()
- # Deselect all groups
- extrGroups.AllGroups = []
- extrGroups.UpdatePipelineInformation()
- # Select only the group with the given name
- extrGroups.AllGroups = [group_name]
- extrGroups.UpdatePipelineInformation()
-
- # Get representation object if the submesh is not empty
- prs = None
-
- # Check if the group was set
- if len(extrGroups.AllGroups) == 1 and \
- extrGroups.AllGroups[0] == group_name:
- # Check if the submesh is not empty
- nb_points = proxy.GetDataInformation().GetNumberOfPoints()
- nb_cells = proxy.GetDataInformation().GetNumberOfCells()
-
- if nb_points or nb_cells:
-# prs = pvs.GetRepresentation(proxy)
- prs = pvs.Show()
- prs.ColorArrayName = (None, '')
- display_only(prs)
-
- return prs
-
-
-def CreatePrsForFile(file_name, prs_types,
- picture_dir, picture_ext):
- """Build presentations of the given types for the file.
-
- Build presentations for all fields on all timestamps.
-
- Arguments:
- file_name: full path to the MED file
- prs_types: the list of presentation types to build
- picture_dir: the directory path for saving snapshots
- picture_ext: graphics files extension (determines file type)
-
- """
- # Import MED file
- print "Import " + file_name.split(os.sep)[-1] + "..."
-
- try:
- proxy = pvs.MEDReader(FileName=file_name)
- if proxy is None:
- print "FAILED"
- else:
- #proxy.UpdatePipeline()
- print "OK"
- except:
- print "FAILED"
- else:
- # Get view
- view = pvs.GetRenderView()
- time_value = get_time(proxy, 0)
- view.ViewTime = time_value
- pvs.UpdatePipeline(time=time_value, proxy=proxy)
-
- # Create required presentations for the proxy
- CreatePrsForProxy(proxy, view, prs_types,
- picture_dir, picture_ext)
-
-def CreatePrsForProxy(proxy, view, prs_types, picture_dir, picture_ext):
- """Build presentations of the given types for all fields of the proxy.
-
- Save snapshots in graphics files (type depends on the given extension).
- Stores the files in the given directory.
-
- Arguments:
- proxy: the pipeline object, containig data
- view: the render view
- prs_types: the list of presentation types to build
- picture_dir: the directory path for saving snapshots
- picture_ext: graphics files extension (determines file type)
-
- """
- proxy.UpdatePipeline()
- # List of the field names
- fields_info = proxy.GetProperty("FieldsTreeInfo")[::2]
-
- # Add path separator to the end of picture path if necessery
- if not picture_dir.endswith(os.sep):
- picture_dir += os.sep
-
- # Mesh Presentation
- if PrsTypeEnum.MESH in prs_types:
- # Iterate on meshes
- mesh_names = get_mesh_full_names(proxy)
- for mesh_name in mesh_names:
- # Build mesh field presentation
- print "Creating submesh for '" + get_field_short_name(mesh_name) + "' mesh... "
- prs = MeshOnEntity(proxy, mesh_name, None)
- if prs is None:
- print "FAILED"
- continue
- else:
- print "OK"
- # Construct image file name
- pic_name = picture_dir + get_field_short_name(mesh_name) + "." + picture_ext
-
- # Show and dump the presentation into a graphics file
- process_prs_for_test(prs, view, pic_name, False)
-
- # Create Mesh presentation. Build all groups.
- extGrp = pvs.ExtractGroup()
- extGrp.UpdatePipelineInformation()
- if if_possible(proxy, None, None, PrsTypeEnum.MESH, extGrp):
- for group in get_group_names(extGrp):
- print "Creating submesh on group " + get_group_short_name(group) + "... "
- prs = MeshOnGroup(proxy, extGrp, group)
- if prs is None:
- print "FAILED"
- continue
- else:
- print "OK"
- # Construct image file name
- pic_name = picture_dir + get_group_short_name(group) + "." + picture_ext
-
- # Show and dump the presentation into a graphics file
- process_prs_for_test(prs, view, pic_name, False)
-
- # Presentations on fields
- for field in fields_info:
- field_name = get_field_short_name(field)
- # Ignore mesh presentation
- if field_name == get_field_mesh_name(field):
- continue
- field_entity = get_field_entity(field)
- # Select only the current field:
- # necessary for getting the right timestamps
- proxy.AllArrays = [field]
- proxy.UpdatePipeline()
-
- # Get timestamps
- timestamps = proxy.TimestepValues.GetData()
-
- for prs_type in prs_types:
- # Ignore mesh presentation
- if prs_type == PrsTypeEnum.MESH:
- continue
-
- # Get name of presentation type
- prs_name = PrsTypeEnum.get_name(prs_type)
-
- # Build the presentation if possible
- possible = if_possible(proxy, field_name,
- field_entity, prs_type)
- if possible:
- # Presentation type for graphics file name
- f_prs_type = prs_name.replace(' ', '').upper()
-
- for timestamp_nb in xrange(1, len(timestamps) + 1):
- time = timestamps[timestamp_nb - 1]
- if (time == 0.0):
- scalar_range = get_data_range(proxy, field_entity,
- field_name, cut_off=True)
- # exclude time stamps with null lenght of scalar range
- if (scalar_range[0] == scalar_range[1]):
- continue
- print "Creating " + prs_name + " on " + field_name + ", time = " + str(time) + "... "
- try:
- prs = create_prs(prs_type, proxy,
- field_entity, field_name, timestamp_nb)
- except ValueError:
- """ This exception comes from get_nb_components(...) function.
- The reason of exception is an implementation of MEDReader
- activating the first leaf when reading MED file (refer to
- MEDFileFieldRepresentationTree::activateTheFirst() and
- MEDFileFieldRepresentationTree::getTheSingleActivated(...) methods).
- """
- print "ValueError exception is catched"
- continue
- if prs is None:
- print "FAILED"
- continue
- else:
- print "OK"
-
- # Construct image file name
- pic_name = picture_dir + field_name + "_" + str(time) + "_" + f_prs_type + "." + picture_ext
-
- # Show and dump the presentation into a graphics file
- process_prs_for_test(prs, view, pic_name)
- return
-
-
-def delete_pv_object(obj):
- # There is a bug when repeating CreateRenderView/Delete calls
- # Here is a workaround proposed by KW (#10744)
- import gc
- del obj
- gc.collect()
def __my_log(msg):
if __DEBUG:
- print "[PARAVIS] %s" % msg
+ print("[PARAVIS] %s" % msg)
def __getFromGUI():
""" Identify if we are running inside SALOME's embedded interpreter.
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader0.png")
import vtk.test.Testing
wbv=WarpByVector(Input=myMedReader)
wbv.ScaleFactor=0.1
wbv.Vectors=['POINTS','f3NbComp4_Vector']
-assert(wbv.PointData.keys()==['f0NbComp1','f1NbComp2','f1NbComp2_Vector','f2NbComp3','f3NbComp4','f3NbComp4_Vector'])
+assert(list(wbv.PointData.keys())==['f0NbComp1','f1NbComp2','f1NbComp2_Vector','f2NbComp3','f3NbComp4','f3NbComp4_Vector'])
#
DataRepresentation2 = Show()
DataRepresentation2.EdgeColor = [0.0, 0.0, 0.5000076295109483]
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader10.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader11.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader12.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader13.png")
import vtk.test.Testing
for i in [[28,21],[21,14],[14,7],[7,0]]:
m1.insertNextCell(i)
pass
-for i in xrange(6):
+for i in range(6):
m1.insertNextCell([i,i+1])
pass
for i in [[6,13],[13,20],[20,27],[27,34]]:
m1.insertNextCell(i)
pass
-for i in xrange(6,0,-1):
+for i in range(6,0,-1):
m1.insertNextCell([28+i,28+i-1])
pass
#
fs0=MEDFileFieldMultiTS()
fs1=MEDFileFieldMultiTS()
fs2=MEDFileFieldMultiTS()
-for i in xrange(5):
+for i in range(5):
f=MEDFileField1TS()
zePfl0=DataArrayInt.Aggregate(DataArrayInt.Range(0,12,1),pfl3,0) ; zePfl0.setName("PFL")
fNode=MEDCouplingFieldDouble(ON_GAUSS_PT) ; fNode.setTime(float(i),i,0)
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader14.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader15.png")
import vtk.test.Testing
assert(vtkArrToTest.GetNumberOfComponents()==2)
assert(vtkArrToTest.GetComponentName(0)==arr2.getInfoOnComponent(0))
assert(vtkArrToTest.GetComponentName(1)==arr2.getInfoOnComponent(1))
- vals=[vtkArrToTest.GetValue(i) for i in xrange(16)]
+ vals=[vtkArrToTest.GetValue(i) for i in range(16)]
assert(arr2[8:].isEqualWithoutConsideringStr(DataArrayDouble(vals,8,2),1e-12))
pass
#
assert(vtkArrToTest.GetNumberOfComponents()==2)
assert(vtkArrToTest.GetComponentName(0)==arr2.getInfoOnComponent(0))
assert(vtkArrToTest.GetComponentName(1)==arr2.getInfoOnComponent(1))
- vals=[vtkArrToTest.GetValue(i) for i in xrange(16)]
+ vals=[vtkArrToTest.GetValue(i) for i in range(16)]
assert(arr2[4:12].isEqualWithoutConsideringStr(DataArrayDouble(vals,8,2),1e-12))
pass
# important to check that if all the field is present that it is OK (check of the optimization)
assert(vtkArrToTest.GetNumberOfComponents()==2)
assert(vtkArrToTest.GetComponentName(0)==arr2.getInfoOnComponent(0))
assert(vtkArrToTest.GetComponentName(1)==arr2.getInfoOnComponent(1))
- vals=[vtkArrToTest.GetValue(i) for i in xrange(32)]
+ vals=[vtkArrToTest.GetValue(i) for i in range(32)]
assert(arr2.isEqualWithoutConsideringStr(DataArrayDouble(vals,16,2),1e-12))
pass
ELNOfieldToSurface1=ELNOfieldToSurface(Input=reader)
assert(vtkArrToTest.GetNumberOfComponents()==2)
assert(vtkArrToTest.GetComponentName(0)==arr2.getInfoOnComponent(0))
assert(vtkArrToTest.GetComponentName(1)==arr2.getInfoOnComponent(1))
- vals=[vtkArrToTest.GetValue(i) for i in xrange(32)]
+ vals=[vtkArrToTest.GetValue(i) for i in range(32)]
assert(arr2.isEqualWithoutConsideringStr(DataArrayDouble(vals,16,2),1e-12))
pass
vtkArrToTest=res.GetBlock(0).GetPointData().GetArray("MyField")
assert(vtkArrToTest.GetNumberOfComponents()==2)
assert(vtkArrToTest.GetNumberOfTuples()==8)
-vals=[vtkArrToTest.GetValue(i) for i in xrange(16)]
+vals=[vtkArrToTest.GetValue(i) for i in range(16)]
assert(DataArrayDouble([(16.1,17.1),(18.1,19.1),(20.1,21.1),(22.1,23.1),(24.1,25.1),(26.1,27.1),(28.1,29.1),(30.1,31.1)]).isEqual(DataArrayDouble(vals,8,2),1e-12))
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, imgName)
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader2.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath,png)
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, imgName)
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader3.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader4.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader5.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader6.png")
import vtk.test.Testing
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader7.png")
import vtk.test.Testing
extGrp=ExtractGroup(Input=extractCT)
extGrp.UpdatePipelineInformation()
-assert(filter(lambda x:x[:4]=="GRP_",list(extGrp.GetProperty("GroupsFlagsInfo")[::2]))==['GRP_grp0'])
+assert([x for x in list(extGrp.GetProperty("GroupsFlagsInfo")[::2]) if x[:4]=="GRP_"]==['GRP_grp0'])
extGrp.AllGroups="GRP_grp0"
RenderView1 = GetRenderView()
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
- print "Could not get baseline directory. Test failed."
+ print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "testMEDReader8.png")
import vtk.test.Testing
# Author : Anthony Geay (EDF R&D)
VTK_MODULE_LIBRARY(vtkSimpleMode vtkSimpleMode.cxx)
-TARGET_LINK_LIBRARIES(vtkSimpleMode vtkPVVTKExtensionsRendering vtkFiltersGeneral vtkFiltersCore vtkRenderingOpenGL2 ${PARAVIEW_LIBRARIES})
+IF(${SALOME_GUI_USE_OPENGL2})
+ TARGET_LINK_LIBRARIES(vtkSimpleMode vtkPVVTKExtensionsRendering vtkFiltersGeneral vtkFiltersCore vtkRenderingOpenGL2 ${PARAVIEW_LIBRARIES})
+ELSE()
+ TARGET_LINK_LIBRARIES(vtkSimpleMode vtkPVVTKExtensionsRendering vtkFiltersGeneral vtkFiltersCore vtkRenderingOpenGL ${PARAVIEW_LIBRARIES})
+ENDIF()
INSTALL(TARGETS vtkSimpleMode RUNTIME DESTINATION lib/salome LIBRARY DESTINATION lib/salome ARCHIVE DESTINATION lib/salome)
// set stdout to line buffering (aka C++ std::cout)
setvbuf(stdout, (char *)NULL, _IOLBF, BUFSIZ);
char* salome_python=getenv("SALOME_PYTHON");
+  // NOTE(review): do not compute sizeof(salome_python)/sizeof(salome_python[0]) here —
+  // salome_python is a char*, so that yields the pointer size, not the string length,
+  // and the result was unused; use strlen(salome_python) if a length is ever needed.
if(salome_python != 0)
- Py_SetProgramName(salome_python);
+ Py_SetProgramName(Py_DecodeLocale(salome_python, NULL));
+
Py_Initialize(); // Initialize the interpreter
- PySys_SetArgv(argc, argv);
+ wchar_t **w_argv = new wchar_t*[argc];
+ for (int i = 0; i < argc; i++)
+ w_argv[i] = Py_DecodeLocale(argv[i], NULL);
+ PySys_SetArgv(argc, w_argv);
PyRun_SimpleString("import threading\n");
PyEval_InitThreads(); // Create (and acquire) the interpreter lock
PyThreadState *pts = PyGILState_GetThisThreadState();
#endif
#include <Python.h>
+#if PY_VERSION_HEX < 0x03050000
+static char*
+Py_EncodeLocale(const wchar_t *arg, size_t *size)
+{
+ return _Py_wchar2char(arg, size);
+}
+static wchar_t*
+Py_DecodeLocale(const char *arg, size_t *size)
+{
+ return _Py_char2wchar(arg, size);
+}
+#endif
// next two MACRO must be used together only once inside a block
// -------------------------------------------------------------