dnl
dnl See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
dnl
-dnl Author: André Ribes, andre.ribes@edf.fr, EDF R&D
+dnl Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
-AC_INIT(ADAO_SRC, [6.6.0])
+AC_INIT(ADAO_SRC, [7.2.0])
AC_CONFIG_AUX_DIR(adm_local)
AM_INIT_AUTOMAKE
AM_CONFIG_HEADER(adao_config.h)
bin/Makefile
bin/qtEficas_adao_study.py
doc/Makefile
+ doc/en/Makefile
])
AC_OUTPUT
-# Copyright (C) 2010-2011 EDF R&D
+# Copyright (C) 2010-2013 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
-# Author: André Ribes, andre.ribes@edf.fr, EDF R&D
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
-include $(top_srcdir)/adm_local/make_common_starter.am
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = $(top_builddir)/doc/build
-SRCDIR = $(top_srcdir)/doc
-
-EXTRA_DIST = conf.py advanced.rst examples.rst index.rst intro.rst theory.rst using.rst \
- resources/ADAO.png \
- resources/ADAO_small.png \
- resources/ADAO_small_rouge.png \
- resources/ADAO_small_vert.png \
- images/adao_activate.png \
- images/adao_jdcexample01.png \
- images/adao_scriptentry01.png \
- images/adao_viewer.png \
- images/eficas_new.png \
- images/eficas_save.png \
- images/eficas_yacs.png \
- images/yacs_generatedscheme.png \
- images/adao_exporttoyacs.png \
- images/adao_jdcexample02.png \
- images/adao_scriptentry02.png \
- images/eficas_close.png \
- images/eficas_open.png \
- images/eficas_saveas.png \
- images/yacs_containerlog.png
-
-install-data-local:
- make html
- ${mkinstalldirs} $(DESTDIR)$(docdir)
- ${mkinstalldirs} $(DESTDIR)$(salomeresdir)
- cp -R $(BUILDDIR)/html/* $(DESTDIR)$(docdir)
- cp $(SRCDIR)/resources/*.png $(DESTDIR)$(salomeresdir)
- cp $(SRCDIR)/images/eficas_*.png $(DESTDIR)$(salomeresdir)
-
-uninstall-local:
- chmod -R +w $(DESTDIR)$(docdir)
- rm -rf $(DESTDIR)$(docdir)
- rm -f $(DESTDIR)$(salomeresdir)/*.png
-
-clean-local:
- -rm -rf $(top_builddir)/doc/build
-
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(top_srcdir)/doc
-
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
-
-help:
- @echo "Please use \`make <target>' where <target> is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ADAO.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ADAO.qhc"
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
- "run these through (pdf)latex."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
+SUBDIRS = en
+++ /dev/null
-.. _section_advanced:
-
-================================================================================
-Advanced usage of the ADAO module
-================================================================================
-
-This section presents advanced methods to use the ADAO module, how to get more
-information, or how to use it without the graphical user interface (GUI). It
-requires to know how to find files or commands included inside the whole SALOME
-installation. All the names to be replaced by user are indicated by the
-following syntax ``<...>``.
-
-Converting and executing an ADAO command file (JDC) using a shell script
-------------------------------------------------------------------------
-
-It is possible to convert and execute an ADAO command file (JDC, or ".comm"
-file, which resides in ``<ADAO JDC file directory>``) automatically by using a
-template script containing all the required steps. The user has to know where
-are the main SALOME scripts, and in particular the ``runAppli`` one. The
-directory in which this script resides is symbolically named ``<SALOME main
-installation dir>`` and has to be replaced by the good one in the template.
-
-When an ADAO command file is build by the ADAO GUI editor and saved, if it is
-named for example "AdaoStudy1.comm", then a companion file named "AdaoStudy1.py"
-is automatically created in the same directory. It is named ``<ADAO Python
-file>`` in the template, and it is converted to YACS as an ``<ADAO YACS xml
-scheme>``. After that, it can be executed in console mode using the standard
-YACS console command (see YACS documentation for more information).
-
-In the example, we choose to start and stop the SALOME application server in the
-same script, which is not necessary, but useful to avoid stalling SALOME
-sessions. We choose also to remove the ``<ADAO YACS xml scheme>`` because it is
-a generated one. You only need to replace the text between these symbols
-``<...>`` to use it.
-
-The template of the shell script is the following::
-
- #!/bin/bash
- export USERDIR=<ADAO JDC file directory>
- export SALOMEDIR=<SALOME main installation directory>
- $SALOMEDIR/runAppli -k -t
- $SALOMEDIR/runSession python \
- $SALOMEDIR/bin/salome/AdaoYacsSchemaCreator.py \
- $USERDIR/<ADAO Python file> $USERDIR/<ADAO YACS xml scheme>
- $SALOMEDIR/runSession driver $USERDIR/<ADAO YACS xml scheme>
- $SALOMEDIR/runSession killSalome.py
- rm -f $USERDIR/<ADAO YACS xml scheme>
-
-Standard output and errors come on console.
-
-Running an ADAO calculation scheme in YACS using a TUI user mode
-----------------------------------------------------------------
-
-This section describes how to execute in TUI (Text User Interface) mode a YACS
-calculation scheme, obtained using the ADAO "Export to YACS" function. It uses
-the standard YACS TUI mode, which is briefly recalled here (see YACS
-documentation for more information) through a simple example. As seen in
-documentation, a XML scheme can be loaded in a Python. We give here a whole
-sequence of command lines to test the validity of the scheme before executing
-it, adding some initial supplementary ones to explicitly load the types catalog
-to avoid weird difficulties::
-
- import pilot
- import SALOMERuntime
- import loader
- SALOMERuntime.RuntimeSALOME_setRuntime()
-
- r = pilot.getRuntime()
- xmlLoader = loader.YACSLoader()
- xmlLoader.registerProcCataLoader()
- try:
- catalogAd = r.loadCatalog("proc", "<ADAO YACS xml scheme>")
- except:
- pass
- r.addCatalog(catalogAd)
-
- try:
- p = xmlLoader.load("<ADAO YACS xml scheme>")
- except IOError,ex:
- print "IO exception:",ex
-
- logger = p.getLogger("parser")
- if not logger.isEmpty():
- print "The imported file has errors :"
- print logger.getStr()
-
- if not p.isValid():
- print "The schema is not valid and can not be executed"
- print p.getErrorReport()
-
- info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
- p.checkConsistency(info)
- if info.areWarningsOrErrors():
- print "The schema is not consistent and can not be executed"
- print info.getGlobalRepr()
-
- e = pilot.ExecutorSwig()
- e.RunW(p)
- if p.getEffectiveState() != pilot.DONE:
- print p.getErrorReport()
-
-This method allows for example to edit the YACS XML scheme in TUI, or to gather
-results for further use.
-
-Getting information on special variables during the ADAO calculation in YACS
------------------------------------------------------------------------------
-
-Some special variables, used during calculations, can be monitored during the
-ADAO calculation in YACS. These variables can be printed, plotted, saved, etc.
-This can be done using "*observers*", that are scripts associated with one
-variable. In order to use this feature, one has to build scripts using as
-standard inputs (available in the namespace) the variables ``var`` and ``info``.
-The variable ``var`` is to be used in the same way as for the final ADD object,
-that is as a list/tuple object.
-
-Some templates are available when editing the ADAO case in EFICAS editor. These
-simple scripts can be customized by the user, either at the EFICAS edition stage
-or at the YACS edition stage, to improve the tuning of the ADAO calculation in
-YACS.
-
-As an example, here is one very simple script (similar to the "*ValuePrinter*"
-template) used to print the value of one monitored variable::
-
- print " --->",info," Value =",var[-1]
-
-Stored in a python file, this script can be associated to each variable
-available in the "*SELECTION*" keyword of the "*Observers*" command:
-"*Analysis*", "*CurrentState*", "*CostFunction*"... The current value of the
-variable will be printed at each step of the optimization or assimilation
-algorithm. The observers can embed plotting capabilities, storage, printing,
-etc.
-
-Getting more information when running a calculation
----------------------------------------------------
-
-When running, useful data and messages are logged. There are two ways to obtain
-theses information.
-
-The first one, and the preferred way, is to use the built-in variable "*Debug*"
-available in every ADAO case. It is available through the GUI of the module.
-Setting it to "*1*" will send messages in the log window of the YACS scheme
-execution.
-
-The second one consist in using the "*logging*" native module of Python (see the
-Python documentation http://docs.python.org/library/logging.html for more
-information on this module). Everywhere in the YACS scheme, mainly through the
-scripts entries, the user can set the logging level in accordance to the needs
-of detailed informations. The different logging levels are: "*DEBUG*", "*INFO*",
-"*WARNING*", "*ERROR*", "*CRITICAL*". All the informations flagged with a
-certain level will be printed for whatever activated level above this particular
-one (included). The easiest way is to change the log level is to write the
-following Python lines::
-
- import logging
- logging.getLogger().setLevel(logging.DEBUG)
-
-The standard logging module default level is "*WARNING*", the default level in
-the ADAO module is "*INFO*".
-
-It is also recommended to include in the simulation code some logging or debug
-mechanisms and use them in conjunction with the two previous methods. But be
-careful not to store too big variables because it cost time, whatever logging
-level is chosen.
-
-Switching from a version of ADAO to a newer one
------------------------------------------------
-
-The ADAO module and cases are identified as versions, with "Major", "Minor" and
-"Revision" characteristics. A particular version is numbered as
-"Major.Minor.Revision".
-
-Each version of the ADAO module can read ADAO case files of the previous minor
-version. In general, it can also read ADAO case files of all the previous minor
-versions for one major branch. In general also, an ADAO case file for one
-version can not be read by a previous minor or major version.
-
-Switching from 6.6 to 7.2
-+++++++++++++++++++++++++
-
-There is no known incompatibility for the ADAO case file. The upgrade procedure
-is to read the old ADAO case file with the new SALOME/ADAO module, and save it
-with a new name.
-
-There is one incompatibility introduced for the post-processing or observer
-script files. The old syntax to call a result object, such as the "*Analysis*"
-one in a script provided through the "*UserPostAnalysis*" keyword), was for
-example::
-
- Analysis = ADD.get("Analysis").valueserie(-1)
- Analysis = ADD.get("Analysis").valueserie()
-
-The new syntax is entirely similar to the classical one of a list/tuple object::
-
- Analysis = ADD.get("Analysis")[-1]
- Analysis = ADD.get("Analysis")[:]
-
-The post-processing scripts has to be modified.
-
-Switching from 6.5 to 6.6
-+++++++++++++++++++++++++
-
-There is no known incompatibility for the ADAO case file. The upgrade procedure
-is to read the old ADAO case file with the new SALOME/ADAO module, and save it
-with a new name.
-
-There is one incompatibility introduced for the designation of operators used to
-for the observation operator. The new mandatory names are "*DirectOperator*",
-"*TangentOperator*" and "*AdjointOperator*", as described in the last subsection
-of the chapter :ref:`section_reference`.
-
-Switching from 6.4 to 6.5
-+++++++++++++++++++++++++
-
-There is no known incompatibility for the ADAO case file or the accompanying
-scripts. The upgrade procedure is to read the old ADAO case file with the new
-SALOME/ADAO module, and save it with a new name.
-
-Switching from 6.3 to 6.4
-+++++++++++++++++++++++++
-
-There is no known incompatibility for the ADAO case file or the accompanying
-scripts. The upgrade procedure is to read the old ADAO case file with the new
-SALOME/ADAO module, and save it with a new name.
+++ /dev/null
-.. _section_bibliography:
-
-================================================================================
-Bibliography
-================================================================================
-
-.. [Argaud09] Argaud J.-P., Bouriquet B., Hunt J., *Data Assimilation from Operational and Industrial Applications to Complex Systems*, Mathematics Today, pp.150-152, October 2009
-
-.. [Bouttier99] Bouttier B., Courtier P., *Data assimilation concepts and methods*, Meteorological Training Course Lecture Series, ECMWF, 1999, http://www.ecmwf.int/newsevents/training/rcourse_notes/pdf_files/Assim_concepts.pdf
-
-.. [Bocquet04] Bocquet M., *Introduction aux principes et méthodes de l'assimilation de données en géophysique*, Lecture Notes, 2004-2008, http://cerea.enpc.fr/HomePages/bocquet/assim.pdf
-
-.. [Byrd95] Byrd R. H., Lu P., Nocedal J., *A Limited Memory Algorithm for Bound Constrained Optimization*, SIAM Journal on Scientific and Statistical Computing, 16(5), pp.1190-1208, 1995
-
-.. [Ide97] Ide K., Courtier P., Ghil M., Lorenc A. C., *Unified notation for data assimilation: operational, sequential and variational*, Journal of the Meteorological Society of Japan, 75(1B), pp.181-189, 1997
-
-.. [Kalnay03] Kalnay E., *Atmospheric Modeling, Data Assimilation and Predictability*, Cambridge University Press, 2003
-
-.. [Salome] *SALOME The Open Source Integration Platform for Numerical Simulation*, http://www.salome-platform.org/
-
-.. [Tarantola87] Tarantola A., *Inverse Problem: Theory Methods for Data Fitting and Parameter Estimation*, Elsevier, 1987
-
-.. [Talagrand97] Talagrand O., *Assimilation of Observations, an Introduction*, Journal of the Meteorological Society of Japan, 75(1B), pp.191-209, 1997
-
-.. [WikipediaDA] Wikipedia, *Data assimilation*, http://en.wikipedia.org/wiki/Data_assimilation
-
-.. [WikipediaMO] Wikipedia, *Mathematical optimization*, https://en.wikipedia.org/wiki/Mathematical_optimization
-
-.. [WikipediaPSO] Wikipedia, *Particle swarm optimization*, https://en.wikipedia.org/wiki/Particle_swarm_optimization
-
-.. [WikipediaQR] Wikipedia, *Quantile regression*, https://en.wikipedia.org/wiki/Quantile_regression
-
-.. [Zhu97] Zhu C., Byrd R. H., Nocedal J., *L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, Vol 23(4), pp.550-560, 1997
+++ /dev/null
-# -*- coding: utf-8 -*-
-# Copyright (C) 2008-2013 EDF R&D
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-#
-# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
-
-#
-# ADAO documentation build configuration file, created by
-# sphinx-quickstart on Wed Jun 16 15:48:00 2010.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
-
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ["sphinx.ext.pngmath"]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'ADAO'
-copyright = u'2008-2013, Jean-Philippe ARGAUD'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '7.2.0'
-version = '7\_main'
-# The full version, including alpha/beta/rc tags.
-release = '7.2.0'
-release = '7\_main'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = False
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'ADAOdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-latex_paper_size = 'a4'
-
-# The font size ('10pt', '11pt' or '12pt').
-latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'ADAO.tex', u'ADAO Documentation',
- u'Jean-Philippe ARGAUD', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
-
-source_encoding = 'iso-8859-15'
-
-# -- Options for Epub output ---------------------------------------------------
-
-# Bibliographic Dublin Core info.
-epub_title = u'ADAO'
-epub_author = u'Jean-Philippe ARGAUD'
-epub_publisher = u'Jean-Philippe ARGAUD'
-epub_copyright = u'2008-2013, Jean-Philippe ARGAUD'
-
-# The language of the text. It defaults to the language option
-# or en if the language is not set.
-#epub_language = ''
-
-# The scheme of the identifier. Typical schemes are ISBN or URL.
-#epub_scheme = ''
-
-# The unique identifier of the text. This can be a ISBN number
-# or the project homepage.
-#epub_identifier = ''
-
-# A unique identification for the text.
-#epub_uid = ''
-
-# HTML files that should be inserted before the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_pre_files = []
-
-# HTML files shat should be inserted after the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_post_files = []
-
-# A list of files that should not be packed into the epub file.
-#epub_exclude_files = []
-
-# The depth of the table of contents in toc.ncx.
-#epub_tocdepth = 3
-
-# Allow duplicate toc entries.
-#epub_tocdup = True
-
-# -- Options for PDF output --------------------------------------------------
-# Grouping the document tree into PDF files. List of tuples
-# (source start file, target name, title, author, options).
-#
-# If there is more than one author, separate them with \\.
-# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
-#
-# The options element is a dictionary that lets you override
-# this config per-document.
-# For example,
-# ('index', u'MyProject', u'My Project', u'Author Name',
-# dict(pdf_compressed = True))
-# would mean that specific document would be compressed
-# regardless of the global pdf_compressed setting.
-pdf_documents = [
- ('contents', u'ADAO', u'ADAO', u'Jean-Philippe ARGAUD', dict(pdf_compressed = True)),
-]
-# A comma-separated list of custom stylesheets. Example:
-pdf_stylesheets = ['sphinx','kerning','a4']
-# Create a compressed PDF
-# Use True/False or 1/0
-# Example: compressed=True
-#pdf_compressed = False
-pdf_compressed = True
-# A colon-separated list of folders to search for fonts. Example:
-# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
-# Language to be used for hyphenation support
-#pdf_language = "en_US"
-# Mode for literal blocks wider than the frame. Can be
-# overflow, shrink or truncate
-#pdf_fit_mode = "shrink"
-# Section level that forces a break page.
-# For example: 1 means top-level sections start in a new page
-# 0 means disabled
-#pdf_break_level = 0
-# When a section starts in a new page, force it to be 'even', 'odd',
-# or just use 'any'
-#pdf_breakside = 'any'
-# Insert footnotes where they are defined instead of
-# at the end.
-#pdf_inline_footnotes = True
-# verbosity level. 0 1 or 2
-#pdf_verbosity = 0
-# If false, no index is generated.
-#pdf_use_index = True
-# If false, no modindex is generated.
-#pdf_use_modindex = True
-# If false, no coverpage is generated.
-#pdf_use_coverpage = True
-# Name of the cover page template to use
-#pdf_cover_template = 'sphinxcover.tmpl'
-# Documents to append as an appendix to all manuals.
-#pdf_appendices = []
-# Enable experimental feature to split table cells. Use it
-# if you get "DelayedTable too big" errors
-#pdf_splittables = False
-# Set the default DPI for images
-#pdf_default_dpi = 72
-# Enable rst2pdf extension modules (default is empty list)
-# you need vectorpdf for better sphinx's graphviz support
-#pdf_extensions = ['vectorpdf']
-# Page template name for "regular" pages
-#pdf_page_template = 'cutePage'
--- /dev/null
+# Copyright (C) 2010-2013 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+include $(top_srcdir)/adm_local/make_common_starter.am
+
+# You can set these variables from the command line.
+DOCLANG = en
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = $(top_builddir)/doc/build/$(DOCLANG)
+SRCDIR = $(top_srcdir)/doc/$(DOCLANG)
+
+EXTRA_DIST = conf.py advanced.rst examples.rst index.rst intro.rst theory.rst using.rst \
+ resources/ADAO.png \
+ resources/ADAO_small.png \
+ resources/ADAO_small_rouge.png \
+ resources/ADAO_small_vert.png \
+ images/adao_activate.png \
+ images/adao_jdcexample01.png \
+ images/adao_scriptentry01.png \
+ images/adao_viewer.png \
+ images/eficas_new.png \
+ images/eficas_save.png \
+ images/eficas_yacs.png \
+ images/yacs_generatedscheme.png \
+ images/adao_exporttoyacs.png \
+ images/adao_jdcexample02.png \
+ images/adao_scriptentry02.png \
+ images/eficas_close.png \
+ images/eficas_open.png \
+ images/eficas_saveas.png \
+ images/yacs_containerlog.png
+
+install-data-local:
+	$(MAKE) html
+ ${mkinstalldirs} $(DESTDIR)$(docdir)/$(DOCLANG)
+ ${mkinstalldirs} $(DESTDIR)$(salomeresdir)
+ cp -R $(BUILDDIR)/html/* $(DESTDIR)$(docdir)/$(DOCLANG)
+ cp $(SRCDIR)/resources/*.png $(DESTDIR)$(salomeresdir)
+ cp $(SRCDIR)/images/eficas_*.png $(DESTDIR)$(salomeresdir)
+
+uninstall-local:
+ chmod -R +w $(DESTDIR)$(docdir)/$(DOCLANG)
+	rm -rf $(DESTDIR)$(docdir)/$(DOCLANG)
+ rm -f $(DESTDIR)$(salomeresdir)/*.png
+
+clean-local:
+ -rm -rf $(BUILDDIR)
+
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SRCDIR)
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ADAO.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ADAO.qhc"
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
--- /dev/null
+.. _section_advanced:
+
+================================================================================
+Advanced usage of the ADAO module
+================================================================================
+
+This section presents advanced ways of using the ADAO module: how to get more
+information, and how to use it without the graphical user interface (GUI). It
+requires knowing how to find files or commands included in the whole SALOME
+installation. All the names to be replaced by the user are indicated by the
+syntax ``<...>``.
+
+Converting and executing an ADAO command file (JDC) using a shell script
+------------------------------------------------------------------------
+
+It is possible to convert and execute an ADAO command file (JDC, or ".comm"
+file, which resides in ``<ADAO JDC file directory>``) automatically by using a
+template script containing all the required steps. The user has to know where
+the main SALOME scripts are located, and in particular the ``runAppli`` one.
+The directory in which this script resides is symbolically named ``<SALOME
+main installation dir>`` and has to be replaced by the correct one in the
+template.
+
+When an ADAO command file is built by the ADAO GUI editor and saved, if it is
+named for example "AdaoStudy1.comm", then a companion file named "AdaoStudy1.py"
+is automatically created in the same directory. It is named ``<ADAO Python
+file>`` in the template, and it is converted to YACS as an ``<ADAO YACS xml
+scheme>``. After that, it can be executed in console mode using the standard
+YACS console command (see the YACS documentation for more information).
+
+In the example, we choose to start and stop the SALOME application server in
+the same script, which is not necessary, but useful to avoid stalling SALOME
+sessions. We also choose to remove the ``<ADAO YACS xml scheme>`` because it
+is a generated file. To use the template, you only need to replace the text
+between the ``<...>`` symbols.
+
+The template of the shell script is the following::
+
+ #!/bin/bash
+ export USERDIR=<ADAO JDC file directory>
+ export SALOMEDIR=<SALOME main installation directory>
+ $SALOMEDIR/runAppli -k -t
+ $SALOMEDIR/runSession python \
+ $SALOMEDIR/bin/salome/AdaoYacsSchemaCreator.py \
+ $USERDIR/<ADAO Python file> $USERDIR/<ADAO YACS xml scheme>
+ $SALOMEDIR/runSession driver $USERDIR/<ADAO YACS xml scheme>
+ $SALOMEDIR/runSession killSalome.py
+ rm -f $USERDIR/<ADAO YACS xml scheme>
+
+Standard output and errors are displayed on the console.
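+
+For instance, saved under a hypothetical name such as ``adao_run.sh`` and made
+executable, the script is simply launched from a terminal::
+
+    chmod +x adao_run.sh
+    ./adao_run.sh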
+
+Running an ADAO calculation scheme in YACS using a TUI user mode
+----------------------------------------------------------------
+
+This section describes how to execute, in TUI (Text User Interface) mode, a
+YACS calculation scheme obtained using the ADAO "Export to YACS" function. It
+uses the standard YACS TUI mode, which is briefly recalled here (see the YACS
+documentation for more information) through a simple example. As seen in the
+documentation, an XML scheme can be loaded in a Python session. We give here a
+whole sequence of commands to test the validity of the scheme before executing
+it, adding some initial supplementary ones to explicitly load the types
+catalog and avoid obscure difficulties::
+
+ import pilot
+ import SALOMERuntime
+ import loader
+ SALOMERuntime.RuntimeSALOME_setRuntime()
+
+ r = pilot.getRuntime()
+ xmlLoader = loader.YACSLoader()
+ xmlLoader.registerProcCataLoader()
+    try:
+        catalogAd = r.loadCatalog("proc", "<ADAO YACS xml scheme>")
+        r.addCatalog(catalogAd)  # register the catalog only if it was loaded
+    except:
+        pass
+
+ try:
+ p = xmlLoader.load("<ADAO YACS xml scheme>")
+ except IOError,ex:
+ print "IO exception:",ex
+
+ logger = p.getLogger("parser")
+ if not logger.isEmpty():
+ print "The imported file has errors :"
+ print logger.getStr()
+
+ if not p.isValid():
+ print "The schema is not valid and can not be executed"
+ print p.getErrorReport()
+
+ info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
+ p.checkConsistency(info)
+ if info.areWarningsOrErrors():
+ print "The schema is not consistent and can not be executed"
+ print info.getGlobalRepr()
+
+ e = pilot.ExecutorSwig()
+ e.RunW(p)
+ if p.getEffectiveState() != pilot.DONE:
+ print p.getErrorReport()
+
+This method allows, for example, editing the YACS XML scheme in TUI, or
+gathering results for further use.
+
+Getting information on special variables during the ADAO calculation in YACS
+-----------------------------------------------------------------------------
+
+Some special variables, used during calculations, can be monitored during the
+ADAO calculation in YACS. These variables can be printed, plotted, saved, etc.
+This can be done using "*observers*", which are scripts, each associated with
+one variable. In order to use this feature, one has to build scripts using as
+standard inputs (available in the namespace) the variables ``var`` and
+``info``. The variable ``var`` is to be used in the same way as the final ADD
+object, that is, as a list/tuple object.
+
+Some templates are available when editing the ADAO case in the EFICAS editor.
+These simple scripts can be customized by the user, either at the EFICAS
+editing stage or at the YACS editing stage, to improve the tuning of the ADAO
+calculation in YACS.
+
+As an example, here is one very simple script (similar to the "*ValuePrinter*"
+template) used to print the value of one monitored variable::
+
+ print " --->",info," Value =",var[-1]
+
+Stored in a Python file, this script can be associated with each variable
+available under the "*SELECTION*" keyword of the "*Observers*" command:
+"*Analysis*", "*CurrentState*", "*CostFunction*"... The current value of the
+variable will be printed at each step of the optimization or assimilation
+algorithm. The observers can embed plotting capabilities, storage, printing,
+etc.
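+
+As a further illustration, here is a minimal sketch of an observer that
+accumulates the successive values in a text file for later analysis. It is not
+an ADAO template: the output file name is an arbitrary choice for the example,
+and it assumes the monitored values can be converted to a NumPy array::
+
+    import numpy
+    # "var" and "info" are the standard observer inputs provided by ADAO.
+    # The output file name below is purely illustrative.
+    value = numpy.ravel(var[-1])
+    output = open("/tmp/monitored_variable.txt", "a")
+    output.write("%s %s\n" % (info, list(value)))
+    output.close()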
+
+Getting more information when running a calculation
+---------------------------------------------------
+
+When running, useful data and messages are logged. There are two ways to
+obtain this information.
+
+The first one, and the preferred way, is to use the built-in variable
+"*Debug*", available in every ADAO case through the GUI of the module.
+Setting it to "*1*" will send messages to the log window of the YACS scheme
+execution.
+
+The second one consists in using the native "*logging*" module of Python (see
+the Python documentation http://docs.python.org/library/logging.html for more
+information on this module). Everywhere in the YACS scheme, mainly through the
+script entries, the user can set the logging level according to the needed
+level of detail. The different logging levels are: "*DEBUG*", "*INFO*",
+"*WARNING*", "*ERROR*", "*CRITICAL*". All the information flagged with a
+certain level will be printed whenever the activated level is at or below this
+particular one. The easiest way to change the log level is to write the
+following Python lines::
+
+ import logging
+ logging.getLogger().setLevel(logging.DEBUG)
+
+The default level of the standard logging module is "*WARNING*", while the
+default level in the ADAO module is "*INFO*".
+
+It is also recommended to include some logging or debug mechanisms in the
+simulation code, and to use them in conjunction with the two previous methods.
+But be careful not to store variables that are too big, because it costs time
+whatever logging level is chosen.
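+
+As a minimal sketch of this recommendation (the function name and the messages
+are hypothetical, not part of ADAO), a user simulation function can use a
+named logger so that its messages follow the globally activated level::
+
+    import logging
+    # outside SALOME/ADAO, call logging.basicConfig() first to get a handler
+    logger = logging.getLogger("UserSimulation")
+
+    def DirectSimulation(x):
+        logger.debug("input state: %s", x)
+        y = x  # placeholder for the real simulation computation
+        logger.debug("simulated observation: %s", y)
+        return y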
+
+Switching from a version of ADAO to a newer one
+-----------------------------------------------
+
+The ADAO module and cases are identified as versions, with "Major", "Minor" and
+"Revision" characteristics. A particular version is numbered as
+"Major.Minor.Revision".
+
+Each version of the ADAO module can read ADAO case files of the previous minor
+version. In general, it can also read ADAO case files of all the previous
+minor versions of one major branch. In general also, an ADAO case file of one
+version cannot be read by a previous minor or major version.
+
+Switching from 6.6 to 7.2
++++++++++++++++++++++++++
+
+There is no known incompatibility for the ADAO case file. The upgrade procedure
+is to read the old ADAO case file with the new SALOME/ADAO module, and save it
+with a new name.
+
+There is one incompatibility introduced for the post-processing or observer
+script files. The old syntax to call a result object (such as the "*Analysis*"
+one in a script provided through the "*UserPostAnalysis*" keyword) was for
+example::
+
+ Analysis = ADD.get("Analysis").valueserie(-1)
+ Analysis = ADD.get("Analysis").valueserie()
+
+The new syntax is entirely similar to the classical syntax of a list/tuple
+object::
+
+ Analysis = ADD.get("Analysis")[-1]
+ Analysis = ADD.get("Analysis")[:]
+
+The post-processing scripts have to be modified accordingly.
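+
+For scripts that must run with both versions, a small helper (purely
+illustrative, not part of ADAO) can bridge the two syntaxes, here assuming the
+ADD result object is available in the script namespace::
+
+    def getLastValue(ADD, name="Analysis"):
+        item = ADD.get(name)
+        if hasattr(item, "valueserie"):  # old 6.6 interface
+            return item.valueserie(-1)
+        return item[-1]                  # new 7.2 list/tuple interface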
+
+Switching from 6.5 to 6.6
++++++++++++++++++++++++++
+
+There is no known incompatibility for the ADAO case file. The upgrade procedure
+is to read the old ADAO case file with the new SALOME/ADAO module, and save it
+with a new name.
+
+There is one incompatibility introduced in the naming of the operators used
+for the observation operator. The new mandatory names are "*DirectOperator*",
+"*TangentOperator*" and "*AdjointOperator*", as described in the last subsection
+of the chapter :ref:`section_reference`.
+
+Switching from 6.4 to 6.5
++++++++++++++++++++++++++
+
+There is no known incompatibility for the ADAO case file or the accompanying
+scripts. The upgrade procedure is to read the old ADAO case file with the new
+SALOME/ADAO module, and save it with a new name.
+
+Switching from 6.3 to 6.4
++++++++++++++++++++++++++
+
+There is no known incompatibility for the ADAO case file or the accompanying
+scripts. The upgrade procedure is to read the old ADAO case file with the new
+SALOME/ADAO module, and save it with a new name.
--- /dev/null
+.. _section_bibliography:
+
+================================================================================
+Bibliography
+================================================================================
+
+.. [Argaud09] Argaud J.-P., Bouriquet B., Hunt J., *Data Assimilation from Operational and Industrial Applications to Complex Systems*, Mathematics Today, pp.150-152, October 2009
+
+.. [Bouttier99] Bouttier F., Courtier P., *Data assimilation concepts and methods*, Meteorological Training Course Lecture Series, ECMWF, 1999, http://www.ecmwf.int/newsevents/training/rcourse_notes/pdf_files/Assim_concepts.pdf
+
+.. [Bocquet04] Bocquet M., *Introduction aux principes et méthodes de l'assimilation de données en géophysique*, Lecture Notes, 2004-2008, http://cerea.enpc.fr/HomePages/bocquet/assim.pdf
+
+.. [Byrd95] Byrd R. H., Lu P., Nocedal J., *A Limited Memory Algorithm for Bound Constrained Optimization*, SIAM Journal on Scientific and Statistical Computing, 16(5), pp.1190-1208, 1995
+
+.. [Ide97] Ide K., Courtier P., Ghil M., Lorenc A. C., *Unified notation for data assimilation: operational, sequential and variational*, Journal of the Meteorological Society of Japan, 75(1B), pp.181-189, 1997
+
+.. [Kalnay03] Kalnay E., *Atmospheric Modeling, Data Assimilation and Predictability*, Cambridge University Press, 2003
+
+.. [Salome] *SALOME The Open Source Integration Platform for Numerical Simulation*, http://www.salome-platform.org/
+
+.. [Tarantola87] Tarantola A., *Inverse Problem Theory: Methods for Data Fitting and Model Parameter Estimation*, Elsevier, 1987
+
+.. [Talagrand97] Talagrand O., *Assimilation of Observations, an Introduction*, Journal of the Meteorological Society of Japan, 75(1B), pp.191-209, 1997
+
+.. [WikipediaDA] Wikipedia, *Data assimilation*, http://en.wikipedia.org/wiki/Data_assimilation
+
+.. [WikipediaMO] Wikipedia, *Mathematical optimization*, https://en.wikipedia.org/wiki/Mathematical_optimization
+
+.. [WikipediaPSO] Wikipedia, *Particle swarm optimization*, https://en.wikipedia.org/wiki/Particle_swarm_optimization
+
+.. [WikipediaQR] Wikipedia, *Quantile regression*, https://en.wikipedia.org/wiki/Quantile_regression
+
+.. [Zhu97] Zhu C., Byrd R. H., Nocedal J., *L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, Vol 23(4), pp.550-560, 1997
--- /dev/null
+# -*- coding: utf-8 -*-
+# Copyright (C) 2008-2013 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+#
+# ADAO documentation build configuration file, created by
+# sphinx-quickstart on Wed Jun 16 15:48:00 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ["sphinx.ext.pngmath"]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'ADAO'
+copyright = u'2008-2013, Jean-Philippe ARGAUD'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '7.2.0'
+version = '7_main'
+# The full version, including alpha/beta/rc tags.
+release = '7.2.0'
+release = '7_main'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'ADAOdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+latex_paper_size = 'a4'
+
+# The font size ('10pt', '11pt' or '12pt').
+latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'ADAO.tex', u'ADAO Documentation',
+ u'Jean-Philippe ARGAUD', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+source_encoding = 'iso-8859-15'
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'ADAO'
+epub_author = u'Jean-Philippe ARGAUD'
+epub_publisher = u'Jean-Philippe ARGAUD'
+epub_copyright = u'2008-2013, Jean-Philippe ARGAUD'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+# -- Options for PDF output --------------------------------------------------
+# Grouping the document tree into PDF files. List of tuples
+# (source start file, target name, title, author, options).
+#
+# If there is more than one author, separate them with \\.
+# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
+#
+# The options element is a dictionary that lets you override
+# this config per-document.
+# For example,
+# ('index', u'MyProject', u'My Project', u'Author Name',
+# dict(pdf_compressed = True))
+# would mean that specific document would be compressed
+# regardless of the global pdf_compressed setting.
+pdf_documents = [
+ ('contents', u'ADAO', u'ADAO', u'Jean-Philippe ARGAUD', dict(pdf_compressed = True)),
+]
+# A comma-separated list of custom stylesheets. Example:
+pdf_stylesheets = ['sphinx','kerning','a4']
+# Create a compressed PDF
+# Use True/False or 1/0
+# Example: compressed=True
+#pdf_compressed = False
+pdf_compressed = True
+# A colon-separated list of folders to search for fonts. Example:
+# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
+# Language to be used for hyphenation support
+#pdf_language = "en_US"
+# Mode for literal blocks wider than the frame. Can be
+# overflow, shrink or truncate
+#pdf_fit_mode = "shrink"
+# Section level that forces a break page.
+# For example: 1 means top-level sections start in a new page
+# 0 means disabled
+#pdf_break_level = 0
+# When a section starts in a new page, force it to be 'even', 'odd',
+# or just use 'any'
+#pdf_breakside = 'any'
+# Insert footnotes where they are defined instead of
+# at the end.
+#pdf_inline_footnotes = True
+# verbosity level. 0 1 or 2
+#pdf_verbosity = 0
+# If false, no index is generated.
+#pdf_use_index = True
+# If false, no modindex is generated.
+#pdf_use_modindex = True
+# If false, no coverpage is generated.
+#pdf_use_coverpage = True
+# Name of the cover page template to use
+#pdf_cover_template = 'sphinxcover.tmpl'
+# Documents to append as an appendix to all manuals.
+#pdf_appendices = []
+# Enable experimental feature to split table cells. Use it
+# if you get "DelayedTable too big" errors
+#pdf_splittables = False
+# Set the default DPI for images
+#pdf_default_dpi = 72
+# Enable rst2pdf extension modules (default is empty list)
+# you need vectorpdf for better sphinx's graphviz support
+#pdf_extensions = ['vectorpdf']
+# Page template name for "regular" pages
+#pdf_page_template = 'cutePage'
--- /dev/null
+.. _section_examples:
+
+================================================================================
+Tutorials on using the ADAO module
+================================================================================
+
+.. |eficas_new| image:: images/eficas_new.png
+ :align: middle
+ :scale: 50%
+.. |eficas_save| image:: images/eficas_save.png
+ :align: middle
+ :scale: 50%
+.. |eficas_saveas| image:: images/eficas_saveas.png
+ :align: middle
+ :scale: 50%
+.. |eficas_yacs| image:: images/eficas_yacs.png
+ :align: middle
+ :scale: 50%
+
+This section presents some examples of using the ADAO module in SALOME. The
+first one shows how to build a simple data assimilation case, defining
+explicitly all the required data through the GUI. The second one shows, on the
+same case, how to define data using external sources through scripts.
+
+Building a simple estimation case with explicit data definition
+---------------------------------------------------------------
+
+This simple example is a demonstration one. It describes how to set up a BLUE
+estimation framework in order to get the *weighted least squares estimated
+state* of a system from an observation of the state and from an *a priori*
+knowledge (or background) of this state. In other words, we look for the
+weighted middle between the observation and the background vectors. All the
+numerical values of this example are arbitrary.
+
+Experimental set up
++++++++++++++++++++
+
+We choose to operate in a 3-dimensional space. 3D is chosen in order to
+restrict the size of the numerical objects to be explicitly entered by the
+user, but the problem does not depend on the dimension and can be set in
+dimension 1000... The observation :math:`\mathbf{y}^o` has the value 1 in each
+direction, so:
+
+ ``Yo = [1 1 1]``
+
+The background state :math:`\mathbf{x}^b`, which represents some *a priori*
+knowledge or a regularization, has the value 0 in each direction, which is:
+
+ ``Xb = [0 0 0]``
+
+Data assimilation requires information on the error covariances
+:math:`\mathbf{R}` and :math:`\mathbf{B}`, respectively for the observation
+and background variables. We choose here to have uncorrelated errors (that is,
+diagonal matrices) and the same variance of 1 for all variables (that is,
+identity matrices). We get:
+
+ ``B = R = [1 0 0 ; 0 1 0 ; 0 0 1]``
+
+Last, we need an observation operator :math:`\mathbf{H}` to convert the
+background value into the space of observation values. Here, because the space
+dimensions are the same, we can choose the identity as the observation
+operator:
+
+ ``H = [1 0 0 ; 0 1 0 ; 0 0 1]``
+
+With such choices, the Best Linear Unbiased Estimator (BLUE) will be the average
+vector between :math:`\mathbf{y}^o` and :math:`\mathbf{x}^b`, named the
+*analysis* and denoted by :math:`\mathbf{x}^a`:
+
+ ``Xa = [0.5 0.5 0.5]``
+
+As an extension of this example, one can change the variances of
+:math:`\mathbf{B}` or :math:`\mathbf{R}` independently, and the analysis will
+move towards :math:`\mathbf{y}^o` or towards :math:`\mathbf{x}^b` in inverse
+proportion to the variances in :math:`\mathbf{B}` and :math:`\mathbf{R}`. It is
+also equivalent to search for the analysis through a BLUE algorithm or a 3DVAR
+one.
+
+Using the GUI to build the ADAO case
+++++++++++++++++++++++++++++++++++++
+
+First, you have to activate the ADAO module by choosing the appropriate module
+button or menu of SALOME, and you will see:
+
+ .. _adao_activate2:
+ .. image:: images/adao_activate.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **Activating the module ADAO in SALOME**
+
+Choose the "*New*" button in this window. You will directly get the EFICAS
+interface for variables definition, along with the "*Object browser*". You can
+then click on the "*New*" button |eficas_new| to create a new ADAO case, and you
+will see:
+
+ .. _adao_viewer:
+ .. image:: images/adao_viewer.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **The EFICAS viewer for cases definition in module ADAO**
+
+Then fill in the variables to build the ADAO case, using the experimental set
+up described above. All the technical information given above will be directly
+inserted in the ADAO case definition, using the *String* type for all the
+variables. When the case definition is ready, save it to a "*JDC (\*.comm)*"
+native file somewhere in your path. Remember that other files will also be
+created next to this first one, so it is better to make a specific directory
+for your case and to save the file inside it. The name of the file will appear
+in the "*Object browser*" window, under the "*ADAO*" menu. The final case
+definition looks like this:
+
+ .. _adao_jdcexample01:
+ .. image:: images/adao_jdcexample01.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **Definition of the experimental set up chosen for the ADAO case**
+
+To go further, we now need to generate the YACS scheme from the ADAO case
+definition. To do that, right click on the name of the case file in the
+"*Object browser*" window, and choose the "*Export to YACS*" sub-menu (or the
+"*Export to YACS*" button |eficas_yacs|) as below:
+
+ .. _adao_exporttoyacs00:
+ .. image:: images/adao_exporttoyacs.png
+ :align: center
+ :scale: 75%
+ .. centered::
+ **"Export to YACS" sub-menu to generate the YACS scheme from the ADAO case**
+
+This command will generate the YACS scheme, activate the YACS module in
+SALOME, and open the new scheme in the GUI of the YACS module [#]_. After
+reordering the nodes using the "*arrange local node*" sub-menu of the YACS
+graphical view of the scheme, you get the following representation of the
+generated ADAO scheme:
+
+ .. _yacs_generatedscheme:
+ .. image:: images/yacs_generatedscheme.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **YACS generated scheme from the ADAO case**
+
+After that point, all the modifications, executions and post-processing of the
+data assimilation scheme will be done in YACS. In order to check the result in a
+simple way, we create here a new YACS node by using the "*in-line script node*"
+sub-menu of the YACS graphical view, and we name it "*PostProcessing*".
+
+This script will retrieve the data assimilation analysis from the
+"*algoResults*" output port of the computation bloc (which gives access to a
+SALOME Python Object), and will print it on the standard output.
+
+To obtain this, the in-line script node needs to have an input port of type
+"*pyobj*", named "*results*" for example, that has to be graphically linked
+to the "*algoResults*" output port of the computation bloc. Then the code to
+fill in the script node is::
+
+ Xa = results.ADD.get("Analysis")[-1]
+
+ print
+ print "Analysis =",Xa
+ print
+
+The augmented YACS scheme can be saved (overwriting the generated scheme if
+the simple "*Save*" command or button is used, or with a new name). Ideally,
+the implementation of such a post-processing procedure can first be done in
+YACS to test it, and then entirely saved in one script that can be integrated
+in the ADAO case by using the keyword "*UserPostAnalysis*".
+
+Then, classically in YACS, the scheme has to be prepared for run, and then
+executed. After completion, the printing on standard output is available in
+the "*YACS Container Log*", obtained through the right click menu of the
+"*proc*" window in the YACS scheme as shown below:
+
+ .. _yacs_containerlog:
+ .. image:: images/yacs_containerlog.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **YACS menu for Container Log, and dialog window showing the log**
+
+We verify that the result is correct by checking that the log dialog window
+contains the following line::
+
+ Analysis = [0.5, 0.5, 0.5]
+
+as shown in the image above.
+
+As a simple extension of this example, one can notice that the same problem
+solved with a 3DVAR algorithm gives the same result. This algorithm can be
+chosen at the ADAO case building step, before entering the YACS step. The
+ADAO 3DVAR case will look completely similar to the BLUE algorithmic case, as
+shown by the following figure:
+
+ .. _adao_jdcexample02:
+ .. image:: images/adao_jdcexample02.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **Defining an ADAO 3DVAR case looks completely similar to a BLUE case**
+
+Only one command changes, with the "*3DVAR*" value instead of "*Blue*".
+
+Building a simple estimation case with external data definition by scripts
+--------------------------------------------------------------------------
+
+It is useful to get parts or all of the data from external definitions, using
+Python script files to provide access to the data. As an example, we build
+here an ADAO case representing the same experimental set up as in the above
+example `Building a simple estimation case with explicit data definition`_,
+but using data from a single external Python script file.
+
+First, we write the following script file, using conventional names for the
+desired variables. Here, all the input variables are defined in the same
+script, but the user can choose to split the file into several ones, or to mix
+explicit data definition in the ADAO GUI and implicit data definition by
+external files. The present script looks like::
+
+ import numpy
+ #
+ # Definition of the Background as a vector
+ # ----------------------------------------
+ Background = [0, 0, 0]
+ #
+ # Definition of the Observation as a vector
+ # -----------------------------------------
+ Observation = "1 1 1"
+ #
+ # Definition of the Background Error covariance as a matrix
+ # ---------------------------------------------------------
+ BackgroundError = numpy.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
+ #
+ # Definition of the Observation Error covariance as a matrix
+ # ----------------------------------------------------------
+ ObservationError = numpy.matrix("1 0 0 ; 0 1 0 ; 0 0 1")
+ #
+ # Definition of the Observation Operator as a matrix
+ # --------------------------------------------------
+ ObservationOperator = numpy.identity(3)
+
+The names of the Python variables above are mandatory, in order to define the
+right case variables, but the Python script can be bigger and define classes,
+functions, etc. with other names. It shows different ways to define arrays and
+matrices, using a list, a string (as in Numpy or Octave), the Numpy array
+type, the Numpy matrix type, and Numpy special functions. All of these
+syntaxes are valid.
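+
+As an indication, one can check outside of ADAO that these matrix syntaxes
+indeed describe the same identity matrix, for example with the following
+illustrative snippet (a sketch only, not required for the case)::
+
+    import numpy
+    #
+    A = numpy.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
+    B = numpy.matrix("1 0 0 ; 0 1 0 ; 0 0 1")
+    C = numpy.identity(3)
+    #
+    print numpy.allclose(A, B), numpy.allclose(A, C)   # True True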
+
+After saving this script somewhere in your path (named here "*script.py*" for
+the example), we use the GUI to build the ADAO case. The procedure to fill in
+the case is similar, except that, instead of selecting the "*String*" option
+for the "*FROM*" keyword, we select the "*Script*" one. This leads to a
+"*SCRIPT_DATA/SCRIPT_FILE*" entry in the tree, allowing to choose a file as:
+
+ .. _adao_scriptentry01:
+ .. image:: images/adao_scriptentry01.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **Defining an input value using an external script file**
+
+Other steps and results are exactly the same as in the previous example
+`Building a simple estimation case with explicit data definition`_.
+
+In fact, this script methodology allows to retrieve data from in-line or
+previous calculations, from static files, from databases or from streams, all
+of them outside of SALOME. It also allows to easily modify some input data,
+for example for debugging purposes or for repetitive executions, and it is the
+most versatile method to parametrize the input data. **But be careful, the
+script methodology is not a "safe" procedure, in the sense that erroneous
+data, or errors in calculations, can be directly injected into the YACS scheme
+execution.**
+
+Adding parameters to control the data assimilation algorithm
+------------------------------------------------------------
+
+One can add some optional parameters to control the data assimilation
+algorithm calculation. This is done by using the "*AlgorithmParameters*"
+keyword in the definition of the ADAO case, which is a keyword of the
+ASSIMILATION_STUDY command. This keyword requires a Python dictionary
+containing some key/value pairs. The list of possible optional parameters is
+given in the section :ref:`section_reference`.
+
+If no bounds at all are required on the control variables, then one can choose
+the "BFGS" or "CG" minimisation algorithms for the 3DVAR algorithm. For
+constrained optimization, the minimizer "LBFGSB" is often more robust, but the
+"TNC" one is sometimes more efficient.
+
+This dictionary has to be defined, for example, in an external Python script
+file, using the mandatory variable name "*AlgorithmParameters*" for the
+dictionary. All the keys inside the dictionary are optional, they all have
+default values, and they can be present without being used. For example::
+
+ AlgorithmParameters = {
+ "Minimizer" : "CG", # Possible choice : "LBFGSB", "TNC", "CG", "BFGS"
+ "MaximumNumberOfSteps" : 10,
+ }
+
+Then the script can be added to the ADAO case, in a file entry describing the
+"*AlgorithmParameters*" keyword, as follows:
+
+ .. _adao_scriptentry02:
+ .. image:: images/adao_scriptentry02.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **Adding parameters to control the algorithm**
+
+Other steps and results are exactly the same as in the previous example
+`Building a simple estimation case with explicit data definition`_. The
+dictionary can also be directly given in the input field associated with the
+keyword.
+
+Building a complex case with external data definition by scripts
+----------------------------------------------------------------
+
+This more complex and complete example has to be considered as a framework for
+user inputs, which needs to be tailored for each real application.
+Nevertheless, the file skeletons are sufficiently general to have been used
+for various applications in neutronics, fluid mechanics... Here, we will not
+focus on the results, but rather on the user control of inputs and outputs in
+an ADAO case. As previously, all the numerical values of this example are
+arbitrary.
+
+The objective is to set up the input and output definitions of a physical case
+by external Python scripts, using a general non-linear operator, adding
+control on parameters and so on... The complete framework scripts can be found
+in the ADAO skeletons examples directory under the name
+"*External_data_definition_by_scripts*".
+
+Experimental set up
++++++++++++++++++++
+
+We continue to operate in a 3-dimensional space, in order to restrict the size
+of the numerical objects shown in the scripts, but the problem does not depend
+on the dimension.
+
+We choose a twin experiment context, using a known true state
+:math:`\mathbf{x}^t` of arbitrary values:
+
+ ``Xt = [1 2 3]``
+
+The background state :math:`\mathbf{x}^b`, which represents some *a priori*
+knowledge of the true state, is built as a normal random perturbation of 20%
+of the true state :math:`\mathbf{x}^t` for each component, which is:
+
+ ``Xb = Xt + normal(0, 20%*Xt)``
+
+To describe the background error covariance matrix :math:`\mathbf{B}`, we
+make, as previously, the hypothesis of uncorrelated errors (that is, a
+diagonal matrix, of size 3x3 because :math:`\mathbf{x}^b` is of length 3) and
+of the same variance of 0.1 for all variables. We get:
+
+ ``B = 0.1 * diagonal( length(Xb) )``
+
+We suppose that there exists an observation operator :math:`\mathbf{H}`, which
+can be non-linear. In real calibration procedures or inverse problems, the
+physical simulation codes are embedded in the observation operator. We also
+need to know its gradient with respect to each calibrated variable, which is
+rarely known with industrial codes. But we will see later how to obtain an
+approximated gradient in this case.
+
+Being in a twin experiments framework, the observation :math:`\mathbf{y}^o`
+and its error covariance matrix :math:`\mathbf{R}` are generated by using the
+true state :math:`\mathbf{x}^t` and the observation operator
+:math:`\mathbf{H}`:
+
+ ``Yo = H( Xt )``
+
+and, with an arbitrary standard deviation of 1% on each error component:
+
+    ``R = 0.0001 * diagonal( length(Yo) )``
+
+All the required data assimilation information is then defined.
+
+Skeletons of the scripts describing the setup
++++++++++++++++++++++++++++++++++++++++++++++
+
+We give here the essential parts of each script used afterwards to build the
+ADAO case. Remember that using these scripts in real Python files requires
+correctly defining the path to imported modules or codes (even if the module
+is in the same directory as the importing Python file; we indicate the path
+adjustment using the mention ``"# INSERT PHYSICAL SCRIPT PATH"``), the
+encoding if necessary, etc. The indicated file names for the following scripts
+are arbitrary. Examples of complete script files are available in the ADAO
+examples standard directory.
+
+We first define the true state :math:`\mathbf{x}^t` and some convenient matrix
+building function, in a Python script file named
+``Physical_data_and_covariance_matrices.py``::
+
+ import numpy
+ #
+ def True_state():
+ """
+ Arbitrary values and names, as a tuple of two series of same length
+ """
+ return (numpy.array([1, 2, 3]), ['Para1', 'Para2', 'Para3'])
+ #
+ def Simple_Matrix( size, diagonal=None ):
+ """
+ Diagonal matrix, with either 1 or a given vector on the diagonal
+ """
+ if diagonal is not None:
+ S = numpy.diag( diagonal )
+ else:
+ S = numpy.matrix(numpy.identity(int(size)))
+ return S
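+
+As an indication only, these helper functions can be exercised interactively,
+assuming the file is reachable through the Python path, as in the following
+illustrative session::
+
+    from Physical_data_and_covariance_matrices import True_state, Simple_Matrix
+    #
+    xt, names = True_state()
+    print names, xt                   # ['Para1', 'Para2', 'Para3'] [1 2 3]
+    print Simple_Matrix( size = 3 )   # 3x3 identity matrix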
+
+We can then define the background state :math:`\mathbf{x}^b` as a random
+perturbation of the true state, adding at the end of the script the definition
+of a *required ADAO variable* in order to export the defined value. It is done
+in a Python script file named ``Script_Background_xb.py``::
+
+ from Physical_data_and_covariance_matrices import True_state
+ import numpy
+ #
+ xt, names = True_state()
+ #
+ Standard_deviation = 0.2*xt # 20% for each variable
+ #
+ xb = xt + abs(numpy.random.normal(0.,Standard_deviation,size=(len(xt),)))
+ #
+ # Creating the required ADAO variable
+ # -----------------------------------
+ Background = list(xb)
+
+In the same way, we define the background error covariance matrix
+:math:`\mathbf{B}` as a diagonal matrix, of the same diagonal length as the
+background state, using the convenient function already defined. It is done in
+a Python script file named ``Script_BackgroundError_B.py``::
+
+ from Physical_data_and_covariance_matrices import True_state, Simple_Matrix
+ #
+ xt, names = True_state()
+ #
+ B = 0.1 * Simple_Matrix( size = len(xt) )
+ #
+ # Creating the required ADAO variable
+ # -----------------------------------
+ BackgroundError = B
+
+To continue, we need the observation operator :math:`\mathbf{H}` as a function
+of the state. It is here defined in an external file named
+``"Physical_simulation_functions.py"``, which should contain one function
+conveniently named here ``"DirectOperator"``. This function is a user-defined
+one, representing the :math:`\mathbf{H}` operator as a programming function.
+We suppose this function is given by the user. A simple skeleton is given here
+for convenience::
+
+    import numpy
+    #
+    def DirectOperator( XX ):
+ """ Direct non-linear simulation operator """
+ #
+ # --------------------------------------> EXAMPLE TO BE REMOVED
+ if type(XX) is type(numpy.matrix([])): # EXAMPLE TO BE REMOVED
+ HX = XX.A1.tolist() # EXAMPLE TO BE REMOVED
+ elif type(XX) is type(numpy.array([])): # EXAMPLE TO BE REMOVED
+ HX = numpy.matrix(XX).A1.tolist() # EXAMPLE TO BE REMOVED
+ else: # EXAMPLE TO BE REMOVED
+ HX = XX # EXAMPLE TO BE REMOVED
+ # --------------------------------------> EXAMPLE TO BE REMOVED
+ #
+ return numpy.array( HX )
+
+We do not need the operators ``"TangentOperator"`` and ``"AdjointOperator"``,
+because they will be approximated using ADAO capabilities.
+
+We insist on the fact that the non-linear operator ``"DirectOperator"``, the
+tangent operator ``"TangentOperator"`` and the adjoint operator
+``"AdjointOperator"`` come from the physical knowledge, include the reference
+physical simulation code and its possible adjoint, and have to be carefully
+set up by the data assimilation user. Errors in, or misuses of, the operators
+cannot be detected or corrected by the data assimilation framework alone.
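+
+For illustration only, and not being part of the ADAO example files, a user
+``"DirectOperator"`` replacing the skeleton body above could be a hypothetical
+non-linear model such as::
+
+    import numpy
+    #
+    def DirectOperator( XX ):
+        """ Hypothetical non-linear simulation operator (illustration only) """
+        X = numpy.asarray( XX ).flatten()  # Accept list, array or matrix input
+        # A purely illustrative non-linear response of a physical model
+        HX = [ X[0]**2, X[0]*X[1], X[2] + numpy.sin(X[1]) ]
+        return numpy.array( HX )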
+
+In this twin experiments framework, the observation :math:`\mathbf{y}^o` and
+its error covariance matrix :math:`\mathbf{R}` can be generated. It is done in
+two Python script files, the first one being named ``Script_Observation_yo.py``::
+
+ from Physical_data_and_covariance_matrices import True_state
+ from Physical_simulation_functions import DirectOperator
+ #
+    xt, names = True_state()
+ #
+ yo = DirectOperator( xt )
+ #
+ # Creating the required ADAO variable
+ # -----------------------------------
+ Observation = list(yo)
+
+and the second one named ``Script_ObservationError_R.py``::
+
+ from Physical_data_and_covariance_matrices import True_state, Simple_Matrix
+ from Physical_simulation_functions import DirectOperator
+ #
+ xt, names = True_state()
+ #
+ yo = DirectOperator( xt )
+ #
+ R = 0.0001 * Simple_Matrix( size = len(yo) )
+ #
+ # Creating the required ADAO variable
+ # -----------------------------------
+ ObservationError = R
+
+As in previous examples, it can be useful to define some parameters for the data
+assimilation algorithm. For example, if we use the standard 3DVAR algorithm, the
+following parameters can be defined in a Python script file named
+``Script_AlgorithmParameters.py``::
+
+ # Creating the required ADAO variable
+ # -----------------------------------
+ AlgorithmParameters = {
+ "Minimizer" : "TNC", # Possible : "LBFGSB", "TNC", "CG", "BFGS"
+ "MaximumNumberOfSteps" : 15, # Number of global iterative steps
+ "Bounds" : [
+ [ None, None ], # Bound on the first parameter
+ [ 0., 4. ], # Bound on the second parameter
+ [ 0., None ], # Bound on the third parameter
+ ],
+ }
+
+Finally, it is common to post-process the results, retrieving them after the
+data assimilation phase in order to analyze, print or show them. It requires
+using an intermediary Python script file in order to extract these results.
+The following example Python script file, named
+``Script_UserPostAnalysis.py``, illustrates the fact::
+
+ from Physical_data_and_covariance_matrices import True_state
+ import numpy
+ #
+ xt, names = True_state()
+ xa = ADD.get("Analysis")[-1]
+ x_series = ADD.get("CurrentState")[:]
+ J = ADD.get("CostFunctionJ")[:]
+ #
+ # Verifying the results by printing
+ # ---------------------------------
+ print
+ print "xt = %s"%xt
+ print "xa = %s"%numpy.array(xa)
+ print
+    for i in range( len(x_series) ):
+        print "Step %2i : J = %.5e and X = %s"%(i, J[i], x_series[i])
+ print
+
+At the end, we get a description of the whole case setup through a set of files
+listed here:
+
+#. ``Physical_data_and_covariance_matrices.py``
+#. ``Physical_simulation_functions.py``
+#. ``Script_AlgorithmParameters.py``
+#. ``Script_BackgroundError_B.py``
+#. ``Script_Background_xb.py``
+#. ``Script_ObservationError_R.py``
+#. ``Script_Observation_yo.py``
+#. ``Script_UserPostAnalysis.py``
+
+We insist here on the fact that all these scripts are written by the user and
+cannot be automatically tested. So the user is required to verify the scripts
+(and in particular their input/output) in order to limit the difficulty of
+debugging. We recall: **the script methodology is not a "safe" procedure, in
+the sense that erroneous data, or errors in calculations, can be directly
+injected into the YACS scheme execution.**
+
+Building the case with external data definition by scripts
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+All these scripts can then be used to define the ADAO case with external data
+definition by Python script files. It is entirely similar to the method
+described in the previous section `Building a simple estimation case with
+external data definition by scripts`_. For each variable to be defined, we
+select the "*Script*" option of the "*FROM*" keyword, which leads to a
+"*SCRIPT_DATA/SCRIPT_FILE*" entry in the tree. For the "*ObservationOperator*"
+keyword, we choose the "*ScriptWithOneFunction*" form and keep the default
+differential increment.
+
+The other steps to build the ADAO case are exactly the same as in the previous
+section `Building a simple estimation case with explicit data definition`_.
+
+Using the simple linear operator :math:`\mathbf{H}` from the Python script file
+``Physical_simulation_functions.py`` in the ADAO examples standard directory,
+the results will look like::
+
+ xt = [1 2 3]
+ xa = [ 1.000014 2.000458 3.000390]
+
+    Step  0 : J = 1.81750e+03 and X = [1.014011, 2.459175, 3.390462]
+    Step  1 : J = 1.81750e+03 and X = [1.014011, 2.459175, 3.390462]
+    Step  2 : J = 1.79734e+01 and X = [1.010771, 2.040342, 2.961378]
+    Step  3 : J = 1.79734e+01 and X = [1.010771, 2.040342, 2.961378]
+    Step  4 : J = 1.81909e+00 and X = [1.000826, 2.000352, 3.000487]
+    Step  5 : J = 1.81909e+00 and X = [1.000826, 2.000352, 3.000487]
+    Step  6 : J = 1.81641e+00 and X = [1.000247, 2.000651, 3.000156]
+    Step  7 : J = 1.81641e+00 and X = [1.000247, 2.000651, 3.000156]
+    Step  8 : J = 1.81569e+00 and X = [1.000015, 2.000432, 3.000364]
+    Step  9 : J = 1.81569e+00 and X = [1.000015, 2.000432, 3.000364]
+    Step 10 : J = 1.81568e+00 and X = [1.000013, 2.000458, 3.000390]
+ ...
+
+The state at the first step is the randomly generated background state
+:math:`\mathbf{x}^b`. After completion, this printing on standard output is
+available in the "*YACS Container Log*", obtained through the right click menu
+of the "*proc*" window in the YACS scheme.
+
+.. [#] For more information on YACS, see the *YACS module User's Guide* available in the main "*Help*" menu of the SALOME GUI.
--- /dev/null
+.. _section_glossary:
+
+Glossary
+========
+
+.. glossary::
+ :sorted:
+
+ case
+ One case is defined by a set of data and of choices, packed together
+ through the user interface of the module. The data are physical
+ measurements that have to be available before or during the case
+        execution. The simulation code(s), and the assimilation methods and
+        parameters, have to be chosen; they define the execution properties
+        of the case.
+
+ iteration
+ One iteration occurs when using iterative optimizers (e.g. 3DVAR), and it
+ is entirely hidden in the main YACS OptimizerLoop Node named
+ "compute_bloc". Nevertheless, the user can watch the iterative process
+ through the *YACS Container Log* window, which is updated during the
+ process, and using *Observers* attached to calculation variables.
+
+ APosterioriCovariance
+ Keyword to indicate the covariance matrix of *a posteriori* analysis
+ errors.
+
+ BMA (Background minus Analysis)
+        Difference between the simulation based on the background state and
+        the one based on the optimal state estimation, noted as
+        :math:`\mathbf{x}^b - \mathbf{x}^a`.
+
+ OMA (Observation minus Analysis)
+ Difference between the observations and the result of the simulation based
+ on the optimal state estimation, the analysis, filtered to be compatible
+ with the observation, noted as :math:`\mathbf{y}^o -
+ \mathbf{H}\mathbf{x}^a`.
+
+ OMB (Observation minus Background)
+ Difference between the observations and the result of the simulation based
+ on the background state, filtered to be compatible with the observation,
+ noted as :math:`\mathbf{y}^o - \mathbf{H}\mathbf{x}^b`.
+
+ SigmaBck2
+ Keyword to indicate the Desroziers-Ivanov parameter measuring the
+ background part consistency of the data assimilation optimal state
+ estimation. It can be compared to 1.
+
+ SigmaObs2
+ Keyword to indicate the Desroziers-Ivanov parameter measuring the
+ observation part consistency of the data assimilation optimal state
+ estimation. It can be compared to 1.
+
+ MahalanobisConsistency
+ Keyword to indicate the Mahalanobis parameter measuring the consistency of
+ the data assimilation optimal state estimation. It can be compared to 1.
+
+ analysis
+ The optimal state estimation through a data assimilation or optimization
+ procedure.
+
+ innovation
+ Difference between the observations and the result of the simulation based
+ on the background state, filtered to be compatible with the observation.
+        It is similar to OMB in static cases.
+
+ CostFunctionJ
+ Keyword to indicate the minimization function, noted as :math:`J`.
+
+ CostFunctionJo
+ Keyword to indicate the observation part of the minimization function,
+ noted as :math:`J^o`.
+
+ CostFunctionJb
+ Keyword to indicate the background part of the minimization function,
+ noted as :math:`J^b`.
--- /dev/null
+================================================================================
+ADAO module documentation
+================================================================================
+
+.. image:: images/ADAO_logo.png
+ :align: center
+ :width: 20%
+
+The ADAO module provides **data assimilation and optimization** features in
+the SALOME context. It is based on the usage of other SALOME modules, namely
+YACS and EFICAS, and on the usage of a generic underlying data assimilation
+library.
+
+Briefly stated, Data Assimilation is a methodological framework to compute the
+optimal estimate of the inaccessible true value of a system state over time. It
+uses information coming from experimental measurements or observations, and from
+numerical *a priori* models, including information about their errors. Parts of
+the framework are also known under the names of *parameter estimation*, *inverse
+problems*, *Bayesian estimation*, *optimal interpolation*, etc. More details can
+be found in the section :ref:`section_theory`.
+
+The documentation of this module is divided into parts. The first one,
+:ref:`section_intro`, is an introduction. The second part,
+:ref:`section_theory`, briefly introduces data assimilation, optimization and
+their concepts. The third part, :ref:`section_using`, describes how to use the
+module ADAO. The fourth part, :ref:`section_reference`, gives a detailed
+description of all the ADAO commands and keywords. The fifth part,
+:ref:`section_examples`, gives examples of ADAO usage. Users interested in a
+quick use of the module can jump to this section, but a valuable use of the
+module requires reading and coming back regularly to the third and fourth
+parts. The last part, :ref:`section_advanced`, focuses on advanced usages of
+the module: how to get more information, or how to use it by scripting,
+without the graphical user interface (GUI).
+
+In all this documentation, we use standard notations of linear algebra, data
+assimilation (as described in [Ide97]_) and optimization. In particular,
+vectors are written horizontally or vertically without distinction. Matrices
+are written either normally, or with a condensed notation, consisting of a
+space to separate values and a "``;``" to separate the rows, in a continuous
+line.
+
+Table of contents
+-----------------
+
+.. toctree::
+ :maxdepth: 2
+
+ intro
+ theory
+ using
+ reference
+ examples
+ advanced
+ licence
+ bibliography
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`search`
+* :ref:`section_glossary`
--- /dev/null
+.. _section_intro:
+
+================================================================================
+Introduction to ADAO
+================================================================================
+
+The aim of the ADAO module is **to help using data assimilation or
+optimization methodology in conjunction with other modules in SALOME**. The
+ADAO module provides interfaces to some standard algorithms of data
+assimilation or optimization, and allows their integration in a SALOME study.
+Calculation or simulation modules have to provide one or more specific calling
+methods in order to be callable in the SALOME/ADAO framework, and all the
+SALOME modules can be used through the YACS integration of ADAO.
+
+Its main objective is to *facilitate the use of various standard data
+assimilation or optimization methods*, while remaining easy to use and
+providing a path to help the implementation. For an end user who has already
+gathered his physical input information, it is a matter of "point\&click" to
+build a valid ADAO case and to evaluate it.
+
+The module covers a wide variety of practical applications in a robust way,
+allowing real engineering applications, but also quick experimental setups, to
+be performed. Its methodological and numerical scalability makes it possible
+to extend the application domain.
--- /dev/null
+.. _section_licence:
+
+================================================================================
+Licence and requirements for the module
+================================================================================
+
+.. index:: single: LICENCE
+.. index:: single: SALOME
+.. index:: single: ADAO
+
+The licence for this module is the GNU Lesser General Public License (Lesser
+GPL), as stated here and in the source files::
+
+ <ADAO, a SALOME module for Data Assimilation and Optimization>
+
+ Copyright (C) 2008-2013 EDF R&D
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+In addition, we expect that all publications describing work using this
+software, or all commercial products using it, quote at least one of the
+references given below:
+
+ * *ADAO, a SALOME module for Data Assimilation and Optimization*,
+ http://www.salome-platform.org/
+
+ * *SALOME The Open Source Integration Platform for Numerical Simulation*,
+ http://www.salome-platform.org/
+
+The documentation of the module is also covered by the licence and the
+requirement of quoting.
--- /dev/null
+.. _section_reference:
+
+================================================================================
+Reference description of the ADAO commands and keywords
+================================================================================
+
+This section presents the reference description of the ADAO commands and
+keywords available through the GUI or through scripts.
+
+Each command or keyword to be defined through the ADAO GUI has some
+properties. The first property is to be *required*, *optional* or only
+factual, describing a type of input. The second property is to be an "open"
+variable with a fixed type but with any value allowed by the type, or a
+"restricted" variable, limited to some specified values. Since the EFICAS
+editor GUI has built-in validating capabilities, the properties of the
+commands or keywords given through this GUI are automatically correct.
+
+The mathematical notations used afterward are explained in the section
+:ref:`section_theory`.
+
+Examples of using these commands are available in the section
+:ref:`section_examples` and in the example files installed with the ADAO
+module.
+
+List of possible input types
+----------------------------
+
+.. index:: single: Dict
+.. index:: single: Function
+.. index:: single: Matrix
+.. index:: single: ScalarSparseMatrix
+.. index:: single: DiagonalSparseMatrix
+.. index:: single: String
+.. index:: single: Script
+.. index:: single: Vector
+
+Each ADAO variable has a pseudo-type that helps filling it and validating it.
+The different pseudo-types are:
+
+**Dict**
+ This indicates a variable that has to be filled by a dictionary, usually
+ given as a script.
+
+**Function**
+ This indicates a variable that has to be filled by a function, usually given
+ as a script or a component method.
+
+**Matrix**
+ This indicates a variable that has to be filled by a matrix, usually given
+ either as a string or as a script.
+
+**ScalarSparseMatrix**
+    This indicates a variable that has to be filled by a single number, which
+    will be used to multiply an identity matrix, usually given either as a
+    string or as a script.
+
+**DiagonalSparseMatrix**
+    This indicates a variable that has to be filled by a vector, which will be
+    used as the diagonal of an identity matrix, usually given either as a
+    string or as a script.
+
+**Script**
+ This indicates a script given as an external file. It can be described by a
+ full absolute path name or only by the file name without path.
+
+**String**
+    This indicates a string giving a literal representation of a matrix, a
+    vector or a vector series, such as "1 2 ; 3 4" for a square 2x2 matrix.
+
+**Vector**
+ This indicates a variable that has to be filled by a vector, usually given
+ either as a string or as a script.
+
+**VectorSerie**
+    This indicates a variable that has to be filled by a list of vectors,
+    usually given either as a string or as a script.
+
+When a command or keyword can be filled by a script file name, the script has
+to contain a variable or a method that has the same name as the one to be
+filled. In other words, when importing the script in a YACS Python node, it
+must create a variable of the right name in the current namespace.
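+
+For example, a minimal script file usable to fill the "*Background*" keyword,
+assuming here a 3-dimensional case as in the examples, only has to define a
+variable with that exact name::
+
+    # Content of the script file given for the "Background" keyword
+    Background = [0, 0, 0]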
+
+Reference description for ADAO calculation cases
+------------------------------------------------
+
+List of commands and keywords for an ADAO calculation case
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. index:: single: ASSIMILATION_STUDY
+.. index:: single: Algorithm
+.. index:: single: AlgorithmParameters
+.. index:: single: Background
+.. index:: single: BackgroundError
+.. index:: single: ControlInput
+.. index:: single: Debug
+.. index:: single: EvolutionError
+.. index:: single: EvolutionModel
+.. index:: single: InputVariables
+.. index:: single: Observation
+.. index:: single: ObservationError
+.. index:: single: ObservationOperator
+.. index:: single: Observers
+.. index:: single: OutputVariables
+.. index:: single: Study_name
+.. index:: single: Study_repertory
+.. index:: single: UserDataInit
+.. index:: single: UserPostAnalysis
+
+The first set of commands is related to the description of a calculation case,
+that is, a *Data Assimilation* procedure or an *Optimization* procedure. The
+terms are ordered alphabetically, except the first, which describes the choice
+between calculation or checking. The different commands are the following:
+
+**ASSIMILATION_STUDY**
+ *Required command*. This is the general command describing the data
+ assimilation or optimization case. It hierarchically contains all the other
+ commands.
+
+**Algorithm**
+    *Required command*. This is a string indicating the chosen data
+    assimilation or optimization algorithm. The choices are limited and
+    available through the GUI. There exist for example "3DVAR", "Blue"... See
+    below the list of algorithms and associated parameters in the following
+    subsection `Options and required commands for calculation algorithms`_.
+
+**AlgorithmParameters**
+ *Optional command*. This command allows to add some optional parameters to
+ control the data assimilation or optimization algorithm. It is defined as a
+ "*Dict*" type object, that is, given as a script. See below the list of
+ algorithms and associated parameters in the following subsection `Options
+ and required commands for calculation algorithms`_.
+
+**Background**
+ *Required command*. This indicates the background or initial vector used,
+ previously noted as :math:`\mathbf{x}^b`. It is defined as a "*Vector*" type
+ object, that is, given either as a string or as a script.
+
+**BackgroundError**
+ *Required command*. This indicates the background error covariance matrix,
+ previously noted as :math:`\mathbf{B}`. It is defined as a "*Matrix*" type
+ object, a "*ScalarSparseMatrix*" type object, or a "*DiagonalSparseMatrix*"
+ type object, that is, given either as a string or as a script.
+
+**ControlInput**
+    *Optional command*. This indicates the control vector used to force the
+    evolution model at each step, usually noted as :math:`\mathbf{U}`. It is
+    defined as a "*Vector*" or a "*VectorSerie*" type object, that is, given
+    either as a string or as a script. When there is no control, it has to be
+    a void string ''.
+
+**Debug**
+    *Required command*. This defines the level of trace and intermediary debug
+    information. The choices are limited to 0 (for False) and 1 (for True).
+
+**EvolutionError**
+ *Optional command*. This indicates the evolution error covariance matrix,
+ usually noted as :math:`\mathbf{Q}`. It is defined as a "*Matrix*" type
+ object, a "*ScalarSparseMatrix*" type object, or a "*DiagonalSparseMatrix*"
+ type object, that is, given either as a string or as a script.
+
+**EvolutionModel**
+ *Optional command*. This indicates the evolution model operator, usually
+ noted :math:`M`, which describes a step of evolution. It is defined as a
+ "*Function*" type object, that is, given as a script. Different functional
+ forms can be used, as described in the following subsection `Requirements
+ for functions describing an operator`_. If there is some control :math:`U`
+ included in the evolution model, the operator has to be applied to a pair
+ :math:`(X,U)`.
+
+**InputVariables**
+    *Optional command*. This command allows to indicate the name and size of
+    the physical variables that are bundled together in the control vector.
+    This information is dedicated to the data processed inside an algorithm.
+
+**Observation**
+    *Required command*. This indicates the observation vector used for data
+    assimilation or optimization, previously noted as :math:`\mathbf{y}^o`. It
+    is defined as a "*Vector*" or a "*VectorSerie*" type object, that is,
+    given either as a string or as a script.
+
+**ObservationError**
+ *Required command*. This indicates the observation error covariance matrix,
+ previously noted as :math:`\mathbf{R}`. It is defined as a "*Matrix*" type
+ object, a "*ScalarSparseMatrix*" type object, or a "*DiagonalSparseMatrix*"
+ type object, that is, given either as a string or as a script.
+
+**ObservationOperator**
+ *Required command*. This indicates the observation operator, previously
+ noted :math:`H`, which transforms the input parameters :math:`\mathbf{x}` to
+ results :math:`\mathbf{y}` to be compared to observations
+ :math:`\mathbf{y}^o`. It is defined as a "*Function*" type object, that is,
+ given as a script. Different functional forms can be used, as described in
+ the following subsection `Requirements for functions describing an
+ operator`_. If there is some control :math:`U` included in the observation,
+ the operator has to be applied to a pair :math:`(X,U)`.
+
+**Observers**
+    *Optional command*. This command allows to set internal observers, that
+    are functions linked with a particular variable, which will be executed
+    each time this variable is modified. It is a convenient way to monitor
+    variables of interest during the data assimilation or optimization
+    process, by printing or plotting them, etc. Common templates are provided
+    to help the user to start or to quickly build his case.
+
+**OutputVariables**
+    *Optional command*. This command allows to indicate the name and size of
+    the physical variables that are bundled together in the output observation
+    vector. This information is dedicated to the data processed inside an
+    algorithm.
+
+**Study_name**
+ *Required command*. This is an open string to describe the study by a name
+ or a sentence.
+
+**Study_repertory**
+    *Optional command*. If available, this directory is used to find all the
+    script files that can be used to define some other commands by scripts.
+
+**UserDataInit**
+    *Optional command*. This command allows to initialize some parameters or
+    data automatically before the data assimilation algorithm processing.
+
+**UserPostAnalysis**
+    *Optional command*. This command allows to process some parameters or data
+    automatically after the data assimilation algorithm processing. It is
+    defined as a script or a string, allowing to put post-processing code
+    directly inside the ADAO case. Common templates are provided to help the
+    user to start or to quickly build his case.
+
+Options and required commands for calculation algorithms
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. index:: single: 3DVAR
+.. index:: single: Blue
+.. index:: single: EnsembleBlue
+.. index:: single: KalmanFilter
+.. index:: single: ExtendedKalmanFilter
+.. index:: single: LinearLeastSquares
+.. index:: single: NonLinearLeastSquares
+.. index:: single: ParticleSwarmOptimization
+.. index:: single: QuantileRegression
+
+.. index:: single: AlgorithmParameters
+.. index:: single: Bounds
+.. index:: single: CostDecrementTolerance
+.. index:: single: GradientNormTolerance
+.. index:: single: GroupRecallRate
+.. index:: single: MaximumNumberOfSteps
+.. index:: single: Minimizer
+.. index:: single: NumberOfInsects
+.. index:: single: ProjectedGradientTolerance
+.. index:: single: QualityCriterion
+.. index:: single: Quantile
+.. index:: single: SetSeed
+.. index:: single: StoreInternalVariables
+.. index:: single: StoreSupplementaryCalculations
+.. index:: single: SwarmVelocity
+
+Each algorithm can be controlled using some generic or specific options, given
+through the "*AlgorithmParameters*" optional command, as in the following
+example::
+
+ AlgorithmParameters = {
+ "Minimizer" : "LBFGSB",
+ "MaximumNumberOfSteps" : 25,
+ "StoreSupplementaryCalculations" : ["APosterioriCovariance","OMA"],
+ }
+
+This section describes the available options, algorithm by algorithm. If an
+option is specified for an algorithm that does not support it, the option is
+simply left unused. The meanings of the acronyms or particular names can be
+found in the :ref:`genindex` or the :ref:`section_glossary`. In addition, for
+each algorithm, the required commands/keywords are given; they are described
+in `List of commands and keywords for an ADAO calculation case`_.
+
+**"Blue"**
+
+ *Required commands*
+ *"Background", "BackgroundError",
+ "Observation", "ObservationError",
+ "ObservationOperator"*
+
+  StoreInternalVariables
+    This boolean key allows to store the default internal variables, mainly
+    the current state during the iterative optimization process. Be careful,
+    this can be a numerically costly choice in certain calculation cases. The
+    default is "False".
+
+ StoreSupplementaryCalculations
+ This list indicates the names of the supplementary variables that can be
+ available at the end of the algorithm. It involves potentially costly
+ calculations. The default is a void list, none of these variables being
+ calculated and stored by default. The possible names are in the following
+ list: ["APosterioriCovariance", "BMA", "OMA", "OMB", "Innovation",
+ "SigmaBck2", "SigmaObs2", "MahalanobisConsistency"].
+
+**"LinearLeastSquares"**
+
+ *Required commands*
+ *"Observation", "ObservationError",
+ "ObservationOperator"*
+
+  StoreInternalVariables
+    This boolean key allows to store the default internal variables, mainly
+    the current state during the iterative optimization process. Be careful,
+    this can be a numerically costly choice in certain calculation cases. The
+    default is "False".
+
+ StoreSupplementaryCalculations
+ This list indicates the names of the supplementary variables that can be
+ available at the end of the algorithm. It involves potentially costly
+ calculations. The default is a void list, none of these variables being
+ calculated and stored by default. The possible names are in the following
+ list: ["OMA"].
+
+**"3DVAR"**
+
+ *Required commands*
+ *"Background", "BackgroundError",
+ "Observation", "ObservationError",
+ "ObservationOperator"*
+
+ Minimizer
+ This key allows to choose the optimization minimizer. The default choice
+ is "LBFGSB", and the possible ones are "LBFGSB" (nonlinear constrained
+ minimizer, see [Byrd95]_ and [Zhu97]_), "TNC" (nonlinear constrained
+ minimizer), "CG" (nonlinear unconstrained minimizer), "BFGS" (nonlinear
+ unconstrained minimizer), "NCG" (Newton CG minimizer).
+
+  Bounds
+    This key allows to define upper and lower bounds for every control
+    variable being optimized. Bounds can be given as a list of pairs of
+    lower/upper bounds for each variable, with possibly ``None`` every time
+    there is no bound. The bounds can always be specified, but they are taken
+    into account only by the constrained minimizers.
+
+  MaximumNumberOfSteps
+    This key indicates the maximum number of iterations allowed for the
+    iterative optimization. The default is 15000, which is close to having no
+    limit on iterations. It is then recommended to adapt this parameter to the
+    needs of real problems. For some minimizers, the effective stopping step
+    can be slightly different due to algorithm internal control requirements.
+
+  CostDecrementTolerance
+    This key indicates a limit value, leading to stop successfully the
+    iterative optimization process when the cost function decreases less than
+    this tolerance at the last step. The default is 1.e-7, and it is
+    recommended to adapt it to the needs of real problems.
+
+  ProjectedGradientTolerance
+    This key indicates a limit value, leading to stop successfully the
+    iterative optimization process when all the components of the projected
+    gradient are under this limit. It is only used for constrained minimizers.
+    The default is -1, that is, the internal default of each minimizer
+    (generally 1.e-5), and it is not recommended to change it.
+
+  GradientNormTolerance
+    This key indicates a limit value, leading to stop successfully the
+    iterative optimization process when the norm of the gradient is under this
+    limit. It is only used for unconstrained minimizers. The default is 1.e-5,
+    and it is not recommended to change it.
+
+  StoreInternalVariables
+    This boolean key allows to store the default internal variables, mainly
+    the current state during the iterative optimization process. Be careful,
+    this can be a numerically costly choice in certain calculation cases. The
+    default is "False".
+
+ StoreSupplementaryCalculations
+ This list indicates the names of the supplementary variables that can be
+ available at the end of the algorithm. It involves potentially costly
+ calculations. The default is a void list, none of these variables being
+ calculated and stored by default. The possible names are in the following
+ list: ["APosterioriCovariance", "BMA", "OMA", "OMB", "Innovation",
+ "SigmaObs2", "MahalanobisConsistency"].
+
+**"NonLinearLeastSquares"**
+
+ *Required commands*
+ *"Background",
+ "Observation", "ObservationError",
+ "ObservationOperator"*
+
+ Minimizer
+ This key allows to choose the optimization minimizer. The default choice
+ is "LBFGSB", and the possible ones are "LBFGSB" (nonlinear constrained
+ minimizer, see [Byrd95]_ and [Zhu97]_), "TNC" (nonlinear constrained
+ minimizer), "CG" (nonlinear unconstrained minimizer), "BFGS" (nonlinear
+ unconstrained minimizer), "NCG" (Newton CG minimizer).
+
+  Bounds
+    This key allows to define upper and lower bounds for every control
+    variable being optimized. Bounds can be given as a list of pairs of
+    lower/upper bounds for each variable, with possibly ``None`` every time
+    there is no bound. The bounds can always be specified, but they are taken
+    into account only by the constrained minimizers.
+
+  MaximumNumberOfSteps
+    This key indicates the maximum number of iterations allowed for the
+    iterative optimization. The default is 15000, which is close to having no
+    limit on iterations. It is then recommended to adapt this parameter to the
+    needs of real problems. For some minimizers, the effective stopping step
+    can be slightly different due to algorithm internal control requirements.
+
+  CostDecrementTolerance
+    This key indicates a limit value, leading to stop successfully the
+    iterative optimization process when the cost function decreases less than
+    this tolerance at the last step. The default is 1.e-7, and it is
+    recommended to adapt it to the needs of real problems.
+
+  ProjectedGradientTolerance
+    This key indicates a limit value, leading to stop successfully the
+    iterative optimization process when all the components of the projected
+    gradient are under this limit. It is only used for constrained minimizers.
+    The default is -1, that is, the internal default of each minimizer
+    (generally 1.e-5), and it is not recommended to change it.
+
+  GradientNormTolerance
+    This key indicates a limit value, leading to stop successfully the
+    iterative optimization process when the norm of the gradient is under this
+    limit. It is only used for unconstrained minimizers. The default is 1.e-5,
+    and it is not recommended to change it.
+
+  StoreInternalVariables
+    This boolean key allows to store the default internal variables, mainly
+    the current state during the iterative optimization process. Be careful,
+    this can be a numerically costly choice in certain calculation cases. The
+    default is "False".
+
+ StoreSupplementaryCalculations
+ This list indicates the names of the supplementary variables that can be
+ available at the end of the algorithm. It involves potentially costly
+ calculations. The default is a void list, none of these variables being
+ calculated and stored by default. The possible names are in the following
+ list: ["BMA", "OMA", "OMB", "Innovation"].
+
+**"EnsembleBlue"**
+
+ *Required commands*
+ *"Background", "BackgroundError",
+ "Observation", "ObservationError",
+ "ObservationOperator"*
+
+  SetSeed
+    This key allows to give an integer in order to fix the seed of the random
+    generator used to generate the ensemble. A convenient value is for example
+    1000. By default, the seed is left uninitialized, and so the default
+    initialization from the computer is used.
+
+**"KalmanFilter"**
+
+ *Required commands*
+ *"Background", "BackgroundError",
+ "Observation", "ObservationError",
+ "ObservationOperator",
+ "EvolutionModel", "EvolutionError",
+ "ControlInput"*
+
+ EstimationOf
+ This key allows to choose the type of estimation to be performed. It can be
+ either state-estimation, named "State", or parameter-estimation, named
+ "Parameters". The default choice is "State".
+
+ StoreSupplementaryCalculations
+ This list indicates the names of the supplementary variables that can be
+ available at the end of the algorithm. It involves potentially costly
+ calculations. The default is a void list, none of these variables being
+ calculated and stored by default. The possible names are in the following
+ list: ["APosterioriCovariance", "BMA", "Innovation"].
+
+**"ExtendedKalmanFilter"**
+
+ *Required commands*
+ *"Background", "BackgroundError",
+ "Observation", "ObservationError",
+ "ObservationOperator",
+ "EvolutionModel", "EvolutionError",
+ "ControlInput"*
+
+  Bounds
+    This key allows to define upper and lower bounds for every control
+    variable being optimized. Bounds can be given as a list of pairs of
+    lower/upper bounds for each variable, with extreme values every time there
+    is no bound. The bounds can always be specified, but they are taken into
+    account only by the constrained minimizers.
+
+ ConstrainedBy
+ This key allows to define the method to take bounds into account. The
+ possible methods are in the following list: ["EstimateProjection"].
+
+ EstimationOf
+ This key allows to choose the type of estimation to be performed. It can be
+ either state-estimation, named "State", or parameter-estimation, named
+ "Parameters". The default choice is "State".
+
+ StoreSupplementaryCalculations
+ This list indicates the names of the supplementary variables that can be
+ available at the end of the algorithm. It involves potentially costly
+ calculations. The default is a void list, none of these variables being
+ calculated and stored by default. The possible names are in the following
+ list: ["APosterioriCovariance", "BMA", "Innovation"].
+
+**"ParticleSwarmOptimization"**
+
+ *Required commands*
+ *"Background", "BackgroundError",
+ "Observation", "ObservationError",
+ "ObservationOperator"*
+
+ MaximumNumberOfSteps
+ This key indicates the maximum number of iterations allowed for iterative
+ optimization. The default is 50, which is an arbitrary limit. It is then
+ recommended to adapt this parameter to the needs on real problems.
+
+ NumberOfInsects
+ This key indicates the number of insects or particles in the swarm. The
+ default is 100, which is a usual default for this algorithm.
+
+ SwarmVelocity
+ This key indicates the part of the insect velocity which is imposed by the
+ swarm. It is a positive floating point value. The default value is 1.
+
+ GroupRecallRate
+    This key indicates the recall rate toward the best insect of the swarm.
+    It is a floating point value between 0 and 1. The default value is 0.5.
+
+ QualityCriterion
+    This key indicates the quality criterion, minimized to find the optimal
+    state estimate. The default is the usual data assimilation criterion
+    named "DA", the augmented weighted (or "ponderated") least squares. The
+    chosen criterion has to be in the following list, where the equivalent
+    names are indicated by "=":
+    ["AugmentedPonderatedLeastSquares"="APLS"="DA",
+    "PonderatedLeastSquares"="PLS", "LeastSquares"="LS"="L2",
+    "AbsoluteValue"="L1", "MaximumError"="ME"]
+
+ SetSeed
+    This key allows one to give an integer in order to fix the seed of the
+    random generator used to generate the ensemble. A convenient value is for
+    example 1000. By default, the seed is left uninitialized, and so uses the
+    default initialization from the computer.
+
+ StoreInternalVariables
+    This boolean key allows storing the default internal variables, mainly
+    the current state during the iterative optimization process. Be careful,
+    this can be a numerically costly choice in certain calculation cases. The
+    default is "False".
+
+ StoreSupplementaryCalculations
+    This list indicates the names of the supplementary variables that can be
+    available at the end of the algorithm. It involves potentially costly
+    calculations. The default is an empty list, none of these variables being
+    calculated and stored by default. The possible names are in the following
+    list: ["BMA", "OMA", "OMB", "Innovation"].
+
+**"QuantileRegression"**
+
+ *Required commands*
+ *"Background",
+ "Observation",
+ "ObservationOperator"*
+
+ Quantile
+    This key allows one to define the real value of the desired quantile,
+    between 0 and 1. The default is 0.5, corresponding to the median.
+
+ Minimizer
+    This key allows one to choose the optimization minimizer. The default,
+    and only available, choice is "MMQR" (Majorize-Minimize for Quantile
+    Regression).
+
+ MaximumNumberOfSteps
+    This key indicates the maximum number of iterations allowed for the
+    iterative optimization. The default is 15000, which is close to having no
+    limit on iterations. It is then recommended to adapt this parameter to
+    the needs of real problems.
+
+ CostDecrementTolerance
+    This key indicates a limit value, leading the iterative optimization
+    process to stop successfully when the cost function or its surrogate
+    decreases by less than this tolerance at the last step. The default is
+    1.e-6, and it is recommended to adapt it to the needs of real problems.
+
+ StoreInternalVariables
+    This boolean key allows storing the default internal variables, mainly
+    the current state during the iterative optimization process. Be careful,
+    this can be a numerically costly choice in certain calculation cases. The
+    default is "False".
+
+ StoreSupplementaryCalculations
+    This list indicates the names of the supplementary variables that can be
+    available at the end of the algorithm. It involves potentially costly
+    calculations. The default is an empty list, none of these variables being
+    calculated and stored by default. The possible names are in the following
+    list: ["BMA", "OMA", "OMB", "Innovation"].
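+
+  As an illustration, here is a hypothetical "*AlgorithmParameters*"
+  dictionary for this algorithm, combining some of the keys documented above
+  (the chosen values are arbitrary)::
+
+    AlgorithmParameters = {
+        "Quantile"             : 0.5,
+        "MaximumNumberOfSteps" : 100,
+        }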
+
+Reference description for ADAO checking cases
+---------------------------------------------
+
+List of commands and keywords for an ADAO checking case
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. index:: single: CHECKING_STUDY
+.. index:: single: Algorithm
+.. index:: single: AlgorithmParameters
+.. index:: single: CheckingPoint
+.. index:: single: Debug
+.. index:: single: ObservationOperator
+.. index:: single: Study_name
+.. index:: single: Study_repertory
+.. index:: single: UserDataInit
+
+The second set of commands is related to the description of a checking case,
+that is, a procedure to check required properties of information used
+somewhere else by a calculation case. The terms are ordered alphabetically,
+except the first one, which describes the choice between calculation or
+checking. The different commands are the following:
+
+**CHECKING_STUDY**
+ *Required command*. This is the general command describing the checking
+ case. It hierarchically contains all the other commands.
+
+**Algorithm**
+  *Required command*. This is a string to indicate the checking algorithm
+  chosen. The choices are limited and available through the GUI. There exist
+  for example "FunctionTest", "AdjointTest"... See below the list of
+  algorithms and associated parameters in the following subsection `Options
+  and required commands for checking algorithms`_.
+
+**AlgorithmParameters**
+  *Optional command*. This command allows one to add some optional parameters
+  to control the chosen algorithm. It is defined as a "*Dict*" type object,
+  that is, given as a script. See below the list of algorithms and associated
+  parameters in the following subsection `Options and required commands for
+  checking algorithms`_.
+
+**CheckingPoint**
+ *Required command*. This indicates the vector used,
+ previously noted as :math:`\mathbf{x}^b`. It is defined as a "*Vector*" type
+ object, that is, given either as a string or as a script.
+
+**Debug**
+  *Required command*. This defines the level of trace and intermediary debug
+  information. The choices are limited between 0 (for False) and 1 (for
+  True).
+
+**ObservationOperator**
+ *Required command*. This indicates the observation operator, previously
+ noted :math:`H`, which transforms the input parameters :math:`\mathbf{x}` to
+ results :math:`\mathbf{y}` to be compared to observations
+ :math:`\mathbf{y}^o`. It is defined as a "*Function*" type object, that is,
+ given as a script. Different functional forms can be used, as described in
+ the following subsection `Requirements for functions describing an
+ operator`_.
+
+**Study_name**
+ *Required command*. This is an open string to describe the study by a name
+ or a sentence.
+
+**Study_repertory**
+  *Optional command*. If available, this directory is used to find all the
+  script files that can be used to define some other commands by scripts.
+
+**UserDataInit**
+  *Optional command*. This command allows one to initialize some parameters
+  or data automatically before the data assimilation algorithm processing.
+
+Options and required commands for checking algorithms
++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. index:: single: AdjointTest
+.. index:: single: FunctionTest
+.. index:: single: GradientTest
+.. index:: single: LinearityTest
+
+.. index:: single: AlgorithmParameters
+.. index:: single: AmplitudeOfInitialDirection
+.. index:: single: EpsilonMinimumExponent
+.. index:: single: InitialDirection
+.. index:: single: ResiduFormula
+.. index:: single: SetSeed
+
+We recall that each algorithm can be controlled using some generic or specific
+options given through the "*AlgorithmParameters*" optional command, as follows
+for example::
+
+ AlgorithmParameters = {
+ "AmplitudeOfInitialDirection" : 1,
+ "EpsilonMinimumExponent" : -8,
+ }
+
+If an option is specified for an algorithm that doesn't support it, the option
+is simply left unused. The meaning of the acronyms or particular names can be
+found in the :ref:`genindex` or the :ref:`section_glossary`. In addition, for
+each algorithm, the required commands/keywords are given, being described in
+`List of commands and keywords for an ADAO checking case`_.
+
+**"AdjointTest"**
+
+ *Required commands*
+ *"CheckingPoint",
+ "ObservationOperator"*
+
+ AmplitudeOfInitialDirection
+    This key indicates the scaling of the initial perturbation, built as a
+    vector used for the directional derivative around the nominal checking
+    point. The default is 1, which means no scaling.
+
+ EpsilonMinimumExponent
+    This key indicates the minimal exponent value of the power of 10
+    coefficient to be used to decrease the increment multiplier. The default
+    is -8, and it has to be between 0 and -20. For example, its default value
+    leads to calculating the residue of the scalar product formula with a
+    fixed increment multiplier going from 1.e0 down to 1.e-8.
+
+ InitialDirection
+    This key indicates the vector direction used for the directional
+    derivative around the nominal checking point. It has to be a vector. If
+    not specified, this direction defaults to a random perturbation around
+    zero, of the same vector size as the checking point.
+
+ SetSeed
+    This key allows one to give an integer in order to fix the seed of the
+    random generator used by the algorithm. A convenient value is for example
+    1000. By default, the seed is left uninitialized, and so uses the default
+    initialization from the computer.
+
+**"FunctionTest"**
+
+ *Required commands*
+ *"CheckingPoint",
+ "ObservationOperator"*
+
+ No option
+
+**"GradientTest"**
+
+ *Required commands*
+ *"CheckingPoint",
+ "ObservationOperator"*
+
+ AmplitudeOfInitialDirection
+    This key indicates the scaling of the initial perturbation, built as a
+    vector used for the directional derivative around the nominal checking
+    point. The default is 1, which means no scaling.
+
+ EpsilonMinimumExponent
+    This key indicates the minimal exponent value of the power of 10
+    coefficient to be used to decrease the increment multiplier. The default
+    is -8, and it has to be between 0 and -20. For example, its default value
+    leads to calculating the residue of the scalar product formula with a
+    fixed increment multiplier going from 1.e0 down to 1.e-8.
+
+ InitialDirection
+    This key indicates the vector direction used for the directional
+    derivative around the nominal checking point. It has to be a vector. If
+    not specified, this direction defaults to a random perturbation around
+    zero, of the same vector size as the checking point.
+
+ ResiduFormula
+    This key indicates the residue formula that has to be used for the test.
+    The default choice is "Taylor", and the possible ones are "Taylor"
+    (residue of the Taylor development of the operator, which has to decrease
+    as the power 2 of the perturbation) and "Norm" (residue obtained by
+    taking the norm of the Taylor development at zero order approximation,
+    which approximates the gradient, and which has to remain constant).
+
+ SetSeed
+    This key allows one to give an integer in order to fix the seed of the
+    random generator used by the algorithm. A convenient value is for example
+    1000. By default, the seed is left uninitialized, and so uses the default
+    initialization from the computer.
+
+**"LinearityTest"**
+
+ *Required commands*
+ *"CheckingPoint",
+ "ObservationOperator"*
+
+ AmplitudeOfInitialDirection
+    This key indicates the scaling of the initial perturbation, built as a
+    vector used for the directional derivative around the nominal checking
+    point. The default is 1, which means no scaling.
+
+ EpsilonMinimumExponent
+    This key indicates the minimal exponent value of the power of 10
+    coefficient to be used to decrease the increment multiplier. The default
+    is -8, and it has to be between 0 and -20. For example, its default value
+    leads to calculating the residue of the scalar product formula with a
+    fixed increment multiplier going from 1.e0 down to 1.e-8.
+
+ InitialDirection
+    This key indicates the vector direction used for the directional
+    derivative around the nominal checking point. It has to be a vector. If
+    not specified, this direction defaults to a random perturbation around
+    zero, of the same vector size as the checking point.
+
+ ResiduFormula
+    This key indicates the residue formula that has to be used for the test.
+    The default choice is "CenteredDL", and the possible ones are
+    "CenteredDL" (residue of the difference between the function at the
+    nominal point and its values with positive and negative increments, which
+    has to stay very small), "Taylor" (residue of the Taylor development of
+    the operator normalized by the nominal value, which has to stay very
+    small), "NominalTaylor" (residue of the order 1 approximations of the
+    operator, normalized to the nominal point, which has to stay close to 1),
+    and "NominalTaylorRMS" (residue of the order 1 approximations of the
+    operator, normalized by RMS to the nominal point, which has to stay close
+    to 0).
+
+ SetSeed
+    This key allows one to give an integer in order to fix the seed of the
+    random generator used by the algorithm. A convenient value is for example
+    1000. By default, the seed is left uninitialized, and so uses the default
+    initialization from the computer.
+
+Requirements for functions describing an operator
+-------------------------------------------------
+
+The operators for observation and evolution are required to implement the
+data assimilation or optimization procedures. They include the physical
+simulation by numerical calculations, but also the filtering and restriction
+needed to compare the simulation to the observations. The evolution operator
+is considered here in its incremental form, representing the transition
+between two successive states, and is then similar to the observation
+operator.
+
+Schematically, an operator has to give an output solution for given input
+parameters. Part of the input parameters can be modified during the
+optimization procedure. So the mathematical representation of such a process
+is a function. It was briefly described in the section :ref:`section_theory`
+and is generalized here by the relation:
+
+.. math:: \mathbf{y} = O( \mathbf{x} )
+
+between the pseudo-observations :math:`\mathbf{y}` and the parameters
+:math:`\mathbf{x}` using the observation or evolution operator :math:`O`. The
+same functional representation can be used for the linear tangent model
+:math:`\mathbf{O}` of :math:`O` and its adjoint :math:`\mathbf{O}^*`, also
+required by some data assimilation or optimization algorithms.
+
+Then, **to describe completely an operator, the user has only to provide a
+function that fully and only realizes the functional operation**.
+
+This function is usually given as a script that can be executed in a YACS
+node. This script can indifferently launch external codes or use internal
+SALOME calls and methods. If the algorithm requires the 3 aspects of the
+operator (direct form, tangent form and adjoint form), the user has to give
+the 3 functions or to approximate them.
+
+There are 3 practical methods for the user to provide the operator functional
+representation.
+
+First functional form: using "*ScriptWithOneFunction*"
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. index:: single: ScriptWithOneFunction
+.. index:: single: DirectOperator
+.. index:: single: DifferentialIncrement
+.. index:: single: CenteredFiniteDifference
+
+The first one consists in providing only one potentially non-linear function,
+and in approximating the tangent and the adjoint operators. This is done by
+using the keyword "*ScriptWithOneFunction*" for the description of the chosen
+operator in the ADAO GUI. The user has to provide the function in a script,
+with a mandatory name "*DirectOperator*". For example, the script can follow
+the template::
+
+ def DirectOperator( X ):
+ """ Direct non-linear simulation operator """
+ ...
+ ...
+ ...
+        return Y # The result Y = O(X) of the simulation
+
+In this case, the user can also provide a value for the differential
+increment, using the keyword "*DifferentialIncrement*" through the GUI, which
+has a default value of 1%. This coefficient will be used in the finite
+difference approximation to build the tangent and adjoint operators. The
+finite difference approximation order can also be chosen through the GUI,
+using the keyword "*CenteredFiniteDifference*", with 0 for an uncentered
+scheme of first order, and with 1 for a centered scheme of second order (at
+twice the computational cost of the first order one). The keyword has a
+default value of 0.
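+
+As an illustration of this approximation, here is a hypothetical sketch (the
+principle only, not the ADAO internal implementation) of an uncentered first
+order scheme building a tangent operator from "*DirectOperator*" alone, where
+the increment coefficient ``h`` plays the role of "*DifferentialIncrement*"::
+
+    import numpy
+
+    def ApproximatedTangentOperator( X, dX, h = 0.01 ):
+        """ First order directional derivative of DirectOperator """
+        X, dX = numpy.ravel(X), numpy.ravel(dX)
+        # Difference of two direct simulations, divided by the increment
+        YP = numpy.ravel( DirectOperator( X + h*dX ) )
+        Y0 = numpy.ravel( DirectOperator( X ) )
+        return ( YP - Y0 ) / h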
+
+This first operator definition allows one to easily test the functional form
+before its use in an ADAO case, greatly reducing the complexity of
+implementation.
+
+**Important warning:** the name "*DirectOperator*" is mandatory, and the type of
+the X argument can be either a python list, a numpy array or a numpy 1D-matrix.
+The user has to treat these cases in his script.
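+
+As an illustration, here is a hypothetical sketch of such an input treatment,
+converting the three possible argument types to a flat numpy array before the
+simulation itself (the identity operator stands in for the user code)::
+
+    import numpy
+
+    def DirectOperator( X ):
+        """ Direct non-linear simulation operator """
+        # Accept a python list, a numpy array or a numpy 1D-matrix
+        Xa = numpy.ravel( numpy.array( X, dtype=float ) )
+        # ... the simulation code comes here, using Xa ...
+        Y = Xa # Placeholder: identity operator for this sketch
+        return Y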
+
+Second functional form: using "*ScriptWithFunctions*"
++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. index:: single: ScriptWithFunctions
+.. index:: single: DirectOperator
+.. index:: single: TangentOperator
+.. index:: single: AdjointOperator
+
+The second one consists in directly providing the three associated operators
+:math:`O`, :math:`\mathbf{O}` and :math:`\mathbf{O}^*`. This is done by using
+the keyword "*ScriptWithFunctions*" for the description of the chosen operator
+in the ADAO GUI. The user has to provide three functions in one script, with
+three mandatory names "*DirectOperator*", "*TangentOperator*" and
+"*AdjointOperator*". For example, the script can follow the template::
+
+ def DirectOperator( X ):
+ """ Direct non-linear simulation operator """
+ ...
+ ...
+ ...
+        return Y # An object shaped like the observation Y
+
+ def TangentOperator( (X, dX) ):
+ """ Tangent linear operator, around X, applied to dX """
+ ...
+ ...
+ ...
+        return Y # An object shaped like the observation Y
+
+ def AdjointOperator( (X, Y) ):
+ """ Adjoint operator, around X, applied to Y """
+ ...
+ ...
+ ...
+        return X # An object shaped like the state X
+
+Again, this second operator definition allows one to easily test the
+functional forms before their use in an ADAO case, reducing the complexity of
+implementation.
+
+**Important warning:** the names "*DirectOperator*", "*TangentOperator*" and
+"*AdjointOperator*" are mandatory, and the type of the X, Y, dX arguments can be
+either a python list, a numpy array or a numpy 1D-matrix. The user has to treat
+these cases in his script.
+
+Third functional form: using "*ScriptWithSwitch*"
++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. index:: single: ScriptWithSwitch
+.. index:: single: DirectOperator
+.. index:: single: TangentOperator
+.. index:: single: AdjointOperator
+
+This third form gives more possibilities to control the execution of the
+three functions representing the operator, allowing advanced usage and
+control over each execution of the simulation code. This is done by using the
+keyword "*ScriptWithSwitch*" for the description of the chosen operator in
+the ADAO GUI. The user has to provide a switch in one script to control the
+execution of the direct, tangent and adjoint forms of the simulation code.
+The user can then, for example, use other approximations for the tangent and
+adjoint codes, or introduce more complexity in the argument treatment of the
+functions. But it will be far more complicated to implement and debug.
+
+**It is recommended not to use this third functional form without a solid
+numerical or physical reason.**
+
+If, however, you want to use this third form, we recommend using the following
+template for the switch. It requires an external script or code named
+"*Physical_simulation_functions.py*", containing three functions named
+"*DirectOperator*", "*TangentOperator*" and "*AdjointOperator*" as previously.
+Here is the switch template::
+
+ import Physical_simulation_functions
+ import numpy, logging
+ #
+ method = ""
+ for param in computation["specificParameters"]:
+ if param["name"] == "method":
+ method = param["value"]
+ if method not in ["Direct", "Tangent", "Adjoint"]:
+ raise ValueError("No valid computation method is given")
+ logging.info("Found method is \'%s\'"%method)
+ #
+ logging.info("Loading operator functions")
+ Function = Physical_simulation_functions.DirectOperator
+ Tangent = Physical_simulation_functions.TangentOperator
+ Adjoint = Physical_simulation_functions.AdjointOperator
+ #
+ logging.info("Executing the possible computations")
+ data = []
+ if method == "Direct":
+ logging.info("Direct computation")
+ Xcurrent = computation["inputValues"][0][0][0]
+ data = Function(numpy.matrix( Xcurrent ).T)
+ if method == "Tangent":
+ logging.info("Tangent computation")
+ Xcurrent = computation["inputValues"][0][0][0]
+ dXcurrent = computation["inputValues"][0][0][1]
+        data = Tangent((numpy.matrix(Xcurrent).T, numpy.matrix(dXcurrent).T))
+ if method == "Adjoint":
+ logging.info("Adjoint computation")
+ Xcurrent = computation["inputValues"][0][0][0]
+ Ycurrent = computation["inputValues"][0][0][1]
+ data = Adjoint((numpy.matrix(Xcurrent).T, numpy.matrix(Ycurrent).T))
+ #
+ logging.info("Formatting the output")
+ it = numpy.ravel(data)
+ outputValues = [[[[]]]]
+ for val in it:
+ outputValues[0][0][0].append(val)
+ #
+ result = {}
+ result["outputValues"] = outputValues
+ result["specificOutputInfos"] = []
+ result["returnCode"] = 0
+ result["errorMessage"] = ""
+
+All the various modifications can be done starting from this template.
+
+Special case of controlled evolution operator
++++++++++++++++++++++++++++++++++++++++++++++
+
+In some cases, the evolution or the observation operators are required to be
+controlled by an external input control, given a priori. In this case, the
+generic form of the incremental evolution model is slightly modified as
+follows:
+
+.. math:: \mathbf{y} = O( \mathbf{x}, \mathbf{u})
+
+where :math:`\mathbf{u}` is the control over one state increment. In this case,
+the direct operator has to be applied to a pair of variables :math:`(X,U)`.
+Schematically, the operator has to be set as::
+
+ def DirectOperator( (X, U) ):
+ """ Direct non-linear simulation operator """
+ ...
+ ...
+ ...
+        return Y # An object shaped like X(n+1) or Y(n+1)
+
+The tangent and adjoint operators have the same signature as previously,
+noting that the derivatives have to be taken only partially with respect to
+:math:`\mathbf{x}`. In such a case with explicit control, only the second
+functional form (using "*ScriptWithFunctions*") and the third functional form
+(using "*ScriptWithSwitch*") can be used.
--- /dev/null
+.. _section_theory:
+
+================================================================================
+A brief introduction to Data Assimilation and Optimization
+================================================================================
+
+.. index:: single: Data Assimilation
+.. index:: single: true state
+.. index:: single: observation
+.. index:: single: a priori
+
+**Data Assimilation** is a general framework for computing the optimal estimate
+of the true state of a system, over time if necessary. It uses values obtained
+by combining both observations and *a priori* models, including information
+about their errors.
+
+In other words, data assimilation merges measurement data of a system, that
+are the observations, with *a priori* system physical and mathematical
+knowledge, embedded in numerical models, to obtain the best possible estimate
+of the system true state and of its stochastic properties. Note that this
+true state cannot be reached, but can only be estimated. Moreover, despite
+the fact that the information used is stochastic by nature, data assimilation
+provides deterministic techniques in order to perform the estimation.
+
+Because data assimilation looks for the **best possible** estimate, its
+underlying procedure always integrates optimization in order to find this
+estimate: particular optimization methods are always embedded in data
+assimilation algorithms. Optimization methods can be seen here as a way to
+extend data assimilation applications. They will be introduced this way in
+the section `Going further in the state estimation by optimization
+methods`_, but they are far more general and can be used without data
+assimilation concepts.
+
+Two main types of applications exist in data assimilation, covered by the
+same formalism: **parameters identification** and **fields reconstruction**.
+Before introducing the `Simple description of the data assimilation
+framework`_ in a later section, we briefly describe these two types. At the
+end, some references allow `Going further in the data assimilation
+framework`_.
+
+Fields reconstruction or measures interpolation
+-----------------------------------------------
+
+.. index:: single: fields reconstruction
+
+Fields reconstruction consists in finding, from a restricted set of real
+measures, the physical field which is the most *consistent* with these measures.
+
+This consistency is to be understood in terms of interpolation, that is to
+say that the field we want to reconstruct, using data assimilation on
+measures, has to fit the measures at best, while remaining constrained by the
+overall calculation. The calculation is thus an *a priori* estimation of the
+field that we seek to identify.
+
+If the system evolves in time, the reconstruction has to be established on
+every time step, as a whole. The interpolation process is more complicated in
+this case since it is temporal, and not only in terms of instantaneous values
+of the field.
+
+A simple example of fields reconstruction comes from meteorology, in which
+one looks for the value of variables such as temperature or pressure at all
+points of the spatial domain. One has instantaneous measurements of these
+quantities at certain points, but also a history set of these measures.
+Moreover, these variables are constrained by evolution equations for the
+state of the atmosphere, which indicate for example that the pressure at a
+point cannot take any value independently of the value at this same point at
+the previous time. One must therefore make the reconstruction of a field at
+any point in space, in a manner "consistent" with the evolution equations and
+with the measures of the previous time steps.
+
+Parameters identification or calibration
+----------------------------------------
+
+.. index:: single: parameters identification
+
+The identification of parameters by data assimilation is a form of
+calibration which uses both the measurements and an *a priori* estimation
+(called the "*background*") of the state that one seeks to identify, as well
+as a characterization of their errors. From this point of view, it uses all
+available information on the physical system (even if assumptions about
+errors are relatively restrictive) to find the "*optimal*" estimation of the
+true state. We note, in terms of optimization, that the background realizes a
+mathematical regularization of the main problem of parameters identification.
+
+In practice, the two observed gaps "*calculation-background*" and
+"*calculation-measures*" are added to build the calibration correction of
+parameters or initial conditions. The addition of these two gaps requires a
+relative weight, which is chosen to reflect the trust we give to each piece of
+information. This confidence is measured by the covariance of the errors on the
+background and on the observations. Thus the stochastic aspect of information,
+measured or *a priori*, is essential for building the calibration error
+function.
+
+A simple example of parameters identification comes from any kind of physical
+simulation process involving a parametrized model. For example, a static
+mechanical simulation of a beam constrained by some forces is described by
+beam parameters, such as a Young's modulus, or by the intensity of the force.
+The parameter estimation problem then consists in finding, for example, the
+right Young's modulus so that the simulation of the beam corresponds to the
+measurements, including the knowledge of errors.
+
+Simple description of the data assimilation framework
+-----------------------------------------------------
+
+.. index:: single: background
+.. index:: single: background error covariances
+.. index:: single: observation error covariances
+.. index:: single: covariances
+
+We can write these features in a simple manner. By default, all variables are
+vectors, as there are several parameters to readjust.
+
+According to standard notations in data assimilation, we note
+:math:`\mathbf{x}^a` the optimal parameters that are to be determined by
+calibration, :math:`\mathbf{y}^o` the observations (or experimental
+measurements) that we must compare to the simulation outputs,
+:math:`\mathbf{x}^b` the background (*a priori* values, or regularization
+values) of the searched parameters, and :math:`\mathbf{x}^t` the unknown
+ideal parameters that would give exactly the observations (assuming that the
+errors are zero and the model is exact) as output.
+
+In the simplest case, which is static, the steps of simulation and of
+observation can be combined into a single observation operator noted :math:`H`
+(linear or nonlinear), which transforms the input parameters :math:`\mathbf{x}`
+to results :math:`\mathbf{y}` to be compared to observations
+:math:`\mathbf{y}^o`. Moreover, we use the linearized operator
+:math:`\mathbf{H}` to represent the effect of the full operator :math:`H` around
+a linearization point (and we omit thereafter to mention :math:`H` even if it is
+possible to keep it). In reality, we have already indicated that the stochastic
+nature of variables is essential, coming from the fact that model, background
+and observations are incorrect. We therefore introduce errors of observations
+additively, in the form of a random vector :math:`\mathbf{\epsilon}^o` such
+that:
+
+.. math:: \mathbf{y}^o = \mathbf{H} \mathbf{x}^t + \mathbf{\epsilon}^o
+
+The errors represented here are not only those from observation, but also from
+the simulation. We can always consider that these errors are of zero mean. We
+can then define a matrix :math:`\mathbf{R}` of the observation error covariances
+by:
+
+.. math:: \mathbf{R} = E[\mathbf{\epsilon}^o.{\mathbf{\epsilon}^o}^T]
+
+The background can also be written as a function of the true value, by
+introducing the error vector :math:`\mathbf{\epsilon}^b`:
+
+.. math:: \mathbf{x}^b = \mathbf{x}^t + \mathbf{\epsilon}^b
+
+where errors are also assumed of zero mean, in the same manner as for
+observations. We define the :math:`\mathbf{B}` matrix of background error
+covariances by:
+
+.. math:: \mathbf{B} = E[\mathbf{\epsilon}^b.{\mathbf{\epsilon}^b}^T]
+
+The optimal estimation of the true parameters :math:`\mathbf{x}^t`, given the
+background :math:`\mathbf{x}^b` and the observations :math:`\mathbf{y}^o`, is
+then the "*analysis*" :math:`\mathbf{x}^a` and comes from the minimisation of an
+error function (in variational assimilation) or from the filtering correction (in
+assimilation by filtering).
+
+In **variational assimilation**, in a static case, one classically attempts to
+minimize the following function :math:`J`:
+
+.. math:: J(\mathbf{x})=(\mathbf{x}-\mathbf{x}^b)^T.\mathbf{B}^{-1}.(\mathbf{x}-\mathbf{x}^b)+(\mathbf{y}^o-\mathbf{H}.\mathbf{x})^T.\mathbf{R}^{-1}.(\mathbf{y}^o-\mathbf{H}.\mathbf{x})
+
+which is usually referred to as the "*3D-VAR*" function. Since the
+:math:`\mathbf{B}` and :math:`\mathbf{R}` covariance matrices are
+proportional to the variances of errors, their presence in both terms of the
+function :math:`J` can effectively weight the differences by the confidence
+given to the background or to the observations. The parameters vector
+:math:`\mathbf{x}` realizing the minimum of this function therefore
+constitutes the analysis :math:`\mathbf{x}^a`. It is at this level that we
+have to use the full panoply of function minimization methods otherwise known
+in optimization (see also the section `Going further in the state estimation
+by optimization methods`_). Depending on the size of the parameters vector
+:math:`\mathbf{x}` to identify, and on the availability of the gradient and
+Hessian of :math:`J`, it is appropriate to adapt the chosen optimization
+method (gradient, Newton, quasi-Newton...).
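+
+As an illustration, here is a minimal numpy sketch (with hypothetical
+arguments, all given as numpy matrices) evaluating this "*3D-VAR*" function
+for a given state::
+
+    import numpy
+
+    def J( x, xb, yo, B, R, H ):
+        """ 3D-VAR cost function, all arguments being numpy matrices """
+        dxb = x - xb     # Departure from the background
+        dyo = yo - H * x # Departure from the observations
+        return float( dxb.T * B.I * dxb + dyo.T * R.I * dyo )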
+
+In **assimilation by filtering**, in this simple case usually referred to as
+"*BLUE*" (for "*Best Linear Unbiased Estimator*"), the :math:`\mathbf{x}^a`
+analysis is given as a correction of the background :math:`\mathbf{x}^b` by a
+term proportional to the difference between observations :math:`\mathbf{y}^o`
+and calculations :math:`\mathbf{H}\mathbf{x}^b`:
+
+.. math:: \mathbf{x}^a = \mathbf{x}^b + \mathbf{K}(\mathbf{y}^o - \mathbf{H}\mathbf{x}^b)
+
+where :math:`\mathbf{K}` is the Kalman gain matrix, which is expressed using
+covariance matrices in the following form:
+
+.. math:: \mathbf{K} = \mathbf{B}\mathbf{H}^T(\mathbf{H}\mathbf{B}\mathbf{H}^T+\mathbf{R})^{-1}
+
+The advantage of filtering is the explicit calculation of the gain, and then
+the production of the *a posteriori* analysis covariance matrix.
+
+In this simple static case, we can show, under the assumption of Gaussian error
+distributions, that the two *variational* and *filtering* approaches are
+equivalent.
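+
+As an illustration, here is a minimal numpy sketch of these two formulas,
+with arbitrary identity covariances and observation operator in dimension 3
+(the analysis is then simply the middle of the background and the
+observations)::
+
+    import numpy
+
+    Xb = numpy.matrix("0 ; 0 ; 0") # Background
+    Yo = numpy.matrix("1 ; 1 ; 1") # Observations
+    B  = numpy.matrix("1 0 0 ; 0 1 0 ; 0 0 1")
+    R  = numpy.matrix("1 0 0 ; 0 1 0 ; 0 0 1")
+    H  = numpy.matrix("1 0 0 ; 0 1 0 ; 0 0 1")
+
+    K  = B * H.T * ( H * B * H.T + R ).I # Kalman gain
+    Xa = Xb + K * ( Yo - H * Xb )        # BLUE analysis
+    print "Analysis =", numpy.ravel(Xa)  # Expected: [ 0.5  0.5  0.5]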
+
+It is indicated here that these "*3D-VAR*" and "*BLUE*" methods may be
+extended to dynamic problems, respectively called "*4D-VAR*" and "*Kalman
+filter*". They can take into account the evolution operator to establish, at
+the right time steps, an analysis of the gap between observations and
+simulations, and to have, at every moment, the propagation of the background
+through the evolution model. Many other variants have been developed to
+improve the numerical quality or to take into account computer requirements
+such as calculation size and time.
+
+Going further in the data assimilation framework
+------------------------------------------------
+
+.. index:: single: state estimation
+.. index:: single: parameter estimation
+.. index:: single: inverse problems
+.. index:: single: Bayesian estimation
+.. index:: single: optimal interpolation
+.. index:: single: mathematical regularization
+.. index:: single: data smoothing
+
+To get more information about all the data assimilation techniques, the reader
+can consult introductory documents like [Argaud09]_, on-line training courses or
+lectures like [Bouttier99]_ and [Bocquet04]_ (along with other materials coming
+from geosciences applications), or general documents like [Talagrand97]_,
+[Tarantola87]_, [Kalnay03]_, [Ide97]_ and [WikipediaDA]_.
+
+Note that data assimilation is not restricted to meteorology or geo-sciences, but
+is widely used in other scientific domains. There are several fields in science
+and technology where the effective use of observed but incomplete data is
+crucial.
+
+Some aspects of data assimilation are also known as *state estimation*,
+*parameter estimation*, *inverse problems*, *Bayesian estimation*, *optimal
+interpolation*, *mathematical regularization*, *data smoothing*, etc. These
+terms can be used in bibliographical searches.
+
+Going further in the state estimation by optimization methods
+-------------------------------------------------------------
+
+.. index:: single: state estimation
+.. index:: single: optimization methods
+
+As seen before, in a static simulation case, variational data assimilation
+requires the minimization of the goal function :math:`J`:
+
+.. math:: J(\mathbf{x})=(\mathbf{x}-\mathbf{x}^b)^T.\mathbf{B}^{-1}.(\mathbf{x}-\mathbf{x}^b)+(\mathbf{y}^o-\mathbf{H}.\mathbf{x})^T.\mathbf{R}^{-1}.(\mathbf{y}^o-\mathbf{H}.\mathbf{x})
+
+which is named the "*3D-VAR*" function. It can be seen as an extended form of
+*least squares minimization*, obtained by adding a regularizing term using
+:math:`\mathbf{x}-\mathbf{x}^b`, and by weighting the differences using the
+two covariance matrices :math:`\mathbf{B}` and :math:`\mathbf{R}`. The
+minimization of the :math:`J` function leads to the *best* state estimation.
+
+Extending the state estimation possibilities, by using optimization methods
+and their properties more explicitly, can be imagined in two ways.
+
+First, classical optimization methods involve using various gradient-based
+minimizing procedures. They are extremely efficient to look for a single
+local minimum. But they require the goal function :math:`J` to be
+sufficiently regular and differentiable, and they are not able to capture
+global properties of the minimization problem, for example: global minimum,
+set of equivalent solutions due to over-parametrization, multiple local
+minima, etc. **A way to extend estimation possibilities is then to use a
+whole range of optimizers, allowing global minimization, various robust
+search properties, etc**. There are a lot of minimizing methods, such as
+stochastic ones, evolutionary ones, heuristics and meta-heuristics for
+real-valued problems, etc. They can treat partially irregular or noisy
+functions :math:`J`, can characterize local minima, etc. The main drawbacks
+are a greater numerical cost to find state estimates, and no guarantee of
+convergence in finite time. Here, we only point out the following methods, as
+they are available in the ADAO module: *Quantile regression* [WikipediaQR]_
+and *Particle swarm optimization* [WikipediaPSO]_.
+
+Secondly, optimization methods usually try to minimize quadratic measures of
+errors, as the natural properties of such goal functions are well suited for
+classical gradient optimization. But other measures of errors can be better
+adapted to real physical simulation problems. Then, **another way to extend
+estimation possibilities is to use other measures of errors to be reduced**.
+For example, we can cite the *absolute error value*, the *maximum error
+value*, etc. These error measures are not differentiable, but some
+optimization methods can deal with them: heuristics and meta-heuristics for
+real-valued problems, etc. As previously, the main drawbacks remain a greater
+numerical cost to find state estimates, and no guarantee of convergence in
+finite time. Here, we also point out the following method, as it is available
+in the ADAO module: *Particle swarm optimization* [WikipediaPSO]_.
+
+The reader interested in the subject of optimization can look at [WikipediaMO]_
+as a general entry point.
--- /dev/null
+.. _section_using:
+
+================================================================================
+Using the ADAO module
+================================================================================
+
+.. |eficas_new| image:: images/eficas_new.png
+ :align: middle
+ :scale: 50%
+.. |eficas_save| image:: images/eficas_save.png
+ :align: middle
+ :scale: 50%
+.. |eficas_saveas| image:: images/eficas_saveas.png
+ :align: middle
+ :scale: 50%
+.. |eficas_yacs| image:: images/eficas_yacs.png
+ :align: middle
+ :scale: 50%
+.. |yacs_compile| image:: images/yacs_compile.png
+ :align: middle
+ :scale: 50%
+
+This section presents the usage of the ADAO module in SALOME. It is complemented
+by the detailed description of all the commands and keywords in the section
+:ref:`section_reference`, by advanced usage procedures in the section
+:ref:`section_advanced`, and by examples in the section :ref:`section_examples`.
+
+Logical procedure to build an ADAO test case
+--------------------------------------------
+
+The construction of an ADAO case follows a simple approach to define the set of
+input data, and then generates a complete executable block diagram used in YACS.
+Many variations exist for the definition of input data, but the logical sequence
+remains unchanged.
+
+First of all, the user is assumed to know the input data needed to set up the
+data assimilation study. These data can already be available in SALOME or
+not.
+
+**Basically, the procedure of using ADAO involves the following steps:**
+
+#. **Activate the ADAO module and use the editor GUI,**
+#. **Build and/or modify the ADAO case and save it,**
+#. **Export the ADAO case as a YACS scheme,**
+#. **Supplement and modify the YACS scheme and save it,**
+#. **Execute the YACS case and obtain the results.**
+
+Each step will be detailed in the following sections.
+
+STEP 1: Activate the ADAO module and use the editor GUI
+-------------------------------------------------------
+
+As always for a module, it has to be activated by choosing the appropriate
+module button (or menu) in the toolbar of SALOME. If there is no SALOME study
+loaded, a popup appears, allowing one to choose between creating a new study,
+or opening an already existing one:
+
+ .. _adao_activate1:
+ .. image:: images/adao_activate.png
+ :align: center
+ .. centered::
+ **Activating the module ADAO in SALOME**
+
+Choosing the "*New*" button, an embedded case editor EFICAS [#]_ will be opened,
+along with the standard "*Object browser*". You can then click on the "*New*"
+button |eficas_new| (or choose the "*New*" entry in the "*ADAO*" main menu) to
+create a new ADAO case, and you will see:
+
+ .. _adao_viewer:
+ .. image:: images/adao_viewer.png
+ :align: center
+ :width: 100%
+ .. centered::
+ **The EFICAS editor for cases definition in module ADAO**
+
+STEP 2: Build and modify the ADAO case and save it
+--------------------------------------------------
+
+To build a case using EFICAS, you have to go through a series of sub-steps, by
+selecting, at each sub-step, a keyword and then filling in its value.
+
+The structured editor indicates the allowed hierarchical types, values or
+keywords. Incomplete or incorrect keywords are identified by a red visual
+error flag. Possible values are indicated for keywords defined with a limited
+list of values, and adapted entries are given for the other keywords. Some
+contextual help messages are provided in the reserved places of the editor.
+
+A new case is set up with the minimal list of commands. All the mandatory
+commands or keywords are already present, and none of them can be removed.
+Optional keywords can be added by choosing them in a list of suggestions of
+allowed ones for the main command, for example the "*ASSIMILATION_STUDY*"
+command. As an example, one can add an "*AlgorithmParameters*" keyword, as
+described in the last part of the section :ref:`section_examples`.
+
+At the end, when all fields or keywords have been correctly defined, each line
+of the commands tree must have a green flag. This indicates that the whole case
+is valid and completed (and can be saved).
+
+ .. _adao_jdcexample00:
+ .. image:: images/adao_jdcexample01.png
+ :align: center
+ :scale: 75%
+ .. centered::
+ **Example of a valid ADAO case**
+
+Finally, you have to save your ADAO case by pushing the "*Save*" button
+|eficas_save|, or the "*Save as*" button |eficas_saveas|, or by choosing the
+"*Save/Save as*" entry in the "*ADAO*" menu. You will be prompted for a location
+in your file tree and a name, that will be completed by a "*.comm*" extension
+used for JDC EFICAS files. This will generate a pair of files describing the
+ADAO case, with the same base name, the first one being completed by a "*.comm*"
+extension and the second one by a "*.py*" extension [#]_.
+
+STEP 3: Export the ADAO case as a YACS scheme
+---------------------------------------------
+
+When the ADAO case is completed, you have to export it as a YACS scheme [#]_
+in order to execute the data assimilation calculation. This can be easily
+done by using the "*Export to YACS*" button |eficas_yacs|, or equivalently by
+choosing the "*Export to YACS*" entry in the "*ADAO*" main menu, or in the
+contextual case menu in the object browser.
+
+ .. _adao_exporttoyacs01:
+ .. image:: images/adao_exporttoyacs.png
+ :align: center
+ :scale: 75%
+ .. centered::
+ **"Export to YACS" sub-menu to generate the YACS scheme from the ADAO case**
+
+This will automatically generate a YACS scheme and open the YACS module on
+this scheme. The YACS file, associated with the scheme, will be stored in the
+same directory and with the same base name as the saved ADAO case, only
+changing its extension to "*.xml*". Be careful, *if the XML file name already
+exists, the file will be overwritten without any prompt for confirmation*.
+
+STEP 4: Supplement and modify the YACS scheme and save it
+---------------------------------------------------------
+
+.. index:: single: Analysis
+
+When the YACS scheme is generated and opened in SALOME through the YACS
+module GUI, you can modify or supplement the scheme like any YACS scheme.
+Nodes or blocs can be added, copied or modified to elaborate complex
+analyses, or to insert data assimilation or optimization capabilities into
+more complex YACS calculation schemes. It is recommended to save the modified
+scheme with a new name, in order to preserve the XML file in case you
+re-export the ADAO case to YACS.
+
+The main supplement needed in the YACS scheme is a post-processing step. The
+evaluation of the results has to be done in the physical context of the
+simulation used by the data assimilation procedure. The post-processing can
+be provided through the "*UserPostAnalysis*" ADAO keyword as a script or a
+string, by templates, or can be built as YACS nodes using all the SALOME
+possibilities.
+
+The YACS scheme has an "*algoResults*" output port on the computation bloc,
+which gives access to a "*pyobj*" object, named hereafter "*ADD*", containing
+all the processing results. These results can be obtained by retrieving the
+named variables stored along the calculation. The main one is "*Analysis*",
+which can be obtained by the following python command (for example in an
+in-line script node or in a script provided through the "*UserPostAnalysis*"
+keyword)::
+
+ Analysis = ADD.get("Analysis")[:]
+
+"*Analysis*" is a complex object, similar to a list of values calculated at each
+step of data assimilation calculation. In order to get and print the optimal
+data assimilation state evaluation, in script provided through the
+"*UserPostAnalysis*" keyword, one can use::
+
+ Xa = ADD.get("Analysis")[-1]
+ print "Optimal state:", Xa
+ print
+
+This ``Xa`` is a vector of values, which represents the solution of the data
+assimilation or optimization evaluation problem, noted as :math:`\mathbf{x}^a`
+in the section :ref:`section_theory`.
+
+Such commands can be used to print results, or to convert them to structures
+that can be used in the native or external SALOME post-processing. A simple
+example is given in the section :ref:`section_examples`.
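+
+As an illustration, here is a hypothetical conversion of this result to a
+flat numpy array, before further SALOME post-processing::
+
+    import numpy
+    Xa = numpy.ravel( ADD.get("Analysis")[-1] )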
+
+STEP 5: Execute the YACS case and obtain the results
+----------------------------------------------------
+
+The YACS scheme is now complete and can be executed. Parametrization and
+execution of a YACS case is fully compliant with the standard way to deal with a
+YACS scheme, and is described in the *YACS module User's Guide*.
+
+To recall the simplest way to proceed, the YACS scheme has to be compiled using
+the button |yacs_compile|, or the equivalent YACS menu entry, to prepare the
+scheme to run. Then the compiled scheme can be started, executed step by step or
+using breakpoints, etc.
+
+The standard output will be pushed into the "*YACS Container Log*", obtained
+through the right click menu of the "*proc*" window in the YACS GUI. The errors
+are shown either in the "*YACS Container Log*", or at the command line in the
+shell window (if SALOME has been launched by its explicit command and not by
+menu). As an example, the output of the above simple case is the following::
+
+ Entering in the assimilation study
+ Name is set to........: Test
+ Algorithm is set to...: Blue
+ Launching the analyse
+
+ Optimal state: [0.5, 0.5, 0.5]
+
+shown in the "*YACS Container Log*".
+
+The execution can also be done using a shell script, as described in the section
+:ref:`section_advanced`.
+
+.. [#] For more information on EFICAS, see the *EFICAS module* available in SALOME GUI.
+
+.. [#] For more information on YACS, see the *YACS module User's Guide* available in the main "*Help*" menu of SALOME GUI.
+
+.. [#] This intermediary python file can also be used as described in the section :ref:`section_advanced`.
+++ /dev/null
-.. _section_examples:
-
-================================================================================
-Tutorials on using the ADAO module
-================================================================================
-
-.. |eficas_new| image:: images/eficas_new.png
- :align: middle
- :scale: 50%
-.. |eficas_save| image:: images/eficas_save.png
- :align: middle
- :scale: 50%
-.. |eficas_saveas| image:: images/eficas_saveas.png
- :align: middle
- :scale: 50%
-.. |eficas_yacs| image:: images/eficas_yacs.png
- :align: middle
- :scale: 50%
-
-This section presents some examples on using the ADAO module in SALOME. The
-first one shows how to build a simple data assimilation case defining
-explicitly all the required data through the GUI. The second one shows, on the
-same case, how to define data using external sources through scripts.
-
-Building a simple estimation case with explicit data definition
----------------------------------------------------------------
-
-This simple example is a demonstration one, and describes how to set a BLUE
-estimation framework in order to get *ponderated (or fully weighted) least
-square estimated state* of a system from an observation of the state and from an
-*a priori* knowledge (or background) of this state. In other words, we look for
-the weighted middle between the observation and the background vectors. All the
-numerical values of this example are arbitrary.
-
-Experimental set up
-+++++++++++++++++++
-
-We choose to operate in a 3-dimensional space. 3D is chosen in order to restrict
-the size of numerical object to explicitly enter by the user, but the problem is
-not dependant of the dimension and can be set in dimension 1000... The
-observation :math:`\mathbf{y}^o` is of value 1 in each direction, so:
-
- ``Yo = [1 1 1]``
-
-The background state :math:`\mathbf{x}^b`, which represent some *a priori*
-knowledge or a regularization, is of value of 0 in each direction, which is:
-
- ``Xb = [0 0 0]``
-
-Data assimilation requires information on errors covariances :math:`\mathbf{R}`
-and :math:`\mathbf{B}` respectively for observation and background variables. We
-choose here to have uncorrelated errors (that is, diagonal matrices) and to have
-the same variance of 1 for all variables (that is, identity matrices). We get:
-
- ``B = R = [1 0 0 ; 0 1 0 ; 0 0 1]``
-
-Last, we need an observation operator :math:`\mathbf{H}` to convert the
-background value in the space of observation value. Here, because the space
-dimensions are the same, we can choose the identity as the observation
-operator:
-
- ``H = [1 0 0 ; 0 1 0 ; 0 0 1]``
-
-With such choices, the Best Linear Unbiased Estimator (BLUE) will be the average
-vector between :math:`\mathbf{y}^o` and :math:`\mathbf{x}^b`, named the
-*analysis* and denoted by :math:`\mathbf{x}^a`:
-
- ``Xa = [0.5 0.5 0.5]``
-
-As en extension of this example, one can change the variances for
-:math:`\mathbf{B}` or :math:`\mathbf{R}` independently, and the analysis will
-move to :math:`\mathbf{y}^o` or to :math:`\mathbf{x}^b` in inverse proportion of
-the variances in :math:`\mathbf{B}` and :math:`\mathbf{R}`. It is also
-equivalent to search for the analysis thought a BLUE algorithm or a 3DVAR one.
-
-Using the GUI to build the ADAO case
-++++++++++++++++++++++++++++++++++++
-
-First, you have to activate the ADAO module by choosing the appropriate module
-button or menu of SALOME, and you will see:
-
- .. _adao_activate2:
- .. image:: images/adao_activate.png
- :align: center
- :width: 100%
- .. centered::
- **Activating the module ADAO in SALOME**
-
-Choose the "*New*" button in this window. You will directly get the EFICAS
-interface for variables definition, along with the "*Object browser*". You can
-then click on the "*New*" button |eficas_new| to create a new ADAO case, and you
-will see:
-
- .. _adao_viewer:
- .. image:: images/adao_viewer.png
- :align: center
- :width: 100%
- .. centered::
- **The EFICAS viewer for cases definition in module ADAO**
-
-Then fill in the variables to build the ADAO case by using the experimental set
-up described above. All the technical information given above will be directly
-inserted in the ADAO case definition, by using the *String* type for all the
-variables. When the case definition is ready, save it to a "*JDC (\*.comm)*"
-native file somewhere in your path. Remember that other files will be also
-created near this first one, so it is better to make a specific directory for
-your case, and to save the file inside. The name of the file will appear in the
-"*Object browser*" window, under the "*ADAO*" menu. The final case definition
-looks like this:
-
- .. _adao_jdcexample01:
- .. image:: images/adao_jdcexample01.png
- :align: center
- :width: 100%
- .. centered::
- **Definition of the experimental set up chosen for the ADAO case**
-
-To go further, we need now to generate the YACS scheme from the ADAO case
-definition. In order to do that, right click on the name of the file case in the
-"*Object browser*" window, and choose the "*Export to YACS*" sub-menu (or the
-"*Export to YACS*" button |eficas_yacs|) as below:
-
- .. _adao_exporttoyacs00:
- .. image:: images/adao_exporttoyacs.png
- :align: center
- :scale: 75%
- .. centered::
- **"Export to YACS" sub-menu to generate the YACS scheme from the ADAO case**
-
-This command will generate the YACS scheme, activate YACS module in SALOME, and
-open the new scheme in the GUI of the YACS module [#]_. After reordering the
-nodes by using the "*arrange local node*" sub-menu of the YACS graphical view of
-the scheme, you get the following representation of the generated ADAO scheme:
-
- .. _yacs_generatedscheme:
- .. image:: images/yacs_generatedscheme.png
- :align: center
- :width: 100%
- .. centered::
- **YACS generated scheme from the ADAO case**
-
-After that point, all the modifications, executions and post-processing of the
-data assimilation scheme will be done in YACS. In order to check the result in a
-simple way, we create here a new YACS node by using the "*in-line script node*"
-sub-menu of the YACS graphical view, and we name it "*PostProcessing*".
-
-This script will retrieve the data assimilation analysis from the
-"*algoResults*" output port of the computation bloc (which gives access to a
-SALOME Python Object), and will print it on the standard output.
-
-To obtain this, the in-line script node need to have an input port of type
-"*pyobj*" named "*results*" for example, that have to be linked graphically to
-the "*algoResults*" output port of the computation bloc. Then the code to fill
-in the script node is::
-
- Xa = results.ADD.get("Analysis")[-1]
-
- print
- print "Analysis =",Xa
- print
-
-The augmented YACS scheme can be saved (overwriting the generated scheme if the
-simple "*Save*" command or button are used, or with a new name). Ideally, the
-implementation of such post-processing procedure can be done in YACS to test,
-and then entirely saved in one script that can be integrated in the ADAO case by
-using the keyword "*UserPostAnalysis*".
-
-Then, classically in YACS, it have to be prepared for run, and then executed.
-After completion, the printing on standard output is available in the "*YACS
-Container Log*", obtained through the right click menu of the "*proc*" window in
-the YACS scheme as shown below:
-
- .. _yacs_containerlog:
- .. image:: images/yacs_containerlog.png
- :align: center
- :width: 100%
- .. centered::
- **YACS menu for Container Log, and dialog window showing the log**
-
-We verify that the result is correct by checking that the log dialog window
-contains the following line::
-
- Analysis = [0.5, 0.5, 0.5]
-
-as shown in the image above.
-
-As a simple extension of this example, one can notice that the same problem
-solved with a 3DVAR algorithm gives the same result. This algorithm can be
-chosen at the ADAO case building step, before entering in YACS step. The
-ADAO 3DVAR case will look completely similar to the BLUE algorithmic case, as
-shown by the following figure:
-
- .. _adao_jdcexample02:
- .. image:: images/adao_jdcexample02.png
- :align: center
- :width: 100%
- .. centered::
- **Defining an ADAO 3DVAR case looks completely similar to a BLUE case**
-
-There is only one command changing, with "*3DVAR*" value instead of "*Blue*".
-
-Building a simple estimation case with external data definition by scripts
---------------------------------------------------------------------------
-
-It is useful to get parts or all of the data from external definition, using
-Python script files to provide access to the data. As an example, we build here
-an ADAO case representing the same experimental set up as in the above example
-`Building a simple estimation case with explicit data definition`_, but using
-data form a single one external Python script file.
-
-First, we write the following script file, using conventional names for the
-desired variables. Here, all the input variables are defined in the script, but
-the user can choose to split the file in several ones, or to mix explicit data
-definition in the ADAO GUI and implicit data definition by external files. The
-present script looks like::
-
- import numpy
- #
- # Definition of the Background as a vector
- # ----------------------------------------
- Background = [0, 0, 0]
- #
- # Definition of the Observation as a vector
- # -----------------------------------------
- Observation = "1 1 1"
- #
- # Definition of the Background Error covariance as a matrix
- # ---------------------------------------------------------
- BackgroundError = numpy.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
- #
- # Definition of the Observation Error covariance as a matrix
- # ----------------------------------------------------------
- ObservationError = numpy.matrix("1 0 0 ; 0 1 0 ; 0 0 1")
- #
- # Definition of the Observation Operator as a matrix
- # --------------------------------------------------
- ObservationOperator = numpy.identity(3)
-
-The names of the Python variables above are mandatory, in order to define the
-right variables, but the Python script can be bigger and define classes,
-functions, etc. with other names. The script shows different ways to define
-arrays and matrices, using a list, a string (as in Numpy or Octave), the Numpy
-array type, the Numpy matrix type, and Numpy special functions. All of these
-syntaxes are valid.
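-
-As a quick check of this flexibility, one can verify for example that the
-condensed string form and the Numpy special function above build the same
-identity matrix (a small sketch using Numpy only)::
-
-    import numpy
-    # Condensed string form and special function form of the 3x3 identity
-    A1 = numpy.matrix("1 0 0 ; 0 1 0 ; 0 0 1")
-    A2 = numpy.identity(3)
-    print(numpy.allclose(A1, A2)) # expected: True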
-
-After saving this script somewhere in your path (named here "*script.py*" for
-the example), we use the GUI to build the ADAO case. The procedure to fill in
-the case is similar except that, instead of selecting the "*String*" option for
-the "*FROM*" keyword, we select the "*Script*" one. This leads to a
-"*SCRIPT_DATA/SCRIPT_FILE*" entry in the tree, allowing to choose a file as:
-
- .. _adao_scriptentry01:
- .. image:: images/adao_scriptentry01.png
- :align: center
- :width: 100%
- .. centered::
- **Defining an input value using an external script file**
-
-Other steps and results are exactly the same as in the `Building a simple
-estimation case with explicit data definition`_ previous example.
-
-In fact, this script methodology allows one to retrieve data from in-line or
-previous calculations, from static files, from a database or from a data
-stream, all of them outside of SALOME. It also allows one to easily modify some
-input data, for example for debugging purposes or for repetitive execution
-processes, and it is the most versatile method to parametrize the input data.
-**But be careful, the script methodology is not a "safe" procedure, in the
-sense that erroneous data, or errors in calculations, can be directly injected
-into the YACS scheme execution.**
-
-Adding parameters to control the data assimilation algorithm
-------------------------------------------------------------
-
-One can add some optional parameters to control the data assimilation algorithm
-calculation. This is done by using the "*AlgorithmParameters*" keyword in the
-definition of the ADAO case, which is a keyword of the ASSIMILATION_STUDY. This
-keyword requires a Python dictionary, containing some key/value pairs. The list
-of possible optional parameters is given in the subsection
-:ref:`section_reference`.
-
-If no bounds at all are required on the control variables, then one can choose
-the "BFGS" or "CG" minimisation algorithm for the 3DVAR algorithm. For
-constrained optimization, the minimizer "LBFGSB" is often more robust, but the
-"TNC" is sometimes more performant.
-
-This dictionary has to be defined, for example, in an external Python script
-file, using the mandatory variable name "*AlgorithmParameters*" for the
-dictionary. All the keys inside the dictionary are optional, they all have
-default values, and can exist without being used. For example::
-
- AlgorithmParameters = {
- "Minimizer" : "CG", # Possible choice : "LBFGSB", "TNC", "CG", "BFGS"
- "MaximumNumberOfSteps" : 10,
- }
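-
-If bounds are needed with a constrained minimizer, the same dictionary can be
-extended, as in the following sketch where the bound values are arbitrary::
-
-    AlgorithmParameters = {
-        "Minimizer" : "LBFGSB", # Constrained minimizer
-        "MaximumNumberOfSteps" : 10,
-        "Bounds" : [
-            [ 0., 4. ],     # Bounds on the first parameter
-            [ None, None ], # No bounds on the second parameter
-            [ 0., None ],   # Lower bound only on the third parameter
-            ],
-        }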
-
-Then the script can be added to the ADAO case, in a file entry describing the
-"*AlgorithmParameters*" keyword, as follows:
-
- .. _adao_scriptentry02:
- .. image:: images/adao_scriptentry02.png
- :align: center
- :width: 100%
- .. centered::
- **Adding parameters to control the algorithm**
-
-Other steps and results are exactly the same as in the `Building a simple
-estimation case with explicit data definition`_ previous example. The dictionary
-can also be directly given in the input field associated with the keyword.
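-
-In that case, a one-line dictionary literal, such as the following sketch, can
-for example be typed directly in the field::
-
-    {"Minimizer":"CG", "MaximumNumberOfSteps":10}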
-
-Building a complex case with external data definition by scripts
-----------------------------------------------------------------
-
-This more complex and complete example has to be considered as a framework for
-user inputs, which needs to be tailored for each real application. Nevertheless,
-the file skeletons are sufficiently general to have been used for various
-applications in neutronics, fluid mechanics, etc. Here, we will not focus on the
-results, but more on the user control of inputs and outputs in an ADAO case. As
-previously, all the numerical values of this example are arbitrary.
-
-The objective is to set up the input and output definitions of a physical case
-by external Python scripts, using a general non-linear operator, adding control
-on parameters and so on... The complete framework scripts can be found in the
-ADAO skeletons examples directory under the name
-"*External_data_definition_by_scripts*".
-
-Experimental set up
-+++++++++++++++++++
-
-We continue to operate in a 3-dimensional space, in order to restrict
-the size of the numerical objects shown in the scripts, but the problem
-does not depend on the dimension.
-
-We choose a twin experiment context, using a known true state
-:math:`\mathbf{x}^t` of arbitrary values:
-
- ``Xt = [1 2 3]``
-
-The background state :math:`\mathbf{x}^b`, which represents some *a priori*
-knowledge of the true state, is built as a normal random perturbation of 20% of
-the true state :math:`\mathbf{x}^t` for each component, that is:
-
- ``Xb = Xt + normal(0, 20%*Xt)``
-
-To describe the background error covariance matrix :math:`\mathbf{B}`, we make
-as previously the hypothesis of uncorrelated errors (that is, a diagonal matrix,
-of size 3x3 because :math:`\mathbf{x}^b` is of length 3) and of the same
-variance of 0.1 for all variables. We get:
-
- ``B = 0.1 * diagonal( length(Xb) )``
-
-We suppose that there exists an observation operator :math:`\mathbf{H}`, which
-can be non-linear. In real calibration procedures or inverse problems, the
-physical simulation codes are embedded in the observation operator. We also
-need to know its gradient with respect to each calibrated variable, information
-that is rarely available with industrial codes. But we will see later how to
-obtain an approximated gradient in this case.
-
-Being in twin experiments, the observation :math:`\mathbf{y}^o` and its error
-covariance matrix :math:`\mathbf{R}` are generated by using the true state
-:math:`\mathbf{x}^t` and the observation operator :math:`\mathbf{H}`:
-
- ``Yo = H( Xt )``
-
-and, with an arbitrary standard deviation of 1% on each error component:
-
-    ``R = 0.0001 * diagonal( length(Yo) )``
-
-All the required data assimilation information is then defined.
-
-Skeletons of the scripts describing the setup
-+++++++++++++++++++++++++++++++++++++++++++++
-
-We give here the essential parts of each script used afterwards to build the ADAO
-case. Remember that using these scripts in real Python files requires one to
-correctly define the path to the imported modules or codes (even if the module
-is in the same directory as the importing Python file; we indicate the path
-adjustment using the mention ``"# INSERT PHYSICAL SCRIPT PATH"``), the encoding
-if necessary, etc. The indicated file names for the following scripts are
-arbitrary. Examples of complete file scripts are available in the ADAO examples
-standard directory.
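-
-For example, such a path adjustment is typically a simple ``sys.path``
-insertion at the top of each script, as in this sketch where the placeholder
-has to be replaced by the actual directory of the physical scripts::
-
-    import sys, os
-    # INSERT PHYSICAL SCRIPT PATH: make the physical scripts importable
-    sys.path.insert(0, os.path.abspath("<path to the physical scripts>"))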
-
-We first define the true state :math:`\mathbf{x}^t` and some convenient matrix
-building function, in a Python script file named
-``Physical_data_and_covariance_matrices.py``::
-
- import numpy
- #
- def True_state():
- """
- Arbitrary values and names, as a tuple of two series of same length
- """
- return (numpy.array([1, 2, 3]), ['Para1', 'Para2', 'Para3'])
- #
- def Simple_Matrix( size, diagonal=None ):
- """
- Diagonal matrix, with either 1 or a given vector on the diagonal
- """
- if diagonal is not None:
- S = numpy.diag( diagonal )
- else:
- S = numpy.matrix(numpy.identity(int(size)))
- return S
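-
-As a quick interactive check, assuming this file is importable from the current
-directory, these helpers can be exercised as follows::
-
-    from Physical_data_and_covariance_matrices import True_state, Simple_Matrix
-    #
-    xt, names = True_state()
-    print(names)                           # ['Para1', 'Para2', 'Para3']
-    print(Simple_Matrix( size = len(xt) )) # 3x3 identity matrix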
-
-We can then define the background state :math:`\mathbf{x}^b` as a random
-perturbation of the true state, adding at the end of the script the definition
-of a *required ADAO variable* in order to export the defined value. It is done
-in a Python script file named ``Script_Background_xb.py``::
-
- from Physical_data_and_covariance_matrices import True_state
- import numpy
- #
- xt, names = True_state()
- #
- Standard_deviation = 0.2*xt # 20% for each variable
- #
- xb = xt + abs(numpy.random.normal(0.,Standard_deviation,size=(len(xt),)))
- #
- # Creating the required ADAO variable
- # -----------------------------------
- Background = list(xb)
-
-In the same way, we define the background error covariance matrix
-:math:`\mathbf{B}` as a diagonal matrix, with the same dimension as the
-background or the true state, using the convenient function already defined. It
-is done in a Python script file named ``Script_BackgroundError_B.py``::
-
- from Physical_data_and_covariance_matrices import True_state, Simple_Matrix
- #
- xt, names = True_state()
- #
- B = 0.1 * Simple_Matrix( size = len(xt) )
- #
- # Creating the required ADAO variable
- # -----------------------------------
- BackgroundError = B
-
-To continue, we need the observation operator :math:`\mathbf{H}` as a function
-of the state. It is here defined in an external file named
-``"Physical_simulation_functions.py"``, which should contain one function
-conveniently named here ``"DirectOperator"``. This function is a user-supplied
-one, representing the :math:`\mathbf{H}` operator as a programming function. We
-suppose this function is given by the user. A simple skeleton is given here for
-convenience::
-
-    import numpy
-    #
-    def DirectOperator( XX ):
- """ Direct non-linear simulation operator """
- #
- # --------------------------------------> EXAMPLE TO BE REMOVED
- if type(XX) is type(numpy.matrix([])): # EXAMPLE TO BE REMOVED
- HX = XX.A1.tolist() # EXAMPLE TO BE REMOVED
- elif type(XX) is type(numpy.array([])): # EXAMPLE TO BE REMOVED
- HX = numpy.matrix(XX).A1.tolist() # EXAMPLE TO BE REMOVED
- else: # EXAMPLE TO BE REMOVED
- HX = XX # EXAMPLE TO BE REMOVED
- # --------------------------------------> EXAMPLE TO BE REMOVED
- #
- return numpy.array( HX )
-
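-A quick check of this skeleton, assuming it has been saved in the file
-``"Physical_simulation_functions.py"``, could then be::
-
-    from Physical_simulation_functions import DirectOperator
-    # With the example branch kept, the input comes back as a Numpy array
-    print(DirectOperator( [1., 2., 3.] ))
-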
-We do not need the operators ``"TangentOperator"`` and ``"AdjointOperator"``
-because they will be approximated using ADAO capabilities.
-
-We insist on the fact that the non-linear operator ``"DirectOperator"``, the
-tangent operator ``"TangentOperator"`` and the adjoint operator
-``"AdjointOperator"`` come from the physical knowledge, include the reference
-physical simulation code and its possible adjoint, and have to be carefully set
-up by the data assimilation user. Errors in, or misuses of, the operators
-cannot be detected or corrected by the data assimilation framework alone.
-
-In this twin experiments framework, the observation :math:`\mathbf{y}^o` and its
-error covariance matrix :math:`\mathbf{R}` can be generated. It is done in two
-Python script files, the first one being named ``Script_Observation_yo.py``::
-
- from Physical_data_and_covariance_matrices import True_state
- from Physical_simulation_functions import DirectOperator
- #
-    xt, names = True_state()
- #
- yo = DirectOperator( xt )
- #
- # Creating the required ADAO variable
- # -----------------------------------
- Observation = list(yo)
-
-and the second one named ``Script_ObservationError_R.py``::
-
- from Physical_data_and_covariance_matrices import True_state, Simple_Matrix
- from Physical_simulation_functions import DirectOperator
- #
- xt, names = True_state()
- #
- yo = DirectOperator( xt )
- #
- R = 0.0001 * Simple_Matrix( size = len(yo) )
- #
- # Creating the required ADAO variable
- # -----------------------------------
- ObservationError = R
-
-As in previous examples, it can be useful to define some parameters for the data
-assimilation algorithm. For example, if we use the standard 3DVAR algorithm, the
-following parameters can be defined in a Python script file named
-``Script_AlgorithmParameters.py``::
-
- # Creating the required ADAO variable
- # -----------------------------------
- AlgorithmParameters = {
- "Minimizer" : "TNC", # Possible : "LBFGSB", "TNC", "CG", "BFGS"
- "MaximumNumberOfSteps" : 15, # Number of global iterative steps
- "Bounds" : [
- [ None, None ], # Bound on the first parameter
- [ 0., 4. ], # Bound on the second parameter
- [ 0., None ], # Bound on the third parameter
- ],
- }
-
-Finally, it is common to post-process the results, retrieving them after the
-data assimilation phase in order to analyze, print or show them. This requires
-the use of an intermediary Python script file to extract these results. The
-following example of a Python script file, named ``Script_UserPostAnalysis.py``,
-illustrates this::
-
- from Physical_data_and_covariance_matrices import True_state
- import numpy
- #
- xt, names = True_state()
- xa = ADD.get("Analysis")[-1]
- x_series = ADD.get("CurrentState")[:]
- J = ADD.get("CostFunctionJ")[:]
- #
- # Verifying the results by printing
- # ---------------------------------
- print
- print "xt = %s"%xt
- print "xa = %s"%numpy.array(xa)
- print
- for i in range( len(x_series) ):
-        print "Step %2i : J = %.5e and X = %s"%(i, J[i], x_series[i])
- print
-
-At the end, we get a description of the whole case setup through a set of files
-listed here:
-
-#. ``Physical_data_and_covariance_matrices.py``
-#. ``Physical_simulation_functions.py``
-#. ``Script_AlgorithmParameters.py``
-#. ``Script_BackgroundError_B.py``
-#. ``Script_Background_xb.py``
-#. ``Script_ObservationError_R.py``
-#. ``Script_Observation_yo.py``
-#. ``Script_UserPostAnalysis.py``
-
-We insist here that all these scripts are written by the user and cannot be
-automatically tested. So the user is required to verify the scripts (and in
-particular their input/output) in order to limit the difficulty of debugging. We
-recall: **script methodology is not a "safe" procedure, in the sense that
-erroneous data, or errors in calculations, can be directly injected into the
-YACS scheme execution.**
-
-Building the case with external data definition by scripts
-++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-All these scripts can then be used to define the ADAO case with external data
-definition by Python script files. It is entirely similar to the method
-described in the `Building a simple estimation case with external data
-definition by scripts`_ previous section. For each variable to be defined, we
-select the "*Script*" option of the "*FROM*" keyword, which leads to a
-"*SCRIPT_DATA/SCRIPT_FILE*" entry in the tree. For the "*ObservationOperator*"
-keyword, we choose the "*ScriptWithOneFunction*" form and keep the default
-differential increment.
-
-The other steps to build the ADAO case are exactly the same as in the `Building
-a simple estimation case with explicit data definition`_ previous section.
-
-Using the simple linear operator :math:`\mathbf{H}` from the Python script file
-``Physical_simulation_functions.py`` in the ADAO examples standard directory,
-the results will look like::
-
- xt = [1 2 3]
- xa = [ 1.000014 2.000458 3.000390]
-
-    Step  0 : J = 1.81750e+03 and X = [1.014011, 2.459175, 3.390462]
-    Step  1 : J = 1.81750e+03 and X = [1.014011, 2.459175, 3.390462]
-    Step  2 : J = 1.79734e+01 and X = [1.010771, 2.040342, 2.961378]
-    Step  3 : J = 1.79734e+01 and X = [1.010771, 2.040342, 2.961378]
-    Step  4 : J = 1.81909e+00 and X = [1.000826, 2.000352, 3.000487]
-    Step  5 : J = 1.81909e+00 and X = [1.000826, 2.000352, 3.000487]
-    Step  6 : J = 1.81641e+00 and X = [1.000247, 2.000651, 3.000156]
-    Step  7 : J = 1.81641e+00 and X = [1.000247, 2.000651, 3.000156]
-    Step  8 : J = 1.81569e+00 and X = [1.000015, 2.000432, 3.000364]
-    Step  9 : J = 1.81569e+00 and X = [1.000015, 2.000432, 3.000364]
-    Step 10 : J = 1.81568e+00 and X = [1.000013, 2.000458, 3.000390]
- ...
-
-The state at the first step is the randomly generated background state
-:math:`\mathbf{x}^b`. After completion, this printing on standard output is
-available in the "*YACS Container Log*", obtained through the right click menu
-of the "*proc*" window in the YACS scheme.
-
-.. [#] For more information on YACS, see the *YACS module User's Guide* available in the main "*Help*" menu of SALOME GUI.
+++ /dev/null
-.. _section_glossary:
-
-Glossary
-========
-
-.. glossary::
- :sorted:
-
- case
- One case is defined by a set of data and of choices, packed together
- through the user interface of the module. The data are physical
- measurements that have to be available before or during the case
-      execution. The simulation code(s), the assimilation methods and their
-      parameters have to be chosen; they define the execution properties of the
-      case.
-
- iteration
- One iteration occurs when using iterative optimizers (e.g. 3DVAR), and it
- is entirely hidden in the main YACS OptimizerLoop Node named
- "compute_bloc". Nevertheless, the user can watch the iterative process
- through the *YACS Container Log* window, which is updated during the
- process, and using *Observers* attached to calculation variables.
-
- APosterioriCovariance
- Keyword to indicate the covariance matrix of *a posteriori* analysis
- errors.
-
- BMA (Background minus Analysis)
- Difference between the simulation based on the background state and the
-      one based on the optimal state estimation, noted as :math:`\mathbf{x}^b -
- \mathbf{x}^a`.
-
- OMA (Observation minus Analysis)
- Difference between the observations and the result of the simulation based
- on the optimal state estimation, the analysis, filtered to be compatible
- with the observation, noted as :math:`\mathbf{y}^o -
- \mathbf{H}\mathbf{x}^a`.
-
- OMB (Observation minus Background)
- Difference between the observations and the result of the simulation based
- on the background state, filtered to be compatible with the observation,
- noted as :math:`\mathbf{y}^o - \mathbf{H}\mathbf{x}^b`.
-
- SigmaBck2
- Keyword to indicate the Desroziers-Ivanov parameter measuring the
- background part consistency of the data assimilation optimal state
- estimation. It can be compared to 1.
-
- SigmaObs2
- Keyword to indicate the Desroziers-Ivanov parameter measuring the
- observation part consistency of the data assimilation optimal state
- estimation. It can be compared to 1.
-
- MahalanobisConsistency
- Keyword to indicate the Mahalanobis parameter measuring the consistency of
- the data assimilation optimal state estimation. It can be compared to 1.
-
- analysis
- The optimal state estimation through a data assimilation or optimization
- procedure.
-
- innovation
- Difference between the observations and the result of the simulation based
- on the background state, filtered to be compatible with the observation.
-      It is similar to OMB in static cases.
-
- CostFunctionJ
- Keyword to indicate the minimization function, noted as :math:`J`.
-
- CostFunctionJo
- Keyword to indicate the observation part of the minimization function,
- noted as :math:`J^o`.
-
- CostFunctionJb
- Keyword to indicate the background part of the minimization function,
- noted as :math:`J^b`.
+++ /dev/null
-================================================================================
-ADAO module documentation
-================================================================================
-
-.. image:: images/ADAO_logo.png
- :align: center
- :width: 20%
-
-The ADAO module provides **data assimilation and optimization** features in
-SALOME context. It is based on usage of other SALOME modules, namely YACS and
-EFICAS, and on usage of a generic underlying data assimilation library.
-
-Briefly stated, Data Assimilation is a methodological framework to compute the
-optimal estimate of the inaccessible true value of a system state over time. It
-uses information coming from experimental measurements or observations, and from
-numerical *a priori* models, including information about their errors. Parts of
-the framework are also known under the names of *parameter estimation*, *inverse
-problems*, *Bayesian estimation*, *optimal interpolation*, etc. More details can
-be found in the section :ref:`section_theory`.
-
-The documentation of this module is divided into parts. The first one
-:ref:`section_intro` is an introduction. The second part :ref:`section_theory`
-briefly introduces data assimilation, optimization and concepts. The third part
-:ref:`section_using` describes how to use the module ADAO. The fourth part
-:ref:`section_reference` gives a detailed description of all the ADAO commands
-and keywords. The fifth part :ref:`section_examples` gives examples on ADAO
-usage. Users interested in quick use of the module can jump to this section, but
-a valuable use of the module requires reading and regularly coming back to the
-third and fourth ones. The last part :ref:`section_advanced` focuses on advanced
-usages of the module, how to get more information, or how to use it by
-scripting, without the graphical user interface (GUI).
-
-In all this documentation, we use standard notations of linear algebra, data
-assimilation (as described in [Ide97]_) and optimization. In particular, vectors
-are written horizontally or vertically without distinction. Matrices are
-written either normally, or with a condensed notation consisting in the use of
-a space to separate values and a "``;``" to separate the rows, in a continuous
-line.
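-
-For example, with this condensed notation, the 3x3 identity matrix can be
-written as "``1 0 0 ; 0 1 0 ; 0 0 1``".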
-
-Table of contents
------------------
-
-.. toctree::
- :maxdepth: 2
-
- intro
- theory
- using
- reference
- examples
- advanced
- licence
- bibliography
-
-Indices and tables
-------------------
-
-* :ref:`genindex`
-* :ref:`search`
-* :ref:`section_glossary`
+++ /dev/null
-.. _section_intro:
-
-================================================================================
-Introduction to ADAO
-================================================================================
-
-The aim of the ADAO module is **to help using data assimilation or optimization
-methodology in conjunction with other modules in SALOME**. The ADAO module
-provides an interface to some standard algorithms of data assimilation or
-optimization, and allows their integration in a SALOME study. Calculation or
-simulation modules have to provide one or more specific calling methods in order
-to be callable in the SALOME/ADAO framework, and all the SALOME modules can be
-used through the YACS integration of ADAO.
-
-Its main objective is to *facilitate the use of various standard data
-assimilation or optimization methods*, while remaining easy to use and providing
-a path to help the implementation. For an end user, having already gathered his
-physical input information, it's a matter of "point\&click" to build a valid
-ADAO case and to evaluate it.
-
-The module covers a wide variety of practical applications in a robust way,
-allowing real engineering applications but also quick experimental setups to be
-performed. Its methodological and numerical scalability makes it possible to
-extend the application domain.
+++ /dev/null
-.. _section_licence:
-
-================================================================================
-Licence and requirements for the module
-================================================================================
-
-.. index:: single: LICENCE
-.. index:: single: SALOME
-.. index:: single: ADAO
-
-The licence for this module is the GNU Lesser General Public License (Lesser
-GPL), as stated here and in the source files::
-
- <ADAO, a SALOME module for Data Assimilation and Optimization>
-
- Copyright (C) 2008-2013 EDF R&D
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-
-In addition, we expect that all publications describing work using this
-software, or all commercial products using it, quote at least one of the
-references given below:
-
- * *ADAO, a SALOME module for Data Assimilation and Optimization*,
- http://www.salome-platform.org/
-
- * *SALOME The Open Source Integration Platform for Numerical Simulation*,
- http://www.salome-platform.org/
-
-The documentation of the module is also covered by the licence and the
-requirement of quoting.
+++ /dev/null
-.. _section_reference:
-
-================================================================================
-Reference description of the ADAO commands and keywords
-================================================================================
-
-This section presents the reference description of the ADAO commands and
-keywords available through the GUI or through scripts.
-
-Each command or keyword to be defined through the ADAO GUI has some properties.
-The first property is to be *required*, *optional* or only factual, describing a
-type of input. The second property is to be an "open" variable with a fixed type
-but with any value allowed by the type, or a "restricted" variable, limited to
-some specified values. The EFICAS editor GUI having built-in validating
-capacities, the properties of the commands or keywords given through this GUI
-are automatically correct.
-
-The mathematical notations used afterward are explained in the section
-:ref:`section_theory`.
-
-Examples of using these commands are available in the section
-:ref:`section_examples` and in example files installed with ADAO module.
-
-List of possible input types
-----------------------------
-
-.. index:: single: Dict
-.. index:: single: Function
-.. index:: single: Matrix
-.. index:: single: ScalarSparseMatrix
-.. index:: single: DiagonalSparseMatrix
-.. index:: single: String
-.. index:: single: Script
-.. index:: single: Vector
-
-Each ADAO variable has a pseudo-type to help in filling it and validating it.
-The different pseudo-types are:
-
-**Dict**
- This indicates a variable that has to be filled by a dictionary, usually
- given as a script.
-
-**Function**
- This indicates a variable that has to be filled by a function, usually given
- as a script or a component method.
-
-**Matrix**
- This indicates a variable that has to be filled by a matrix, usually given
- either as a string or as a script.
-
-**ScalarSparseMatrix**
- This indicates a variable that has to be filled by a unique number, which
- will be used to multiply an identity matrix, usually given either as a
- string or as a script.
-
-**DiagonalSparseMatrix**
-    This indicates a variable that has to be filled by a vector, which will be
-    put on the diagonal of an identity matrix, usually given either as a string
-    or as a script.
-
-**Script**
- This indicates a script given as an external file. It can be described by a
- full absolute path name or only by the file name without path.
-
-**String**
- This indicates a string giving a literal representation of a matrix, a
-    vector or a vector series, such as "1 2 ; 3 4" for a square 2x2 matrix.
-
-**Vector**
- This indicates a variable that has to be filled by a vector, usually given
- either as a string or as a script.
-
-**VectorSerie**
-    This indicates a variable that has to be filled by a list of vectors,
-    usually given either as a string or as a script.
-
-When a command or keyword can be filled by a script file name, the script has to
-contain a variable or a method that has the same name as the one to be filled.
-In other words, when importing the script in a YACS Python node, it must create
-a variable of the expected name in the current namespace.
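-
-For example, a script file intended to fill the "*Background*" keyword only
-needs to define a variable with that exact name, as in this minimal sketch::
-
-    # Minimal script for a "Background" entry: the variable name has to
-    # match the keyword to be filled
-    Background = [0, 0, 0]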
-
-Reference description for ADAO calculation cases
-------------------------------------------------
-
-List of commands and keywords for an ADAO calculation case
-++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. index:: single: ASSIMILATION_STUDY
-.. index:: single: Algorithm
-.. index:: single: AlgorithmParameters
-.. index:: single: Background
-.. index:: single: BackgroundError
-.. index:: single: ControlInput
-.. index:: single: Debug
-.. index:: single: EvolutionError
-.. index:: single: EvolutionModel
-.. index:: single: InputVariables
-.. index:: single: Observation
-.. index:: single: ObservationError
-.. index:: single: ObservationOperator
-.. index:: single: Observers
-.. index:: single: OutputVariables
-.. index:: single: Study_name
-.. index:: single: Study_repertory
-.. index:: single: UserDataInit
-.. index:: single: UserPostAnalysis
-
-The first set of commands is related to the description of a calculation case,
-that is a *Data Assimilation* procedure or an *Optimization* procedure. The
-terms are ordered alphabetically, except the first, which describes the
-choice between calculation and checking. The different commands are the
-following:
-
-**ASSIMILATION_STUDY**
- *Required command*. This is the general command describing the data
- assimilation or optimization case. It hierarchically contains all the other
- commands.
-
-**Algorithm**
- *Required command*. This is a string to indicate the data assimilation or
- optimization algorithm chosen. The choices are limited and available through
- the GUI. There exists for example "3DVAR", "Blue"... See below the list of
- algorithms and associated parameters in the following subsection `Options
- and required commands for calculation algorithms`_.
-
-**AlgorithmParameters**
-    *Optional command*. This command allows one to add some optional parameters
-    to control the data assimilation or optimization algorithm. It is defined as a
- "*Dict*" type object, that is, given as a script. See below the list of
- algorithms and associated parameters in the following subsection `Options
- and required commands for calculation algorithms`_.
-
-**Background**
- *Required command*. This indicates the background or initial vector used,
- previously noted as :math:`\mathbf{x}^b`. It is defined as a "*Vector*" type
- object, that is, given either as a string or as a script.
-
-**BackgroundError**
- *Required command*. This indicates the background error covariance matrix,
- previously noted as :math:`\mathbf{B}`. It is defined as a "*Matrix*" type
- object, a "*ScalarSparseMatrix*" type object, or a "*DiagonalSparseMatrix*"
- type object, that is, given either as a string or as a script.
-
-**ControlInput**
- *Optional command*. This indicates the control vector used to force the
- evolution model at each step, usually noted as :math:`\mathbf{U}`. It is
- defined as a "*Vector*" or a *VectorSerie* type object, that is, given
- either as a string or as a script. When there is no control, it has to be a
- void string ''.
-
-**Debug**
-    *Required command*. This defines the level of trace and intermediary debug
- information. The choices are limited between 0 (for False) and 1 (for
- True).
-
-**EvolutionError**
- *Optional command*. This indicates the evolution error covariance matrix,
- usually noted as :math:`\mathbf{Q}`. It is defined as a "*Matrix*" type
- object, a "*ScalarSparseMatrix*" type object, or a "*DiagonalSparseMatrix*"
- type object, that is, given either as a string or as a script.
-
-**EvolutionModel**
- *Optional command*. This indicates the evolution model operator, usually
- noted :math:`M`, which describes a step of evolution. It is defined as a
- "*Function*" type object, that is, given as a script. Different functional
- forms can be used, as described in the following subsection `Requirements
- for functions describing an operator`_. If there is some control :math:`U`
- included in the evolution model, the operator has to be applied to a pair
- :math:`(X,U)`.
-
-**InputVariables**
-    *Optional command*. This command allows one to indicate the name and size of
- physical variables that are bundled together in the control vector. This
- information is dedicated to data processed inside an algorithm.
-
-**Observation**
- *Required command*. This indicates the observation vector used for data
- assimilation or optimization, previously noted as :math:`\mathbf{y}^o`. It
- is defined as a "*Vector*" or a *VectorSerie* type object, that is, given
- either as a string or as a script.
-
-**ObservationError**
- *Required command*. This indicates the observation error covariance matrix,
- previously noted as :math:`\mathbf{R}`. It is defined as a "*Matrix*" type
- object, a "*ScalarSparseMatrix*" type object, or a "*DiagonalSparseMatrix*"
- type object, that is, given either as a string or as a script.
-
-**ObservationOperator**
- *Required command*. This indicates the observation operator, previously
- noted :math:`H`, which transforms the input parameters :math:`\mathbf{x}` to
- results :math:`\mathbf{y}` to be compared to observations
- :math:`\mathbf{y}^o`. It is defined as a "*Function*" type object, that is,
- given as a script. Different functional forms can be used, as described in
- the following subsection `Requirements for functions describing an
- operator`_. If there is some control :math:`U` included in the observation,
- the operator has to be applied to a pair :math:`(X,U)`.
-
-**Observers**
-    *Optional command*. This command allows one to set internal observers, that are
- functions linked with a particular variable, which will be executed each
- time this variable is modified. It is a convenient way to monitor variables
- of interest during the data assimilation or optimization process, by
- printing or plotting it, etc. Common templates are provided to help the user
- to start or to quickly make his case.
-
-**OutputVariables**
-    *Optional command*. This command allows one to indicate the name and size of
- physical variables that are bundled together in the output observation
- vector. This information is dedicated to data processed inside an algorithm.
-
-**Study_name**
- *Required command*. This is an open string to describe the study by a name
- or a sentence.
-
-**Study_repertory**
-    *Optional command*. If available, this directory is used to find all the
- script files that can be used to define some other commands by scripts.
-
-**UserDataInit**
-    *Optional command*. This command allows one to initialize some parameters or
- data automatically before data assimilation algorithm processing.
-
-**UserPostAnalysis**
-    *Optional command*. This command allows one to process some parameters or data
- automatically after data assimilation algorithm processing. It is defined as
-    a script or a string, allowing one to put post-processing code directly inside
- the ADAO case. Common templates are provided to help the user to start or
- to quickly make his case.
-
-Options and required commands for calculation algorithms
-++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. index:: single: 3DVAR
-.. index:: single: Blue
-.. index:: single: EnsembleBlue
-.. index:: single: KalmanFilter
-.. index:: single: ExtendedKalmanFilter
-.. index:: single: LinearLeastSquares
-.. index:: single: NonLinearLeastSquares
-.. index:: single: ParticleSwarmOptimization
-.. index:: single: QuantileRegression
-
-.. index:: single: AlgorithmParameters
-.. index:: single: Bounds
-.. index:: single: CostDecrementTolerance
-.. index:: single: GradientNormTolerance
-.. index:: single: GroupRecallRate
-.. index:: single: MaximumNumberOfSteps
-.. index:: single: Minimizer
-.. index:: single: NumberOfInsects
-.. index:: single: ProjectedGradientTolerance
-.. index:: single: QualityCriterion
-.. index:: single: Quantile
-.. index:: single: SetSeed
-.. index:: single: StoreInternalVariables
-.. index:: single: StoreSupplementaryCalculations
-.. index:: single: SwarmVelocity
-
-Each algorithm can be controlled using some generic or specific options given
-through the "*AlgorithmParameters*" optional command, as follows for example::
-
- AlgorithmParameters = {
- "Minimizer" : "LBFGSB",
- "MaximumNumberOfSteps" : 25,
- "StoreSupplementaryCalculations" : ["APosterioriCovariance","OMA"],
- }
-
-This section describes the available options algorithm by algorithm. If an
-option is specified for an algorithm that doesn't support it, the option is
-simply left unused. The meaning of the acronyms or particular names can be found
-in the :ref:`genindex` or the :ref:`section_glossary`. In addition, for each
-algorithm, the required commands/keywords are given, being described in `List of
-commands and keywords for an ADAO calculation case`_.
-
-**"Blue"**
-
- *Required commands*
- *"Background", "BackgroundError",
- "Observation", "ObservationError",
- "ObservationOperator"*
-
- StoreInternalVariables
-    This boolean key allows one to store default internal variables, mainly the
- current state during iterative optimization process. Be careful, this can be
- a numerically costly choice in certain calculation cases. The default is
- "False".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["APosterioriCovariance", "BMA", "OMA", "OMB", "Innovation",
- "SigmaBck2", "SigmaObs2", "MahalanobisConsistency"].
-
-**"LinearLeastSquares"**
-
- *Required commands*
- *"Observation", "ObservationError",
- "ObservationOperator"*
-
- StoreInternalVariables
-    This boolean key allows one to store default internal variables, mainly the
- current state during iterative optimization process. Be careful, this can be
- a numerically costly choice in certain calculation cases. The default is
- "False".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["OMA"].
-
-**"3DVAR"**
-
- *Required commands*
- *"Background", "BackgroundError",
- "Observation", "ObservationError",
- "ObservationOperator"*
-
- Minimizer
-    This key allows one to choose the optimization minimizer. The default choice
- is "LBFGSB", and the possible ones are "LBFGSB" (nonlinear constrained
- minimizer, see [Byrd95]_ and [Zhu97]_), "TNC" (nonlinear constrained
- minimizer), "CG" (nonlinear unconstrained minimizer), "BFGS" (nonlinear
- unconstrained minimizer), "NCG" (Newton CG minimizer).
-
- Bounds
-    This key allows one to define upper and lower bounds for every control
- variable being optimized. Bounds can be given by a list of list of pairs
- of lower/upper bounds for each variable, with possibly ``None`` every time
- there is no bound. The bounds can always be specified, but they are taken
- into account only by the constrained minimizers.
-
- MaximumNumberOfSteps
- This key indicates the maximum number of iterations allowed for iterative
- optimization. The default is 15000, which is very similar to no limit on
- iterations. It is then recommended to adapt this parameter to the needs on
- real problems. For some minimizers, the effective stopping step can be
- slightly different due to algorithm internal control requirements.
-
- CostDecrementTolerance
- This key indicates a limit value, leading to stop successfully the
- iterative optimization process when the cost function decreases less than
- this tolerance at the last step. The default is 1.e-7, and it is
- recommended to adapt it to the needs on real problems.
-
- ProjectedGradientTolerance
- This key indicates a limit value, leading to stop successfully the iterative
- optimization process when all the components of the projected gradient are
- under this limit. It is only used for constrained minimizers. The default is
- -1, that is the internal default of each minimizer (generally 1.e-5), and it
- is not recommended to change it.
-
- GradientNormTolerance
- This key indicates a limit value, leading to stop successfully the
- iterative optimization process when the norm of the gradient is under this
- limit. It is only used for non-constrained minimizers. The default is
- 1.e-5 and it is not recommended to change it.
-
- StoreInternalVariables
-    This boolean key allows one to store default internal variables, mainly the
- current state during iterative optimization process. Be careful, this can be
- a numerically costly choice in certain calculation cases. The default is
- "False".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["APosterioriCovariance", "BMA", "OMA", "OMB", "Innovation",
- "SigmaObs2", "MahalanobisConsistency"].
-
-**"NonLinearLeastSquares"**
-
- *Required commands*
- *"Background",
- "Observation", "ObservationError",
- "ObservationOperator"*
-
- Minimizer
-    This key allows one to choose the optimization minimizer. The default choice
- is "LBFGSB", and the possible ones are "LBFGSB" (nonlinear constrained
- minimizer, see [Byrd95]_ and [Zhu97]_), "TNC" (nonlinear constrained
- minimizer), "CG" (nonlinear unconstrained minimizer), "BFGS" (nonlinear
- unconstrained minimizer), "NCG" (Newton CG minimizer).
-
- Bounds
-    This key allows one to define upper and lower bounds for every control
- variable being optimized. Bounds can be given by a list of list of pairs
- of lower/upper bounds for each variable, with possibly ``None`` every time
- there is no bound. The bounds can always be specified, but they are taken
- into account only by the constrained minimizers.
-
- MaximumNumberOfSteps
- This key indicates the maximum number of iterations allowed for iterative
- optimization. The default is 15000, which is very similar to no limit on
- iterations. It is then recommended to adapt this parameter to the needs on
- real problems. For some minimizers, the effective stopping step can be
- slightly different due to algorithm internal control requirements.
-
- CostDecrementTolerance
- This key indicates a limit value, leading to stop successfully the
- iterative optimization process when the cost function decreases less than
- this tolerance at the last step. The default is 1.e-7, and it is
- recommended to adapt it to the needs on real problems.
-
- ProjectedGradientTolerance
- This key indicates a limit value, leading to stop successfully the iterative
- optimization process when all the components of the projected gradient are
- under this limit. It is only used for constrained minimizers. The default is
- -1, that is the internal default of each minimizer (generally 1.e-5), and it
- is not recommended to change it.
-
- GradientNormTolerance
- This key indicates a limit value, leading to stop successfully the
- iterative optimization process when the norm of the gradient is under this
- limit. It is only used for non-constrained minimizers. The default is
- 1.e-5 and it is not recommended to change it.
-
- StoreInternalVariables
-    This boolean key allows one to store default internal variables, mainly the
- current state during iterative optimization process. Be careful, this can be
- a numerically costly choice in certain calculation cases. The default is
- "False".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["BMA", "OMA", "OMB", "Innovation"].
-
-**"EnsembleBlue"**
-
- *Required commands*
- *"Background", "BackgroundError",
- "Observation", "ObservationError",
- "ObservationOperator"*
-
- SetSeed
-    This key allows one to give an integer in order to fix the seed of the
-    random generator used to generate the ensemble. A convenient value is for
-    example 1000. By default, the seed is left uninitialized, and so uses the
-    default initialization from the computer.
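-
-  For example, fixing the seed to get a reproducible ensemble generation can be
-  done through a parameters dictionary as simple as the following sketch::
-
-    AlgorithmParameters = {
-        "SetSeed" : 1000,
-        }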
-
-**"KalmanFilter"**
-
- *Required commands*
- *"Background", "BackgroundError",
- "Observation", "ObservationError",
- "ObservationOperator",
- "EvolutionModel", "EvolutionError",
- "ControlInput"*
-
- EstimationOf
-    This key allows one to choose the type of estimation to be performed. It can be
- either state-estimation, named "State", or parameter-estimation, named
- "Parameters". The default choice is "State".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["APosterioriCovariance", "BMA", "Innovation"].
-
-**"ExtendedKalmanFilter"**
-
- *Required commands*
- *"Background", "BackgroundError",
- "Observation", "ObservationError",
- "ObservationOperator",
- "EvolutionModel", "EvolutionError",
- "ControlInput"*
-
- Bounds
-    This key allows one to define upper and lower bounds for every control variable
- being optimized. Bounds can be given by a list of list of pairs of
- lower/upper bounds for each variable, with extreme values every time there
- is no bound. The bounds can always be specified, but they are taken into
- account only by the constrained minimizers.
-
- ConstrainedBy
-    This key allows one to define the method to take bounds into account. The
- possible methods are in the following list: ["EstimateProjection"].
-
- EstimationOf
-    This key allows one to choose the type of estimation to be performed. It can be
- either state-estimation, named "State", or parameter-estimation, named
- "Parameters". The default choice is "State".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["APosterioriCovariance", "BMA", "Innovation"].
-
-**"ParticleSwarmOptimization"**
-
- *Required commands*
- *"Background", "BackgroundError",
- "Observation", "ObservationError",
- "ObservationOperator"*
-
- MaximumNumberOfSteps
- This key indicates the maximum number of iterations allowed for iterative
- optimization. The default is 50, which is an arbitrary limit. It is then
- recommended to adapt this parameter to the needs on real problems.
-
- NumberOfInsects
- This key indicates the number of insects or particles in the swarm. The
- default is 100, which is a usual default for this algorithm.
-
- SwarmVelocity
- This key indicates the part of the insect velocity which is imposed by the
- swarm. It is a positive floating point value. The default value is 1.
-
- GroupRecallRate
- This key indicates the recall rate at the best swarm insect. It is a
- floating point value between 0 and 1. The default value is 0.5.
-
- QualityCriterion
- This key indicates the quality criterion, minimized to find the optimal
- state estimate. The default is the usual data assimilation criterion named
-    "DA", the augmented ponderated least squares. The possible criteria have to
-    be in the following list, where the equivalent names are indicated by "=":
- ["AugmentedPonderatedLeastSquares"="APLS"="DA",
- "PonderatedLeastSquares"="PLS", "LeastSquares"="LS"="L2",
- "AbsoluteValue"="L1", "MaximumError"="ME"]
-
- SetSeed
-    This key allows one to give an integer in order to fix the seed of the
-    random generator used to generate the ensemble. A convenient value is for
-    example 1000. By default, the seed is left uninitialized, and so uses the
-    default initialization from the computer.
-
- StoreInternalVariables
-    This boolean key allows one to store default internal variables, mainly the
- current state during iterative optimization process. Be careful, this can be
- a numerically costly choice in certain calculation cases. The default is
- "False".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["BMA", "OMA", "OMB", "Innovation"].
-
-**"QuantileRegression"**
-
- *Required commands*
- *"Background",
- "Observation",
- "ObservationOperator"*
-
- Quantile
-    This key allows one to define the real value of the desired quantile, between
- 0 and 1. The default is 0.5, corresponding to the median.
-
- Minimizer
-    This key allows one to choose the optimization minimizer. The default choice
- and only available choice is "MMQR" (Majorize-Minimize for Quantile
- Regression).
-
- MaximumNumberOfSteps
- This key indicates the maximum number of iterations allowed for iterative
- optimization. The default is 15000, which is very similar to no limit on
- iterations. It is then recommended to adapt this parameter to the needs on
- real problems.
-
- CostDecrementTolerance
- This key indicates a limit value, leading to stop successfully the
- iterative optimization process when the cost function or the surrogate
- decreases less than this tolerance at the last step. The default is 1.e-6,
- and it is recommended to adapt it to the needs on real problems.
-
- StoreInternalVariables
-    This boolean key allows one to store default internal variables, mainly the
- current state during iterative optimization process. Be careful, this can be
- a numerically costly choice in certain calculation cases. The default is
- "False".
-
- StoreSupplementaryCalculations
- This list indicates the names of the supplementary variables that can be
- available at the end of the algorithm. It involves potentially costly
- calculations. The default is a void list, none of these variables being
- calculated and stored by default. The possible names are in the following
- list: ["BMA", "OMA", "OMB", "Innovation"].
-
-Reference description for ADAO checking cases
----------------------------------------------
-
-List of commands and keywords for an ADAO checking case
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. index:: single: CHECKING_STUDY
-.. index:: single: Algorithm
-.. index:: single: AlgorithmParameters
-.. index:: single: CheckingPoint
-.. index:: single: Debug
-.. index:: single: ObservationOperator
-.. index:: single: Study_name
-.. index:: single: Study_repertory
-.. index:: single: UserDataInit
-
-The second set of commands is related to the description of a checking case,
-that is a procedure to check required properties of information used somewhere
-else by a calculation case. The terms are ordered alphabetically, except the
-first, which describes the choice between calculation and checking. The
-different commands are the following:
-
-**CHECKING_STUDY**
- *Required command*. This is the general command describing the checking
- case. It hierarchically contains all the other commands.
-
-**Algorithm**
- *Required command*. This is a string to indicate the data assimilation or
- optimization algorithm chosen. The choices are limited and available through
- the GUI. There exists for example "FunctionTest", "AdjointTest"... See below
- the list of algorithms and associated parameters in the following subsection
- `Options and required commands for checking algorithms`_.
-
-**AlgorithmParameters**
-    *Optional command*. This command allows one to add some optional parameters
-    to control the data assimilation or optimization algorithm. It is defined as a
- "*Dict*" type object, that is, given as a script. See below the list of
- algorithms and associated parameters in the following subsection `Options
- and required commands for checking algorithms`_.
-
-**CheckingPoint**
- *Required command*. This indicates the vector used,
- previously noted as :math:`\mathbf{x}^b`. It is defined as a "*Vector*" type
- object, that is, given either as a string or as a script.
-
-**Debug**
-    *Required command*. This defines the level of trace and intermediary debug
- information. The choices are limited between 0 (for False) and 1 (for
- True).
-
-**ObservationOperator**
- *Required command*. This indicates the observation operator, previously
- noted :math:`H`, which transforms the input parameters :math:`\mathbf{x}` to
- results :math:`\mathbf{y}` to be compared to observations
- :math:`\mathbf{y}^o`. It is defined as a "*Function*" type object, that is,
- given as a script. Different functional forms can be used, as described in
- the following subsection `Requirements for functions describing an
- operator`_.
-
-**Study_name**
- *Required command*. This is an open string to describe the study by a name
- or a sentence.
-
-**Study_repertory**
-    *Optional command*. If available, this directory is used to find all the
- script files that can be used to define some other commands by scripts.
-
-**UserDataInit**
-    *Optional command*. This command allows one to initialize some parameters or
- data automatically before data assimilation algorithm processing.
-
-Options and required commands for checking algorithms
-+++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. index:: single: AdjointTest
-.. index:: single: FunctionTest
-.. index:: single: GradientTest
-.. index:: single: LinearityTest
-
-.. index:: single: AlgorithmParameters
-.. index:: single: AmplitudeOfInitialDirection
-.. index:: single: EpsilonMinimumExponent
-.. index:: single: InitialDirection
-.. index:: single: ResiduFormula
-.. index:: single: SetSeed
-
-We recall that each algorithm can be controlled using some generic or specific
-options given through the "*AlgorithmParameters*" optional command, as follows
-for example::
-
- AlgorithmParameters = {
- "AmplitudeOfInitialDirection" : 1,
- "EpsilonMinimumExponent" : -8,
- }
-
-If an option is specified for an algorithm that doesn't support it, the option
-is simply left unused. The meaning of the acronyms or particular names can be
-found in the :ref:`genindex` or the :ref:`section_glossary`. In addition, for
-each algorithm, the required commands/keywords are given, being described in
-`List of commands and keywords for an ADAO checking case`_.
-
-**"AdjointTest"**
-
- *Required commands*
- *"CheckingPoint",
- "ObservationOperator"*
-
- AmplitudeOfInitialDirection
-    This key indicates the scaling of the initial perturbation, built as a
-    vector used for the directional derivative around the nominal checking
-    point. The default is 1, which means no scaling.
-
- EpsilonMinimumExponent
- This key indicates the minimal exponent value of the power of 10 coefficient
- to be used to decrease the increment multiplier. The default is -8, and it
-    has to be between 0 and -20. For example, its default value leads to
-    calculating the residue of the scalar product formula with a fixed
-    increment multiplied from 1.e0 to 1.e-8.
-
- InitialDirection
- This key indicates the vector direction used for the directional derivative
- around the nominal checking point. It has to be a vector. If not specified,
-    this direction defaults to a random perturbation around zero, of the same
-    vector size as the checking point.
-
- SetSeed
-    This key allows one to give an integer in order to fix the seed of the
-    random generator used to generate the ensemble. A convenient value is for
-    example 1000. By default, the seed is left uninitialized, and so uses the
-    default initialization from the computer.
-
-**"FunctionTest"**
-
- *Required commands*
- *"CheckingPoint",
- "ObservationOperator"*
-
- No option
-
-**"GradientTest"**
-
- *Required commands*
- *"CheckingPoint",
- "ObservationOperator"*
-
- AmplitudeOfInitialDirection
-    This key indicates the scaling of the initial perturbation, built as a
-    vector used for the directional derivative around the nominal checking
-    point. The default is 1, which means no scaling.
-
- EpsilonMinimumExponent
- This key indicates the minimal exponent value of the power of 10 coefficient
- to be used to decrease the increment multiplier. The default is -8, and it
- has to be between 0 and -20. For example, its default value leads to
- calculate the residue of the scalar product formula with a fixed increment
- multiplied from 1.e0 to 1.e-8.
-
- InitialDirection
- This key indicates the vector direction used for the directional derivative
- around the nominal checking point. It has to be a vector. If not specified,
- this direction defaults to a random perturbation around zero of the same
- vector size than the checking point.
-
- ResiduFormula
- This key indicates the residue formula that has to be used for the test. The
- default choice is "Taylor", and the possible ones are "Taylor" (residue of
- the Taylor development of the operator, which has to decrease with the power
- of 2 in perturbation) and "Norm" (residue obtained by taking the norm of the
- Taylor development at zero order approximation, which approximate the
- gradient, and which has to remain constant).
-
- SetSeed
- This key allow to give an integer in order to fix the seed of the random
- generator used to generate the ensemble. A convenient value is for example
- 1000. By default, the seed is left uninitialized, and so use the default
- initialization from the computer.
-
-**"LinearityTest"**
-
- *Required commands*
- *"CheckingPoint",
- "ObservationOperator"*
-
- AmplitudeOfInitialDirection
-    This key indicates the scaling of the initial perturbation built as a
-    vector used for the directional derivative around the nominal checking
-    point. The default is 1, which means no scaling.
-
-  EpsilonMinimumExponent
-    This key indicates the minimal exponent value of the power of 10
-    coefficient to be used to decrease the increment multiplier. The default
-    is -8, and it has to be between 0 and -20. For example, its default value
-    leads to computing the residue of the scalar product formula with an
-    increment multiplier decreasing from 1.e0 to 1.e-8.
-
-  InitialDirection
-    This key indicates the vector direction used for the directional
-    derivative around the nominal checking point. It has to be a vector. If
-    not specified, this direction defaults to a random perturbation around
-    zero, of the same size as the checking point vector.
-
-  ResiduFormula
-    This key indicates the residue formula that has to be used for the test.
-    The default choice is "CenteredDL", and the possible ones are "CenteredDL"
-    (residue of the difference between the function at the nominal point and
-    its values with positive and negative increments, which has to stay very
-    small), "Taylor" (residue of the Taylor development of the operator
-    normalized by the nominal value, which has to stay very small),
-    "NominalTaylor" (residue of the order 1 approximations of the operator,
-    normalized to the nominal point, which has to stay close to 1), and
-    "NominalTaylorRMS" (residue of the order 1 approximations of the operator,
-    normalized by RMS to the nominal point, which has to stay close to 0).
-
-  SetSeed
-    This key allows one to give an integer in order to fix the seed of the
-    random generator used to generate the ensemble. A convenient value is for
-    example 1000. By default, the seed is left uninitialized, so the default
-    initialization of the computer is used.
-
-Requirements for functions describing an operator
--------------------------------------------------
-
-The operators for observation and evolution are required to implement the data
-assimilation or optimization procedures. They include the physical simulation
-by numerical computations, but also the filtering and restriction steps needed
-to compare the simulation to the observation. The evolution operator is
-considered here in its incremental form, representing the transition between
-two successive states, and is then similar to the observation operator.
-
-Schematically, an operator has to give an output solution for some given input
-parameters. Part of the input parameters can be modified during the
-optimization procedure. So the mathematical representation of such a process is
-a function.
-It was briefly described in the section :ref:`section_theory` and is generalized
-here by the relation:
-
-.. math:: \mathbf{y} = O( \mathbf{x} )
-
-between the pseudo-observations :math:`\mathbf{y}` and the parameters
-:math:`\mathbf{x}` using the observation or evolution operator :math:`O`. The
-same functional representation can be used for the linear tangent model
-:math:`\mathbf{O}` of :math:`O` and its adjoint :math:`\mathbf{O}^*`, also
-required by some data assimilation or optimization algorithms.
-
-Then, **to describe completely an operator, the user has only to provide a
-function that fully and only realizes the functional operation**.
-
-This function is usually given as a script that can be executed in a YACS node.
-This script can indifferently launch external codes or use internal SALOME
-calls and methods. If the algorithm requires the 3 aspects of the operator
-(direct form, tangent form and adjoint form), the user has to provide the 3
-functions or to approximate them.
-
-There are 3 practical methods for the user to provide the operator functional
-representation.
-
-First functional form: using "*ScriptWithOneFunction*"
-++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. index:: single: ScriptWithOneFunction
-.. index:: single: DirectOperator
-.. index:: single: DifferentialIncrement
-.. index:: single: CenteredFiniteDifference
-
-The first one consists in providing only one potentially non-linear function,
-and in approximating the tangent and the adjoint operators. This is done by
-using the keyword "*ScriptWithOneFunction*" for the description of the chosen
-operator in the ADAO GUI. The user has to provide the function in a script,
-with a mandatory name "*DirectOperator*". For example, the script can follow
-the template::
-
-  def DirectOperator( X ):
-      """ Direct non-linear simulation operator """
-      ...
-      ...
-      ...
-      return Y # the result Y = O(X) of the simulation
-
-In this case, the user can also provide a value for the differential increment,
-using, through the GUI, the keyword "*DifferentialIncrement*", which has a
-default value of 1%. This coefficient is used in the finite difference
-approximation to build the tangent and adjoint operators. The finite difference
-approximation order can also be chosen through the GUI, using the keyword
-"*CenteredFiniteDifference*", with 0 for an uncentered scheme of first order,
-and with 1 for a centered scheme of second order (at twice the computational
-cost of the first order scheme). The keyword has a default value of 0.
-
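-The following minimal sketch, given only as an illustration and not as the ADAO
-internal implementation, shows how uncentered and centered finite differences
-can approximate the tangent operator applied to a perturbation ``dX``, assuming
-a "*DirectOperator*" function as above and an increment coefficient playing the
-role of "*DifferentialIncrement*"::
-
-  import numpy
-
-  def ApproximatedTangent( X, dX, increment = 0.01, centered = False ):
-      """ Finite difference approximation of the tangent operator applied
-          to dX around X (illustrative sketch only) """
-      X, dX = numpy.ravel( X ), numpy.ravel( dX )
-      if centered:
-          # Centered scheme of second order, at twice the computational cost
-          YP = numpy.ravel( DirectOperator( X + increment * dX ) )
-          YM = numpy.ravel( DirectOperator( X - increment * dX ) )
-          return ( YP - YM ) / ( 2. * increment )
-      else:
-          # Uncentered scheme of first order
-          Y0 = numpy.ravel( DirectOperator( X ) )
-          YP = numpy.ravel( DirectOperator( X + increment * dX ) )
-          return ( YP - Y0 ) / increment
-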
-This first operator definition allows one to easily test the functional form
-before its use in an ADAO case, greatly reducing the complexity of
-implementation.
-
-**Important warning:** the name "*DirectOperator*" is mandatory, and the type of
-the X argument can be either a python list, a numpy array or a numpy 1D-matrix.
-The user has to treat these cases in his script.
-
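-For example, a simple way to satisfy this requirement, given only as a sketch
-with a hypothetical component-wise simulation standing for the real one, is to
-normalize the argument at the beginning of the function::
-
-  import numpy
-
-  def DirectOperator( X ):
-      """ Direct non-linear simulation operator (illustrative example) """
-      # Accept indifferently a python list, a numpy array or a numpy 1D-matrix
-      Xv = numpy.ravel( numpy.array( X, dtype = float ) )
-      # Hypothetical simulation, here a simple component-wise non-linearity
-      Y = Xv * Xv
-      return Y
-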
-Second functional form: using "*ScriptWithFunctions*"
-+++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. index:: single: ScriptWithFunctions
-.. index:: single: DirectOperator
-.. index:: single: TangentOperator
-.. index:: single: AdjointOperator
-
-The second one consists in directly providing the three associated operators
-:math:`O`, :math:`\mathbf{O}` and :math:`\mathbf{O}^*`. This is done by using
-the keyword "*ScriptWithFunctions*" for the description of the chosen operator
-in the ADAO GUI. The user has to provide three functions in one script, with
-three mandatory names "*DirectOperator*", "*TangentOperator*" and
-"*AdjointOperator*". For example, the script can follow the template::
-
-  def DirectOperator( X ):
-      """ Direct non-linear simulation operator """
-      ...
-      ...
-      ...
-      return Y # a result with the shape of Y
-
-  def TangentOperator( (X, dX) ):
-      """ Tangent linear operator, around X, applied to dX """
-      ...
-      ...
-      ...
-      return Y # a result with the shape of Y
-
-  def AdjointOperator( (X, Y) ):
-      """ Adjoint operator, around X, applied to Y """
-      ...
-      ...
-      ...
-      return X # a result with the shape of X
-
-Again, this second operator definition allows one to easily test the functional
-forms before their use in an ADAO case, reducing the complexity of
-implementation.
-
-**Important warning:** the names "*DirectOperator*", "*TangentOperator*" and
-"*AdjointOperator*" are mandatory, and the type of the X, Y, dX arguments can be
-either a python list, a numpy array or a numpy 1D-matrix. The user has to treat
-these cases in his script.
-
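-As an illustration of the consistency expected between the three functions,
-here is a minimal sketch for a purely linear operator defined by a hypothetical
-matrix ``A``, for which the tangent operator is the operator itself and the
-adjoint is obtained through the transpose::
-
-  import numpy
-
-  A = numpy.array([[1., 0.5], [0.5, 2.]]) # hypothetical linear model
-
-  def DirectOperator( X ):
-      """ Direct simulation operator, here linear """
-      return numpy.dot( A, numpy.ravel( X ) )
-
-  def TangentOperator( (X, dX) ):
-      """ Tangent operator: for a linear operator, A itself applied to dX """
-      return numpy.dot( A, numpy.ravel( dX ) )
-
-  def AdjointOperator( (X, Y) ):
-      """ Adjoint operator: the transpose of A applied to Y """
-      return numpy.dot( A.T, numpy.ravel( Y ) )
-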
-Third functional form: using "*ScriptWithSwitch*"
-+++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. index:: single: ScriptWithSwitch
-.. index:: single: DirectOperator
-.. index:: single: TangentOperator
-.. index:: single: AdjointOperator
-
-This third form gives more possibilities to control the execution of the three
-functions representing the operator, allowing advanced usage and control over
-each execution of the simulation code. This is done by using the keyword
-"*ScriptWithSwitch*" for the description of the chosen operator in the ADAO GUI.
-The user has to provide a switch in one script to control the execution of the
-direct, tangent and adjoint forms of the simulation code. The user can then, for
-example, use other approximations for the tangent and adjoint codes, or
-introduce more complexity in the argument treatment of the functions. But this
-will be far more complicated to implement and debug.
-
-**It is recommended not to use this third functional form without a solid
-numerical or physical reason.**
-
-If, however, you want to use this third form, we recommend using the following
-template for the switch. It requires an external script or code named
-"*Physical_simulation_functions.py*", containing three functions named
-"*DirectOperator*", "*TangentOperator*" and "*AdjointOperator*" as previously.
-Here is the switch template::
-
- import Physical_simulation_functions
- import numpy, logging
- #
- method = ""
- for param in computation["specificParameters"]:
- if param["name"] == "method":
- method = param["value"]
- if method not in ["Direct", "Tangent", "Adjoint"]:
- raise ValueError("No valid computation method is given")
- logging.info("Found method is \'%s\'"%method)
- #
- logging.info("Loading operator functions")
- Function = Physical_simulation_functions.DirectOperator
- Tangent = Physical_simulation_functions.TangentOperator
- Adjoint = Physical_simulation_functions.AdjointOperator
- #
- logging.info("Executing the possible computations")
- data = []
- if method == "Direct":
- logging.info("Direct computation")
- Xcurrent = computation["inputValues"][0][0][0]
- data = Function(numpy.matrix( Xcurrent ).T)
- if method == "Tangent":
- logging.info("Tangent computation")
- Xcurrent = computation["inputValues"][0][0][0]
- dXcurrent = computation["inputValues"][0][0][1]
-    data = Tangent((numpy.matrix(Xcurrent).T, numpy.matrix(dXcurrent).T))
- if method == "Adjoint":
- logging.info("Adjoint computation")
- Xcurrent = computation["inputValues"][0][0][0]
- Ycurrent = computation["inputValues"][0][0][1]
- data = Adjoint((numpy.matrix(Xcurrent).T, numpy.matrix(Ycurrent).T))
- #
- logging.info("Formatting the output")
- it = numpy.ravel(data)
- outputValues = [[[[]]]]
- for val in it:
- outputValues[0][0][0].append(val)
- #
- result = {}
- result["outputValues"] = outputValues
- result["specificOutputInfos"] = []
- result["returnCode"] = 0
- result["errorMessage"] = ""
-
-All the various modifications can be built starting from this template
-hypothesis.
-
-Special case of controlled evolution operator
-+++++++++++++++++++++++++++++++++++++++++++++
-
-In some cases, the evolution or the observation operators are required to be
-controlled by an external input control, given a priori. In this case, the
-generic form of the incremental evolution model is slightly modified as follows:
-
-.. math:: \mathbf{y} = O( \mathbf{x}, \mathbf{u})
-
-where :math:`\mathbf{u}` is the control over one state increment. In this case,
-the direct operator has to be applied to a pair of variables :math:`(X,U)`.
-Schematically, the operator has to be set as::
-
-  def DirectOperator( (X, U) ):
-      """ Direct non-linear simulation operator """
-      ...
-      ...
-      ...
-      return Y # something like X(n+1) or Y(n+1)
-
-The tangent and adjoint operators have the same signature as previously, noting
-that the derivatives have to be taken only partially with respect to
-:math:`\mathbf{x}`. In such a case with explicit control, only the second
-functional form (using "*ScriptWithFunctions*") and the third functional form
-(using "*ScriptWithSwitch*") can be used.
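-
-For example, and only as an illustrative sketch with hypothetical matrices
-``A`` and ``B``, a linear controlled evolution operator, whose tangent and
-adjoint are derived only partially with respect to :math:`\mathbf{x}`, can be
-written as::
-
-  import numpy
-
-  A = numpy.array([[1., 0.1], [0., 1.]]) # hypothetical state transition
-  B = numpy.array([[0.5], [1.]])         # hypothetical control effect
-
-  def DirectOperator( (X, U) ):
-      """ Controlled evolution over one state increment """
-      return numpy.dot( A, numpy.ravel( X ) ) + numpy.dot( B, numpy.ravel( U ) )
-
-  def TangentOperator( (X, dX) ):
-      """ Partial derivative with respect to X only, applied to dX """
-      return numpy.dot( A, numpy.ravel( dX ) )
-
-  def AdjointOperator( (X, Y) ):
-      """ Adjoint of the partial derivative with respect to X """
-      return numpy.dot( A.T, numpy.ravel( Y ) )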
+++ /dev/null
-.. _section_theory:
-
-================================================================================
-A brief introduction to Data Assimilation and Optimization
-================================================================================
-
-.. index:: single: Data Assimilation
-.. index:: single: true state
-.. index:: single: observation
-.. index:: single: a priori
-
-**Data Assimilation** is a general framework for computing the optimal estimate
-of the true state of a system, over time if necessary. It uses values obtained
-by combining both observations and *a priori* models, including information
-about their errors.
-
-In other words, data assimilation merges measurement data of a system, that are
-the observations, with *a priori* system physical and mathematical knowledge,
-embedded in numerical models, to obtain the best possible estimate of the system
-true state and of its stochastic properties. Note that this true state cannot
-be reached, but can only be estimated. Moreover, despite the fact that the
-information used is stochastic by nature, data assimilation provides
-deterministic techniques in order to perform the estimation.
-
-Because data assimilation looks for the **best possible** estimate, its
-underlying procedure always integrates optimization in order to find this
-estimate: particular optimization methods are always embedded in data
-assimilation algorithms. Optimization methods can be seen here as a way to
-extend data assimilation applications. They will be introduced this way in the
-section `Going further in the state estimation by optimization methods`_, but
-they are far more general and can be used without data assimilation concepts.
-
-Two main types of applications exist in data assimilation, being covered by the
-same formalism: **parameters identification** and **fields reconstruction**.
-Before introducing the `Simple description of the data assimilation framework`_
-in a next section, we briefly describe these two types. At the end, some
-references allow `Going further in the data assimilation framework`_.
-
-Fields reconstruction or measures interpolation
------------------------------------------------
-
-.. index:: single: fields reconstruction
-
-Fields reconstruction consists in finding, from a restricted set of real
-measures, the physical field which is the most *consistent* with these measures.
-
-This consistency is to be understood in terms of interpolation, that is to say
-that the field we want to reconstruct, using data assimilation on measures, has
-to fit at best the measures, while remaining constrained by the overall
-calculation. The calculation is thus an *a priori* estimation of the field that
-we seek to identify.
-
-If the system evolves in time, the reconstruction has to be established on every
-time step, as a whole. The interpolation process in this case is more
-complicated since it is temporal, not only in terms of instantaneous values of
-the field.
-
-A simple example of fields reconstruction comes from meteorology, in which one
-looks for the values of variables such as temperature or pressure at all points
-of the spatial domain. One has instantaneous measurements of these quantities
-at certain points, but also a history set of these measures. Moreover, these
-variables are constrained by evolution equations for the state of the
-atmosphere, which indicates for example that the pressure at a point cannot
-take any value independently of the value at this same point in previous time.
-One must therefore make the reconstruction of a field at any point in space, in
-a "consistent" manner with the evolution equations and with the measures of the
-previous time steps.
-
-Parameters identification or calibration
-----------------------------------------
-
-.. index:: single: parameters identification
-
-The identification of parameters by data assimilation is a form of calibration
-which uses both the measurement and an *a priori* estimation (called the
-"*background*") of the state that one seeks to identify, as well as a
-characterization of their errors. From this point of view, it uses all available
-information on the physical system (even if assumptions about errors are
-relatively restrictive) to find the "*optimal*" estimation of the true state.
-We note, in terms of optimization, that the background realizes a mathematical
-regularization of the main problem of parameters identification.
-
-In practice, the two observed gaps "*calculation-background*" and
-"*calculation-measures*" are added to build the calibration correction of
-parameters or initial conditions. The addition of these two gaps requires a
-relative weight, which is chosen to reflect the trust we give to each piece of
-information. This confidence is measured by the covariance of the errors on the
-background and on the observations. Thus the stochastic aspect of information,
-measured or *a priori*, is essential for building the calibration error
-function.
-
-A simple example of parameters identification comes from any kind of physical
-simulation process involving a parametrized model. For example, a static
-mechanical simulation of a beam constrained by some forces is described by beam
-parameters, such as a Young's modulus, or by the intensity of the force. The
-parameter estimation problem consists in finding for example the right Young's
-modulus value such that the simulation of the beam corresponds to the
-measurements, including the knowledge of errors.
-
-Simple description of the data assimilation framework
------------------------------------------------------
-
-.. index:: single: background
-.. index:: single: background error covariances
-.. index:: single: observation error covariances
-.. index:: single: covariances
-
-We can write these features in a simple manner. By default, all variables are
-vectors, as there are several parameters to readjust.
-
-According to standard notations in data assimilation, we note
-:math:`\mathbf{x}^a` the optimal parameters that are to be determined by
-calibration, :math:`\mathbf{y}^o` the observations (or experimental
-measurements) that we must compare to the simulation outputs,
-:math:`\mathbf{x}^b` the background (*a priori* values, or regularization
-values) of the searched parameters, and :math:`\mathbf{x}^t` the unknown ideal
-parameters that would give exactly the observations (assuming that the errors
-are zero and the model is exact) as output.
-
-In the simplest case, which is static, the steps of simulation and of
-observation can be combined into a single observation operator noted :math:`H`
-(linear or nonlinear), which transforms the input parameters :math:`\mathbf{x}`
-to results :math:`\mathbf{y}` to be compared to observations
-:math:`\mathbf{y}^o`. Moreover, we use the linearized operator
-:math:`\mathbf{H}` to represent the effect of the full operator :math:`H` around
-a linearization point (and we will thereafter omit mentioning :math:`H`, even
-if it is possible to keep it). In reality, we have already indicated that the
-stochastic nature of variables is essential, coming from the fact that model,
-background
-and observations are incorrect. We therefore introduce errors of observations
-additively, in the form of a random vector :math:`\mathbf{\epsilon}^o` such
-that:
-
-.. math:: \mathbf{y}^o = \mathbf{H} \mathbf{x}^t + \mathbf{\epsilon}^o
-
-The errors represented here are not only those from observation, but also from
-the simulation. We can always consider that these errors are of zero mean. We
-can then define a matrix :math:`\mathbf{R}` of the observation error covariances
-by:
-
-.. math:: \mathbf{R} = E[\mathbf{\epsilon}^o.{\mathbf{\epsilon}^o}^T]
-
-The background can also be written as a function of the true value, by
-introducing the error vector :math:`\mathbf{\epsilon}^b`:
-
-.. math:: \mathbf{x}^b = \mathbf{x}^t + \mathbf{\epsilon}^b
-
-where errors are also assumed of zero mean, in the same manner as for
-observations. We define the :math:`\mathbf{B}` matrix of background error
-covariances by:
-
-.. math:: \mathbf{B} = E[\mathbf{\epsilon}^b.{\mathbf{\epsilon}^b}^T]
-
-The optimal estimation of the true parameters :math:`\mathbf{x}^t`, given the
-background :math:`\mathbf{x}^b` and the observations :math:`\mathbf{y}^o`, is
-then the "*analysis*" :math:`\mathbf{x}^a`, and comes from the minimization of
-an error function (in variational assimilation) or from the filtering
-correction (in assimilation by filtering).
-
-In **variational assimilation**, in a static case, one classically attempts to
-minimize the following function :math:`J`:
-
-.. math:: J(\mathbf{x})=(\mathbf{x}-\mathbf{x}^b)^T.\mathbf{B}^{-1}.(\mathbf{x}-\mathbf{x}^b)+(\mathbf{y}^o-\mathbf{H}.\mathbf{x})^T.\mathbf{R}^{-1}.(\mathbf{y}^o-\mathbf{H}.\mathbf{x})
-
-which is usually designated as the "*3D-VAR*" function. Since :math:`\mathbf{B}`
-and :math:`\mathbf{R}` covariance matrices are proportional to the variances of
-errors, their presence in both terms of the function :math:`J` can effectively
-weight the differences by confidence in the background or observations. The
-parameters vector :math:`\mathbf{x}` realizing the minimum of this function
-therefore constitutes the analysis :math:`\mathbf{x}^a`. It is at this level
-that we have to use the full panoply of function minimization methods otherwise
-known in optimization (see also the section `Going further in the state
-estimation by optimization methods`_). Depending on the size of the parameters
-vector :math:`\mathbf{x}` to identify, and on the availability of the gradient
-and Hessian of :math:`J`, it is appropriate to adapt the chosen optimization
-method (gradient, Newton, quasi-Newton...).
-
-In **assimilation by filtering**, in this simple case usually referred to as
-"*BLUE*" (for "*Best Linear Unbiased Estimator*"), the :math:`\mathbf{x}^a`
-analysis is given as a correction of the background :math:`\mathbf{x}^b` by a
-term proportional to the difference between observations :math:`\mathbf{y}^o`
-and calculations :math:`\mathbf{H}\mathbf{x}^b`:
-
-.. math:: \mathbf{x}^a = \mathbf{x}^b + \mathbf{K}(\mathbf{y}^o - \mathbf{H}\mathbf{x}^b)
-
-where :math:`\mathbf{K}` is the Kalman gain matrix, which is expressed using
-covariance matrices in the following form:
-
-.. math:: \mathbf{K} = \mathbf{B}\mathbf{H}^T(\mathbf{H}\mathbf{B}\mathbf{H}^T+\mathbf{R})^{-1}
-
-The advantage of filtering is the explicit calculation of the gain, which can
-then be used to produce the *a posteriori* analysis error covariance matrix.
-
-In this simple static case, we can show, under the assumption of Gaussian error
-distributions, that the two *variational* and *filtering* approaches are
-equivalent.
-
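-This equivalence can be checked numerically. The following sketch, using
-hypothetical small-size values chosen only for the illustration, computes the
-analysis both by the "*BLUE*" formula and by the closed-form minimum of the
-"*3D-VAR*" function :math:`J`::
-
-  import numpy
-
-  H  = numpy.array([[1., 0., 0.], [0., 1., 1.]]) # observation operator
-  B  = numpy.eye(3)                              # background error covariances
-  R  = 0.1 * numpy.eye(2)                        # observation error covariances
-  xb = numpy.zeros(3)                            # background
-  yo = numpy.array([0.5, 1.0])                   # observations
-  #
-  # Analysis by filtering (BLUE): xa = xb + K (yo - H xb)
-  K  = B.dot(H.T).dot( numpy.linalg.inv( H.dot(B).dot(H.T) + R ) )
-  xa_blue = xb + K.dot( yo - H.dot(xb) )
-  #
-  # Analysis by the variational approach: closed-form minimum of J
-  Binv, Rinv = numpy.linalg.inv(B), numpy.linalg.inv(R)
-  xa_var = numpy.linalg.solve( Binv + H.T.dot(Rinv).dot(H),
-                               Binv.dot(xb) + H.T.dot(Rinv).dot(yo) )
-  #
-  print "BLUE analysis.......:", xa_blue
-  print "Variational analysis:", xa_var # identical up to round-off
-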
-It is indicated here that these methods of "*3D-VAR*" and "*BLUE*" may be
-extended to dynamic problems, called respectively "*4D-VAR*" and "*Kalman
-filter*". They can take into account the evolution operator to establish an
-analysis, at the right time steps, of the gap between observations and
-simulations, and to have, at every moment, the propagation of the background
-through the evolution model. Many other variants have been developed to improve
-the numerical quality or to take into account computer requirements such as
-calculation size and time.
-
-Going further in the data assimilation framework
-------------------------------------------------
-
-.. index:: single: state estimation
-.. index:: single: parameter estimation
-.. index:: single: inverse problems
-.. index:: single: Bayesian estimation
-.. index:: single: optimal interpolation
-.. index:: single: mathematical regularization
-.. index:: single: data smoothing
-
-To get more information about all the data assimilation techniques, the reader
-can consult introductory documents like [Argaud09]_, on-line training courses or
-lectures like [Bouttier99]_ and [Bocquet04]_ (along with other materials coming
-from geosciences applications), or general documents like [Talagrand97]_,
-[Tarantola87]_, [Kalnay03]_, [Ide97]_ and [WikipediaDA]_.
-
-Note that data assimilation is not restricted to meteorology or geo-sciences, but
-is widely used in other scientific domains. There are several fields in science
-and technology where the effective use of observed but incomplete data is
-crucial.
-
-Some aspects of data assimilation are also known as *state estimation*,
-*parameter estimation*, *inverse problems*, *Bayesian estimation*, *optimal
-interpolation*, *mathematical regularization*, *data smoothing*, etc. These
-terms can be used in bibliographical searches.
-
-Going further in the state estimation by optimization methods
--------------------------------------------------------------
-
-.. index:: single: state estimation
-.. index:: single: optimization methods
-
-As seen before, in a static simulation case, variational data assimilation
-requires the minimization of the goal function :math:`J`:
-
-.. math:: J(\mathbf{x})=(\mathbf{x}-\mathbf{x}^b)^T.\mathbf{B}^{-1}.(\mathbf{x}-\mathbf{x}^b)+(\mathbf{y}^o-\mathbf{H}.\mathbf{x})^T.\mathbf{R}^{-1}.(\mathbf{y}^o-\mathbf{H}.\mathbf{x})
-
-which is named the "*3D-VAR*" function. It can be seen as an extended form of
-*least squares minimization*, obtained by adding a regularizing term using
-:math:`\mathbf{x}-\mathbf{x}^b`, and by weighting the differences using the two
-covariance matrices :math:`\mathbf{B}` and :math:`\mathbf{R}`. The minimization
-of the :math:`J` function leads to the *best* state estimation.
-
-Extending the state estimation possibilities, by more explicitly using
-optimization methods and their properties, can be imagined in two ways.
-
-First, classical optimization methods involve using various gradient-based
-minimizing procedures. They are extremely efficient to look for a single local
-minimum. But they require the goal function :math:`J` to be sufficiently regular
-and differentiable, and are not able to capture global properties of the
-minimization problem, for example: global minimum, set of equivalent solutions
-due to over-parametrization, multiple local minima, etc. **A way to extend
-estimation possibilities is then to use a whole range of optimizers, allowing
-global minimization, various robust search properties, etc**. There are many
-minimization methods, such as stochastic ones, evolutionary ones, heuristics
-and meta-heuristics for real-valued problems, etc. They can treat partially
-irregular or noisy functions :math:`J`, can characterize local minima, etc. The
-main drawbacks are a greater numerical cost to find state estimates, and no
-guarantee of convergence in finite time. Here, we only point out the following
-topics, as the methods are available in the ADAO module: *Quantile regression*
-[WikipediaQR]_ and *Particle swarm optimization* [WikipediaPSO]_.
-
-Secondly, optimization methods usually try to minimize quadratic measures of
-errors, as the natural properties of such goal functions are well suited for
-classical gradient optimization. But other measures of errors can be better
-adapted to real physical simulation problems. Then, **another way to extend
-estimation possibilities is to use other measures of errors to be reduced**.
-For example, we can cite the *absolute error value*, the *maximum error value*,
-etc. These error measures are not differentiable, but some optimization methods
-can deal with them: heuristics and meta-heuristics for real-valued problems,
-etc. As previously, the main drawback remains a greater numerical cost to find
-state estimates, and no guarantee of convergence in finite time. Here, we also
-point out the following method, as it is available in the ADAO module:
-*Particle swarm optimization* [WikipediaPSO]_.
-
-The reader interested in the subject of optimization can look at [WikipediaMO]_
-as a general entry point.
+++ /dev/null
-.. _section_using:
-
-================================================================================
-Using the ADAO module
-================================================================================
-
-.. |eficas_new| image:: images/eficas_new.png
- :align: middle
- :scale: 50%
-.. |eficas_save| image:: images/eficas_save.png
- :align: middle
- :scale: 50%
-.. |eficas_saveas| image:: images/eficas_saveas.png
- :align: middle
- :scale: 50%
-.. |eficas_yacs| image:: images/eficas_yacs.png
- :align: middle
- :scale: 50%
-.. |yacs_compile| image:: images/yacs_compile.png
- :align: middle
- :scale: 50%
-
-This section presents the usage of the ADAO module in SALOME. It is complemented
-by the detailed description of all the commands and keywords in the section
-:ref:`section_reference`, by advanced usage procedures in the section
-:ref:`section_advanced`, and by examples in the section :ref:`section_examples`.
-
-Logical procedure to build an ADAO test case
---------------------------------------------
-
-The construction of an ADAO case follows a simple approach to define the set of
-input data, and then generates a complete executable block diagram used in YACS.
-Many variations exist for the definition of input data, but the logical sequence
-remains unchanged.
-
-First of all, the user is assumed to know the input data needed to set up the
-data assimilation study. These data may or may not already be available in
-SALOME.
-
-**Basically, the procedure of using ADAO involves the following steps:**
-
-#. **Activate the ADAO module and use the editor GUI,**
-#. **Build and/or modify the ADAO case and save it,**
-#. **Export the ADAO case as a YACS scheme,**
-#. **Supplement and modify the YACS scheme and save it,**
-#. **Execute the YACS case and obtain the results.**
-
-Each step will be detailed in the following sections.
-
-STEP 1: Activate the ADAO module and use the editor GUI
--------------------------------------------------------
-
-As always for a module, it has to be activated by choosing the appropriate
-module button (or menu) in the toolbar of SALOME. If there is no SALOME study
-loaded, a popup appears, allowing one to choose between creating a new study or
-opening an already existing one:
-
- .. _adao_activate1:
- .. image:: images/adao_activate.png
- :align: center
- .. centered::
- **Activating the module ADAO in SALOME**
-
-Choosing the "*New*" button, an embedded case editor EFICAS [#]_ will be opened,
-along with the standard "*Object browser*". You can then click on the "*New*"
-button |eficas_new| (or choose the "*New*" entry in the "*ADAO*" main menu) to
-create a new ADAO case, and you will see:
-
- .. _adao_viewer:
- .. image:: images/adao_viewer.png
- :align: center
- :width: 100%
- .. centered::
- **The EFICAS editor for cases definition in module ADAO**
-
-STEP 2: Build and modify the ADAO case and save it
---------------------------------------------------
-
-To build a case using EFICAS, you have to go through a series of sub-steps, by
-selecting, at each sub-step, a keyword and then filling in its value.
-
-The structured editor indicates hierarchical types, values or keywords allowed.
-Incomplete or incorrect keywords are identified by a red visual error flag.
-Possible values are indicated for keywords defined with a limited list of
-values, and adapted entries are given for the other keywords. Some help
-messages are contextually provided in the reserved places of the editor.
-
-A new case is set up with the minimal list of commands. All the mandatory
-commands or keywords are already present, and none of them can be removed.
-Optional keywords can be added by choosing them in a list of suggestions of
-allowed ones for the main command, for example the "*ASSIMILATION_STUDY*"
-command. As an example, one can add an "*AlgorithmParameters*" keyword, as
-described in the last part of the section :ref:`section_examples`.
-
-At the end, when all fields or keywords have been correctly defined, each line
-of the commands tree must have a green flag. This indicates that the whole case
-is valid and completed (and can be saved).
-
- .. _adao_jdcexample00:
- .. image:: images/adao_jdcexample01.png
- :align: center
- :scale: 75%
- .. centered::
- **Example of a valid ADAO case**
-
-Finally, you have to save your ADAO case by pushing the "*Save*" button
-|eficas_save|, or the "*Save as*" button |eficas_saveas|, or by choosing the
-"*Save/Save as*" entry in the "*ADAO*" menu. You will be prompted for a location
-in your file tree and a name, that will be completed by a "*.comm*" extension
-used for JDC EFICAS files. This will generate a pair of files describing the
-ADAO case, with the same base name, the first one being completed by a "*.comm*"
-extension and the second one by a "*.py*" extension [#]_.
-
-STEP 3: Export the ADAO case as a YACS scheme
----------------------------------------------
-
-When the ADAO case is completed, you have to export it as a YACS scheme [#]_ in
-order to execute the data assimilation calculation. This can be easily done by
-using the "*Export to YACS*" button |eficas_yacs|, or equivalently by choosing
-the "*Export to YACS*" entry in the "*ADAO*" main menu, or in the contextual
-case menu in the object browser.
-
- .. _adao_exporttoyacs01:
- .. image:: images/adao_exporttoyacs.png
- :align: center
- :scale: 75%
- .. centered::
- **"Export to YACS" sub-menu to generate the YACS scheme from the ADAO case**
-
-This automatically generates a YACS scheme and opens the YACS module on this
-scheme. The YACS file, associated with the scheme, will be stored in the same
-directory and with the same base name as the ADAO saved case, only changing its
-extension to "*.xml*". Be careful, *if the XML file name already exists, it
-will be overwritten without any prompt for confirmation*.
-
-STEP 4: Supplement and modify the YACS scheme and save it
----------------------------------------------------------
-
-.. index:: single: Analysis
-
-When the YACS scheme is generated and opened in SALOME through the YACS module
-GUI, you can modify or supplement the scheme like any YACS scheme. Nodes or
-blocs can be added, copied or modified to elaborate complex analyses, or to
-insert data assimilation or optimization capabilities into more complex YACS
-calculation schemes. It is recommended to save the modified scheme with a new
-name, in order to preserve the XML file in case you re-export the ADAO case to
-YACS.
-
-The main supplement needed in the YACS scheme is a post-processing step. The
-evaluation of the results has to be done in the physical context of the
-simulation used by the data assimilation procedure. The post-processing can be
-provided through the "*UserPostAnalysis*" ADAO keyword as a script or a string,
-by templates, or can be built as YACS nodes using all the SALOME possibilities.
-
-The YACS scheme has an "*algoResults*" output port on the computation bloc,
-which gives access to a "*pyobj*" named hereafter "*ADD*", containing all the
-processing results. These results can be obtained by retrieving the named
-variables stored along the calculation. The main one is "*Analysis*", which can
-be obtained through the python command (for example in an in-line script node
-or a script provided through the "*UserPostAnalysis*" keyword)::
-
- Analysis = ADD.get("Analysis")[:]
-
-"*Analysis*" is a complex object, similar to a list of values calculated at each
-step of data assimilation calculation. In order to get and print the optimal
-data assimilation state evaluation, in script provided through the
-"*UserPostAnalysis*" keyword, one can use::
-
- Xa = ADD.get("Analysis")[-1]
- print "Optimal state:", Xa
- print
-
-This ``Xa`` is a vector of values, which represents the solution of the data
-assimilation or optimization evaluation problem, noted as :math:`\mathbf{x}^a`
-in the section :ref:`section_theory`.
-
-Such a command can be used to print results, or to convert them into structures
-that can be used in the native or external SALOME post-processing. A simple
-example is given in the section :ref:`section_examples`.
-
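-For example, a minimal post-processing sketch, to be put in a script given
-through the "*UserPostAnalysis*" keyword, can convert the optimal state into a
-plain numpy array and save it in a text file (the name "*analysis.txt*" is
-hypothetical and chosen only for the example)::
-
-  import numpy
-  Xa = numpy.ravel( ADD.get("Analysis")[-1] )
-  numpy.savetxt( "analysis.txt", Xa )
-  print "Optimal state saved:", Xa
-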
-STEP 5: Execute the YACS case and obtain the results
-----------------------------------------------------
-
-The YACS scheme is now complete and can be executed. Parametrization and
-execution of a YACS case is fully compliant with the standard way to deal with a
-YACS scheme, and is described in the *YACS module User's Guide*.
-
-To recall the simplest way to proceed, the YACS scheme has to be compiled using
-the button |yacs_compile|, or the equivalent YACS menu entry, to prepare the
-scheme to run. Then the compiled scheme can be started, executed step by step or
-using breakpoints, etc.
-
-The standard output will be pushed into the "*YACS Container Log*", obtained
-through the right click menu of the "*proc*" window in the YACS GUI. The errors
-are shown either in the "*YACS Container Log*", or at the command line in the
-shell window (if SALOME has been launched by its explicit command and not by
-menu). As an example, the output of the above simple case is the following::
-
- Entering in the assimilation study
- Name is set to........: Test
- Algorithm is set to...: Blue
- Launching the analyse
-
- Optimal state: [0.5, 0.5, 0.5]
-
-shown in the "*YACS Container Log*".
-
-The execution can also be done using a shell script, as described in the section
-:ref:`section_advanced`.
-
-.. [#] For more information on EFICAS, see the *EFICAS module* available in SALOME GUI.
-
-.. [#] For more information on YACS, see the *YACS module User's Guide* available in the main "*Help*" menu of SALOME GUI.
-
-.. [#] This intermediary python file can also be used as described in the section :ref:`section_advanced`.
</section>
<section name="adao_help">
<parameter name="sub_menu" value="%1 module"/>
- <parameter name="User's Guide" value="${ADAO_ROOT_DIR}/share/doc/salome/gui/ADAO/index.html"/>
+ <parameter name="User's Guide" value="${ADAO_ROOT_DIR}/share/doc/salome/gui/ADAO/en/index.html"/>
</section>
</document>