From: Jean-Philippe ARGAUD
Date: Fri, 16 Mar 2012 12:24:34 +0000 (+0100)
Subject: Improvements of the documentation
X-Git-Tag: V6_5_0~22
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=d58ddedd901157d5ba84a580de066d4130056abe;p=modules%2Fadao.git

Improvements of the documentation
---

diff --git a/doc/conf.py b/doc/conf.py
index 28180cf..e7d68ef 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2010-2011 EDF R&D
+# Copyright (C) 2008-2012 EDF R&D
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -58,16 +58,16 @@ master_doc = 'index'

 # General information about the project.
 project = u'ADAO'
-copyright = u'2008-2011, EDF R&D, J.-P. Argaud'
+copyright = u'2008-2012, Jean-Philippe ARGAUD'

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '6.4.0'
+version = '6.5.0a2'
 # The full version, including alpha/beta/rc tags.
-release = '6.4.0'
+release = '6.5.0a2'

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -193,7 +193,7 @@ latex_font_size = '10pt'
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
   ('index', 'ADAO.tex', u'ADAO Documentation',
-   u'Jean-Philippe Argaud', 'manual'),
+   u'Jean-Philippe ARGAUD', 'manual'),
 ]

 # The name of an image file (relative to this directory) to place at the top of
@@ -214,3 +214,105 @@ latex_documents = [
 #latex_use_modindex = True

 source_encoding = 'iso-8859-15'
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'ADAO'
+epub_author = u'Jean-Philippe ARGAUD'
+epub_publisher = u'Jean-Philippe ARGAUD'
+epub_copyright = u'2008-2012, Jean-Philippe ARGAUD'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be an ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+# -- Options for PDF output --------------------------------------------------
+# Grouping the document tree into PDF files. List of tuples
+# (source start file, target name, title, author, options).
+#
+# If there is more than one author, separate them with \\.
+# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
+#
+# The options element is a dictionary that lets you override
+# this config per-document.
+# For example,
+#   ('index', u'MyProject', u'My Project', u'Author Name',
+#    dict(pdf_compressed = True))
+# would mean that specific document would be compressed
+# regardless of the global pdf_compressed setting.
+pdf_documents = [
+    ('contents', u'ADAO', u'ADAO', u'Jean-Philippe ARGAUD'),
+]
+# A comma-separated list of custom stylesheets. Example:
+pdf_stylesheets = ['sphinx','kerning','a4']
+# Create a compressed PDF
+# Use True/False or 1/0
+# Example: compressed=True
+#pdf_compressed = False
+# A colon-separated list of folders to search for fonts. Example:
+# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
+# Language to be used for hyphenation support
+#pdf_language = "en_US"
+# Mode for literal blocks wider than the frame. Can be
+# overflow, shrink or truncate
+#pdf_fit_mode = "shrink"
+# Section level that forces a page break.
+# For example: 1 means top-level sections start in a new page
+# 0 means disabled
+#pdf_break_level = 0
+# When a section starts in a new page, force it to be 'even', 'odd',
+# or just use 'any'
+#pdf_breakside = 'any'
+# Insert footnotes where they are defined instead of
+# at the end.
+#pdf_inline_footnotes = True
+# Verbosity level. 0, 1 or 2
+#pdf_verbosity = 0
+# If false, no index is generated.
+#pdf_use_index = True
+# If false, no modindex is generated.
+#pdf_use_modindex = True
+# If false, no coverpage is generated.
+#pdf_use_coverpage = True
+# Name of the cover page template to use
+#pdf_cover_template = 'sphinxcover.tmpl'
+# Documents to append as an appendix to all manuals.
+#pdf_appendices = []
+# Enable experimental feature to split table cells. Use it
+# if you get "DelayedTable too big" errors
+#pdf_splittables = False
+# Set the default DPI for images
+#pdf_default_dpi = 72
+# Enable rst2pdf extension modules (default is an empty list);
+# you need vectorpdf for better support of Sphinx's graphviz
+#pdf_extensions = ['vectorpdf']
+# Page template name for "regular" pages
+#pdf_page_template = 'cutePage'
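
As an illustrative aside (not part of this patch): the "pdf_*" options above are
only read by the rst2pdf builder, so building the corresponding PDF target
assumes that the rst2pdf module is also registered among the Sphinx extensions
declared earlier in conf.py, for example::

    # Hypothetical addition, only needed if rst2pdf is not already present
    # in the existing "extensions" list of conf.py.
    extensions = list(extensions) + ['rst2pdf.pdfbuilder']
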
diff --git a/doc/examples.rst b/doc/examples.rst
index 6982776..882a48e 100644
--- a/doc/examples.rst
+++ b/doc/examples.rst
@@ -266,37 +266,14 @@ Adding parameters to control the data assimilation algorithm
 One can add some optional parameters to control the data assimilation algorithm
 calculation. This is done by using the "*AlgorithmParameters*" keyword in the
 definition of the ADAO case, which is a keyword of the ASSIMILATION_STUDY. This
-keyword requires a Python dictionary, containing some key/value pairs.
-
-For example, with a 3DVAR algorithm, the possible keys are "*Minimizer*",
-"*MaximumNumberOfSteps*", "*ProjectedGradientTolerance*",
-"*GradientNormTolerance*" and "*Bounds*":
-
-#. The "*Minimizer*" key allows to choose the optimization minimizer. The
-   default choice is "LBFGSB", and the possible ones are "LBFGSB" (nonlinear
-   constrained minimizer, see [Byrd95] and [Zhu97]), "TNC" (nonlinear
-   constrained minimizer), "CG" (nonlinear unconstrained minimizer), "BFGS"
-   (nonlinear unconstrained minimizer), "NCG" (Newton CG minimizer).
-#. The "*MaximumNumberOfSteps*" key indicates the maximum number of iterations
-   allowed for iterative optimization. The default is 15000, which very
-   similar of no limit on iterations. It is then recommended to adapt this
-   parameter to the needs on real problems.
-#. The "*ProjectedGradientTolerance*" key indicates a limit value, leading to
-   stop successfully the iterative optimization process when all the
-   components of the projected gradient are under this limit.
-#. The "*GradientNormTolerance*" key indicates a limit value, leading to stop
-   successfully the iterative optimization process when the norm of the
-   gradient is under this limit.
-#. The "*Bounds*" key allows to define upper and lower bounds for every
-   control variable being optimized. Bounds can be given by a list of list of
-   pairs of lower/upper bounds for each variable, with possibly ``None`` every
-   time there is no bound. The bounds can always be specified, but they are
-   taken into account only by the constrained minimizers.
+keyword requires a Python dictionary, containing some key/value pairs. The list
+of possible optional parameters is given in the subsection
+:ref:`subsection_algo_options`.

 If no bounds at all are required on the control variables, then one can choose
 the "BFGS" or "CG" minimization algorithm for the 3DVAR algorithm. For
 constrained optimization, the minimizer "LBFGSB" is often more robust, but the
-"TNC" is always more performant.
+"TNC" is sometimes more efficient.

 This dictionary has to be defined, for example, in an external Python script
 file, using the mandatory variable name "*AlgorithmParameters*" for the
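
For illustration only (a sketch, not part of this patch): such an external
Python script file simply binds the mandatory variable name
"*AlgorithmParameters*" to a dictionary whose keys are among the options listed
in :ref:`subsection_algo_options`, for example::

    # Hypothetical script "algo_parameters.py" (the file name is arbitrary),
    # referenced as the "AlgorithmParameters" entry of the ADAO case.
    AlgorithmParameters = {
        "Minimizer"            : "TNC",   # a constrained minimizer
        "MaximumNumberOfSteps" : 100,     # instead of the 15000 default
        }
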
diff --git a/doc/index.rst b/doc/index.rst
index d2d922c..0a23529 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -45,3 +45,4 @@ Indices and tables

 * :ref:`genindex`
 * :ref:`search`
+* :ref:`section_glossary`
diff --git a/doc/theory.rst b/doc/theory.rst
index fe4f324..3833ccf 100644
--- a/doc/theory.rst
+++ b/doc/theory.rst
@@ -4,6 +4,12 @@
 A brief introduction to Data Assimilation
 ================================================================================

+.. index:: single: Data Assimilation
+.. index:: single: true state
+.. index:: single: observation
+.. index:: single: a priori
+
+
 **Data Assimilation** is a general framework for computing the optimal estimate
 of the true state of a system, over time if necessary. It uses values obtained
 both from observations and *a priori* models, including information about their
@@ -26,6 +32,8 @@ references allow `Going further in the data assimilation framework`_.
 Fields reconstruction or measures interpolation
 -----------------------------------------------

+.. index:: single: parameters identification
+
 Fields reconstruction consists in finding, from a restricted set of real
 measures, the physical field which is the most *consistent* with these
 measures.
@@ -54,6 +62,8 @@ time steps.
 Parameters identification or calibration
 ----------------------------------------

+.. index:: single: fields reconstruction
+
 The identification of parameters by data assimilation is a form of calibration
 which uses both the measurement and an *a priori* estimation (called the
 "*background*") of the state that one seeks to identify, as well as a
@@ -75,6 +85,11 @@ function.
 Simple description of the data assimilation framework
 -----------------------------------------------------

+.. index:: single: background
+.. index:: single: background error covariances
+.. index:: single: observation error covariances
+.. index:: single: covariances
+
 We can write these features in a simple manner. By default, all variables are
 vectors, as there are several parameters to readjust.

@@ -104,7 +119,7 @@ that:

 The errors represented here are not only those from observation, but also from
 the simulation. We can always consider that these errors are of zero mean. We
-can then define a matrix :math:`\mathbf{R}` of the observation error covariance
+can then define a matrix :math:`\mathbf{R}` of the observation error covariances
 by:

 .. math:: \mathbf{R} = E[\mathbf{\epsilon}^o.{\mathbf{\epsilon}^o}^T]
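
As an illustrative aside (not part of this patch): the "background error
covariances" indexed above follow the same construction, applied to the
background error :math:`\mathbf{\epsilon}^b` instead of the observation error,
which would read, with the same notation as for :math:`\mathbf{R}`:

.. math:: \mathbf{B} = E[\mathbf{\epsilon}^b.{\mathbf{\epsilon}^b}^T]
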
diff --git a/doc/using.rst b/doc/using.rst
index 964923e..d1e40e6 100644
--- a/doc/using.rst
+++ b/doc/using.rst
@@ -205,10 +205,13 @@ Reference description of the commands and keywords available through the GUI

 Each command or keyword to be defined through the ADAO GUI has some properties.
 The first property is to be a required command, an optional command or a keyword
-describing a type. The second property is to be an "open" variable with a fixed
-type but with any value allowed by the type, or a "restricted" variable, limited
-to some specified values. The mathematical notations are explained in the
-section :ref:`section_theory`.
+describing a type of input. The second property is to be an "open" variable with
+a fixed type but with any value allowed by the type, or a "restricted" variable,
+limited to some specified values. The mathematical notations used afterwards are
+explained in the section :ref:`section_theory`.
+
+List of possible input types
+++++++++++++++++++++++++++++

 The different type-style commands are:

@@ -235,6 +238,9 @@ The different type-style commands are:
    *Type of an input*. This indicates a variable that has to be filled by a
    vector, usually given either as a string or as a script.

+List of commands
+++++++++++++++++
+
 The different commands are the following:

 :ASSIM_STUDY:
@@ -244,12 +250,14 @@ The different commands are the following:
 :Algorithm:
    *Required command*. This is a string to indicate the data assimilation
    algorithm chosen. The choices are limited and available through the GUI.
-   There exists for example: "3DVAR", "Blue", "EnsembleBlue", "KalmanFilter".
+   There are for example "3DVAR", "Blue"... See below for the list of
+   algorithms and associated parameters.

 :AlgorithmParameters:
    *Optional command*. This command allows adding some optional parameters to
    control the data assimilation algorithm calculation. It is defined as a
-   "*Dict*" type object.
+   "*Dict*" type object. See below for the list of algorithms and associated
+   parameters.

 :Background:
    *Required command*. This indicates the background vector used for data
@@ -312,6 +320,101 @@ The different commands are the following:
    a script or a string, allowing to put simple code directly inside the ADAO
    case.

+.. _subsection_algo_options:
+
+List of possible options for the algorithms
++++++++++++++++++++++++++++++++++++++++++++
+
+Each algorithm can be controlled using some generic or specific options, given
+through the "*AlgorithmParameters*" optional command, as follows::
+
+  AlgorithmParameters = {
+    "Minimizer" : "CG",
+    "MaximumNumberOfSteps" : 10,
+    }
+
+This section describes the available options by algorithm. If an option is
+specified for an algorithm that doesn't support it, the option is simply left
+unused.
+
+:"Blue":
+  no option
+
+:"LinearLeastSquares":
+  no option
+
+:"3DVAR":
+
+  :Minimizer:
+    This key allows choosing the optimization minimizer. The default choice is
+    "LBFGSB", and the possible ones are "LBFGSB" (nonlinear constrained
+    minimizer, see [Byrd95] and [Zhu97]), "TNC" (nonlinear constrained
+    minimizer), "CG" (nonlinear unconstrained minimizer), "BFGS" (nonlinear
+    unconstrained minimizer) and "NCG" (Newton CG minimizer).
+
+  :Bounds:
+    This key allows defining upper and lower bounds for every control variable
+    being optimized. Bounds can be given by a list of pairs of lower/upper
+    bounds for each variable, using ``None`` each time there is no bound.
+    The bounds can always be specified, but they are taken into account only
+    by the constrained minimizers.
+
+  :MaximumNumberOfSteps:
+    This key indicates the maximum number of iterations allowed for iterative
+    optimization. The default is 15000, which is very similar to no limit on
+    iterations. It is then recommended to adapt this parameter to the needs of
+    real problems. For some algorithms, the effective stopping step can be
+    slightly different due to algorithm internal control requirements.
+
+  :ProjectedGradientTolerance:
+    This key indicates a limit value: the iterative optimization process stops
+    successfully when all the components of the projected gradient are below
+    this limit. It is only used for constrained minimizers.
+
+  :GradientNormTolerance:
+    This key indicates a limit value: the iterative optimization process stops
+    successfully when the norm of the gradient is below this limit. It is only
+    used for unconstrained minimizers.
+
+:"NonLinearLeastSquares":
+
+  :Minimizer:
+    This key allows choosing the optimization minimizer. The default choice is
+    "LBFGSB", and the possible ones are "LBFGSB" (nonlinear constrained
+    minimizer, see [Byrd95] and [Zhu97]), "TNC" (nonlinear constrained
+    minimizer), "CG" (nonlinear unconstrained minimizer), "BFGS" (nonlinear
+    unconstrained minimizer) and "NCG" (Newton CG minimizer).
+
+  :Bounds:
+    This key allows defining upper and lower bounds for every control variable
+    being optimized. Bounds can be given by a list of pairs of lower/upper
+    bounds for each variable, using ``None`` each time there is no bound. The
+    bounds can always be specified, but they are taken into account only by
+    the constrained minimizers.
+
+  :MaximumNumberOfSteps:
+    This key indicates the maximum number of iterations allowed for iterative
+    optimization. The default is 15000, which is very similar to no limit on
+    iterations. It is then recommended to adapt this parameter to the needs of
+    real problems. For some algorithms, the effective stopping step can be
+    slightly different due to algorithm internal control requirements.
+
+  :ProjectedGradientTolerance:
+    This key indicates a limit value: the iterative optimization process stops
+    successfully when all the components of the projected gradient are below
+    this limit. It is only used for constrained minimizers.
+
+  :GradientNormTolerance:
+    This key indicates a limit value: the iterative optimization process stops
+    successfully when the norm of the gradient is below this limit. It is only
+    used for unconstrained minimizers.
+
+:"EnsembleBlue":
+  no option
+
+:"KalmanFilter":
+  no option
+
 Examples of using these commands are available in the section
 :ref:`section_examples` and in example files installed with the ADAO module.
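
For instance (an illustrative sketch, not taken from this patch), an
"*AlgorithmParameters*" dictionary combining the "3DVAR" options described
above, for two control variables of which only the first is bounded, could
read::

    AlgorithmParameters = {
        "Minimizer"            : "LBFGSB",  # constrained minimizer, so "Bounds" is used
        "Bounds"               : [[0., 10.], [None, None]],  # one [lower, upper] pair per variable
        "MaximumNumberOfSteps" : 100,
        }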