From c9babdbe03385d9a6a0f89f725c7046ea379d74b Mon Sep 17 00:00:00 2001 From: Jean-Philippe ARGAUD Date: Fri, 23 Sep 2022 14:28:22 +0200 Subject: [PATCH] Code and documentation update --- bin/AdaoCatalogGenerator.py | 9 +- bin/module_version.py | 2 +- doc/en/advanced.rst | 26 ++- doc/en/bibliography.rst | 14 +- doc/en/conf.py | 3 + doc/en/examples.rst | 64 ++++++ doc/en/index.rst | 32 +-- doc/en/ref_algorithm_3DVAR.rst | 16 +- doc/en/ref_algorithm_4DVAR.rst | 1 + doc/en/ref_algorithm_AdjointTest.rst | 40 +++- doc/en/ref_algorithm_Blue.rst | 1 + ...f_algorithm_DerivativeFreeOptimization.rst | 1 + .../ref_algorithm_DifferentialEvolution.rst | 1 + doc/en/ref_algorithm_EnsembleBlue.rst | 1 + doc/en/ref_algorithm_EnsembleKalmanFilter.rst | 21 +- doc/en/ref_algorithm_ExtendedBlue.rst | 1 + doc/en/ref_algorithm_ExtendedKalmanFilter.rst | 1 + doc/en/ref_algorithm_FunctionTest.rst | 1 + doc/en/ref_algorithm_GradientTest.rst | 1 + doc/en/ref_algorithm_KalmanFilter.rst | 1 + doc/en/ref_algorithm_LinearLeastSquares.rst | 1 + doc/en/ref_algorithm_LinearityTest.rst | 1 + doc/en/ref_algorithm_LocalSensitivityTest.rst | 1 + ...thm_MeasurementsOptimalPositioningTask.rst | 109 ++++++++++ .../ref_algorithm_NonLinearLeastSquares.rst | 1 + doc/en/ref_algorithm_ParallelFunctionTest.rst | 1 + ...ef_algorithm_ParticleSwarmOptimization.rst | 1 + doc/en/ref_algorithm_QuantileRegression.rst | 1 + doc/en/ref_algorithm_SamplingTest.rst | 1 + doc/en/ref_algorithm_TabuSearch.rst | 1 + doc/en/ref_algorithm_TangentTest.rst | 1 + .../ref_algorithm_UnscentedKalmanFilter.rst | 1 + doc/en/ref_assimilation_keywords.rst | 5 +- doc/en/ref_checking_keywords.rst | 6 +- doc/en/ref_task_keywords.rst | 45 ++++ doc/en/reference.rst | 20 ++ doc/en/scripts/simple_AdjointTest.py | 17 ++ doc/en/scripts/simple_AdjointTest.res | 37 ++++ doc/en/scripts/simple_AdjointTest.rst | 14 ++ doc/en/scripts/simple_FunctionTest.res | 10 +- .../scripts/simple_ParallelFunctionTest.res | 10 +- 
doc/en/snippets/AlgorithmParameters.rst | 11 +- doc/en/snippets/Analysis.rst | 2 +- doc/en/snippets/EnsembleOfSnapshots.rst | 12 ++ doc/en/snippets/ErrorNorm.rst | 9 + doc/en/snippets/ErrorNormTolerance.rst | 12 ++ doc/en/snippets/ExcludeLocations.rst | 11 + doc/en/snippets/Header2Algo03AdOp.rst | 2 +- doc/en/snippets/Header2Algo03Chck.rst | 2 +- doc/en/snippets/Header2Algo03Task.rst | 8 + doc/en/snippets/Header2Algo09.rst | 4 +- .../snippets/HybridCostDecrementTolerance.rst | 13 ++ .../snippets/HybridCovarianceEquilibrium.rst | 9 + .../HybridMaximumNumberOfIterations.rst | 14 ++ doc/en/snippets/MaximumNumberOfLocations.rst | 11 + doc/en/snippets/OptimalPoints.rst | 9 + doc/en/snippets/ReducedBasis.rst | 10 + doc/en/snippets/Residu.rst | 2 +- doc/en/snippets/Residus.rst | 8 + doc/en/snippets/Variant_EnKF.rst | 13 +- doc/en/tutorials_in_python.rst | 7 +- doc/en/tutorials_in_salome.rst | 2 +- doc/fr/advanced.rst | 33 +-- doc/fr/bibliography.rst | 14 +- doc/fr/conf.py | 3 + doc/fr/examples.rst | 65 ++++++ doc/fr/index.rst | 41 ++-- doc/fr/ref_algorithm_3DVAR.rst | 9 +- doc/fr/ref_algorithm_4DVAR.rst | 1 + doc/fr/ref_algorithm_AdjointTest.rst | 15 ++ doc/fr/ref_algorithm_Blue.rst | 2 + ...f_algorithm_DerivativeFreeOptimization.rst | 1 + .../ref_algorithm_DifferentialEvolution.rst | 1 + doc/fr/ref_algorithm_EnsembleBlue.rst | 1 + doc/fr/ref_algorithm_EnsembleKalmanFilter.rst | 23 ++- doc/fr/ref_algorithm_ExtendedBlue.rst | 2 + doc/fr/ref_algorithm_ExtendedKalmanFilter.rst | 1 + doc/fr/ref_algorithm_FunctionTest.rst | 2 + doc/fr/ref_algorithm_GradientTest.rst | 1 + doc/fr/ref_algorithm_KalmanFilter.rst | 3 + doc/fr/ref_algorithm_LinearLeastSquares.rst | 1 + doc/fr/ref_algorithm_LinearityTest.rst | 1 + doc/fr/ref_algorithm_LocalSensitivityTest.rst | 1 + ...thm_MeasurementsOptimalPositioningTask.rst | 111 ++++++++++ .../ref_algorithm_NonLinearLeastSquares.rst | 2 + doc/fr/ref_algorithm_ParallelFunctionTest.rst | 2 + ...ef_algorithm_ParticleSwarmOptimization.rst | 1 + 
doc/fr/ref_algorithm_QuantileRegression.rst | 1 + doc/fr/ref_algorithm_SamplingTest.rst | 1 + doc/fr/ref_algorithm_TabuSearch.rst | 1 + doc/fr/ref_algorithm_TangentTest.rst | 1 + .../ref_algorithm_UnscentedKalmanFilter.rst | 1 + doc/fr/ref_assimilation_keywords.rst | 4 +- doc/fr/ref_checking_keywords.rst | 8 +- doc/fr/ref_task_keywords.rst | 46 +++++ doc/fr/reference.rst | 22 ++ doc/fr/scripts/simple_AdjointTest.py | 17 ++ doc/fr/scripts/simple_AdjointTest.res | 37 ++++ doc/fr/scripts/simple_AdjointTest.rst | 15 ++ doc/fr/scripts/simple_Blue.py | 8 +- doc/fr/scripts/simple_ExtendedBlue.py | 8 +- doc/fr/scripts/simple_FunctionTest.py | 2 +- doc/fr/scripts/simple_FunctionTest.res | 10 +- doc/fr/scripts/simple_FunctionTest.rst | 4 +- .../scripts/simple_ParallelFunctionTest.res | 10 +- doc/fr/snippets/AlgorithmParameters.rst | 16 +- doc/fr/snippets/CostDecrementTolerance.rst | 6 +- doc/fr/snippets/EnsembleOfSnapshots.rst | 13 ++ doc/fr/snippets/ErrorNorm.rst | 9 + doc/fr/snippets/ErrorNormTolerance.rst | 13 ++ doc/fr/snippets/ExcludeLocations.rst | 11 + doc/fr/snippets/Header2Algo03AdOp.rst | 2 +- doc/fr/snippets/Header2Algo03Chck.rst | 2 +- doc/fr/snippets/Header2Algo03Task.rst | 9 + doc/fr/snippets/Header2Algo09.rst | 4 +- .../snippets/HybridCostDecrementTolerance.rst | 13 ++ .../snippets/HybridCovarianceEquilibrium.rst | 10 + .../HybridMaximumNumberOfIterations.rst | 15 ++ doc/fr/snippets/MaximumNumberOfLocations.rst | 12 ++ doc/fr/snippets/OptimalPoints.rst | 10 + doc/fr/snippets/ReducedBasis.rst | 10 + doc/fr/snippets/Residu.rst | 2 +- doc/fr/snippets/Residus.rst | 9 + doc/fr/snippets/Variant_EnKF.rst | 13 +- doc/fr/tutorials_in_python.rst | 14 +- src/daComposant/daAlgorithms/3DVAR.py | 2 +- src/daComposant/daAlgorithms/4DVAR.py | 2 +- src/daComposant/daAlgorithms/AdjointTest.py | 94 +++++---- src/daComposant/daAlgorithms/Atoms/ecweim.py | 193 ++++++++++++++++++ src/daComposant/daAlgorithms/EnsembleBlue.py | 1 + .../daAlgorithms/EnsembleKalmanFilter.py | 3 +- 
src/daComposant/daAlgorithms/FunctionTest.py | 71 ++++--- .../MeasurementsOptimalPositioningTask.py | 111 ++++++++++ .../daAlgorithms/ParallelFunctionTest.py | 71 ++++--- src/daComposant/daAlgorithms/SamplingTest.py | 43 +--- .../daAlgorithms/UnscentedKalmanFilter.py | 1 + src/daComposant/daCore/BasicObjects.py | 33 +-- src/daComposant/daCore/Interfaces.py | 18 +- src/daComposant/daCore/NumericObjects.py | 52 ++++- src/daComposant/daCore/version.py | 2 +- .../daYacsSchemaCreator/infos_daComposant.py | 1 + 141 files changed, 1779 insertions(+), 337 deletions(-) create mode 100644 doc/en/examples.rst create mode 100644 doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst create mode 100644 doc/en/ref_task_keywords.rst create mode 100644 doc/en/scripts/simple_AdjointTest.py create mode 100644 doc/en/scripts/simple_AdjointTest.res create mode 100644 doc/en/scripts/simple_AdjointTest.rst create mode 100644 doc/en/snippets/EnsembleOfSnapshots.rst create mode 100644 doc/en/snippets/ErrorNorm.rst create mode 100644 doc/en/snippets/ErrorNormTolerance.rst create mode 100644 doc/en/snippets/ExcludeLocations.rst create mode 100644 doc/en/snippets/Header2Algo03Task.rst create mode 100644 doc/en/snippets/HybridCostDecrementTolerance.rst create mode 100644 doc/en/snippets/HybridCovarianceEquilibrium.rst create mode 100644 doc/en/snippets/HybridMaximumNumberOfIterations.rst create mode 100644 doc/en/snippets/MaximumNumberOfLocations.rst create mode 100644 doc/en/snippets/OptimalPoints.rst create mode 100644 doc/en/snippets/ReducedBasis.rst create mode 100644 doc/en/snippets/Residus.rst create mode 100644 doc/fr/examples.rst create mode 100644 doc/fr/ref_algorithm_MeasurementsOptimalPositioningTask.rst create mode 100644 doc/fr/ref_task_keywords.rst create mode 100644 doc/fr/scripts/simple_AdjointTest.py create mode 100644 doc/fr/scripts/simple_AdjointTest.res create mode 100644 doc/fr/scripts/simple_AdjointTest.rst create mode 100644 doc/fr/snippets/EnsembleOfSnapshots.rst 
create mode 100644 doc/fr/snippets/ErrorNorm.rst create mode 100644 doc/fr/snippets/ErrorNormTolerance.rst create mode 100644 doc/fr/snippets/ExcludeLocations.rst create mode 100644 doc/fr/snippets/Header2Algo03Task.rst create mode 100644 doc/fr/snippets/HybridCostDecrementTolerance.rst create mode 100644 doc/fr/snippets/HybridCovarianceEquilibrium.rst create mode 100644 doc/fr/snippets/HybridMaximumNumberOfIterations.rst create mode 100644 doc/fr/snippets/MaximumNumberOfLocations.rst create mode 100644 doc/fr/snippets/OptimalPoints.rst create mode 100644 doc/fr/snippets/ReducedBasis.rst create mode 100644 doc/fr/snippets/Residus.rst create mode 100644 src/daComposant/daAlgorithms/Atoms/ecweim.py create mode 100644 src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py diff --git a/bin/AdaoCatalogGenerator.py b/bin/AdaoCatalogGenerator.py index 2869d62..2c25a63 100644 --- a/bin/AdaoCatalogGenerator.py +++ b/bin/AdaoCatalogGenerator.py @@ -428,7 +428,7 @@ algos_names = "" optim_names = "" reduc_names = "" check_names = "" -decl_algos = "" +task_names = "" adao_all_names = "" assim_study_object = daCore.Aidsm.Aidsm() algos_list = assim_study_object._Aidsm__get_available_algorithms() @@ -446,7 +446,10 @@ for algo_name in algos_list: if algo_name in infos.CheckAlgos: logging.debug("A checking algorithm is found: " + algo_name) check_names += "\"" + algo_name + "\", " - if algo_name in infos.AssimAlgos+infos.OptimizationAlgos+infos.ReductionAlgos+infos.CheckAlgos: + if algo_name in infos.TaskAlgos: + logging.debug("A task algorithm is found: " + algo_name) + task_names += "\"" + algo_name + "\", " + if algo_name in infos.AssimAlgos+infos.OptimizationAlgos+infos.ReductionAlgos+infos.CheckAlgos+infos.TaskAlgos: # Pour filtrer sur les algorithmes vraiment interfacés, car il peut y en avoir moins que "algos_list" adao_all_names += "\"" + algo_name + "\", " @@ -585,7 +588,7 @@ mem_file.write(unicode(assim_study, 'utf-8').format(**{ 'optim_names':optim_names, 
'reduc_names':reduc_names, 'check_names':check_names, - 'decl_algos':decl_algos, + 'task_names':task_names, })) # Final step: On écrit le fichier diff --git a/bin/module_version.py b/bin/module_version.py index 87b8017..7a5b255 100644 --- a/bin/module_version.py +++ b/bin/module_version.py @@ -31,7 +31,7 @@ __all__ = [] name = "ADAO" version = "9.10.0" year = "2022" -date = "lundi 12 décembre 2022, 12:12:12 (UTC+0100)" +date = "lundi 14 novembre 2022, 12:12:12 (UTC+0100)" longname = name + ", a module for Data Assimilation and Optimization" cata = "V" + version.replace(".","_") diff --git a/doc/en/advanced.rst b/doc/en/advanced.rst index 8be42b1..b96cb8d 100644 --- a/doc/en/advanced.rst +++ b/doc/en/advanced.rst @@ -33,6 +33,8 @@ interface (GUI). It requires to know how to find files or commands included inside the whole SALOME installation. All the names to be replaced by user are indicated by the syntax ``<...>``. +.. _section_advanced_convert_JDC: + Converting and executing an ADAO command file (JDC) using a Shell script ------------------------------------------------------------------------ @@ -128,6 +130,8 @@ following Shell script:: In all cases, the standard output and errors come in the launching terminal. +.. _section_advanced_YACS_tui: + Running an ADAO calculation scheme in YACS using the text user mode (YACS TUI) ------------------------------------------------------------------------------ @@ -189,12 +193,14 @@ Running an ADAO calculation in R environment using the TUI ADAO interface .. index:: single: R .. index:: single: rPython +.. index:: single: reticulate To extend the analysis and treatment capacities, it is possible to use ADAO calculations in **R** environment (see [R]_ for more details). It is available in SALOME by launching the R interpreter in the shell "``salome shell``". 
-Moreover, the package "*rPython*" has to be available, it can be installed by -the user if required by the following R command:: +Moreover, the package "*rPython*" (or the more recent "*reticulate*" one) has +to be available, it can be installed by the user if required by the following R +command:: # # IMPORTANT: to be run in R interpreter @@ -304,18 +310,20 @@ commands are used:: from adao import adaoBuilder adaoBuilder.Gui() -If necessary, explicit messages can be used to identify the required -environment variables that are missing. However, this command should not be run -in the SALOME Python console (because in this case it is enough to activate the -module since we already are in the graphical environment...) but it can be done -in a "SALOME shell" session obtained from the "Tools/Extensions" menu of -SALOME. As a reminder, the easiest way to get a Python interpreter included in -a "SALOME shell" session is to run the following command in a terminal:: +As a reminder, the easiest way to get a Python interpreter included in a +"SALOME shell" session is to run the following command in a terminal:: $SALOMEDIR/salome shell -- python with ``SALOMEDIR`` the ````. +If necessary, explicit messages can be used to identify the required +environment variables that are missing. However, **this command should not be +run in the SALOME Python console** (because in this case it is enough to +activate the module since we already are in the graphical environment...) or in +an independant Python install, but it can be run in a "SALOME shell" session +obtained from the "Tools/Extensions" menu of SALOME. + .. _section_advanced_execution_mode: Change the default execution mode of nodes in YACS diff --git a/doc/en/bibliography.rst b/doc/en/bibliography.rst index 870785f..62c23f2 100644 --- a/doc/en/bibliography.rst +++ b/doc/en/bibliography.rst @@ -39,7 +39,7 @@ Bibliography .. 
[Buchinsky98] Buchinsky M., *Recent Advances in Quantile Regression Models: A Practical Guidline for Empirical Research*, Journal of Human Resources, 33(1), pp.88-126, 1998 -.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126, 1719–1724, 1998 +.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126(6), pp.1719–1724, 1998 .. [Byrd95] Byrd R. H., Lu P., Nocedal J., *A Limited Memory Algorithm for Bound Constrained Optimization*, SIAM Journal on Scientific and Statistical Computing, 16(5), pp.1190-1208, 1995 @@ -49,9 +49,9 @@ Bibliography .. [Cohn98] Cohn S. E., Da Silva A., Guo J., Sienkiewicz M., Lamich D., *Assessing the effects of data selection with the DAO Physical-space Statistical Analysis System*, Monthly Weather Review, 126, pp.2913–2926, 1998 -.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120, pp.1367–1387, 1994 +.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120(519), pp.1367–1387, 1994 -.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123, pp.2449-2461, 1997 +.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123(544), pp.2249-2261, 1997 .. [Das11] Das S., Suganthan P. N., *Differential Evolution: A Survey of the State-of-the-art*, IEEE Transactions on Evolutionary Computation, 15(1), pp.4-31, 2011 @@ -69,6 +69,8 @@ Bibliography .. 
[Glover90] Glover F., *Tabu Search-Part II*, ORSA Journal on Computing, 2(1), pp.4-32, 1990 +.. [Hamill00] Hamill T. M., Snyder C., *A Hybrid Ensemble Kalman Filter-3D Variational Analysis Scheme*, Monthly Weather Review, 128(8), pp.2905-2919, 2000 + .. [Ide97] Ide K., Courtier P., Ghil M., Lorenc A. C., *Unified notation for data assimilation: operational, sequential and variational*, Journal of the Meteorological Society of Japan, 75(1B), pp.181-189, 1997 .. [Jazwinski70] Jazwinski A. H., *Stochastic Processes and Filtering Theory*, Academic Press, 1970 @@ -85,9 +87,9 @@ Bibliography .. [Lions68] Lions J.-L., *Optimal Control of Systems Governed by Partial Differential Equations*, Springer, 1971 -.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112, pp.1177-1194, 1986 +.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112(474), pp.1177-1194, 1986 -.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114, pp.205–240, 1988 +.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114(479), pp.205–240, 1988 .. [Morales11] Morales J. L., Nocedal J., *L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 38(1), 2011 @@ -151,5 +153,5 @@ Bibliography .. [Zhu97] Zhu C., Byrd R. H., Nocedal J., *L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 23(4), pp.550-560, 1997 -.. [Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133, pp.1710–1726, 2005 +.. 
[Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133(6), pp.1710–1726, 2005 diff --git a/doc/en/conf.py b/doc/en/conf.py index d68ba5e..6b41869 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -23,6 +23,7 @@ # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D import sys, os, time, sphinx, logging +# logging.getLogger().setLevel(logging.DEBUG) # -- Module version information -------------------------------------------------- @@ -61,8 +62,10 @@ from distutils.version import LooseVersion #, StrictVersion __lv = LooseVersion(sphinx.__version__) if __lv < LooseVersion("1.4.0"): extensions = ['sphinx.ext.pngmath'] + logging.debug('Using "%s" extensions'%(extensions,)) else: extensions = ['sphinx.ext.imgmath'] + logging.debug('Using "%s" extensions'%(extensions,)) try: import sphinx_rtd_theme extensions += ['sphinx_rtd_theme'] diff --git a/doc/en/examples.rst b/doc/en/examples.rst new file mode 100644 index 0000000..3c60dc0 --- /dev/null +++ b/doc/en/examples.rst @@ -0,0 +1,64 @@ +.. + Copyright (C) 2008-2022 EDF R&D + + This file is part of SALOME ADAO module. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com + + Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +.. _section_docu_examples: + +================================================================================ +**[DocU]** Examples of ADAO use +================================================================================ + +To make reading or searching easier, this section gathers in a synthetic way +the direct pointers, to the appropriate sections, of simple examples of use of +the module (mainly but not only in TUI). + +These examples are available in the tutorials, in the detailed documentations +of algorithms or applied problems, and in the advanced uses. + +Tutorials +--------- + +#. :ref:`section_tutorials_in_salome` +#. :ref:`section_tutorials_in_python` + +Calculation algorithms uses +--------------------------- + +#. :ref:`Examples with the "3DVAR" algorithm` +#. :ref:`Examples with the "Blue" algorithm` +#. :ref:`Examples with the "ExtendedBlue" algorithm` +#. :ref:`Examples with the "KalmanFilter" algorithm` +#. :ref:`Examples with the "NonLinearLeastSquares" algorithm` + +Checking algorithms uses +------------------------ + +#. :ref:`Examples with the "AdjointTest" check` +#. :ref:`Examples with the "FunctionTest" check` +#. :ref:`Examples with the "ParallelFunctionTest" check` + +Advanced uses +------------- + +#. :ref:`section_advanced_convert_JDC` +#. :ref:`section_advanced_YACS_tui` +#. :ref:`section_advanced_R` diff --git a/doc/en/index.rst b/doc/en/index.rst index 3ceefd8..7716f7a 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -48,27 +48,30 @@ more than one hundred different algorithmic methods and allows the study of about 350 distinct applied problems. 
The documentation for this module is divided into several major categories, -related to the theoretical documentation (indicated in the section title by -**[DocT]**), to the user documentation (indicated in the section title by -**[DocU]**), and to the reference documentation (indicated in the section title -by **[DocR]**). +related to the **theoretical documentation** (indicated in the section title by +**[DocT]**), to the **user documentation** (indicated in the section title by +**[DocU]**), and to the **reference documentation** (indicated in the section +title by **[DocR]**). The first part is the :ref:`section_intro`. The second part introduces :ref:`section_theory`, and their concepts, and the next part describes the -:ref:`section_methodology`. For a standard user, the next parts describe -examples on ADAO usage as :ref:`section_tutorials_in_salome` or -:ref:`section_tutorials_in_python`, then indicates the :ref:`section_advanced`, -with how to obtain additional information or how to use non-GUI command -execution scripting. Users interested in quick use of the module can stop -before reading the rest, but a valuable use of the module requires to read and -come back regularly to these parts. The following parts describe +:ref:`section_methodology`. For a standard user, the next parts describe some +:ref:`section_docu_examples`, quickly accessible by the collection of pointers +to the subsections. Didactic presentations are detailed in +:ref:`section_tutorials_in_salome` or :ref:`section_tutorials_in_python`, +supplemented by information on the :ref:`section_advanced` with how to obtain +additional information or how to use non-GUI command execution scripting. + +Users interested in quick access to the module can stop before reading the +remaining parts, but a valuable use of the module requires reading and +returning to these parts regularly. The following parts describe :ref:`section_gui_in_salome` and :ref:`section_tui`. 
The last main part gives a detailed :ref:`section_reference`, with three essential main sub-parts describing the details of commands and options of the algorithms. A :ref:`section_glossary`, some :ref:`section_notations`, a -:ref:`section_bibliography` and an extensive index are included in -the document. And, to comply with the module requirements, be sure to read the -part :ref:`section_license`. +:ref:`section_bibliography` and an extensive index are included in the +document. And, to comply with the module requirements, be sure to read the part +:ref:`section_license`. .. toctree:: :caption: Table of contents @@ -79,6 +82,7 @@ part :ref:`section_license`. intro theory methodology + examples tutorials_in_salome tutorials_in_python advanced diff --git a/doc/en/ref_algorithm_3DVAR.rst b/doc/en/ref_algorithm_3DVAR.rst index 0d3b290..76d0ea8 100644 --- a/doc/en/ref_algorithm_3DVAR.rst +++ b/doc/en/ref_algorithm_3DVAR.rst @@ -61,11 +61,16 @@ This algorithm is naturally written for a single estimate, without any dynamic or iterative notion (there is no need in this case for an incremental evolution operator, nor for an evolution error covariance). In ADAO, it can also be used on a succession of observations, placing the estimate in a recursive framework -partly similar to a :ref:`section_ref_algorithm_KalmanFilter`. A standard -estimate is made at each observation step on the state predicted by the -incremental evolution model, knowing that the state error covariance remains -the background covariance initially provided by the user. To be explicit, -unlike Kalman-type filters, the state error covariance is not updated. +similar to a :ref:`section_ref_algorithm_KalmanFilter`. A standard estimate is +made at each observation step on the state predicted by the incremental +evolution model, knowing that the state error covariance remains the background +covariance initially provided by the user. 
To be explicit, unlike Kalman-type +filters, the state error covariance is not updated. + +An extension of 3DVAR, coupling a 3DVAR méthod with a Kalman ensemble filter, +allows to improve the estimation of *a posteriori* error covariances. This +extension is obtained by using the "E3DVAR" variant of the filtering algorithm +:ref:`section_ref_algorithm_EnsembleKalmanFilter`. .. ------------------------------------ .. .. include:: snippets/Header2Algo02.rst @@ -246,6 +251,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulationQuantiles.rst .. ------------------------------------ .. +.. _section_ref_algorithm_3DVAR_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_3DVAR.rst diff --git a/doc/en/ref_algorithm_4DVAR.rst b/doc/en/ref_algorithm_4DVAR.rst index 9bba847..bf9ece4 100644 --- a/doc/en/ref_algorithm_4DVAR.rst +++ b/doc/en/ref_algorithm_4DVAR.rst @@ -154,6 +154,7 @@ StoreSupplementaryCalculations .. include:: snippets/IndexOfOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_4DVAR_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_3DVAR` diff --git a/doc/en/ref_algorithm_AdjointTest.rst b/doc/en/ref_algorithm_AdjointTest.rst index 0a1dfa1..416da22 100644 --- a/doc/en/ref_algorithm_AdjointTest.rst +++ b/doc/en/ref_algorithm_AdjointTest.rst @@ -30,25 +30,36 @@ Checking algorithm "*AdjointTest*" .. ------------------------------------ .. .. include:: snippets/Header2Algo01.rst -This algorithm allows to check the quality of the adjoint operator, by -calculating a residue with known theoretical properties. +This algorithm allows to check the quality of the adjoint of an operator +:math:`F`, by computing a residue whose theoretical properties are known. The +test is applicable to any operator, of evolution or observation. 
+ +For all formulas, with :math:`\mathbf{x}` the current verification point, we +take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and +:math:`\mathbf{dx}=\alpha_0*\mathbf{dx}_0` with :math:`\alpha_0` a scaling user +parameter, defaulting to 1. :math:`F` is the computational operator or code +(which is here acquired by the observation operator command +"*ObservationOperator*"). One can observe the following residue, which is the difference of two scalar products: .. math:: R(\alpha) = | < TangentF_x(\mathbf{dx}) , \mathbf{y} > - < \mathbf{dx} , AdjointF_x(\mathbf{y}) > | -that has to remain equal to zero at the calculation precision. One take -:math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and -:math:`\mathbf{dx}=\alpha*\mathbf{dx}_0`. :math:`F` is the calculation code. -:math:`\mathbf{y}` has to be in the image of :math:`F`. If it is not given, one -take :math:`\mathbf{y} = F(\mathbf{x})`. +in which the optional quantity :math:`\mathbf{y}` must be in the image of +:math:`F`. If it is not given, we take its default evaluation :math:`\mathbf{y} += F(\mathbf{x})`. + +This residue must remain constantly equal to zero at the accuracy of the +calculation. .. ------------------------------------ .. .. include:: snippets/Header2Algo02.rst .. include:: snippets/CheckingPoint.rst +.. include:: snippets/Observation.rst + .. include:: snippets/ObservationOperator.rst .. ------------------------------------ .. @@ -60,6 +71,8 @@ take :math:`\mathbf{y} = F(\mathbf{x})`. .. include:: snippets/InitialDirection.rst +.. include:: snippets/NumberOfPrintedDigits.rst + .. include:: snippets/SetSeed.rst StoreSupplementaryCalculations @@ -97,6 +110,19 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst +.. ------------------------------------ .. +.. _section_ref_algorithm_AdjointTest_examples: +.. include:: snippets/Header2Algo09.rst + +.. include:: scripts/simple_AdjointTest.rst + +.. literalinclude:: scripts/simple_AdjointTest.py + +.. 
include:: snippets/Header2Algo10.rst + +.. literalinclude:: scripts/simple_AdjointTest.res + :language: none + .. ------------------------------------ .. .. include:: snippets/Header2Algo06.rst diff --git a/doc/en/ref_algorithm_Blue.rst b/doc/en/ref_algorithm_Blue.rst index cc264af..c30cf68 100644 --- a/doc/en/ref_algorithm_Blue.rst +++ b/doc/en/ref_algorithm_Blue.rst @@ -208,6 +208,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulationQuantiles.rst .. ------------------------------------ .. +.. _section_ref_algorithm_Blue_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_Blue.rst diff --git a/doc/en/ref_algorithm_DerivativeFreeOptimization.rst b/doc/en/ref_algorithm_DerivativeFreeOptimization.rst index 504447f..8797d45 100644 --- a/doc/en/ref_algorithm_DerivativeFreeOptimization.rst +++ b/doc/en/ref_algorithm_DerivativeFreeOptimization.rst @@ -167,6 +167,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_DerivativeFreeOptimization_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_ParticleSwarmOptimization` diff --git a/doc/en/ref_algorithm_DifferentialEvolution.rst b/doc/en/ref_algorithm_DifferentialEvolution.rst index 06acf3f..1489c0d 100644 --- a/doc/en/ref_algorithm_DifferentialEvolution.rst +++ b/doc/en/ref_algorithm_DifferentialEvolution.rst @@ -175,6 +175,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_DifferentialEvolution_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_DerivativeFreeOptimization` diff --git a/doc/en/ref_algorithm_EnsembleBlue.rst b/doc/en/ref_algorithm_EnsembleBlue.rst index 1bf6c6b..7493f39 100644 --- a/doc/en/ref_algorithm_EnsembleBlue.rst +++ b/doc/en/ref_algorithm_EnsembleBlue.rst @@ -110,6 +110,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_EnsembleBlue_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_Blue` diff --git a/doc/en/ref_algorithm_EnsembleKalmanFilter.rst b/doc/en/ref_algorithm_EnsembleKalmanFilter.rst index bbbe1ee..6e0d36a 100644 --- a/doc/en/ref_algorithm_EnsembleKalmanFilter.rst +++ b/doc/en/ref_algorithm_EnsembleKalmanFilter.rst @@ -27,9 +27,6 @@ Calculation algorithm "*EnsembleKalmanFilter*" ---------------------------------------------- -.. ------------------------------------ .. -.. include:: snippets/Header2Algo00.rst - .. ------------------------------------ .. .. 
include:: snippets/Header2Algo01.rst @@ -64,17 +61,21 @@ robust formulations are proposed here: pair: Variant ; ETKF-N pair: Variant ; MLEF pair: Variant ; IEnKF + pair: Variant ; E3DVAR + pair: Variant ; EnKS - "EnKF" (Ensemble Kalman Filter, see [Evensen94]_), original stochastic algorithm, allowing consistent treatment of non-linear evolution operator, - "ETKF" (Ensemble-Transform Kalman Filter), deterministic EnKF algorithm, allowing treatment of non-linear evolution operator with a lot less members (one recommends to use a number of members on the order of 10 or even sometimes less), - "ETKF-N" (Ensemble-Transform Kalman Filter of finite size N), ETKF algorithm of "finite size N", yhat doesn't need inflation that is often required with the other algorithms, - "MLEF" (Maximum Likelihood Kalman Filter, see [Zupanski05]_), deterministic EnKF algorithm, allowing in addition the consistent treament of non-linear observation operator, - "IEnKF" (Iterative EnKF), deterministic EnKF algorithm, improving treament of operators non-linearities +- "E3DVAR" (EnKF 3DVAR), algorithm coupling ensemble and variational assimilation, which uses in parallel a 3DVAR variational assimilation and an EnKF algorithm to improve the estimation of *a posteriori* error covariances - "EnKS" (Ensemble Kalman Smoother), smoothing algorithm with a fixed time lag L. -Without being a universal recommandation, one recommend to use "EnKF" as a -reference algorithm, and the other algorithms (in this order) as means to -obtain less costly data assimilation with hopefully the same quality. +Without being a universal recommandation, one recommend to use "EnKF" +formulation as a reference algorithm, "ETKF-N" ou "IEnKF" formulation for +robust performance, and the other algorithms (in this order) as means to obtain +a less costly data assimilation with (hopefully) the same quality. .. ------------------------------------ .. .. 
include:: snippets/Header2Algo02.rst @@ -98,6 +99,12 @@ obtain less costly data assimilation with hopefully the same quality. .. include:: snippets/EstimationOf_State.rst +.. include:: snippets/HybridCostDecrementTolerance.rst + +.. include:: snippets/HybridCovarianceEquilibrium.rst + +.. include:: snippets/HybridMaximumNumberOfIterations.rst + .. include:: snippets/InflationFactor.rst .. include:: snippets/InflationType.rst @@ -206,6 +213,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_EnsembleKalmanFilter_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_KalmanFilter` @@ -220,4 +228,5 @@ StoreSupplementaryCalculations - [Bishop01]_ - [Evensen03]_ - [Zupanski05]_ +- [Hamill00]_ - [WikipediaEnKF]_ diff --git a/doc/en/ref_algorithm_ExtendedBlue.rst b/doc/en/ref_algorithm_ExtendedBlue.rst index 8e1a226..4d88bca 100644 --- a/doc/en/ref_algorithm_ExtendedBlue.rst +++ b/doc/en/ref_algorithm_ExtendedBlue.rst @@ -193,6 +193,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulationQuantiles.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ExtendedBlue_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_ExtendedBlue.rst diff --git a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst index 93b9f60..6f8724d 100644 --- a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst +++ b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst @@ -204,6 +204,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ExtendedKalmanFilter_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_KalmanFilter` diff --git a/doc/en/ref_algorithm_FunctionTest.rst b/doc/en/ref_algorithm_FunctionTest.rst index 71c6e15..b1218a5 100644 --- a/doc/en/ref_algorithm_FunctionTest.rst +++ b/doc/en/ref_algorithm_FunctionTest.rst @@ -93,6 +93,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_FunctionTest_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_FunctionTest.rst diff --git a/doc/en/ref_algorithm_GradientTest.rst b/doc/en/ref_algorithm_GradientTest.rst index 01e66e9..878393b 100644 --- a/doc/en/ref_algorithm_GradientTest.rst +++ b/doc/en/ref_algorithm_GradientTest.rst @@ -146,6 +146,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_GradientTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/en/ref_algorithm_KalmanFilter.rst b/doc/en/ref_algorithm_KalmanFilter.rst index e7a73b5..9ca60f1 100644 --- a/doc/en/ref_algorithm_KalmanFilter.rst +++ b/doc/en/ref_algorithm_KalmanFilter.rst @@ -195,6 +195,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_KalmanFilter_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_KalmanFilter1.rst diff --git a/doc/en/ref_algorithm_LinearLeastSquares.rst b/doc/en/ref_algorithm_LinearLeastSquares.rst index 3837a1e..b7db4b3 100644 --- a/doc/en/ref_algorithm_LinearLeastSquares.rst +++ b/doc/en/ref_algorithm_LinearLeastSquares.rst @@ -154,6 +154,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. 
_section_ref_algorithm_LinearLeastSquares_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_Blue` diff --git a/doc/en/ref_algorithm_LinearityTest.rst b/doc/en/ref_algorithm_LinearityTest.rst index e3fd85c..4bfebcb 100644 --- a/doc/en/ref_algorithm_LinearityTest.rst +++ b/doc/en/ref_algorithm_LinearityTest.rst @@ -167,6 +167,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_LinearityTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/en/ref_algorithm_LocalSensitivityTest.rst b/doc/en/ref_algorithm_LocalSensitivityTest.rst index 747d8d4..e2cd13a 100644 --- a/doc/en/ref_algorithm_LocalSensitivityTest.rst +++ b/doc/en/ref_algorithm_LocalSensitivityTest.rst @@ -97,6 +97,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_LocalSensitivityTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst b/doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst new file mode 100644 index 0000000..1550390 --- /dev/null +++ b/doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst @@ -0,0 +1,109 @@ +.. + Copyright (C) 2008-2022 EDF R&D + + This file is part of SALOME ADAO module. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com + + Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +.. index:: single: MeasurementsOptimalPositioningTask +.. index:: single: Optimal positioning of measurements +.. index:: single: Measurement locations +.. index:: single: Measurements (Optimal positioning) +.. _section_ref_algorithm_MeasurementsOptimalPositioningTask: + +Task algorithm "*MeasurementsOptimalPositioningTask*" +----------------------------------------------------- + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo00.rst + +.. warning:: + + This algorithm is for now only available in textual user interface (TUI) and not in graphical user interface (GUI). + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo01.rst + +This algorithm provides optimal measurement points by an EIM (Empirical +Interpolation Method) analysis, from a set of state vectors (usually called +"snapshots" in reduced basis methodology). Each of these state vectors is +usually (but not necessarily) the result :math:`\mathbf{y}` of a simulation +:math:`H` for a given set of parameters :math:`\mathbf{x}=\mu`. + +In its simplest use, if the set of state vectors is pre-existing, it is only +necessary to provide it through the algorithm options. + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo02.rst + +*None* + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo03Task.rst + +.. include:: snippets/EnsembleOfSnapshots.rst + +.. include:: snippets/ExcludeLocations.rst + +.. include:: snippets/ErrorNorm.rst + +.. 
include:: snippets/ErrorNormTolerance.rst + +.. include:: snippets/MaximumNumberOfLocations.rst + +StoreSupplementaryCalculations + .. index:: single: StoreSupplementaryCalculations + + *List of names*. This list indicates the names of the supplementary + variables, that can be available during or at the end of the algorithm, if + they are initially required by the user. Their availability involves, + potentially, costly calculations or memory consumptions. The default is then + a void list, none of these variables being calculated and stored by default + (excepted the unconditional variables). The possible names are in the + following list (the detailed description of each named variable is given in + the following part of this specific algorithmic documentation, in the + sub-section "*Information and variables available at the end of the + algorithm*"): [ + "OptimalPoints", + "ReducedBasis", + "Residus", + ]. + + Example : + ``{"StoreSupplementaryCalculations":["OptimalPoints", "ReducedBasis"]}`` + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo04.rst + +.. include:: snippets/OptimalPoints.rst + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo05.rst + +.. include:: snippets/OptimalPoints.rst + +.. include:: snippets/ReducedBasis.rst + +.. include:: snippets/Residus.rst + +.. ------------------------------------ .. +.. _section_ref_algorithm_MeasurementsOptimalPositioningTask_examples: +.. include:: snippets/Header2Algo06.rst + +- :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/en/ref_algorithm_NonLinearLeastSquares.rst b/doc/en/ref_algorithm_NonLinearLeastSquares.rst index 2f716ef..0a58eaa 100644 --- a/doc/en/ref_algorithm_NonLinearLeastSquares.rst +++ b/doc/en/ref_algorithm_NonLinearLeastSquares.rst @@ -189,6 +189,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +..
_section_ref_algorithm_NonLinearLeastSquares_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_NonLinearLeastSquares.rst diff --git a/doc/en/ref_algorithm_ParallelFunctionTest.rst b/doc/en/ref_algorithm_ParallelFunctionTest.rst index dece3a8..0afa30b 100644 --- a/doc/en/ref_algorithm_ParallelFunctionTest.rst +++ b/doc/en/ref_algorithm_ParallelFunctionTest.rst @@ -94,6 +94,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ParallelFunctionTest_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_ParallelFunctionTest.rst diff --git a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst index 2af5fcb..100d345 100644 --- a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst +++ b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst @@ -147,6 +147,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ParticleSwarmOptimization_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_DerivativeFreeOptimization` diff --git a/doc/en/ref_algorithm_QuantileRegression.rst b/doc/en/ref_algorithm_QuantileRegression.rst index c2645fc..e0acece 100644 --- a/doc/en/ref_algorithm_QuantileRegression.rst +++ b/doc/en/ref_algorithm_QuantileRegression.rst @@ -136,6 +136,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_QuantileRegression_examples: .. include:: snippets/Header2Algo06.rst .. ------------------------------------ .. 
diff --git a/doc/en/ref_algorithm_SamplingTest.rst b/doc/en/ref_algorithm_SamplingTest.rst index 2a247af..1b30f46 100644 --- a/doc/en/ref_algorithm_SamplingTest.rst +++ b/doc/en/ref_algorithm_SamplingTest.rst @@ -131,6 +131,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_SamplingTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/en/ref_algorithm_TabuSearch.rst b/doc/en/ref_algorithm_TabuSearch.rst index ae2ad5f..3a07ec9 100644 --- a/doc/en/ref_algorithm_TabuSearch.rst +++ b/doc/en/ref_algorithm_TabuSearch.rst @@ -156,6 +156,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_TabuSearch_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_DerivativeFreeOptimization` diff --git a/doc/en/ref_algorithm_TangentTest.rst b/doc/en/ref_algorithm_TangentTest.rst index 49abb58..acf9f95 100644 --- a/doc/en/ref_algorithm_TangentTest.rst +++ b/doc/en/ref_algorithm_TangentTest.rst @@ -108,6 +108,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_TangentTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/en/ref_algorithm_UnscentedKalmanFilter.rst b/doc/en/ref_algorithm_UnscentedKalmanFilter.rst index cf8de36..ef7d3e0 100644 --- a/doc/en/ref_algorithm_UnscentedKalmanFilter.rst +++ b/doc/en/ref_algorithm_UnscentedKalmanFilter.rst @@ -184,6 +184,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_UnscentedKalmanFilter_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_KalmanFilter` diff --git a/doc/en/ref_assimilation_keywords.rst b/doc/en/ref_assimilation_keywords.rst index 8507abb..740e035 100644 --- a/doc/en/ref_assimilation_keywords.rst +++ b/doc/en/ref_assimilation_keywords.rst @@ -46,9 +46,8 @@ imperatively indicated by one of these commands: .. include:: snippets/REDUCTION_STUDY.rst -The other nested terms are listed in alphabetical order. They are not -necessarily required for all algorithms. The different commands are the -following: +The nested terms are sorted in alphabetical order. They are not necessarily +required for all algorithms. The various commands are the following: .. include:: snippets/AlgorithmParameters.rst diff --git a/doc/en/ref_checking_keywords.rst b/doc/en/ref_checking_keywords.rst index e0cbe63..bf6cc51 100644 --- a/doc/en/ref_checking_keywords.rst +++ b/doc/en/ref_checking_keywords.rst @@ -31,12 +31,12 @@ procedure to check required properties on information, used somewhere else by a calculation case. The first term describes the choice between calculation or checking. In the -graphical interface, the verification is imperatively indicated by the command: +graphical interface, the choice is imperatively indicated by the command: .. include:: snippets/CHECKING_STUDY.rst -The other terms are ordered in alphabetical order. The different commands are -the following: +The nested terms are sorted in alphabetical order. They are not necessarily +required for all algorithms. The various commands are the following: .. include:: snippets/AlgorithmParameters.rst diff --git a/doc/en/ref_task_keywords.rst b/doc/en/ref_task_keywords.rst new file mode 100644 index 0000000..385a3d6 --- /dev/null +++ b/doc/en/ref_task_keywords.rst @@ -0,0 +1,45 @@ +.. + Copyright (C) 2008-2022 EDF R&D + + This file is part of SALOME ADAO module. 
+ + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com + + Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +.. _section_ref_task_keywords: + +List of commands and keywords for a dedicated task or study oriented case +------------------------------------------------------------------------- + +This set of commands is related to the description of a dedicated task or study +oriented case, which consists of a simple specific procedure to perform a +computational task dedicated to a general application of data assimilation or +optimization methods. + +The nested terms are sorted in alphabetical order. They are not necessarily +required for all algorithms. The various commands are the following: + +.. include:: snippets/AlgorithmParameters.rst + +.. include:: snippets/Debug.rst + +.. include:: snippets/Observers.rst + +.. include:: snippets/StudyName.rst + +.. include:: snippets/StudyRepertory.rst diff --git a/doc/en/reference.rst b/doc/en/reference.rst index 874c59b..df19d70 100644 --- a/doc/en/reference.rst +++ b/doc/en/reference.rst @@ -145,3 +145,23 @@ The mathematical concepts and notations used are explained in the section ref_algorithm_SamplingTest ref_algorithm_TangentTest ref_checking_keywords + +.. 
_section_reference_task: + +================================================================================ +**[DocR]** Dedicated tasks or study oriented cases +================================================================================ + +This section describes the dedicated task or study oriented cases available in +ADAO, detailing their usage characteristics and their options. + +These tasks use algorithms from data assimilation methods, optimization methods +or methods with reduction. We refer to the :ref:`section_theory` section and +the :ref:`section_reference_assimilation` section for the underlying +algorithmic details. + +.. toctree:: + :maxdepth: 1 + + ref_algorithm_MeasurementsOptimalPositioningTask + ref_task_keywords diff --git a/doc/en/scripts/simple_AdjointTest.py b/doc/en/scripts/simple_AdjointTest.py new file mode 100644 index 0000000..a68f18e --- /dev/null +++ b/doc/en/scripts/simple_AdjointTest.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# +from numpy import array, eye +from adao import adaoBuilder +case = adaoBuilder.New('') +case.setCheckingPoint( Vector = array([0., 1., 2.]), Stored=True ) +case.setObservation( Vector = [10., 11., 12.] ) +case.setObservationOperator( Matrix = eye(3), ) +case.setAlgorithmParameters( + Algorithm='AdjointTest', + Parameters={ + 'EpsilonMinimumExponent' :-12, + 'NumberOfPrintedDigits' : 3, + 'SetSeed' : 1234567, + }, + ) +case.execute() diff --git a/doc/en/scripts/simple_AdjointTest.res b/doc/en/scripts/simple_AdjointTest.res new file mode 100644 index 0000000..c63412c --- /dev/null +++ b/doc/en/scripts/simple_AdjointTest.res @@ -0,0 +1,37 @@ + + ADJOINTTEST + =========== + + This test allows to analyze the quality of an adjoint operator associated + to some given direct operator. If the adjoint operator is approximated and + not given, the test measures the quality of the automatic approximation. 
+ + Using the "ScalarProduct" formula, one observes the residue R which is the + difference of two scalar products: + + R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > | + + which must remain constantly equal to zero to the accuracy of the calculation. + One takes dX0 = Normal(0,X) and dX = Alpha*dX0, where F is the calculation + operator. If it is given, Y must be in the image of F. If it is not given, + one takes Y = F(X). + + (Remark: numbers that are (about) under 2e-16 represent 0 to machine precision) + + ------------------------------------------------------------- + i Alpha ||X|| ||Y|| ||dX|| R(Alpha) + ------------------------------------------------------------- + 0 1e+00 2.236e+00 1.910e+01 3.536e+00 0.000e+00 + 1 1e-01 2.236e+00 1.910e+01 3.536e-01 0.000e+00 + 2 1e-02 2.236e+00 1.910e+01 3.536e-02 0.000e+00 + 3 1e-03 2.236e+00 1.910e+01 3.536e-03 0.000e+00 + 4 1e-04 2.236e+00 1.910e+01 3.536e-04 0.000e+00 + 5 1e-05 2.236e+00 1.910e+01 3.536e-05 0.000e+00 + 6 1e-06 2.236e+00 1.910e+01 3.536e-06 0.000e+00 + 7 1e-07 2.236e+00 1.910e+01 3.536e-07 0.000e+00 + 8 1e-08 2.236e+00 1.910e+01 3.536e-08 0.000e+00 + 9 1e-09 2.236e+00 1.910e+01 3.536e-09 0.000e+00 + 10 1e-10 2.236e+00 1.910e+01 3.536e-10 0.000e+00 + 11 1e-11 2.236e+00 1.910e+01 3.536e-11 0.000e+00 + 12 1e-12 2.236e+00 1.910e+01 3.536e-12 0.000e+00 + ------------------------------------------------------------- diff --git a/doc/en/scripts/simple_AdjointTest.rst b/doc/en/scripts/simple_AdjointTest.rst new file mode 100644 index 0000000..4d515fe --- /dev/null +++ b/doc/en/scripts/simple_AdjointTest.rst @@ -0,0 +1,14 @@ +.. index:: single: AdjointTest (example) + +This example describes the test of the quality of the adjoint of some operator, +whose direct formulation is given and whose adjoint formulation is here +approximated by default. 
The required information is minimal, namely here an +operator :math:`F` (described for the test by the observation command +"*ObservationOperator*"), and a state :math:`\mathbf{x}^b` to test it on +(described for the test by the command "*CheckingPoint*"). An observation +:math:`\mathbf{y}^o` can be given as here (described for the test by the +command "*Observation*"). The output has been set to determine the printout, +for example to make more easy automatic comparison. + +The actual check is to observe whether the residue is consistently equal to +zero at the accuracy of the calculation. diff --git a/doc/en/scripts/simple_FunctionTest.res b/doc/en/scripts/simple_FunctionTest.res index 8f57585..a63fff5 100644 --- a/doc/en/scripts/simple_FunctionTest.res +++ b/doc/en/scripts/simple_FunctionTest.res @@ -1,8 +1,16 @@ + + FUNCTIONTEST + ============ + + This test allows to analyze the (repetition of) launch of some given + operator. It shows simple statistics related to its successful execution, + or related to the similarities of repetition of its execution. + ===> Information before launching: ----------------------------- Characteristics of input vector X, internally converted: Type...............: - Lenght of vector...: 3 + Length of vector...: 3 Minimum value......: 0.00e+00 Maximum value......: 2.00e+00 Mean of vector.....: 1.00e+00 diff --git a/doc/en/scripts/simple_ParallelFunctionTest.res b/doc/en/scripts/simple_ParallelFunctionTest.res index 829da09..bb6af81 100644 --- a/doc/en/scripts/simple_ParallelFunctionTest.res +++ b/doc/en/scripts/simple_ParallelFunctionTest.res @@ -1,8 +1,16 @@ + + PARALLELFUNCTIONTEST + ==================== + + This test allows to analyze the (repetition of) launch of some given + operator. It shows simple statistics related to its successful execution, + or related to the similarities of repetition of its execution. 
+ ===> Information before launching: ----------------------------- Characteristics of input vector X, internally converted: Type...............: - Lenght of vector...: 30 + Length of vector...: 30 Minimum value......: 0.00e+00 Maximum value......: 2.90e+01 Mean of vector.....: 1.45e+01 diff --git a/doc/en/snippets/AlgorithmParameters.rst b/doc/en/snippets/AlgorithmParameters.rst index ddebd72..9fbfc85 100644 --- a/doc/en/snippets/AlgorithmParameters.rst +++ b/doc/en/snippets/AlgorithmParameters.rst @@ -4,10 +4,11 @@ AlgorithmParameters *Dictionary*. This variable indicates the data assimilation or optimization algorithm chosen by the keyword "*Algorithm*", and its potential optional - parameters. The algorithm choices are available through the GUI. There exists - for example "3DVAR", "Blue"... Each algorithm is defined, below, by a - specific subsection. Optionally, the command allows also to add some - parameters to control the algorithm. Their values are defined either - explicitly or in a "*Dict*" type object. See the + parameters. The potential choices by this keyword "*Algorithm*" are available + through the graphical interface or in the reference documentation containing + a specific sub-section for each of them. There are for example the "3DVAR", + the "Blue", etc. Optionally, the command also allows to add parameters to + control the chosen algorithm. Their values are defined either explicitly or + in a "*Dict*" type object. See the :ref:`section_ref_options_Algorithm_Parameters` for the detailed use of this command part. diff --git a/doc/en/snippets/Analysis.rst b/doc/en/snippets/Analysis.rst index 4592be4..8d19e6a 100644 --- a/doc/en/snippets/Analysis.rst +++ b/doc/en/snippets/Analysis.rst @@ -2,7 +2,7 @@ Analysis *List of vectors*. Each element of this variable is an optimal state - :math:`\mathbf{x}*` in optimization or an analysis :math:`\mathbf{x}^a` in + :math:`\mathbf{x}^*` in optimization or an analysis :math:`\mathbf{x}^a` in data assimilation. 
Example: diff --git a/doc/en/snippets/EnsembleOfSnapshots.rst b/doc/en/snippets/EnsembleOfSnapshots.rst new file mode 100644 index 0000000..ce3d82e --- /dev/null +++ b/doc/en/snippets/EnsembleOfSnapshots.rst @@ -0,0 +1,12 @@ +.. index:: single: EnsembleOfSnapshots + +EnsembleOfSnapshots + *List of vectors or matrix*. This key contains a set of physical state + vectors :math:`\mathbf{y}` (called "*snapshots*" in "Reduced Bases" + terminology), with 1 state per column if it is a matrix or 1 state per + element of the list. Important: the numbering of the points, to which a state + value is given, in each vector is implicitly that of the natural order of + numbering of the state vector, from 0 to the "size minus 1" of this vector. + + Example : + ``{"EnsembleOfSnapshots":[y1, y2, y3...]}`` diff --git a/doc/en/snippets/ErrorNorm.rst b/doc/en/snippets/ErrorNorm.rst new file mode 100644 index 0000000..4a6cbd4 --- /dev/null +++ b/doc/en/snippets/ErrorNorm.rst @@ -0,0 +1,9 @@ +.. index:: single: ErrorNorm + +ErrorNorm + *Predefined name*. This key indicates the norm used for the residue that + controls the optimal search. The default is the "L2" norm. The possible + criteria are in the following list: ["L2", "Linf"]. + + Example : + ``{"ErrorNorm":"L2"}`` diff --git a/doc/en/snippets/ErrorNormTolerance.rst b/doc/en/snippets/ErrorNormTolerance.rst new file mode 100644 index 0000000..050bd1b --- /dev/null +++ b/doc/en/snippets/ErrorNormTolerance.rst @@ -0,0 +1,12 @@ +.. index:: single: ErrorNormTolerance + +ErrorNormTolerance + *Real value*. This key specifies the value at which the residual associated + with the approximation is acceptable, which leads to stop the optimal search. + The default value is 1.e-7 (which is usually equivalent to almost no stopping + criterion because the approximation is less precise), and it is recommended + to adapt it to the needs for real problems. A usual value, recommended to + stop the search on residual criterion, is 1.e-2. 
+ + Example : + ``{"ErrorNormTolerance":1.e-7}`` diff --git a/doc/en/snippets/ExcludeLocations.rst b/doc/en/snippets/ExcludeLocations.rst new file mode 100644 index 0000000..0ccad27 --- /dev/null +++ b/doc/en/snippets/ExcludeLocations.rst @@ -0,0 +1,11 @@ +.. index:: single: ExcludeLocations + +ExcludeLocations + *List of integers*. This key specifies the list of points in the state vector + excluded from the optimal search. The default value is an empty list. + Important: the numbering of these excluded points must be identical to the + one implicitly adopted in the states provided by the "*EnsembleOfSnapshots*" + key. + + Example : + ``{"ExcludeLocations":[3, 125, 286]}`` diff --git a/doc/en/snippets/Header2Algo03AdOp.rst b/doc/en/snippets/Header2Algo03AdOp.rst index 255ef17..9827e87 100644 --- a/doc/en/snippets/Header2Algo03AdOp.rst +++ b/doc/en/snippets/Header2Algo03AdOp.rst @@ -5,4 +5,4 @@ choose the specific options, described hereafter, of the algorithm. See :ref:`section_ref_options_Algorithm_Parameters` for the good use of this command. -The options of the algorithm are the following: +The options are the following: diff --git a/doc/en/snippets/Header2Algo03Chck.rst b/doc/en/snippets/Header2Algo03Chck.rst index b7fbc3e..ede6de8 100644 --- a/doc/en/snippets/Header2Algo03Chck.rst +++ b/doc/en/snippets/Header2Algo03Chck.rst @@ -5,4 +5,4 @@ the specific options, described hereafter, of the algorithm. See :ref:`section_ref_options_Algorithm_Parameters` for the good use of this command. -The options of the algorithm are the following: +The options are the following: diff --git a/doc/en/snippets/Header2Algo03Task.rst b/doc/en/snippets/Header2Algo03Task.rst new file mode 100644 index 0000000..28f822c --- /dev/null +++ b/doc/en/snippets/Header2Algo03Task.rst @@ -0,0 +1,8 @@ +The general optional commands, available in the editing user graphical or +textual interface, are indicated in :ref:`section_ref_task_keywords`. 
Moreover, +the parameters of the command "*AlgorithmParameters*" allow to choose the +specific options, described hereafter, of the algorithm. See +:ref:`section_ref_options_Algorithm_Parameters` for the good use of this +command. + +The options are the following: diff --git a/doc/en/snippets/Header2Algo09.rst b/doc/en/snippets/Header2Algo09.rst index d797086..5ad6b83 100644 --- a/doc/en/snippets/Header2Algo09.rst +++ b/doc/en/snippets/Header2Algo09.rst @@ -1,5 +1,5 @@ -Python (TUI) use example -++++++++++++++++++++++++ +Python (TUI) use examples ++++++++++++++++++++++++++ Here is a very simple use of the given algorithm and its parameters, written in :ref:`section_tui`, and from which input information allow to define an diff --git a/doc/en/snippets/HybridCostDecrementTolerance.rst b/doc/en/snippets/HybridCostDecrementTolerance.rst new file mode 100644 index 0000000..9dc9721 --- /dev/null +++ b/doc/en/snippets/HybridCostDecrementTolerance.rst @@ -0,0 +1,13 @@ +.. index:: single: HybridCostDecrementTolerance + +HybridCostDecrementTolerance + *Real value*. This key indicates a limit value, leading to stop successfully + the optimization process for the variational part in the coupling, when the + cost function decreases less than this tolerance at the last step. The + default is 1.e-7, and it is recommended to adapt it to the needs on real + problems. One can refer to the section describing ways for + :ref:`subsection_iterative_convergence_control` for more detailed + recommendations. + + Example: + ``{"HybridCostDecrementTolerance":1.e-7}`` diff --git a/doc/en/snippets/HybridCovarianceEquilibrium.rst b/doc/en/snippets/HybridCovarianceEquilibrium.rst new file mode 100644 index 0000000..82eb3ad --- /dev/null +++ b/doc/en/snippets/HybridCovarianceEquilibrium.rst @@ -0,0 +1,9 @@ +.. index:: single: HybridCovarianceEquilibrium + +HybridCovarianceEquilibrium + *Real value*. 
This key indicates, in hybrid variational optimization, the + equilibrium factor between the static *a priori* covariance and the ensemble + covariance. This factor is between 0 and 1, and its default value is 0.5. + + Example : + ``{"HybridCovarianceEquilibrium":0.5}`` diff --git a/doc/en/snippets/HybridMaximumNumberOfIterations.rst b/doc/en/snippets/HybridMaximumNumberOfIterations.rst new file mode 100644 index 0000000..70fb385 --- /dev/null +++ b/doc/en/snippets/HybridMaximumNumberOfIterations.rst @@ -0,0 +1,14 @@ +.. index:: single: HybridMaximumNumberOfIterations + +HybridMaximumNumberOfIterations + *Integer value*. This key indicates the maximum number of internal iterations + allowed for hybrid optimization, for the variational part. The default is + 15000, which is very similar to no limit on iterations. It is then + recommended to adapt this parameter to the needs on real problems. For some + optimizers, the effective stopping step can be slightly different of the + limit due to algorithm internal control requirements. One can refer to the + section describing ways for :ref:`subsection_iterative_convergence_control` + for more detailed recommendations. + + Example: + ``{"HybridMaximumNumberOfIterations":100}`` diff --git a/doc/en/snippets/MaximumNumberOfLocations.rst b/doc/en/snippets/MaximumNumberOfLocations.rst new file mode 100644 index 0000000..039610d --- /dev/null +++ b/doc/en/snippets/MaximumNumberOfLocations.rst @@ -0,0 +1,11 @@ +.. index:: single: MaximumNumberOfLocations + +MaximumNumberOfLocations + *Integer value*. This key specifies the maximum possible number of positions + found in the optimal search. The default value is 1. The optimal search may + eventually find less positions than required by this key, as for example in + the case where the residual associated to the approximation is lower than the + criterion and leads to the early termination of the optimal search. 
+ + Example: + ``{"MaximumNumberOfLocations":5}`` diff --git a/doc/en/snippets/OptimalPoints.rst b/doc/en/snippets/OptimalPoints.rst new file mode 100644 index 0000000..6e381bf --- /dev/null +++ b/doc/en/snippets/OptimalPoints.rst @@ -0,0 +1,9 @@ +.. index:: single: OptimalPoints + +OptimalPoints + *List of integer series*. Each element is a series, containing the ideal points + determined by the optimal search, ordered by decreasing preference and in the + same order as the reduced basis vectors found iteratively. + + Example: + ``mp = ADD.get("OptimalPoints")[-1]`` diff --git a/doc/en/snippets/ReducedBasis.rst b/doc/en/snippets/ReducedBasis.rst new file mode 100644 index 0000000..d5f08e8 --- /dev/null +++ b/doc/en/snippets/ReducedBasis.rst @@ -0,0 +1,10 @@ +.. index:: single: ReducedBasis + +ReducedBasis + *List of matrices*. Each element is a matrix, containing in each column a + vector of the reduced basis obtained by the optimal search, ordered by + decreasing preference and in the same order as the ideal points found + iteratively. + + Example: + ``rb = ADD.get("ReducedBasis")[-1]`` diff --git a/doc/en/snippets/Residu.rst b/doc/en/snippets/Residu.rst index 7c875d8..3b901c2 100644 --- a/doc/en/snippets/Residu.rst +++ b/doc/en/snippets/Residu.rst @@ -2,7 +2,7 @@ Residu *List of values*. Each element is the value of the particular residue - verified during a checking algorithm, in the order of the tests. + checked during the running of the algorithm, in the order of the tests. Example: ``r = ADD.get("Residu")[:]`` diff --git a/doc/en/snippets/Residus.rst b/doc/en/snippets/Residus.rst new file mode 100644 index 0000000..e727a3d --- /dev/null +++ b/doc/en/snippets/Residus.rst @@ -0,0 +1,8 @@ +.. index:: single: Residus + +Residus + *List of real value series*. Each element is a series, containing the values + of the particular residue checked during the running of the algorithm.
+ + Example: + ``rs = ADD.get("Residus")[:]`` diff --git a/doc/en/snippets/Variant_EnKF.rst b/doc/en/snippets/Variant_EnKF.rst index 0b1fccc..2322dc9 100644 --- a/doc/en/snippets/Variant_EnKF.rst +++ b/doc/en/snippets/Variant_EnKF.rst @@ -5,21 +5,24 @@ pair: Variant ; ETKF-N pair: Variant ; MLEF pair: Variant ; IEnKF + pair: Variant ; E3DVAR pair: Variant ; EnKS Variant *Predifined name*. This key allows to choose one of the possible variants - for the main algorithm. The default variant is the original "EnKF", and the - possible ones are + for the main algorithm. The default variant is the original "EnKF" + formulation, and the possible ones are "EnKF" (Ensemble Kalman Filter), "ETKF" (Ensemble-Transform Kalman Filter), "ETKF-N" (Ensemble-Transform Kalman Filter), "MLEF" (Maximum Likelihood Kalman Filter), "IEnKF" (Iterative_EnKF), + "E3DVAR" (EnKF 3DVAR), "EnKS" (Ensemble Kalman Smoother). - One recommends to try the "ETKF-N" or "IEnKF" variants, and to reduce the - number of members to about 10 or less for all variants other then the - original "EnKF". + + One recommends to try the "ETKF-N" or "IEnKF" variants for a robust + performance, and to reduce the number of members to about 10 or less for all + variants other than the original "EnKF" formulation. Example : ``{"Variant":"EnKF"}`` diff --git a/doc/en/tutorials_in_python.rst b/doc/en/tutorials_in_python.rst index 7c44789..5c0acb4 100644 --- a/doc/en/tutorials_in_python.rst +++ b/doc/en/tutorials_in_python.rst @@ -113,7 +113,8 @@ As an extension of this example, one can change the variances represented by :math:`\mathbf{x}^a` will move to :math:`\mathbf{y}^o` or to :math:`\mathbf{x}^b`, in inverse proportion of the variances in :math:`\mathbf{B}` and :math:`\mathbf{R}`. As an other extension, it is also -equivalent to search for the analysis thought a BLUE algorithm or a 3DVAR one. +equivalent to search for the analysis through a "Blue" algorithm or a "3DVAR" +one.
Using the graphical interface (GUI) to build the ADAO case ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -194,9 +195,9 @@ as shown here: >>> As a simple extension of this example, one can notice that the same problem -solved with a 3DVAR algorithm gives the same result. This algorithm can be +solved with a "3DVAR" algorithm gives the same result. This algorithm can be chosen at the ADAO case building step by only changing the "*Algorithm*" -argument on the beginning. The remaining parts of the ADAO case in 3DVAR is +argument on the beginning. The remaining parts of the ADAO case in "3DVAR" is exactly similar to the BLUE algorithmic case. .. _section_tutorials_in_python_script: diff --git a/doc/en/tutorials_in_salome.rst b/doc/en/tutorials_in_salome.rst index 440dbfa..8207c87 100644 --- a/doc/en/tutorials_in_salome.rst +++ b/doc/en/tutorials_in_salome.rst @@ -406,7 +406,7 @@ the true state :math:`\mathbf{x}^t` for each component, which is: To describe the background error covariances matrix :math:`\mathbf{B}`, we make as previously the hypothesis of uncorrelated errors (that is, a diagonal matrix, -of size 3x3 because :math:`\mathbf{x}^b` is of lenght 3) and to have the same +of size 3x3 because :math:`\mathbf{x}^b` is of length 3) and to have the same variance of 0.1 for all variables. We get: :: diff --git a/doc/fr/advanced.rst b/doc/fr/advanced.rst index d613f97..4ccea05 100644 --- a/doc/fr/advanced.rst +++ b/doc/fr/advanced.rst @@ -33,6 +33,8 @@ l'interface graphique (GUI). Cela nécessite de savoir comment trouver les fichiers ou les commandes incluses dans l'installation complète de SALOME. Tous les noms à remplacer par l'utilisateur sont indiqués par la syntaxe ``<...>``. +.. 
_section_advanced_convert_JDC: + Convertir et exécuter un fichier de commandes ADAO (JDC) par l'intermédiaire d'un script Shell ---------------------------------------------------------------------------------------------- @@ -138,6 +140,8 @@ SALOME>``, il suffit d'enregistrer le script de commandes Shell suivant : Dans tous les cas, les sorties standard et d'erreur se font dans le terminal de lancement. +.. _section_advanced_YACS_tui: + Exécuter un schéma de calcul ADAO dans YACS en utilisant le mode "texte" (TUI YACS) ----------------------------------------------------------------------------------- @@ -202,13 +206,14 @@ Exécuter un calcul ADAO en environnement R en utilisant l'interface TUI ADAO .. index:: single: R .. index:: single: rPython +.. index:: single: reticulate Pour étendre les possibilités d'analyse et de traitement, il est possible d'utiliser les calculs ADAO dans l'environnement **R** (voir [R]_ pour plus de -détails). Ce dernier est disponible dans SALOME en lançant l'interpréteur R dans -le shell "``salome shell``". Il faut de plus disposer, en R, du package -"*rPython*", qui peut si nécessaire être installé par l'utilisateur à l'aide de -la commande R suivante : +détails). Ce dernier est disponible dans SALOME en lançant l'interpréteur R +dans le shell "``salome shell``". Il faut de plus disposer, en R, du package +"*rPython*" (ou du package "*reticulate*", plus récent), qui peut si nécessaire +être installé par l'utilisateur à l'aide de la commande R suivante : :: #-*- coding: utf-8 -*- @@ -328,20 +333,22 @@ Python obtenu depuis le "SALOME shell", on utilise les commandes suivantes : from adao import adaoBuilder adaoBuilder.Gui() -Si nécessaire, des messages explicites permettent d'identifier les variables -d'environnement requises qui seraient absentes. 
Cette commande ne doit -néanmoins pas être lancée dans la console Python de SALOME (car dans ce cas il -suffit d'activer le module puisque l'on est déjà dans l'interface -graphique...), mais elle peut l'être dans une session "SALOME shell" obtenue -depuis le menu "Outils/Extensions" de SALOME. Pour mémoire, le moyen le plus -simple d'obtenir un interpréteur Python inclu dans une session "SALOME shell" -est de lancer la commande suivante dans un terminal : -:: +Pour mémoire, le moyen le plus simple d'obtenir un interpréteur Python inclu +dans une session "SALOME shell" est de lancer la commande suivante dans un +terminal : :: $SALOMEDIR/salome shell -- python avec ``SALOMEDIR`` le ````. +Si nécessaire, des messages explicites permettent d'identifier les variables +d'environnement requises qui seraient absentes. **Cette commande ne doit +néanmoins pas être lancée dans la console Python de SALOME** (car dans ce cas +il suffit d'activer le module puisque l'on est déjà dans l'interface +graphique...) ou dans une installation Python indépendante, mais elle peut +l'être dans une session "SALOME shell" obtenue depuis le menu +"Outils/Extensions" de SALOME. + .. _section_advanced_execution_mode: Changer le mode par défaut d'exécution de noeuds dans YACS diff --git a/doc/fr/bibliography.rst b/doc/fr/bibliography.rst index 569320c..96f4cce 100644 --- a/doc/fr/bibliography.rst +++ b/doc/fr/bibliography.rst @@ -39,7 +39,7 @@ Bibliographie .. [Buchinsky98] Buchinsky M., *Recent Advances in Quantile Regression Models: A Practical Guidline for Empirical Research*, Journal of Human Resources, 33(1), pp.88-126, 1998 -.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126, 1719–1724, 1998 +.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126(6), pp.1719–1724, 1998 .. [Byrd95] Byrd R. 
H., Lu P., Nocedal J., *A Limited Memory Algorithm for Bound Constrained Optimization*, SIAM Journal on Scientific and Statistical Computing, 16(5), pp.1190-1208, 1995 @@ -49,9 +49,9 @@ Bibliographie .. [Cohn98] Cohn S. E., Da Silva A., Guo J., Sienkiewicz M., Lamich D., *Assessing the effects of data selection with the DAO Physical-space Statistical Analysis System*, Monthly Weather Review, 126, pp.2913–2926, 1998 -.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120, pp.1367–1387, 1994 +.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120(519), pp.1367–1387, 1994 -.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123, pp.2449-2461, 1997 +.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123(544), pp.2249-2261, 1997 .. [Das11] Das S., Suganthan P. N., *Differential Evolution: A Survey of the State-of-the-art*, IEEE Transactions on Evolutionary Computation, 15(1), pp.4-31, 2011 @@ -69,6 +69,8 @@ Bibliographie .. [Glover90] Glover F., *Tabu Search-Part II*, ORSA Journal on Computing, 2(1), pp.4-32, 1990 +.. [Hamill00] Hamill T. M., Snyder C., *A Hybrid Ensemble Kalman Filter-3D Variational Analysis Scheme*, Monthly Weather Review, 128(8), pp.2905-2919, 2000 + .. [Ide97] Ide K., Courtier P., Ghil M., Lorenc A. C., *Unified notation for data assimilation: operational, sequential and variational*, Journal of the Meteorological Society of Japan, 75(1B), pp.181-189, 1997 .. [Jazwinski70] Jazwinski A. 
H., *Stochastic Processes and Filtering Theory*, Academic Press, 1970 @@ -85,9 +87,9 @@ Bibliographie .. [Lions68] Lions J.-L., *Contrôle optimal de systèmes gouvernés par des équations aux dérivées partielles*, Dunod, 1968 -.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112, pp.1177-1194, 1986 +.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112(474), pp.1177-1194, 1986 -.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114, pp.205–240, 1988 +.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114(479), pp.205–240, 1988 .. [Morales11] Morales J. L., Nocedal J., *L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 38(1), 2011 @@ -151,7 +153,7 @@ Bibliographie .. [Zhu97] Zhu C., Byrd R. H., Nocedal J., *L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 23(4), pp.550-560, 1997 -.. [Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133, pp.1710–1726, 2005 +.. [Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133(6), pp.1710–1726, 2005 *Nota Bene* : un lien vers la version française de chaque page Wikipédia se trouve dans le sous-menu "*Languages*". 
Les deux versions sont complémentaires diff --git a/doc/fr/conf.py b/doc/fr/conf.py index 4943032..c97e168 100644 --- a/doc/fr/conf.py +++ b/doc/fr/conf.py @@ -23,6 +23,7 @@ # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D import sys, os, time, sphinx, logging +# logging.getLogger().setLevel(logging.DEBUG) # -- Module version information -------------------------------------------------- @@ -61,8 +62,10 @@ from distutils.version import LooseVersion #, StrictVersion __lv = LooseVersion(sphinx.__version__) if __lv < LooseVersion("1.4.0"): extensions = ['sphinx.ext.pngmath'] + logging.debug('Using "%s" extensions'%(extensions,)) else: extensions = ['sphinx.ext.imgmath'] + logging.debug('Using "%s" extensions'%(extensions,)) try: import sphinx_rtd_theme extensions += ['sphinx_rtd_theme'] diff --git a/doc/fr/examples.rst b/doc/fr/examples.rst new file mode 100644 index 0000000..0ab0681 --- /dev/null +++ b/doc/fr/examples.rst @@ -0,0 +1,65 @@ +.. + Copyright (C) 2008-2022 EDF R&D + + This file is part of SALOME ADAO module. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com + + Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +.. 
_section_docu_examples: + +================================================================================ +**[DocU]** Exemples d'utilisation d'ADAO +================================================================================ + +Pour faciliter la lecture ou la recherche, cette section rassemble de manière +synthétique les pointeurs directs, vers les sections adéquates, d'exemples +simples d’utilisation du module (principalement mais pas uniquement en TUI). + +Ces exemples sont disponibles en direct dans les tutoriaux, dans les +documentations détaillées d'algorithmes ou de problèmes appliqués, et dans les +usages avancés. + +Tutoriaux +--------- + +#. :ref:`section_tutorials_in_salome` +#. :ref:`section_tutorials_in_python` + +Utilisations d'algorithmes de calcul +------------------------------------ + +#. :ref:`Exemples avec l'algorithme de "3DVAR"` +#. :ref:`Exemples avec l'algorithme de "Blue"` +#. :ref:`Exemples avec l'algorithme de "ExtendedBlue"` +#. :ref:`Exemples avec l'algorithme de "KalmanFilter"` +#. :ref:`Exemples avec l'algorithme de "NonLinearLeastSquares"` + +Utilisations d'algorithmes de vérification +------------------------------------------ + +#. :ref:`Exemples avec la vérification "AdjointTest"` +#. :ref:`Exemples avec la vérification "FunctionTest"` +#. :ref:`Exemples avec la vérification "ParallelFunctionTest"` + +Utilisations avancées +--------------------- + +#. :ref:`section_advanced_convert_JDC` +#. :ref:`section_advanced_YACS_tui` +#. :ref:`section_advanced_R` diff --git a/doc/fr/index.rst b/doc/fr/index.rst index f469adf..c5ed074 100644 --- a/doc/fr/index.rst +++ b/doc/fr/index.rst @@ -50,29 +50,31 @@ d'une centaine de méthodes algorithmiques différentes et permet l'étude d'environ 350 problèmes appliqués distincts. 
La documentation de ce module est divisée en plusieurs grandes catégories, -relatives à la documentation théorique (indiquée dans le titre de section par -**[DocT]**), à la documentation utilisateur (indiquée dans le titre de section -par **[DocU]**), et à la documentation de référence (indiquée dans le titre de -section par **[DocR]**). +relatives à la **documentation théorique** (indiquée dans le titre de section +par **[DocT]**), à la **documentation utilisateur** (indiquée dans le titre de +section par **[DocU]**), et à la **documentation de référence** (indiquée dans +le titre de section par **[DocR]**). La première partie est l':ref:`section_intro`. La seconde partie présente :ref:`section_theory`, et à leurs concepts, et la partie suivante décrit la :ref:`section_methodology`. Pour un utilisateur courant, les parties suivantes -présentent des exemples didactiques d'utilisation sous la forme de -:ref:`section_tutorials_in_salome` ou de :ref:`section_tutorials_in_python`, -puis indique les :ref:`section_advanced`, avec l'obtention de renseignements -supplémentaires ou l'usage par scripts de commandes hors interface de contrôle -graphique. Les utilisateurs intéressés par un accès rapide au module peuvent -s'arrêter avant la lecture de la suite, mais un usage utile du module nécessite -de lire et de revenir régulièrement à ces parties. Les parties qui suivent -expliquent comment utiliser une :ref:`section_gui_in_salome` ou une -:ref:`section_tui`. La dernière grande partie détaille la -:ref:`section_reference`, avec trois sous-parties essentielles qui la composent -et qui décrivent les commandes et des options d'algorithmes. Un -:ref:`section_glossary`, des :ref:`section_notations`, une -:ref:`section_bibliography` et un index développé complètent le document. -Enfin, pour respecter les exigences de licence du module, n'oubliez pas de lire -la partie :ref:`section_license`. 
+présentent des :ref:`section_docu_examples`, rapidement accessibles par +l'ensemble des pointeurs vers les sous-parties. Des présentations didactiques +sont détaillés dans les :ref:`section_tutorials_in_salome` ou les +:ref:`section_tutorials_in_python`, et complétées par des indications sur les +:ref:`section_advanced`, avec l'obtention de renseignements supplémentaires ou +l'usage par scripts de commandes hors interface de contrôle graphique. + +Les utilisateurs intéressés par un accès rapide au module peuvent s'arrêter +avant la lecture de la suite, mais un usage utile du module nécessite de lire +et de revenir régulièrement à ces parties. Les parties qui suivent expliquent +comment utiliser une :ref:`section_gui_in_salome` ou une :ref:`section_tui`. La +dernière grande partie détaille la :ref:`section_reference`, avec trois +sous-parties essentielles qui la composent et qui décrivent les commandes et +des options d'algorithmes. Un :ref:`section_glossary`, des +:ref:`section_notations`, une :ref:`section_bibliography` et un index développé +complètent le document. Enfin, pour respecter les exigences de licence du +module, n'oubliez pas de lire la partie :ref:`section_license`. .. toctree:: :caption: Table des matières @@ -83,6 +85,7 @@ la partie :ref:`section_license`. intro theory methodology + examples tutorials_in_salome tutorials_in_python advanced diff --git a/doc/fr/ref_algorithm_3DVAR.rst b/doc/fr/ref_algorithm_3DVAR.rst index ed68757..3e9cb98 100644 --- a/doc/fr/ref_algorithm_3DVAR.rst +++ b/doc/fr/ref_algorithm_3DVAR.rst @@ -64,7 +64,7 @@ Cet algorithme est naturellement écrit pour une estimation unique, sans notion dynamique ou itérative (il n'y a donc pas besoin dans ce cas d'opérateur d'évolution incrémentale, ni de covariance d'erreurs d'évolution). 
Dans ADAO, il peut aussi être utilisé sur une succession d'observations, plaçant alors -l'estimation dans un cadre récursif en partie similaire à un +l'estimation dans un cadre récursif similaire à un :ref:`section_ref_algorithm_KalmanFilter`. Une estimation standard est effectuée à chaque pas d'observation sur l'état prévu par le modèle d'évolution incrémentale, sachant que la covariance d'erreur d'état reste la covariance @@ -72,6 +72,11 @@ d'ébauche initialement fournie par l'utilisateur. Pour être explicite, contrairement aux filtres de type Kalman, la covariance d'erreurs sur les états n'est pas remise à jour. +Une extension du 3DVAR, couplant en parallèle une méthode 3DVAR avec un filtre +de Kalman d'ensemble, permet d'améliorer l'estimation des covariances d'erreurs +*a posteriori*. On atteint cette extension en utilisant le variant "E3DVAR" de +l'algorithme de filtrage :ref:`section_ref_algorithm_EnsembleKalmanFilter`. + .. ------------------------------------ .. .. include:: snippets/Header2Algo02.rst @@ -251,6 +256,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulationQuantiles.rst .. ------------------------------------ .. +.. _section_ref_algorithm_3DVAR_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_3DVAR.rst @@ -260,6 +266,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_3DVAR.res + :language: none .. include:: snippets/Header2Algo11.rst diff --git a/doc/fr/ref_algorithm_4DVAR.rst b/doc/fr/ref_algorithm_4DVAR.rst index 448b31d..27bb8f7 100644 --- a/doc/fr/ref_algorithm_4DVAR.rst +++ b/doc/fr/ref_algorithm_4DVAR.rst @@ -155,6 +155,7 @@ StoreSupplementaryCalculations .. include:: snippets/IndexOfOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_4DVAR_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_3DVAR` diff --git a/doc/fr/ref_algorithm_AdjointTest.rst b/doc/fr/ref_algorithm_AdjointTest.rst index 91f4865..88a7f50 100644 --- a/doc/fr/ref_algorithm_AdjointTest.rst +++ b/doc/fr/ref_algorithm_AdjointTest.rst @@ -70,6 +70,8 @@ Ce résidu doit rester constamment égal à zéro à la précision du calcul. .. include:: snippets/InitialDirection.rst +.. include:: snippets/NumberOfPrintedDigits.rst + .. include:: snippets/SetSeed.rst StoreSupplementaryCalculations @@ -107,6 +109,19 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst +.. ------------------------------------ .. +.. _section_ref_algorithm_AdjointTest_examples: +.. include:: snippets/Header2Algo09.rst + +.. include:: scripts/simple_AdjointTest.rst + +.. literalinclude:: scripts/simple_AdjointTest.py + +.. include:: snippets/Header2Algo10.rst + +.. literalinclude:: scripts/simple_AdjointTest.res + :language: none + .. ------------------------------------ .. .. include:: snippets/Header2Algo06.rst diff --git a/doc/fr/ref_algorithm_Blue.rst b/doc/fr/ref_algorithm_Blue.rst index b763d56..45438f7 100644 --- a/doc/fr/ref_algorithm_Blue.rst +++ b/doc/fr/ref_algorithm_Blue.rst @@ -210,6 +210,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulationQuantiles.rst .. ------------------------------------ .. +.. _section_ref_algorithm_Blue_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_Blue.rst @@ -219,6 +220,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_Blue.res + :language: none .. ------------------------------------ .. .. 
include:: snippets/Header2Algo06.rst diff --git a/doc/fr/ref_algorithm_DerivativeFreeOptimization.rst b/doc/fr/ref_algorithm_DerivativeFreeOptimization.rst index a24c68a..d3593c1 100644 --- a/doc/fr/ref_algorithm_DerivativeFreeOptimization.rst +++ b/doc/fr/ref_algorithm_DerivativeFreeOptimization.rst @@ -169,6 +169,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_DerivativeFreeOptimization_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_ParticleSwarmOptimization` diff --git a/doc/fr/ref_algorithm_DifferentialEvolution.rst b/doc/fr/ref_algorithm_DifferentialEvolution.rst index 9f8008b..13a5c92 100644 --- a/doc/fr/ref_algorithm_DifferentialEvolution.rst +++ b/doc/fr/ref_algorithm_DifferentialEvolution.rst @@ -176,6 +176,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_DifferentialEvolution_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_DerivativeFreeOptimization` diff --git a/doc/fr/ref_algorithm_EnsembleBlue.rst b/doc/fr/ref_algorithm_EnsembleBlue.rst index 5bb432c..1773372 100644 --- a/doc/fr/ref_algorithm_EnsembleBlue.rst +++ b/doc/fr/ref_algorithm_EnsembleBlue.rst @@ -110,6 +110,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_EnsembleBlue_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_Blue` diff --git a/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst b/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst index 06f21f3..c6404c9 100644 --- a/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst +++ b/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst @@ -27,9 +27,6 @@ Algorithme de calcul "*EnsembleKalmanFilter*" --------------------------------------------- -.. ------------------------------------ .. -.. include:: snippets/Header2Algo00.rst - .. ------------------------------------ .. .. include:: snippets/Header2Algo01.rst @@ -65,18 +62,22 @@ formulations stables et robustes suivantes : pair: Variant ; ETKF-N pair: Variant ; MLEF pair: Variant ; IEnKF + pair: Variant ; E3DVAR + pair: Variant ; EnKS - "EnKF" (Ensemble Kalman Filter, voir [Evensen94]_), algorithme stochastique original, permettant de traiter de manière consistante un opérateur d'évolution non-linéaire, - "ETKF" (Ensemble-Transform Kalman Filter), algorithme déterministe d'EnKF, permettant de traiter un opérateur d'évolution non-linéaire avec beaucoup moins de membres (on recommande d'utiliser un nombre de membres de l'ordre de 10 ou même parfois moins), - "ETKF-N" (Ensemble-Transform Kalman Filter of finite size N), algorithme d'ETKF dit de "taille finie N", évitant de recourir à une inflation souvent nécessaire avec les autres algorithmes, - "MLEF" (Maximum Likelihood Kalman Filter, voir [Zupanski05]_), algorithme déterministe d'EnKF, permettant en plus de traiter de manière consistante un opérateur d'observation non-linéaire), - "IEnKF" (Iterative EnKF), algorithme déterministe d'EnKF, améliorant le traitement des non-linéarités des opérateurs, +- "E3DVAR" (EnKF 3DVAR), algorithme couplant assimilation d'ensemble et variationnelle, qui utilise en parallèle une assimilation variationnelle 3DVAR et un algorithme d'EnKF pour améliorer l'estimation des covariances d'erreurs *a posteriori*, - "EnKS" (Ensemble Kalman Smoother), algorithme 
de lissage avec un décalage fixe. -Sans pouvoir prétendre à l'universalité, on recommande d'utiliser l'"EnKF" -comme référence, et les autres algorithmes (dans l'ordre) comme des moyens pour -obtenir une assimilation de données plus économique et de qualité -éventuellement similaire. +Sans pouvoir prétendre à l'universalité, on recommande d'utiliser la +formulation "EnKF" comme référence, la formulation "ETKF-N" ou "IEnKF" pour une +performance robuste, et les autres algorithmes (dans l'ordre) comme des moyens +pour obtenir une assimilation de données plus économique et de qualité +(éventuellement) similaire. .. ------------------------------------ .. .. include:: snippets/Header2Algo02.rst @@ -100,6 +101,12 @@ obtenir une assimilation de données plus économique et de qualité .. include:: snippets/EstimationOf_State.rst +.. include:: snippets/HybridCostDecrementTolerance.rst + +.. include:: snippets/HybridCovarianceEquilibrium.rst + +.. include:: snippets/HybridMaximumNumberOfIterations.rst + .. include:: snippets/InflationFactor.rst .. include:: snippets/InflationType.rst @@ -208,6 +215,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_EnsembleKalmanFilter_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_KalmanFilter` @@ -222,4 +230,5 @@ StoreSupplementaryCalculations - [Bishop01]_ - [Evensen03]_ - [Zupanski05]_ +- [Hamill00]_ - [WikipediaEnKF]_ diff --git a/doc/fr/ref_algorithm_ExtendedBlue.rst b/doc/fr/ref_algorithm_ExtendedBlue.rst index 1ee1821..f073872 100644 --- a/doc/fr/ref_algorithm_ExtendedBlue.rst +++ b/doc/fr/ref_algorithm_ExtendedBlue.rst @@ -196,6 +196,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulationQuantiles.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ExtendedBlue_examples: .. include:: snippets/Header2Algo09.rst .. 
include:: scripts/simple_ExtendedBlue.rst @@ -205,6 +206,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_ExtendedBlue.res + :language: none .. ------------------------------------ .. .. include:: snippets/Header2Algo06.rst diff --git a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst index 439d722..bd5048a 100644 --- a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst +++ b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst @@ -205,6 +205,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ExtendedKalmanFilter_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_KalmanFilter` diff --git a/doc/fr/ref_algorithm_FunctionTest.rst b/doc/fr/ref_algorithm_FunctionTest.rst index 26aad55..3602f53 100644 --- a/doc/fr/ref_algorithm_FunctionTest.rst +++ b/doc/fr/ref_algorithm_FunctionTest.rst @@ -94,6 +94,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_FunctionTest_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_FunctionTest.rst @@ -103,6 +104,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_FunctionTest.res + :language: none .. ------------------------------------ .. .. include:: snippets/Header2Algo06.rst diff --git a/doc/fr/ref_algorithm_GradientTest.rst b/doc/fr/ref_algorithm_GradientTest.rst index b01d387..7697260 100644 --- a/doc/fr/ref_algorithm_GradientTest.rst +++ b/doc/fr/ref_algorithm_GradientTest.rst @@ -148,6 +148,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_GradientTest_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/fr/ref_algorithm_KalmanFilter.rst b/doc/fr/ref_algorithm_KalmanFilter.rst index c95472c..80ecd9d 100644 --- a/doc/fr/ref_algorithm_KalmanFilter.rst +++ b/doc/fr/ref_algorithm_KalmanFilter.rst @@ -196,6 +196,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_KalmanFilter_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_KalmanFilter1.rst @@ -205,6 +206,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_KalmanFilter1.res + :language: none .. include:: snippets/Header2Algo11.rst @@ -225,6 +227,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_KalmanFilter2.res + :language: none .. include:: snippets/Header2Algo11.rst diff --git a/doc/fr/ref_algorithm_LinearLeastSquares.rst b/doc/fr/ref_algorithm_LinearLeastSquares.rst index 21da389..12720b7 100644 --- a/doc/fr/ref_algorithm_LinearLeastSquares.rst +++ b/doc/fr/ref_algorithm_LinearLeastSquares.rst @@ -156,6 +156,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_LinearLeastSquares_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_Blue` diff --git a/doc/fr/ref_algorithm_LinearityTest.rst b/doc/fr/ref_algorithm_LinearityTest.rst index 3636081..1fa5fc1 100644 --- a/doc/fr/ref_algorithm_LinearityTest.rst +++ b/doc/fr/ref_algorithm_LinearityTest.rst @@ -172,6 +172,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_LinearityTest_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/fr/ref_algorithm_LocalSensitivityTest.rst b/doc/fr/ref_algorithm_LocalSensitivityTest.rst index d255af6..0d5d301 100644 --- a/doc/fr/ref_algorithm_LocalSensitivityTest.rst +++ b/doc/fr/ref_algorithm_LocalSensitivityTest.rst @@ -98,6 +98,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_LocalSensitivityTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/fr/ref_algorithm_MeasurementsOptimalPositioningTask.rst b/doc/fr/ref_algorithm_MeasurementsOptimalPositioningTask.rst new file mode 100644 index 0000000..d014321 --- /dev/null +++ b/doc/fr/ref_algorithm_MeasurementsOptimalPositioningTask.rst @@ -0,0 +1,111 @@ +.. + Copyright (C) 2008-2022 EDF R&D + + This file is part of SALOME ADAO module. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com + + Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +.. index:: single: MeasurementsOptimalPositioningTask +.. index:: single: Positionnement optimal de mesures +.. 
index:: single: Positions de mesures +.. index:: single: Mesures (Positionnement optimal) +.. _section_ref_algorithm_MeasurementsOptimalPositioningTask: + +Algorithme de tâche "*MeasurementsOptimalPositioningTask*" +---------------------------------------------------------- + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo00.rst + +.. warning:: + + Cet algorithme n'est pour l'instant utilisable qu'en interface textuelle + (TUI) et pas en interface graphique (GUI). + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo01.rst + +Cet algorithme permet d'établir des points de mesures optimaux par une analyse +EIM (Empirical Interpolation Method), à partir d'un ensemble de vecteurs d'état +(usuellement appelés "*snapshots*" en méthodologie de bases réduites). Chacun +de ces vecteurs d'état est habituellement (mais pas obligatoirement) le +résultat :math:`\mathbf{y}` d'une simulation :math:`H` pour un jeu de +paramètres donné :math:`\mathbf{x}=\mu`. + +Dans son usage le plus simple, si l'ensemble des vecteurs d'état est +pré-existant, il suffit de le fournir par les options d'algorithme. + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo02.rst + +*Aucune* + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo03Task.rst + +.. include:: snippets/EnsembleOfSnapshots.rst + +.. include:: snippets/ExcludeLocations.rst + +.. include:: snippets/ErrorNorm.rst + +.. include:: snippets/ErrorNormTolerance.rst + +.. include:: snippets/MaximumNumberOfLocations.rst + +StoreSupplementaryCalculations + .. index:: single: StoreSupplementaryCalculations + + *Liste de noms*. Cette liste indique les noms des variables supplémentaires, + qui peuvent être disponibles au cours du déroulement ou à la fin de + l'algorithme, si elles sont initialement demandées par l'utilisateur. Leur + disponibilité implique, potentiellement, des calculs ou du stockage coûteux. 
+ La valeur par défaut est donc une liste vide, aucune de ces variables n'étant + calculée et stockée par défaut (sauf les variables inconditionnelles). Les + noms possibles pour les variables supplémentaires sont dans la liste suivante + (la description détaillée de chaque variable nommée est donnée dans la suite + de cette documentation par algorithme spécifique, dans la sous-partie + "*Informations et variables disponibles à la fin de l'algorithme*") : [ + "OptimalPoints", + "ReducedBasis", + "Residus", + ]. + + Exemple : + ``{"StoreSupplementaryCalculations":["BMA", "CurrentState"]}`` + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo04.rst + +.. include:: snippets/OptimalPoints.rst + +.. ------------------------------------ .. +.. include:: snippets/Header2Algo05.rst + +.. include:: snippets/OptimalPoints.rst + +.. include:: snippets/ReducedBasis.rst + +.. include:: snippets/Residus.rst + +.. ------------------------------------ .. +.. _section_ref_algorithm_MeasurementsOptimalPositioningTask_examples: +.. include:: snippets/Header2Algo06.rst + +- :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/fr/ref_algorithm_NonLinearLeastSquares.rst b/doc/fr/ref_algorithm_NonLinearLeastSquares.rst index 0395239..9f559a5 100644 --- a/doc/fr/ref_algorithm_NonLinearLeastSquares.rst +++ b/doc/fr/ref_algorithm_NonLinearLeastSquares.rst @@ -192,6 +192,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_NonLinearLeastSquares_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_NonLinearLeastSquares.rst @@ -201,6 +202,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_NonLinearLeastSquares.res + :language: none .. 
include:: snippets/Header2Algo11.rst diff --git a/doc/fr/ref_algorithm_ParallelFunctionTest.rst b/doc/fr/ref_algorithm_ParallelFunctionTest.rst index c363a77..8beddfc 100644 --- a/doc/fr/ref_algorithm_ParallelFunctionTest.rst +++ b/doc/fr/ref_algorithm_ParallelFunctionTest.rst @@ -95,6 +95,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ParallelFunctionTest_examples: .. include:: snippets/Header2Algo09.rst .. include:: scripts/simple_ParallelFunctionTest.rst @@ -104,6 +105,7 @@ StoreSupplementaryCalculations .. include:: snippets/Header2Algo10.rst .. literalinclude:: scripts/simple_ParallelFunctionTest.res + :language: none .. ------------------------------------ .. .. include:: snippets/Header2Algo06.rst diff --git a/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst b/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst index 2dea1b7..8ede27d 100644 --- a/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst +++ b/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst @@ -148,6 +148,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_ParticleSwarmOptimization_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_DerivativeFreeOptimization` diff --git a/doc/fr/ref_algorithm_QuantileRegression.rst b/doc/fr/ref_algorithm_QuantileRegression.rst index 6a9169e..50c130b 100644 --- a/doc/fr/ref_algorithm_QuantileRegression.rst +++ b/doc/fr/ref_algorithm_QuantileRegression.rst @@ -137,6 +137,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_QuantileRegression_examples: .. include:: snippets/Header2Algo06.rst .. ------------------------------------ .. 
diff --git a/doc/fr/ref_algorithm_SamplingTest.rst b/doc/fr/ref_algorithm_SamplingTest.rst index ca8c58f..c4e0b40 100644 --- a/doc/fr/ref_algorithm_SamplingTest.rst +++ b/doc/fr/ref_algorithm_SamplingTest.rst @@ -133,6 +133,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_SamplingTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/fr/ref_algorithm_TabuSearch.rst b/doc/fr/ref_algorithm_TabuSearch.rst index ea05e78..02cb489 100644 --- a/doc/fr/ref_algorithm_TabuSearch.rst +++ b/doc/fr/ref_algorithm_TabuSearch.rst @@ -158,6 +158,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtOptimum.rst .. ------------------------------------ .. +.. _section_ref_algorithm_TabuSearch_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_DerivativeFreeOptimization` diff --git a/doc/fr/ref_algorithm_TangentTest.rst b/doc/fr/ref_algorithm_TangentTest.rst index 31d2ac0..844f442 100644 --- a/doc/fr/ref_algorithm_TangentTest.rst +++ b/doc/fr/ref_algorithm_TangentTest.rst @@ -114,6 +114,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_TangentTest_examples: .. include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_FunctionTest` diff --git a/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst b/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst index 0f6ceb6..4f4db44 100644 --- a/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst +++ b/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst @@ -185,6 +185,7 @@ StoreSupplementaryCalculations .. include:: snippets/SimulatedObservationAtCurrentState.rst .. ------------------------------------ .. +.. _section_ref_algorithm_UnscentedKalmanFilter_examples: .. 
include:: snippets/Header2Algo06.rst - :ref:`section_ref_algorithm_KalmanFilter` diff --git a/doc/fr/ref_assimilation_keywords.rst b/doc/fr/ref_assimilation_keywords.rst index 7043b37..9f188ac 100644 --- a/doc/fr/ref_assimilation_keywords.rst +++ b/doc/fr/ref_assimilation_keywords.rst @@ -46,8 +46,8 @@ plusieurs catégories), est impérativement désigné par l'une ces commandes: .. include:: snippets/REDUCTION_STUDY.rst -Les autres termes imbriqués sont classés par ordre alphabétique. Ils ne sont -pas obligatoirement requis pour tous les algorithmes. Les différentes commandes +Les termes imbriqués sont classés par ordre alphabétique. Ils ne sont pas +obligatoirement requis pour tous les algorithmes. Les différentes commandes sont les suivantes: .. include:: snippets/AlgorithmParameters.rst diff --git a/doc/fr/ref_checking_keywords.rst b/doc/fr/ref_checking_keywords.rst index a917f9f..70d39a3 100644 --- a/doc/fr/ref_checking_keywords.rst +++ b/doc/fr/ref_checking_keywords.rst @@ -31,13 +31,13 @@ une procédure pour vérifier les propriétés d'une information requise, utilis ailleurs par un cas de calcul. Le premier terme décrit le choix entre un calcul ou une vérification. Dans -l'interface graphique, la vérification est désigné obligatoirement par la -commande: +l'interface graphique, le choix est désigné obligatoirement par la commande: .. include:: snippets/CHECKING_STUDY.rst -Les autres termes sont classés par ordre alphabétique. Les différentes -commandes sont les suivantes: +Les termes imbriqués sont classés par ordre alphabétique. Ils ne sont pas +obligatoirement requis pour tous les algorithmes. Les différentes commandes +sont les suivantes: .. include:: snippets/AlgorithmParameters.rst diff --git a/doc/fr/ref_task_keywords.rst b/doc/fr/ref_task_keywords.rst new file mode 100644 index 0000000..cca695e --- /dev/null +++ b/doc/fr/ref_task_keywords.rst @@ -0,0 +1,46 @@ +.. + Copyright (C) 2008-2022 EDF R&D + + This file is part of SALOME ADAO module. 
+ + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com + + Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +.. _section_ref_task_keywords: + +Liste des commandes et mots-clés pour un cas orienté tâche ou étude dédiée +-------------------------------------------------------------------------- + +Ce jeu de commandes est lié à la description d'un cas orienté tâche ou étude +dédiée, qui consiste en une procédure spécifique simple pour effectuer une +tâche de calcul dédiée à une application générale des méthodes d'assimilation +de données ou d'optimisation. + +Les termes imbriqués sont classés par ordre alphabétique. Ils ne sont pas +obligatoirement requis pour tous les algorithmes. Les différentes commandes +sont les suivantes: + +.. include:: snippets/AlgorithmParameters.rst + +.. include:: snippets/Debug.rst + +.. include:: snippets/Observers.rst + +.. include:: snippets/StudyName.rst + +.. include:: snippets/StudyRepertory.rst diff --git a/doc/fr/reference.rst b/doc/fr/reference.rst index 67fb22c..a0a9b29 100644 --- a/doc/fr/reference.rst +++ b/doc/fr/reference.rst @@ -147,3 +147,25 @@ dans la section :ref:`section_theory`. ref_algorithm_SamplingTest ref_algorithm_TangentTest ref_checking_keywords + +.. 
_section_reference_task: + +================================================================================ +**[DocR]** Cas orientés tâches ou études dédiées +================================================================================ + +Cette section décrit les algorithmes de tâches facilitant une étude dédiée +disponibles dans ADAO, détaillant leurs caractéristiques d'utilisation et leurs +options. + +Ces tâches utilisent des algorithmes provenant de méthodes d'assimilation de +données, de méthodes d'optimisation ou de méthodes avec réduction. On renvoie à +la section :ref:`section_theory` et à celle des +:ref:`section_reference_assimilation` pour les détails algorithmiques +sous-jacents. + +.. toctree:: + :maxdepth: 1 + + ref_algorithm_MeasurementsOptimalPositioningTask + ref_task_keywords diff --git a/doc/fr/scripts/simple_AdjointTest.py b/doc/fr/scripts/simple_AdjointTest.py new file mode 100644 index 0000000..a68f18e --- /dev/null +++ b/doc/fr/scripts/simple_AdjointTest.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# +from numpy import array, eye +from adao import adaoBuilder +case = adaoBuilder.New('') +case.setCheckingPoint( Vector = array([0., 1., 2.]), Stored=True ) +case.setObservation( Vector = [10., 11., 12.] ) +case.setObservationOperator( Matrix = eye(3), ) +case.setAlgorithmParameters( + Algorithm='AdjointTest', + Parameters={ + 'EpsilonMinimumExponent' :-12, + 'NumberOfPrintedDigits' : 3, + 'SetSeed' : 1234567, + }, + ) +case.execute() diff --git a/doc/fr/scripts/simple_AdjointTest.res b/doc/fr/scripts/simple_AdjointTest.res new file mode 100644 index 0000000..c63412c --- /dev/null +++ b/doc/fr/scripts/simple_AdjointTest.res @@ -0,0 +1,37 @@ + + ADJOINTTEST + =========== + + This test allows to analyze the quality of an adjoint operator associated + to some given direct operator. If the adjoint operator is approximated and + not given, the test measures the quality of the automatic approximation. 
+ + Using the "ScalarProduct" formula, one observes the residue R which is the + difference of two scalar products: + + R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > | + + which must remain constantly equal to zero to the accuracy of the calculation. + One takes dX0 = Normal(0,X) and dX = Alpha*dX0, where F is the calculation + operator. If it is given, Y must be in the image of F. If it is not given, + one takes Y = F(X). + + (Remark: numbers that are (about) under 2e-16 represent 0 to machine precision) + + ------------------------------------------------------------- + i Alpha ||X|| ||Y|| ||dX|| R(Alpha) + ------------------------------------------------------------- + 0 1e+00 2.236e+00 1.910e+01 3.536e+00 0.000e+00 + 1 1e-01 2.236e+00 1.910e+01 3.536e-01 0.000e+00 + 2 1e-02 2.236e+00 1.910e+01 3.536e-02 0.000e+00 + 3 1e-03 2.236e+00 1.910e+01 3.536e-03 0.000e+00 + 4 1e-04 2.236e+00 1.910e+01 3.536e-04 0.000e+00 + 5 1e-05 2.236e+00 1.910e+01 3.536e-05 0.000e+00 + 6 1e-06 2.236e+00 1.910e+01 3.536e-06 0.000e+00 + 7 1e-07 2.236e+00 1.910e+01 3.536e-07 0.000e+00 + 8 1e-08 2.236e+00 1.910e+01 3.536e-08 0.000e+00 + 9 1e-09 2.236e+00 1.910e+01 3.536e-09 0.000e+00 + 10 1e-10 2.236e+00 1.910e+01 3.536e-10 0.000e+00 + 11 1e-11 2.236e+00 1.910e+01 3.536e-11 0.000e+00 + 12 1e-12 2.236e+00 1.910e+01 3.536e-12 0.000e+00 + ------------------------------------------------------------- diff --git a/doc/fr/scripts/simple_AdjointTest.rst b/doc/fr/scripts/simple_AdjointTest.rst new file mode 100644 index 0000000..af656b6 --- /dev/null +++ b/doc/fr/scripts/simple_AdjointTest.rst @@ -0,0 +1,15 @@ +.. index:: single: AdjointTest (exemple) + +Cet exemple décrit le test de la qualité de l'adjoint d'un opérateur +quelconque, dont la formulation directe est donnée et dont la formulation +adjointe est ici approximé par défaut. 
Les informations nécessaires sont +minimales, à savoir ici un opérateur :math:`F` (décrit pour le test par la +commande d'observation "*ObservationOperator*"), et un état +:math:`\mathbf{x}^b` sur lequel le tester (décrit pour le test par la commande +"*CheckingPoint*"). Une observation :math:`\mathbf{y}^o` peut être donnée comme +ici (décrit pour le test par la commande "*Observation*"). On a paramétré la +sortie pour fixer l'impression, par exemple pour faciliter la comparaison +automatique. + +La vérification pratique consiste à observer si le résidu est constamment égal +à zéro à la précision du calcul. diff --git a/doc/fr/scripts/simple_Blue.py b/doc/fr/scripts/simple_Blue.py index 36f3740..edef24d 100644 --- a/doc/fr/scripts/simple_Blue.py +++ b/doc/fr/scripts/simple_Blue.py @@ -5,11 +5,11 @@ from adao import adaoBuilder case = adaoBuilder.New('') case.setBackground( Vector = array([0., 1., 2.]), Stored=True ) case.setBackgroundError( ScalarSparseMatrix = 1. ) -case.setObservation( Vector=array([10., 11., 12.]), Stored=True ) +case.setObservation( Vector = array([10., 11., 12.]), Stored=True ) case.setObservationError( ScalarSparseMatrix = 1. ) -case.setObservationOperator( Matrix=array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]), ) +case.setObservationOperator( Matrix = array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]), ) case.setAlgorithmParameters( Algorithm='Blue', Parameters={ diff --git a/doc/fr/scripts/simple_ExtendedBlue.py b/doc/fr/scripts/simple_ExtendedBlue.py index ad9d1e0..7508fe7 100644 --- a/doc/fr/scripts/simple_ExtendedBlue.py +++ b/doc/fr/scripts/simple_ExtendedBlue.py @@ -5,11 +5,11 @@ from adao import adaoBuilder case = adaoBuilder.New('') case.setBackground( Vector = array([0., 1., 2.]), Stored=True ) case.setBackgroundError( ScalarSparseMatrix = 1. 
) -case.setObservation( Vector=array([10., 11., 12.]), Stored=True ) +case.setObservation( Vector = array([10., 11., 12.]), Stored=True ) case.setObservationError( ScalarSparseMatrix = 1. ) -case.setObservationOperator( Matrix=array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]), ) +case.setObservationOperator( Matrix = array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]), ) case.setAlgorithmParameters( Algorithm='ExtendedBlue', Parameters={ diff --git a/doc/fr/scripts/simple_FunctionTest.py b/doc/fr/scripts/simple_FunctionTest.py index 638d6dd..d99ec9f 100644 --- a/doc/fr/scripts/simple_FunctionTest.py +++ b/doc/fr/scripts/simple_FunctionTest.py @@ -10,7 +10,7 @@ case.setAlgorithmParameters( Parameters={ 'NumberOfRepetition' : 5, 'NumberOfPrintedDigits' : 2, - "ShowElementarySummary":False, + 'ShowElementarySummary':False, }, ) case.execute() diff --git a/doc/fr/scripts/simple_FunctionTest.res b/doc/fr/scripts/simple_FunctionTest.res index 8f57585..a63fff5 100644 --- a/doc/fr/scripts/simple_FunctionTest.res +++ b/doc/fr/scripts/simple_FunctionTest.res @@ -1,8 +1,16 @@ + + FUNCTIONTEST + ============ + + This test allows to analyze the (repetition of) launch of some given + operator. It shows simple statistics related to its successful execution, + or related to the similarities of repetition of its execution. 
+ ===> Information before launching: ----------------------------- Characteristics of input vector X, internally converted: Type...............: - Lenght of vector...: 3 + Length of vector...: 3 Minimum value......: 0.00e+00 Maximum value......: 2.00e+00 Mean of vector.....: 1.00e+00 diff --git a/doc/fr/scripts/simple_FunctionTest.rst b/doc/fr/scripts/simple_FunctionTest.rst index 3152f78..f988d3d 100644 --- a/doc/fr/scripts/simple_FunctionTest.rst +++ b/doc/fr/scripts/simple_FunctionTest.rst @@ -2,8 +2,8 @@ Cet exemple décrit le test du bon fonctionnement d'un opérateur quelconque, et que son appel se déroule de manière compatible avec son usage courant dans les -algorithmes d'ADAO. Les information nécessaires sont minimales, à savoir ici un -opérateur :math:`F` (décrit pour le test par la commande d'observation +algorithmes d'ADAO. Les informations nécessaires sont minimales, à savoir ici +un opérateur :math:`F` (décrit pour le test par la commande d'observation "*ObservationOperator*"), et un état :math:`\mathbf{x}^b` sur lequel le tester (décrit pour le test par la commande "*CheckingPoint*"). diff --git a/doc/fr/scripts/simple_ParallelFunctionTest.res b/doc/fr/scripts/simple_ParallelFunctionTest.res index 829da09..bb6af81 100644 --- a/doc/fr/scripts/simple_ParallelFunctionTest.res +++ b/doc/fr/scripts/simple_ParallelFunctionTest.res @@ -1,8 +1,16 @@ + + PARALLELFUNCTIONTEST + ==================== + + This test allows to analyze the (repetition of) launch of some given + operator. It shows simple statistics related to its successful execution, + or related to the similarities of repetition of its execution. 
+ ===> Information before launching: ----------------------------- Characteristics of input vector X, internally converted: Type...............: - Lenght of vector...: 30 + Length of vector...: 30 Minimum value......: 0.00e+00 Maximum value......: 2.90e+01 Mean of vector.....: 1.45e+01 diff --git a/doc/fr/snippets/AlgorithmParameters.rst b/doc/fr/snippets/AlgorithmParameters.rst index 6053104..5d83e7a 100644 --- a/doc/fr/snippets/AlgorithmParameters.rst +++ b/doc/fr/snippets/AlgorithmParameters.rst @@ -4,11 +4,11 @@ AlgorithmParameters *Dictionnaire*. La variable définit l'algorithme d'assimilation de données ou d'optimisation choisi par le mot-clé "*Algorithm*", et ses éventuels - paramètres optionnels. Les choix d'algorithmes sont disponibles à travers - l'interface graphique. Il existe par exemple le "3DVAR", le "Blue"... Chaque - algorithme est défini, plus loin, par une sous-section spécifique. De manière - facultative, la commande permet aussi d'ajouter des paramètres pour contrôler - l'algorithme. Leurs valeurs sont définies explicitement ou dans un objet de - type "*Dict*". On se reportera à la - :ref:`section_ref_options_Algorithm_Parameters` pour l'usage détaillé de - cette partie de la commande. + paramètres optionnels. Les choix potentiels par ce mot-clé "*Algorithm*" sont + disponibles à travers l'interface graphique ou dans la documentation de + référence contenant une sous-section spécifique pour chacun d'eux. Il existe + par exemple le "3DVAR", le "Blue", etc. De manière facultative, la commande + permet aussi d'ajouter des paramètres pour contrôler l'algorithme choisi. + Leurs valeurs sont définies explicitement ou dans un objet de type "*Dict*". + On se reportera à la :ref:`section_ref_options_Algorithm_Parameters` pour + l'usage détaillé de cette partie de la commande. 
diff --git a/doc/fr/snippets/CostDecrementTolerance.rst b/doc/fr/snippets/CostDecrementTolerance.rst index 6ad2f8c..b1aed08 100644 --- a/doc/fr/snippets/CostDecrementTolerance.rst +++ b/doc/fr/snippets/CostDecrementTolerance.rst @@ -3,9 +3,9 @@ CostDecrementTolerance *Valeur réelle*. Cette clé indique une valeur limite, conduisant à arrêter le processus itératif d'optimisation lorsque la fonction coût décroît moins que - cette tolérance au dernier pas. Le défaut est de 1.e-7, et il est recommandé - de l'adapter aux besoins pour des problèmes réels. On peut se reporter à la - partie décrivant les manières de + cette tolérance au dernier pas. La valeur par défaut est de 1.e-7, et il est + recommandé de l'adapter aux besoins pour des problèmes réels. On peut se + reporter à la partie décrivant les manières de :ref:`subsection_iterative_convergence_control` pour des recommandations plus détaillées. diff --git a/doc/fr/snippets/EnsembleOfSnapshots.rst b/doc/fr/snippets/EnsembleOfSnapshots.rst new file mode 100644 index 0000000..de66dc4 --- /dev/null +++ b/doc/fr/snippets/EnsembleOfSnapshots.rst @@ -0,0 +1,13 @@ +.. index:: single: EnsembleOfSnapshots + +EnsembleOfSnapshots + *Liste de vecteurs ou matrice*. Cette clé contient un ensemble de vecteurs + d'état physique :math:`\mathbf{y}` (nommés "*snapshots*" en terminologie de + "Bases Réduites"), avec 1 état par colonne si c'est une matrice ou 1 état par + élément de la liste. Important : la numérotation des points, auxquels est + fournie une valeur d'état, dans chaque vecteur est implicitement celle de + l'ordre naturel de numérotation du vecteur d'état, de 0 à la "taille moins 1" + de ce vecteur. + + Exemple : + ``{"EnsembleOfSnapshots":[y1, y2, y3...]}`` diff --git a/doc/fr/snippets/ErrorNorm.rst b/doc/fr/snippets/ErrorNorm.rst new file mode 100644 index 0000000..580c5b0 --- /dev/null +++ b/doc/fr/snippets/ErrorNorm.rst @@ -0,0 +1,9 @@ +.. index:: single: ErrorNorm + +ErrorNorm + *Nom prédéfini*. 
Cette clé indique la norme utilisée pour le résidu qui + contrôle la recherche optimale. Le défaut est la norme "L2". Les critères + possibles sont dans la liste suivante : ["L2", "Linf"]. + + Exemple : + ``{"ErrorNorm":"L2"}`` diff --git a/doc/fr/snippets/ErrorNormTolerance.rst b/doc/fr/snippets/ErrorNormTolerance.rst new file mode 100644 index 0000000..0a0b17d --- /dev/null +++ b/doc/fr/snippets/ErrorNormTolerance.rst @@ -0,0 +1,13 @@ +.. index:: single: ErrorNormTolerance + +ErrorNormTolerance + *Valeur réelle*. Cette clé indique la valeur à partir de laquelle le résidu + associé à l'approximation est acceptable, ce qui conduit à arrêter la + recherche optimale. La valeur par défaut est de 1.e-7 (ce qui équivaut + usuellement à une quasi-absence de critère d'arrêt car l'approximation est + moins précise), et il est recommandé de l'adapter aux besoins pour des + problèmes réels. Une valeur habituelle, recommandée pour arrêter la recherche + sur critère de résidu, est de 1.e-2. + + Exemple : + ``{"ErrorNormTolerance":1.e-7}`` diff --git a/doc/fr/snippets/ExcludeLocations.rst b/doc/fr/snippets/ExcludeLocations.rst new file mode 100644 index 0000000..47cf1b2 --- /dev/null +++ b/doc/fr/snippets/ExcludeLocations.rst @@ -0,0 +1,11 @@ +.. index:: single: ExcludeLocations + +ExcludeLocations + *Liste d'entiers*. Cette clé indique la liste des points du vecteur d'état + exclus de la recherche optimale. La valeur par défaut est une liste vide. + Important : la numérotation de ces points exclus doit être identique à celle + qui est adoptée implicitement dans les états fournis par la clé + "*EnsembleOfSnapshots*". + + Exemple : + ``{"ExcludeLocations":[3, 125, 286]}`` diff --git a/doc/fr/snippets/Header2Algo03AdOp.rst b/doc/fr/snippets/Header2Algo03AdOp.rst index 234637a..8eabc51 100644 --- a/doc/fr/snippets/Header2Algo03AdOp.rst +++ b/doc/fr/snippets/Header2Algo03AdOp.rst @@ -6,4 +6,4 @@ particulières, décrites ci-après, de l'algorithme. 
On se reportera à la :ref:`section_ref_options_Algorithm_Parameters` pour le bon usage de cette commande. -Les options de l'algorithme sont les suivantes : +Les options sont les suivantes : diff --git a/doc/fr/snippets/Header2Algo03Chck.rst b/doc/fr/snippets/Header2Algo03Chck.rst index 0fdd785..c505cd0 100644 --- a/doc/fr/snippets/Header2Algo03Chck.rst +++ b/doc/fr/snippets/Header2Algo03Chck.rst @@ -6,4 +6,4 @@ décrites ci-après, de l'algorithme. On se reportera à la :ref:`section_ref_options_Algorithm_Parameters` pour le bon usage de cette commande. -Les options de l'algorithme sont les suivantes : +Les options sont les suivantes : diff --git a/doc/fr/snippets/Header2Algo03Task.rst b/doc/fr/snippets/Header2Algo03Task.rst new file mode 100644 index 0000000..c91aeda --- /dev/null +++ b/doc/fr/snippets/Header2Algo03Task.rst @@ -0,0 +1,9 @@ +Les commandes optionnelles générales, disponibles en édition dans l'interface +graphique ou textuelle, sont indiquées dans la +:ref:`section_ref_task_keywords`. De plus, les paramètres de la commande +"*AlgorithmParameters*" permettent d'indiquer les options particulières, +décrites ci-après, de l'algorithme. On se reportera à la +:ref:`section_ref_options_Algorithm_Parameters` pour le bon usage de cette +commande. 
+ +Les options sont les suivantes : diff --git a/doc/fr/snippets/Header2Algo09.rst b/doc/fr/snippets/Header2Algo09.rst index bb285ba..c2a017d 100644 --- a/doc/fr/snippets/Header2Algo09.rst +++ b/doc/fr/snippets/Header2Algo09.rst @@ -1,5 +1,5 @@ -Exemple d'utilisation en Python (TUI) -+++++++++++++++++++++++++++++++++++++ +Exemples d'utilisation en Python (TUI) +++++++++++++++++++++++++++++++++++++++ Voici un exemple très simple d'usage de l'algorithme proposé et de ses paramètres, écrit en :ref:`section_tui`, et dont les informations indiquées en diff --git a/doc/fr/snippets/HybridCostDecrementTolerance.rst b/doc/fr/snippets/HybridCostDecrementTolerance.rst new file mode 100644 index 0000000..9ee65e7 --- /dev/null +++ b/doc/fr/snippets/HybridCostDecrementTolerance.rst @@ -0,0 +1,13 @@ +.. index:: single: HybridCostDecrementTolerance + +HybridCostDecrementTolerance + *Valeur réelle*. Cette clé indique une valeur limite, conduisant à arrêter le + processus itératif d'optimisation dans la partie variationnelle du couplage, + lorsque la fonction coût décroît moins que cette tolérance au dernier pas. Le + défaut est de 1.e-7, et il est recommandé de l'adapter aux besoins pour des + problèmes réels. On peut se reporter à la partie décrivant les manières de + :ref:`subsection_iterative_convergence_control` pour des recommandations plus + détaillées. + + Exemple : + ``{"HybridCostDecrementTolerance":1.e-7}`` diff --git a/doc/fr/snippets/HybridCovarianceEquilibrium.rst b/doc/fr/snippets/HybridCovarianceEquilibrium.rst new file mode 100644 index 0000000..ae7f1d5 --- /dev/null +++ b/doc/fr/snippets/HybridCovarianceEquilibrium.rst @@ -0,0 +1,10 @@ +.. index:: single: HybridCovarianceEquilibrium + +HybridCovarianceEquilibrium + *Valeur réelle*. Cette clé indique, en optimisation hybride variationnelle, + le facteur d'équilibre entre la covariance statique *a priori* et la + covariance d'ensemble. Ce facteur est compris entre 0 et 1, et sa valeur par + défaut est 0.5. 
+ + Exemple : + ``{"HybridCovarianceEquilibrium":0.5}`` diff --git a/doc/fr/snippets/HybridMaximumNumberOfIterations.rst b/doc/fr/snippets/HybridMaximumNumberOfIterations.rst new file mode 100644 index 0000000..c322656 --- /dev/null +++ b/doc/fr/snippets/HybridMaximumNumberOfIterations.rst @@ -0,0 +1,15 @@ +.. index:: single: HybridMaximumNumberOfIterations + +HybridMaximumNumberOfIterations + *Valeur entière*. Cette clé indique le nombre maximum d'itérations internes + possibles en optimisation hybride, pour la partie variationnelle. Le défaut + est 15000, qui est très similaire à une absence de limite sur les itérations. + Il est ainsi recommandé d'adapter ce paramètre aux besoins pour des problèmes + réels. Pour certains optimiseurs, le nombre de pas effectif d'arrêt peut être + légèrement différent de la limite à cause d'exigences de contrôle interne de + l'algorithme. On peut se reporter à la partie décrivant les manières de + :ref:`subsection_iterative_convergence_control` pour des recommandations plus + détaillées. + + Exemple : + ``{"HybridMaximumNumberOfIterations":100}`` diff --git a/doc/fr/snippets/MaximumNumberOfLocations.rst b/doc/fr/snippets/MaximumNumberOfLocations.rst new file mode 100644 index 0000000..72c9e9f --- /dev/null +++ b/doc/fr/snippets/MaximumNumberOfLocations.rst @@ -0,0 +1,12 @@ +.. index:: single: MaximumNumberOfLocations + +MaximumNumberOfLocations + *Valeur entière*. Cette clé indique le nombre maximum possible de positions + trouvée dans la recherche optimale. La valeur par défaut est 1. La recherche + optimale peut éventuellement trouver moins de positions que ce qui est requis + par cette clé, comme par exemple dans le cas où le résidu associé à + l'approximation est inférieur au critère et conduit à l'arrêt anticipé de la + recherche optimale. 
+ + Exemple : + ``{"MaximumNumberOfLocations":5}`` diff --git a/doc/fr/snippets/OptimalPoints.rst b/doc/fr/snippets/OptimalPoints.rst new file mode 100644 index 0000000..1cd50f6 --- /dev/null +++ b/doc/fr/snippets/OptimalPoints.rst @@ -0,0 +1,10 @@ +.. index:: single: OptimalPoints + +OptimalPoints + *Liste de série d'entiers*. Chaque élément est une série, contenant les + points idéaux déterminés par la recherche optimale, rangés par ordre de + préférence décroissante et dans le même ordre que les vecteurs de base + réduite trouvés itérativement. + + Exemple : + ``mp = ADD.get("OptimalPoints")[-1]`` diff --git a/doc/fr/snippets/ReducedBasis.rst b/doc/fr/snippets/ReducedBasis.rst new file mode 100644 index 0000000..105a41f --- /dev/null +++ b/doc/fr/snippets/ReducedBasis.rst @@ -0,0 +1,10 @@ +.. index:: single: ReducedBasis + +ReducedBasis + *Liste de matrices*. Chaque élément est une matrice, contenant dans chaque + colonne un vecteur de la base réduite obtenue par la recherche optimale, + rangés par ordre de préférence décroissante et dans le même ordre que les + points idéaux trouvés itérativement. + + Exemple : + ``rb = ADD.get("ReducedBasis")[-1]`` diff --git a/doc/fr/snippets/Residu.rst b/doc/fr/snippets/Residu.rst index a047098..2898fc9 100644 --- a/doc/fr/snippets/Residu.rst +++ b/doc/fr/snippets/Residu.rst @@ -2,7 +2,7 @@ Residu *Liste de valeurs*. Chaque élément est la valeur du résidu particulier - vérifié lors d'un algorithme de vérification, selon l'ordre des tests + vérifié lors du déroulement de l'algorithme, selon l'ordre des tests effectués. Exemple : diff --git a/doc/fr/snippets/Residus.rst b/doc/fr/snippets/Residus.rst new file mode 100644 index 0000000..6b18903 --- /dev/null +++ b/doc/fr/snippets/Residus.rst @@ -0,0 +1,9 @@ +.. index:: single: Residus + +Residus + *Liste de série de valeurs réelles*. Chaque élément est une série, contenant + les valeurs du résidu particulier vérifié lors du déroulement de + l'algorithme. 
+ + Exemple : + ``rs = ADD.get("Residus")[:]`` diff --git a/doc/fr/snippets/Variant_EnKF.rst b/doc/fr/snippets/Variant_EnKF.rst index f44b918..a2238c1 100644 --- a/doc/fr/snippets/Variant_EnKF.rst +++ b/doc/fr/snippets/Variant_EnKF.rst @@ -5,21 +5,24 @@ pair: Variant ; ETKF-N pair: Variant ; MLEF pair: Variant ; IEnKF + pair: Variant ; E3DVAR pair: Variant ; EnKS Variant *Nom prédéfini*. Cette clé permet de choisir l'une des variantes possibles - pour l'algorithme principal. La variante par défaut est l'"EnKF" d'origine, - et les choix possibles sont + pour l'algorithme principal. La variante par défaut est la formulation "EnKF" + d'origine, et les choix possibles sont "EnKF" (Ensemble Kalman Filter), "ETKF" (Ensemble-Transform Kalman Filter), "ETKF-N" (Ensemble-Transform Kalman Filter), "MLEF" (Maximum Likelihood Kalman Filter), "IEnKF" (Iterative_EnKF), + "E3DVAR" (EnKF 3DVAR), "EnKS" (Ensemble Kalman Smoother). - Il est conseillé d'essayer les variantes "ETKF-N" ou "IEnKF", et de réduire - le nombre de membres à une dizaine ou moins pour toutes les variantes autres - que l'"EnKF" original. + + Il est conseillé d'essayer les variantes "ETKF-N" ou "IEnKF" pour une + performance robuste, et de réduire le nombre de membres à une dizaine ou + moins pour toutes les variantes autres que la formulation "EnKF" originale. Exemple : ``{"Variant":"EnKF"}`` diff --git a/doc/fr/tutorials_in_python.rst b/doc/fr/tutorials_in_python.rst index bcbc0da..57ea640 100644 --- a/doc/fr/tutorials_in_python.rst +++ b/doc/fr/tutorials_in_python.rst @@ -118,7 +118,7 @@ Pour étendre cet exemple, on peut modifier les variances représentées par :math:`\mathbf{x}^b`, en proportion inverse des variances dans :math:`\mathbf{B}` et :math:`\mathbf{R}`. Comme autre extension, on peut aussi dire qu'il est équivalent de rechercher l'analyse à l'aide d'un algorithme de -BLUE ou d'un algorithme de 3DVAR. +"Blue" ou d'un algorithme de "3DVAR". 
Utiliser l'interface textuelle (TUI) pour construire le cas ADAO ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -201,10 +201,10 @@ comme montré ci-après : >>> Pour étendre cet exemple, on peut remarquer que le même problème résolu par un -algorithme de 3DVAR donne le même résultat. Cet algorithme peut être choisi +algorithme de "3DVAR" donne le même résultat. Cet algorithme peut être choisi lors de l'étape de construction du cas ADAO en changeant simplement l'argument -"*Algorithm*" en entête. Le reste du cas ADAO en 3DVAR est alors entièrement -similaire au cas algorithmique du BLUE. +"*Algorithm*" en entête. Le reste du cas ADAO en "3DVAR" est alors entièrement +similaire au cas algorithmique du "Blue". .. _section_tutorials_in_python_script: @@ -292,12 +292,12 @@ Les autres étapes et résultats sont exactement les mêmes que dans l'exemple précédent :ref:`section_tutorials_in_python_explicit`. Dans la pratique, cette démarche par scripts est la manière la plus facile pour -récupérer des informations depuis des calculs en ligne ou préalables, depuis des -fichiers statiques, depuis des bases de données ou des flux informatiques, +récupérer des informations depuis des calculs en ligne ou préalables, depuis +des fichiers statiques, depuis des bases de données ou des flux informatiques, chacun pouvant être dans ou hors SALOME. Cela permet aussi de modifier aisément des données d'entrée, par exemple à des fin de débogage ou pour des traitements répétitifs, et c'est la méthode la plus polyvalente pour paramétrer les données d'entrée. **Mais attention, la méthodologie par scripts n'est pas une procédure "sûre", en ce sens que des données erronées ou des erreurs dans les calculs, peuvent être directement introduites dans l'exécution du cas ADAO. 
-L'utilisateur doit vérifier avec attention le contenu de ses scripts.** +L'utilisateur doit vérifier avec soin le contenu de ses scripts.** diff --git a/src/daComposant/daAlgorithms/3DVAR.py b/src/daComposant/daAlgorithms/3DVAR.py index 355426b..fa394d8 100644 --- a/src/daComposant/daAlgorithms/3DVAR.py +++ b/src/daComposant/daAlgorithms/3DVAR.py @@ -183,7 +183,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): message = "État initial imposé (par défaut, c'est l'ébauche si None)", ) self.requireInputArguments( - mandatory= ("Xb", "Y", "HO", "R", "B" ), + mandatory= ("Xb", "Y", "HO", "R", "B"), optional = ("U", "EM", "CM", "Q"), ) self.setAttributes(tags=( diff --git a/src/daComposant/daAlgorithms/4DVAR.py b/src/daComposant/daAlgorithms/4DVAR.py index a18d94d..d3c5bcf 100644 --- a/src/daComposant/daAlgorithms/4DVAR.py +++ b/src/daComposant/daAlgorithms/4DVAR.py @@ -126,7 +126,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): message = "État initial imposé (par défaut, c'est l'ébauche si None)", ) self.requireInputArguments( - mandatory= ("Xb", "Y", "HO", "EM", "R", "B" ), + mandatory= ("Xb", "Y", "HO", "EM", "R", "B"), optional = ("U", "CM", "Q"), ) self.setAttributes(tags=( diff --git a/src/daComposant/daAlgorithms/AdjointTest.py b/src/daComposant/daAlgorithms/AdjointTest.py index db8c6b3..99cb72f 100644 --- a/src/daComposant/daAlgorithms/AdjointTest.py +++ b/src/daComposant/daAlgorithms/AdjointTest.py @@ -35,6 +35,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): message = "Formule de résidu utilisée", listval = ["ScalarProduct"], ) + self.defineRequiredParameter( + name = "AmplitudeOfInitialDirection", + default = 1., + typecast = float, + message = "Amplitude de la direction initiale de la dérivée directionnelle autour du point nominal", + ) self.defineRequiredParameter( name = "EpsilonMinimumExponent", default = -8, @@ -50,15 +56,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): message = "Direction initiale de la dérivée 
directionnelle autour du point nominal", ) self.defineRequiredParameter( - name = "AmplitudeOfInitialDirection", - default = 1., - typecast = float, - message = "Amplitude de la direction initiale de la dérivée directionnelle autour du point nominal", - ) - self.defineRequiredParameter( - name = "SetSeed", - typecast = numpy.random.seed, - message = "Graine fixée pour le générateur aléatoire", + name = "NumberOfPrintedDigits", + default = 5, + typecast = int, + message = "Nombre de chiffres affichés pour les impressions de réels", + minval = 0, ) self.defineRequiredParameter( name = "ResultTitle", @@ -66,6 +68,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): typecast = str, message = "Titre du tableau et de la figure", ) + self.defineRequiredParameter( + name = "SetSeed", + typecast = numpy.random.seed, + message = "Graine fixée pour le générateur aléatoire", + ) self.defineRequiredParameter( name = "StoreSupplementaryCalculations", default = [], @@ -113,39 +120,57 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xn, ) # - # Entete des resultats # -------------------- - __marge = 12*u" " - __precision = u""" - Remarque : les nombres inferieurs a %.0e (environ) representent un zero - a la precision machine.\n"""%mpr - if self._parameters["ResiduFormula"] == "ScalarProduct": - __entete = u" i Alpha ||X|| ||Y|| ||dX|| R(Alpha)" - __msgdoc = u""" - On observe le residu qui est la difference de deux produits scalaires : - - R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > | - - qui doit rester constamment egal a zero a la precision du calcul. - On prend dX0 = Normal(0,X) et dX = Alpha*dX0. F est le code de calcul. - Y doit etre dans l'image de F. 
S'il n'est pas donne, on prend Y = F(X).\n""" + __precision + __p = self._parameters["NumberOfPrintedDigits"] # + __marge = 5*u" " if len(self._parameters["ResultTitle"]) > 0: __rt = str(self._parameters["ResultTitle"]) - msgs = u"\n" - msgs += __marge + "====" + "="*len(__rt) + "====\n" - msgs += __marge + " " + __rt + "\n" - msgs += __marge + "====" + "="*len(__rt) + "====\n" + msgs = ("\n") + msgs += (__marge + "====" + "="*len(__rt) + "====\n") + msgs += (__marge + " " + __rt + "\n") + msgs += (__marge + "====" + "="*len(__rt) + "====\n") else: - msgs = u"" - msgs += __msgdoc + msgs = ("\n") + msgs += (" %s\n"%self._name) + msgs += (" %s\n"%("="*len(self._name),)) # + msgs += ("\n") + msgs += (" This test allows to analyze the quality of an adjoint operator associated\n") + msgs += (" to some given direct operator. If the adjoint operator is approximated and\n") + msgs += (" not given, the test measures the quality of the automatic approximation.\n") + # + if self._parameters["ResiduFormula"] == "ScalarProduct": + msgs += ("\n") + msgs += (" Using the \"%s\" formula, one observes the residue R which is the\n"%self._parameters["ResiduFormula"]) + msgs += (" difference of two scalar products:\n") + msgs += ("\n") + msgs += (" R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > |\n") + msgs += ("\n") + msgs += (" which must remain constantly equal to zero to the accuracy of the calculation.\n") + msgs += (" One takes dX0 = Normal(0,X) and dX = Alpha*dX0, where F is the calculation\n") + msgs += (" operator. If it is given, Y must be in the image of F. 
If it is not given,\n") + msgs += (" one takes Y = F(X).\n") + msgs += ("\n") + msgs += (" (Remark: numbers that are (about) under %.0e represent 0 to machine precision)"%mpr) + print(msgs) + # + # -------------------- + __pf = " %"+str(__p+7)+"."+str(__p)+"e" + __ms = " %2i %5.0e"+(__pf*4) + __bl = " %"+str(__p+7)+"s " + __entete = str.rstrip(" i Alpha " + \ + str.center("||X||",2+__p+7) + \ + str.center("||Y||",2+__p+7) + \ + str.center("||dX||",2+__p+7) + \ + str.center("R(Alpha)",2+__p+7)) __nbtirets = len(__entete) + 2 + # + msgs = "" msgs += "\n" + __marge + "-"*__nbtirets msgs += "\n" + __marge + __entete msgs += "\n" + __marge + "-"*__nbtirets # - # ---------- for i,amplitude in enumerate(Perturbations): dX = amplitude * dX0 NormedX = numpy.linalg.norm( dX ) @@ -155,17 +180,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Residu = abs(float(numpy.dot( TangentFXdX, Yn ) - numpy.dot( dX, AdjointFXY ))) # - msg = " %2i %5.0e %9.3e %9.3e %9.3e | %9.3e"%(i,amplitude,NormeX,NormeY,NormedX,Residu) + msg = __ms%(i,amplitude,NormeX,NormeY,NormedX,Residu) msgs += "\n" + __marge + msg # self.StoredVariables["Residu"].store( Residu ) # msgs += "\n" + __marge + "-"*__nbtirets - msgs += "\n" - # - # Sorties eventuelles - # ------------------- - print("\nResults of adjoint check by \"%s\" formula:"%self._parameters["ResiduFormula"]) print(msgs) # self._post_run(HO) diff --git a/src/daComposant/daAlgorithms/Atoms/ecweim.py b/src/daComposant/daAlgorithms/Atoms/ecweim.py new file mode 100644 index 0000000..a783407 --- /dev/null +++ b/src/daComposant/daAlgorithms/Atoms/ecweim.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2008-2022 EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +__doc__ = """ + EIM +""" +__author__ = "Jean-Philippe ARGAUD" + +import numpy + +# ============================================================================== +def EIM_offline(selfA, Verbose = False): + """ + Établissement de base par Empirical Interpolation Method (EIM) + """ + # + # Initialisations + # --------------- + if isinstance(selfA._parameters["EnsembleOfSnapshots"], (numpy.ndarray,numpy.matrix)): + __EOS = numpy.asarray(selfA._parameters["EnsembleOfSnapshots"]) + elif isinstance(selfA._parameters["EnsembleOfSnapshots"], (list,tuple)): + __EOS = numpy.asarray(selfA._parameters["EnsembleOfSnapshots"]).T + else: + raise ValueError("EOS has to be an array/matrix (each column is a snapshot vector) or a list/tuple (each element is a snapshot vector).") + # + if selfA._parameters["ErrorNorm"] == "L2": + MaxNormByColumn = MaxL2NormByColumn + else: + MaxNormByColumn = MaxLinfNormByColumn + # + if "ExcludeLocations" in selfA._parameters: + __ExcludedMagicPoints = selfA._parameters["ExcludeLocations"] + else: + __ExcludedMagicPoints = [] + if len(__ExcludedMagicPoints) > 0: + __ExcludedMagicPoints = numpy.ravel(numpy.asarray(__ExcludedMagicPoints, dtype=int)) + __IncludedMagicPoints = numpy.setdiff1d( + numpy.arange(__EOS.shape[0]), + __ExcludedMagicPoints, + assume_unique = True, + ) + else: + __IncludedMagicPoints = [] + # + __dimS, 
__nbmS = __EOS.shape + if "MaximumNumberOfLocations" in selfA._parameters and "MaximumRBSize" in selfA._parameters: + selfA._parameters["MaximumRBSize"] = min(selfA._parameters["MaximumNumberOfLocations"],selfA._parameters["MaximumRBSize"]) + elif "MaximumNumberOfLocations" in selfA._parameters: + selfA._parameters["MaximumRBSize"] = selfA._parameters["MaximumNumberOfLocations"] + elif "MaximumRBSize" in selfA._parameters: + pass + else: + selfA._parameters["MaximumRBSize"] = __nbmS + __maxM = min(selfA._parameters["MaximumRBSize"], __dimS, __nbmS) + if "ErrorNormTolerance" in selfA._parameters: + selfA._parameters["EpsilonEIM"] = selfA._parameters["ErrorNormTolerance"] + else: + selfA._parameters["EpsilonEIM"] = 1.e-2 + # + __mu = [] + __I = [] + __Q = numpy.empty(__dimS) + __errors = [] + # + __M = 0 + __iM = -1 + __rhoM = numpy.empty(__dimS) + # + __eM, __muM = MaxNormByColumn(__EOS, __IncludedMagicPoints) + __residuM = __EOS[:,__muM] + __errors.append(__eM) + # + # Boucle + # ------ + while __M < __maxM and __eM > selfA._parameters["EpsilonEIM"]: + __M = __M + 1 + # + __mu.append(__muM) + # + # Détermination du point et de la fonction magiques + __abs_residuM = numpy.abs(__residuM) + __iM = numpy.argmax(__abs_residuM) + __rhoM = __residuM / __abs_residuM[__iM] + # + if __iM in __ExcludedMagicPoints: + __sIndices = numpy.argsort(__abs_residuM) + __rang = -1 + assert __iM == __sIndices[__rang] + while __iM in __ExcludedMagicPoints and __rang >= -len(__abs_residuM): + __rang = __rang - 1 + __iM = __sIndices[__rang] + # + if __M > 1: + __Q = numpy.column_stack((__Q, __rhoM)) + else: + __Q = __rhoM + __I.append(__iM) + # + __restrictedQi = __Q[__I] + if __M > 1: + __Qi_inv = numpy.linalg.inv(__restrictedQi) + else: + __Qi_inv = 1. 
/ __restrictedQi + # + __restrictedEOSi = __EOS[__I] + # + __interpolator = numpy.empty(__EOS.shape) + if __M > 1: + __interpolator = numpy.dot(__Q,numpy.dot(__Qi_inv,__restrictedEOSi)) + else: + __interpolator = numpy.outer(__Q,numpy.outer(__Qi_inv,__restrictedEOSi)) + # + __dataForNextIter = __EOS - __interpolator + __eM, __muM = MaxNormByColumn(__dataForNextIter, __IncludedMagicPoints) + __errors.append(__eM) + # + __residuM = __dataForNextIter[:,__muM] + # + #-------------------------- + if hasattr(selfA, "StoredVariables"): + selfA.StoredVariables["OptimalPoints"].store( __I ) + if selfA._toStore("ReducedBasis"): + selfA.StoredVariables["ReducedBasis"].store( __Q ) + if selfA._toStore("Residus"): + selfA.StoredVariables["Residus"].store( __errors ) + # + return __mu, __I, __Q, __errors + +# ============================================================================== +def EIM_online(selfA, QEIM, mu, iEIM): + raise NotImplementedError() + +# ============================================================================== +def MaxL2NormByColumn(Ensemble, IncludedPoints=[]): + nmax, imax = -1, -1 + if len(IncludedPoints) > 0: + for indice in range(Ensemble.shape[1]): + norme = numpy.linalg.norm( + numpy.take(Ensemble[:,indice], IncludedPoints, mode='clip'), + ) + if norme > nmax: + nmax, imax, = norme, indice + else: + for indice in range(Ensemble.shape[1]): + norme = numpy.linalg.norm( + Ensemble[:,indice], + ) + if norme > nmax: + nmax, imax, = norme, indice + return nmax, imax + +def MaxLinfNormByColumn(Ensemble, IncludedPoints=[]): + nmax, imax = -1, -1 + if len(IncludedPoints) > 0: + for indice in range(Ensemble.shape[1]): + norme = numpy.linalg.norm( + numpy.take(Ensemble[:,indice], IncludedPoints, mode='clip'), + ord=numpy.inf, + ) + if norme > nmax: + nmax, imax, = norme, indice + else: + for indice in range(Ensemble.shape[1]): + norme = numpy.linalg.norm( + Ensemble[:,indice], + ord=numpy.inf, + ) + if norme > nmax: + nmax, imax, = norme, indice + return 
nmax, imax + +# ============================================================================== +if __name__ == "__main__": + print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daAlgorithms/EnsembleBlue.py b/src/daComposant/daAlgorithms/EnsembleBlue.py index 83b5a77..863b21a 100644 --- a/src/daComposant/daAlgorithms/EnsembleBlue.py +++ b/src/daComposant/daAlgorithms/EnsembleBlue.py @@ -61,6 +61,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "NonLinear", "Filter", "Ensemble", + "Reduction", )) def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None): diff --git a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py index 26e7ddc..a800a59 100644 --- a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py +++ b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py @@ -39,6 +39,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "ETKF-N", "MLEF", "IEnKF", + "E3DVAR", "EnKS", ], listadv = [ @@ -56,7 +57,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "IEnKF-B", "EnKS-KFF", "IEKF", - "E3DVAR", "E3DVAR-EnKF", "E3DVAR-ETKF", "E3DVAR-MLEF", @@ -185,6 +185,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "Filter", "Ensemble", "Dynamic", + "Reduction", )) def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None): diff --git a/src/daComposant/daAlgorithms/FunctionTest.py b/src/daComposant/daAlgorithms/FunctionTest.py index 22bf549..3d97381 100644 --- a/src/daComposant/daAlgorithms/FunctionTest.py +++ b/src/daComposant/daAlgorithms/FunctionTest.py @@ -88,26 +88,35 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # ---------- __s = self._parameters["ShowElementarySummary"] + __p = self._parameters["NumberOfPrintedDigits"] + # __marge = 5*u" " - _p = self._parameters["NumberOfPrintedDigits"] if len(self._parameters["ResultTitle"]) > 0: __rt = str(self._parameters["ResultTitle"]) - msgs = 
u"\n" - msgs += __marge + "====" + "="*len(__rt) + "====\n" - msgs += __marge + " " + __rt + "\n" - msgs += __marge + "====" + "="*len(__rt) + "====\n" - print("%s"%msgs) + msgs = ("\n") + msgs += (__marge + "====" + "="*len(__rt) + "====\n") + msgs += (__marge + " " + __rt + "\n") + msgs += (__marge + "====" + "="*len(__rt) + "====\n") + else: + msgs = ("\n") + msgs += (" %s\n"%self._name) + msgs += (" %s\n"%("="*len(self._name),)) # - msgs = ("===> Information before launching:\n") + msgs += ("\n") + msgs += (" This test allows to analyze the (repetition of) launch of some given\n") + msgs += (" operator. It shows simple statistics related to its successful execution,\n") + msgs += (" or related to the similarities of repetition of its execution.\n") + msgs += ("\n") + msgs += ("===> Information before launching:\n") msgs += (" -----------------------------\n") msgs += (" Characteristics of input vector X, internally converted:\n") msgs += (" Type...............: %s\n")%type( Xn ) - msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Xn ).shape) - msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Xn ) - msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Xn ) - msgs += (" Mean of vector.....: %."+str(_p)+"e\n")%numpy.mean( Xn, dtype=mfp ) - msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Xn, dtype=mfp ) - msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Xn ) + msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Xn ).shape) + msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Xn ) + msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Xn ) + msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Xn, dtype=mfp ) + msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Xn, dtype=mfp ) + msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Xn ) print(msgs) # print(" %s\n"%("-"*75,)) @@ -139,12 +148,12 @@ class 
ElementaryAlgorithm(BasicObjects.Algorithm): msgs = ("===> Information after evaluation:\n") msgs += ("\n Characteristics of simulated output vector Y=H(X), to compare to others:\n") msgs += (" Type...............: %s\n")%type( Yn ) - msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Yn ).shape) - msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Yn ) - msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Yn ) - msgs += (" Mean of vector.....: %."+str(_p)+"e\n")%numpy.mean( Yn, dtype=mfp ) - msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Yn, dtype=mfp ) - msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Yn ) + msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Yn ).shape) + msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Yn ) + msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Yn ) + msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Yn, dtype=mfp ) + msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Yn, dtype=mfp ) + msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Yn ) print(msgs) if self._toStore("SimulatedObservationAtCurrentState"): self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(Yn) ) @@ -172,24 +181,24 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Yy = numpy.array( Ys ) msgs += ("\n Characteristics of the whole set of outputs Y:\n") msgs += (" Number of evaluations.........................: %i\n")%len( Ys ) - msgs += (" Minimum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.min( Yy ) - msgs += (" Maximum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.max( Yy ) - msgs += (" Mean of vector of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.mean( Yy, dtype=mfp ) - msgs += (" Standard error of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.std( Yy, dtype=mfp ) + msgs += (" Minimum value of the whole set of outputs.....: 
%."+str(__p)+"e\n")%numpy.min( Yy ) + msgs += (" Maximum value of the whole set of outputs.....: %."+str(__p)+"e\n")%numpy.max( Yy ) + msgs += (" Mean of vector of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.mean( Yy, dtype=mfp ) + msgs += (" Standard error of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.std( Yy, dtype=mfp ) Ym = numpy.mean( numpy.array( Ys ), axis=0, dtype=mfp ) msgs += ("\n Characteristics of the vector Ym, mean of the outputs Y:\n") msgs += (" Size of the mean of the outputs...............: %i\n")%Ym.size - msgs += (" Minimum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.min( Ym ) - msgs += (" Maximum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.max( Ym ) - msgs += (" Mean of the mean of the outputs...............: %."+str(_p)+"e\n")%numpy.mean( Ym, dtype=mfp ) - msgs += (" Standard error of the mean of the outputs.....: %."+str(_p)+"e\n")%numpy.std( Ym, dtype=mfp ) + msgs += (" Minimum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.min( Ym ) + msgs += (" Maximum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.max( Ym ) + msgs += (" Mean of the mean of the outputs...............: %."+str(__p)+"e\n")%numpy.mean( Ym, dtype=mfp ) + msgs += (" Standard error of the mean of the outputs.....: %."+str(__p)+"e\n")%numpy.std( Ym, dtype=mfp ) Ye = numpy.mean( numpy.array( Ys ) - Ym, axis=0, dtype=mfp ) msgs += "\n Characteristics of the mean of the differences between the outputs Y and their mean Ym:\n" msgs += (" Size of the mean of the differences...........: %i\n")%Ym.size - msgs += (" Minimum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.min( Ye ) - msgs += (" Maximum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.max( Ye ) - msgs += (" Mean of the mean of the differences...........: %."+str(_p)+"e\n")%numpy.mean( Ye, dtype=mfp ) - msgs += (" Standard error of the mean of the differences.: 
%."+str(_p)+"e\n")%numpy.std( Ye, dtype=mfp ) + msgs += (" Minimum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.min( Ye ) + msgs += (" Maximum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.max( Ye ) + msgs += (" Mean of the mean of the differences...........: %."+str(__p)+"e\n")%numpy.mean( Ye, dtype=mfp ) + msgs += (" Standard error of the mean of the differences.: %."+str(__p)+"e\n")%numpy.std( Ye, dtype=mfp ) msgs += ("\n %s\n"%("-"*75,)) print(msgs) # diff --git a/src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py b/src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py new file mode 100644 index 0000000..bc91b67 --- /dev/null +++ b/src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2008-2022 EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +import numpy +from daCore import BasicObjects +from daAlgorithms.Atoms import ecweim + +# ============================================================================== +class ElementaryAlgorithm(BasicObjects.Algorithm): + def __init__(self): + BasicObjects.Algorithm.__init__(self, "MEASUREMENTSOPTIMALPOSITIONING") + self.defineRequiredParameter( + name = "Variant", + default = "Positioning", + typecast = str, + message = "Variant ou formulation de la méthode", + listval = [ + "Positioning", + # "PositioningByEIM", + ], + ) + self.defineRequiredParameter( + name = "EnsembleOfSnapshots", + default = [], + typecast = numpy.array, + message = "Ensemble de vecteurs d'état physique (snapshots), 1 état par colonne", + ) + self.defineRequiredParameter( + name = "MaximumNumberOfLocations", + default = 1, + typecast = int, + message = "Nombre maximal de positions", + minval = 0, + ) + self.defineRequiredParameter( + name = "ExcludeLocations", + default = [], + typecast = tuple, + message = "Liste des positions exclues selon la numérotation interne d'un snapshot", + minval = -1, + ) + self.defineRequiredParameter( + name = "ErrorNorm", + default = "L2", + typecast = str, + message = "Norme d'erreur utilisée pour le critère d'optimalité des positions", + listval = ["L2", "Linf"] + ) + self.defineRequiredParameter( + name = "ErrorNormTolerance", + default = 1.e-7, + typecast = float, + message = "Valeur limite inférieure du critère d'optimalité forçant l'arrêt", + minval = 0., + ) + self.defineRequiredParameter( + name = "StoreSupplementaryCalculations", + default = [], + typecast = tuple, + message = 
"Liste de calculs supplémentaires à stocker et/ou effectuer", + listval = [ + "OptimalPoints", + "ReducedBasis", + "Residus", + ] + ) + self.requireInputArguments( + mandatory= (), + optional = ("Xb", "HO"), + ) + self.setAttributes(tags=( + "Reduction", + )) + + def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None): + self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q) + # + #-------------------------- + if self._parameters["Variant"] in ["Positioning", "PositioningByEIM"]: + if len(self._parameters["EnsembleOfSnapshots"]) > 0: + ecweim.EIM_offline(self) + # + #-------------------------- + else: + raise ValueError("Error in Variant name: %s"%self._parameters["Variant"]) + # + self._post_run(HO) + return 0 + +# ============================================================================== +if __name__ == "__main__": + print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daAlgorithms/ParallelFunctionTest.py b/src/daComposant/daAlgorithms/ParallelFunctionTest.py index 4364bd9..d9bb559 100644 --- a/src/daComposant/daAlgorithms/ParallelFunctionTest.py +++ b/src/daComposant/daAlgorithms/ParallelFunctionTest.py @@ -88,26 +88,35 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # ---------- __s = self._parameters["ShowElementarySummary"] + __p = self._parameters["NumberOfPrintedDigits"] + # __marge = 5*u" " - _p = self._parameters["NumberOfPrintedDigits"] if len(self._parameters["ResultTitle"]) > 0: __rt = str(self._parameters["ResultTitle"]) - msgs = u"\n" - msgs += __marge + "====" + "="*len(__rt) + "====\n" - msgs += __marge + " " + __rt + "\n" - msgs += __marge + "====" + "="*len(__rt) + "====\n" - print("%s"%msgs) + msgs = ("\n") + msgs += (__marge + "====" + "="*len(__rt) + "====\n") + msgs += (__marge + " " + __rt + "\n") + msgs += (__marge + "====" + "="*len(__rt) + "====\n") + else: + msgs = ("\n") + msgs += (" %s\n"%self._name) + msgs += (" %s\n"%("="*len(self._name),)) # - msgs = ("===> 
Information before launching:\n") + msgs += ("\n") + msgs += (" This test allows to analyze the (repetition of) launch of some given\n") + msgs += (" operator. It shows simple statistics related to its successful execution,\n") + msgs += (" or related to the similarities of repetition of its execution.\n") + msgs += ("\n") + msgs += ("===> Information before launching:\n") msgs += (" -----------------------------\n") msgs += (" Characteristics of input vector X, internally converted:\n") msgs += (" Type...............: %s\n")%type( Xn ) - msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Xn ).shape) - msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Xn ) - msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Xn ) - msgs += (" Mean of vector.....: %."+str(_p)+"e\n")%numpy.mean( Xn, dtype=mfp ) - msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Xn, dtype=mfp ) - msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Xn ) + msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Xn ).shape) + msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Xn ) + msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Xn ) + msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Xn, dtype=mfp ) + msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Xn, dtype=mfp ) + msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Xn ) print(msgs) # print(" %s\n"%("-"*75,)) @@ -157,12 +166,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): msgs = ("===> Information after evaluation:\n") msgs += ("\n Characteristics of simulated output vector Y=H(X), to compare to others:\n") msgs += (" Type...............: %s\n")%type( Yn ) - msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Yn ).shape) - msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Yn ) - msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Yn ) - msgs += (" Mean of vector.....: 
%."+str(_p)+"e\n")%numpy.mean( Yn, dtype=mfp ) - msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Yn, dtype=mfp ) - msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Yn ) + msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Yn ).shape) + msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Yn ) + msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Yn ) + msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Yn, dtype=mfp ) + msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Yn, dtype=mfp ) + msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Yn ) print(msgs) if self._toStore("SimulatedObservationAtCurrentState"): self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(Yn) ) @@ -176,24 +185,24 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Yy = numpy.array( Ys ) msgs += ("\n Characteristics of the whole set of outputs Y:\n") msgs += (" Number of evaluations.........................: %i\n")%len( Ys ) - msgs += (" Minimum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.min( Yy ) - msgs += (" Maximum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.max( Yy ) - msgs += (" Mean of vector of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.mean( Yy, dtype=mfp ) - msgs += (" Standard error of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.std( Yy, dtype=mfp ) + msgs += (" Minimum value of the whole set of outputs.....: %."+str(__p)+"e\n")%numpy.min( Yy ) + msgs += (" Maximum value of the whole set of outputs.....: %."+str(__p)+"e\n")%numpy.max( Yy ) + msgs += (" Mean of vector of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.mean( Yy, dtype=mfp ) + msgs += (" Standard error of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.std( Yy, dtype=mfp ) Ym = numpy.mean( numpy.array( Ys ), axis=0, dtype=mfp ) msgs += ("\n Characteristics of the vector Ym, mean of the outputs Y:\n") msgs += 
(" Size of the mean of the outputs...............: %i\n")%Ym.size - msgs += (" Minimum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.min( Ym ) - msgs += (" Maximum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.max( Ym ) - msgs += (" Mean of the mean of the outputs...............: %."+str(_p)+"e\n")%numpy.mean( Ym, dtype=mfp ) - msgs += (" Standard error of the mean of the outputs.....: %."+str(_p)+"e\n")%numpy.std( Ym, dtype=mfp ) + msgs += (" Minimum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.min( Ym ) + msgs += (" Maximum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.max( Ym ) + msgs += (" Mean of the mean of the outputs...............: %."+str(__p)+"e\n")%numpy.mean( Ym, dtype=mfp ) + msgs += (" Standard error of the mean of the outputs.....: %."+str(__p)+"e\n")%numpy.std( Ym, dtype=mfp ) Ye = numpy.mean( numpy.array( Ys ) - Ym, axis=0, dtype=mfp ) msgs += "\n Characteristics of the mean of the differences between the outputs Y and their mean Ym:\n" msgs += (" Size of the mean of the differences...........: %i\n")%Ym.size - msgs += (" Minimum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.min( Ye ) - msgs += (" Maximum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.max( Ye ) - msgs += (" Mean of the mean of the differences...........: %."+str(_p)+"e\n")%numpy.mean( Ye, dtype=mfp ) - msgs += (" Standard error of the mean of the differences.: %."+str(_p)+"e\n")%numpy.std( Ye, dtype=mfp ) + msgs += (" Minimum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.min( Ye ) + msgs += (" Maximum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.max( Ye ) + msgs += (" Mean of the mean of the differences...........: %."+str(__p)+"e\n")%numpy.mean( Ye, dtype=mfp ) + msgs += (" Standard error of the mean of the differences.: %."+str(__p)+"e\n")%numpy.std( Ye, dtype=mfp ) msgs += ("\n %s\n"%("-"*75,)) print(msgs) # diff --git 
a/src/daComposant/daAlgorithms/SamplingTest.py b/src/daComposant/daAlgorithms/SamplingTest.py index 123bf87..66bf30b 100644 --- a/src/daComposant/daAlgorithms/SamplingTest.py +++ b/src/daComposant/daAlgorithms/SamplingTest.py @@ -21,7 +21,7 @@ # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D import numpy, logging, itertools -from daCore import BasicObjects +from daCore import BasicObjects, NumericObjects from daCore.PlatformInfo import PlatformInfo mfp = PlatformInfo().MaximumPrecision() @@ -39,19 +39,19 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): name = "SampleAsExplicitHyperCube", default = [], typecast = tuple, - message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonages de chaque variable comme une liste", + message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonnages de chaque variable comme une liste", ) self.defineRequiredParameter( name = "SampleAsMinMaxStepHyperCube", default = [], typecast = tuple, - message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonages de chaque variable par un triplet [min,max,step]", + message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonnages de chaque variable par un triplet [min,max,step]", ) self.defineRequiredParameter( name = "SampleAsIndependantRandomVariables", default = [], typecast = tuple, - message = "Points de calcul définis par un hyper-cube dont les points sur chaque axe proviennent de l'échantillonage indépendant de la variable selon la spécification ['distribution',[parametres],nombre]", + message = "Points de calcul définis par un hyper-cube dont les points sur chaque axe proviennent de l'échantillonnage indépendant de la variable selon la spécification ['distribution',[parametres],nombre]", ) self.defineRequiredParameter( name = "QualityCriterion", @@ -112,34 +112,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Y0 = 
numpy.ravel( Y ) # # --------------------------- - if len(self._parameters["SampleAsnUplet"]) > 0: - sampleList = self._parameters["SampleAsnUplet"] - for i,Xx in enumerate(sampleList): - if numpy.ravel(Xx).size != X0.size: - raise ValueError("The size %i of the %ith state X in the sample and %i of the checking point Xb are different, they have to be identical."%(numpy.ravel(Xx).size,i+1,X0.size)) - elif len(self._parameters["SampleAsExplicitHyperCube"]) > 0: - sampleList = itertools.product(*list(self._parameters["SampleAsExplicitHyperCube"])) - elif len(self._parameters["SampleAsMinMaxStepHyperCube"]) > 0: - coordinatesList = [] - for i,dim in enumerate(self._parameters["SampleAsMinMaxStepHyperCube"]): - if len(dim) != 3: - raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be [min,max,step]."%(i,dim)) - else: - coordinatesList.append(numpy.linspace(dim[0],dim[1],1+int((float(dim[1])-float(dim[0]))/float(dim[2])))) - sampleList = itertools.product(*coordinatesList) - elif len(self._parameters["SampleAsIndependantRandomVariables"]) > 0: - coordinatesList = [] - for i,dim in enumerate(self._parameters["SampleAsIndependantRandomVariables"]): - if len(dim) != 3: - raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be ('distribution',(parameters),length) with distribution in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]."%(i,dim)) - elif not( str(dim[0]) in ['normal','lognormal','uniform','weibull'] and hasattr(numpy.random,dim[0]) ): - raise ValueError("For dimension %i, the distribution name \"%s\" is not allowed, please choose in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]"%(i,dim[0])) - else: - distribution = getattr(numpy.random,str(dim[0]),'normal') - coordinatesList.append(distribution(*dim[1], size=max(1,int(dim[2])))) - sampleList = itertools.product(*coordinatesList) - else: - sampleList = iter([X0,]) + 
sampleList = NumericObjects.BuildComplexSampleList( + self._parameters["SampleAsnUplet"], + self._parameters["SampleAsExplicitHyperCube"], + self._parameters["SampleAsMinMaxStepHyperCube"], + self._parameters["SampleAsIndependantRandomVariables"], + X0, + ) # ---------- BI = B.getI() RI = R.getI() diff --git a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py index c14407b..95a36bd 100644 --- a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py +++ b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py @@ -130,6 +130,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "Filter", "Ensemble", "Dynamic", + "Reduction", )) def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None): diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py index 9d12eac..2b3b1b4 100644 --- a/src/daComposant/daCore/BasicObjects.py +++ b/src/daComposant/daCore/BasicObjects.py @@ -44,14 +44,14 @@ class CacheManager(object): """ def __init__(self, toleranceInRedundancy = 1.e-18, - lenghtOfRedundancy = -1, + lengthOfRedundancy = -1, ): """ Les caractéristiques de tolérance peuvent être modifiées à la création. 
""" self.__tolerBP = float(toleranceInRedundancy) - self.__lenghtOR = int(lenghtOfRedundancy) - self.__initlnOR = self.__lenghtOR + self.__lengthOR = int(lengthOfRedundancy) + self.__initlnOR = self.__lengthOR self.__seenNames = [] self.__enabled = True self.clearCache() @@ -66,7 +66,7 @@ class CacheManager(object): __alc = False __HxV = None if self.__enabled: - for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1): + for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1): if not hasattr(xValue, 'size'): pass elif (str(oName) != self.__listOPCV[i][3]): @@ -83,15 +83,15 @@ class CacheManager(object): def storeValueInX(self, xValue, HxValue, oName="" ): "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x" - if self.__lenghtOR < 0: - self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2 - self.__initlnOR = self.__lenghtOR + if self.__lengthOR < 0: + self.__lengthOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2 + self.__initlnOR = self.__lengthOR self.__seenNames.append(str(oName)) if str(oName) not in self.__seenNames: # Etend la liste si nouveau - self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2 - self.__initlnOR += self.__lenghtOR + self.__lengthOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2 + self.__initlnOR += self.__lengthOR self.__seenNames.append(str(oName)) - while len(self.__listOPCV) > self.__lenghtOR: + while len(self.__listOPCV) > self.__lengthOR: self.__listOPCV.pop(0) self.__listOPCV.append( ( copy.copy(numpy.ravel(xValue)), # 0 Previous point @@ -102,13 +102,13 @@ class CacheManager(object): def disable(self): "Inactive le cache" - self.__initlnOR = self.__lenghtOR - self.__lenghtOR = 0 + self.__initlnOR = self.__lengthOR + self.__lengthOR = 0 self.__enabled = False def enable(self): "Active le cache" - self.__lenghtOR = self.__initlnOR + self.__lengthOR = self.__initlnOR self.__enabled = True # 
============================================================================== @@ -529,7 +529,7 @@ class FullOperator(object): if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18 - if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1 + if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF from daCore import NumericObjects @@ -543,7 +543,7 @@ class FullOperator(object): reducingMemoryUse = __Function["withReducingMemoryUse"], avoidingRedundancy = __Function["withAvoidingRedundancy"], toleranceInRedundancy = __Function["withToleranceInRedundancy"], - lenghtOfRedundancy = __Function["withLenghtOfRedundancy"], + lengthOfRedundancy = __Function["withLengthOfRedundancy"], mpEnabled = __Function["EnableMultiProcessingInDerivatives"], mpWorkers = __Function["NumberOfProcesses"], mfEnabled = __Function["withmfEnabled"], @@ -758,7 +758,10 @@ class Algorithm(object): self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency") self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA") self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB") + self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints") + self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis") self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu") + self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus") self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = 
"SampledStateForQuantiles") self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2") self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2") diff --git a/src/daComposant/daCore/Interfaces.py b/src/daComposant/daCore/Interfaces.py index 0876603..28be7e6 100644 --- a/src/daComposant/daCore/Interfaces.py +++ b/src/daComposant/daCore/Interfaces.py @@ -1186,19 +1186,19 @@ class EficasGUI(object): self.__msg = "" self.__path_settings_ok = False #---------------- - if "EFICAS_ROOT" in os.environ: - __EFICAS_ROOT = os.environ["EFICAS_ROOT"] + if "EFICAS_TOOLS_ROOT" in os.environ: + __EFICAS_TOOLS_ROOT = os.environ["EFICAS_TOOLS_ROOT"] __path_ok = True elif "EFICAS_NOUVEAU_ROOT" in os.environ: - __EFICAS_ROOT = os.environ["EFICAS_NOUVEAU_ROOT"] + __EFICAS_TOOLS_ROOT = os.environ["EFICAS_NOUVEAU_ROOT"] __path_ok = True else: self.__msg += "\nKeyError:\n"+\ - " the required environment variable EFICAS_ROOT is unknown.\n"+\ - " You have either to be in SALOME environment, or to set\n"+\ - " this variable in your environment to the right path \"<...>\"\n"+\ - " to find an installed EFICAS application. For example:\n"+\ - " EFICAS_ROOT=\"<...>\" command\n" + " the required environment variable EFICAS_TOOLS_ROOT is unknown.\n"+\ + " You have either to be in SALOME environment, or to set this\n"+\ + " variable in your environment to the right path \"<...>\" to\n"+\ + " find an installed EFICAS application. 
For example:\n"+\ + " EFICAS_TOOLS_ROOT=\"<...>\" command\n" __path_ok = False try: import adao @@ -1238,7 +1238,7 @@ class EficasGUI(object): self.__path_settings_ok = True #---------------- if self.__path_settings_ok: - sys.path.insert(0,__EFICAS_ROOT) + sys.path.insert(0,__EFICAS_TOOLS_ROOT) sys.path.insert(0,os.path.join(adao.adao_py_dir,"daEficas")) if __addpath is not None and os.path.exists(os.path.abspath(__addpath)): sys.path.insert(0,os.path.abspath(__addpath)) diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index ef821f4..0902965 100644 --- a/src/daComposant/daCore/NumericObjects.py +++ b/src/daComposant/daCore/NumericObjects.py @@ -25,7 +25,7 @@ __doc__ = """ """ __author__ = "Jean-Philippe ARGAUD" -import os, copy, types, sys, logging, numpy +import os, copy, types, sys, logging, numpy, itertools from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm from daCore.PlatformInfo import PlatformInfo mpr = PlatformInfo().MachinePrecision() @@ -68,7 +68,7 @@ class FDApproximation(object): reducingMemoryUse = False, avoidingRedundancy = True, toleranceInRedundancy = 1.e-18, - lenghtOfRedundancy = -1, + lengthOfRedundancy = -1, mpEnabled = False, mpWorkers = None, mfEnabled = False, @@ -98,7 +98,7 @@ class FDApproximation(object): if avoidingRedundancy: self.__avoidRC = True self.__tolerBP = float(toleranceInRedundancy) - self.__lenghtRJ = int(lenghtOfRedundancy) + self.__lengthRJ = int(lengthOfRedundancy) self.__listJPCP = [] # Jacobian Previous Calculated Points self.__listJPCI = [] # Jacobian Previous Calculated Increment self.__listJPCR = [] # Jacobian Previous Calculated Results @@ -411,8 +411,8 @@ class FDApproximation(object): if __Produit is None or self.__avoidRC: _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) ) if self.__avoidRC: - if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size - while len(self.__listJPCP) > self.__lenghtRJ: + if self.__lengthRJ < 0: self.__lengthRJ = 
2 * _X.size + while len(self.__listJPCP) > self.__lengthRJ: self.__listJPCP.pop(0) self.__listJPCI.pop(0) self.__listJPCR.pop(0) @@ -870,7 +870,7 @@ def ApplyBounds( __Vector, __Bounds, __newClip = True): raise ValueError("Incorrect array definition of vector data") if not isinstance(__Bounds, numpy.ndarray): # Is an array raise ValueError("Incorrect array definition of bounds data") - if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght + if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size)) if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2: raise ValueError("Incorrectly shaped bounds data") @@ -914,6 +914,46 @@ def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Sup # return Xa + EnsembleOfAnomalies( __EnXn ) +# ============================================================================== +def BuildComplexSampleList( + __SampleAsnUplet, + __SampleAsExplicitHyperCube, + __SampleAsMinMaxStepHyperCube, + __SampleAsIndependantRandomVariables, + __X0, + ): + # --------------------------- + if len(__SampleAsnUplet) > 0: + sampleList = __SampleAsnUplet + for i,Xx in enumerate(sampleList): + if numpy.ravel(Xx).size != __X0.size: + raise ValueError("The size %i of the %ith state X in the sample and %i of the checking point Xb are different, they have to be identical."%(numpy.ravel(Xx).size,i+1,__X0.size)) + elif len(__SampleAsExplicitHyperCube) > 0: + sampleList = itertools.product(*list(__SampleAsExplicitHyperCube)) + elif len(__SampleAsMinMaxStepHyperCube) > 0: + coordinatesList = [] + for i,dim in enumerate(__SampleAsMinMaxStepHyperCube): + if len(dim) != 3: + raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be [min,max,step]."%(i,dim)) + else: + 
coordinatesList.append(numpy.linspace(dim[0],dim[1],1+int((float(dim[1])-float(dim[0]))/float(dim[2])))) + sampleList = itertools.product(*coordinatesList) + elif len(__SampleAsIndependantRandomVariables) > 0: + coordinatesList = [] + for i,dim in enumerate(__SampleAsIndependantRandomVariables): + if len(dim) != 3: + raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be ('distribution',(parameters),length) with distribution in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]."%(i,dim)) + elif not( str(dim[0]) in ['normal','lognormal','uniform','weibull'] and hasattr(numpy.random,dim[0]) ): + raise ValueError("For dimension %i, the distribution name \"%s\" is not allowed, please choose in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]"%(i,dim[0])) + else: + distribution = getattr(numpy.random,str(dim[0]),'normal') + coordinatesList.append(distribution(*dim[1], size=max(1,int(dim[2])))) + sampleList = itertools.product(*coordinatesList) + else: + sampleList = iter([__X0,]) + # ---------- + return sampleList + # ============================================================================== def multiXOsteps(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle, __CovForecast = False, __LinEvolution = False, diff --git a/src/daComposant/daCore/version.py b/src/daComposant/daCore/version.py index f966e82..578357c 100644 --- a/src/daComposant/daCore/version.py +++ b/src/daComposant/daCore/version.py @@ -29,7 +29,7 @@ __all__ = [] name = "ADAO" version = "9.10.0" year = "2022" -date = "lundi 12 décembre 2022, 12:12:12 (UTC+0100)" +date = "lundi 14 novembre 2022, 12:12:12 (UTC+0100)" longname = name + ", a module for Data Assimilation and Optimization" cata = "V" + version.replace(".","_") diff --git a/src/daSalome/daYacsSchemaCreator/infos_daComposant.py b/src/daSalome/daYacsSchemaCreator/infos_daComposant.py index 1f2935d..3887e44 100644 --- 
a/src/daSalome/daYacsSchemaCreator/infos_daComposant.py +++ b/src/daSalome/daYacsSchemaCreator/infos_daComposant.py @@ -111,6 +111,7 @@ CheckAlgos = [ "InputValuesTest", "ObserverTest", ] +TaskAlgos = [] AlgoDataRequirements = {} AlgoDataRequirements["3DVAR"] = [ -- 2.39.2