optim_names = ""
reduc_names = ""
check_names = ""
-decl_algos = ""
+task_names = ""
adao_all_names = ""
assim_study_object = daCore.Aidsm.Aidsm()
algos_list = assim_study_object._Aidsm__get_available_algorithms()
if algo_name in infos.CheckAlgos:
logging.debug("A checking algorithm is found: " + algo_name)
check_names += "\"" + algo_name + "\", "
- if algo_name in infos.AssimAlgos+infos.OptimizationAlgos+infos.ReductionAlgos+infos.CheckAlgos:
+ if algo_name in infos.TaskAlgos:
+ logging.debug("A task algorithm is found: " + algo_name)
+ task_names += "\"" + algo_name + "\", "
+ if algo_name in infos.AssimAlgos+infos.OptimizationAlgos+infos.ReductionAlgos+infos.CheckAlgos+infos.TaskAlgos:
# Pour filtrer sur les algorithmes vraiment interfacés, car il peut y en avoir moins que "algos_list"
adao_all_names += "\"" + algo_name + "\", "
'optim_names':optim_names,
'reduc_names':reduc_names,
'check_names':check_names,
- 'decl_algos':decl_algos,
+ 'task_names':task_names,
}))
# Final step: On écrit le fichier
name = "ADAO"
version = "9.10.0"
year = "2022"
-date = "lundi 12 décembre 2022, 12:12:12 (UTC+0100)"
+date = "lundi 14 novembre 2022, 12:12:12 (UTC+0100)"
longname = name + ", a module for Data Assimilation and Optimization"
cata = "V" + version.replace(".","_")
inside the whole SALOME installation. All the names to be replaced by user are
indicated by the syntax ``<...>``.
+.. _section_advanced_convert_JDC:
+
Converting and executing an ADAO command file (JDC) using a Shell script
------------------------------------------------------------------------
In all cases, the standard output and errors come in the launching terminal.
+.. _section_advanced_YACS_tui:
+
Running an ADAO calculation scheme in YACS using the text user mode (YACS TUI)
------------------------------------------------------------------------------
.. index:: single: R
.. index:: single: rPython
+.. index:: single: reticulate
To extend the analysis and treatment capacities, it is possible to use ADAO
calculations in **R** environment (see [R]_ for more details). It is available
in SALOME by launching the R interpreter in the shell "``salome shell``".
-Moreover, the package "*rPython*" has to be available, it can be installed by
-the user if required by the following R command::
+Moreover, the package "*rPython*" (or the more recent "*reticulate*" one) has
+to be available, it can be installed by the user if required by the following R
+command::
#
# IMPORTANT: to be run in R interpreter
from adao import adaoBuilder
adaoBuilder.Gui()
-If necessary, explicit messages can be used to identify the required
-environment variables that are missing. However, this command should not be run
-in the SALOME Python console (because in this case it is enough to activate the
-module since we already are in the graphical environment...) but it can be done
-in a "SALOME shell" session obtained from the "Tools/Extensions" menu of
-SALOME. As a reminder, the easiest way to get a Python interpreter included in
-a "SALOME shell" session is to run the following command in a terminal::
+As a reminder, the easiest way to get a Python interpreter included in a
+"SALOME shell" session is to run the following command in a terminal::
$SALOMEDIR/salome shell -- python
with ``SALOMEDIR`` the ``<SALOME main installation directory>``.
+If necessary, explicit messages can be used to identify the required
+environment variables that are missing. However, **this command should not be
+run in the SALOME Python console** (because in this case it is enough to
+activate the module since we already are in the graphical environment...) or in
+an independent Python install, but it can be run in a "SALOME shell" session
+obtained from the "Tools/Extensions" menu of SALOME.
+
.. _section_advanced_execution_mode:
Change the default execution mode of nodes in YACS
.. [Buchinsky98] Buchinsky M., *Recent Advances in Quantile Regression Models: A Practical Guidline for Empirical Research*, Journal of Human Resources, 33(1), pp.88-126, 1998
-.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126, 1719–1724, 1998
+.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126(6), pp.1719–1724, 1998
.. [Byrd95] Byrd R. H., Lu P., Nocedal J., *A Limited Memory Algorithm for Bound Constrained Optimization*, SIAM Journal on Scientific and Statistical Computing, 16(5), pp.1190-1208, 1995
.. [Cohn98] Cohn S. E., Da Silva A., Guo J., Sienkiewicz M., Lamich D., *Assessing the effects of data selection with the DAO Physical-space Statistical Analysis System*, Monthly Weather Review, 126, pp.2913–2926, 1998
-.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120, pp.1367–1387, 1994
+.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120(519), pp.1367–1387, 1994
-.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123, pp.2449-2461, 1997
+.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123(544), pp.2449-2461, 1997
.. [Das11] Das S., Suganthan P. N., *Differential Evolution: A Survey of the State-of-the-art*, IEEE Transactions on Evolutionary Computation, 15(1), pp.4-31, 2011
.. [Glover90] Glover F., *Tabu Search-Part II*, ORSA Journal on Computing, 2(1), pp.4-32, 1990
+.. [Hamill00] Hamill T. M., Snyder C., *A Hybrid Ensemble Kalman Filter-3D Variational Analysis Scheme*, Monthly Weather Review, 128(8), pp.2905-2919, 2000
+
.. [Ide97] Ide K., Courtier P., Ghil M., Lorenc A. C., *Unified notation for data assimilation: operational, sequential and variational*, Journal of the Meteorological Society of Japan, 75(1B), pp.181-189, 1997
.. [Jazwinski70] Jazwinski A. H., *Stochastic Processes and Filtering Theory*, Academic Press, 1970
.. [Lions68] Lions J.-L., *Optimal Control of Systems Governed by Partial Differential Equations*, Springer, 1971
-.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112, pp.1177-1194, 1986
+.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112(474), pp.1177-1194, 1986
-.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114, pp.205–240, 1988
+.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114(479), pp.205–240, 1988
.. [Morales11] Morales J. L., Nocedal J., *L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 38(1), 2011
.. [Zhu97] Zhu C., Byrd R. H., Nocedal J., *L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 23(4), pp.550-560, 1997
-.. [Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133, pp.1710–1726, 2005
+.. [Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133(6), pp.1710–1726, 2005
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
import sys, os, time, sphinx, logging
+# logging.getLogger().setLevel(logging.DEBUG)
# -- Module version information --------------------------------------------------
__lv = LooseVersion(sphinx.__version__)
if __lv < LooseVersion("1.4.0"):
extensions = ['sphinx.ext.pngmath']
+ logging.debug('Using "%s" extensions'%(extensions,))
else:
extensions = ['sphinx.ext.imgmath']
+ logging.debug('Using "%s" extensions'%(extensions,))
try:
import sphinx_rtd_theme
extensions += ['sphinx_rtd_theme']
--- /dev/null
+..
+ Copyright (C) 2008-2022 EDF R&D
+
+ This file is part of SALOME ADAO module.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+ Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+.. _section_docu_examples:
+
+================================================================================
+**[DocU]** Examples of ADAO use
+================================================================================
+
+To make reading or searching easier, this section gathers in a synthetic way
+the direct pointers, to the appropriate sections, of simple examples of use of
+the module (mainly but not only in TUI).
+
+These examples are available in the tutorials, in the detailed documentations
+of algorithms or applied problems, and in the advanced uses.
+
+Tutorials
+---------
+
+#. :ref:`section_tutorials_in_salome`
+#. :ref:`section_tutorials_in_python`
+
+Calculation algorithms uses
+---------------------------
+
+#. :ref:`Examples with the "3DVAR" algorithm<section_ref_algorithm_3DVAR_examples>`
+#. :ref:`Examples with the "Blue" algorithm<section_ref_algorithm_Blue_examples>`
+#. :ref:`Examples with the "ExtendedBlue" algorithm<section_ref_algorithm_ExtendedBlue_examples>`
+#. :ref:`Examples with the "KalmanFilter" algorithm<section_ref_algorithm_KalmanFilter_examples>`
+#. :ref:`Examples with the "NonLinearLeastSquares" algorithm<section_ref_algorithm_NonLinearLeastSquares_examples>`
+
+Checking algorithms uses
+------------------------
+
+#. :ref:`Examples with the "AdjointTest" check<section_ref_algorithm_AdjointTest_examples>`
+#. :ref:`Examples with the "FunctionTest" check<section_ref_algorithm_FunctionTest_examples>`
+#. :ref:`Examples with the "ParallelFunctionTest" check<section_ref_algorithm_ParallelFunctionTest_examples>`
+
+Advanced uses
+-------------
+
+#. :ref:`section_advanced_convert_JDC`
+#. :ref:`section_advanced_YACS_tui`
+#. :ref:`section_advanced_R`
about 350 distinct applied problems.
The documentation for this module is divided into several major categories,
-related to the theoretical documentation (indicated in the section title by
-**[DocT]**), to the user documentation (indicated in the section title by
-**[DocU]**), and to the reference documentation (indicated in the section title
-by **[DocR]**).
+related to the **theoretical documentation** (indicated in the section title by
+**[DocT]**), to the **user documentation** (indicated in the section title by
+**[DocU]**), and to the **reference documentation** (indicated in the section
+title by **[DocR]**).
The first part is the :ref:`section_intro`. The second part introduces
:ref:`section_theory`, and their concepts, and the next part describes the
-:ref:`section_methodology`. For a standard user, the next parts describe
-examples on ADAO usage as :ref:`section_tutorials_in_salome` or
-:ref:`section_tutorials_in_python`, then indicates the :ref:`section_advanced`,
-with how to obtain additional information or how to use non-GUI command
-execution scripting. Users interested in quick use of the module can stop
-before reading the rest, but a valuable use of the module requires to read and
-come back regularly to these parts. The following parts describe
+:ref:`section_methodology`. For a standard user, the next parts describe some
+:ref:`section_docu_examples`, quickly accessible by the collection of pointers
+to the subsections. Didactic presentations are detailed in
+:ref:`section_tutorials_in_salome` or :ref:`section_tutorials_in_python`,
+supplemented by information on the :ref:`section_advanced` with how to obtain
+additional information or how to use non-GUI command execution scripting.
+
+Users interested in quick access to the module can stop before reading the
+remaining parts, but a valuable use of the module requires reading and
+returning to these parts regularly. The following parts describe
:ref:`section_gui_in_salome` and :ref:`section_tui`. The last main part gives a
detailed :ref:`section_reference`, with three essential main sub-parts
describing the details of commands and options of the algorithms. A
:ref:`section_glossary`, some :ref:`section_notations`, a
-:ref:`section_bibliography` and an extensive index are included in
-the document. And, to comply with the module requirements, be sure to read the
-part :ref:`section_license`.
+:ref:`section_bibliography` and an extensive index are included in the
+document. And, to comply with the module requirements, be sure to read the part
+:ref:`section_license`.
.. toctree::
:caption: Table of contents
intro
theory
methodology
+ examples
tutorials_in_salome
tutorials_in_python
advanced
or iterative notion (there is no need in this case for an incremental evolution
operator, nor for an evolution error covariance). In ADAO, it can also be used
on a succession of observations, placing the estimate in a recursive framework
-partly similar to a :ref:`section_ref_algorithm_KalmanFilter`. A standard
-estimate is made at each observation step on the state predicted by the
-incremental evolution model, knowing that the state error covariance remains
-the background covariance initially provided by the user. To be explicit,
-unlike Kalman-type filters, the state error covariance is not updated.
+similar to a :ref:`section_ref_algorithm_KalmanFilter`. A standard estimate is
+made at each observation step on the state predicted by the incremental
+evolution model, knowing that the state error covariance remains the background
+covariance initially provided by the user. To be explicit, unlike Kalman-type
+filters, the state error covariance is not updated.
+
+An extension of 3DVAR, coupling a 3DVAR method with a Kalman ensemble filter,
+allows to improve the estimation of *a posteriori* error covariances. This
+extension is obtained by using the "E3DVAR" variant of the filtering algorithm
+:ref:`section_ref_algorithm_EnsembleKalmanFilter`.
.. ------------------------------------ ..
.. include:: snippets/Header2Algo02.rst
.. include:: snippets/SimulationQuantiles.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_3DVAR_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_3DVAR.rst
.. include:: snippets/IndexOfOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_4DVAR_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_3DVAR`
.. ------------------------------------ ..
.. include:: snippets/Header2Algo01.rst
-This algorithm allows to check the quality of the adjoint operator, by
-calculating a residue with known theoretical properties.
+This algorithm allows to check the quality of the adjoint of an operator
+:math:`F`, by computing a residue whose theoretical properties are known. The
+test is applicable to any operator, of evolution or observation.
+
+For all formulas, with :math:`\mathbf{x}` the current verification point, we
+take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and
+:math:`\mathbf{dx}=\alpha_0*\mathbf{dx}_0` with :math:`\alpha_0` a scaling user
+parameter, defaulting to 1. :math:`F` is the computational operator or code
+(which is here acquired by the observation operator command
+"*ObservationOperator*").
One can observe the following residue, which is the difference of two scalar
products:
.. math:: R(\alpha) = | < TangentF_x(\mathbf{dx}) , \mathbf{y} > - < \mathbf{dx} , AdjointF_x(\mathbf{y}) > |
-that has to remain equal to zero at the calculation precision. One take
-:math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and
-:math:`\mathbf{dx}=\alpha*\mathbf{dx}_0`. :math:`F` is the calculation code.
-:math:`\mathbf{y}` has to be in the image of :math:`F`. If it is not given, one
-take :math:`\mathbf{y} = F(\mathbf{x})`.
+in which the optional quantity :math:`\mathbf{y}` must be in the image of
+:math:`F`. If it is not given, we take its default evaluation :math:`\mathbf{y}
+= F(\mathbf{x})`.
+
+This residue must remain constantly equal to zero at the accuracy of the
+calculation.
.. ------------------------------------ ..
.. include:: snippets/Header2Algo02.rst
.. include:: snippets/CheckingPoint.rst
+.. include:: snippets/Observation.rst
+
.. include:: snippets/ObservationOperator.rst
.. ------------------------------------ ..
.. include:: snippets/InitialDirection.rst
+.. include:: snippets/NumberOfPrintedDigits.rst
+
.. include:: snippets/SetSeed.rst
StoreSupplementaryCalculations
.. include:: snippets/SimulatedObservationAtCurrentState.rst
+.. ------------------------------------ ..
+.. _section_ref_algorithm_AdjointTest_examples:
+.. include:: snippets/Header2Algo09.rst
+
+.. include:: scripts/simple_AdjointTest.rst
+
+.. literalinclude:: scripts/simple_AdjointTest.py
+
+.. include:: snippets/Header2Algo10.rst
+
+.. literalinclude:: scripts/simple_AdjointTest.res
+ :language: none
+
.. ------------------------------------ ..
.. include:: snippets/Header2Algo06.rst
.. include:: snippets/SimulationQuantiles.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_Blue_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_Blue.rst
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_DerivativeFreeOptimization_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_ParticleSwarmOptimization`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_DifferentialEvolution_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_DerivativeFreeOptimization`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_EnsembleBlue_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_Blue`
Calculation algorithm "*EnsembleKalmanFilter*"
----------------------------------------------
-.. ------------------------------------ ..
-.. include:: snippets/Header2Algo00.rst
-
.. ------------------------------------ ..
.. include:: snippets/Header2Algo01.rst
pair: Variant ; ETKF-N
pair: Variant ; MLEF
pair: Variant ; IEnKF
+ pair: Variant ; E3DVAR
+ pair: Variant ; EnKS
- "EnKF" (Ensemble Kalman Filter, see [Evensen94]_), original stochastic algorithm, allowing consistent treatment of non-linear evolution operator,
- "ETKF" (Ensemble-Transform Kalman Filter), deterministic EnKF algorithm, allowing treatment of non-linear evolution operator with a lot less members (one recommends to use a number of members on the order of 10 or even sometimes less),
- "ETKF-N" (Ensemble-Transform Kalman Filter of finite size N), ETKF algorithm of "finite size N", that doesn't need the inflation that is often required with the other algorithms,
- "MLEF" (Maximum Likelihood Kalman Filter, see [Zupanski05]_), deterministic EnKF algorithm, allowing in addition the consistent treatment of non-linear observation operator,
- "IEnKF" (Iterative EnKF), deterministic EnKF algorithm, improving treatment of operator non-linearities
+- "E3DVAR" (EnKF 3DVAR), algorithm coupling ensemble and variational assimilation, which uses in parallel a 3DVAR variational assimilation and an EnKF algorithm to improve the estimation of *a posteriori* error covariances
- "EnKS" (Ensemble Kalman Smoother), smoothing algorithm with a fixed time lag L.
-Without being a universal recommandation, one recommend to use "EnKF" as a
-reference algorithm, and the other algorithms (in this order) as means to
-obtain less costly data assimilation with hopefully the same quality.
+Without being a universal recommendation, one recommends to use the "EnKF"
+formulation as a reference algorithm, the "ETKF-N" or "IEnKF" formulation for
+robust performance, and the other algorithms (in this order) as means to obtain
+a less costly data assimilation with (hopefully) the same quality.
.. ------------------------------------ ..
.. include:: snippets/Header2Algo02.rst
.. include:: snippets/EstimationOf_State.rst
+.. include:: snippets/HybridCostDecrementTolerance.rst
+
+.. include:: snippets/HybridCovarianceEquilibrium.rst
+
+.. include:: snippets/HybridMaximumNumberOfIterations.rst
+
.. include:: snippets/InflationFactor.rst
.. include:: snippets/InflationType.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_EnsembleKalmanFilter_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_KalmanFilter`
- [Bishop01]_
- [Evensen03]_
- [Zupanski05]_
+- [Hamill00]_
- [WikipediaEnKF]_
.. include:: snippets/SimulationQuantiles.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ExtendedBlue_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_ExtendedBlue.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ExtendedKalmanFilter_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_KalmanFilter`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_FunctionTest_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_FunctionTest.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_GradientTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_KalmanFilter_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_KalmanFilter1.rst
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_LinearLeastSquares_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_Blue`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_LinearityTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_LocalSensitivityTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
--- /dev/null
+..
+ Copyright (C) 2008-2022 EDF R&D
+
+ This file is part of SALOME ADAO module.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+ Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+.. index:: single: MeasurementsOptimalPositioningTask
+.. index:: single: Optimal positioning of measurements
+.. index:: single: Measurement locations
+.. index:: single: Measurements (Optimal positioning)
+.. _section_ref_algorithm_MeasurementsOptimalPositioningTask:
+
+Task algorithm "*MeasurementsOptimalPositioningTask*"
+-----------------------------------------------------
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo00.rst
+
+.. warning::
+
+ This algorithm is for now only available in textual user interface (TUI) and not in graphical user interface (GUI).
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo01.rst
+
+This algorithm provides optimal measurement points by an EIM (Empirical
+Interpolation Method) analysis, from a set of state vectors (usually called
+"snapshots" in reduced basis methodology). Each of these state vectors is
+usually (but not necessarily) the result :math:`\mathbf{y}` of a simulation
+:math:`H` for a given set of parameters :math:`\mathbf{x}=\mu`.
+
+In its simplest use, if the set of state vectors is pre-existing, it is only
+necessary to provide it through the algorithm options.
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo02.rst
+
+*None*
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo03Task.rst
+
+.. include:: snippets/EnsembleOfSnapshots.rst
+
+.. include:: snippets/ExcludeLocations.rst
+
+.. include:: snippets/ErrorNorm.rst
+
+.. include:: snippets/ErrorNormTolerance.rst
+
+.. include:: snippets/MaximumNumberOfLocations.rst
+
+StoreSupplementaryCalculations
+ .. index:: single: StoreSupplementaryCalculations
+
+ *List of names*. This list indicates the names of the supplementary
+ variables, that can be available during or at the end of the algorithm, if
+ they are initially required by the user. Their avalability involves,
+ potentially, costly calculations or memory consumptions. The default is then
+ a void list, none of these variables being calculated and stored by default
+ (excepted the unconditionnal variables). The possible names are in the
+ following list (the detailed description of each named variable is given in
+ the following part of this specific algorithmic documentation, in the
+ sub-section "*Information and variables available at the end of the
+ algorithm*"): [
+ "OptimalPoints",
+ "ReducedBasis",
+ "Residus",
+ ].
+
+ Example :
+ ``{"StoreSupplementaryCalculations":["BMA", "CurrentState"]}``
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo04.rst
+
+.. include:: snippets/OptimalPoints.rst
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo05.rst
+
+.. include:: snippets/OptimalPoints.rst
+
+.. include:: snippets/ReducedBasis.rst
+
+.. include:: snippets/Residus.rst
+
+.. ------------------------------------ ..
+.. _section_ref_algorithm_MeasurementsOptimalPositioningTask_examples:
+.. include:: snippets/Header2Algo06.rst
+
+- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_NonLinearLeastSquares_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_NonLinearLeastSquares.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ParallelFunctionTest_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_ParallelFunctionTest.rst
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ParticleSwarmOptimization_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_DerivativeFreeOptimization`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_QuantileRegression_examples:
.. include:: snippets/Header2Algo06.rst
.. ------------------------------------ ..
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_SamplingTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_TabuSearch_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_DerivativeFreeOptimization`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_TangentTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_UnscentedKalmanFilter_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_KalmanFilter`
.. include:: snippets/REDUCTION_STUDY.rst
-The other nested terms are listed in alphabetical order. They are not
-necessarily required for all algorithms. The different commands are the
-following:
+The nested terms are sorted in alphabetical order. They are not necessarily
+required for all algorithms. The various commands are the following:
.. include:: snippets/AlgorithmParameters.rst
calculation case.
The first term describes the choice between calculation or checking. In the
-graphical interface, the verification is imperatively indicated by the command:
+graphical interface, the choice is imperatively indicated by the command:
.. include:: snippets/CHECKING_STUDY.rst
-The other terms are ordered in alphabetical order. The different commands are
-the following:
+The nested terms are sorted in alphabetical order. They are not necessarily
+required for all algorithms. The various commands are the following:
.. include:: snippets/AlgorithmParameters.rst
--- /dev/null
+..
+ Copyright (C) 2008-2022 EDF R&D
+
+ This file is part of SALOME ADAO module.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+ Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+.. _section_ref_task_keywords:
+
+List of commands and keywords for a dedicated task or study oriented case
+-------------------------------------------------------------------------
+
+This set of commands is related to the description of a dedicated task or study
+oriented case, which consists of a simple specific procedure to perform a
+computational task dedicated to a general application of data assimilation or
+optimization methods.
+
+The nested terms are sorted in alphabetical order. They are not necessarily
+required for all algorithms. The various commands are the following:
+
+.. include:: snippets/AlgorithmParameters.rst
+
+.. include:: snippets/Debug.rst
+
+.. include:: snippets/Observers.rst
+
+.. include:: snippets/StudyName.rst
+
+.. include:: snippets/StudyRepertory.rst
ref_algorithm_SamplingTest
ref_algorithm_TangentTest
ref_checking_keywords
+
+.. _section_reference_task:
+
+================================================================================
+**[DocR]** Dedicated tasks or study oriented cases
+================================================================================
+
+This section describes the dedicated task or study oriented cases available in
+ADAO, detailing their usage characteristics and their options.
+
+These tasks use algorithms from data assimilation methods, optimization methods
+or methods with reduction. We refer to the :ref:`section_theory` section and
+the :ref:`section_reference_assimilation` section for the underlying
+algorithmic details.
+
+.. toctree::
+ :maxdepth: 1
+
+ ref_algorithm_MeasurementsOptimalPositioningTask
+ ref_task_keywords
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+from numpy import array, eye
+from adao import adaoBuilder
+case = adaoBuilder.New('')
+case.setCheckingPoint( Vector = array([0., 1., 2.]), Stored=True )
+case.setObservation( Vector = [10., 11., 12.] )
+case.setObservationOperator( Matrix = eye(3), )
+case.setAlgorithmParameters(
+ Algorithm='AdjointTest',
+ Parameters={
+ 'EpsilonMinimumExponent' :-12,
+ 'NumberOfPrintedDigits' : 3,
+ 'SetSeed' : 1234567,
+ },
+ )
+case.execute()
--- /dev/null
+
+ ADJOINTTEST
+ ===========
+
+ This test allows to analyze the quality of an adjoint operator associated
+ to some given direct operator. If the adjoint operator is approximated and
+ not given, the test measures the quality of the automatic approximation.
+
+ Using the "ScalarProduct" formula, one observes the residue R which is the
+ difference of two scalar products:
+
+ R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > |
+
+ which must remain constantly equal to zero to the accuracy of the calculation.
+ One takes dX0 = Normal(0,X) and dX = Alpha*dX0, where F is the calculation
+ operator. If it is given, Y must be in the image of F. If it is not given,
+ one takes Y = F(X).
+
+ (Remark: numbers that are (about) under 2e-16 represent 0 to machine precision)
+
+ -------------------------------------------------------------
+ i Alpha ||X|| ||Y|| ||dX|| R(Alpha)
+ -------------------------------------------------------------
+ 0 1e+00 2.236e+00 1.910e+01 3.536e+00 0.000e+00
+ 1 1e-01 2.236e+00 1.910e+01 3.536e-01 0.000e+00
+ 2 1e-02 2.236e+00 1.910e+01 3.536e-02 0.000e+00
+ 3 1e-03 2.236e+00 1.910e+01 3.536e-03 0.000e+00
+ 4 1e-04 2.236e+00 1.910e+01 3.536e-04 0.000e+00
+ 5 1e-05 2.236e+00 1.910e+01 3.536e-05 0.000e+00
+ 6 1e-06 2.236e+00 1.910e+01 3.536e-06 0.000e+00
+ 7 1e-07 2.236e+00 1.910e+01 3.536e-07 0.000e+00
+ 8 1e-08 2.236e+00 1.910e+01 3.536e-08 0.000e+00
+ 9 1e-09 2.236e+00 1.910e+01 3.536e-09 0.000e+00
+ 10 1e-10 2.236e+00 1.910e+01 3.536e-10 0.000e+00
+ 11 1e-11 2.236e+00 1.910e+01 3.536e-11 0.000e+00
+ 12 1e-12 2.236e+00 1.910e+01 3.536e-12 0.000e+00
+ -------------------------------------------------------------
--- /dev/null
+.. index:: single: AdjointTest (example)
+
+This example describes the test of the quality of the adjoint of some operator,
+whose direct formulation is given and whose adjoint formulation is here
+approximated by default. The required information is minimal, namely here an
+operator :math:`F` (described for the test by the observation command
+"*ObservationOperator*"), and a state :math:`\mathbf{x}^b` to test it on
+(described for the test by the command "*CheckingPoint*"). An observation
+:math:`\mathbf{y}^o` can be given as here (described for the test by the
+command "*Observation*"). The output has been set to determine the printout,
+for example to make automatic comparison easier.
+
+The actual check is to observe whether the residue is consistently equal to
+zero at the accuracy of the calculation.
+
+ FUNCTIONTEST
+ ============
+
+ This test allows to analyze the (repetition of) launch of some given
+ operator. It shows simple statistics related to its successful execution,
+ or related to the similarities of repetition of its execution.
+
===> Information before launching:
-----------------------------
Characteristics of input vector X, internally converted:
Type...............: <class 'numpy.ndarray'>
- Lenght of vector...: 3
+ Length of vector...: 3
Minimum value......: 0.00e+00
Maximum value......: 2.00e+00
Mean of vector.....: 1.00e+00
+
+ PARALLELFUNCTIONTEST
+ ====================
+
+ This test allows to analyze the (repetition of) launch of some given
+ operator. It shows simple statistics related to its successful execution,
+ or related to the similarities of repetition of its execution.
+
===> Information before launching:
-----------------------------
Characteristics of input vector X, internally converted:
Type...............: <class 'numpy.ndarray'>
- Lenght of vector...: 30
+ Length of vector...: 30
Minimum value......: 0.00e+00
Maximum value......: 2.90e+01
Mean of vector.....: 1.45e+01
AlgorithmParameters
*Dictionary*. This variable indicates the data assimilation or optimization
algorithm chosen by the keyword "*Algorithm*", and its potential optional
- parameters. The algorithm choices are available through the GUI. There exists
- for example "3DVAR", "Blue"... Each algorithm is defined, below, by a
- specific subsection. Optionally, the command allows also to add some
- parameters to control the algorithm. Their values are defined either
- explicitly or in a "*Dict*" type object. See the
+ parameters. The potential choices by this keyword "*Algorithm*" are available
+ through the graphical interface or in the reference documentation containing
+ a specific sub-section for each of them. There are for example the "3DVAR",
+ the "Blue", etc. Optionally, the command also allows to add parameters to
+ control the chosen algorithm. Their values are defined either explicitly or
+ in a "*Dict*" type object. See the
:ref:`section_ref_options_Algorithm_Parameters` for the detailed use of this
command part.
Analysis
*List of vectors*. Each element of this variable is an optimal state
- :math:`\mathbf{x}*` in optimization or an analysis :math:`\mathbf{x}^a` in
+ :math:`\mathbf{x}^*` in optimization or an analysis :math:`\mathbf{x}^a` in
data assimilation.
Example:
--- /dev/null
+.. index:: single: EnsembleOfSnapshots
+
+EnsembleOfSnapshots
+ *List of vectors or matrix*. This key contains a set of physical state
+ vectors :math:`\mathbf{y}` (called "*snapshots*" in "Reduced Bases"
+ terminology), with 1 state per column if it is a matrix or 1 state per
+ element of the list. Important: the numbering of the points, to which a state
+ value is given, in each vector is implicitly that of the natural order of
+ numbering of the state vector, from 0 to the "size minus 1" of this vector.
+
+ Example :
+ ``{"EnsembleOfSnapshots":[y1, y2, y3...]}``
--- /dev/null
+.. index:: single: ErrorNorm
+
+ErrorNorm
+ *Predefined name*. This key indicates the norm used for the residue that
+ controls the optimal search. The default is the "L2" norm. The possible
+ criteria are in the following list: ["L2", "Linf"].
+
+ Example :
+ ``{"ErrorNorm":"L2"}``
--- /dev/null
+.. index:: single: ErrorNormTolerance
+
+ErrorNormTolerance
+ *Real value*. This key specifies the value at which the residual associated
+ with the approximation is acceptable, which leads to stop the optimal search.
+ The default value is 1.e-7 (which is usually equivalent to almost no stopping
+ criterion because the approximation is less precise), and it is recommended
+ to adapt it to the needs for real problems. A usual value, recommended to
+ stop the search on residual criterion, is 1.e-2.
+
+ Example :
+ ``{"ErrorNormTolerance":1.e-7}``
--- /dev/null
+.. index:: single: ExcludeLocations
+
+ExcludeLocations
+ *List of integers*. This key specifies the list of points in the state vector
+ excluded from the optimal search. The default value is an empty list.
+ Important: the numbering of these excluded points must be identical to the
+ one implicitly adopted in the states provided by the "*EnsembleOfSnapshots*"
+ key.
+
+ Example :
+ ``{"ExcludeLocations":[3, 125, 286]}``
:ref:`section_ref_options_Algorithm_Parameters` for the good use of this
command.
-The options of the algorithm are the following:
+The options are the following:
:ref:`section_ref_options_Algorithm_Parameters` for the good use of this
command.
-The options of the algorithm are the following:
+The options are the following:
--- /dev/null
+The general optional commands, available in the editing user graphical or
+textual interface, are indicated in :ref:`section_ref_task_keywords`. Moreover,
+the parameters of the command "*AlgorithmParameters*" allow to choose the
+specific options, described hereafter, of the algorithm. See
+:ref:`section_ref_options_Algorithm_Parameters` for the good use of this
+command.
+
+The options are the following:
-Python (TUI) use example
-++++++++++++++++++++++++
+Python (TUI) use examples
++++++++++++++++++++++++++
Here is a very simple use of the given algorithm and its parameters, written in
:ref:`section_tui`, and from which input information allow to define an
--- /dev/null
+.. index:: single: HybridCostDecrementTolerance
+
+HybridCostDecrementTolerance
+ *Real value*. This key indicates a limit value, leading to stop successfully
+ the optimization process for the variational part in the coupling, when the
+ cost function decreases less than this tolerance at the last step. The
+ default is 1.e-7, and it is recommended to adapt it to the needs on real
+ problems. One can refer to the section describing ways for
+ :ref:`subsection_iterative_convergence_control` for more detailed
+ recommendations.
+
+ Example:
+ ``{"HybridCostDecrementTolerance":1.e-7}``
--- /dev/null
+.. index:: single: HybridCovarianceEquilibrium
+
+HybridCovarianceEquilibrium
+ *Real value*. This key indicates, in hybrid variational optimization, the
+ equilibrium factor between the static *a priori* covariance and the ensemble
+ covariance. This factor is between 0 and 1, and its default value is 0.5.
+
+ Example :
+ ``{"HybridCovarianceEquilibrium":0.5}``
--- /dev/null
+.. index:: single: HybridMaximumNumberOfIterations
+
+HybridMaximumNumberOfIterations
+ *Integer value*. This key indicates the maximum number of internal iterations
+ allowed for hybrid optimization, for the variational part. The default is
+ 15000, which is very similar to no limit on iterations. It is then
+ recommended to adapt this parameter to the needs on real problems. For some
+ optimizers, the effective stopping step can be slightly different from the
+ limit due to algorithm internal control requirements. One can refer to the
+ section describing ways for :ref:`subsection_iterative_convergence_control`
+ for more detailed recommendations.
+
+ Example:
+ ``{"HybridMaximumNumberOfIterations":100}``
--- /dev/null
+.. index:: single: MaximumNumberOfLocations
+
+MaximumNumberOfLocations
+ *Integer value*. This key specifies the maximum possible number of positions
+ found in the optimal search. The default value is 1. The optimal search may
+ eventually find fewer positions than required by this key, as for example in
+ the case where the residual associated to the approximation is lower than the
+ criterion and leads to the early termination of the optimal search.
+
+ Example :
+ ``{"MaximumNumberOfLocations":5}``
--- /dev/null
+.. index:: single: OptimalPoints
+
+OptimalPoints
+ *List of integer series*. Each element is a series, containing the ideal points
+ determined by the optimal search, ordered by decreasing preference and in the
+ same order as the reduced basis vectors found iteratively.
+
+ Example :
+ ``mp = ADD.get("OptimalPoints")[-1]``
--- /dev/null
+.. index:: single: ReducedBasis
+
+ReducedBasis
+ *List of matrices*. Each element is a matrix, containing in each column a
+ vector of the reduced basis obtained by the optimal search, ordered by
+ decreasing preference and in the same order as the ideal points found
+ iteratively.
+
+ Example :
+ ``rb = ADD.get("ReducedBasis")[-1]``
Residu
*List of values*. Each element is the value of the particular residue
- verified during a checking algorithm, in the order of the tests.
+ checked during the running of the algorithm, in the order of the tests.
Example:
``r = ADD.get("Residu")[:]``
--- /dev/null
+.. index:: single: Residus
+
+Residus
+ *List of real value series*. Each element is a series, containing the values
+ of the particular residue checked during the running of the algorithm.
+
+ Example :
+ ``rs = ADD.get("Residus")[:]``
pair: Variant ; ETKF-N
pair: Variant ; MLEF
pair: Variant ; IEnKF
+ pair: Variant ; E3DVAR
pair: Variant ; EnKS
Variant
*Predefined name*. This key allows to choose one of the possible variants
- for the main algorithm. The default variant is the original "EnKF", and the
- possible ones are
+ for the main algorithm. The default variant is the original "EnKF"
+ formulation, and the possible ones are
"EnKF" (Ensemble Kalman Filter),
"ETKF" (Ensemble-Transform Kalman Filter),
"ETKF-N" (Ensemble-Transform Kalman Filter),
"MLEF" (Maximum Likelihood Kalman Filter),
"IEnKF" (Iterative_EnKF),
+ "E3DVAR" (EnKF 3DVAR),
"EnKS" (Ensemble Kalman Smoother).
- One recommends to try the "ETKF-N" or "IEnKF" variants, and to reduce the
- number of members to about 10 or less for all variants other then the
- original "EnKF".
+
+ One recommends to try the "ETKF-N" or "IEnKF" variants for a robust
+ performance, and to reduce the number of members to about 10 or less for all
+ variants other than the original "EnKF" formulation.
Example :
``{"Variant":"EnKF"}``
:math:`\mathbf{x}^a` will move to :math:`\mathbf{y}^o` or to
:math:`\mathbf{x}^b`, in inverse proportion of the variances in
:math:`\mathbf{B}` and :math:`\mathbf{R}`. As an other extension, it is also
-equivalent to search for the analysis thought a BLUE algorithm or a 3DVAR one.
+equivalent to search for the analysis through a "Blue" algorithm or a "3DVAR"
+one.
Using the graphical interface (GUI) to build the ADAO case
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>
As a simple extension of this example, one can notice that the same problem
-solved with a 3DVAR algorithm gives the same result. This algorithm can be
+solved with a "3DVAR" algorithm gives the same result. This algorithm can be
chosen at the ADAO case building step by only changing the "*Algorithm*"
-argument on the beginning. The remaining parts of the ADAO case in 3DVAR is
+argument on the beginning. The remaining parts of the ADAO case in "3DVAR" is
exactly similar to the BLUE algorithmic case.
.. _section_tutorials_in_python_script:
To describe the background error covariances matrix :math:`\mathbf{B}`, we make
as previously the hypothesis of uncorrelated errors (that is, a diagonal matrix,
-of size 3x3 because :math:`\mathbf{x}^b` is of lenght 3) and to have the same
+of size 3x3 because :math:`\mathbf{x}^b` is of length 3) and to have the same
variance of 0.1 for all variables. We get:
::
fichiers ou les commandes incluses dans l'installation complète de SALOME. Tous
les noms à remplacer par l'utilisateur sont indiqués par la syntaxe ``<...>``.
+.. _section_advanced_convert_JDC:
+
Convertir et exécuter un fichier de commandes ADAO (JDC) par l'intermédiaire d'un script Shell
----------------------------------------------------------------------------------------------
Dans tous les cas, les sorties standard et d'erreur se font dans le terminal de
lancement.
+.. _section_advanced_YACS_tui:
+
Exécuter un schéma de calcul ADAO dans YACS en utilisant le mode "texte" (TUI YACS)
-----------------------------------------------------------------------------------
.. index:: single: R
.. index:: single: rPython
+.. index:: single: reticulate
Pour étendre les possibilités d'analyse et de traitement, il est possible
d'utiliser les calculs ADAO dans l'environnement **R** (voir [R]_ pour plus de
-détails). Ce dernier est disponible dans SALOME en lançant l'interpréteur R dans
-le shell "``salome shell``". Il faut de plus disposer, en R, du package
-"*rPython*", qui peut si nécessaire être installé par l'utilisateur à l'aide de
-la commande R suivante :
+détails). Ce dernier est disponible dans SALOME en lançant l'interpréteur R
+dans le shell "``salome shell``". Il faut de plus disposer, en R, du package
+"*rPython*" (ou du package "*reticulate*", plus récent), qui peut si nécessaire
+être installé par l'utilisateur à l'aide de la commande R suivante :
::
#-*- coding: utf-8 -*-
from adao import adaoBuilder
adaoBuilder.Gui()
-Si nécessaire, des messages explicites permettent d'identifier les variables
-d'environnement requises qui seraient absentes. Cette commande ne doit
-néanmoins pas être lancée dans la console Python de SALOME (car dans ce cas il
-suffit d'activer le module puisque l'on est déjà dans l'interface
-graphique...), mais elle peut l'être dans une session "SALOME shell" obtenue
-depuis le menu "Outils/Extensions" de SALOME. Pour mémoire, le moyen le plus
-simple d'obtenir un interpréteur Python inclu dans une session "SALOME shell"
-est de lancer la commande suivante dans un terminal :
-::
+Pour mémoire, le moyen le plus simple d'obtenir un interpréteur Python inclus
+dans une session "SALOME shell" est de lancer la commande suivante dans un
+terminal : ::
$SALOMEDIR/salome shell -- python
avec ``SALOMEDIR`` le ``<Répertoire principal d'installation de SALOME>``.
+Si nécessaire, des messages explicites permettent d'identifier les variables
+d'environnement requises qui seraient absentes. **Cette commande ne doit
+néanmoins pas être lancée dans la console Python de SALOME** (car dans ce cas
+il suffit d'activer le module puisque l'on est déjà dans l'interface
+graphique...) ou dans une installation Python indépendante, mais elle peut
+l'être dans une session "SALOME shell" obtenue depuis le menu
+"Outils/Extensions" de SALOME.
+
.. _section_advanced_execution_mode:
Changer le mode par défaut d'exécution de noeuds dans YACS
.. [Buchinsky98] Buchinsky M., *Recent Advances in Quantile Regression Models: A Practical Guidline for Empirical Research*, Journal of Human Resources, 33(1), pp.88-126, 1998
-.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126, 1719–1724, 1998
+.. [Burgers98] Burgers G., Van Leuween P. J., Evensen G., *Analysis scheme in the Ensemble Kalman Filter*, Monthly Weather Review, 126(6), pp.1719–1724, 1998
.. [Byrd95] Byrd R. H., Lu P., Nocedal J., *A Limited Memory Algorithm for Bound Constrained Optimization*, SIAM Journal on Scientific and Statistical Computing, 16(5), pp.1190-1208, 1995
.. [Cohn98] Cohn S. E., Da Silva A., Guo J., Sienkiewicz M., Lamich D., *Assessing the effects of data selection with the DAO Physical-space Statistical Analysis System*, Monthly Weather Review, 126, pp.2913–2926, 1998
-.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120, pp.1367–1387, 1994
+.. [Courtier94] Courtier P., Thépaut J.-N., Hollingsworth A., *A strategy for operational implementation of 4D-Var, using an incremental approach*, Quarterly Journal of the Royal Meteorological Society, 120(519), pp.1367–1387, 1994
-.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123, pp.2449-2461, 1997
+.. [Courtier97] Courtier P., *Dual formulation of four-dimensional variational assimilation*, Quarterly Journal of the Royal Meteorological Society, 123(544), pp.2449-2461, 1997
.. [Das11] Das S., Suganthan P. N., *Differential Evolution: A Survey of the State-of-the-art*, IEEE Transactions on Evolutionary Computation, 15(1), pp.4-31, 2011
.. [Glover90] Glover F., *Tabu Search-Part II*, ORSA Journal on Computing, 2(1), pp.4-32, 1990
+.. [Hamill00] Hamill T. M., Snyder C., *A Hybrid Ensemble Kalman Filter-3D Variational Analysis Scheme*, Monthly Weather Review, 128(8), pp.2905-2919, 2000
+
.. [Ide97] Ide K., Courtier P., Ghil M., Lorenc A. C., *Unified notation for data assimilation: operational, sequential and variational*, Journal of the Meteorological Society of Japan, 75(1B), pp.181-189, 1997
.. [Jazwinski70] Jazwinski A. H., *Stochastic Processes and Filtering Theory*, Academic Press, 1970
.. [Lions68] Lions J.-L., *Contrôle optimal de systèmes gouvernés par des équations aux dérivées partielles*, Dunod, 1968
-.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112, pp.1177-1194, 1986
+.. [Lorenc86] Lorenc A. C., *Analysis methods for numerical weather prediction*, Quarterly Journal of the Royal Meteorological Society, 112(474), pp.1177-1194, 1986
-.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114, pp.205–240, 1988
+.. [Lorenc88] Lorenc A. C., *Optimal nonlinear objective analysis*, Quarterly Journal of the Royal Meteorological Society, 114(479), pp.205–240, 1988
.. [Morales11] Morales J. L., Nocedal J., *L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 38(1), 2011
.. [Zhu97] Zhu C., Byrd R. H., Nocedal J., *L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization*, ACM Transactions on Mathematical Software, 23(4), pp.550-560, 1997
-.. [Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133, pp.1710–1726, 2005
+.. [Zupanski05] Zupanski M., *Maximum likelihood ensemble filter: Theoretical aspects*, Monthly Weather Review, 133(6), pp.1710–1726, 2005
*Nota Bene* : un lien vers la version française de chaque page Wikipédia se
trouve dans le sous-menu "*Languages*". Les deux versions sont complémentaires
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
import sys, os, time, sphinx, logging
+# logging.getLogger().setLevel(logging.DEBUG)
# -- Module version information --------------------------------------------------
__lv = LooseVersion(sphinx.__version__)
if __lv < LooseVersion("1.4.0"):
extensions = ['sphinx.ext.pngmath']
+ logging.debug('Using "%s" extensions'%(extensions,))
else:
extensions = ['sphinx.ext.imgmath']
+ logging.debug('Using "%s" extensions'%(extensions,))
try:
import sphinx_rtd_theme
extensions += ['sphinx_rtd_theme']
--- /dev/null
+..
+ Copyright (C) 2008-2022 EDF R&D
+
+ This file is part of SALOME ADAO module.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+ Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+.. _section_docu_examples:
+
+================================================================================
+**[DocU]** Exemples d'utilisation d'ADAO
+================================================================================
+
+Pour faciliter la lecture ou la recherche, cette section rassemble de manière
+synthétique les pointeurs directs, vers les sections adéquates, d'exemples
+simples d'utilisation du module (principalement mais pas uniquement en TUI).
+
+Ces exemples sont disponibles en direct dans les tutoriaux, dans les
+documentations détaillées d'algorithmes ou de problèmes appliqués, et dans les
+usages avancés.
+
+Tutoriaux
+---------
+
+#. :ref:`section_tutorials_in_salome`
+#. :ref:`section_tutorials_in_python`
+
+Utilisations d'algorithmes de calcul
+------------------------------------
+
+#. :ref:`Exemples avec l'algorithme de "3DVAR"<section_ref_algorithm_3DVAR_examples>`
+#. :ref:`Exemples avec l'algorithme de "Blue"<section_ref_algorithm_Blue_examples>`
+#. :ref:`Exemples avec l'algorithme de "ExtendedBlue"<section_ref_algorithm_ExtendedBlue_examples>`
+#. :ref:`Exemples avec l'algorithme de "KalmanFilter"<section_ref_algorithm_KalmanFilter_examples>`
+#. :ref:`Exemples avec l'algorithme de "NonLinearLeastSquares"<section_ref_algorithm_NonLinearLeastSquares_examples>`
+
+Utilisations d'algorithmes de vérification
+------------------------------------------
+
+#. :ref:`Exemples avec la vérification "AdjointTest"<section_ref_algorithm_AdjointTest_examples>`
+#. :ref:`Exemples avec la vérification "FunctionTest"<section_ref_algorithm_FunctionTest_examples>`
+#. :ref:`Exemples avec la vérification "ParallelFunctionTest"<section_ref_algorithm_ParallelFunctionTest_examples>`
+
+Utilisations avancées
+---------------------
+
+#. :ref:`section_advanced_convert_JDC`
+#. :ref:`section_advanced_YACS_tui`
+#. :ref:`section_advanced_R`
d'environ 350 problèmes appliqués distincts.
La documentation de ce module est divisée en plusieurs grandes catégories,
-relatives à la documentation théorique (indiquée dans le titre de section par
-**[DocT]**), à la documentation utilisateur (indiquée dans le titre de section
-par **[DocU]**), et à la documentation de référence (indiquée dans le titre de
-section par **[DocR]**).
+relatives à la **documentation théorique** (indiquée dans le titre de section
+par **[DocT]**), à la **documentation utilisateur** (indiquée dans le titre de
+section par **[DocU]**), et à la **documentation de référence** (indiquée dans
+le titre de section par **[DocR]**).
La première partie est l':ref:`section_intro`. La seconde partie présente
:ref:`section_theory`, et à leurs concepts, et la partie suivante décrit la
:ref:`section_methodology`. Pour un utilisateur courant, les parties suivantes
-présentent des exemples didactiques d'utilisation sous la forme de
-:ref:`section_tutorials_in_salome` ou de :ref:`section_tutorials_in_python`,
-puis indique les :ref:`section_advanced`, avec l'obtention de renseignements
-supplémentaires ou l'usage par scripts de commandes hors interface de contrôle
-graphique. Les utilisateurs intéressés par un accès rapide au module peuvent
-s'arrêter avant la lecture de la suite, mais un usage utile du module nécessite
-de lire et de revenir régulièrement à ces parties. Les parties qui suivent
-expliquent comment utiliser une :ref:`section_gui_in_salome` ou une
-:ref:`section_tui`. La dernière grande partie détaille la
-:ref:`section_reference`, avec trois sous-parties essentielles qui la composent
-et qui décrivent les commandes et des options d'algorithmes. Un
-:ref:`section_glossary`, des :ref:`section_notations`, une
-:ref:`section_bibliography` et un index développé complètent le document.
-Enfin, pour respecter les exigences de licence du module, n'oubliez pas de lire
-la partie :ref:`section_license`.
+présentent des :ref:`section_docu_examples`, rapidement accessibles par
+l'ensemble des pointeurs vers les sous-parties. Des présentations didactiques
+sont détaillées dans les :ref:`section_tutorials_in_salome` ou les
+:ref:`section_tutorials_in_python`, et complétées par des indications sur les
+:ref:`section_advanced`, avec l'obtention de renseignements supplémentaires ou
+l'usage par scripts de commandes hors interface de contrôle graphique.
+
+Les utilisateurs intéressés par un accès rapide au module peuvent s'arrêter
+avant la lecture de la suite, mais un usage utile du module nécessite de lire
+et de revenir régulièrement à ces parties. Les parties qui suivent expliquent
+comment utiliser une :ref:`section_gui_in_salome` ou une :ref:`section_tui`. La
+dernière grande partie détaille la :ref:`section_reference`, avec trois
+sous-parties essentielles qui la composent et qui décrivent les commandes et
+des options d'algorithmes. Un :ref:`section_glossary`, des
+:ref:`section_notations`, une :ref:`section_bibliography` et un index développé
+complètent le document. Enfin, pour respecter les exigences de licence du
+module, n'oubliez pas de lire la partie :ref:`section_license`.
.. toctree::
:caption: Table des matières
intro
theory
methodology
+ examples
tutorials_in_salome
tutorials_in_python
advanced
dynamique ou itérative (il n'y a donc pas besoin dans ce cas d'opérateur
d'évolution incrémentale, ni de covariance d'erreurs d'évolution). Dans ADAO,
il peut aussi être utilisé sur une succession d'observations, plaçant alors
-l'estimation dans un cadre récursif en partie similaire à un
+l'estimation dans un cadre récursif similaire à un
:ref:`section_ref_algorithm_KalmanFilter`. Une estimation standard est
effectuée à chaque pas d'observation sur l'état prévu par le modèle d'évolution
incrémentale, sachant que la covariance d'erreur d'état reste la covariance
contrairement aux filtres de type Kalman, la covariance d'erreurs sur les états
n'est pas remise à jour.
+Une extension du 3DVAR, couplant en parallèle une méthode 3DVAR avec un filtre
+de Kalman d'ensemble, permet d'améliorer l'estimation des covariances d'erreurs
+*a posteriori*. On atteint cette extension en utilisant le variant "E3DVAR" de
+l'algorithme de filtrage :ref:`section_ref_algorithm_EnsembleKalmanFilter`.
+
.. ------------------------------------ ..
.. include:: snippets/Header2Algo02.rst
.. include:: snippets/SimulationQuantiles.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_3DVAR_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_3DVAR.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_3DVAR.res
+ :language: none
.. include:: snippets/Header2Algo11.rst
.. include:: snippets/IndexOfOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_4DVAR_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_3DVAR`
.. include:: snippets/InitialDirection.rst
+.. include:: snippets/NumberOfPrintedDigits.rst
+
.. include:: snippets/SetSeed.rst
StoreSupplementaryCalculations
.. include:: snippets/SimulatedObservationAtCurrentState.rst
+.. ------------------------------------ ..
+.. _section_ref_algorithm_AdjointTest_examples:
+.. include:: snippets/Header2Algo09.rst
+
+.. include:: scripts/simple_AdjointTest.rst
+
+.. literalinclude:: scripts/simple_AdjointTest.py
+
+.. include:: snippets/Header2Algo10.rst
+
+.. literalinclude:: scripts/simple_AdjointTest.res
+ :language: none
+
.. ------------------------------------ ..
.. include:: snippets/Header2Algo06.rst
.. include:: snippets/SimulationQuantiles.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_Blue_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_Blue.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_Blue.res
+ :language: none
.. ------------------------------------ ..
.. include:: snippets/Header2Algo06.rst
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_DerivativeFreeOptimization_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_ParticleSwarmOptimization`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_DifferentialEvolution_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_DerivativeFreeOptimization`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_EnsembleBlue_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_Blue`
Algorithme de calcul "*EnsembleKalmanFilter*"
---------------------------------------------
-.. ------------------------------------ ..
-.. include:: snippets/Header2Algo00.rst
-
.. ------------------------------------ ..
.. include:: snippets/Header2Algo01.rst
pair: Variant ; ETKF-N
pair: Variant ; MLEF
pair: Variant ; IEnKF
+ pair: Variant ; E3DVAR
+ pair: Variant ; EnKS
- "EnKF" (Ensemble Kalman Filter, voir [Evensen94]_), algorithme stochastique original, permettant de traiter de manière consistante un opérateur d'évolution non-linéaire,
- "ETKF" (Ensemble-Transform Kalman Filter), algorithme déterministe d'EnKF, permettant de traiter un opérateur d'évolution non-linéaire avec beaucoup moins de membres (on recommande d'utiliser un nombre de membres de l'ordre de 10 ou même parfois moins),
- "ETKF-N" (Ensemble-Transform Kalman Filter of finite size N), algorithme d'ETKF dit de "taille finie N", évitant de recourir à une inflation souvent nécessaire avec les autres algorithmes,
- "MLEF" (Maximum Likelihood Kalman Filter, voir [Zupanski05]_), algorithme déterministe d'EnKF, permettant en plus de traiter de manière consistante un opérateur d'observation non-linéaire,
- "IEnKF" (Iterative EnKF), algorithme déterministe d'EnKF, améliorant le traitement des non-linéarités des opérateurs,
+- "E3DVAR" (EnKF 3DVAR), algorithme couplant assimilation d'ensemble et variationnelle, qui utilise en parallèle une assimilation variationnelle 3DVAR et un algorithme d'EnKF pour améliorer l'estimation des covariances d'erreurs *a posteriori*,
- "EnKS" (Ensemble Kalman Smoother), algorithme de lissage avec un décalage fixe.
-Sans pouvoir prétendre à l'universalité, on recommande d'utiliser l'"EnKF"
-comme référence, et les autres algorithmes (dans l'ordre) comme des moyens pour
-obtenir une assimilation de données plus économique et de qualité
-éventuellement similaire.
+Sans pouvoir prétendre à l'universalité, on recommande d'utiliser la
+formulation "EnKF" comme référence, la formulation "ETKF-N" ou "IEnKF" pour une
+performance robuste, et les autres algorithmes (dans l'ordre) comme des moyens
+pour obtenir une assimilation de données plus économique et de qualité
+(éventuellement) similaire.
.. ------------------------------------ ..
.. include:: snippets/Header2Algo02.rst
.. include:: snippets/EstimationOf_State.rst
+.. include:: snippets/HybridCostDecrementTolerance.rst
+
+.. include:: snippets/HybridCovarianceEquilibrium.rst
+
+.. include:: snippets/HybridMaximumNumberOfIterations.rst
+
.. include:: snippets/InflationFactor.rst
.. include:: snippets/InflationType.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_EnsembleKalmanFilter_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_KalmanFilter`
- [Bishop01]_
- [Evensen03]_
- [Zupanski05]_
+- [Hamill00]_
- [WikipediaEnKF]_
.. include:: snippets/SimulationQuantiles.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ExtendedBlue_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_ExtendedBlue.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_ExtendedBlue.res
+ :language: none
.. ------------------------------------ ..
.. include:: snippets/Header2Algo06.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ExtendedKalmanFilter_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_KalmanFilter`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_FunctionTest_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_FunctionTest.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_FunctionTest.res
+ :language: none
.. ------------------------------------ ..
.. include:: snippets/Header2Algo06.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_GradientTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_KalmanFilter_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_KalmanFilter1.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_KalmanFilter1.res
+ :language: none
.. include:: snippets/Header2Algo11.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_KalmanFilter2.res
+ :language: none
.. include:: snippets/Header2Algo11.rst
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_LinearLeastSquares_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_Blue`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_LinearityTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_LocalSensitivityTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
--- /dev/null
+..
+ Copyright (C) 2008-2022 EDF R&D
+
+ This file is part of SALOME ADAO module.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+ Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+.. index:: single: MeasurementsOptimalPositioningTask
+.. index:: single: Positionnement optimal de mesures
+.. index:: single: Positions de mesures
+.. index:: single: Mesures (Positionnement optimal)
+.. _section_ref_algorithm_MeasurementsOptimalPositioningTask:
+
+Algorithme de tâche "*MeasurementsOptimalPositioningTask*"
+----------------------------------------------------------
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo00.rst
+
+.. warning::
+
+ Cet algorithme n'est pour l'instant utilisable qu'en interface textuelle
+ (TUI) et pas en interface graphique (GUI).
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo01.rst
+
+Cet algorithme permet d'établir des points de mesures optimaux par une analyse
+EIM (Empirical Interpolation Method), à partir d'un ensemble de vecteurs d'état
+(usuellement appelés "*snapshots*" en méthodologie de bases réduites). Chacun
+de ces vecteurs d'état est habituellement (mais pas obligatoirement) le
+résultat :math:`\mathbf{y}` d'une simulation :math:`H` pour un jeu de
+paramètres donné :math:`\mathbf{x}=\mu`.
+
+Dans son usage le plus simple, si l'ensemble des vecteurs d'état est
+pré-existant, il suffit de le fournir par les options d'algorithme.
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo02.rst
+
+*Aucune*
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo03Task.rst
+
+.. include:: snippets/EnsembleOfSnapshots.rst
+
+.. include:: snippets/ExcludeLocations.rst
+
+.. include:: snippets/ErrorNorm.rst
+
+.. include:: snippets/ErrorNormTolerance.rst
+
+.. include:: snippets/MaximumNumberOfLocations.rst
+
+StoreSupplementaryCalculations
+ .. index:: single: StoreSupplementaryCalculations
+
+ *Liste de noms*. Cette liste indique les noms des variables supplémentaires,
+ qui peuvent être disponibles au cours du déroulement ou à la fin de
+ l'algorithme, si elles sont initialement demandées par l'utilisateur. Leur
+ disponibilité implique, potentiellement, des calculs ou du stockage coûteux.
+ La valeur par défaut est donc une liste vide, aucune de ces variables n'étant
+ calculée et stockée par défaut (sauf les variables inconditionnelles). Les
+ noms possibles pour les variables supplémentaires sont dans la liste suivante
+ (la description détaillée de chaque variable nommée est donnée dans la suite
+ de cette documentation par algorithme spécifique, dans la sous-partie
+ "*Informations et variables disponibles à la fin de l'algorithme*") : [
+ "OptimalPoints",
+ "ReducedBasis",
+ "Residus",
+ ].
+
+ Exemple :
+ ``{"StoreSupplementaryCalculations":["OptimalPoints", "ReducedBasis"]}``
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo04.rst
+
+.. include:: snippets/OptimalPoints.rst
+
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo05.rst
+
+.. include:: snippets/OptimalPoints.rst
+
+.. include:: snippets/ReducedBasis.rst
+
+.. include:: snippets/Residus.rst
+
+.. ------------------------------------ ..
+.. _section_ref_algorithm_MeasurementsOptimalPositioningTask_examples:
+.. include:: snippets/Header2Algo06.rst
+
+- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_NonLinearLeastSquares_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_NonLinearLeastSquares.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_NonLinearLeastSquares.res
+ :language: none
.. include:: snippets/Header2Algo11.rst
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ParallelFunctionTest_examples:
.. include:: snippets/Header2Algo09.rst
.. include:: scripts/simple_ParallelFunctionTest.rst
.. include:: snippets/Header2Algo10.rst
.. literalinclude:: scripts/simple_ParallelFunctionTest.res
+ :language: none
.. ------------------------------------ ..
.. include:: snippets/Header2Algo06.rst
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_ParticleSwarmOptimization_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_DerivativeFreeOptimization`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_QuantileRegression_examples:
.. include:: snippets/Header2Algo06.rst
.. ------------------------------------ ..
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_SamplingTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtOptimum.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_TabuSearch_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_DerivativeFreeOptimization`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_TangentTest_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_FunctionTest`
.. include:: snippets/SimulatedObservationAtCurrentState.rst
.. ------------------------------------ ..
+.. _section_ref_algorithm_UnscentedKalmanFilter_examples:
.. include:: snippets/Header2Algo06.rst
- :ref:`section_ref_algorithm_KalmanFilter`
.. include:: snippets/REDUCTION_STUDY.rst
-Les autres termes imbriqués sont classés par ordre alphabétique. Ils ne sont
-pas obligatoirement requis pour tous les algorithmes. Les différentes commandes
+Les termes imbriqués sont classés par ordre alphabétique. Ils ne sont pas
+obligatoirement requis pour tous les algorithmes. Les différentes commandes
sont les suivantes:
.. include:: snippets/AlgorithmParameters.rst
ailleurs par un cas de calcul.
Le premier terme décrit le choix entre un calcul ou une vérification. Dans
-l'interface graphique, la vérification est désigné obligatoirement par la
-commande:
+l'interface graphique, le choix est désigné obligatoirement par la commande:
.. include:: snippets/CHECKING_STUDY.rst
-Les autres termes sont classés par ordre alphabétique. Les différentes
-commandes sont les suivantes:
+Les termes imbriqués sont classés par ordre alphabétique. Ils ne sont pas
+obligatoirement requis pour tous les algorithmes. Les différentes commandes
+sont les suivantes:
.. include:: snippets/AlgorithmParameters.rst
--- /dev/null
+..
+ Copyright (C) 2008-2022 EDF R&D
+
+ This file is part of SALOME ADAO module.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+ Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+.. _section_ref_task_keywords:
+
+Liste des commandes et mots-clés pour un cas orienté tâche ou étude dédiée
+--------------------------------------------------------------------------
+
+Ce jeu de commandes est lié à la description d'un cas orienté tâche ou étude
+dédiée, qui consiste en une procédure spécifique simple pour effectuer une
+tâche de calcul dédiée à une application générale des méthodes d'assimilation
+de données ou d'optimisation.
+
+Les termes imbriqués sont classés par ordre alphabétique. Ils ne sont pas
+obligatoirement requis pour tous les algorithmes. Les différentes commandes
+sont les suivantes:
+
+.. include:: snippets/AlgorithmParameters.rst
+
+.. include:: snippets/Debug.rst
+
+.. include:: snippets/Observers.rst
+
+.. include:: snippets/StudyName.rst
+
+.. include:: snippets/StudyRepertory.rst
ref_algorithm_SamplingTest
ref_algorithm_TangentTest
ref_checking_keywords
+
+.. _section_reference_task:
+
+================================================================================
+**[DocR]** Cas orientés tâches ou études dédiées
+================================================================================
+
+Cette section décrit les algorithmes de tâches facilitant une étude dédiée
+disponibles dans ADAO, détaillant leurs caractéristiques d'utilisation et leurs
+options.
+
+Ces tâches utilisent des algorithmes provenant de méthodes d'assimilation de
+données, de méthodes d'optimisation ou de méthodes avec réduction. On renvoie à
+la section :ref:`section_theory` et à celle des
+:ref:`section_reference_assimilation` pour les détails algorithmiques
+sous-jacents.
+
+.. toctree::
+ :maxdepth: 1
+
+ ref_algorithm_MeasurementsOptimalPositioningTask
+ ref_task_keywords
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+from numpy import array, eye
+from adao import adaoBuilder
+case = adaoBuilder.New('')
+case.setCheckingPoint( Vector = array([0., 1., 2.]), Stored=True )
+case.setObservation( Vector = [10., 11., 12.] )
+case.setObservationOperator( Matrix = eye(3), )
+case.setAlgorithmParameters(
+ Algorithm='AdjointTest',
+ Parameters={
+ 'EpsilonMinimumExponent' :-12,
+ 'NumberOfPrintedDigits' : 3,
+ 'SetSeed' : 1234567,
+ },
+ )
+case.execute()
--- /dev/null
+
+ ADJOINTTEST
+ ===========
+
+ This test allows to analyze the quality of an adjoint operator associated
+ to some given direct operator. If the adjoint operator is approximated and
+ not given, the test measures the quality of the automatic approximation.
+
+ Using the "ScalarProduct" formula, one observes the residue R which is the
+ difference of two scalar products:
+
+ R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > |
+
+ which must remain constantly equal to zero to the accuracy of the calculation.
+ One takes dX0 = Normal(0,X) and dX = Alpha*dX0, where F is the calculation
+ operator. If it is given, Y must be in the image of F. If it is not given,
+ one takes Y = F(X).
+
+ (Remark: numbers that are (about) under 2e-16 represent 0 to machine precision)
+
+ -------------------------------------------------------------
+ i Alpha ||X|| ||Y|| ||dX|| R(Alpha)
+ -------------------------------------------------------------
+ 0 1e+00 2.236e+00 1.910e+01 3.536e+00 0.000e+00
+ 1 1e-01 2.236e+00 1.910e+01 3.536e-01 0.000e+00
+ 2 1e-02 2.236e+00 1.910e+01 3.536e-02 0.000e+00
+ 3 1e-03 2.236e+00 1.910e+01 3.536e-03 0.000e+00
+ 4 1e-04 2.236e+00 1.910e+01 3.536e-04 0.000e+00
+ 5 1e-05 2.236e+00 1.910e+01 3.536e-05 0.000e+00
+ 6 1e-06 2.236e+00 1.910e+01 3.536e-06 0.000e+00
+ 7 1e-07 2.236e+00 1.910e+01 3.536e-07 0.000e+00
+ 8 1e-08 2.236e+00 1.910e+01 3.536e-08 0.000e+00
+ 9 1e-09 2.236e+00 1.910e+01 3.536e-09 0.000e+00
+ 10 1e-10 2.236e+00 1.910e+01 3.536e-10 0.000e+00
+ 11 1e-11 2.236e+00 1.910e+01 3.536e-11 0.000e+00
+ 12 1e-12 2.236e+00 1.910e+01 3.536e-12 0.000e+00
+ -------------------------------------------------------------
--- /dev/null
+.. index:: single: AdjointTest (exemple)
+
+Cet exemple décrit le test de la qualité de l'adjoint d'un opérateur
+quelconque, dont la formulation directe est donnée et dont la formulation
+adjointe est ici approximée par défaut. Les informations nécessaires sont
+minimales, à savoir ici un opérateur :math:`F` (décrit pour le test par la
+commande d'observation "*ObservationOperator*"), et un état
+:math:`\mathbf{x}^b` sur lequel le tester (décrit pour le test par la commande
+"*CheckingPoint*"). Une observation :math:`\mathbf{y}^o` peut être donnée comme
+ici (décrit pour le test par la commande "*Observation*"). On a paramétré la
+sortie pour fixer l'impression, par exemple pour faciliter la comparaison
+automatique.
+
+La vérification pratique consiste à observer si le résidu est constamment égal
+à zéro à la précision du calcul.
case = adaoBuilder.New('')
case.setBackground( Vector = array([0., 1., 2.]), Stored=True )
case.setBackgroundError( ScalarSparseMatrix = 1. )
-case.setObservation( Vector=array([10., 11., 12.]), Stored=True )
+case.setObservation( Vector = array([10., 11., 12.]), Stored=True )
case.setObservationError( ScalarSparseMatrix = 1. )
-case.setObservationOperator( Matrix=array([[1., 0., 0.],
- [0., 1., 0.],
- [0., 0., 1.]]), )
+case.setObservationOperator( Matrix = array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]]), )
case.setAlgorithmParameters(
Algorithm='Blue',
Parameters={
case = adaoBuilder.New('')
case.setBackground( Vector = array([0., 1., 2.]), Stored=True )
case.setBackgroundError( ScalarSparseMatrix = 1. )
-case.setObservation( Vector=array([10., 11., 12.]), Stored=True )
+case.setObservation( Vector = array([10., 11., 12.]), Stored=True )
case.setObservationError( ScalarSparseMatrix = 1. )
-case.setObservationOperator( Matrix=array([[1., 0., 0.],
- [0., 1., 0.],
- [0., 0., 1.]]), )
+case.setObservationOperator( Matrix = array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]]), )
case.setAlgorithmParameters(
Algorithm='ExtendedBlue',
Parameters={
Parameters={
'NumberOfRepetition' : 5,
'NumberOfPrintedDigits' : 2,
- "ShowElementarySummary":False,
+ 'ShowElementarySummary':False,
},
)
case.execute()
+
+ FUNCTIONTEST
+ ============
+
+ This test allows to analyze the (repetition of) launch of some given
+ operator. It shows simple statistics related to its successful execution,
+ or related to the similarities of repetition of its execution.
+
===> Information before launching:
-----------------------------
Characteristics of input vector X, internally converted:
Type...............: <class 'numpy.ndarray'>
- Lenght of vector...: 3
+ Length of vector...: 3
Minimum value......: 0.00e+00
Maximum value......: 2.00e+00
Mean of vector.....: 1.00e+00
Cet exemple décrit le test du bon fonctionnement d'un opérateur quelconque, et
que son appel se déroule de manière compatible avec son usage courant dans les
-algorithmes d'ADAO. Les information nécessaires sont minimales, à savoir ici un
-opérateur :math:`F` (décrit pour le test par la commande d'observation
+algorithmes d'ADAO. Les informations nécessaires sont minimales, à savoir ici
+un opérateur :math:`F` (décrit pour le test par la commande d'observation
"*ObservationOperator*"), et un état :math:`\mathbf{x}^b` sur lequel le tester
(décrit pour le test par la commande "*CheckingPoint*").
+
+ PARALLELFUNCTIONTEST
+ ====================
+
+ This test allows to analyze the (repetition of) launch of some given
+ operator. It shows simple statistics related to its successful execution,
+ or related to the similarities of repetition of its execution.
+
===> Information before launching:
-----------------------------
Characteristics of input vector X, internally converted:
Type...............: <class 'numpy.ndarray'>
- Lenght of vector...: 30
+ Length of vector...: 30
Minimum value......: 0.00e+00
Maximum value......: 2.90e+01
Mean of vector.....: 1.45e+01
AlgorithmParameters
*Dictionnaire*. La variable définit l'algorithme d'assimilation de données ou
d'optimisation choisi par le mot-clé "*Algorithm*", et ses éventuels
- paramètres optionnels. Les choix d'algorithmes sont disponibles à travers
- l'interface graphique. Il existe par exemple le "3DVAR", le "Blue"... Chaque
- algorithme est défini, plus loin, par une sous-section spécifique. De manière
- facultative, la commande permet aussi d'ajouter des paramètres pour contrôler
- l'algorithme. Leurs valeurs sont définies explicitement ou dans un objet de
- type "*Dict*". On se reportera à la
- :ref:`section_ref_options_Algorithm_Parameters` pour l'usage détaillé de
- cette partie de la commande.
+ paramètres optionnels. Les choix potentiels par ce mot-clé "*Algorithm*" sont
+ disponibles à travers l'interface graphique ou dans la documentation de
+ référence contenant une sous-section spécifique pour chacun d'eux. Il existe
+ par exemple le "3DVAR", le "Blue", etc. De manière facultative, la commande
+ permet aussi d'ajouter des paramètres pour contrôler l'algorithme choisi.
+ Leurs valeurs sont définies explicitement ou dans un objet de type "*Dict*".
+ On se reportera à la :ref:`section_ref_options_Algorithm_Parameters` pour
+ l'usage détaillé de cette partie de la commande.
CostDecrementTolerance
*Valeur réelle*. Cette clé indique une valeur limite, conduisant à arrêter le
processus itératif d'optimisation lorsque la fonction coût décroît moins que
- cette tolérance au dernier pas. Le défaut est de 1.e-7, et il est recommandé
- de l'adapter aux besoins pour des problèmes réels. On peut se reporter à la
- partie décrivant les manières de
+ cette tolérance au dernier pas. La valeur par défaut est de 1.e-7, et il est
+ recommandé de l'adapter aux besoins pour des problèmes réels. On peut se
+ reporter à la partie décrivant les manières de
:ref:`subsection_iterative_convergence_control` pour des recommandations plus
détaillées.
--- /dev/null
+.. index:: single: EnsembleOfSnapshots
+
+EnsembleOfSnapshots
+ *Liste de vecteurs ou matrice*. Cette clé contient un ensemble de vecteurs
+ d'état physique :math:`\mathbf{y}` (nommés "*snapshots*" en terminologie de
+ "Bases Réduites"), avec 1 état par colonne si c'est une matrice ou 1 état par
+ élément de la liste. Important : la numérotation des points, auxquels est
+ fournie une valeur d'état, dans chaque vecteur est implicitement celle de
+ l'ordre naturel de numérotation du vecteur d'état, de 0 à la "taille moins 1"
+ de ce vecteur.
+
+ Exemple :
+ ``{"EnsembleOfSnapshots":[y1, y2, y3...]}``
--- /dev/null
+.. index:: single: ErrorNorm
+
+ErrorNorm
+ *Nom prédéfini*. Cette clé indique la norme utilisée pour le résidu qui
+ contrôle la recherche optimale. Le défaut est la norme "L2". Les critères
+ possibles sont dans la liste suivante : ["L2", "Linf"].
+
+ Exemple :
+ ``{"ErrorNorm":"L2"}``
--- /dev/null
+.. index:: single: ErrorNormTolerance
+
+ErrorNormTolerance
+ *Valeur réelle*. Cette clé indique la valeur à partir de laquelle le résidu
+ associé à l'approximation est acceptable, ce qui conduit à arrêter la
+ recherche optimale. La valeur par défaut est de 1.e-7 (ce qui équivaut
+ usuellement à une quasi-absence de critère d'arrêt car l'approximation est
+ moins précise), et il est recommandé de l'adapter aux besoins pour des
+ problèmes réels. Une valeur habituelle, recommandée pour arrêter la recherche
+ sur critère de résidu, est de 1.e-2.
+
+ Exemple :
+ ``{"ErrorNormTolerance":1.e-7}``
--- /dev/null
+.. index:: single: ExcludeLocations
+
+ExcludeLocations
+ *Liste d'entiers*. Cette clé indique la liste des points du vecteur d'état
+ exclus de la recherche optimale. La valeur par défaut est une liste vide.
+ Important : la numérotation de ces points exclus doit être identique à celle
+ qui est adoptée implicitement dans les états fournis par la clé
+ "*EnsembleOfSnapshots*".
+
+ Exemple :
+ ``{"ExcludeLocations":[3, 125, 286]}``
:ref:`section_ref_options_Algorithm_Parameters` pour le bon usage de cette
commande.
-Les options de l'algorithme sont les suivantes :
+Les options sont les suivantes :
:ref:`section_ref_options_Algorithm_Parameters` pour le bon usage de cette
commande.
-Les options de l'algorithme sont les suivantes :
+Les options sont les suivantes :
--- /dev/null
+Les commandes optionnelles générales, disponibles en édition dans l'interface
+graphique ou textuelle, sont indiquées dans la
+:ref:`section_ref_task_keywords`. De plus, les paramètres de la commande
+"*AlgorithmParameters*" permettent d'indiquer les options particulières,
+décrites ci-après, de l'algorithme. On se reportera à la
+:ref:`section_ref_options_Algorithm_Parameters` pour le bon usage de cette
+commande.
+
+Les options sont les suivantes :
-Exemple d'utilisation en Python (TUI)
-+++++++++++++++++++++++++++++++++++++
+Exemples d'utilisation en Python (TUI)
+++++++++++++++++++++++++++++++++++++++
Voici un exemple très simple d'usage de l'algorithme proposé et de ses
paramètres, écrit en :ref:`section_tui`, et dont les informations indiquées en
--- /dev/null
+.. index:: single: HybridCostDecrementTolerance
+
+HybridCostDecrementTolerance
+ *Valeur réelle*. Cette clé indique une valeur limite, conduisant à arrêter le
+ processus itératif d'optimisation dans la partie variationnelle du couplage,
+ lorsque la fonction coût décroît moins que cette tolérance au dernier pas. Le
+ défaut est de 1.e-7, et il est recommandé de l'adapter aux besoins pour des
+ problèmes réels. On peut se reporter à la partie décrivant les manières de
+ :ref:`subsection_iterative_convergence_control` pour des recommandations plus
+ détaillées.
+
+ Exemple :
+ ``{"HybridCostDecrementTolerance":1.e-7}``
--- /dev/null
+.. index:: single: HybridCovarianceEquilibrium
+
+HybridCovarianceEquilibrium
+ *Valeur réelle*. Cette clé indique, en optimisation hybride variationnelle,
+ le facteur d'équilibre entre la covariance statique *a priori* et la
+ covariance d'ensemble. Ce facteur est compris entre 0 et 1, et sa valeur par
+ défaut est 0.5.
+
+ Exemple :
+ ``{"HybridCovarianceEquilibrium":0.5}``
--- /dev/null
+.. index:: single: HybridMaximumNumberOfIterations
+
+HybridMaximumNumberOfIterations
+ *Valeur entière*. Cette clé indique le nombre maximum d'itérations internes
+ possibles en optimisation hybride, pour la partie variationnelle. Le défaut
+ est 15000, qui est très similaire à une absence de limite sur les itérations.
+ Il est ainsi recommandé d'adapter ce paramètre aux besoins pour des problèmes
+ réels. Pour certains optimiseurs, le nombre de pas effectif d'arrêt peut être
+ légèrement différent de la limite à cause d'exigences de contrôle interne de
+ l'algorithme. On peut se reporter à la partie décrivant les manières de
+ :ref:`subsection_iterative_convergence_control` pour des recommandations plus
+ détaillées.
+
+ Exemple :
+ ``{"HybridMaximumNumberOfIterations":100}``
--- /dev/null
+.. index:: single: MaximumNumberOfLocations
+
+MaximumNumberOfLocations
+ *Valeur entière*. Cette clé indique le nombre maximum possible de positions
+ trouvées dans la recherche optimale. La valeur par défaut est 1. La recherche
+ optimale peut éventuellement trouver moins de positions que ce qui est requis
+ par cette clé, comme par exemple dans le cas où le résidu associé à
+ l'approximation est inférieur au critère et conduit à l'arrêt anticipé de la
+ recherche optimale.
+
+ Exemple :
+ ``{"MaximumNumberOfLocations":5}``
--- /dev/null
+.. index:: single: OptimalPoints
+
+OptimalPoints
+ *Liste de série d'entiers*. Chaque élément est une série, contenant les
+ points idéaux déterminés par la recherche optimale, rangés par ordre de
+ préférence décroissante et dans le même ordre que les vecteurs de base
+ réduite trouvés itérativement.
+
+ Exemple :
+ ``mp = ADD.get("OptimalPoints")[-1]``
--- /dev/null
+.. index:: single: ReducedBasis
+
+ReducedBasis
+ *Liste de matrices*. Chaque élément est une matrice, contenant dans chaque
+ colonne un vecteur de la base réduite obtenue par la recherche optimale,
+ rangés par ordre de préférence décroissante et dans le même ordre que les
+ points idéaux trouvés itérativement.
+
+ Exemple :
+ ``rb = ADD.get("ReducedBasis")[-1]``
Residu
*Liste de valeurs*. Chaque élément est la valeur du résidu particulier
- vérifié lors d'un algorithme de vérification, selon l'ordre des tests
+ vérifié lors du déroulement de l'algorithme, selon l'ordre des tests
effectués.
Exemple :
--- /dev/null
+.. index:: single: Residus
+
+Residus
+ *Liste de série de valeurs réelles*. Chaque élément est une série, contenant
+ les valeurs du résidu particulier vérifié lors du déroulement de
+ l'algorithme.
+
+ Exemple :
+ ``rs = ADD.get("Residus")[:]``
pair: Variant ; ETKF-N
pair: Variant ; MLEF
pair: Variant ; IEnKF
+ pair: Variant ; E3DVAR
pair: Variant ; EnKS
Variant
*Nom prédéfini*. Cette clé permet de choisir l'une des variantes possibles
- pour l'algorithme principal. La variante par défaut est l'"EnKF" d'origine,
- et les choix possibles sont
+ pour l'algorithme principal. La variante par défaut est la formulation "EnKF"
+ d'origine, et les choix possibles sont
"EnKF" (Ensemble Kalman Filter),
"ETKF" (Ensemble-Transform Kalman Filter),
"ETKF-N" (Ensemble-Transform Kalman Filter of finite size N),
"MLEF" (Maximum Likelihood Kalman Filter),
"IEnKF" (Iterative_EnKF),
+ "E3DVAR" (EnKF 3DVAR),
"EnKS" (Ensemble Kalman Smoother).
- Il est conseillé d'essayer les variantes "ETKF-N" ou "IEnKF", et de réduire
- le nombre de membres à une dizaine ou moins pour toutes les variantes autres
- que l'"EnKF" original.
+
+ Il est conseillé d'essayer les variantes "ETKF-N" ou "IEnKF" pour une
+ performance robuste, et de réduire le nombre de membres à une dizaine ou
+ moins pour toutes les variantes autres que la formulation "EnKF" originale.
Exemple :
``{"Variant":"EnKF"}``
:math:`\mathbf{x}^b`, en proportion inverse des variances dans
:math:`\mathbf{B}` et :math:`\mathbf{R}`. Comme autre extension, on peut aussi
dire qu'il est équivalent de rechercher l'analyse à l'aide d'un algorithme de
-BLUE ou d'un algorithme de 3DVAR.
+"Blue" ou d'un algorithme de "3DVAR".
Utiliser l'interface textuelle (TUI) pour construire le cas ADAO
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>
Pour étendre cet exemple, on peut remarquer que le même problème résolu par un
-algorithme de 3DVAR donne le même résultat. Cet algorithme peut être choisi
+algorithme de "3DVAR" donne le même résultat. Cet algorithme peut être choisi
lors de l'étape de construction du cas ADAO en changeant simplement l'argument
-"*Algorithm*" en entête. Le reste du cas ADAO en 3DVAR est alors entièrement
-similaire au cas algorithmique du BLUE.
+"*Algorithm*" en entête. Le reste du cas ADAO en "3DVAR" est alors entièrement
+similaire au cas algorithmique du "Blue".
.. _section_tutorials_in_python_script:
précédent :ref:`section_tutorials_in_python_explicit`.
Dans la pratique, cette démarche par scripts est la manière la plus facile pour
-récupérer des informations depuis des calculs en ligne ou préalables, depuis des
-fichiers statiques, depuis des bases de données ou des flux informatiques,
+récupérer des informations depuis des calculs en ligne ou préalables, depuis
+des fichiers statiques, depuis des bases de données ou des flux informatiques,
chacun pouvant être dans ou hors SALOME. Cela permet aussi de modifier aisément
des données d'entrée, par exemple à des fin de débogage ou pour des traitements
répétitifs, et c'est la méthode la plus polyvalente pour paramétrer les données
d'entrée. **Mais attention, la méthodologie par scripts n'est pas une procédure
"sûre", en ce sens que des données erronées ou des erreurs dans les calculs,
peuvent être directement introduites dans l'exécution du cas ADAO.
-L'utilisateur doit vérifier avec attention le contenu de ses scripts.**
+L'utilisateur doit vérifier avec soin le contenu de ses scripts.**
message = "État initial imposé (par défaut, c'est l'ébauche si None)",
)
self.requireInputArguments(
- mandatory= ("Xb", "Y", "HO", "R", "B" ),
+ mandatory= ("Xb", "Y", "HO", "R", "B"),
optional = ("U", "EM", "CM", "Q"),
)
self.setAttributes(tags=(
message = "État initial imposé (par défaut, c'est l'ébauche si None)",
)
self.requireInputArguments(
- mandatory= ("Xb", "Y", "HO", "EM", "R", "B" ),
+ mandatory= ("Xb", "Y", "HO", "EM", "R", "B"),
optional = ("U", "CM", "Q"),
)
self.setAttributes(tags=(
message = "Formule de résidu utilisée",
listval = ["ScalarProduct"],
)
+ self.defineRequiredParameter(
+ name = "AmplitudeOfInitialDirection",
+ default = 1.,
+ typecast = float,
+ message = "Amplitude de la direction initiale de la dérivée directionnelle autour du point nominal",
+ )
self.defineRequiredParameter(
name = "EpsilonMinimumExponent",
default = -8,
message = "Direction initiale de la dérivée directionnelle autour du point nominal",
)
self.defineRequiredParameter(
- name = "AmplitudeOfInitialDirection",
- default = 1.,
- typecast = float,
- message = "Amplitude de la direction initiale de la dérivée directionnelle autour du point nominal",
- )
- self.defineRequiredParameter(
- name = "SetSeed",
- typecast = numpy.random.seed,
- message = "Graine fixée pour le générateur aléatoire",
+ name = "NumberOfPrintedDigits",
+ default = 5,
+ typecast = int,
+ message = "Nombre de chiffres affichés pour les impressions de réels",
+ minval = 0,
)
self.defineRequiredParameter(
name = "ResultTitle",
typecast = str,
message = "Titre du tableau et de la figure",
)
+ self.defineRequiredParameter(
+ name = "SetSeed",
+ typecast = numpy.random.seed,
+ message = "Graine fixée pour le générateur aléatoire",
+ )
self.defineRequiredParameter(
name = "StoreSupplementaryCalculations",
default = [],
Xn,
)
#
- # Entete des resultats
# --------------------
- __marge = 12*u" "
- __precision = u"""
- Remarque : les nombres inferieurs a %.0e (environ) representent un zero
- a la precision machine.\n"""%mpr
- if self._parameters["ResiduFormula"] == "ScalarProduct":
- __entete = u" i Alpha ||X|| ||Y|| ||dX|| R(Alpha)"
- __msgdoc = u"""
- On observe le residu qui est la difference de deux produits scalaires :
-
- R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > |
-
- qui doit rester constamment egal a zero a la precision du calcul.
- On prend dX0 = Normal(0,X) et dX = Alpha*dX0. F est le code de calcul.
- Y doit etre dans l'image de F. S'il n'est pas donne, on prend Y = F(X).\n""" + __precision
+ __p = self._parameters["NumberOfPrintedDigits"]
#
+ __marge = 5*u" "
if len(self._parameters["ResultTitle"]) > 0:
__rt = str(self._parameters["ResultTitle"])
- msgs = u"\n"
- msgs += __marge + "====" + "="*len(__rt) + "====\n"
- msgs += __marge + " " + __rt + "\n"
- msgs += __marge + "====" + "="*len(__rt) + "====\n"
+ msgs = ("\n")
+ msgs += (__marge + "====" + "="*len(__rt) + "====\n")
+ msgs += (__marge + " " + __rt + "\n")
+ msgs += (__marge + "====" + "="*len(__rt) + "====\n")
else:
- msgs = u""
- msgs += __msgdoc
+ msgs = ("\n")
+ msgs += (" %s\n"%self._name)
+ msgs += (" %s\n"%("="*len(self._name),))
#
+ msgs += ("\n")
+ msgs += (" This test allows to analyze the quality of an adjoint operator associated\n")
+ msgs += (" to some given direct operator. If the adjoint operator is approximated and\n")
+ msgs += (" not given, the test measures the quality of the automatic approximation.\n")
+ #
+ if self._parameters["ResiduFormula"] == "ScalarProduct":
+ msgs += ("\n")
+ msgs += (" Using the \"%s\" formula, one observes the residue R which is the\n"%self._parameters["ResiduFormula"])
+ msgs += (" difference of two scalar products:\n")
+ msgs += ("\n")
+ msgs += (" R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > |\n")
+ msgs += ("\n")
+ msgs += (" which must remain constantly equal to zero to the accuracy of the calculation.\n")
+ msgs += (" One takes dX0 = Normal(0,X) and dX = Alpha*dX0, where F is the calculation\n")
+ msgs += (" operator. If it is given, Y must be in the image of F. If it is not given,\n")
+ msgs += (" one takes Y = F(X).\n")
+ msgs += ("\n")
+ msgs += (" (Remark: numbers that are (about) under %.0e represent 0 to machine precision)"%mpr)
+ print(msgs)
+ #
+ # --------------------
+ __pf = " %"+str(__p+7)+"."+str(__p)+"e"
+ __ms = " %2i %5.0e"+(__pf*4)
+ __bl = " %"+str(__p+7)+"s "
+ __entete = str.rstrip(" i Alpha " + \
+ str.center("||X||",2+__p+7) + \
+ str.center("||Y||",2+__p+7) + \
+ str.center("||dX||",2+__p+7) + \
+ str.center("R(Alpha)",2+__p+7))
__nbtirets = len(__entete) + 2
+ #
+ msgs = ""
msgs += "\n" + __marge + "-"*__nbtirets
msgs += "\n" + __marge + __entete
msgs += "\n" + __marge + "-"*__nbtirets
#
- # ----------
for i,amplitude in enumerate(Perturbations):
dX = amplitude * dX0
NormedX = numpy.linalg.norm( dX )
#
Residu = abs(float(numpy.dot( TangentFXdX, Yn ) - numpy.dot( dX, AdjointFXY )))
#
- msg = " %2i %5.0e %9.3e %9.3e %9.3e | %9.3e"%(i,amplitude,NormeX,NormeY,NormedX,Residu)
+ msg = __ms%(i,amplitude,NormeX,NormeY,NormedX,Residu)
msgs += "\n" + __marge + msg
#
self.StoredVariables["Residu"].store( Residu )
#
msgs += "\n" + __marge + "-"*__nbtirets
- msgs += "\n"
- #
- # Sorties eventuelles
- # -------------------
- print("\nResults of adjoint check by \"%s\" formula:"%self._parameters["ResiduFormula"])
print(msgs)
#
self._post_run(HO)
--- /dev/null
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+ EIM
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy
+
+# ==============================================================================
+def EIM_offline(selfA, Verbose = False):
+    """
+    Reduced basis construction by the Empirical Interpolation Method (EIM).
+
+    The snapshot ensemble, the error norm, the stopping tolerance and the
+    size limits are all read from selfA._parameters. If selfA has a
+    StoredVariables attribute, the optimal points (and optionally the
+    reduced basis and the residuals) are stored in it. Returns the tuple
+    (mu, I, Q, errors): selected snapshot indices, magic point indices,
+    reduced basis (one basis vector per column) and residual norm history.
+    """
+    #
+    # Initializations
+    # ---------------
+    # Normalize the snapshot ensemble to an array with one snapshot per column.
+    if isinstance(selfA._parameters["EnsembleOfSnapshots"], (numpy.ndarray,numpy.matrix)):
+        __EOS = numpy.asarray(selfA._parameters["EnsembleOfSnapshots"])
+    elif isinstance(selfA._parameters["EnsembleOfSnapshots"], (list,tuple)):
+        __EOS = numpy.asarray(selfA._parameters["EnsembleOfSnapshots"]).T
+    else:
+        raise ValueError("EOS has to be an array/matrix (each column is a snapshot vector) or a list/tuple (each element is a snapshot vector).")
+    #
+    # Column-norm function used to pick the worst-approximated snapshot.
+    if selfA._parameters["ErrorNorm"] == "L2":
+        MaxNormByColumn = MaxL2NormByColumn
+    else:
+        MaxNormByColumn = MaxLinfNormByColumn
+    #
+    if "ExcludeLocations" in selfA._parameters:
+        __ExcludedMagicPoints = selfA._parameters["ExcludeLocations"]
+    else:
+        __ExcludedMagicPoints = []
+    if len(__ExcludedMagicPoints) > 0:
+        __ExcludedMagicPoints = numpy.ravel(numpy.asarray(__ExcludedMagicPoints, dtype=int))
+        # Complement of the excluded points, used to restrict norm computations.
+        __IncludedMagicPoints = numpy.setdiff1d(
+            numpy.arange(__EOS.shape[0]),
+            __ExcludedMagicPoints,
+            assume_unique = True,
+            )
+    else:
+        __IncludedMagicPoints = []
+    #
+    __dimS, __nbmS = __EOS.shape
+    # Cap the reduced basis size using the user limits and the ensemble sizes.
+    if "MaximumNumberOfLocations" in selfA._parameters and "MaximumRBSize" in selfA._parameters:
+        selfA._parameters["MaximumRBSize"] = min(selfA._parameters["MaximumNumberOfLocations"],selfA._parameters["MaximumRBSize"])
+    elif "MaximumNumberOfLocations" in selfA._parameters:
+        selfA._parameters["MaximumRBSize"] = selfA._parameters["MaximumNumberOfLocations"]
+    elif "MaximumRBSize" in selfA._parameters:
+        pass
+    else:
+        selfA._parameters["MaximumRBSize"] = __nbmS
+    __maxM = min(selfA._parameters["MaximumRBSize"], __dimS, __nbmS)
+    if "ErrorNormTolerance" in selfA._parameters:
+        selfA._parameters["EpsilonEIM"] = selfA._parameters["ErrorNormTolerance"]
+    else:
+        selfA._parameters["EpsilonEIM"] = 1.e-2
+    #
+    __mu = []
+    __I = []
+    __Q = numpy.empty(__dimS)
+    __errors = []
+    #
+    __M = 0
+    __iM = -1
+    __rhoM = numpy.empty(__dimS)
+    #
+    # Initial residual: the snapshot of largest (restricted) norm.
+    __eM, __muM = MaxNormByColumn(__EOS, __IncludedMagicPoints)
+    __residuM = __EOS[:,__muM]
+    __errors.append(__eM)
+    #
+    # Greedy loop
+    # -----------
+    while __M < __maxM and __eM > selfA._parameters["EpsilonEIM"]:
+        __M = __M + 1
+        #
+        __mu.append(__muM)
+        #
+        # Determination of the magic point and of the magic function
+        __abs_residuM = numpy.abs(__residuM)
+        __iM = numpy.argmax(__abs_residuM)
+        __rhoM = __residuM / __abs_residuM[__iM]
+        #
+        # If the best point is excluded, fall back to the next best candidates.
+        # NOTE(review): __rhoM is not renormalized after re-picking __iM — confirm intended.
+        if __iM in __ExcludedMagicPoints:
+            __sIndices = numpy.argsort(__abs_residuM)
+            __rang = -1
+            assert __iM == __sIndices[__rang]
+            while __iM in __ExcludedMagicPoints and __rang >= -len(__abs_residuM):
+                __rang = __rang - 1
+                __iM = __sIndices[__rang]
+        #
+        # Append the new basis vector as a new column of Q.
+        if __M > 1:
+            __Q = numpy.column_stack((__Q, __rhoM))
+        else:
+            __Q = __rhoM
+        __I.append(__iM)
+        #
+        # Interpolate the whole ensemble on the current magic points.
+        __restrictedQi = __Q[__I]
+        if __M > 1:
+            __Qi_inv = numpy.linalg.inv(__restrictedQi)
+        else:
+            __Qi_inv = 1. / __restrictedQi
+        #
+        __restrictedEOSi = __EOS[__I]
+        #
+        __interpolator = numpy.empty(__EOS.shape)
+        if __M > 1:
+            __interpolator = numpy.dot(__Q,numpy.dot(__Qi_inv,__restrictedEOSi))
+        else:
+            __interpolator = numpy.outer(__Q,numpy.outer(__Qi_inv,__restrictedEOSi))
+        #
+        # Interpolation residual, driving the next iteration.
+        __dataForNextIter = __EOS - __interpolator
+        __eM, __muM = MaxNormByColumn(__dataForNextIter, __IncludedMagicPoints)
+        __errors.append(__eM)
+        #
+        __residuM = __dataForNextIter[:,__muM]
+    #
+    #--------------------------
+    # Optional storage of the results in the calling algorithm object.
+    if hasattr(selfA, "StoredVariables"):
+        selfA.StoredVariables["OptimalPoints"].store( __I )
+        if selfA._toStore("ReducedBasis"):
+            selfA.StoredVariables["ReducedBasis"].store( __Q )
+        if selfA._toStore("Residus"):
+            selfA.StoredVariables["Residus"].store( __errors )
+    #
+    return __mu, __I, __Q, __errors
+
+# ==============================================================================
+def EIM_online(selfA, QEIM, mu, iEIM):
+    """
+    Online stage of the Empirical Interpolation Method (not implemented yet).
+    """
+    raise NotImplementedError()
+
+# ==============================================================================
+def MaxL2NormByColumn(Ensemble, IncludedPoints=[]):
+    """
+    Return (nmax, imax): the largest column L2 norm of Ensemble and the
+    index of that column. If IncludedPoints is non-empty, the norm of each
+    column is computed on these row indices only.
+    """
+    # NOTE(review): mutable default argument, harmless here (never mutated).
+    nmax, imax = -1, -1
+    if len(IncludedPoints) > 0:
+        for indice in range(Ensemble.shape[1]):
+            # mode='clip' bounds out-of-range indices to the valid range.
+            norme = numpy.linalg.norm(
+                numpy.take(Ensemble[:,indice], IncludedPoints, mode='clip'),
+                )
+            if norme > nmax:
+                nmax, imax, = norme, indice
+    else:
+        for indice in range(Ensemble.shape[1]):
+            norme = numpy.linalg.norm(
+                Ensemble[:,indice],
+                )
+            if norme > nmax:
+                nmax, imax, = norme, indice
+    return nmax, imax
+
+def MaxLinfNormByColumn(Ensemble, IncludedPoints=[]):
+    """
+    Return (nmax, imax): the largest column L-infinity norm of Ensemble and
+    the index of that column. If IncludedPoints is non-empty, the norm of
+    each column is computed on these row indices only.
+    """
+    # NOTE(review): mutable default argument, harmless here (never mutated).
+    nmax, imax = -1, -1
+    if len(IncludedPoints) > 0:
+        for indice in range(Ensemble.shape[1]):
+            # mode='clip' bounds out-of-range indices to the valid range.
+            norme = numpy.linalg.norm(
+                numpy.take(Ensemble[:,indice], IncludedPoints, mode='clip'),
+                ord=numpy.inf,
+                )
+            if norme > nmax:
+                nmax, imax, = norme, indice
+    else:
+        for indice in range(Ensemble.shape[1]):
+            norme = numpy.linalg.norm(
+                Ensemble[:,indice],
+                ord=numpy.inf,
+                )
+            if norme > nmax:
+                nmax, imax, = norme, indice
+    return nmax, imax
+
+# ==============================================================================
+if __name__ == "__main__":
+ print('\n AUTODIAGNOSTIC\n')
"NonLinear",
"Filter",
"Ensemble",
+ "Reduction",
))
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
"ETKF-N",
"MLEF",
"IEnKF",
+ "E3DVAR",
"EnKS",
],
listadv = [
"IEnKF-B",
"EnKS-KFF",
"IEKF",
- "E3DVAR",
"E3DVAR-EnKF",
"E3DVAR-ETKF",
"E3DVAR-MLEF",
"Filter",
"Ensemble",
"Dynamic",
+ "Reduction",
))
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
#
# ----------
__s = self._parameters["ShowElementarySummary"]
+ __p = self._parameters["NumberOfPrintedDigits"]
+ #
__marge = 5*u" "
- _p = self._parameters["NumberOfPrintedDigits"]
if len(self._parameters["ResultTitle"]) > 0:
__rt = str(self._parameters["ResultTitle"])
- msgs = u"\n"
- msgs += __marge + "====" + "="*len(__rt) + "====\n"
- msgs += __marge + " " + __rt + "\n"
- msgs += __marge + "====" + "="*len(__rt) + "====\n"
- print("%s"%msgs)
+ msgs = ("\n")
+ msgs += (__marge + "====" + "="*len(__rt) + "====\n")
+ msgs += (__marge + " " + __rt + "\n")
+ msgs += (__marge + "====" + "="*len(__rt) + "====\n")
+ else:
+ msgs = ("\n")
+ msgs += (" %s\n"%self._name)
+ msgs += (" %s\n"%("="*len(self._name),))
#
- msgs = ("===> Information before launching:\n")
+ msgs += ("\n")
+ msgs += (" This test allows to analyze the (repetition of) launch of some given\n")
+ msgs += (" operator. It shows simple statistics related to its successful execution,\n")
+ msgs += (" or related to the similarities of repetition of its execution.\n")
+ msgs += ("\n")
+ msgs += ("===> Information before launching:\n")
msgs += (" -----------------------------\n")
msgs += (" Characteristics of input vector X, internally converted:\n")
msgs += (" Type...............: %s\n")%type( Xn )
- msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Xn ).shape)
- msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Xn )
- msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Xn )
- msgs += (" Mean of vector.....: %."+str(_p)+"e\n")%numpy.mean( Xn, dtype=mfp )
- msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Xn, dtype=mfp )
- msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Xn )
+ msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Xn ).shape)
+ msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Xn )
+ msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Xn )
+ msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Xn, dtype=mfp )
+ msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Xn, dtype=mfp )
+ msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Xn )
print(msgs)
#
print(" %s\n"%("-"*75,))
msgs = ("===> Information after evaluation:\n")
msgs += ("\n Characteristics of simulated output vector Y=H(X), to compare to others:\n")
msgs += (" Type...............: %s\n")%type( Yn )
- msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Yn ).shape)
- msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Yn )
- msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Yn )
- msgs += (" Mean of vector.....: %."+str(_p)+"e\n")%numpy.mean( Yn, dtype=mfp )
- msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Yn, dtype=mfp )
- msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Yn )
+ msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Yn ).shape)
+ msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Yn )
+ msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Yn )
+ msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Yn, dtype=mfp )
+ msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Yn, dtype=mfp )
+ msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Yn )
print(msgs)
if self._toStore("SimulatedObservationAtCurrentState"):
self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(Yn) )
Yy = numpy.array( Ys )
msgs += ("\n Characteristics of the whole set of outputs Y:\n")
msgs += (" Number of evaluations.........................: %i\n")%len( Ys )
- msgs += (" Minimum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.min( Yy )
- msgs += (" Maximum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.max( Yy )
- msgs += (" Mean of vector of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.mean( Yy, dtype=mfp )
- msgs += (" Standard error of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.std( Yy, dtype=mfp )
+ msgs += (" Minimum value of the whole set of outputs.....: %."+str(__p)+"e\n")%numpy.min( Yy )
+ msgs += (" Maximum value of the whole set of outputs.....: %."+str(__p)+"e\n")%numpy.max( Yy )
+ msgs += (" Mean of vector of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.mean( Yy, dtype=mfp )
+ msgs += (" Standard error of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.std( Yy, dtype=mfp )
Ym = numpy.mean( numpy.array( Ys ), axis=0, dtype=mfp )
msgs += ("\n Characteristics of the vector Ym, mean of the outputs Y:\n")
msgs += (" Size of the mean of the outputs...............: %i\n")%Ym.size
- msgs += (" Minimum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.min( Ym )
- msgs += (" Maximum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.max( Ym )
- msgs += (" Mean of the mean of the outputs...............: %."+str(_p)+"e\n")%numpy.mean( Ym, dtype=mfp )
- msgs += (" Standard error of the mean of the outputs.....: %."+str(_p)+"e\n")%numpy.std( Ym, dtype=mfp )
+ msgs += (" Minimum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.min( Ym )
+ msgs += (" Maximum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.max( Ym )
+ msgs += (" Mean of the mean of the outputs...............: %."+str(__p)+"e\n")%numpy.mean( Ym, dtype=mfp )
+ msgs += (" Standard error of the mean of the outputs.....: %."+str(__p)+"e\n")%numpy.std( Ym, dtype=mfp )
Ye = numpy.mean( numpy.array( Ys ) - Ym, axis=0, dtype=mfp )
msgs += "\n Characteristics of the mean of the differences between the outputs Y and their mean Ym:\n"
msgs += (" Size of the mean of the differences...........: %i\n")%Ym.size
- msgs += (" Minimum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.min( Ye )
- msgs += (" Maximum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.max( Ye )
- msgs += (" Mean of the mean of the differences...........: %."+str(_p)+"e\n")%numpy.mean( Ye, dtype=mfp )
- msgs += (" Standard error of the mean of the differences.: %."+str(_p)+"e\n")%numpy.std( Ye, dtype=mfp )
+ msgs += (" Minimum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.min( Ye )
+ msgs += (" Maximum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.max( Ye )
+ msgs += (" Mean of the mean of the differences...........: %."+str(__p)+"e\n")%numpy.mean( Ye, dtype=mfp )
+ msgs += (" Standard error of the mean of the differences.: %."+str(__p)+"e\n")%numpy.std( Ye, dtype=mfp )
msgs += ("\n %s\n"%("-"*75,))
print(msgs)
#
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+import numpy
+from daCore import BasicObjects
+from daAlgorithms.Atoms import ecweim
+
+# ==============================================================================
+class ElementaryAlgorithm(BasicObjects.Algorithm):
+    """
+    Optimal positioning of measurement points, obtained as the "magic
+    points" of an EIM reduced basis built on a given snapshot ensemble.
+    """
+    def __init__(self):
+        BasicObjects.Algorithm.__init__(self, "MEASUREMENTSOPTIMALPOSITIONING")
+        # Method variant (only the EIM-based positioning is enabled for now).
+        self.defineRequiredParameter(
+            name = "Variant",
+            default = "Positioning",
+            typecast = str,
+            message = "Variant ou formulation de la méthode",
+            listval = [
+                "Positioning",
+                # "PositioningByEIM",
+                ],
+            )
+        # Snapshot ensemble: one physical state per column.
+        self.defineRequiredParameter(
+            name = "EnsembleOfSnapshots",
+            default = [],
+            typecast = numpy.array,
+            message = "Ensemble de vecteurs d'état physique (snapshots), 1 état par colonne",
+            )
+        # Upper bound on the number of selected measurement locations.
+        self.defineRequiredParameter(
+            name = "MaximumNumberOfLocations",
+            default = 1,
+            typecast = int,
+            message = "Nombre maximal de positions",
+            minval = 0,
+            )
+        # Positions forbidden for selection, in the snapshot internal numbering.
+        self.defineRequiredParameter(
+            name = "ExcludeLocations",
+            default = [],
+            typecast = tuple,
+            message = "Liste des positions exclues selon la numérotation interne d'un snapshot",
+            minval = -1,
+            )
+        # Norm used by the greedy optimality criterion of the EIM search.
+        self.defineRequiredParameter(
+            name = "ErrorNorm",
+            default = "L2",
+            typecast = str,
+            message = "Norme d'erreur utilisée pour le critère d'optimalité des positions",
+            listval = ["L2", "Linf"]
+            )
+        # Residual tolerance stopping the greedy search early.
+        self.defineRequiredParameter(
+            name = "ErrorNormTolerance",
+            default = 1.e-7,
+            typecast = float,
+            message = "Valeur limite inférieure du critère d'optimalité forçant l'arrêt",
+            minval = 0.,
+            )
+        self.defineRequiredParameter(
+            name = "StoreSupplementaryCalculations",
+            default = [],
+            typecast = tuple,
+            message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
+            listval = [
+                "OptimalPoints",
+                "ReducedBasis",
+                "Residus",
+                ]
+            )
+        self.requireInputArguments(
+            mandatory= (),
+            optional = ("Xb", "HO"),
+            )
+        self.setAttributes(tags=(
+            "Reduction",
+            ))
+
+    def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
+        self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
+        #
+        #--------------------------
+        if self._parameters["Variant"] in ["Positioning", "PositioningByEIM"]:
+            # Offline EIM stage, run only when a snapshot ensemble is provided.
+            if len(self._parameters["EnsembleOfSnapshots"]) > 0:
+                ecweim.EIM_offline(self)
+        #
+        #--------------------------
+        else:
+            raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
+        #
+        self._post_run(HO)
+        return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+ print('\n AUTODIAGNOSTIC\n')
#
# ----------
__s = self._parameters["ShowElementarySummary"]
+ __p = self._parameters["NumberOfPrintedDigits"]
+ #
__marge = 5*u" "
- _p = self._parameters["NumberOfPrintedDigits"]
if len(self._parameters["ResultTitle"]) > 0:
__rt = str(self._parameters["ResultTitle"])
- msgs = u"\n"
- msgs += __marge + "====" + "="*len(__rt) + "====\n"
- msgs += __marge + " " + __rt + "\n"
- msgs += __marge + "====" + "="*len(__rt) + "====\n"
- print("%s"%msgs)
+ msgs = ("\n")
+ msgs += (__marge + "====" + "="*len(__rt) + "====\n")
+ msgs += (__marge + " " + __rt + "\n")
+ msgs += (__marge + "====" + "="*len(__rt) + "====\n")
+ else:
+ msgs = ("\n")
+ msgs += (" %s\n"%self._name)
+ msgs += (" %s\n"%("="*len(self._name),))
#
- msgs = ("===> Information before launching:\n")
+ msgs += ("\n")
+ msgs += (" This test allows to analyze the (repetition of) launch of some given\n")
+ msgs += (" operator. It shows simple statistics related to its successful execution,\n")
+ msgs += (" or related to the similarities of repetition of its execution.\n")
+ msgs += ("\n")
+ msgs += ("===> Information before launching:\n")
msgs += (" -----------------------------\n")
msgs += (" Characteristics of input vector X, internally converted:\n")
msgs += (" Type...............: %s\n")%type( Xn )
- msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Xn ).shape)
- msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Xn )
- msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Xn )
- msgs += (" Mean of vector.....: %."+str(_p)+"e\n")%numpy.mean( Xn, dtype=mfp )
- msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Xn, dtype=mfp )
- msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Xn )
+ msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Xn ).shape)
+ msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Xn )
+ msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Xn )
+ msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Xn, dtype=mfp )
+ msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Xn, dtype=mfp )
+ msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Xn )
print(msgs)
#
print(" %s\n"%("-"*75,))
msgs = ("===> Information after evaluation:\n")
msgs += ("\n Characteristics of simulated output vector Y=H(X), to compare to others:\n")
msgs += (" Type...............: %s\n")%type( Yn )
- msgs += (" Lenght of vector...: %i\n")%max(numpy.ravel( Yn ).shape)
- msgs += (" Minimum value......: %."+str(_p)+"e\n")%numpy.min( Yn )
- msgs += (" Maximum value......: %."+str(_p)+"e\n")%numpy.max( Yn )
- msgs += (" Mean of vector.....: %."+str(_p)+"e\n")%numpy.mean( Yn, dtype=mfp )
- msgs += (" Standard error.....: %."+str(_p)+"e\n")%numpy.std( Yn, dtype=mfp )
- msgs += (" L2 norm of vector..: %."+str(_p)+"e\n")%numpy.linalg.norm( Yn )
+ msgs += (" Length of vector...: %i\n")%max(numpy.ravel( Yn ).shape)
+ msgs += (" Minimum value......: %."+str(__p)+"e\n")%numpy.min( Yn )
+ msgs += (" Maximum value......: %."+str(__p)+"e\n")%numpy.max( Yn )
+ msgs += (" Mean of vector.....: %."+str(__p)+"e\n")%numpy.mean( Yn, dtype=mfp )
+ msgs += (" Standard error.....: %."+str(__p)+"e\n")%numpy.std( Yn, dtype=mfp )
+ msgs += (" L2 norm of vector..: %."+str(__p)+"e\n")%numpy.linalg.norm( Yn )
print(msgs)
if self._toStore("SimulatedObservationAtCurrentState"):
self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(Yn) )
Yy = numpy.array( Ys )
msgs += ("\n Characteristics of the whole set of outputs Y:\n")
msgs += (" Number of evaluations.........................: %i\n")%len( Ys )
- msgs += (" Minimum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.min( Yy )
- msgs += (" Maximum value of the whole set of outputs.....: %."+str(_p)+"e\n")%numpy.max( Yy )
- msgs += (" Mean of vector of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.mean( Yy, dtype=mfp )
- msgs += (" Standard error of the whole set of outputs....: %."+str(_p)+"e\n")%numpy.std( Yy, dtype=mfp )
+ msgs += (" Minimum value of the whole set of outputs.....: %."+str(__p)+"e\n")%numpy.min( Yy )
+ msgs += (" Maximum value of the whole set of outputs.....: %."+str(__p)+"e\n")%numpy.max( Yy )
+ msgs += (" Mean of vector of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.mean( Yy, dtype=mfp )
+ msgs += (" Standard error of the whole set of outputs....: %."+str(__p)+"e\n")%numpy.std( Yy, dtype=mfp )
Ym = numpy.mean( numpy.array( Ys ), axis=0, dtype=mfp )
msgs += ("\n Characteristics of the vector Ym, mean of the outputs Y:\n")
msgs += (" Size of the mean of the outputs...............: %i\n")%Ym.size
- msgs += (" Minimum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.min( Ym )
- msgs += (" Maximum value of the mean of the outputs......: %."+str(_p)+"e\n")%numpy.max( Ym )
- msgs += (" Mean of the mean of the outputs...............: %."+str(_p)+"e\n")%numpy.mean( Ym, dtype=mfp )
- msgs += (" Standard error of the mean of the outputs.....: %."+str(_p)+"e\n")%numpy.std( Ym, dtype=mfp )
+ msgs += (" Minimum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.min( Ym )
+ msgs += (" Maximum value of the mean of the outputs......: %."+str(__p)+"e\n")%numpy.max( Ym )
+ msgs += (" Mean of the mean of the outputs...............: %."+str(__p)+"e\n")%numpy.mean( Ym, dtype=mfp )
+ msgs += (" Standard error of the mean of the outputs.....: %."+str(__p)+"e\n")%numpy.std( Ym, dtype=mfp )
Ye = numpy.mean( numpy.array( Ys ) - Ym, axis=0, dtype=mfp )
msgs += "\n Characteristics of the mean of the differences between the outputs Y and their mean Ym:\n"
msgs += (" Size of the mean of the differences...........: %i\n")%Ym.size
- msgs += (" Minimum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.min( Ye )
- msgs += (" Maximum value of the mean of the differences..: %."+str(_p)+"e\n")%numpy.max( Ye )
- msgs += (" Mean of the mean of the differences...........: %."+str(_p)+"e\n")%numpy.mean( Ye, dtype=mfp )
- msgs += (" Standard error of the mean of the differences.: %."+str(_p)+"e\n")%numpy.std( Ye, dtype=mfp )
+ msgs += (" Minimum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.min( Ye )
+ msgs += (" Maximum value of the mean of the differences..: %."+str(__p)+"e\n")%numpy.max( Ye )
+ msgs += (" Mean of the mean of the differences...........: %."+str(__p)+"e\n")%numpy.mean( Ye, dtype=mfp )
+ msgs += (" Standard error of the mean of the differences.: %."+str(__p)+"e\n")%numpy.std( Ye, dtype=mfp )
msgs += ("\n %s\n"%("-"*75,))
print(msgs)
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
import numpy, logging, itertools
-from daCore import BasicObjects
+from daCore import BasicObjects, NumericObjects
from daCore.PlatformInfo import PlatformInfo
mfp = PlatformInfo().MaximumPrecision()
name = "SampleAsExplicitHyperCube",
default = [],
typecast = tuple,
- message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonages de chaque variable comme une liste",
+ message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonnages de chaque variable comme une liste",
)
self.defineRequiredParameter(
name = "SampleAsMinMaxStepHyperCube",
default = [],
typecast = tuple,
- message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonages de chaque variable par un triplet [min,max,step]",
+ message = "Points de calcul définis par un hyper-cube dont on donne la liste des échantillonnages de chaque variable par un triplet [min,max,step]",
)
self.defineRequiredParameter(
name = "SampleAsIndependantRandomVariables",
default = [],
typecast = tuple,
- message = "Points de calcul définis par un hyper-cube dont les points sur chaque axe proviennent de l'échantillonage indépendant de la variable selon la spécification ['distribution',[parametres],nombre]",
+ message = "Points de calcul définis par un hyper-cube dont les points sur chaque axe proviennent de l'échantillonnage indépendant de la variable selon la spécification ['distribution',[parametres],nombre]",
)
self.defineRequiredParameter(
name = "QualityCriterion",
Y0 = numpy.ravel( Y )
#
# ---------------------------
- if len(self._parameters["SampleAsnUplet"]) > 0:
- sampleList = self._parameters["SampleAsnUplet"]
- for i,Xx in enumerate(sampleList):
- if numpy.ravel(Xx).size != X0.size:
- raise ValueError("The size %i of the %ith state X in the sample and %i of the checking point Xb are different, they have to be identical."%(numpy.ravel(Xx).size,i+1,X0.size))
- elif len(self._parameters["SampleAsExplicitHyperCube"]) > 0:
- sampleList = itertools.product(*list(self._parameters["SampleAsExplicitHyperCube"]))
- elif len(self._parameters["SampleAsMinMaxStepHyperCube"]) > 0:
- coordinatesList = []
- for i,dim in enumerate(self._parameters["SampleAsMinMaxStepHyperCube"]):
- if len(dim) != 3:
- raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be [min,max,step]."%(i,dim))
- else:
- coordinatesList.append(numpy.linspace(dim[0],dim[1],1+int((float(dim[1])-float(dim[0]))/float(dim[2]))))
- sampleList = itertools.product(*coordinatesList)
- elif len(self._parameters["SampleAsIndependantRandomVariables"]) > 0:
- coordinatesList = []
- for i,dim in enumerate(self._parameters["SampleAsIndependantRandomVariables"]):
- if len(dim) != 3:
- raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be ('distribution',(parameters),length) with distribution in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]."%(i,dim))
- elif not( str(dim[0]) in ['normal','lognormal','uniform','weibull'] and hasattr(numpy.random,dim[0]) ):
- raise ValueError("For dimension %i, the distribution name \"%s\" is not allowed, please choose in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]"%(i,dim[0]))
- else:
- distribution = getattr(numpy.random,str(dim[0]),'normal')
- coordinatesList.append(distribution(*dim[1], size=max(1,int(dim[2]))))
- sampleList = itertools.product(*coordinatesList)
- else:
- sampleList = iter([X0,])
+ sampleList = NumericObjects.BuildComplexSampleList(
+ self._parameters["SampleAsnUplet"],
+ self._parameters["SampleAsExplicitHyperCube"],
+ self._parameters["SampleAsMinMaxStepHyperCube"],
+ self._parameters["SampleAsIndependantRandomVariables"],
+ X0,
+ )
# ----------
BI = B.getI()
RI = R.getI()
"Filter",
"Ensemble",
"Dynamic",
+ "Reduction",
))
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
"""
def __init__(self,
toleranceInRedundancy = 1.e-18,
- lenghtOfRedundancy = -1,
+ lengthOfRedundancy = -1,
):
"""
Les caractéristiques de tolérance peuvent être modifiées à la création.
"""
self.__tolerBP = float(toleranceInRedundancy)
- self.__lenghtOR = int(lenghtOfRedundancy)
- self.__initlnOR = self.__lenghtOR
+ self.__lengthOR = int(lengthOfRedundancy)
+ self.__initlnOR = self.__lengthOR
self.__seenNames = []
self.__enabled = True
self.clearCache()
__alc = False
__HxV = None
if self.__enabled:
- for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
+ for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
if not hasattr(xValue, 'size'):
pass
elif (str(oName) != self.__listOPCV[i][3]):
def storeValueInX(self, xValue, HxValue, oName="" ):
"Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
- if self.__lenghtOR < 0:
- self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
- self.__initlnOR = self.__lenghtOR
+ if self.__lengthOR < 0:
+ self.__lengthOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
+ self.__initlnOR = self.__lengthOR
self.__seenNames.append(str(oName))
if str(oName) not in self.__seenNames: # Etend la liste si nouveau
- self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
- self.__initlnOR += self.__lenghtOR
+ self.__lengthOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
+ self.__initlnOR += self.__lengthOR
self.__seenNames.append(str(oName))
- while len(self.__listOPCV) > self.__lenghtOR:
+ while len(self.__listOPCV) > self.__lengthOR:
self.__listOPCV.pop(0)
self.__listOPCV.append( (
copy.copy(numpy.ravel(xValue)), # 0 Previous point
def disable(self):
"Inactive le cache"
- self.__initlnOR = self.__lenghtOR
- self.__lenghtOR = 0
+ self.__initlnOR = self.__lengthOR
+ self.__lengthOR = 0
self.__enabled = False
def enable(self):
"Active le cache"
- self.__lenghtOR = self.__initlnOR
+ self.__lengthOR = self.__initlnOR
self.__enabled = True
# ==============================================================================
if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
- if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
+ if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
from daCore import NumericObjects
reducingMemoryUse = __Function["withReducingMemoryUse"],
avoidingRedundancy = __Function["withAvoidingRedundancy"],
toleranceInRedundancy = __Function["withToleranceInRedundancy"],
- lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
+ lengthOfRedundancy = __Function["withLengthOfRedundancy"],
mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
mpWorkers = __Function["NumberOfProcesses"],
mfEnabled = __Function["withmfEnabled"],
self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
+ self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
+ self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
+ self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
self.__msg = ""
self.__path_settings_ok = False
#----------------
- if "EFICAS_ROOT" in os.environ:
- __EFICAS_ROOT = os.environ["EFICAS_ROOT"]
+ if "EFICAS_TOOLS_ROOT" in os.environ:
+ __EFICAS_TOOLS_ROOT = os.environ["EFICAS_TOOLS_ROOT"]
__path_ok = True
elif "EFICAS_NOUVEAU_ROOT" in os.environ:
- __EFICAS_ROOT = os.environ["EFICAS_NOUVEAU_ROOT"]
+ __EFICAS_TOOLS_ROOT = os.environ["EFICAS_NOUVEAU_ROOT"]
__path_ok = True
else:
self.__msg += "\nKeyError:\n"+\
- " the required environment variable EFICAS_ROOT is unknown.\n"+\
- " You have either to be in SALOME environment, or to set\n"+\
- " this variable in your environment to the right path \"<...>\"\n"+\
- " to find an installed EFICAS application. For example:\n"+\
- " EFICAS_ROOT=\"<...>\" command\n"
+ " the required environment variable EFICAS_TOOLS_ROOT is unknown.\n"+\
+ " You have either to be in SALOME environment, or to set this\n"+\
+ " variable in your environment to the right path \"<...>\" to\n"+\
+ " find an installed EFICAS application. For example:\n"+\
+ " EFICAS_TOOLS_ROOT=\"<...>\" command\n"
__path_ok = False
try:
import adao
self.__path_settings_ok = True
#----------------
if self.__path_settings_ok:
- sys.path.insert(0,__EFICAS_ROOT)
+ sys.path.insert(0,__EFICAS_TOOLS_ROOT)
sys.path.insert(0,os.path.join(adao.adao_py_dir,"daEficas"))
if __addpath is not None and os.path.exists(os.path.abspath(__addpath)):
sys.path.insert(0,os.path.abspath(__addpath))
"""
__author__ = "Jean-Philippe ARGAUD"
-import os, copy, types, sys, logging, numpy
+import os, copy, types, sys, logging, numpy, itertools
from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
reducingMemoryUse = False,
avoidingRedundancy = True,
toleranceInRedundancy = 1.e-18,
- lenghtOfRedundancy = -1,
+ lengthOfRedundancy = -1,
mpEnabled = False,
mpWorkers = None,
mfEnabled = False,
if avoidingRedundancy:
self.__avoidRC = True
self.__tolerBP = float(toleranceInRedundancy)
- self.__lenghtRJ = int(lenghtOfRedundancy)
+ self.__lengthRJ = int(lengthOfRedundancy)
self.__listJPCP = [] # Jacobian Previous Calculated Points
self.__listJPCI = [] # Jacobian Previous Calculated Increment
self.__listJPCR = [] # Jacobian Previous Calculated Results
if __Produit is None or self.__avoidRC:
_Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
if self.__avoidRC:
- if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
- while len(self.__listJPCP) > self.__lenghtRJ:
+ if self.__lengthRJ < 0: self.__lengthRJ = 2 * _X.size
+ while len(self.__listJPCP) > self.__lengthRJ:
self.__listJPCP.pop(0)
self.__listJPCI.pop(0)
self.__listJPCR.pop(0)
raise ValueError("Incorrect array definition of vector data")
if not isinstance(__Bounds, numpy.ndarray): # Is an array
raise ValueError("Incorrect array definition of bounds data")
- if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght
+ if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
raise ValueError("Incorrectly shaped bounds data")
#
return Xa + EnsembleOfAnomalies( __EnXn )
+# ==============================================================================
def BuildComplexSampleList(
    __SampleAsnUplet,
    __SampleAsExplicitHyperCube,
    __SampleAsMinMaxStepHyperCube,
    __SampleAsIndependantRandomVariables,
    __X0,
    ):
    """
    Build an iterable of sampling points from one of four mutually exclusive
    sampling specifications (checked in this priority order):

    - __SampleAsnUplet: explicit list of states, each the same size as __X0;
    - __SampleAsExplicitHyperCube: per-axis lists of coordinates, combined
      by Cartesian product;
    - __SampleAsMinMaxStepHyperCube: per-axis [min, max, step] triplets,
      expanded to coordinates then combined by Cartesian product;
    - __SampleAsIndependantRandomVariables: per-axis
      ('distribution', (parameters), length) triplets drawn with
      numpy.random, then combined by Cartesian product.

    If every specification is empty, returns an iterator over the single
    point __X0. Raises ValueError on malformed specifications or on a
    state size mismatch with __X0.
    """
    # ---------------------------
    if len(__SampleAsnUplet) > 0:
        sampleList = __SampleAsnUplet
        # Validate that each explicit state has the same size as the reference point
        for i,Xx in enumerate(sampleList):
            if numpy.ravel(Xx).size != __X0.size:
                # Bug fix: was "X0.size" (NameError); the parameter is "__X0"
                raise ValueError("The size %i of the %ith state X in the sample and %i of the checking point Xb are different, they have to be identical."%(numpy.ravel(Xx).size,i+1,__X0.size))
    elif len(__SampleAsExplicitHyperCube) > 0:
        sampleList = itertools.product(*list(__SampleAsExplicitHyperCube))
    elif len(__SampleAsMinMaxStepHyperCube) > 0:
        coordinatesList = []
        for i,dim in enumerate(__SampleAsMinMaxStepHyperCube):
            if len(dim) != 3:
                raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be [min,max,step]."%(i,dim))
            else:
                # 1 + (max-min)/step points, endpoints included
                coordinatesList.append(numpy.linspace(dim[0],dim[1],1+int((float(dim[1])-float(dim[0]))/float(dim[2]))))
        sampleList = itertools.product(*coordinatesList)
    elif len(__SampleAsIndependantRandomVariables) > 0:
        coordinatesList = []
        for i,dim in enumerate(__SampleAsIndependantRandomVariables):
            if len(dim) != 3:
                raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be ('distribution',(parameters),length) with distribution in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]."%(i,dim))
            elif not( str(dim[0]) in ['normal','lognormal','uniform','weibull'] and hasattr(numpy.random,dim[0]) ):
                raise ValueError("For dimension %i, the distribution name \"%s\" is not allowed, please choose in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]"%(i,dim[0]))
            else:
                # Whitelisted above, so getattr cannot fall back to the default
                distribution = getattr(numpy.random,str(dim[0]),'normal')
                coordinatesList.append(distribution(*dim[1], size=max(1,int(dim[2]))))
        sampleList = itertools.product(*coordinatesList)
    else:
        # No sampling specification: degenerate sample reduced to the single point __X0
        sampleList = iter([__X0,])
    # ----------
    return sampleList
+
# ==============================================================================
def multiXOsteps(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle,
__CovForecast = False, __LinEvolution = False,
name = "ADAO"
version = "9.10.0"
year = "2022"
-date = "lundi 12 décembre 2022, 12:12:12 (UTC+0100)"
+date = "lundi 14 novembre 2022, 12:12:12 (UTC+0100)"
longname = name + ", a module for Data Assimilation and Optimization"
cata = "V" + version.replace(".","_")
"InputValuesTest",
"ObserverTest",
]
+TaskAlgos = []
AlgoDataRequirements = {}
AlgoDataRequirements["3DVAR"] = [