From: Jean-Philippe ARGAUD Date: Wed, 10 Apr 2024 11:57:47 +0000 (+0200) Subject: Documentation update and review corrections X-Git-Tag: V9_13_0a1~16 X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=96c845367f7f71af429c06500eb6ab738cc9c5e1;p=modules%2Fadao.git Documentation update and review corrections --- diff --git a/doc/en/images/sampling_01_SampleAsnUplet.png b/doc/en/images/sampling_01_SampleAsnUplet.png index 00f30b7..a10feeb 100644 Binary files a/doc/en/images/sampling_01_SampleAsnUplet.png and b/doc/en/images/sampling_01_SampleAsnUplet.png differ diff --git a/doc/en/images/sampling_02_SampleAsExplicitHyperCube.png b/doc/en/images/sampling_02_SampleAsExplicitHyperCube.png index 00f30b7..a10feeb 100644 Binary files a/doc/en/images/sampling_02_SampleAsExplicitHyperCube.png and b/doc/en/images/sampling_02_SampleAsExplicitHyperCube.png differ diff --git a/doc/en/images/sampling_03_SampleAsMinMaxStepHyperCube.png b/doc/en/images/sampling_03_SampleAsMinMaxStepHyperCube.png index 00f30b7..a10feeb 100644 Binary files a/doc/en/images/sampling_03_SampleAsMinMaxStepHyperCube.png and b/doc/en/images/sampling_03_SampleAsMinMaxStepHyperCube.png differ diff --git a/doc/en/images/sampling_04_SampleAsMinMaxLatinHyperCube.png b/doc/en/images/sampling_04_SampleAsMinMaxLatinHyperCube.png index 0d25357..aab97d3 100644 Binary files a/doc/en/images/sampling_04_SampleAsMinMaxLatinHyperCube.png and b/doc/en/images/sampling_04_SampleAsMinMaxLatinHyperCube.png differ diff --git a/doc/en/images/sampling_05_SampleAsMinMaxSobolSequence.png b/doc/en/images/sampling_05_SampleAsMinMaxSobolSequence.png index 3def1ed..4bee257 100644 Binary files a/doc/en/images/sampling_05_SampleAsMinMaxSobolSequence.png and b/doc/en/images/sampling_05_SampleAsMinMaxSobolSequence.png differ diff --git a/doc/en/images/sampling_06_SampleAsIndependantRandomVariables_normal.png b/doc/en/images/sampling_06_SampleAsIndependantRandomVariables_normal.png index dc2b051..6fbfed6 100644 Binary files a/doc/en/images/sampling_06_SampleAsIndependantRandomVariables_normal.png and b/doc/en/images/sampling_06_SampleAsIndependantRandomVariables_normal.png differ diff --git a/doc/en/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png b/doc/en/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png index 57021b4..a18d2d7 100644 Binary files a/doc/en/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png and b/doc/en/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png differ diff --git a/doc/en/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png b/doc/en/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png index 6295aea..4e223aa 100644 Binary files a/doc/en/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png and b/doc/en/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png differ diff --git a/doc/en/ref_algorithm_3DVAR.rst b/doc/en/ref_algorithm_3DVAR.rst index 6d6f302..bd905fe 100644 --- a/doc/en/ref_algorithm_3DVAR.rst +++ b/doc/en/ref_algorithm_3DVAR.rst @@ -74,6 +74,15 @@ allows to improve the estimation of *a posteriori* error covariances. This extension is obtained by using the "E3DVAR" variant of the filtering algorithm :ref:`section_ref_algorithm_EnsembleKalmanFilter`. +.. ------------------------------------ .. +.. include:: snippets/Header2Algo12.rst + +.. include:: snippets/FeaturePropNonLocalOptimization.rst + +.. include:: snippets/FeaturePropDerivativeNeeded.rst + +.. 
include:: snippets/FeaturePropParallelDerivativesOnly.rst + .. ------------------------------------ .. .. include:: snippets/Header2Algo02.rst diff --git a/doc/en/ref_algorithm_AdjointTest.rst b/doc/en/ref_algorithm_AdjointTest.rst index f0052cd..4a930af 100644 --- a/doc/en/ref_algorithm_AdjointTest.rst +++ b/doc/en/ref_algorithm_AdjointTest.rst @@ -32,7 +32,8 @@ Checking algorithm "*AdjointTest*" This algorithm allows to check the quality of the adjoint of an operator :math:`F`, by computing a residue whose theoretical properties are known. The -test is applicable to any operator, of evolution or observation. +test is applicable to any operator, of evolution :math:`\mathcal{D}` or +observation :math:`\mathcal{H}`. For all formulas, with :math:`\mathbf{x}` the current verification point, we take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and diff --git a/doc/en/ref_algorithm_GradientTest.rst b/doc/en/ref_algorithm_GradientTest.rst index ca24b21..08064fd 100644 --- a/doc/en/ref_algorithm_GradientTest.rst +++ b/doc/en/ref_algorithm_GradientTest.rst @@ -30,9 +30,10 @@ Checking algorithm "*GradientTest*" .. ------------------------------------ .. .. include:: snippets/Header2Algo01.rst -This algorithm allows to check the quality of the adjoint operator, by +This algorithm allows to check the quality of the gradient of an operator, by calculating a residue with known theoretical properties. Different residue -formula are available. +formulas are available. The test is applicable to any operator, of evolution +:math:`\mathcal{D}` or observation :math:`\mathcal{H}`. In any cases, one take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and :math:`\mathbf{dx}=\alpha*\mathbf{dx}_0` with :math:`\alpha_0` a user scaling diff --git a/doc/en/ref_algorithm_LinearityTest.rst b/doc/en/ref_algorithm_LinearityTest.rst index 7b39c83..5cfa3d4 100644 --- a/doc/en/ref_algorithm_LinearityTest.rst +++ b/doc/en/ref_algorithm_LinearityTest.rst @@ -32,10 +32,14 @@ Checking algorithm "*LinearityTest*" This algorithm allows to check the linear quality of the operator, by calculating a residue with known theoretical properties. Different residue -formula are available. - -In any cases, one take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and -:math:`\mathbf{dx}=\alpha*\mathbf{dx}_0`. :math:`F` is the calculation code. +formulas are available. The test is applicable to any operator, of evolution +:math:`\mathcal{D}` or observation :math:`\mathcal{H}`. + +In all cases, with :math:`\mathbf{x}` the current verification point, one takes +:math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and +:math:`\mathbf{dx}=\alpha*\mathbf{dx}_0` with :math:`\alpha_0` a user scaling +parameter, equal to 1 by default. :math:`F` is the calculation code (given here by +the user by using the observation operator command "*ObservationOperator*"). "CenteredDL" residue ******************** diff --git a/doc/en/ref_algorithm_LocalSensitivityTest.rst b/doc/en/ref_algorithm_LocalSensitivityTest.rst index 9506365..c4f2b27 100644 --- a/doc/en/ref_algorithm_LocalSensitivityTest.rst +++ b/doc/en/ref_algorithm_LocalSensitivityTest.rst @@ -30,15 +30,15 @@ Checking algorithm "*LocalSensitivityTest*" .. ------------------------------------ .. .. include:: snippets/Header2Algo01.rst -This algorithm allows to calculate the value of the Jacobian of the operator -:math:`H` with respect to the input variables :math:`\mathbf{x}`.
This operator -appears in the relation: +This algorithm allows to calculate the value of the Jacobian of the observation +operator :math:`\mathcal{H}` with respect to the input variables +:math:`\mathbf{x}`. This operator appears in the relation: -.. math:: \mathbf{y} = H(\mathbf{x}) +.. math:: \mathbf{y} = \mathcal{H}(\mathbf{x}) (see :ref:`section_theory` for further explanations). This Jacobian is the -linearized operator (or the tangent one) :math:`\mathbf{H}` of the :math:`H` -near the chosen checking point. +linearized operator (or the tangent one) :math:`\mathbf{H}` of the +:math:`\mathcal{H}` near the chosen checking point. .. ------------------------------------ .. .. include:: snippets/Header2Algo02.rst diff --git a/doc/en/ref_algorithm_TangentTest.rst b/doc/en/ref_algorithm_TangentTest.rst index 76df818..441acdb 100644 --- a/doc/en/ref_algorithm_TangentTest.rst +++ b/doc/en/ref_algorithm_TangentTest.rst @@ -32,7 +32,8 @@ Checking algorithm "*TangentTest*" This algorithm allows to check the quality of the tangent operator, by calculating a residue whose theoretical properties are known. The test is -applicable to any operator, of evolution or observation. +applicable to any operator, of evolution :math:`\mathcal{D}` or observation +:math:`\mathcal{H}`. For all formulas, with :math:`\mathbf{x}` the current verification point, we take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and diff --git a/doc/en/scripts/simple_3DVAR1.png b/doc/en/scripts/simple_3DVAR1.png index 648154e..d7127cf 100644 Binary files a/doc/en/scripts/simple_3DVAR1.png and b/doc/en/scripts/simple_3DVAR1.png differ diff --git a/doc/en/scripts/simple_3DVAR1Plus.png b/doc/en/scripts/simple_3DVAR1Plus.png index 56c498a..143125e 100644 Binary files a/doc/en/scripts/simple_3DVAR1Plus.png and b/doc/en/scripts/simple_3DVAR1Plus.png differ diff --git a/doc/en/scripts/simple_3DVAR2_state.png b/doc/en/scripts/simple_3DVAR2_state.png index 1c69070..9b96418 100644 Binary files a/doc/en/scripts/simple_3DVAR2_state.png and b/doc/en/scripts/simple_3DVAR2_state.png differ diff --git a/doc/en/scripts/simple_3DVAR2_variance.png b/doc/en/scripts/simple_3DVAR2_variance.png index 2aa05b6..7c0181c 100644 Binary files a/doc/en/scripts/simple_3DVAR2_variance.png and b/doc/en/scripts/simple_3DVAR2_variance.png differ diff --git a/doc/en/scripts/simple_3DVAR3_state.png b/doc/en/scripts/simple_3DVAR3_state.png index afff271..f9a01d4 100644 Binary files a/doc/en/scripts/simple_3DVAR3_state.png and b/doc/en/scripts/simple_3DVAR3_state.png differ diff --git a/doc/en/scripts/simple_3DVAR3_variance.png b/doc/en/scripts/simple_3DVAR3_variance.png index 513175b..3988234 100644 Binary files a/doc/en/scripts/simple_3DVAR3_variance.png and b/doc/en/scripts/simple_3DVAR3_variance.png differ diff --git a/doc/en/scripts/simple_DerivativeFreeOptimization.png b/doc/en/scripts/simple_DerivativeFreeOptimization.png index a63dd6c..d68b0bd 100644 Binary files a/doc/en/scripts/simple_DerivativeFreeOptimization.png and b/doc/en/scripts/simple_DerivativeFreeOptimization.png differ diff --git a/doc/en/scripts/simple_KalmanFilter1_state.png b/doc/en/scripts/simple_KalmanFilter1_state.png index 769bd79..05b4920 100644 Binary files a/doc/en/scripts/simple_KalmanFilter1_state.png and b/doc/en/scripts/simple_KalmanFilter1_state.png differ diff --git a/doc/en/scripts/simple_KalmanFilter1_variance.png b/doc/en/scripts/simple_KalmanFilter1_variance.png index 8a45364..de88046 100644 Binary files a/doc/en/scripts/simple_KalmanFilter1_variance.png and 
b/doc/en/scripts/simple_KalmanFilter1_variance.png differ diff --git a/doc/en/scripts/simple_KalmanFilter2_state.png b/doc/en/scripts/simple_KalmanFilter2_state.png index 769bd79..05b4920 100644 Binary files a/doc/en/scripts/simple_KalmanFilter2_state.png and b/doc/en/scripts/simple_KalmanFilter2_state.png differ diff --git a/doc/en/scripts/simple_KalmanFilter2_variance.png b/doc/en/scripts/simple_KalmanFilter2_variance.png index 8a45364..de88046 100644 Binary files a/doc/en/scripts/simple_KalmanFilter2_variance.png and b/doc/en/scripts/simple_KalmanFilter2_variance.png differ diff --git a/doc/en/scripts/simple_NonLinearLeastSquares.png b/doc/en/scripts/simple_NonLinearLeastSquares.png index a63dd6c..d68b0bd 100644 Binary files a/doc/en/scripts/simple_NonLinearLeastSquares.png and b/doc/en/scripts/simple_NonLinearLeastSquares.png differ diff --git a/doc/en/scripts/simple_ParticleSwarmOptimization1.png b/doc/en/scripts/simple_ParticleSwarmOptimization1.png index f037f98..6f7f37e 100644 Binary files a/doc/en/scripts/simple_ParticleSwarmOptimization1.png and b/doc/en/scripts/simple_ParticleSwarmOptimization1.png differ diff --git a/doc/en/snippets/FeaturePropDerivativeFree.rst b/doc/en/snippets/FeaturePropDerivativeFree.rst new file mode 100644 index 0000000..58658fe --- /dev/null +++ b/doc/en/snippets/FeaturePropDerivativeFree.rst @@ -0,0 +1,6 @@ +.. index:: single: Derivation not required + +- The methods proposed by this algorithm **do not require derivation of the + objective function or of one of the operators**, thus avoiding this + additional cost when derivatives are calculated numerically by multiple + evaluations. diff --git a/doc/en/snippets/FeaturePropDerivativeNeeded.rst b/doc/en/snippets/FeaturePropDerivativeNeeded.rst new file mode 100644 index 0000000..f8bd752 --- /dev/null +++ b/doc/en/snippets/FeaturePropDerivativeNeeded.rst @@ -0,0 +1,7 @@ +.. index:: single: Derivation required + +- The methods proposed by this algorithm **require the derivation of the + objective function or of one of the operators**. It requires that at least + one or both of the observation or evolution operators be differentiable, and + this implies an additional cost in the case where the derivatives are + calculated numerically by multiple evaluations. diff --git a/doc/en/snippets/FeaturePropGlobalOptimization.rst b/doc/en/snippets/FeaturePropGlobalOptimization.rst new file mode 100644 index 0000000..bde4741 --- /dev/null +++ b/doc/en/snippets/FeaturePropGlobalOptimization.rst @@ -0,0 +1,7 @@ +.. index:: single: Global optimization + +- The optimization methods proposed by this algorithm perform a **global search + for the minimum**, theoretically achieving a globally optimal state over the + search domain. However, this global optimality is achieved "*at + convergence*", which means in long or infinite time during an iterative + optimization "*with real values*" (as opposed to "*with integer values*"). diff --git a/doc/en/snippets/FeaturePropLocalOptimization.rst b/doc/en/snippets/FeaturePropLocalOptimization.rst new file mode 100644 index 0000000..ea218ac --- /dev/null +++ b/doc/en/snippets/FeaturePropLocalOptimization.rst @@ -0,0 +1,5 @@ +.. index:: single: Local optimization + +- The optimization methods proposed by this algorithm perform a **local search + for the minimum**, theoretically enabling a locally optimal state (as opposed + to a "*globally optimal*" state) to be reached. 
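Editor's note on the FeaturePropDerivativeFree / FeaturePropDerivativeNeeded snippets added above: the "additional cost when derivatives are calculated numerically by multiple evaluations" can be made concrete with the small sketch below. It is illustrative only and not part of the commit nor of the ADAO code base; the helper name and the quadratic objective are invented for the example. A forward-difference gradient of a function F of n variables needs n evaluations of F in addition to the nominal one, and these n evaluations are mutually independent, which is exactly what the FeaturePropParallelDerivativesOnly snippet proposes to distribute::

    import numpy as np

    def finite_difference_gradient(F, x, h=1.0e-6):
        # Forward differences: one nominal evaluation of F plus n shifted evaluations,
        # all independent of each other and therefore parallelizable.
        x = np.asarray(x, dtype=float)
        f0 = F(x)
        grad = np.empty_like(x)
        for i in range(x.size):
            dx = np.zeros_like(x)
            dx[i] = h
            grad[i] = (F(x + dx) - f0) / h
        return grad

    # Example with a 3-variable quadratic: the gradient costs 4 evaluations of F
    # and approaches the exact value [2, 4, 6] as h decreases.
    F = lambda x: float(np.sum(x**2))
    print(finite_difference_gradient(F, [1.0, 2.0, 3.0]))

This is the extra cost that the derivative-free methods avoid, and that gradient-based methods incur whenever no analytical tangent or adjoint operator is supplied.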
diff --git a/doc/en/snippets/FeaturePropNonLocalOptimization.rst b/doc/en/snippets/FeaturePropNonLocalOptimization.rst new file mode 100644 index 0000000..45cd8c1 --- /dev/null +++ b/doc/en/snippets/FeaturePropNonLocalOptimization.rst @@ -0,0 +1,6 @@ +.. index:: single: Non local optimization + +- The optimization methods proposed by this algorithm perform a **non-local + search for the minimum**, without however ensuring a global search. This is + the case when optimization methods have the ability to avoid being trapped by + the first local minimum found. These capabilities are sometimes heuristic. diff --git a/doc/en/snippets/FeaturePropParallelAlgorithm.rst b/doc/en/snippets/FeaturePropParallelAlgorithm.rst new file mode 100644 index 0000000..a8908e7 --- /dev/null +++ b/doc/en/snippets/FeaturePropParallelAlgorithm.rst @@ -0,0 +1,7 @@ +.. index:: single: Algorithmic parallelism included + +- The methods proposed by this algorithm **have internal parallelism**, and can + therefore take advantage of computational distribution resources. The + potential interaction between the internal parallelism of the methods and the + parallelism that may be present in the user's observation or evolution + operators must therefore be carefully tuned. diff --git a/doc/en/snippets/FeaturePropParallelDerivativesOnly.rst b/doc/en/snippets/FeaturePropParallelDerivativesOnly.rst new file mode 100644 index 0000000..5e2c03d --- /dev/null +++ b/doc/en/snippets/FeaturePropParallelDerivativesOnly.rst @@ -0,0 +1,7 @@ +.. index:: single: Parallelism of derivation + +- The methods proposed by this algorithm **have no internal parallelism, but + use the numerical derivation of operator(s), which can be parallelized**. The + potential interaction between the parallelism of the numerical derivation and + the parallelism that may be present in the user's observation or evolution + operators must therefore be carefully tuned. diff --git a/doc/en/snippets/FeaturePropParallelFree.rst b/doc/en/snippets/FeaturePropParallelFree.rst new file mode 100644 index 0000000..192e9a4 --- /dev/null +++ b/doc/en/snippets/FeaturePropParallelFree.rst @@ -0,0 +1,7 @@ +.. index:: single: Absence of algorithmic parallelism + +- The methods proposed by this algorithm **have no internal parallelism**, and + therefore cannot take advantage of computer resources for distributing + calculations. The methods are sequential, and any use of parallelism + resources is therefore reserved for the user's observation or evolution + operators. diff --git a/doc/en/snippets/Header2Algo12.rst b/doc/en/snippets/Header2Algo12.rst new file mode 100644 index 0000000..709ef70 --- /dev/null +++ b/doc/en/snippets/Header2Algo12.rst @@ -0,0 +1,8 @@ +Some noteworthy properties of the implemented algorithm ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +To complete the description, we summarize here a few notable properties of the +algorithm or of its implementation. These properties may have an influence on +how it is used or on its computational performance. For further information, +please refer to the more comprehensive references given at the end of this +algorithm description. diff --git a/doc/en/snippets/ModuleCompatibility.rst b/doc/en/snippets/ModuleCompatibility.rst index 22ae77b..eb55df6 100644 --- a/doc/en/snippets/ModuleCompatibility.rst +++ b/doc/en/snippets/ModuleCompatibility.rst @@ -16,7 +16,7 @@ versions within the range described below. 
Python, 3.6.5, 3.12.2 Numpy, 1.14.3, 1.26.4 - Scipy, 0.19.1, 1.12.0 - MatplotLib, 2.2.2, 3.8.3 + Scipy, 0.19.1, 1.13.0 + MatplotLib, 2.2.2, 3.8.4 GnuplotPy, 1.8, 1.8 NLopt, 2.4.2, 2.7.1 diff --git a/doc/fr/images/sampling_01_SampleAsnUplet.png b/doc/fr/images/sampling_01_SampleAsnUplet.png index 00e993d..e903025 100644 Binary files a/doc/fr/images/sampling_01_SampleAsnUplet.png and b/doc/fr/images/sampling_01_SampleAsnUplet.png differ diff --git a/doc/fr/images/sampling_02_SampleAsExplicitHyperCube.png b/doc/fr/images/sampling_02_SampleAsExplicitHyperCube.png index 00e993d..e903025 100644 Binary files a/doc/fr/images/sampling_02_SampleAsExplicitHyperCube.png and b/doc/fr/images/sampling_02_SampleAsExplicitHyperCube.png differ diff --git a/doc/fr/images/sampling_03_SampleAsMinMaxStepHyperCube.png b/doc/fr/images/sampling_03_SampleAsMinMaxStepHyperCube.png index 00e993d..e903025 100644 Binary files a/doc/fr/images/sampling_03_SampleAsMinMaxStepHyperCube.png and b/doc/fr/images/sampling_03_SampleAsMinMaxStepHyperCube.png differ diff --git a/doc/fr/images/sampling_04_SampleAsMinMaxLatinHyperCube.png b/doc/fr/images/sampling_04_SampleAsMinMaxLatinHyperCube.png index e1f4b21..657a403 100644 Binary files a/doc/fr/images/sampling_04_SampleAsMinMaxLatinHyperCube.png and b/doc/fr/images/sampling_04_SampleAsMinMaxLatinHyperCube.png differ diff --git a/doc/fr/images/sampling_05_SampleAsMinMaxSobolSequence.png b/doc/fr/images/sampling_05_SampleAsMinMaxSobolSequence.png index 4bd68e7..ad1accf 100644 Binary files a/doc/fr/images/sampling_05_SampleAsMinMaxSobolSequence.png and b/doc/fr/images/sampling_05_SampleAsMinMaxSobolSequence.png differ diff --git a/doc/fr/images/sampling_06_SampleAsIndependantRandomVariables_normal.png b/doc/fr/images/sampling_06_SampleAsIndependantRandomVariables_normal.png index 25b3b8b..265457e 100644 Binary files a/doc/fr/images/sampling_06_SampleAsIndependantRandomVariables_normal.png and b/doc/fr/images/sampling_06_SampleAsIndependantRandomVariables_normal.png differ diff --git a/doc/fr/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png b/doc/fr/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png index 0fb7d09..894f67f 100644 Binary files a/doc/fr/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png and b/doc/fr/images/sampling_07_SampleAsIndependantRandomVariables_uniform.png differ diff --git a/doc/fr/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png b/doc/fr/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png index c548aab..1208a8e 100644 Binary files a/doc/fr/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png and b/doc/fr/images/sampling_08_SampleAsIndependantRandomVariables_weibull.png differ diff --git a/doc/fr/ref_algorithm_3DVAR.rst b/doc/fr/ref_algorithm_3DVAR.rst index 065e996..037740e 100644 --- a/doc/fr/ref_algorithm_3DVAR.rst +++ b/doc/fr/ref_algorithm_3DVAR.rst @@ -80,6 +80,15 @@ ces covariances d'erreurs *a posteriori*. On atteint cette extension en utilisant le variant "E3DVAR" de l'algorithme de filtrage :ref:`section_ref_algorithm_EnsembleKalmanFilter`. +.. ------------------------------------ .. +.. include:: snippets/Header2Algo12.rst + +.. include:: snippets/FeaturePropNonLocalOptimization.rst + +.. include:: snippets/FeaturePropDerivativeNeeded.rst + +.. include:: snippets/FeaturePropParallelDerivativesOnly.rst + .. ------------------------------------ .. .. 
include:: snippets/Header2Algo02.rst diff --git a/doc/fr/ref_algorithm_AdjointTest.rst b/doc/fr/ref_algorithm_AdjointTest.rst index cc7dbf8..d5c7735 100644 --- a/doc/fr/ref_algorithm_AdjointTest.rst +++ b/doc/fr/ref_algorithm_AdjointTest.rst @@ -32,8 +32,8 @@ Algorithme de vérification "*AdjointTest*" Cet algorithme permet de vérifier la qualité de l'adjoint d'un opérateur :math:`F`, en calculant un résidu dont les propriétés théoriques sont connues. -Le test est applicable à un opérateur quelconque, d'évolution comme -d'observation. +Le test est applicable à un opérateur quelconque, d'évolution +:math:`\mathcal{D}` comme d'observation :math:`\mathcal{H}`. Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et diff --git a/doc/fr/ref_algorithm_GradientTest.rst b/doc/fr/ref_algorithm_GradientTest.rst index bf8c1a3..86f6b63 100644 --- a/doc/fr/ref_algorithm_GradientTest.rst +++ b/doc/fr/ref_algorithm_GradientTest.rst @@ -30,10 +30,11 @@ Algorithme de vérification "*GradientTest*" .. ------------------------------------ .. .. include:: snippets/Header2Algo01.rst -Cet algorithme permet de vérifier la qualité du gradient de l'opérateur, en +Cet algorithme permet de vérifier la qualité du gradient d'un opérateur, en calculant un résidu dont les propriétés théoriques sont connues. Plusieurs formules de résidu sont disponibles. Le test est applicable à un opérateur -quelconque, d'évolution comme d'observation. +quelconque, d'évolution :math:`\mathcal{D}` comme d'observation +:math:`\mathcal{H}`. Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et diff --git a/doc/fr/ref_algorithm_LinearityTest.rst b/doc/fr/ref_algorithm_LinearityTest.rst index f4bf507..617cd2f 100644 --- a/doc/fr/ref_algorithm_LinearityTest.rst +++ b/doc/fr/ref_algorithm_LinearityTest.rst @@ -34,7 +34,7 @@ Cet algorithme permet de vérifier la qualité de linéarité d'un opérateur, e calculant un résidu dont les propriétés théoriques sont connues. Plusieurs formules de résidu sont utilisables et sont décrites ci-dessous avec leur interprétation. Le test est applicable à un opérateur quelconque, d'évolution -comme d'observation. +:math:`\mathcal{D}` comme d'observation :math:`\mathcal{H}`. Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et diff --git a/doc/fr/ref_algorithm_LocalSensitivityTest.rst b/doc/fr/ref_algorithm_LocalSensitivityTest.rst index 63cc32f..ad6db9b 100644 --- a/doc/fr/ref_algorithm_LocalSensitivityTest.rst +++ b/doc/fr/ref_algorithm_LocalSensitivityTest.rst @@ -31,14 +31,14 @@ Algorithme de vérification "*LocalSensitivityTest*" .. include:: snippets/Header2Algo01.rst Cet algorithme permet d'établir la valeur de la Jacobienne de l'opérateur -:math:`H` par rapport aux variables d'entrée :math:`\mathbf{x}`. Cet opérateur -intervient dans la relation : +d'observation :math:`\mathcal{H}` par rapport aux variables d'entrée +:math:`\mathbf{x}`. Cet opérateur intervient dans la relation : -.. math:: \mathbf{y} = H(\mathbf{x}) +.. math:: \mathbf{y} = \mathcal{H}(\mathbf{x}) (voir :ref:`section_theory` pour de plus amples explications). Cette jacobienne est l'opérateur linéarisé (ou opérateur tangent) :math:`\mathbf{H}` de -:math:`H` autour du point de vérification choisi. +:math:`\mathcal{H}` autour du point de vérification choisi. ..
------------------------------------ .. .. include:: snippets/Header2Algo02.rst diff --git a/doc/fr/ref_algorithm_TangentTest.rst b/doc/fr/ref_algorithm_TangentTest.rst index 658e6d8..bc05399 100644 --- a/doc/fr/ref_algorithm_TangentTest.rst +++ b/doc/fr/ref_algorithm_TangentTest.rst @@ -32,7 +32,8 @@ Algorithme de vérification "*TangentTest*" Cet algorithme permet de vérifier la qualité de l'opérateur tangent, en calculant un résidu dont les propriétés théoriques sont connues. Le test est -applicable à un opérateur quelconque, d'évolution comme d'observation. +applicable à un opérateur quelconque, d'évolution :math:`\mathcal{D}` comme +d'observation :math:`\mathcal{H}`. Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et diff --git a/doc/fr/scripts/simple_3DVAR1.png b/doc/fr/scripts/simple_3DVAR1.png index 70d3d2d..50724ec 100644 Binary files a/doc/fr/scripts/simple_3DVAR1.png and b/doc/fr/scripts/simple_3DVAR1.png differ diff --git a/doc/fr/scripts/simple_3DVAR1Plus.png b/doc/fr/scripts/simple_3DVAR1Plus.png index 561b07e..d0952c5 100644 Binary files a/doc/fr/scripts/simple_3DVAR1Plus.png and b/doc/fr/scripts/simple_3DVAR1Plus.png differ diff --git a/doc/fr/scripts/simple_3DVAR2_state.png b/doc/fr/scripts/simple_3DVAR2_state.png index d220543..f9f33ae 100644 Binary files a/doc/fr/scripts/simple_3DVAR2_state.png and b/doc/fr/scripts/simple_3DVAR2_state.png differ diff --git a/doc/fr/scripts/simple_3DVAR2_variance.png b/doc/fr/scripts/simple_3DVAR2_variance.png index b42c094..8ac41b3 100644 Binary files a/doc/fr/scripts/simple_3DVAR2_variance.png and b/doc/fr/scripts/simple_3DVAR2_variance.png differ diff --git a/doc/fr/scripts/simple_3DVAR3_state.png b/doc/fr/scripts/simple_3DVAR3_state.png index 2b2d7ee..71077cc 100644 Binary files a/doc/fr/scripts/simple_3DVAR3_state.png and b/doc/fr/scripts/simple_3DVAR3_state.png differ diff --git a/doc/fr/scripts/simple_3DVAR3_variance.png b/doc/fr/scripts/simple_3DVAR3_variance.png index 11c36bf..ed81dc3 100644 Binary files a/doc/fr/scripts/simple_3DVAR3_variance.png and b/doc/fr/scripts/simple_3DVAR3_variance.png differ diff --git a/doc/fr/scripts/simple_DerivativeFreeOptimization.png b/doc/fr/scripts/simple_DerivativeFreeOptimization.png index 2afe397..74a51d8 100644 Binary files a/doc/fr/scripts/simple_DerivativeFreeOptimization.png and b/doc/fr/scripts/simple_DerivativeFreeOptimization.png differ diff --git a/doc/fr/scripts/simple_KalmanFilter1_state.png b/doc/fr/scripts/simple_KalmanFilter1_state.png index 07ee3f2..2d80a3d 100644 Binary files a/doc/fr/scripts/simple_KalmanFilter1_state.png and b/doc/fr/scripts/simple_KalmanFilter1_state.png differ diff --git a/doc/fr/scripts/simple_KalmanFilter1_variance.png b/doc/fr/scripts/simple_KalmanFilter1_variance.png index 3d89cb1..6c2d1aa 100644 Binary files a/doc/fr/scripts/simple_KalmanFilter1_variance.png and b/doc/fr/scripts/simple_KalmanFilter1_variance.png differ diff --git a/doc/fr/scripts/simple_KalmanFilter2_state.png b/doc/fr/scripts/simple_KalmanFilter2_state.png index 07ee3f2..2d80a3d 100644 Binary files a/doc/fr/scripts/simple_KalmanFilter2_state.png and b/doc/fr/scripts/simple_KalmanFilter2_state.png differ diff --git a/doc/fr/scripts/simple_KalmanFilter2_variance.png b/doc/fr/scripts/simple_KalmanFilter2_variance.png index 3d89cb1..6c2d1aa 100644 Binary files a/doc/fr/scripts/simple_KalmanFilter2_variance.png and b/doc/fr/scripts/simple_KalmanFilter2_variance.png differ diff --git 
a/doc/fr/scripts/simple_NonLinearLeastSquares.png b/doc/fr/scripts/simple_NonLinearLeastSquares.png index 2afe397..74a51d8 100644 Binary files a/doc/fr/scripts/simple_NonLinearLeastSquares.png and b/doc/fr/scripts/simple_NonLinearLeastSquares.png differ diff --git a/doc/fr/scripts/simple_ParticleSwarmOptimization1.png b/doc/fr/scripts/simple_ParticleSwarmOptimization1.png index e2e9e8c..5e9f08c 100644 Binary files a/doc/fr/scripts/simple_ParticleSwarmOptimization1.png and b/doc/fr/scripts/simple_ParticleSwarmOptimization1.png differ diff --git a/doc/fr/snippets/FeaturePropDerivativeFree.rst b/doc/fr/snippets/FeaturePropDerivativeFree.rst new file mode 100644 index 0000000..e1eeaa4 --- /dev/null +++ b/doc/fr/snippets/FeaturePropDerivativeFree.rst @@ -0,0 +1,6 @@ +.. index:: single: Dérivation non requise + +- Les méthodes proposées par cet algorithme **ne requièrent pas de dérivation + de la fonction objectif ou de l'un des opérateurs**, permettant d'éviter ce + coût supplémentaire dans le cas où les dérivées sont calculées numériquement + par de multiples évaluations. diff --git a/doc/fr/snippets/FeaturePropDerivativeNeeded.rst b/doc/fr/snippets/FeaturePropDerivativeNeeded.rst new file mode 100644 index 0000000..0b1246a --- /dev/null +++ b/doc/fr/snippets/FeaturePropDerivativeNeeded.rst @@ -0,0 +1,7 @@ +.. index:: single: Dérivation requise + +- Les méthodes proposées par cet algorithme **requièrent la dérivation de la + fonction objectif ou de l'un des opérateurs**. Il nécessite que l'un au moins + des opérateurs d'observation ou d'évolution soit différentiable voire les + deux, et cela implique un coût supplémentaire dans le cas où les dérivées + sont calculées numériquement par de multiples évaluations. diff --git a/doc/fr/snippets/FeaturePropGlobalOptimization.rst b/doc/fr/snippets/FeaturePropGlobalOptimization.rst new file mode 100644 index 0000000..f095661 --- /dev/null +++ b/doc/fr/snippets/FeaturePropGlobalOptimization.rst @@ -0,0 +1,8 @@ +.. index:: single: Optimisation globale + +- Les méthodes d'optimisation proposées par cet algorithme effectuent une + **recherche globale du minimum**, permettant en théorie d'atteindre un état + globalement optimal sur le domaine de recherche. Cette optimalité globale est + néanmoins obtenue "*à convergence*", ce qui signifie en temps long ou infini + lors d'une optimisation itérative *à valeurs réelles* (par opposition *à + valeurs entières*). diff --git a/doc/fr/snippets/FeaturePropLocalOptimization.rst b/doc/fr/snippets/FeaturePropLocalOptimization.rst new file mode 100644 index 0000000..96fceeb --- /dev/null +++ b/doc/fr/snippets/FeaturePropLocalOptimization.rst @@ -0,0 +1,5 @@ +.. index:: single: Optimisation locale + +- Les méthodes d'optimisation proposées par cet algorithme effectuent une + **recherche locale du minimum**, permettant en théorie d'atteindre un état + localement optimal (par opposition à un état "*globalement optimal*"). diff --git a/doc/fr/snippets/FeaturePropNonLocalOptimization.rst b/doc/fr/snippets/FeaturePropNonLocalOptimization.rst new file mode 100644 index 0000000..d5f7d80 --- /dev/null +++ b/doc/fr/snippets/FeaturePropNonLocalOptimization.rst @@ -0,0 +1,7 @@ +.. index:: single: Optimisation non locale + +- Les méthodes d'optimisation proposées par cet algorithme effectuent une + **recherche non locale du minimum**, sans pour autant néanmoins assurer une + recherche globale. 
C'est le cas lorsque les méthodes d'optimisation + présentent des capacités d'éviter de rester bloquées par le premier minimum + local trouvé. Ces capacités sont parfois heuristiques. diff --git a/doc/fr/snippets/FeaturePropParallelAlgorithm.rst b/doc/fr/snippets/FeaturePropParallelAlgorithm.rst new file mode 100644 index 0000000..a78f27f --- /dev/null +++ b/doc/fr/snippets/FeaturePropParallelAlgorithm.rst @@ -0,0 +1,8 @@ +.. index:: single: Parallélisme algorithmique présent + +- Les méthodes proposées par cet algorithme **présentent un parallélisme + interne**, et peuvent donc profiter de ressources informatiques de + répartition de calculs. L'interaction potentielle, entre le parallélisme + interne des méthodes, et le parallélisme éventuellement présent dans les + opérateurs d'observation ou d'évolution de l'utilisateur, doit donc être + soigneusement réglée. diff --git a/doc/fr/snippets/FeaturePropParallelDerivativesOnly.rst b/doc/fr/snippets/FeaturePropParallelDerivativesOnly.rst new file mode 100644 index 0000000..939db42 --- /dev/null +++ b/doc/fr/snippets/FeaturePropParallelDerivativesOnly.rst @@ -0,0 +1,8 @@ +.. index:: single: Parallélisme de dérivation + +- Les méthodes proposées par cet algorithme **ne présentent pas de parallélisme + interne, mais utilisent la dérivation numérique d'opérateur(s) qui est, elle, + parallélisable**. L'interaction potentielle, entre le parallélisme de la + dérivation numérique, et le parallélisme éventuellement présent dans les + opérateurs d'observation ou d'évolution de l'utilisateur, doit donc être + soigneusement réglée. diff --git a/doc/fr/snippets/FeaturePropParallelFree.rst b/doc/fr/snippets/FeaturePropParallelFree.rst new file mode 100644 index 0000000..76234aa --- /dev/null +++ b/doc/fr/snippets/FeaturePropParallelFree.rst @@ -0,0 +1,7 @@ +.. index:: single: Parallélisme algorithmique absent + +- Les méthodes proposées par cet algorithme **ne présentent pas de parallélisme + interne**, et ne peuvent donc profiter de ressources informatiques de + répartition de calculs. Les méthodes sont séquentielles, et un usage éventuel + des ressources du parallélisme est donc réservé aux opérateurs d'observation + ou d'évolution de l'utilisateur. diff --git a/doc/fr/snippets/Header2Algo12.rst b/doc/fr/snippets/Header2Algo12.rst new file mode 100644 index 0000000..ad8d63e --- /dev/null +++ b/doc/fr/snippets/Header2Algo12.rst @@ -0,0 +1,8 @@ +Quelques propriétés notables de l'algorithme implémenté ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Pour compléter la description on synthétise ici quelques propriétés notables, +de l'algorithme ou de son implémentation. Ces propriétés peuvent avoir une +influence sur la manière de l'utiliser ou sur ses performances de calcul. Pour +de plus amples renseignements, on se reportera aux références plus complètes +indiquées à la fin du descriptif de cet algorithme. diff --git a/doc/fr/snippets/ModuleCompatibility.rst b/doc/fr/snippets/ModuleCompatibility.rst index e7281ce..f646b6d 100644 --- a/doc/fr/snippets/ModuleCompatibility.rst +++ b/doc/fr/snippets/ModuleCompatibility.rst @@ -17,7 +17,7 @@ l'étendue décrite ci-dessous. 
Python, 3.6.5, 3.12.2 Numpy, 1.14.3, 1.26.4 - Scipy, 0.19.1, 1.12.0 - MatplotLib, 2.2.2, 3.8.3 + Scipy, 0.19.1, 1.13.0 + MatplotLib, 2.2.2, 3.8.4 GnuplotPy, 1.8, 1.8 NLopt, 2.4.2, 2.7.1 diff --git a/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py b/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py index 6f6ac79..f935804 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py +++ b/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py @@ -109,7 +109,6 @@ def ecw2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="UKF"): else: Cm = None # - # Pndemi = numpy.real(scipy.linalg.cholesky(Pn)) Pndemi = numpy.real(scipy.linalg.sqrtm(Pn)) Xnmu = Xn + Pndemi @ SC nbSpts = SC.shape[1] diff --git a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py index 3444073..b6ed31a 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py +++ b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py @@ -146,6 +146,8 @@ def ecwnlls(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/ecwukf.py b/src/daComposant/daAlgorithms/Atoms/ecwukf.py index 107a5bb..465168d 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecwukf.py +++ b/src/daComposant/daAlgorithms/Atoms/ecwukf.py @@ -107,7 +107,6 @@ def ecwukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="UKF"): else: Cm = None # - # Pndemi = numpy.real(scipy.linalg.cholesky(Pn)) Pndemi = numpy.real(scipy.linalg.sqrtm(Pn)) Xnmu = Xn + Pndemi @ SC nbSpts = SC.shape[1] @@ -133,7 +132,6 @@ def ecwukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="UKF"): dXEnnmuXhmn = XEnnmu[:, point].flat - Xhmn Pmn += Wc[point] * numpy.outer(dXEnnmuXhmn, dXEnnmuXhmn) # - # Pmndemi = numpy.real(scipy.linalg.cholesky(Pmn)) Pmndemi = numpy.real(scipy.linalg.sqrtm(Pmn)) Xnnmu = Xhmn.reshape((-1, 1)) + Pmndemi @ SC # diff --git a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py index d6570c4..6fb3632 100644 --- a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py @@ -147,6 +147,8 @@ def incr3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/lbfgsb113hlt.py b/src/daComposant/daAlgorithms/Atoms/lbfgsb113hlt.py new file mode 100644 index 0000000..4aea5a3 --- /dev/null +++ b/src/daComposant/daAlgorithms/Atoms/lbfgsb113hlt.py @@ -0,0 +1,553 @@ +# Modification de la version 1.13.0 +# flake8: noqa +""" +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b + +""" + +## License for the Python wrapper +## ============================== + +## Copyright (c) 2004 David M. 
Cooke + +## Permission is hereby granted, free of charge, to any person obtaining a +## copy of this software and associated documentation files (the "Software"), +## to deal in the Software without restriction, including without limitation +## the rights to use, copy, modify, merge, publish, distribute, sublicense, +## and/or sell copies of the Software, and to permit persons to whom the +## Software is furnished to do so, subject to the following conditions: + +## The above copyright notice and this permission notice shall be included in +## all copies or substantial portions of the Software. + +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +## DEALINGS IN THE SOFTWARE. + +## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy + +import numpy as np +from numpy import array, asarray, float64, zeros +from scipy.optimize import _lbfgsb +from scipy.optimize._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt, + _wrap_callback, _check_unknown_options, + _prepare_scalar_function) +from scipy.optimize._constraints import old_bound_to_new + +from scipy.sparse.linalg import LinearOperator + +__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] + + +def fmin_l_bfgs_b(func, x0, fprime=None, args=(), + approx_grad=0, + bounds=None, m=10, factr=1e7, pgtol=1e-5, + epsilon=1e-8, + iprint=-1, maxfun=15000, maxiter=15000, disp=None, + callback=None, maxls=20): + """ + Minimize a function func using the L-BFGS-B algorithm. + + Parameters + ---------- + func : callable f(x,*args) + Function to minimize. + x0 : ndarray + Initial guess. + fprime : callable fprime(x,*args), optional + The gradient of `func`. If None, then `func` returns the function + value and the gradient (``f, g = func(x, *args)``), unless + `approx_grad` is True in which case `func` returns only ``f``. + args : sequence, optional + Arguments to pass to `func` and `fprime`. + approx_grad : bool, optional + Whether to approximate the gradient numerically (in which case + `func` returns only the function value). + bounds : list, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None or +-inf for one of ``min`` or + ``max`` when there is no bound in that direction. + m : int, optional + The maximum number of variable metric corrections + used to define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms in an + approximation to it.) + factr : float, optional + The iteration stops when + ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, + where ``eps`` is the machine precision, which is automatically + generated by the code. Typical values for `factr` are: 1e12 for + low accuracy; 1e7 for moderate accuracy; 10.0 for extremely + high accuracy. See Notes for relationship to `ftol`, which is exposed + (instead of `factr`) by the `scipy.optimize.minimize` interface to + L-BFGS-B. + pgtol : float, optional + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n} <= pgtol`` + where ``proj g_i`` is the i-th component of the projected gradient. 
+ epsilon : float, optional + Step size used when `approx_grad` is True, for numerically + calculating the gradient + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + disp : int, optional + If zero, then no output. If a positive number, then this over-rides + `iprint` (i.e., `iprint` gets the value of `disp`). + maxfun : int, optional + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int, optional + Maximum number of iterations. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + + Returns + ------- + x : array_like + Estimated position of the minimum. + f : float + Value of `func` at the minimum. + d : dict + Information dictionary. + + * d['warnflag'] is + + - 0 if converged, + - 1 if too many function evaluations or too many iterations, + - 2 if stopped for another reason, given in d['task'] + + * d['grad'] is the gradient at the minimum (should be 0 ish) + * d['funcalls'] is the number of function calls made. + * d['nit'] is the number of iterations. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'L-BFGS-B' `method` in particular. Note that the + `ftol` option is made available via that interface, while `factr` is + provided via this interface, where `factr` is the factor multiplying + the default machine floating-point precision to arrive at `ftol`: + ``ftol = factr * numpy.finfo(float).eps``. + + Notes + ----- + License of L-BFGS-B (FORTRAN code): + + The version included here (in fortran code) is 3.0 + (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd, + and Jorge Nocedal . It carries the following + condition for use: + + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + ---------- + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + + Examples + -------- + Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we define + an objective function ``f(m, b) = (y - y_model)**2``, where `y` describes the + observations and `y_model` the prediction of the linear model as + ``y_model = m*x + b``. 
The bounds for the parameters, ``m`` and ``b``, are arbitrarily + chosen as ``(0,5)`` and ``(5,10)`` for this example. + + >>> import numpy as np + >>> from scipy.optimize import fmin_l_bfgs_b + >>> X = np.arange(0, 10, 1) + >>> M = 2 + >>> B = 3 + >>> Y = M * X + B + >>> def func(parameters, *args): + ... x = args[0] + ... y = args[1] + ... m, b = parameters + ... y_model = m*x + b + ... error = sum(np.power((y - y_model), 2)) + ... return error + + >>> initial_values = np.array([0.0, 1.0]) + + >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ... approx_grad=True) + >>> x_opt, f_opt + array([1.99999999, 3.00000006]), 1.7746231151323805e-14 # may vary + + The optimized parameters in ``x_opt`` agree with the ground truth parameters + ``m`` and ``b``. Next, let us perform a bound contrained optimization using the `bounds` + parameter. + + >>> bounds = [(0, 5), (5, 10)] + >>> x_opt, f_op, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y), + ... approx_grad=True, bounds=bounds) + >>> x_opt, f_opt + array([1.65990508, 5.31649385]), 15.721334516453945 # may vary + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + # build options + if disp is None: + disp = iprint + callback = _wrap_callback(callback) + opts = {'disp': disp, + 'iprint': iprint, + 'maxcor': m, + 'ftol': factr * np.finfo(float).eps, + 'gtol': pgtol, + 'eps': epsilon, + 'maxfun': maxfun, + 'maxiter': maxiter, + 'callback': callback, + 'maxls': maxls} + + res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, + **opts) + d = {'grad': res['jac'], + 'task': res['message'], + 'funcalls': res['nfev'], + 'nit': res['nit'], + 'warnflag': res['status']} + f = res['fun'] + x = res['x'] + + return x, f, d + + +def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, + disp=None, maxcor=10, ftol=2.2204460492503131e-09, + gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000, + iprint=-1, callback=None, maxls=20, + finite_diff_rel_step=None, **unknown_options): + """ + Minimize a scalar function of one or more variables using the L-BFGS-B + algorithm. + + Options + ------- + disp : None or int + If `disp is None` (the default), then the supplied version of `iprint` + is used. If `disp is not None`, then it overrides the supplied version + of `iprint` with the behaviour you outlined. + maxcor : int + The maximum number of variable metric corrections used to + define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms + in an approximation to it.) + ftol : float + The iteration stops when ``(f^k - + f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. + gtol : float + The iteration will stop when ``max{|proj g_i | i = 1, ..., n} + <= gtol`` where ``proj g_i`` is the i-th component of the + projected gradient. + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + maxfun : int + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int + Maximum number of iterations. + iprint : int, optional + Controls the frequency of output. 
``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + + Notes + ----- + The option `ftol` is exposed via the `scipy.optimize.minimize` interface, + but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The + relationship between the two is ``ftol = factr * numpy.finfo(float).eps``. + I.e., `factr` multiplies the default machine floating-point precision to + arrive at `ftol`. + + """ + _check_unknown_options(unknown_options) + m = maxcor + pgtol = gtol + factr = ftol / np.finfo(float).eps + + x0 = asarray(x0).ravel() + n, = x0.shape + + # historically old-style bounds were/are expected by lbfgsb. + # That's still the case but we'll deal with new-style from here on, + # it's easier + if bounds is None: + pass + elif len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + else: + bounds = np.array(old_bound_to_new(bounds)) + + # check bounds + if (bounds[0] > bounds[1]).any(): + raise ValueError( + "LBFGSB - one of the lower bounds is greater than an upper bound." + ) + + # initial vector must lie within the bounds. Otherwise ScalarFunction and + # approx_derivative will cause problems + x0 = np.clip(x0, bounds[0], bounds[1]) + + if disp is not None: + if disp == 0: + iprint = -1 + else: + iprint = disp + + # _prepare_scalar_function can use bounds=None to represent no bounds + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + bounds=bounds, + finite_diff_rel_step=finite_diff_rel_step) + + func_and_grad = sf.fun_and_grad + + fortran_int = _lbfgsb.types.intvar.dtype + + nbd = zeros(n, fortran_int) + low_bnd = zeros(n, float64) + upper_bnd = zeros(n, float64) + bounds_map = {(-np.inf, np.inf): 0, + (1, np.inf): 1, + (1, 1): 2, + (-np.inf, 1): 3} + + if bounds is not None: + for i in range(0, n): + l, u = bounds[0, i], bounds[1, i] + if not np.isinf(l): + low_bnd[i] = l + l = 1 + if not np.isinf(u): + upper_bnd[i] = u + u = 1 + nbd[i] = bounds_map[l, u] + + if not maxls > 0: + raise ValueError('maxls must be positive.') + + x = array(x0, float64) + f = array(0.0, float64) + g = zeros((n,), float64) + wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) + iwa = zeros(3*n, fortran_int) + task = zeros(1, 'S60') + csave = zeros(1, 'S60') + lsave = zeros(4, fortran_int) + isave = zeros(44, fortran_int) + dsave = zeros(29, float64) + + task[:] = 'START' + + n_iterations = 0 + + while 1: + # g may become float32 if a user provides a function that calculates + # the Jacobian in float32 (see gh-18730). 
The underlying Fortran code + # expects float64, so upcast it + g = g.astype(np.float64) + # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, + pgtol, wa, iwa, task, iprint, csave, lsave, + isave, dsave, maxls) + task_str = task.tobytes() + if task_str.startswith(b'FG'): + # The minimization routine wants f and g at the current x. + # Note that interruptions due to maxfun are postponed + # until the completion of the current minimization iteration. + # Overwrite f and g: + f, g = func_and_grad(x) + if sf.nfev > maxfun: + task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' + 'EXCEEDS LIMIT') + elif task_str.startswith(b'NEW_X'): + # new iteration + n_iterations += 1 + + intermediate_result = OptimizeResult(x=x, fun=f) + if _call_callback_maybe_halt(callback, intermediate_result): + task[:] = 'STOP: CALLBACK REQUESTED HALT' + if n_iterations >= maxiter: + task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT' + elif sf.nfev > maxfun: + task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' + 'EXCEEDS LIMIT') + else: + break + + task_str = task.tobytes().strip(b'\x00').strip() + if task_str.startswith(b'CONV'): + warnflag = 0 + elif sf.nfev > maxfun or n_iterations >= maxiter: + warnflag = 1 + else: + warnflag = 2 + + # These two portions of the workspace are described in the mainlb + # subroutine in lbfgsb.f. See line 363. + s = wa[0: m*n].reshape(m, n) + y = wa[m*n: 2*m*n].reshape(m, n) + + # See lbfgsb.f line 160 for this portion of the workspace. + # isave(31) = the total number of BFGS updates prior the current iteration; + n_bfgs_updates = isave[30] + + n_corrs = min(n_bfgs_updates, maxcor) + hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) + + task_str = task_str.decode() + return OptimizeResult(fun=f, jac=g, nfev=sf.nfev, + njev=sf.ngev, + nit=n_iterations, status=warnflag, message=task_str, + x=x, success=(warnflag == 0), hess_inv=hess_inv) + + +class LbfgsInvHessProduct(LinearOperator): + """Linear operator for the L-BFGS approximate inverse Hessian. + + This operator computes the product of a vector with the approximate inverse + of the Hessian of the objective function, using the L-BFGS limited + memory approximation to the inverse Hessian, accumulated during the + optimization. + + Objects of this class implement the ``scipy.sparse.linalg.LinearOperator`` + interface. + + Parameters + ---------- + sk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the solution vector. + (See [1]). + yk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the gradient. (See [1]). + + References + ---------- + .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited + storage." Mathematics of computation 35.151 (1980): 773-782. + + """ + + def __init__(self, sk, yk): + """Construct the operator.""" + if sk.shape != yk.shape or sk.ndim != 2: + raise ValueError('sk and yk must have matching shape, (n_corrs, n)') + n_corrs, n = sk.shape + + super().__init__(dtype=np.float64, shape=(n, n)) + + self.sk = sk + self.yk = yk + self.n_corrs = n_corrs + self.rho = 1 / np.einsum('ij,ij->i', sk, yk) + + def _matvec(self, x): + """Efficient matrix-vector multiply with the BFGS matrices. + + This calculation is described in Section (4) of [1]. + + Parameters + ---------- + x : ndarray + An array with shape (n,) or (n,1). 
+ + Returns + ------- + y : ndarray + The matrix-vector product + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + q = np.array(x, dtype=self.dtype, copy=True) + if q.ndim == 2 and q.shape[1] == 1: + q = q.reshape(-1) + + alpha = np.empty(n_corrs) + + for i in range(n_corrs-1, -1, -1): + alpha[i] = rho[i] * np.dot(s[i], q) + q = q - alpha[i]*y[i] + + r = q + for i in range(n_corrs): + beta = rho[i] * np.dot(y[i], r) + r = r + s[i] * (alpha[i] - beta) + + return r + + def todense(self): + """Return a dense array representation of this operator. + + Returns + ------- + arr : ndarray, shape=(n, n) + An array with the same shape and containing + the same data represented by this `LinearOperator`. + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + I = np.eye(*self.shape, dtype=self.dtype) + Hk = I + + for i in range(n_corrs): + A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] + A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] + + Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * + s[i][np.newaxis, :]) + return Hk diff --git a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py index bf5e279..2e31e58 100644 --- a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py @@ -128,6 +128,8 @@ def psas3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/std3dvar.py b/src/daComposant/daAlgorithms/Atoms/std3dvar.py index 8fa53f3..303845d 100644 --- a/src/daComposant/daAlgorithms/Atoms/std3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std3dvar.py @@ -131,6 +131,8 @@ def std3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/std4dvar.py b/src/daComposant/daAlgorithms/Atoms/std4dvar.py index e4d1af9..6af5464 100644 --- a/src/daComposant/daAlgorithms/Atoms/std4dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std4dvar.py @@ -192,6 +192,8 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/van3dvar.py b/src/daComposant/daAlgorithms/Atoms/van3dvar.py index 8c44da3..34eb24d 100644 --- a/src/daComposant/daAlgorithms/Atoms/van3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/van3dvar.py @@ -139,6 +139,8 @@ def 
van3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py index 8018de5..65891bb 100644 --- a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py +++ b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py @@ -73,7 +73,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): name = "EstimationOf", default = "State", typecast = str, - message = "Estimation d'etat ou de parametres", + message = "Estimation d'état ou de paramètres", listval = ["State", "Parameters"], ) self.defineRequiredParameter( diff --git a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py index fe8c37d..63a7ee7 100644 --- a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py +++ b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py @@ -52,7 +52,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): name = "EstimationOf", default = "State", typecast = str, - message = "Estimation d'etat ou de parametres", + message = "Estimation d'état ou de paramètres", listval = ["State", "Parameters"], ) self.defineRequiredParameter( diff --git a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py index d0d6a15..1147c35 100644 --- a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py +++ b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py @@ -51,7 +51,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): name = "EstimationOf", default = "State", typecast = str, - message = "Estimation d'etat ou de parametres", + message = "Estimation d'état ou de paramètres", listval = ["State", "Parameters"], ) self.defineRequiredParameter( diff --git a/src/daSalome/__init__.py b/src/daSalome/__init__.py index 29c53cd..c2bcae2 100644 --- a/src/daSalome/__init__.py +++ b/src/daSalome/__init__.py @@ -129,8 +129,11 @@ import os, sys, logging adao_py_dir = os.path.abspath(os.path.dirname(__file__)) # Variable témoin sys.path.insert(0, adao_py_dir) -from daCore import Persistence # PathManagement activé dans Persistence from daCore.version import name, version, year, date, __version__ +try: + from daCore import Persistence # PathManagement activé dans Persistence +except: + logging.debug("INIT Pas de chargement initial de Persistence") try: from daYacsIntegration.daOptimizerLoop import * except:
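Editor's note on the SciPy 1.13 support added in the hunks above: each variational solver selects a version-specific L-BFGS-B wrapper and only falls back to the stock SciPy routine for versions it does not know. The sketch below is illustrative only; it is not part of the commit, it assumes it runs inside the ADAO source tree so that daAlgorithms is importable, and it uses packaging.version as a stand-in for the module's internal version helper vt::

    import scipy
    from packaging.version import Version as vt  # assumed stand-in for ADAO's internal "vt"

    def select_lbfgsb_backend():
        # Choose the SciPy-version-specific L-BFGS-B wrapper, mirroring the hunks above.
        v = vt(scipy.version.version)
        if vt("1.13.0") <= v <= vt("1.13.99"):
            import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur  # shim added by this commit
        elif vt("1.12.0") <= v <= vt("1.12.99"):
            import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur  # pre-existing shim
        else:
            import scipy.optimize as optimiseur  # unknown version: unmodified SciPy fallback
        return optimiseur

    # The selected module exposes fmin_l_bfgs_b with the same call signature as
    # scipy.optimize, so the calling code (std3dvar, van3dvar, ...) is unchanged.
    optimiseur = select_lbfgsb_backend()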