From 96c845367f7f71af429c06500eb6ab738cc9c5e1 Mon Sep 17 00:00:00 2001
From: Jean-Philippe ARGAUD
Date: Wed, 10 Apr 2024 13:57:47 +0200
Subject: [PATCH] Documentation update and review corrections

---
 doc/en/images/sampling_01_SampleAsnUplet.png | Bin 14632 -> 14632 bytes
 .../sampling_02_SampleAsExplicitHyperCube.png | Bin 14632 -> 14632 bytes
 ...ampling_03_SampleAsMinMaxStepHyperCube.png | Bin 14632 -> 14632 bytes
 ...mpling_04_SampleAsMinMaxLatinHyperCube.png | Bin 15799 -> 15799 bytes
 ...ampling_05_SampleAsMinMaxSobolSequence.png | Bin 16216 -> 16216 bytes
 ...pleAsIndependantRandomVariables_normal.png | Bin 16708 -> 16708 bytes
 ...leAsIndependantRandomVariables_uniform.png | Bin 14004 -> 14004 bytes
 ...leAsIndependantRandomVariables_weibull.png | Bin 13608 -> 13608 bytes
 doc/en/ref_algorithm_3DVAR.rst | 9 +
 doc/en/ref_algorithm_AdjointTest.rst | 3 +-
 doc/en/ref_algorithm_GradientTest.rst | 5 +-
 doc/en/ref_algorithm_LinearityTest.rst | 12 +-
 doc/en/ref_algorithm_LocalSensitivityTest.rst | 12 +-
 doc/en/ref_algorithm_TangentTest.rst | 3 +-
 doc/en/scripts/simple_3DVAR1.png | Bin 40888 -> 40888 bytes
 doc/en/scripts/simple_3DVAR1Plus.png | Bin 42650 -> 42650 bytes
 doc/en/scripts/simple_3DVAR2_state.png | Bin 33178 -> 33178 bytes
 doc/en/scripts/simple_3DVAR2_variance.png | Bin 18819 -> 18819 bytes
 doc/en/scripts/simple_3DVAR3_state.png | Bin 36035 -> 36035 bytes
 doc/en/scripts/simple_3DVAR3_variance.png | Bin 26719 -> 26719 bytes
 .../simple_DerivativeFreeOptimization.png | Bin 40186 -> 40186 bytes
 doc/en/scripts/simple_KalmanFilter1_state.png | Bin 31508 -> 31508 bytes
 .../scripts/simple_KalmanFilter1_variance.png | Bin 25190 -> 25190 bytes
 doc/en/scripts/simple_KalmanFilter2_state.png | Bin 31508 -> 31508 bytes
 .../scripts/simple_KalmanFilter2_variance.png | Bin 25190 -> 25190 bytes
 .../scripts/simple_NonLinearLeastSquares.png | Bin 40186 -> 40186 bytes
 .../simple_ParticleSwarmOptimization1.png | Bin 40413 -> 40413 bytes
 doc/en/snippets/FeaturePropDerivativeFree.rst | 6 +
 .../snippets/FeaturePropDerivativeNeeded.rst | 7 +
 .../FeaturePropGlobalOptimization.rst | 7 +
 .../snippets/FeaturePropLocalOptimization.rst | 5 +
 .../FeaturePropNonLocalOptimization.rst | 6 +
 .../snippets/FeaturePropParallelAlgorithm.rst | 7 +
 .../FeaturePropParallelDerivativesOnly.rst | 7 +
 doc/en/snippets/FeaturePropParallelFree.rst | 7 +
 doc/en/snippets/Header2Algo12.rst | 8 +
 doc/en/snippets/ModuleCompatibility.rst | 4 +-
 doc/fr/images/sampling_01_SampleAsnUplet.png | Bin 15888 -> 15888 bytes
 .../sampling_02_SampleAsExplicitHyperCube.png | Bin 15888 -> 15888 bytes
 ...ampling_03_SampleAsMinMaxStepHyperCube.png | Bin 15888 -> 15888 bytes
 ...mpling_04_SampleAsMinMaxLatinHyperCube.png | Bin 17108 -> 17108 bytes
 ...ampling_05_SampleAsMinMaxSobolSequence.png | Bin 17436 -> 17436 bytes
 ...pleAsIndependantRandomVariables_normal.png | Bin 17891 -> 17891 bytes
 ...leAsIndependantRandomVariables_uniform.png | Bin 15251 -> 15251 bytes
 ...leAsIndependantRandomVariables_weibull.png | Bin 14777 -> 14777 bytes
 doc/fr/ref_algorithm_3DVAR.rst | 9 +
 doc/fr/ref_algorithm_AdjointTest.rst | 4 +-
 doc/fr/ref_algorithm_GradientTest.rst | 5 +-
 doc/fr/ref_algorithm_LinearityTest.rst | 2 +-
 doc/fr/ref_algorithm_LocalSensitivityTest.rst | 8 +-
 doc/fr/ref_algorithm_TangentTest.rst | 3 +-
 doc/fr/scripts/simple_3DVAR1.png | Bin 41144 -> 41144 bytes
 doc/fr/scripts/simple_3DVAR1Plus.png | Bin 43869 -> 43869 bytes
 doc/fr/scripts/simple_3DVAR2_state.png | Bin 31971 -> 31971 bytes
 doc/fr/scripts/simple_3DVAR2_variance.png | Bin 19002 -> 19002 bytes
 doc/fr/scripts/simple_3DVAR3_state.png | Bin 34719 -> 34719 bytes
 doc/fr/scripts/simple_3DVAR3_variance.png | Bin 26989 -> 26989 bytes
 .../simple_DerivativeFreeOptimization.png | Bin 40395 -> 40395 bytes
 doc/fr/scripts/simple_KalmanFilter1_state.png | Bin 30177 -> 30177 bytes
 .../scripts/simple_KalmanFilter1_variance.png | Bin 25376 -> 25376 bytes
 doc/fr/scripts/simple_KalmanFilter2_state.png | Bin 30177 -> 30177 bytes
 .../scripts/simple_KalmanFilter2_variance.png | Bin 25376 -> 25376 bytes
 .../scripts/simple_NonLinearLeastSquares.png | Bin 40395 -> 40395 bytes
 .../simple_ParticleSwarmOptimization1.png | Bin 40622 -> 40622 bytes
 doc/fr/snippets/FeaturePropDerivativeFree.rst | 6 +
 .../snippets/FeaturePropDerivativeNeeded.rst | 7 +
 .../FeaturePropGlobalOptimization.rst | 8 +
 .../snippets/FeaturePropLocalOptimization.rst | 5 +
 .../FeaturePropNonLocalOptimization.rst | 7 +
 .../snippets/FeaturePropParallelAlgorithm.rst | 8 +
 .../FeaturePropParallelDerivativesOnly.rst | 8 +
 doc/fr/snippets/FeaturePropParallelFree.rst | 7 +
 doc/fr/snippets/Header2Algo12.rst | 8 +
 doc/fr/snippets/ModuleCompatibility.rst | 4 +-
 src/daComposant/daAlgorithms/Atoms/ecw2ukf.py | 1 -
 src/daComposant/daAlgorithms/Atoms/ecwnlls.py | 2 +
 src/daComposant/daAlgorithms/Atoms/ecwukf.py | 2 -
 .../daAlgorithms/Atoms/incr3dvar.py | 2 +
 .../daAlgorithms/Atoms/lbfgsb113hlt.py | 553 ++++++++++++++++++
 .../daAlgorithms/Atoms/psas3dvar.py | 2 +
 .../daAlgorithms/Atoms/std3dvar.py | 2 +
 .../daAlgorithms/Atoms/std4dvar.py | 2 +
 .../daAlgorithms/Atoms/van3dvar.py | 2 +
 .../daAlgorithms/EnsembleKalmanFilter.py | 2 +-
 .../daAlgorithms/ExtendedKalmanFilter.py | 2 +-
 .../daAlgorithms/UnscentedKalmanFilter.py | 2 +-
 src/daSalome/__init__.py | 5 +-
 87 files changed, 751 insertions(+), 35 deletions(-)
 create mode 100644 doc/en/snippets/FeaturePropDerivativeFree.rst
 create mode 100644 doc/en/snippets/FeaturePropDerivativeNeeded.rst
 create mode 100644 doc/en/snippets/FeaturePropGlobalOptimization.rst
 create mode 100644 doc/en/snippets/FeaturePropLocalOptimization.rst
 create mode 100644 doc/en/snippets/FeaturePropNonLocalOptimization.rst
 create mode 100644 doc/en/snippets/FeaturePropParallelAlgorithm.rst
 create mode 100644 doc/en/snippets/FeaturePropParallelDerivativesOnly.rst
 create mode 100644 doc/en/snippets/FeaturePropParallelFree.rst
 create mode 100644 doc/en/snippets/Header2Algo12.rst
 create mode 100644 doc/fr/snippets/FeaturePropDerivativeFree.rst
 create mode 100644 doc/fr/snippets/FeaturePropDerivativeNeeded.rst
 create mode 100644 doc/fr/snippets/FeaturePropGlobalOptimization.rst
 create mode 100644 doc/fr/snippets/FeaturePropLocalOptimization.rst
 create mode 100644 doc/fr/snippets/FeaturePropNonLocalOptimization.rst
 create mode 100644 doc/fr/snippets/FeaturePropParallelAlgorithm.rst
 create mode 100644 doc/fr/snippets/FeaturePropParallelDerivativesOnly.rst
 create mode 100644 doc/fr/snippets/FeaturePropParallelFree.rst
 create mode 100644 doc/fr/snippets/Header2Algo12.rst
 create mode 100644 src/daComposant/daAlgorithms/Atoms/lbfgsb113hlt.py
[GIT binary patch deltas omitted for the eight doc/en/images/sampling_*.png files; only the byte counts in the summary above are reviewable]
diff --git a/doc/en/ref_algorithm_3DVAR.rst b/doc/en/ref_algorithm_3DVAR.rst
index 6d6f302..bd905fe 100644
--- a/doc/en/ref_algorithm_3DVAR.rst
+++ b/doc/en/ref_algorithm_3DVAR.rst
@@ -74,6 +74,15 @@ allows to improve the estimation of *a posteriori* error covariances. This
 extension is obtained by using the "E3DVAR" variant of the filtering
 algorithm :ref:`section_ref_algorithm_EnsembleKalmanFilter`.
 
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo12.rst
+
+.. include:: snippets/FeaturePropNonLocalOptimization.rst
+
+.. include:: snippets/FeaturePropDerivativeNeeded.rst
+
+.. include:: snippets/FeaturePropParallelDerivativesOnly.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
diff --git a/doc/en/ref_algorithm_AdjointTest.rst b/doc/en/ref_algorithm_AdjointTest.rst
index f0052cd..4a930af 100644
--- a/doc/en/ref_algorithm_AdjointTest.rst
+++ b/doc/en/ref_algorithm_AdjointTest.rst
@@ -32,7 +32,8 @@ Checking algorithm "*AdjointTest*"
 
 This algorithm allows to check the quality of the adjoint of an operator
 :math:`F`, by computing a residue whose theoretical properties are known. The
-test is applicable to any operator, of evolution or observation.
+test is applicable to any operator, of evolution :math:`\mathcal{D}` or
+observation :math:`\mathcal{H}`.
 
 For all formulas, with :math:`\mathbf{x}` the current verification point, we
 take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and
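The identity verified by this check can be reproduced in a few lines outside
ADAO. A minimal NumPy sketch, in which the matrix A stands for the tangent of
the operator at the verification point and A.T for its adjoint (all names and
values here are illustrative, this is not the ADAO implementation)::

    import numpy

    rng = numpy.random.default_rng(1234)
    A = rng.normal(size=(5, 3))    # tangent operator at the checking point
    dx = rng.normal(size=3)        # perturbation in the state space
    y = rng.normal(size=5)         # arbitrary vector in the output space

    # For a consistent tangent/adjoint pair, <A dx, y> == <dx, A.T y>,
    # so the residue below is expected at machine precision level.
    residue = abs(numpy.dot(A @ dx, y) - numpy.dot(dx, A.T @ y))
    print(residue)                 # ~1e-15 or below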
diff --git a/doc/en/ref_algorithm_GradientTest.rst b/doc/en/ref_algorithm_GradientTest.rst
index ca24b21..08064fd 100644
--- a/doc/en/ref_algorithm_GradientTest.rst
+++ b/doc/en/ref_algorithm_GradientTest.rst
@@ -30,9 +30,10 @@ Checking algorithm "*GradientTest*"
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo01.rst
 
-This algorithm allows to check the quality of the adjoint operator, by
+This algorithm allows to check the quality of the gradient of an operator, by
 calculating a residue with known theoretical properties. Different residue
-formula are available.
+formulas are available. The test is applicable to any operator, of evolution
+:math:`\mathcal{D}` or observation :math:`\mathcal{H}`.
 
 In any cases, one take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and
 :math:`\mathbf{dx}=\alpha*\mathbf{dx}_0` with :math:`\alpha_0` a user scaling
diff --git a/doc/en/ref_algorithm_LinearityTest.rst b/doc/en/ref_algorithm_LinearityTest.rst
index 7b39c83..5cfa3d4 100644
--- a/doc/en/ref_algorithm_LinearityTest.rst
+++ b/doc/en/ref_algorithm_LinearityTest.rst
@@ -32,10 +32,14 @@ Checking algorithm "*LinearityTest*"
 
 This algorithm allows to check the linear quality of the operator, by
 calculating a residue with known theoretical properties. Different residue
-formula are available.
-
-In any cases, one take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and
-:math:`\mathbf{dx}=\alpha*\mathbf{dx}_0`. :math:`F` is the calculation code.
+formulas are available. The test is applicable to any operator, of evolution
+:math:`\mathcal{D}` or observation :math:`\mathcal{H}`.
+
+In all cases, with :math:`\mathbf{x}` the current verification point, one takes
+:math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and
+:math:`\mathbf{dx}=\alpha*\mathbf{dx}_0` with :math:`\alpha_0` a user scale
+parameter, at 1 by default. :math:`F` is the calculation code (given here by
+the user using the observation operator command "*ObservationOperator*").
 
 "CenteredDL" residue
 ********************
diff --git a/doc/en/ref_algorithm_LocalSensitivityTest.rst b/doc/en/ref_algorithm_LocalSensitivityTest.rst
index 9506365..c4f2b27 100644
--- a/doc/en/ref_algorithm_LocalSensitivityTest.rst
+++ b/doc/en/ref_algorithm_LocalSensitivityTest.rst
@@ -30,15 +30,15 @@ Checking algorithm "*LocalSensitivityTest*"
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo01.rst
 
-This algorithm allows to calculate the value of the Jacobian of the operator
-:math:`H` with respect to the input variables :math:`\mathbf{x}`. This operator
-appears in the relation:
+This algorithm allows to calculate the value of the Jacobian of the observation
+operator :math:`\mathcal{H}` with respect to the input variables
+:math:`\mathbf{x}`. This operator appears in the relation:
 
-.. math:: \mathbf{y} = H(\mathbf{x})
+.. math:: \mathbf{y} = \mathcal{H}(\mathbf{x})
 
 (see :ref:`section_theory` for further explanations). This Jacobian is the
-linearized operator (or the tangent one) :math:`\mathbf{H}` of the :math:`H`
-near the chosen checking point.
+linearized operator (or the tangent one) :math:`\mathbf{H}` of
+:math:`\mathcal{H}` near the chosen checking point.
 
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
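The Jacobian named here can be approximated column by column when only the
operator itself is available. A hedged sketch with forward finite differences
(the operator ``H`` below is invented for the illustration, this is not the
ADAO implementation)::

    import numpy

    def H(x):
        # Illustrative nonlinear observation operator, R^2 -> R^3
        return numpy.array([x[0] * x[1], numpy.sin(x[1]), x[0] ** 2])

    def jacobian_fd(H, x, eps=1.e-6):
        # One forward difference per input variable: each column j of the
        # Jacobian costs one extra evaluation of H near the checking point x
        y0 = H(x)
        J = numpy.empty((y0.size, x.size))
        for j in range(x.size):
            dx = numpy.zeros_like(x)
            dx[j] = eps
            J[:, j] = (H(x + dx) - y0) / eps
        return J

    print(jacobian_fd(H, numpy.array([1., 2.])))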
diff --git a/doc/en/ref_algorithm_TangentTest.rst b/doc/en/ref_algorithm_TangentTest.rst
index 76df818..441acdb 100644
--- a/doc/en/ref_algorithm_TangentTest.rst
+++ b/doc/en/ref_algorithm_TangentTest.rst
@@ -32,7 +32,8 @@ Checking algorithm "*TangentTest*"
 
 This algorithm allows to check the quality of the tangent operator, by
 calculating a residue whose theoretical properties are known. The test is
-applicable to any operator, of evolution or observation.
+applicable to any operator, of evolution :math:`\mathcal{D}` or observation
+:math:`\mathcal{H}`.
 
 For all formulas, with :math:`\mathbf{x}` the current verification point, we
 take :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` and
[GIT binary patch deltas omitted for the thirteen doc/en/scripts/simple_*.png figures; only the byte counts in the summary above are reviewable]
diff --git a/doc/en/snippets/FeaturePropDerivativeFree.rst b/doc/en/snippets/FeaturePropDerivativeFree.rst
new file mode 100644
index 0000000..58658fe
--- /dev/null
+++ b/doc/en/snippets/FeaturePropDerivativeFree.rst
@@ -0,0 +1,6 @@
+.. index:: single: Derivation not required
+
+- The methods proposed by this algorithm **do not require derivation of the
+  objective function or of one of the operators**, thus avoiding this
+  additional cost when derivatives are calculated numerically by multiple
+  evaluations.
diff --git a/doc/en/snippets/FeaturePropDerivativeNeeded.rst b/doc/en/snippets/FeaturePropDerivativeNeeded.rst
new file mode 100644
index 0000000..f8bd752
--- /dev/null
+++ b/doc/en/snippets/FeaturePropDerivativeNeeded.rst
@@ -0,0 +1,7 @@
+.. index:: single: Derivation required
+
+- The methods proposed by this algorithm **require the derivation of the
+  objective function or of one of the operators**. They require that at least
+  one of the observation or evolution operators, possibly both, be
+  differentiable, and this implies an additional cost when the derivatives
+  are calculated numerically by multiple evaluations.
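The additional cost of numerical derivation mentioned in these two snippets can
be made concrete: a forward-difference gradient requires n+1 evaluations of the
objective, and the n perturbed evaluations are mutually independent, hence easy
to distribute. A hedged sketch (the objective ``J`` is invented; this is
neither the ADAO scheduler nor its differentiation code)::

    from concurrent.futures import ThreadPoolExecutor
    import numpy

    def J(x):
        # Illustrative objective function, minimum at x == 1
        return float(numpy.sum((x - 1.0) ** 2))

    def gradient_fd(J, x, eps=1.e-6):
        j0 = J(x)                               # 1 reference evaluation
        perturbed = [x + eps * numpy.eye(x.size)[i] for i in range(x.size)]
        with ThreadPoolExecutor() as pool:      # n independent evaluations
            values = list(pool.map(J, perturbed))
        return (numpy.array(values) - j0) / eps

    print(gradient_fd(J, numpy.zeros(4)))       # close to [-2, -2, -2, -2]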
diff --git a/doc/en/snippets/FeaturePropGlobalOptimization.rst b/doc/en/snippets/FeaturePropGlobalOptimization.rst
new file mode 100644
index 0000000..bde4741
--- /dev/null
+++ b/doc/en/snippets/FeaturePropGlobalOptimization.rst
@@ -0,0 +1,7 @@
+.. index:: single: Global optimization
+
+- The optimization methods proposed by this algorithm perform a **global search
+  for the minimum**, theoretically achieving a globally optimal state over the
+  search domain. However, this global optimality is achieved "*at
+  convergence*", which means in long or infinite time during an iterative
+  optimization "*with real values*" (as opposed to "*with integer values*").
diff --git a/doc/en/snippets/FeaturePropLocalOptimization.rst b/doc/en/snippets/FeaturePropLocalOptimization.rst
new file mode 100644
index 0000000..ea218ac
--- /dev/null
+++ b/doc/en/snippets/FeaturePropLocalOptimization.rst
@@ -0,0 +1,5 @@
+.. index:: single: Local optimization
+
+- The optimization methods proposed by this algorithm perform a **local search
+  for the minimum**, theoretically enabling a locally optimal state (as opposed
+  to a "*globally optimal*" state) to be reached.
diff --git a/doc/en/snippets/FeaturePropNonLocalOptimization.rst b/doc/en/snippets/FeaturePropNonLocalOptimization.rst
new file mode 100644
index 0000000..45cd8c1
--- /dev/null
+++ b/doc/en/snippets/FeaturePropNonLocalOptimization.rst
@@ -0,0 +1,6 @@
+.. index:: single: Non local optimization
+
+- The optimization methods proposed by this algorithm perform a **non-local
+  search for the minimum**, without however ensuring a global search. This is
+  the case when optimization methods have the ability to avoid being trapped by
+  the first local minimum found. These capabilities are sometimes heuristic.
diff --git a/doc/en/snippets/FeaturePropParallelAlgorithm.rst b/doc/en/snippets/FeaturePropParallelAlgorithm.rst
new file mode 100644
index 0000000..a8908e7
--- /dev/null
+++ b/doc/en/snippets/FeaturePropParallelAlgorithm.rst
@@ -0,0 +1,7 @@
+.. index:: single: Algorithmic parallelism included
+
+- The methods proposed by this algorithm **have internal parallelism**, and can
+  therefore take advantage of computational distribution resources. The
+  potential interaction between the internal parallelism of the methods and the
+  parallelism that may be present in the user's observation or evolution
+  operators must therefore be carefully tuned.
diff --git a/doc/en/snippets/FeaturePropParallelDerivativesOnly.rst b/doc/en/snippets/FeaturePropParallelDerivativesOnly.rst
new file mode 100644
index 0000000..5e2c03d
--- /dev/null
+++ b/doc/en/snippets/FeaturePropParallelDerivativesOnly.rst
@@ -0,0 +1,7 @@
+.. index:: single: Parallelism of derivation
+
+- The methods proposed by this algorithm **have no internal parallelism, but
+  use the numerical derivation of operator(s), which can be parallelized**. The
+  potential interaction between the parallelism of the numerical derivation and
+  the parallelism that may be present in the user's observation or evolution
+  operators must therefore be carefully tuned.
diff --git a/doc/en/snippets/FeaturePropParallelFree.rst b/doc/en/snippets/FeaturePropParallelFree.rst
new file mode 100644
index 0000000..192e9a4
--- /dev/null
+++ b/doc/en/snippets/FeaturePropParallelFree.rst
@@ -0,0 +1,7 @@
+.. index:: single: Absence of algorithmic parallelism
+
+- The methods proposed by this algorithm **have no internal parallelism**, and
+  therefore cannot take advantage of computer resources for distributing
+  calculations. The methods are sequential, and any use of parallelism
+  resources is therefore reserved for the user's observation or evolution
+  operators.
diff --git a/doc/en/snippets/Header2Algo12.rst b/doc/en/snippets/Header2Algo12.rst
new file mode 100644
index 0000000..709ef70
--- /dev/null
+++ b/doc/en/snippets/Header2Algo12.rst
@@ -0,0 +1,8 @@
+Some noteworthy properties of the implemented algorithm
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+To complete the description, we summarize here a few notable properties of the
+algorithm or of its implementation. These properties may have an influence on
+how it is used or on its computational performance. For further information,
+please refer to the more comprehensive references given at the end of this
+algorithm description.
diff --git a/doc/en/snippets/ModuleCompatibility.rst b/doc/en/snippets/ModuleCompatibility.rst
index 22ae77b..eb55df6 100644
--- a/doc/en/snippets/ModuleCompatibility.rst
+++ b/doc/en/snippets/ModuleCompatibility.rst
@@ -16,7 +16,7 @@ versions within the range described below.
 
    Python, 3.6.5, 3.12.2
    Numpy, 1.14.3, 1.26.4
-   Scipy, 0.19.1, 1.12.0
-   MatplotLib, 2.2.2, 3.8.3
+   Scipy, 0.19.1, 1.13.0
+   MatplotLib, 2.2.2, 3.8.4
    GnuplotPy, 1.8, 1.8
    NLopt, 2.4.2, 2.7.1
[GIT binary patch deltas omitted for the eight doc/fr/images/sampling_*.png files; only the byte counts in the summary above are reviewable]
diff --git a/doc/fr/ref_algorithm_3DVAR.rst b/doc/fr/ref_algorithm_3DVAR.rst
index 065e996..037740e 100644
--- a/doc/fr/ref_algorithm_3DVAR.rst
+++ b/doc/fr/ref_algorithm_3DVAR.rst
@@ -80,6 +80,15 @@ ces covariances d'erreurs *a posteriori*. On atteint cette extension en
 utilisant le variant "E3DVAR" de l'algorithme de filtrage
 :ref:`section_ref_algorithm_EnsembleKalmanFilter`.
 
+.. ------------------------------------ ..
+.. include:: snippets/Header2Algo12.rst
+
+.. include:: snippets/FeaturePropNonLocalOptimization.rst
+
+.. include:: snippets/FeaturePropDerivativeNeeded.rst
+
+.. include:: snippets/FeaturePropParallelDerivativesOnly.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
diff --git a/doc/fr/ref_algorithm_AdjointTest.rst b/doc/fr/ref_algorithm_AdjointTest.rst
index cc7dbf8..d5c7735 100644
--- a/doc/fr/ref_algorithm_AdjointTest.rst
+++ b/doc/fr/ref_algorithm_AdjointTest.rst
@@ -32,8 +32,8 @@ Algorithme de vérification "*AdjointTest*"
 
 Cet algorithme permet de vérifier la qualité de l'adjoint d'un opérateur
 :math:`F`, en calculant un résidu dont les propriétés théoriques sont connues.
-Le test est applicable à un opérateur quelconque, d'évolution comme
-d'observation.
+Le test est applicable à un opérateur quelconque, d'évolution
+:math:`\mathcal{D}` comme d'observation :math:`\mathcal{H}`.
 
 Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de
 vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et
diff --git a/doc/fr/ref_algorithm_GradientTest.rst b/doc/fr/ref_algorithm_GradientTest.rst
index bf8c1a3..86f6b63 100644
--- a/doc/fr/ref_algorithm_GradientTest.rst
+++ b/doc/fr/ref_algorithm_GradientTest.rst
@@ -30,10 +30,11 @@ Algorithme de vérification "*GradientTest*"
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo01.rst
 
-Cet algorithme permet de vérifier la qualité du gradient de l'opérateur, en
+Cet algorithme permet de vérifier la qualité du gradient d'un opérateur, en
 calculant un résidu dont les propriétés théoriques sont connues. Plusieurs
 formules de résidu sont disponibles. Le test est applicable à un opérateur
-quelconque, d'évolution comme d'observation.
+quelconque, d'évolution :math:`\mathcal{D}` comme d'observation
+:math:`\mathcal{H}`.
 
 Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de
 vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et
diff --git a/doc/fr/ref_algorithm_LinearityTest.rst b/doc/fr/ref_algorithm_LinearityTest.rst
index f4bf507..617cd2f 100644
--- a/doc/fr/ref_algorithm_LinearityTest.rst
+++ b/doc/fr/ref_algorithm_LinearityTest.rst
@@ -34,7 +34,7 @@ Cet algorithme permet de vérifier la qualité de linéarité d'un opérateur, en
 calculant un résidu dont les propriétés théoriques sont connues. Plusieurs
 formules de résidu sont utilisables et sont décrites ci-dessous avec leur
 interprétation. Le test est applicable à un opérateur quelconque, d'évolution
-comme d'observation.
+:math:`\mathcal{D}` comme d'observation :math:`\mathcal{H}`.
 
 Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de
 vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et
diff --git a/doc/fr/ref_algorithm_LocalSensitivityTest.rst b/doc/fr/ref_algorithm_LocalSensitivityTest.rst
index 63cc32f..ad6db9b 100644
--- a/doc/fr/ref_algorithm_LocalSensitivityTest.rst
+++ b/doc/fr/ref_algorithm_LocalSensitivityTest.rst
@@ -31,14 +31,14 @@ Algorithme de vérification "*LocalSensitivityTest*"
 .. include:: snippets/Header2Algo01.rst
 
 Cet algorithme permet d'établir la valeur de la Jacobienne de l'opérateur
-:math:`H` par rapport aux variables d'entrée :math:`\mathbf{x}`. Cet opérateur
-intervient dans la relation :
+d'observation :math:`\mathcal{H}` par rapport aux variables d'entrée
+:math:`\mathbf{x}`. Cet opérateur intervient dans la relation :
 
-.. math:: \mathbf{y} = H(\mathbf{x})
+.. math:: \mathbf{y} = \mathcal{H}(\mathbf{x})
 
 (voir :ref:`section_theory` pour de plus amples explications). Cette jacobienne
 est l'opérateur linéarisé (ou opérateur tangent) :math:`\mathbf{H}` de
-:math:`H` autour du point de vérification choisi.
+:math:`\mathcal{H}` autour du point de vérification choisi.
 
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
diff --git a/doc/fr/ref_algorithm_TangentTest.rst b/doc/fr/ref_algorithm_TangentTest.rst
index 658e6d8..bc05399 100644
--- a/doc/fr/ref_algorithm_TangentTest.rst
+++ b/doc/fr/ref_algorithm_TangentTest.rst
@@ -32,7 +32,8 @@ Algorithme de vérification "*TangentTest*"
 
 Cet algorithme permet de vérifier la qualité de l'opérateur tangent, en
 calculant un résidu dont les propriétés théoriques sont connues. Le test est
-applicable à un opérateur quelconque, d'évolution comme d'observation.
+applicable à un opérateur quelconque, d'évolution :math:`\mathcal{D}` comme
+d'observation :math:`\mathcal{H}`.
 
 Pour toutes les formules, avec :math:`\mathbf{x}` le point courant de
 vérification, on prend :math:`\mathbf{dx}_0=Normal(0,\mathbf{x})` et
[GIT binary patch deltas omitted for the thirteen doc/fr/scripts/simple_*.png figures; only the byte counts in the summary above are reviewable]
diff --git a/doc/fr/snippets/FeaturePropDerivativeFree.rst b/doc/fr/snippets/FeaturePropDerivativeFree.rst
new file mode 100644
index 0000000..e1eeaa4
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropDerivativeFree.rst
@@ -0,0 +1,6 @@
+.. index:: single: Dérivation non requise
+
+- Les méthodes proposées par cet algorithme **ne requièrent pas de dérivation
+  de la fonction objectif ou de l'un des opérateurs**, permettant d'éviter ce
+  coût supplémentaire dans le cas où les dérivées sont calculées numériquement
+  par de multiples évaluations.
diff --git a/doc/fr/snippets/FeaturePropDerivativeNeeded.rst b/doc/fr/snippets/FeaturePropDerivativeNeeded.rst
new file mode 100644
index 0000000..0b1246a
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropDerivativeNeeded.rst
@@ -0,0 +1,7 @@
+.. index:: single: Dérivation requise
+
+- Les méthodes proposées par cet algorithme **requièrent la dérivation de la
+  fonction objectif ou de l'un des opérateurs**. Cela nécessite que l'un au
+  moins des opérateurs d'observation ou d'évolution soit différentiable, voire
+  les deux, et cela implique un coût supplémentaire dans le cas où les
+  dérivées sont calculées numériquement par de multiples évaluations.
diff --git a/doc/fr/snippets/FeaturePropGlobalOptimization.rst b/doc/fr/snippets/FeaturePropGlobalOptimization.rst
new file mode 100644
index 0000000..f095661
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropGlobalOptimization.rst
@@ -0,0 +1,8 @@
+.. index:: single: Optimisation globale
+
+- Les méthodes d'optimisation proposées par cet algorithme effectuent une
+  **recherche globale du minimum**, permettant en théorie d'atteindre un état
+  globalement optimal sur le domaine de recherche. Cette optimalité globale est
+  néanmoins obtenue "*à convergence*", ce qui signifie en temps long ou infini
+  lors d'une optimisation itérative *à valeurs réelles* (par opposition *à
+  valeurs entières*).
diff --git a/doc/fr/snippets/FeaturePropLocalOptimization.rst b/doc/fr/snippets/FeaturePropLocalOptimization.rst
new file mode 100644
index 0000000..96fceeb
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropLocalOptimization.rst
@@ -0,0 +1,5 @@
+.. index:: single: Optimisation locale
+
+- Les méthodes d'optimisation proposées par cet algorithme effectuent une
+  **recherche locale du minimum**, permettant en théorie d'atteindre un état
+  localement optimal (par opposition à un état "*globalement optimal*").
diff --git a/doc/fr/snippets/FeaturePropNonLocalOptimization.rst b/doc/fr/snippets/FeaturePropNonLocalOptimization.rst
new file mode 100644
index 0000000..d5f7d80
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropNonLocalOptimization.rst
@@ -0,0 +1,7 @@
+.. index:: single: Optimisation non locale
+
+- Les méthodes d'optimisation proposées par cet algorithme effectuent une
+  **recherche non locale du minimum**, sans pour autant assurer une recherche
+  globale. C'est le cas lorsque les méthodes d'optimisation présentent des
+  capacités d'éviter de rester bloquées par le premier minimum local trouvé.
+  Ces capacités sont parfois heuristiques.
diff --git a/doc/fr/snippets/FeaturePropParallelAlgorithm.rst b/doc/fr/snippets/FeaturePropParallelAlgorithm.rst
new file mode 100644
index 0000000..a78f27f
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropParallelAlgorithm.rst
@@ -0,0 +1,8 @@
+.. index:: single: Parallélisme algorithmique présent
+
+- Les méthodes proposées par cet algorithme **présentent un parallélisme
+  interne**, et peuvent donc profiter de ressources informatiques de
+  répartition de calculs. L'interaction potentielle, entre le parallélisme
+  interne des méthodes, et le parallélisme éventuellement présent dans les
+  opérateurs d'observation ou d'évolution de l'utilisateur, doit donc être
+  soigneusement réglée.
diff --git a/doc/fr/snippets/FeaturePropParallelDerivativesOnly.rst b/doc/fr/snippets/FeaturePropParallelDerivativesOnly.rst
new file mode 100644
index 0000000..939db42
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropParallelDerivativesOnly.rst
@@ -0,0 +1,8 @@
+.. index:: single: Parallélisme de dérivation
+
+- Les méthodes proposées par cet algorithme **ne présentent pas de parallélisme
+  interne, mais utilisent la dérivation numérique d'opérateur(s) qui est, elle,
+  parallélisable**. L'interaction potentielle, entre le parallélisme de la
+  dérivation numérique, et le parallélisme éventuellement présent dans les
+  opérateurs d'observation ou d'évolution de l'utilisateur, doit donc être
+  soigneusement réglée.
diff --git a/doc/fr/snippets/FeaturePropParallelFree.rst b/doc/fr/snippets/FeaturePropParallelFree.rst
new file mode 100644
index 0000000..76234aa
--- /dev/null
+++ b/doc/fr/snippets/FeaturePropParallelFree.rst
@@ -0,0 +1,7 @@
+.. index:: single: Parallélisme algorithmique absent
+
+- Les méthodes proposées par cet algorithme **ne présentent pas de parallélisme
+  interne**, et ne peuvent donc profiter de ressources informatiques de
+  répartition de calculs. Les méthodes sont séquentielles, et un usage éventuel
+  des ressources du parallélisme est donc réservé aux opérateurs d'observation
+  ou d'évolution de l'utilisateur.
diff --git a/doc/fr/snippets/Header2Algo12.rst b/doc/fr/snippets/Header2Algo12.rst
new file mode 100644
index 0000000..ad8d63e
--- /dev/null
+++ b/doc/fr/snippets/Header2Algo12.rst
@@ -0,0 +1,8 @@
+Quelques propriétés notables de l'algorithme implémenté
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Pour compléter la description, on synthétise ici quelques propriétés notables
+de l'algorithme ou de son implémentation. Ces propriétés peuvent avoir une
+influence sur la manière de l'utiliser ou sur ses performances de calcul. Pour
+de plus amples renseignements, on se reportera aux références plus complètes
+indiquées à la fin du descriptif de cet algorithme.
diff --git a/doc/fr/snippets/ModuleCompatibility.rst b/doc/fr/snippets/ModuleCompatibility.rst
index e7281ce..f646b6d 100644
--- a/doc/fr/snippets/ModuleCompatibility.rst
+++ b/doc/fr/snippets/ModuleCompatibility.rst
@@ -17,7 +17,7 @@ l'étendue décrite ci-dessous.
 
    Python, 3.6.5, 3.12.2
    Numpy, 1.14.3, 1.26.4
-   Scipy, 0.19.1, 1.12.0
-   MatplotLib, 2.2.2, 3.8.3
+   Scipy, 0.19.1, 1.13.0
+   MatplotLib, 2.2.2, 3.8.4
    GnuplotPy, 1.8, 1.8
    NLopt, 2.4.2, 2.7.1
diff --git a/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py b/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py
index 6f6ac79..f935804 100644
--- a/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py
+++ b/src/daComposant/daAlgorithms/Atoms/ecw2ukf.py
@@ -109,7 +109,6 @@ def ecw2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="UKF"):
     else:
         Cm = None
     #
-    # Pndemi = numpy.real(scipy.linalg.cholesky(Pn))
     Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
     Xnmu = Xn + Pndemi @ SC
     nbSpts = SC.shape[1]
diff --git a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py
index 3444073..b6ed31a 100644
--- a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py
+++ b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py
@@ -146,6 +146,8 @@ def ecwnlls(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False):
             import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur
         elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"):
             import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur
+        elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"):
+            import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur
         else:
             import scipy.optimize as optimiseur
         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
diff --git a/src/daComposant/daAlgorithms/Atoms/ecwukf.py b/src/daComposant/daAlgorithms/Atoms/ecwukf.py
index 107a5bb..465168d 100644
--- a/src/daComposant/daAlgorithms/Atoms/ecwukf.py
+++ b/src/daComposant/daAlgorithms/Atoms/ecwukf.py
@@ -107,7 +107,6 @@ def ecwukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="UKF"):
     else:
         Cm = None
     #
-    # Pndemi = numpy.real(scipy.linalg.cholesky(Pn))
     Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
     Xnmu = Xn + Pndemi @ SC
     nbSpts = SC.shape[1]
@@ -133,7 +132,6 @@ def ecwukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="UKF"):
         dXEnnmuXhmn = XEnnmu[:, point].flat - Xhmn
         Pmn += Wc[point] * numpy.outer(dXEnnmuXhmn, dXEnnmuXhmn)
     #
-    # Pmndemi = numpy.real(scipy.linalg.cholesky(Pmn))
     Pmndemi = numpy.real(scipy.linalg.sqrtm(Pmn))
     Xnnmu = Xhmn.reshape((-1, 1)) + Pmndemi @ SC
     #
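The hunks above settle on ``scipy.linalg.sqrtm`` and drop the commented-out
``cholesky`` alternative for the sigma-point spread. The difference can be
seen on a small example (the covariances below are made up for illustration):
Cholesky requires a strictly positive definite matrix and returns a triangular
factor, while the symmetric square root stays computable for a singular,
merely semi-definite covariance::

    import numpy, scipy.linalg

    Pn = numpy.array([[2., 1., 1.],
                      [1., 2., 1.],
                      [1., 1., 2.]])
    U = scipy.linalg.cholesky(Pn)            # upper triangular, U.T @ U == Pn
    S = numpy.real(scipy.linalg.sqrtm(Pn))   # symmetric, S @ S == Pn

    Psingular = numpy.ones((3, 3))           # rank 1, only semi-definite
    print(numpy.real(scipy.linalg.sqrtm(Psingular)))   # still defined
    # scipy.linalg.cholesky(Psingular) would raise LinAlgError here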
diff --git a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py
index d6570c4..6fb3632 100644
--- a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py
+++ b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py
@@ -147,6 +147,8 @@ def incr3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False):
             import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur
         elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"):
             import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur
+        elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"):
+            import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur
         else:
             import scipy.optimize as optimiseur
         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
diff --git a/src/daComposant/daAlgorithms/Atoms/lbfgsb113hlt.py b/src/daComposant/daAlgorithms/Atoms/lbfgsb113hlt.py
new file mode 100644
index 0000000..4aea5a3
--- /dev/null
+++ b/src/daComposant/daAlgorithms/Atoms/lbfgsb113hlt.py
@@ -0,0 +1,553 @@
+# Modification de la version 1.13.0
+# flake8: noqa
+"""
+Functions
+---------
+.. autosummary::
+   :toctree: generated/
+
+   fmin_l_bfgs_b
+
+"""
+
+## License for the Python wrapper
+## ==============================
+
+## Copyright (c) 2004 David M. Cooke
+
+## Permission is hereby granted, free of charge, to any person obtaining a
+## copy of this software and associated documentation files (the "Software"),
+## to deal in the Software without restriction, including without limitation
+## the rights to use, copy, modify, merge, publish, distribute, sublicense,
+## and/or sell copies of the Software, and to permit persons to whom the
+## Software is furnished to do so, subject to the following conditions:
+
+## The above copyright notice and this permission notice shall be included in
+## all copies or substantial portions of the Software.
+
+## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+## DEALINGS IN THE SOFTWARE.
+
+## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
+
+import numpy as np
+from numpy import array, asarray, float64, zeros
+from scipy.optimize import _lbfgsb
+from scipy.optimize._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt,
+                                      _wrap_callback, _check_unknown_options,
+                                      _prepare_scalar_function)
+from scipy.optimize._constraints import old_bound_to_new
+
+from scipy.sparse.linalg import LinearOperator
+
+__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
+
+
+def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
+                  approx_grad=0,
+                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
+                  epsilon=1e-8,
+                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
+                  callback=None, maxls=20):
+    """
+    Minimize a function func using the L-BFGS-B algorithm.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Function to minimize.
+    x0 : ndarray
+        Initial guess.
+    fprime : callable fprime(x,*args), optional
+        The gradient of `func`. If None, then `func` returns the function
+        value and the gradient (``f, g = func(x, *args)``), unless
+        `approx_grad` is True in which case `func` returns only ``f``.
+    args : sequence, optional
+        Arguments to pass to `func` and `fprime`.
+    approx_grad : bool, optional
+        Whether to approximate the gradient numerically (in which case
+        `func` returns only the function value).
+    bounds : list, optional
+        ``(min, max)`` pairs for each element in ``x``, defining
+        the bounds on that parameter. Use None or +-inf for one of ``min`` or
+        ``max`` when there is no bound in that direction.
+    m : int, optional
+        The maximum number of variable metric corrections
+        used to define the limited memory matrix. (The limited memory BFGS
+        method does not store the full hessian but uses this many terms in an
+        approximation to it.)
+    factr : float, optional
+        The iteration stops when
+        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
+        where ``eps`` is the machine precision, which is automatically
+        generated by the code. Typical values for `factr` are: 1e12 for
+        low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
+        high accuracy. See Notes for relationship to `ftol`, which is exposed
+        (instead of `factr`) by the `scipy.optimize.minimize` interface to
+        L-BFGS-B.
+    pgtol : float, optional
+        The iteration will stop when
+        ``max{|proj g_i | i = 1, ..., n} <= pgtol``
+        where ``proj g_i`` is the i-th component of the projected gradient.
+    epsilon : float, optional
+        Step size used when `approx_grad` is True, for numerically
+        calculating the gradient
+    iprint : int, optional
+        Controls the frequency of output. ``iprint < 0`` means no output;
+        ``iprint = 0`` print only one line at the last iteration;
+        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
+        ``iprint = 99`` print details of every iteration except n-vectors;
+        ``iprint = 100`` print also the changes of active set and final x;
+        ``iprint > 100`` print details of every iteration including x and g.
+    disp : int, optional
+        If zero, then no output. If a positive number, then this over-rides
+        `iprint` (i.e., `iprint` gets the value of `disp`).
+    maxfun : int, optional
+        Maximum number of function evaluations. Note that this function
+        may violate the limit because of evaluating gradients by numerical
+        differentiation.
+    maxiter : int, optional
+        Maximum number of iterations.
+    callback : callable, optional
+        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    maxls : int, optional
+        Maximum number of line search steps (per iteration). Default is 20.
+
+    Returns
+    -------
+    x : array_like
+        Estimated position of the minimum.
+    f : float
+        Value of `func` at the minimum.
+    d : dict
+        Information dictionary.
+
+        * d['warnflag'] is
+
+          - 0 if converged,
+          - 1 if too many function evaluations or too many iterations,
+          - 2 if stopped for another reason, given in d['task']
+
+        * d['grad'] is the gradient at the minimum (should be 0 ish)
+        * d['funcalls'] is the number of function calls made.
+        * d['nit'] is the number of iterations.
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'L-BFGS-B' `method` in particular. Note that the
+        `ftol` option is made available via that interface, while `factr` is
+        provided via this interface, where `factr` is the factor multiplying
+        the default machine floating-point precision to arrive at `ftol`:
+        ``ftol = factr * numpy.finfo(float).eps``.
+
+    Notes
+    -----
+    License of L-BFGS-B (FORTRAN code):
+
+    The version included here (in fortran code) is 3.0
+    (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
+    and Jorge Nocedal.
+    It carries the following condition for use:
+
+    This software is freely available, but we expect that all publications
+    describing work using this software, or all commercial products using it,
+    quote at least one of the references given below. This software is released
+    under the BSD License.
+
+    References
+    ----------
+    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
+      Constrained Optimization, (1995), SIAM Journal on Scientific and
+      Statistical Computing, 16, 5, pp. 1190-1208.
+    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (1997),
+      ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
+    * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (2011),
+      ACM Transactions on Mathematical Software, 38, 1.
+
+    Examples
+    --------
+    Solve a linear regression problem via `fmin_l_bfgs_b`. To do this, first we
+    define an objective function ``f(m, b) = (y - y_model)**2``, where `y`
+    describes the observations and `y_model` the prediction of the linear model
+    as ``y_model = m*x + b``. The bounds for the parameters, ``m`` and ``b``,
+    are arbitrarily chosen as ``(0,5)`` and ``(5,10)`` for this example.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import fmin_l_bfgs_b
+    >>> X = np.arange(0, 10, 1)
+    >>> M = 2
+    >>> B = 3
+    >>> Y = M * X + B
+    >>> def func(parameters, *args):
+    ...     x = args[0]
+    ...     y = args[1]
+    ...     m, b = parameters
+    ...     y_model = m*x + b
+    ...     error = sum(np.power((y - y_model), 2))
+    ...     return error
+
+    >>> initial_values = np.array([0.0, 1.0])
+
+    >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
+    ...                                    approx_grad=True)
+    >>> x_opt, f_opt
+    array([1.99999999, 3.00000006]), 1.7746231151323805e-14  # may vary
+
+    The optimized parameters in ``x_opt`` agree with the ground truth parameters
+    ``m`` and ``b``. Next, let us perform a bound constrained optimization using
+    the `bounds` parameter.
+
+    >>> bounds = [(0, 5), (5, 10)]
+    >>> x_opt, f_opt, info = fmin_l_bfgs_b(func, x0=initial_values, args=(X, Y),
+    ...                                    approx_grad=True, bounds=bounds)
+    >>> x_opt, f_opt
+    array([1.65990508, 5.31649385]), 15.721334516453945  # may vary
+    """
+    # handle fprime/approx_grad
+    if approx_grad:
+        fun = func
+        jac = None
+    elif fprime is None:
+        fun = MemoizeJac(func)
+        jac = fun.derivative
+    else:
+        fun = func
+        jac = fprime
+
+    # build options
+    if disp is None:
+        disp = iprint
+    callback = _wrap_callback(callback)
+    opts = {'disp': disp,
+            'iprint': iprint,
+            'maxcor': m,
+            'ftol': factr * np.finfo(float).eps,
+            'gtol': pgtol,
+            'eps': epsilon,
+            'maxfun': maxfun,
+            'maxiter': maxiter,
+            'callback': callback,
+            'maxls': maxls}
+
+    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
+                           **opts)
+    d = {'grad': res['jac'],
+         'task': res['message'],
+         'funcalls': res['nfev'],
+         'nit': res['nit'],
+         'warnflag': res['status']}
+    f = res['fun']
+    x = res['x']
+
+    return x, f, d
+
+
+def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
+                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
+                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
+                     iprint=-1, callback=None, maxls=20,
+                     finite_diff_rel_step=None, **unknown_options):
+    """
+    Minimize a scalar function of one or more variables using the L-BFGS-B
+    algorithm.
+
+    Options
+    -------
+    disp : None or int
+        If `disp is None` (the default), then the supplied version of `iprint`
+        is used. If `disp is not None`, then it overrides the supplied version
+        of `iprint` with the behaviour you outlined.
+    maxcor : int
+        The maximum number of variable metric corrections used to
+        define the limited memory matrix. (The limited memory BFGS
+        method does not store the full hessian but uses this many terms
+        in an approximation to it.)
+    ftol : float
+        The iteration stops when ``(f^k -
+        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
+    gtol : float
+        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
+        <= gtol`` where ``proj g_i`` is the i-th component of the
+        projected gradient.
+    eps : float or ndarray
+        If `jac is None` the absolute step size used for numerical
+        approximation of the jacobian via forward differences.
+    maxfun : int
+        Maximum number of function evaluations. Note that this function
+        may violate the limit because of evaluating gradients by numerical
+        differentiation.
+    maxiter : int
+        Maximum number of iterations.
+    iprint : int, optional
+        Controls the frequency of output. ``iprint < 0`` means no output;
+        ``iprint = 0`` print only one line at the last iteration;
+        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
+        ``iprint = 99`` print details of every iteration except n-vectors;
+        ``iprint = 100`` print also the changes of active set and final x;
+        ``iprint > 100`` print details of every iteration including x and g.
+    callback : callable, optional
+        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    maxls : int, optional
+        Maximum number of line search steps (per iteration). Default is 20.
+    finite_diff_rel_step : None or array_like, optional
+        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+        use for numerical approximation of the jacobian. The absolute step
+        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+        possibly adjusted to fit into the bounds. For ``method='3-point'``
+        the sign of `h` is ignored. If None (default) then step is selected
+        automatically.
+
+    Notes
+    -----
+    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
+    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
+    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
+    I.e., `factr` multiplies the default machine floating-point precision to
+    arrive at `ftol`.
+
+    """
+    _check_unknown_options(unknown_options)
+    m = maxcor
+    pgtol = gtol
+    factr = ftol / np.finfo(float).eps
+
+    x0 = asarray(x0).ravel()
+    n, = x0.shape
+
+    # historically old-style bounds were/are expected by lbfgsb.
+    # That's still the case but we'll deal with new-style from here on,
+    # it's easier
+    if bounds is None:
+        pass
+    elif len(bounds) != n:
+        raise ValueError('length of x0 != length of bounds')
+    else:
+        bounds = np.array(old_bound_to_new(bounds))
+
+        # check bounds
+        if (bounds[0] > bounds[1]).any():
+            raise ValueError(
+                "LBFGSB - one of the lower bounds is greater than an upper bound."
+            )
+
+        # initial vector must lie within the bounds. Otherwise ScalarFunction and
+        # approx_derivative will cause problems
+        x0 = np.clip(x0, bounds[0], bounds[1])
+
+    if disp is not None:
+        if disp == 0:
+            iprint = -1
+        else:
+            iprint = disp
+
+    # _prepare_scalar_function can use bounds=None to represent no bounds
+    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
+                                  bounds=bounds,
+                                  finite_diff_rel_step=finite_diff_rel_step)
+
+    func_and_grad = sf.fun_and_grad
+
+    fortran_int = _lbfgsb.types.intvar.dtype
+
+    nbd = zeros(n, fortran_int)
+    low_bnd = zeros(n, float64)
+    upper_bnd = zeros(n, float64)
+    bounds_map = {(-np.inf, np.inf): 0,
+                  (1, np.inf): 1,
+                  (1, 1): 2,
+                  (-np.inf, 1): 3}
+
+    if bounds is not None:
+        for i in range(0, n):
+            l, u = bounds[0, i], bounds[1, i]
+            if not np.isinf(l):
+                low_bnd[i] = l
+                l = 1
+            if not np.isinf(u):
+                upper_bnd[i] = u
+                u = 1
+            nbd[i] = bounds_map[l, u]
+
+    if not maxls > 0:
+        raise ValueError('maxls must be positive.')
+
+    x = array(x0, float64)
+    f = array(0.0, float64)
+    g = zeros((n,), float64)
+    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
+    iwa = zeros(3*n, fortran_int)
+    task = zeros(1, 'S60')
+    csave = zeros(1, 'S60')
+    lsave = zeros(4, fortran_int)
+    isave = zeros(44, fortran_int)
+    dsave = zeros(29, float64)
+
+    task[:] = 'START'
+
+    n_iterations = 0
+
+    while 1:
+        # g may become float32 if a user provides a function that calculates
+        # the Jacobian in float32 (see gh-18730). The underlying Fortran code
+        # expects float64, so upcast it
+        g = g.astype(np.float64)
+        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
+        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
+                       pgtol, wa, iwa, task, iprint, csave, lsave,
+                       isave, dsave, maxls)
+        task_str = task.tobytes()
+        if task_str.startswith(b'FG'):
+            # The minimization routine wants f and g at the current x.
+            # Note that interruptions due to maxfun are postponed
+            # until the completion of the current minimization iteration.
+            # Overwrite f and g:
+            f, g = func_and_grad(x)
+            if sf.nfev > maxfun:
+                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
+                           'EXCEEDS LIMIT')
+        elif task_str.startswith(b'NEW_X'):
+            # new iteration
+            n_iterations += 1
+
+            intermediate_result = OptimizeResult(x=x, fun=f)
+            if _call_callback_maybe_halt(callback, intermediate_result):
+                task[:] = 'STOP: CALLBACK REQUESTED HALT'
+            if n_iterations >= maxiter:
+                task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
+            elif sf.nfev > maxfun:
+                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
+                           'EXCEEDS LIMIT')
+        else:
+            break
+
+    task_str = task.tobytes().strip(b'\x00').strip()
+    if task_str.startswith(b'CONV'):
+        warnflag = 0
+    elif sf.nfev > maxfun or n_iterations >= maxiter:
+        warnflag = 1
+    else:
+        warnflag = 2
+
+    # These two portions of the workspace are described in the mainlb
+    # subroutine in lbfgsb.f. See line 363.
+    s = wa[0: m*n].reshape(m, n)
+    y = wa[m*n: 2*m*n].reshape(m, n)
+
+    # See lbfgsb.f line 160 for this portion of the workspace.
+    # isave(31) = the total number of BFGS updates prior the current iteration;
+    n_bfgs_updates = isave[30]
+
+    n_corrs = min(n_bfgs_updates, maxcor)
+    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
+
+    task_str = task_str.decode()
+    return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
+                          njev=sf.ngev,
+                          nit=n_iterations, status=warnflag, message=task_str,
+                          x=x, success=(warnflag == 0), hess_inv=hess_inv)
+
+
+class LbfgsInvHessProduct(LinearOperator):
+    """Linear operator for the L-BFGS approximate inverse Hessian.
+
+    This operator computes the product of a vector with the approximate inverse
+    of the Hessian of the objective function, using the L-BFGS limited
+    memory approximation to the inverse Hessian, accumulated during the
+    optimization.
+
+    Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
+    interface.
+
+    Parameters
+    ----------
+    sk : array_like, shape=(n_corr, n)
+        Array of `n_corr` most recent updates to the solution vector.
+        (See [1]).
+    yk : array_like, shape=(n_corr, n)
+        Array of `n_corr` most recent updates to the gradient. (See [1]).
+
+    References
+    ----------
+    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
+       storage." Mathematics of computation 35.151 (1980): 773-782.
+
+    """
+
+    def __init__(self, sk, yk):
+        """Construct the operator."""
+        if sk.shape != yk.shape or sk.ndim != 2:
+            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
+        n_corrs, n = sk.shape
+
+        super().__init__(dtype=np.float64, shape=(n, n))
+
+        self.sk = sk
+        self.yk = yk
+        self.n_corrs = n_corrs
+        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
+
+    def _matvec(self, x):
+        """Efficient matrix-vector multiply with the BFGS matrices.
+
+        This calculation is described in Section (4) of [1].
+
+        Parameters
+        ----------
+        x : ndarray
+            An array with shape (n,) or (n,1).
+
+        Returns
+        -------
+        y : ndarray
+            The matrix-vector product
+
+        """
+        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
+        q = np.array(x, dtype=self.dtype, copy=True)
+        if q.ndim == 2 and q.shape[1] == 1:
+            q = q.reshape(-1)
+
+        alpha = np.empty(n_corrs)
+
+        for i in range(n_corrs-1, -1, -1):
+            alpha[i] = rho[i] * np.dot(s[i], q)
+            q = q - alpha[i]*y[i]
+
+        r = q
+        for i in range(n_corrs):
+            beta = rho[i] * np.dot(y[i], r)
+            r = r + s[i] * (alpha[i] - beta)
+
+        return r
+
+    def todense(self):
+        """Return a dense array representation of this operator.
+
+        Returns
+        -------
+        arr : ndarray, shape=(n, n)
+            An array with the same shape and containing
+            the same data represented by this `LinearOperator`.
+ + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + I = np.eye(*self.shape, dtype=self.dtype) + Hk = I + + for i in range(n_corrs): + A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] + A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] + + Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * + s[i][np.newaxis, :]) + return Hk diff --git a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py index bf5e279..2e31e58 100644 --- a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py @@ -128,6 +128,8 @@ def psas3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/std3dvar.py b/src/daComposant/daAlgorithms/Atoms/std3dvar.py index 8fa53f3..303845d 100644 --- a/src/daComposant/daAlgorithms/Atoms/std3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std3dvar.py @@ -131,6 +131,8 @@ def std3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/std4dvar.py b/src/daComposant/daAlgorithms/Atoms/std4dvar.py index e4d1af9..6af5464 100644 --- a/src/daComposant/daAlgorithms/Atoms/std4dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std4dvar.py @@ -192,6 +192,8 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/van3dvar.py b/src/daComposant/daAlgorithms/Atoms/van3dvar.py index 8c44da3..34eb24d 100644 --- a/src/daComposant/daAlgorithms/Atoms/van3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/van3dvar.py @@ -139,6 +139,8 @@ def van3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur + elif vt("1.13.0") <= vt(scipy.version.version) <= vt("1.13.99"): + import daAlgorithms.Atoms.lbfgsb113hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py index 8018de5..65891bb 100644 --- a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py +++ b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py 
diff --git a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
index 8018de5..65891bb 100644
--- a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
@@ -73,7 +73,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             name     = "EstimationOf",
             default  = "State",
             typecast = str,
-            message  = "Estimation d'etat ou de parametres",
+            message  = "Estimation d'état ou de paramètres",
             listval  = ["State", "Parameters"],
             )
         self.defineRequiredParameter(
diff --git a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
index fe8c37d..63a7ee7 100644
--- a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
@@ -52,7 +52,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             name     = "EstimationOf",
             default  = "State",
             typecast = str,
-            message  = "Estimation d'etat ou de parametres",
+            message  = "Estimation d'état ou de paramètres",
             listval  = ["State", "Parameters"],
             )
         self.defineRequiredParameter(
diff --git a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
index d0d6a15..1147c35 100644
--- a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
@@ -51,7 +51,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             name     = "EstimationOf",
             default  = "State",
             typecast = str,
-            message  = "Estimation d'etat ou de parametres",
+            message  = "Estimation d'état ou de paramètres",
             listval  = ["State", "Parameters"],
             )
         self.defineRequiredParameter(
diff --git a/src/daSalome/__init__.py b/src/daSalome/__init__.py
index 29c53cd..c2bcae2 100644
--- a/src/daSalome/__init__.py
+++ b/src/daSalome/__init__.py
@@ -129,8 +129,11 @@
 import os, sys, logging
 adao_py_dir = os.path.abspath(os.path.dirname(__file__)) # Variable témoin
 sys.path.insert(0, adao_py_dir)
-from daCore import Persistence # PathManagement activé dans Persistence
 from daCore.version import name, version, year, date, __version__
+try:
+    from daCore import Persistence # PathManagement activé dans Persistence
+except:
+    logging.debug("INIT Pas de chargement initial de Persistence")
 try:
     from daYacsIntegration.daOptimizerLoop import *
 except:
-- 
2.39.2
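
The Notes section of the vendored lbfgsb113hlt.py documents the ftol/factr correspondence (``ftol = factr * numpy.finfo(float).eps``). The sketch below, which is not part of the patch, checks that correspondence against plain SciPy with no ADAO dependency; the Rosenbrock function is just an arbitrary test objective.

    import numpy as np
    from scipy.optimize import fmin_l_bfgs_b, minimize

    def rosen(x):
        """Rosenbrock test function, minimum 0 at (1, 1)."""
        return (1.0 - x[0])**2 + 100.0*(x[1] - x[0]**2)**2

    factr = 1e7                          # fmin_l_bfgs_b default
    ftol = factr * np.finfo(float).eps   # equivalent minimize() tolerance

    # Both entry points run the same Fortran routine with the same stopping rule.
    res = minimize(rosen, np.zeros(2), method='L-BFGS-B', options={'ftol': ftol})
    xopt, fopt, info = fmin_l_bfgs_b(rosen, np.zeros(2), approx_grad=True,
                                     factr=factr)
    print(res.x, xopt)  # both should approach [1, 1]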
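
Similarly, the `_matvec` two-loop recursion and the dense compact form built by `todense` are two representations of the same inverse-Hessian approximation. A quick consistency check using the public scipy.optimize.LbfgsInvHessProduct, the upstream original of the class vendored above (random update vectors here, chosen only for illustration):

    import numpy as np
    from scipy.optimize import LbfgsInvHessProduct

    rng = np.random.default_rng(0)
    n_corrs, n = 3, 5
    sk = rng.standard_normal((n_corrs, n))  # recent solution updates
    yk = rng.standard_normal((n_corrs, n))  # recent gradient updates

    hess_inv = LbfgsInvHessProduct(sk, yk)
    v = rng.standard_normal(n)

    # The two-loop recursion (matvec) and the dense form must agree.
    assert np.allclose(hess_inv.matvec(v), hess_inv.todense() @ v)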