From c04fc493f2d041cff3f2abd968545f6f8ab450da Mon Sep 17 00:00:00 2001
From: Jean-Philippe ARGAUD
Date: Fri, 8 Mar 2024 09:23:20 +0100
Subject: [PATCH] Documentation corrections and code performance update

---
 README.txt | 4 +-
 doc/en/images/sampling_01_SampleAsnUplet.png | Bin 14632 -> 14632 bytes
 .../sampling_02_SampleAsExplicitHyperCube.png | Bin 14632 -> 14632 bytes
 ...ampling_03_SampleAsMinMaxStepHyperCube.png | Bin 14632 -> 14632 bytes
 ...mpling_04_SampleAsMinMaxLatinHyperCube.png | Bin 15799 -> 15799 bytes
 ...ampling_05_SampleAsMinMaxSobolSequence.png | Bin 16216 -> 16216 bytes
 ...pleAsIndependantRandomVariables_normal.png | Bin 16708 -> 16708 bytes
 ...leAsIndependantRandomVariables_uniform.png | Bin 14004 -> 14004 bytes
 ...leAsIndependantRandomVariables_weibull.png | Bin 13608 -> 13608 bytes
 doc/en/ref_algorithm_3DVAR.rst | 5 +-
 ...f_algorithm_DerivativeFreeOptimization.rst | 9 +-
 .../ref_algorithm_DifferentialEvolution.rst | 10 +-
 ...thm_MeasurementsOptimalPositioningTask.rst | 6 +-
 ...ef_algorithm_ParticleSwarmOptimization.rst | 2 +-
 doc/en/ref_algorithm_TabuSearch.rst | 11 +-
 doc/en/scripts/simple_3DVAR1.png | Bin 40888 -> 40888 bytes
 doc/en/scripts/simple_3DVAR1Plus.png | Bin 42650 -> 42650 bytes
 doc/en/scripts/simple_3DVAR2_state.png | Bin 33178 -> 33178 bytes
 doc/en/scripts/simple_3DVAR2_variance.png | Bin 18819 -> 18819 bytes
 doc/en/scripts/simple_3DVAR3_state.png | Bin 36035 -> 36035 bytes
 doc/en/scripts/simple_3DVAR3_variance.png | Bin 26719 -> 26719 bytes
 .../simple_DerivativeFreeOptimization.png | Bin 40186 -> 40186 bytes
 doc/en/scripts/simple_KalmanFilter1_state.png | Bin 31508 -> 31508 bytes
 .../scripts/simple_KalmanFilter1_variance.png | Bin 25190 -> 25190 bytes
 doc/en/scripts/simple_KalmanFilter2_state.png | Bin 31508 -> 31508 bytes
 .../scripts/simple_KalmanFilter2_variance.png | Bin 25190 -> 25190 bytes
 .../scripts/simple_NonLinearLeastSquares.png | Bin 40186 -> 40186 bytes
 .../simple_ParticleSwarmOptimization1.png | Bin 40413 -> 40413 bytes
 doc/en/snippets/EntryTypeDataFile.rst | 14 +-
 doc/en/snippets/ModuleCompatibility.rst | 6 +-
 doc/en/snippets/ReduceMemoryUse.rst | 11 +
 doc/fr/images/sampling_01_SampleAsnUplet.png | Bin 15888 -> 15888 bytes
 .../sampling_02_SampleAsExplicitHyperCube.png | Bin 15888 -> 15888 bytes
 ...ampling_03_SampleAsMinMaxStepHyperCube.png | Bin 15888 -> 15888 bytes
 ...mpling_04_SampleAsMinMaxLatinHyperCube.png | Bin 17108 -> 17108 bytes
 ...ampling_05_SampleAsMinMaxSobolSequence.png | Bin 17436 -> 17436 bytes
 ...pleAsIndependantRandomVariables_normal.png | Bin 17891 -> 17891 bytes
 ...leAsIndependantRandomVariables_uniform.png | Bin 15251 -> 15251 bytes
 ...leAsIndependantRandomVariables_weibull.png | Bin 14777 -> 14777 bytes
 doc/fr/ref_algorithm_3DVAR.rst | 17 +-
 ...f_algorithm_DerivativeFreeOptimization.rst | 7 +-
 .../ref_algorithm_DifferentialEvolution.rst | 7 +-
 ...thm_MeasurementsOptimalPositioningTask.rst | 5 +-
 ...ef_algorithm_ParticleSwarmOptimization.rst | 18 +-
 doc/fr/ref_algorithm_TabuSearch.rst | 10 +-
 doc/fr/scripts/simple_3DVAR1.png | Bin 41144 -> 41144 bytes
 doc/fr/scripts/simple_3DVAR1Plus.png | Bin 43869 -> 43869 bytes
 doc/fr/scripts/simple_3DVAR2_state.png | Bin 31971 -> 31971 bytes
 doc/fr/scripts/simple_3DVAR2_variance.png | Bin 19002 -> 19002 bytes
 doc/fr/scripts/simple_3DVAR3_state.png | Bin 34719 -> 34719 bytes
 doc/fr/scripts/simple_3DVAR3_variance.png | Bin 26989 -> 26989 bytes
 .../simple_DerivativeFreeOptimization.png | Bin 40395 -> 40395 bytes
 doc/fr/scripts/simple_KalmanFilter1_state.png | Bin 30177 -> 30177 bytes
 .../scripts/simple_KalmanFilter1_variance.png | Bin 25376 -> 25376 bytes
 doc/fr/scripts/simple_KalmanFilter2_state.png | Bin 30177 -> 30177 bytes
 .../scripts/simple_KalmanFilter2_variance.png | Bin 25376 -> 25376 bytes
 .../scripts/simple_NonLinearLeastSquares.png | Bin 40395 -> 40395 bytes
 .../simple_ParticleSwarmOptimization1.png | Bin 40622 -> 40622 bytes
 doc/fr/snippets/EntryTypeDataFile.rst | 7 +-
 doc/fr/snippets/ModuleCompatibility.rst | 6 +-
 doc/fr/snippets/ReduceMemoryUse.rst | 12 +
 doc/fr/snippets/SetDebug.rst | 2 +-
 src/daComposant/daAlgorithms/Atoms/c2ukf.py | 79 +--
 src/daComposant/daAlgorithms/Atoms/ecwdeim.py | 30 +-
 src/daComposant/daAlgorithms/Atoms/ecweim.py | 69 +--
 src/daComposant/daAlgorithms/Atoms/ecwnlls.py | 2 +
 .../daAlgorithms/Atoms/ecwubfeim.py | 194 +++++++
 src/daComposant/daAlgorithms/Atoms/ecwukf.py | 264 +++++++++
 .../daAlgorithms/Atoms/incr3dvar.py | 2 +
 .../daAlgorithms/Atoms/lbfgsb112hlt.py | 514 ++++++++++++++++++
 .../daAlgorithms/Atoms/psas3dvar.py | 2 +
 .../daAlgorithms/Atoms/std3dvar.py | 2 +
 .../daAlgorithms/Atoms/std4dvar.py | 2 +
 src/daComposant/daAlgorithms/Atoms/uskf.py | 70 +--
 .../daAlgorithms/Atoms/van3dvar.py | 2 +
 .../MeasurementsOptimalPositioningTask.py | 32 +-
 .../daAlgorithms/ReducedModelingTest.py | 10 +-
 .../daAlgorithms/UnscentedKalmanFilter.py | 15 +-
 src/daComposant/daCore/BasicObjects.py | 22 +-
 src/daComposant/daCore/Interfaces.py | 6 +-
 src/daComposant/daCore/NumericObjects.py | 114 +++-
 src/daComposant/daCore/Persistence.py | 65 ++-
 82 files changed, 1421 insertions(+), 244 deletions(-)
 create mode 100644 doc/en/snippets/ReduceMemoryUse.rst
 create mode 100644 doc/fr/snippets/ReduceMemoryUse.rst
 create mode 100644 src/daComposant/daAlgorithms/Atoms/ecwubfeim.py
 create mode 100644 src/daComposant/daAlgorithms/Atoms/ecwukf.py
 create mode 100644 src/daComposant/daAlgorithms/Atoms/lbfgsb112hlt.py

diff --git a/README.txt b/README.txt
index 5aecbf8..4518d49 100644
--- a/README.txt
+++ b/README.txt
@@ -2,8 +2,8 @@
 ADAO: A module for Data Assimilation and Optimization
 =====================================================
 
-About
------
+About ADAO: A module for Data Assimilation and Optimization
+-----------------------------------------------------------
 
 **The ADAO module provides data assimilation and optimization** features in
 Python or SALOME context (see http://www.salome-platform.org/). Briefly stated,
[GIT binary patch series for the eight doc/en/images/sampling_*.png figures omitted]
diff --git a/doc/en/ref_algorithm_3DVAR.rst b/doc/en/ref_algorithm_3DVAR.rst
index b14ef78..6d6f302 100644
--- a/doc/en/ref_algorithm_3DVAR.rst
+++ b/doc/en/ref_algorithm_3DVAR.rst
@@ -60,8 +60,9 @@ of the initial point of their minimization, even if it is not recommended.
 This mono-objective optimization algorithm is naturally written for a single
 estimate, without any dynamic or iterative notion (there is no need in this
 case for an incremental evolution operator, nor for an evolution error
-covariance). In ADAO, it can also be used on a succession of observations,
-placing the estimate in a recursive framework similar to a
+covariance). In the temporal or iterative data assimilation framework that
+ADAO traditionally addresses, it can also be used on a succession of
+observations, placing the estimate in a recursive framework similar to a
 :ref:`section_ref_algorithm_KalmanFilter`. A standard estimate is made at each
 observation step on the state predicted by the incremental evolution model,
 knowing that the state error covariance remains the background covariance
diff --git a/doc/en/ref_algorithm_DerivativeFreeOptimization.rst b/doc/en/ref_algorithm_DerivativeFreeOptimization.rst
index e5e5308..e8af797 100644
--- a/doc/en/ref_algorithm_DerivativeFreeOptimization.rst
+++ b/doc/en/ref_algorithm_DerivativeFreeOptimization.rst
@@ -31,10 +31,11 @@ Calculation algorithm "*DerivativeFreeOptimization*"
 .. include:: snippets/Header2Algo01.rst
 
 This algorithm realizes an estimation of the state of a system by minimization
-of a cost function :math:`J` without gradient. It is a method that does not use
-the derivatives of the cost function. It falls in the same category than the
-:ref:`section_ref_algorithm_ParticleSwarmOptimization`, the
-:ref:`section_ref_algorithm_DifferentialEvolution` or the
+without gradient of a cost function :math:`J`, using a simplex-type or
+similar approximation search method. It is a method that does not use the
+derivatives of the cost function. It falls in the same category as the
+:ref:`section_ref_algorithm_DifferentialEvolution`,
+:ref:`section_ref_algorithm_ParticleSwarmOptimization` or
 :ref:`section_ref_algorithm_TabuSearch`.
 
 This is a mono-objective optimization method allowing for global minimum search
diff --git a/doc/en/ref_algorithm_DifferentialEvolution.rst b/doc/en/ref_algorithm_DifferentialEvolution.rst
index d23c4d2..6bb41b9 100644
--- a/doc/en/ref_algorithm_DifferentialEvolution.rst
+++ b/doc/en/ref_algorithm_DifferentialEvolution.rst
@@ -31,11 +31,11 @@ Calculation algorithm "*DifferentialEvolution*"
 .. include:: snippets/Header2Algo01.rst
 
 This algorithm realizes an estimation of the state of a system by minimization
-of a cost function :math:`J` by using an evolutionary strategy of differential
-evolution. It is a method that does not use the derivatives of the cost
-function. It falls in the same category than the
-:ref:`section_ref_algorithm_DerivativeFreeOptimization`, the
-:ref:`section_ref_algorithm_ParticleSwarmOptimization` or the
+without gradient of a cost function :math:`J`, using an evolutionary strategy
+of differential evolution. It is a method that does not use the derivatives of
+the cost function. It falls in the same category as the
+:ref:`section_ref_algorithm_DerivativeFreeOptimization`,
+:ref:`section_ref_algorithm_ParticleSwarmOptimization` or
+:ref:`section_ref_algorithm_TabuSearch`.
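A minimal sketch of the single-estimate 3DVAR use described above, assuming
the standard "adaoBuilder" textual interface of ADAO and an identity
observation operator (all numerical values are purely illustrative)::

    from adao import adaoBuilder
    case = adaoBuilder.New()
    case.set( 'AlgorithmParameters', Algorithm='3DVAR' )
    case.set( 'Background',          Vector=[0., 1., 2.] )
    case.set( 'BackgroundError',     ScalarSparseMatrix=1. )
    case.set( 'Observation',         Vector=[0.5, 1.5, 2.5] )
    case.set( 'ObservationError',    ScalarSparseMatrix=1. )
    case.set( 'ObservationOperator', Matrix=[[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]] )
    case.execute()
    print( case.get('Analysis')[-1] )  # the optimal state estimate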
 This is an optimization method allowing for global minimum search of a general
diff --git a/doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst b/doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst
index c7f3be3..40d9b1c 100644
--- a/doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst
+++ b/doc/en/ref_algorithm_MeasurementsOptimalPositioningTask.rst
@@ -83,7 +83,9 @@ computations are optimized according to the computer resources available and
 the options requested by the user. You can refer to the
 :ref:`section_ref_sampling_requirements` for an illustration of sampling.
 Beware of the size of the hypercube (and then to the number of computations)
-that can be reached, it can grow quickly to be quite large.
+that can be reached, it can grow quickly to be quite large. The memory
+required is then the product of the size of an individual state
+:math:`\mathbf{y}` and the number of points in the hypercube.
 
 .. _mop_determination:
 .. image:: images/mop_determination.png
@@ -116,6 +118,8 @@ constrained positioning search.
 
 .. include:: snippets/NameOfLocations.rst
 
+.. include:: snippets/ReduceMemoryUse.rst
+
 .. include:: snippets/SampleAsExplicitHyperCube.rst
 
 .. include:: snippets/SampleAsIndependantRandomVariables.rst
diff --git a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst
index cd3e6fb..38595b8 100644
--- a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst
+++ b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst
@@ -114,7 +114,7 @@ when the dimension of the state space is large, or when the non-linearities of
 the simulation make the evaluation of the gradient of the functional by
 numerical approximation complicated or invalid. But it is also necessary that
 the calculation of the function to be simulated is not too costly to avoid a
-prohibitive optimization time.
+prohibitive total optimization time.
 
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
diff --git a/doc/en/ref_algorithm_TabuSearch.rst b/doc/en/ref_algorithm_TabuSearch.rst
index cf26253..f09f610 100644
--- a/doc/en/ref_algorithm_TabuSearch.rst
+++ b/doc/en/ref_algorithm_TabuSearch.rst
@@ -31,11 +31,12 @@ Calculation algorithm "*TabuSearch*"
 .. include:: snippets/Header2Algo01.rst
 
 This algorithm realizes an estimation of the state of a system by minimization
-of a cost function :math:`J` without gradient. It is a method that does not use
-the derivatives of the cost function. It falls in the same category than the
-:ref:`section_ref_algorithm_DerivativeFreeOptimization`, the
-:ref:`section_ref_algorithm_ParticleSwarmOptimization` or the
-:ref:`section_ref_algorithm_DifferentialEvolution`.
+without gradient of a cost function :math:`J`, using a Tabu list search
+method. It is a method that does not use the derivatives of the cost function.
+It falls in the same category as the
+:ref:`section_ref_algorithm_DerivativeFreeOptimization`,
+:ref:`section_ref_algorithm_DifferentialEvolution` or
+:ref:`section_ref_algorithm_ParticleSwarmOptimization`.
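To make the memory estimate for "*MeasurementsOptimalPositioningTask*" above
concrete, a rough order-of-magnitude computation, assuming float64 storage
and purely illustrative sizes::

    nb_points  = 10000     # number of points in the sampling hypercube
    state_size = 100000    # size of one simulated state y
    bytes_per_value = 8    # float64 storage
    total = nb_points * state_size * bytes_per_value
    print( total / 2.0**30, "GiB" )  # ~7.45 GiB for the full set of snapshots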
 This is a mono-objective optimization method allowing for global minimum search
 of a general error function :math:`J` of type :math:`L^1`, :math:`L^2` or
[GIT binary patch series for the thirteen doc/en/scripts/simple_*.png figures omitted]
diff --git a/doc/en/snippets/EntryTypeDataFile.rst b/doc/en/snippets/EntryTypeDataFile.rst
index 3b817df..855e69b 100644
--- a/doc/en/snippets/EntryTypeDataFile.rst
+++ b/doc/en/snippets/EntryTypeDataFile.rst
@@ -22,10 +22,10 @@
 be ordered in lines to be acquired record row by record row ("ColMajor=True").
 
-    Without information or with a void list, all the values of all the
-    variables are used, but one can also select only the ones of the variables
-    that are indicated in the name list "ColNames". the variable names are
-    always as header of columns.
+    Without information, or with an empty list for the variable names, all
+    the values of all the variables are used, but one can also select only
+    the variables indicated in the name list "ColNames". The variable names
+    always appear as column headers.
 
 Example of CSV file for "*Observation*" variable in "*DataFile*" ::
@@ -59,7 +59,7 @@
 .. warning::
 
     It is recommended, before using them as an input of this type, to carefully
-    check text or binary files . Various checks are carried out on loading, but
+    check text or binary files. Various checks are carried out on loading, but
     the variety of potential errors is great. In practice, by respecting the
-    requirements for naming variables and comments, text files from programs or
-    spreadsheets are (most of the time) compatible.
+    requirements for naming variables and for comments, text files from
+    standard programs or spreadsheets are (most of the time) compatible.
diff --git a/doc/en/snippets/ModuleCompatibility.rst b/doc/en/snippets/ModuleCompatibility.rst
index 4da838b..46c2f9a 100644
--- a/doc/en/snippets/ModuleCompatibility.rst
+++ b/doc/en/snippets/ModuleCompatibility.rst
@@ -15,8 +15,8 @@ versions within the range described below.
    :widths: 20, 10, 10
 
    Python, 3.6.5, 3.11.7
-   Numpy, 1.14.3, 1.26.2
-   Scipy, 0.19.1, 1.11.4
-   MatplotLib, 2.2.2, 3.8.2
+   Numpy, 1.14.3, 1.26.4
+   Scipy, 0.19.1, 1.12.0
+   MatplotLib, 2.2.2, 3.8.3
    GnuplotPy, 1.8, 1.8
    NLopt, 2.4.2, 2.7.1
diff --git a/doc/en/snippets/ReduceMemoryUse.rst b/doc/en/snippets/ReduceMemoryUse.rst
new file mode 100644
index 0000000..cf7ab2c
--- /dev/null
+++ b/doc/en/snippets/ReduceMemoryUse.rst
@@ -0,0 +1,11 @@
+.. index:: single: ReduceMemoryUse
+
+ReduceMemoryUse
+  *Boolean value*. This variable activates, or not, the memory footprint
+  reduction mode at runtime, at the cost of a potential increase in
+  calculation time. Beyond a certain precision (1.e-12 to 1.e-14), usually
+  close to machine precision (1.e-16), the results may then differ slightly.
+  The default is "False", the choices are "True" or "False".
+
+  Example:
+  ``{"ReduceMemoryUse":False}``
[GIT binary patch series for the eight doc/fr/images/sampling_*.png figures omitted]
[GIT binary patch series for doc/fr/scripts/simple_3DVAR*.png, simple_DerivativeFreeOptimization.png, simple_KalmanFilter1_*.png and simple_KalmanFilter2_state.png omitted]
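In use, the "ReduceMemoryUse" option documented above is passed like any other
algorithm parameter. A minimal sketch, assuming the standard "adaoBuilder"
textual interface (only the relevant fragment is shown; operator and sampling
definitions are omitted)::

    from adao import adaoBuilder
    case = adaoBuilder.New()
    # fragment: activate the memory footprint reduction mode
    case.set( 'AlgorithmParameters',
              Algorithm  = 'MeasurementsOptimalPositioningTask',
              Parameters = { 'ReduceMemoryUse' : True },
              )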
[GIT binary patch series for doc/fr/scripts/simple_KalmanFilter2_variance.png, simple_NonLinearLeastSquares.png and simple_ParticleSwarmOptimization1.png omitted]
diff --git a/doc/fr/snippets/EntryTypeDataFile.rst b/doc/fr/snippets/EntryTypeDataFile.rst
index 2d4bf59..1b31b88 100644
--- a/doc/fr/snippets/EntryTypeDataFile.rst
+++ b/doc/fr/snippets/EntryTypeDataFile.rst
@@ -25,9 +25,10 @@
 ("ColMajor=True").
 
-    Sans précision ou avec une liste vide pour les noms de variables, on
-    utilise les valeurs toutes les variables, mais on peut aussi sélectionner
-    uniquement celles des variables indiquées dans la liste de noms fournie
-    dans "ColNames". Les noms de variable sont toujours en entête de colonne.
+    Sans précision ou avec une liste vide pour les noms de variables, on
+    utilise les valeurs de toutes les variables, mais on peut aussi
+    sélectionner uniquement celles des variables indiquées dans la liste de
+    noms fournie dans "ColNames". Les noms de variable sont toujours en entête
+    de colonne.
 
 Exemple de fichier CSV pour la variable "*Observation*" en "*DataFile*" ::
diff --git a/doc/fr/snippets/ModuleCompatibility.rst b/doc/fr/snippets/ModuleCompatibility.rst
index c05bea8..f65a502 100644
--- a/doc/fr/snippets/ModuleCompatibility.rst
+++ b/doc/fr/snippets/ModuleCompatibility.rst
@@ -16,8 +16,8 @@ l'étendue décrite ci-dessous.
    :widths: 20, 10, 10
 
    Python, 3.6.5, 3.11.7
-   Numpy, 1.14.3, 1.26.2
-   Scipy, 0.19.1, 1.11.4
-   MatplotLib, 2.2.2, 3.8.2
+   Numpy, 1.14.3, 1.26.4
+   Scipy, 0.19.1, 1.12.0
+   MatplotLib, 2.2.2, 3.8.3
    GnuplotPy, 1.8, 1.8
    NLopt, 2.4.2, 2.7.1
diff --git a/doc/fr/snippets/ReduceMemoryUse.rst b/doc/fr/snippets/ReduceMemoryUse.rst
new file mode 100644
index 0000000..d8e472b
--- /dev/null
+++ b/doc/fr/snippets/ReduceMemoryUse.rst
@@ -0,0 +1,12 @@
+.. index:: single: ReduceMemoryUse
+
+ReduceMemoryUse
+  *Valeur booléenne*. La variable conduit à l'activation, ou pas, du mode de
+  réduction de l'empreinte mémoire lors de l'exécution, au prix d'une
+  augmentation potentielle du temps de calcul. Les résultats peuvent différer
+  à partir d'une certaine précision (1.e-12 à 1.e-14), usuellement proche de
+  la précision machine (1.e-16). La valeur par défaut est "False", les choix
+  sont "True" ou "False".
+
+  Exemple :
+  ``{"ReduceMemoryUse":False}``
diff --git a/doc/fr/snippets/SetDebug.rst b/doc/fr/snippets/SetDebug.rst
index 0928a9d..021bde7 100644
--- a/doc/fr/snippets/SetDebug.rst
+++ b/doc/fr/snippets/SetDebug.rst
@@ -3,7 +3,7 @@ SetDebug
   *Valeur booléenne*. La variable conduit à l'activation, ou pas, du mode de
   débogage durant l'évaluation de la fonction ou de l'opérateur. La valeur par
-  défaut est "True", les choix sont "True" ou "False".
+  défaut est "False", les choix sont "True" ou "False".
 
   Exemple :
   ``{"SetDebug":False}``
diff --git a/src/daComposant/daAlgorithms/Atoms/c2ukf.py b/src/daComposant/daAlgorithms/Atoms/c2ukf.py
index 9f945f9..93b940d 100644
--- a/src/daComposant/daAlgorithms/Atoms/c2ukf.py
+++ b/src/daComposant/daAlgorithms/Atoms/c2ukf.py
@@ -25,11 +25,9 @@ __doc__ = """
 """
 __author__ = "Jean-Philippe ARGAUD"
 
-import math, numpy, scipy
+import math, numpy, scipy, copy
+from daCore.PlatformInfo import vfloat
 from daCore.NumericObjects import ApplyBounds, ForceNumericBounds
-from daCore.PlatformInfo import PlatformInfo, vfloat
-mpr = PlatformInfo().MachinePrecision()
-mfp = PlatformInfo().MaximumPrecision()
 
 # ==============================================================================
 def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
@@ -120,80 +118,83 @@ def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         Cm = None
     #
     Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
-    Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
+    Xnmu = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
     nbSpts = 2*Xn.size+1
     #
     if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
         for point in range(nbSpts):
-            Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
+            Xnmu[:,point] = ApplyBounds( Xnmu[:,point], selfA._parameters["Bounds"] )
     #
-    XEtnnp = []
+    XEnnmu = []
     for point in range(nbSpts):
         if selfA._parameters["EstimationOf"] == "State":
             Mm = EM["Direct"].appliedControledFormTo
-            XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
+            XEnnmui = numpy.asarray( Mm( (Xnmu[:,point], Un) ) ).reshape((-1,1))
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                XEtnnpi = XEtnnpi + Cm @ Un
+                XEnnmui = XEnnmui + Cm @ Un
             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-                XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
+                XEnnmui = ApplyBounds( XEnnmui, selfA._parameters["Bounds"] )
         elif selfA._parameters["EstimationOf"] == "Parameters":
             # --- > Par principe, M = Id, Q = 0
-            XEtnnpi = Xnp[:,point]
-        XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
-    XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
+            XEnnmui = Xnmu[:,point]
+        XEnnmu.append( numpy.ravel(XEnnmui).reshape((-1,1)) )
+    XEnnmu = numpy.concatenate( XEnnmu, axis=1 )
     #
-    Xncm = ( XEtnnp * Wm ).sum(axis=1)
+    Xhmn = ( XEnnmu * Wm ).sum(axis=1)
     #
     if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-        Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
+        Xhmn = ApplyBounds( Xhmn, selfA._parameters["Bounds"] )
    #
-    if selfA._parameters["EstimationOf"] == "State": Pnm = Q
-    elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
+    if selfA._parameters["EstimationOf"] == "State": Pmn = copy.copy(Q)
+    elif selfA._parameters["EstimationOf"] == "Parameters": Pmn = 0.
     for point in range(nbSpts):
-        Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
+        dXEnnmuXhmn = XEnnmu[:,point].flat-Xhmn
+        Pmn += Wc[point] * numpy.outer(dXEnnmuXhmn, dXEnnmuXhmn) # poids du point courant (et non Wc[i], indice périmé)
     #
     if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
-        Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
+        Pmndemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pmn))
     else:
-        Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
+        Pmndemi = numpy.real(scipy.linalg.sqrtm(Pmn))
     #
-    Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
+    Xnnmu = numpy.hstack([Xhmn.reshape((-1,1)), Xhmn.reshape((-1,1))+Gamma*Pmndemi, Xhmn.reshape((-1,1))-Gamma*Pmndemi])
     #
     if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
         for point in range(nbSpts):
-            Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
+            Xnnmu[:,point] = ApplyBounds( Xnnmu[:,point], selfA._parameters["Bounds"] )
     #
     Hm = HO["Direct"].appliedControledFormTo
-    Ynnp = []
+    Ynnmu = []
     for point in range(nbSpts):
         if selfA._parameters["EstimationOf"] == "State":
-            Ynnpi = Hm( (Xnnp[:,point], None) )
+            Ynnmui = Hm( (Xnnmu[:,point], None) )
         elif selfA._parameters["EstimationOf"] == "Parameters":
-            Ynnpi = Hm( (Xnnp[:,point], Un) )
-        Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
-    Ynnp = numpy.concatenate( Ynnp, axis=1 )
+            Ynnmui = Hm( (Xnnmu[:,point], Un) )
+        Ynnmu.append( numpy.ravel(Ynnmui).reshape((__p,1)) )
+    Ynnmu = numpy.concatenate( Ynnmu, axis=1 )
     #
-    Yncm = ( Ynnp * Wm ).sum(axis=1)
+    Yhmn = ( Ynnmu * Wm ).sum(axis=1)
     #
-    Pyyn = R
+    Pyyn = copy.copy(R)
     Pxyn = 0.
     for point in range(nbSpts):
-        Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-        Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+        dYnnmuYhmn = Ynnmu[:,point].flat-Yhmn
+        dXnnmuXhmn = Xnnmu[:,point].flat-Xhmn
+        Pyyn += Wc[point] * numpy.outer(dYnnmuYhmn, dYnnmuYhmn) # idem : poids Wc du point courant
+        Pxyn += Wc[point] * numpy.outer(dXnnmuXhmn, dYnnmuYhmn)
     #
     if hasattr(Y,"store"):
         Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
     else:
         Ynpu = numpy.ravel( Y ).reshape((__p,1))
-    _Innovation = Ynpu - Yncm.reshape((-1,1))
+    _Innovation = Ynpu - Yhmn.reshape((-1,1))
     if selfA._parameters["EstimationOf"] == "Parameters":
         if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
_Innovation = _Innovation - Cm @ Un # - Kn = Pxyn * Pyyn.I - Xn = Xncm.reshape((-1,1)) + Kn * _Innovation - Pn = Pnm - Kn * Pyyn * Kn.T + Kn = Pxyn @ Pyyn.I + Xn = Xhmn.reshape((-1,1)) + Kn @ _Innovation + Pn = Pmn - Kn @ (Pyyn @ Kn.T) # if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] ) @@ -216,16 +217,16 @@ def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): or selfA._toStore("CurrentState"): selfA.StoredVariables["CurrentState"].store( Xn ) if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( Xncm ) + selfA.StoredVariables["ForecastState"].store( Xhmn ) if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( Pnm ) + selfA.StoredVariables["ForecastCovariance"].store( Pmn ) if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( Xncm - Xa ) + selfA.StoredVariables["BMA"].store( Xhmn - Xa ) if selfA._toStore("InnovationAtCurrentState"): selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) if selfA._toStore("SimulatedObservationAtCurrentState") \ or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm ) + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yhmn ) # ---> autres if selfA._parameters["StoreInternalVariables"] \ or selfA._toStore("CostFunctionJ") \ diff --git a/src/daComposant/daAlgorithms/Atoms/ecwdeim.py b/src/daComposant/daAlgorithms/Atoms/ecwdeim.py index f752f14..bb2c3af 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecwdeim.py +++ b/src/daComposant/daAlgorithms/Atoms/ecwdeim.py @@ -28,6 +28,8 @@ __author__ = "Jean-Philippe ARGAUD" import numpy, scipy, logging import daCore.Persistence from daCore.NumericObjects import FindIndexesFromNames +from daCore.NumericObjects import InterpolationErrorByColumn +from daCore.NumericObjects import SingularValuesEstimation from daCore.PlatformInfo import vt # ============================================================================== @@ -38,11 +40,12 @@ def DEIM_offline(selfA, EOS = None, Verbose = False): # # Initialisations # --------------- + if numpy.array(EOS).size == 0: + raise ValueError("EnsembleOfSnapshots has not to be void, but an array/matrix (each column being a vector) or a list/tuple (each element being a vector).") if isinstance(EOS, (numpy.ndarray, numpy.matrix)): __EOS = numpy.asarray(EOS) elif isinstance(EOS, (list, tuple, daCore.Persistence.Persistence)): __EOS = numpy.stack([numpy.ravel(_sn) for _sn in EOS], axis=1) - # __EOS = numpy.asarray(EOS).T else: raise ValueError("EnsembleOfSnapshots has to be an array/matrix (each column being a vector) or a list/tuple (each element being a vector).") __dimS, __nbmS = __EOS.shape @@ -88,17 +91,14 @@ def DEIM_offline(selfA, EOS = None, Verbose = False): else: selfA._parameters["EpsilonEIM"] = 1.e-2 # - # Ne pas utiliser : scipy.linalg.svd - __vs = scipy.linalg.svdvals( __EOS ) + __sv, __svsq, __tisv, __qisv = SingularValuesEstimation( __EOS ) if vt(scipy.version.version) < vt("1.1.0"): __rhoM = scipy.linalg.orth( __EOS ) - __rhoM = numpy.compress(__vs > selfA._parameters["EpsilonEIM"]*max(__vs), __rhoM, axis=1) + __rhoM = numpy.compress(__sv > selfA._parameters["EpsilonEIM"]*max(__sv), __rhoM, axis=1) else: __rhoM = scipy.linalg.orth( __EOS, selfA._parameters["EpsilonEIM"] ) __lVs, __svdM = __rhoM.shape assert __lVs == __dimS, "Différence entre lVs et dim(EOS)" - __vs2 = __vs**2 - 
__qivs = 1. - __vs2[:__svdM].cumsum()/__vs2.sum() __maxM = min(__maxM,__svdM) # if __LcCsts and len(__IncludedMagicPoints) > 0: @@ -116,7 +116,12 @@ def DEIM_offline(selfA, EOS = None, Verbose = False): __errors = [] # __M = 1 # Car le premier est déjà construit - __errors.append(__qivs[0]) + if selfA._toStore("Residus"): + __eM, _ = InterpolationErrorByColumn( + __EOS, __Q, __I, __M, + __ErrorNorm = selfA._parameters["ErrorNorm"], + __LcCsts = __LcCsts, __IncludedPoints = __IncludedMagicPoints) + __errors.append(__eM) # # Boucle # ------ @@ -147,14 +152,19 @@ def DEIM_offline(selfA, EOS = None, Verbose = False): )) __Q = numpy.column_stack((__Q, __rhoM[:,__M])) # - __errors.append(__qivs[__M]) __I.append(__iM) __mu.append(None) # Convention + if selfA._toStore("Residus"): + __eM, _ = InterpolationErrorByColumn( + __EOS, __Q, __I, __M+1, + __ErrorNorm = selfA._parameters["ErrorNorm"], + __LcCsts = __LcCsts, __IncludedPoints = __IncludedMagicPoints) + __errors.append(__eM) # __M = __M + 1 # #-------------------------- - if __errors[-1] < selfA._parameters["EpsilonEIM"]: + if len(__errors)>0 and __errors[-1] < selfA._parameters["EpsilonEIM"]: logging.debug("%s %s (%.1e)"%(selfA._name,"The convergence is obtained when reaching the required EIM tolerance",selfA._parameters["EpsilonEIM"])) if __M >= __maxM: logging.debug("%s %s (%i)"%(selfA._name,"The convergence is obtained when reaching the maximum number of RB dimension",__maxM)) @@ -169,7 +179,7 @@ def DEIM_offline(selfA, EOS = None, Verbose = False): if selfA._toStore("ExcludedPoints"): selfA.StoredVariables["ExcludedPoints"].store( __ExcludedMagicPoints ) if selfA._toStore("SingularValues"): - selfA.StoredVariables["SingularValues"].store( __vs ) + selfA.StoredVariables["SingularValues"].store( __sv ) # return __mu, __I, __Q, __errors diff --git a/src/daComposant/daAlgorithms/Atoms/ecweim.py b/src/daComposant/daAlgorithms/Atoms/ecweim.py index 5df1330..ca8b70d 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecweim.py +++ b/src/daComposant/daAlgorithms/Atoms/ecweim.py @@ -28,6 +28,7 @@ __author__ = "Jean-Philippe ARGAUD" import numpy, logging import daCore.Persistence from daCore.NumericObjects import FindIndexesFromNames +from daCore.NumericObjects import InterpolationErrorByColumn # ============================================================================== def EIM_offline(selfA, EOS = None, Verbose = False): @@ -37,21 +38,17 @@ def EIM_offline(selfA, EOS = None, Verbose = False): # # Initialisations # --------------- + if numpy.array(EOS).size == 0: + raise ValueError("EnsembleOfSnapshots has not to be void, but an array/matrix (each column being a vector) or a list/tuple (each element being a vector).") if isinstance(EOS, (numpy.ndarray, numpy.matrix)): __EOS = numpy.asarray(EOS) elif isinstance(EOS, (list, tuple, daCore.Persistence.Persistence)): __EOS = numpy.stack([numpy.ravel(_sn) for _sn in EOS], axis=1) - # __EOS = numpy.asarray(EOS).T else: raise ValueError("EnsembleOfSnapshots has to be an array/matrix (each column being a vector) or a list/tuple (each element being a vector).") __dimS, __nbmS = __EOS.shape logging.debug("%s Using a collection of %i snapshots of individual size of %i"%(selfA._name,__nbmS,__dimS)) # - if selfA._parameters["ErrorNorm"] == "L2": - MaxNormByColumn = MaxL2NormByColumn - else: - MaxNormByColumn = MaxLinfNormByColumn - # if selfA._parameters["Variant"] in ["EIM", "PositioningByEIM"]: __LcCsts = False else: @@ -91,6 +88,10 @@ def EIM_offline(selfA, EOS = None, Verbose = False): 
selfA._parameters["EpsilonEIM"] = selfA._parameters["ErrorNormTolerance"] else: selfA._parameters["EpsilonEIM"] = 1.e-2 + if "ReduceMemoryUse" in selfA._parameters: + rmu = selfA._parameters["ReduceMemoryUse"] + else: + rmu = False # __mu = [] __I = [] @@ -100,8 +101,12 @@ def EIM_offline(selfA, EOS = None, Verbose = False): __M = 0 __rhoM = numpy.empty(__dimS) # - __eM, __muM = MaxNormByColumn(__EOS, __LcCsts, __IncludedMagicPoints) - __residuM = __EOS[:,__muM] + __eM, __muM, __residuM = InterpolationErrorByColumn( + __Differences = __EOS, __M = __M, + __ErrorNorm = selfA._parameters["ErrorNorm"], + __LcCsts = __LcCsts, __IncludedPoints = __IncludedMagicPoints, + __CDM = True, __RMU = rmu, + ) __errors.append(__eM) # # Boucle @@ -130,24 +135,13 @@ def EIM_offline(selfA, EOS = None, Verbose = False): __Q = __rhoM.reshape((-1,1)) __I.append(__iM) # - __restrictedQi = numpy.tril( __Q[__I,:] ) - if __M > 1: - __Qi_inv = numpy.linalg.inv(__restrictedQi) - else: - __Qi_inv = 1. / __restrictedQi - # - __restrictedEOSi = __EOS[__I,:] - # - if __M > 1: - __interpolator = numpy.dot(__Q,numpy.dot(__Qi_inv,__restrictedEOSi)) - else: - __interpolator = numpy.outer(__Q,numpy.outer(__Qi_inv,__restrictedEOSi)) - # - __dataForNextIter = __EOS - __interpolator - __eM, __muM = MaxNormByColumn(__dataForNextIter, __LcCsts, __IncludedMagicPoints) + __eM, __muM, __residuM = InterpolationErrorByColumn( + __Ensemble = __EOS, __Basis = __Q, __Points = __I, __M = __M, + __ErrorNorm = selfA._parameters["ErrorNorm"], + __LcCsts = __LcCsts, __IncludedPoints = __IncludedMagicPoints, + __CDM = True, __RMU = rmu, __FTL = True, + ) __errors.append(__eM) - # - __residuM = __dataForNextIter[:,__muM] # #-------------------------- if __errors[-1] < selfA._parameters["EpsilonEIM"]: @@ -211,31 +205,6 @@ def EIM_online(selfA, QEIM, gJmu = None, mPoints = None, mu = None, PseudoInvers # return __gMmu -# ============================================================================== -def MaxL2NormByColumn(Ensemble, LcCsts = False, IncludedPoints = []): - if LcCsts and len(IncludedPoints) > 0: - normes = numpy.linalg.norm( - numpy.take(Ensemble, IncludedPoints, axis=0, mode='clip'), - axis = 0, - ) - else: - normes = numpy.linalg.norm( Ensemble, axis = 0) - nmax = numpy.max(normes) - imax = numpy.argmax(normes) - return nmax, imax - -def MaxLinfNormByColumn(Ensemble, LcCsts = False, IncludedPoints = []): - if LcCsts and len(IncludedPoints) > 0: - normes = numpy.linalg.norm( - numpy.take(Ensemble, IncludedPoints, axis=0, mode='clip'), - axis = 0, ord=numpy.inf, - ) - else: - normes = numpy.linalg.norm( Ensemble, axis = 0, ord=numpy.inf) - nmax = numpy.max(normes) - imax = numpy.argmax(normes) - return nmax, imax - # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py index bc723dc..5d8e83c 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py +++ b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py @@ -143,6 +143,8 @@ def ecwnlls(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb19hlt as optimiseur elif vt("1.11.0") <= vt(scipy.version.version) <= vt("1.11.99"): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur + elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): + import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = 
optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/ecwubfeim.py b/src/daComposant/daAlgorithms/Atoms/ecwubfeim.py new file mode 100644 index 0000000..233353f --- /dev/null +++ b/src/daComposant/daAlgorithms/Atoms/ecwubfeim.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2008-2024 EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +__doc__ = """ + Empirical Interpolation Method EIM & lcEIM with User Defined Function +""" +__author__ = "Jean-Philippe ARGAUD" + +import numpy, scipy, logging +import daCore.Persistence +from daCore.NumericObjects import FindIndexesFromNames +from daCore.NumericObjects import InterpolationErrorByColumn +from daCore.NumericObjects import SingularValuesEstimation + +# ============================================================================== +def UBFEIM_offline(selfA, EOS = None, Verbose = False): + """ + Établissement de la base + """ + # + # Initialisations + # --------------- + if numpy.array(EOS).size == 0: + raise ValueError("EnsembleOfSnapshots has not to be void, but an array/matrix (each column being a vector) or a list/tuple (each element being a vector).") + if isinstance(EOS, (numpy.ndarray, numpy.matrix)): + __EOS = numpy.asarray(EOS) + elif isinstance(EOS, (list, tuple, daCore.Persistence.Persistence)): + __EOS = numpy.stack([numpy.ravel(_sn) for _sn in EOS], axis=1) + else: + raise ValueError("EnsembleOfSnapshots has to be an array/matrix (each column being a vector) or a list/tuple (each element being a vector).") + __dimS, __nbmS = __EOS.shape + logging.debug("%s Using a collection of %i snapshots of individual size of %i"%(selfA._name,__nbmS,__dimS)) + # + if numpy.array(selfA._parameters["UserBasisFunctions"]).size == 0: + logging.debug("%s Using the snapshots in place of user defined basis functions, the latter being not provided"%(selfA._name)) + UBF = __EOS + else: + UBF = selfA._parameters["UserBasisFunctions"] + if isinstance(UBF, (numpy.ndarray, numpy.matrix)): + __UBF = numpy.asarray(UBF) + elif isinstance(UBF, (list, tuple, daCore.Persistence.Persistence)): + __UBF = numpy.stack([numpy.ravel(_sn) for _sn in UBF], axis=1) + else: + raise ValueError("UserBasisFunctions has to be an array/matrix (each column being a vector) or a list/tuple (each element being a vector).") + assert __EOS.shape[0] == __UBF.shape[0], "Individual snapshot and user defined basis function has to be of the same size, which is false: %i =/= %i"%(__EOS.shape[0], __UBF.shape[0]) + __dimS, __nbmS = __UBF.shape + logging.debug("%s Using a collection of %i user defined basis functions of individual size of %i"%(selfA._name,__nbmS,__dimS)) + # + if selfA._parameters["Variant"] in ["UBFEIM", "PositioningByUBFEIM"]: + __LcCsts = 
False + else: + __LcCsts = True + if __LcCsts and "ExcludeLocations" in selfA._parameters: + __ExcludedMagicPoints = selfA._parameters["ExcludeLocations"] + else: + __ExcludedMagicPoints = () + if __LcCsts and "NameOfLocations" in selfA._parameters: + if isinstance(selfA._parameters["NameOfLocations"], (list, numpy.ndarray, tuple)) and len(selfA._parameters["NameOfLocations"]) == __dimS: + __NameOfLocations = selfA._parameters["NameOfLocations"] + else: + __NameOfLocations = () + else: + __NameOfLocations = () + if __LcCsts and len(__ExcludedMagicPoints) > 0: + __ExcludedMagicPoints = FindIndexesFromNames( __NameOfLocations, __ExcludedMagicPoints ) + __ExcludedMagicPoints = numpy.ravel(numpy.asarray(__ExcludedMagicPoints, dtype=int)) + __IncludedMagicPoints = numpy.setdiff1d( + numpy.arange(__UBF.shape[0]), + __ExcludedMagicPoints, + assume_unique = True, + ) + else: + __IncludedMagicPoints = [] + # + if "MaximumNumberOfLocations" in selfA._parameters and "MaximumRBSize" in selfA._parameters: + selfA._parameters["MaximumRBSize"] = min(selfA._parameters["MaximumNumberOfLocations"],selfA._parameters["MaximumRBSize"]) + elif "MaximumNumberOfLocations" in selfA._parameters: + selfA._parameters["MaximumRBSize"] = selfA._parameters["MaximumNumberOfLocations"] + elif "MaximumRBSize" in selfA._parameters: + pass + else: + selfA._parameters["MaximumRBSize"] = __nbmS + __maxM = min(selfA._parameters["MaximumRBSize"], __dimS, __nbmS) + if "ErrorNormTolerance" in selfA._parameters: + selfA._parameters["EpsilonEIM"] = selfA._parameters["ErrorNormTolerance"] + else: + selfA._parameters["EpsilonEIM"] = 1.e-2 + # + __rhoM = __UBF + # + if __LcCsts and len(__IncludedMagicPoints) > 0: + __iM = numpy.argmax( numpy.abs( + numpy.take(__rhoM[:,0], __IncludedMagicPoints, mode='clip') + )) + else: + __iM = numpy.argmax( numpy.abs( + __rhoM[:,0] + )) + # + __mu = [None,] # Convention + __I = [__iM,] + __Q = __rhoM[:,0].reshape((-1,1)) + __errors = [] + # + __M = 1 # Car le premier est déjà construit + if selfA._toStore("Residus"): + __eM, _ = InterpolationErrorByColumn( + __EOS, __Q, __I, __M, + __ErrorNorm = selfA._parameters["ErrorNorm"], + __LcCsts = __LcCsts, __IncludedPoints = __IncludedMagicPoints) + __errors.append(__eM) + # + # Boucle + # ------ + while __M < __maxM: + # + __restrictedQi = __Q[__I,:] + if __M > 1: + __Qi_inv = numpy.linalg.inv(__restrictedQi) + else: + __Qi_inv = 1. 
/ __restrictedQi + # + __restrictedrhoMi = __rhoM[__I,__M].reshape((-1,1)) + # + if __M > 1: + __interpolator = numpy.dot(__Q,numpy.dot(__Qi_inv,__restrictedrhoMi)) + else: + __interpolator = numpy.outer(__Q,numpy.outer(__Qi_inv,__restrictedrhoMi)) + # + __residuM = __rhoM[:,__M].reshape((-1,1)) - __interpolator + # + if __LcCsts and len(__IncludedMagicPoints) > 0: + __iM = numpy.argmax( numpy.abs( + numpy.take(__residuM, __IncludedMagicPoints, mode='clip') + )) + else: + __iM = numpy.argmax( numpy.abs( + __residuM + )) + __Q = numpy.column_stack((__Q, __rhoM[:,__M])) + # + __I.append(__iM) + __mu.append(None) # Convention + if selfA._toStore("Residus"): + __eM, _ = InterpolationErrorByColumn( + __EOS, __Q, __I, __M+1, + __ErrorNorm = selfA._parameters["ErrorNorm"], + __LcCsts = __LcCsts, __IncludedPoints = __IncludedMagicPoints) + __errors.append(__eM) + # + __M = __M + 1 + # + #-------------------------- + if len(__errors)>0 and __errors[-1] < selfA._parameters["EpsilonEIM"]: + logging.debug("%s %s (%.1e)"%(selfA._name,"The convergence is obtained when reaching the required EIM tolerance",selfA._parameters["EpsilonEIM"])) + if __M >= __maxM: + logging.debug("%s %s (%i)"%(selfA._name,"The convergence is obtained when reaching the maximum number of RB dimension",__maxM)) + logging.debug("%s The RB of size %i has been correctly build"%(selfA._name,__Q.shape[1])) + logging.debug("%s There are %i points that have been excluded from the potential optimal points"%(selfA._name,len(__ExcludedMagicPoints))) + if hasattr(selfA, "StoredVariables"): + selfA.StoredVariables["OptimalPoints"].store( __I ) + if selfA._toStore("ReducedBasis"): + selfA.StoredVariables["ReducedBasis"].store( __Q ) + if selfA._toStore("Residus"): + selfA.StoredVariables["Residus"].store( __errors ) + if selfA._toStore("ExcludedPoints"): + selfA.StoredVariables["ExcludedPoints"].store( __ExcludedMagicPoints ) + # + return __mu, __I, __Q, __errors + +# ============================================================================== +# UBFEIM_online == EIM_online +# ============================================================================== +if __name__ == "__main__": + print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daAlgorithms/Atoms/ecwukf.py b/src/daComposant/daAlgorithms/Atoms/ecwukf.py new file mode 100644 index 0000000..b27fea2 --- /dev/null +++ b/src/daComposant/daAlgorithms/Atoms/ecwukf.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2008-2024 EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +__doc__ = """ + Unscented Kalman Filter +""" +__author__ = "Jean-Philippe ARGAUD" + +import math, numpy, scipy, copy +from daCore.PlatformInfo import vfloat + +# ============================================================================== +def ecwukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): + """ + Unscented Kalman Filter + """ + if selfA._parameters["EstimationOf"] == "Parameters": + selfA._parameters["StoreInternalVariables"] = True + # + L = Xb.size + Alpha = selfA._parameters["Alpha"] + Beta = selfA._parameters["Beta"] + if selfA._parameters["Kappa"] == 0: + if selfA._parameters["EstimationOf"] == "State": + Kappa = 0 + elif selfA._parameters["EstimationOf"] == "Parameters": + Kappa = 3 - L + else: + Kappa = selfA._parameters["Kappa"] + Lambda = float( Alpha**2 ) * ( L + Kappa ) - L + Gamma = math.sqrt( L + Lambda ) + # + Ww = [] + Ww.append( 0. ) + for i in range(2*L): + Ww.append( 1. / (2.*(L + Lambda)) ) + # + Wm = numpy.array( Ww ) + Wm[0] = Lambda / (L + Lambda) + Wc = numpy.array( Ww ) + Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta) + # + # Durée d'observation et tailles + if hasattr(Y,"stepnumber"): + duration = Y.stepnumber() + __p = numpy.cumprod(Y.shape())[-1] + else: + duration = 2 + __p = numpy.size(Y) + # + # Précalcul des inversions de B et R + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + BI = B.getI() + RI = R.getI() + # + __n = Xb.size + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) + # + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + Xn = Xb + if hasattr(B,"asfullmatrix"): + Pn = B.asfullmatrix(__n) + else: + Pn = B + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( Xb ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + elif selfA._parameters["nextStep"]: + Xn = selfA._getInternalState("Xn") + Pn = selfA._getInternalState("Pn") + # + if selfA._parameters["EstimationOf"] == "Parameters": + XaMin = Xn + previousJMinimum = numpy.finfo(float).max + # + for step in range(duration-1): + # + if U is not None: + if hasattr(U,"store") and len(U)>1: + Un = numpy.ravel( U[step] ).reshape((-1,1)) + elif hasattr(U,"store") and len(U)==1: + Un = numpy.ravel( U[0] ).reshape((-1,1)) + else: + Un = numpy.ravel( U ).reshape((-1,1)) + else: + Un = None + # + if CM is not None and "Tangent" in CM and U is not None: + Cm = CM["Tangent"].asMatrix(Xn) + else: + Cm = None + # + Pndemi = numpy.real(scipy.linalg.sqrtm(Pn)) + Xnmu = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi]) + nbSpts = 2*Xn.size+1 + # + XEnnmu = [] + for point in range(nbSpts): + if selfA._parameters["EstimationOf"] == "State": + Mm = EM["Direct"].appliedControledFormTo + XEnnmui = numpy.asarray( Mm( (Xnmu[:,point], Un) ) ).reshape((-1,1)) + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
+                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+                    XEnnmui = XEnnmui + Cm @ Un
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # --- > Par principe, M = Id, Q = 0
+                XEnnmui = Xnmu[:,point]
+            XEnnmu.append( numpy.ravel(XEnnmui).reshape((-1,1)) )
+        XEnnmu = numpy.concatenate( XEnnmu, axis=1 )
+        #
+        Xhmn = ( XEnnmu * Wm ).sum(axis=1)
+        #
+        if selfA._parameters["EstimationOf"] == "State": Pmn = copy.copy(Q)
+        elif selfA._parameters["EstimationOf"] == "Parameters": Pmn = 0.
+        for point in range(nbSpts):
+            dXEnnmuXhmn = XEnnmu[:,point].flat-Xhmn
+            Pmn += Wc[point] * numpy.outer(dXEnnmuXhmn, dXEnnmuXhmn) # poids du point courant (et non Wc[i], indice périmé)
+        #
+        Pmndemi = numpy.real(scipy.linalg.sqrtm(Pmn))
+        Xnnmu = numpy.hstack([Xhmn.reshape((-1,1)), Xhmn.reshape((-1,1))+Gamma*Pmndemi, Xhmn.reshape((-1,1))-Gamma*Pmndemi])
+        #
+        Hm = HO["Direct"].appliedControledFormTo
+        Ynnmu = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                Ynnmui = Hm( (Xnnmu[:,point], None) )
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                Ynnmui = Hm( (Xnnmu[:,point], Un) )
+            Ynnmu.append( numpy.ravel(Ynnmui).reshape((__p,1)) )
+        Ynnmu = numpy.concatenate( Ynnmu, axis=1 )
+        #
+        Yhmn = ( Ynnmu * Wm ).sum(axis=1)
+        #
+        Pyyn = copy.copy(R)
+        Pxyn = 0.
+        for point in range(nbSpts):
+            dYnnmuYhmn = Ynnmu[:,point].flat-Yhmn
+            dXnnmuXhmn = Xnnmu[:,point].flat-Xhmn
+            Pyyn += Wc[point] * numpy.outer(dYnnmuYhmn, dYnnmuYhmn) # idem : poids Wc du point courant
+            Pxyn += Wc[point] * numpy.outer(dXnnmuXhmn, dYnnmuYhmn)
+        #
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        _Innovation = Ynpu - Yhmn.reshape((-1,1))
+        if selfA._parameters["EstimationOf"] == "Parameters":
+            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
+                _Innovation = _Innovation - Cm @ Un
+        #
+        Kn = Pxyn @ Pyyn.I
+        Xn = Xhmn.reshape((-1,1)) + Kn @ _Innovation
+        Pn = Pmn - Kn @ (Pyyn @ Kn.T)
+        #
+        Xa = Xn # Pointers
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+                or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xhmn )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pmn )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xhmn - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yhmn )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+                or selfA._toStore("CostFunctionJ") \
+                or selfA._toStore("CostFunctionJb") \
+                or selfA._toStore("CostFunctionJo") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("APosterioriCovariance"):
+            Jb = vfloat( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo = vfloat( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                    or selfA._toStore("CurrentOptimum") \
+                    or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                    or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                    or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                    or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
selfA._parameters["EstimationOf"] == "Parameters" \ + and J < previousJMinimum: + previousJMinimum = J + XaMin = Xa + if selfA._toStore("APosterioriCovariance"): + covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] + # + # Stockage final supplémentaire de l'optimum en estimation de paramètres + # ---------------------------------------------------------------------- + if selfA._parameters["EstimationOf"] == "Parameters": + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( XaMin ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + # + return 0 + +# ============================================================================== +if __name__ == "__main__": + print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py index 5ba5044..84536e6 100644 --- a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py @@ -144,6 +144,8 @@ def incr3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb19hlt as optimiseur elif vt("1.11.0") <= vt(scipy.version.version) <= vt("1.11.99"): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur + elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): + import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/lbfgsb112hlt.py b/src/daComposant/daAlgorithms/Atoms/lbfgsb112hlt.py new file mode 100644 index 0000000..a6fc965 --- /dev/null +++ b/src/daComposant/daAlgorithms/Atoms/lbfgsb112hlt.py @@ -0,0 +1,514 @@ +# Modification de la version 1.12.0 +""" +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b + +""" + +## License for the Python wrapper +## ============================== + +## Copyright (c) 2004 David M. Cooke + +## Permission is hereby granted, free of charge, to any person obtaining a +## copy of this software and associated documentation files (the "Software"), +## to deal in the Software without restriction, including without limitation +## the rights to use, copy, modify, merge, publish, distribute, sublicense, +## and/or sell copies of the Software, and to permit persons to whom the +## Software is furnished to do so, subject to the following conditions: + +## The above copyright notice and this permission notice shall be included in +## all copies or substantial portions of the Software. + +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +## DEALINGS IN THE SOFTWARE. + +## Modifications by Travis Oliphant and Enthought, Inc. 
for inclusion in SciPy + +import numpy as np +from numpy import array, asarray, float64, zeros +from scipy.optimize import _lbfgsb +from scipy.optimize._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt, + _wrap_callback, _check_unknown_options, + _prepare_scalar_function) +from scipy.optimize._constraints import old_bound_to_new + +from scipy.sparse.linalg import LinearOperator + +__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] + + +def fmin_l_bfgs_b(func, x0, fprime=None, args=(), + approx_grad=0, + bounds=None, m=10, factr=1e7, pgtol=1e-5, + epsilon=1e-8, + iprint=-1, maxfun=15000, maxiter=15000, disp=None, + callback=None, maxls=20): + """ + Minimize a function func using the L-BFGS-B algorithm. + + Parameters + ---------- + func : callable f(x,*args) + Function to minimize. + x0 : ndarray + Initial guess. + fprime : callable fprime(x,*args), optional + The gradient of `func`. If None, then `func` returns the function + value and the gradient (``f, g = func(x, *args)``), unless + `approx_grad` is True in which case `func` returns only ``f``. + args : sequence, optional + Arguments to pass to `func` and `fprime`. + approx_grad : bool, optional + Whether to approximate the gradient numerically (in which case + `func` returns only the function value). + bounds : list, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None or +-inf for one of ``min`` or + ``max`` when there is no bound in that direction. + m : int, optional + The maximum number of variable metric corrections + used to define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms in an + approximation to it.) + factr : float, optional + The iteration stops when + ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, + where ``eps`` is the machine precision, which is automatically + generated by the code. Typical values for `factr` are: 1e12 for + low accuracy; 1e7 for moderate accuracy; 10.0 for extremely + high accuracy. See Notes for relationship to `ftol`, which is exposed + (instead of `factr`) by the `scipy.optimize.minimize` interface to + L-BFGS-B. + pgtol : float, optional + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n} <= pgtol`` + where ``proj g_i`` is the i-th component of the projected gradient. + epsilon : float, optional + Step size used when `approx_grad` is True, for numerically + calculating the gradient + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + disp : int, optional + If zero, then no output. If a positive number, then this over-rides + `iprint` (i.e., `iprint` gets the value of `disp`). + maxfun : int, optional + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int, optional + Maximum number of iterations. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. 
+ + Returns + ------- + x : array_like + Estimated position of the minimum. + f : float + Value of `func` at the minimum. + d : dict + Information dictionary. + + * d['warnflag'] is + + - 0 if converged, + - 1 if too many function evaluations or too many iterations, + - 2 if stopped for another reason, given in d['task'] + + * d['grad'] is the gradient at the minimum (should be 0 ish) + * d['funcalls'] is the number of function calls made. + * d['nit'] is the number of iterations. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'L-BFGS-B' `method` in particular. Note that the + `ftol` option is made available via that interface, while `factr` is + provided via this interface, where `factr` is the factor multiplying + the default machine floating-point precision to arrive at `ftol`: + ``ftol = factr * numpy.finfo(float).eps``. + + Notes + ----- + License of L-BFGS-B (FORTRAN code): + + The version included here (in fortran code) is 3.0 + (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd, + and Jorge Nocedal . It carries the following + condition for use: + + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + ---------- + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + # build options + if disp is None: + disp = iprint + callback = _wrap_callback(callback) + opts = {'disp': disp, + 'iprint': iprint, + 'maxcor': m, + 'ftol': factr * np.finfo(float).eps, + 'gtol': pgtol, + 'eps': epsilon, + 'maxfun': maxfun, + 'maxiter': maxiter, + 'callback': callback, + 'maxls': maxls} + + res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, + **opts) + d = {'grad': res['jac'], + 'task': res['message'], + 'funcalls': res['nfev'], + 'nit': res['nit'], + 'warnflag': res['status']} + f = res['fun'] + x = res['x'] + + return x, f, d + + +def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, + disp=None, maxcor=10, ftol=2.2204460492503131e-09, + gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000, + iprint=-1, callback=None, maxls=20, + finite_diff_rel_step=None, **unknown_options): + """ + Minimize a scalar function of one or more variables using the L-BFGS-B + algorithm. + + Options + ------- + disp : None or int + If `disp is None` (the default), then the supplied version of `iprint` + is used. If `disp is not None`, then it overrides the supplied version + of `iprint` with the behaviour you outlined. + maxcor : int + The maximum number of variable metric corrections used to + define the limited memory matrix. 
(The limited memory BFGS + method does not store the full hessian but uses this many terms + in an approximation to it.) + ftol : float + The iteration stops when ``(f^k - + f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. + gtol : float + The iteration will stop when ``max{|proj g_i | i = 1, ..., n} + <= gtol`` where ``proj g_i`` is the i-th component of the + projected gradient. + eps : float or ndarray + If `jac is None` the absolute step size used for numerical + approximation of the jacobian via forward differences. + maxfun : int + Maximum number of function evaluations. Note that this function + may violate the limit because of evaluating gradients by numerical + differentiation. + maxiter : int + Maximum number of iterations. + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + finite_diff_rel_step : None or array_like, optional + If `jac in ['2-point', '3-point', 'cs']` the relative step size to + use for numerical approximation of the jacobian. The absolute step + size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, + possibly adjusted to fit into the bounds. For ``method='3-point'`` + the sign of `h` is ignored. If None (default) then step is selected + automatically. + + Notes + ----- + The option `ftol` is exposed via the `scipy.optimize.minimize` interface, + but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The + relationship between the two is ``ftol = factr * numpy.finfo(float).eps``. + I.e., `factr` multiplies the default machine floating-point precision to + arrive at `ftol`. + + """ + _check_unknown_options(unknown_options) + m = maxcor + pgtol = gtol + factr = ftol / np.finfo(float).eps + + x0 = asarray(x0).ravel() + n, = x0.shape + + # historically old-style bounds were/are expected by lbfgsb. + # That's still the case but we'll deal with new-style from here on, + # it's easier + if bounds is None: + pass + elif len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + else: + bounds = np.array(old_bound_to_new(bounds)) + + # check bounds + if (bounds[0] > bounds[1]).any(): + raise ValueError( + "LBFGSB - one of the lower bounds is greater than an upper bound." + ) + + # initial vector must lie within the bounds. 
Otherwise ScalarFunction and + # approx_derivative will cause problems + x0 = np.clip(x0, bounds[0], bounds[1]) + + if disp is not None: + if disp == 0: + iprint = -1 + else: + iprint = disp + + # _prepare_scalar_function can use bounds=None to represent no bounds + sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, + bounds=bounds, + finite_diff_rel_step=finite_diff_rel_step) + + func_and_grad = sf.fun_and_grad + + fortran_int = _lbfgsb.types.intvar.dtype + + nbd = zeros(n, fortran_int) + low_bnd = zeros(n, float64) + upper_bnd = zeros(n, float64) + bounds_map = {(-np.inf, np.inf): 0, + (1, np.inf): 1, + (1, 1): 2, + (-np.inf, 1): 3} + + if bounds is not None: + for i in range(0, n): + l, u = bounds[0, i], bounds[1, i] + if not np.isinf(l): + low_bnd[i] = l + l = 1 + if not np.isinf(u): + upper_bnd[i] = u + u = 1 + nbd[i] = bounds_map[l, u] + + if not maxls > 0: + raise ValueError('maxls must be positive.') + + x = array(x0, float64) + f = array(0.0, float64) + g = zeros((n,), float64) + wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) + iwa = zeros(3*n, fortran_int) + task = zeros(1, 'S60') + csave = zeros(1, 'S60') + lsave = zeros(4, fortran_int) + isave = zeros(44, fortran_int) + dsave = zeros(29, float64) + + task[:] = 'START' + + n_iterations = 0 + + while 1: + # g may become float32 if a user provides a function that calculates + # the Jacobian in float32 (see gh-18730). The underlying Fortran code + # expects float64, so upcast it + g = g.astype(np.float64) + # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, + pgtol, wa, iwa, task, iprint, csave, lsave, + isave, dsave, maxls) + task_str = task.tobytes() + if task_str.startswith(b'FG'): + # The minimization routine wants f and g at the current x. + # Note that interruptions due to maxfun are postponed + # until the completion of the current minimization iteration. + # Overwrite f and g: + f, g = func_and_grad(x) + if sf.nfev > maxfun: + task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' + 'EXCEEDS LIMIT') + elif task_str.startswith(b'NEW_X'): + # new iteration + n_iterations += 1 + + intermediate_result = OptimizeResult(x=x, fun=f) + if _call_callback_maybe_halt(callback, intermediate_result): + task[:] = 'STOP: CALLBACK REQUESTED HALT' + if n_iterations >= maxiter: + task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT' + elif sf.nfev > maxfun: + task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' + 'EXCEEDS LIMIT') + else: + break + + task_str = task.tobytes().strip(b'\x00').strip() + if task_str.startswith(b'CONV'): + warnflag = 0 + elif sf.nfev > maxfun or n_iterations >= maxiter: + warnflag = 1 + else: + warnflag = 2 + + # These two portions of the workspace are described in the mainlb + # subroutine in lbfgsb.f. See line 363. + s = wa[0: m*n].reshape(m, n) + y = wa[m*n: 2*m*n].reshape(m, n) + + # See lbfgsb.f line 160 for this portion of the workspace. + # isave(31) = the total number of BFGS updates prior the current iteration; + n_bfgs_updates = isave[30] + + n_corrs = min(n_bfgs_updates, maxcor) + hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) + + task_str = task_str.decode() + return OptimizeResult(fun=f, jac=g, nfev=sf.nfev, + njev=sf.ngev, + nit=n_iterations, status=warnflag, message=task_str, + x=x, success=(warnflag == 0), hess_inv=hess_inv) + + +class LbfgsInvHessProduct(LinearOperator): + """Linear operator for the L-BFGS approximate inverse Hessian. 
+ + This operator computes the product of a vector with the approximate inverse + of the Hessian of the objective function, using the L-BFGS limited + memory approximation to the inverse Hessian, accumulated during the + optimization. + + Objects of this class implement the ``scipy.sparse.linalg.LinearOperator`` + interface. + + Parameters + ---------- + sk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the solution vector. + (See [1]). + yk : array_like, shape=(n_corr, n) + Array of `n_corr` most recent updates to the gradient. (See [1]). + + References + ---------- + .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited + storage." Mathematics of computation 35.151 (1980): 773-782. + + """ + + def __init__(self, sk, yk): + """Construct the operator.""" + if sk.shape != yk.shape or sk.ndim != 2: + raise ValueError('sk and yk must have matching shape, (n_corrs, n)') + n_corrs, n = sk.shape + + super().__init__(dtype=np.float64, shape=(n, n)) + + self.sk = sk + self.yk = yk + self.n_corrs = n_corrs + self.rho = 1 / np.einsum('ij,ij->i', sk, yk) + + def _matvec(self, x): + """Efficient matrix-vector multiply with the BFGS matrices. + + This calculation is described in Section (4) of [1]. + + Parameters + ---------- + x : ndarray + An array with shape (n,) or (n,1). + + Returns + ------- + y : ndarray + The matrix-vector product + + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + q = np.array(x, dtype=self.dtype, copy=True) + if q.ndim == 2 and q.shape[1] == 1: + q = q.reshape(-1) + + alpha = np.empty(n_corrs) + + for i in range(n_corrs-1, -1, -1): + alpha[i] = rho[i] * np.dot(s[i], q) + q = q - alpha[i]*y[i] + + r = q + for i in range(n_corrs): + beta = rho[i] * np.dot(y[i], r) + r = r + s[i] * (alpha[i] - beta) + + return r + + def todense(self): + """Return a dense array representation of this operator. + + Returns + ------- + arr : ndarray, shape=(n, n) + An array with the same shape and containing + the same data represented by this `LinearOperator`. 
+ + """ + s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho + I = np.eye(*self.shape, dtype=self.dtype) + Hk = I + + for i in range(n_corrs): + A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] + A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] + + Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * + s[i][np.newaxis, :]) + return Hk diff --git a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py index 9a5041c..137ab7f 100644 --- a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py @@ -125,6 +125,8 @@ def psas3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb19hlt as optimiseur elif vt("1.11.0") <= vt(scipy.version.version) <= vt("1.11.99"): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur + elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): + import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/std3dvar.py b/src/daComposant/daAlgorithms/Atoms/std3dvar.py index 34399d5..9b6baa2 100644 --- a/src/daComposant/daAlgorithms/Atoms/std3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std3dvar.py @@ -128,6 +128,8 @@ def std3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb19hlt as optimiseur elif vt("1.11.0") <= vt(scipy.version.version) <= vt("1.11.99"): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur + elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): + import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/std4dvar.py b/src/daComposant/daAlgorithms/Atoms/std4dvar.py index cb147cd..7508af9 100644 --- a/src/daComposant/daAlgorithms/Atoms/std4dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std4dvar.py @@ -188,6 +188,8 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): import daAlgorithms.Atoms.lbfgsb19hlt as optimiseur elif vt("1.11.0") <= vt(scipy.version.version) <= vt("1.11.99"): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur + elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): + import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/Atoms/uskf.py b/src/daComposant/daAlgorithms/Atoms/uskf.py index 6e43d4a..08a5f40 100644 --- a/src/daComposant/daAlgorithms/Atoms/uskf.py +++ b/src/daComposant/daAlgorithms/Atoms/uskf.py @@ -25,10 +25,8 @@ __doc__ = """ """ __author__ = "Jean-Philippe ARGAUD" -import math, numpy, scipy -from daCore.PlatformInfo import PlatformInfo, vfloat -mpr = PlatformInfo().MachinePrecision() -mfp = PlatformInfo().MaximumPrecision() +import math, numpy, scipy, copy +from daCore.PlatformInfo import vfloat # ============================================================================== def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): @@ -118,64 +116,66 @@ def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): Cm = None # Pndemi = numpy.real(scipy.linalg.sqrtm(Pn)) - Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi]) + Xnmu = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi]) nbSpts = 2*Xn.size+1 # - XEtnnp = [] + XEnnmu = [] for point in range(nbSpts): if 
selfA._parameters["EstimationOf"] == "State": Mm = EM["Direct"].appliedControledFormTo - XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1)) + XEnnmui = numpy.asarray( Mm( (Xnmu[:,point], Un) ) ).reshape((-1,1)) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape - XEtnnpi = XEtnnpi + Cm @ Un + XEnnmui = XEnnmui + Cm @ Un elif selfA._parameters["EstimationOf"] == "Parameters": # --- > Par principe, M = Id, Q = 0 - XEtnnpi = Xnp[:,point] - XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) ) - XEtnnp = numpy.concatenate( XEtnnp, axis=1 ) + XEnnmui = Xnmu[:,point] + XEnnmu.append( numpy.ravel(XEnnmui).reshape((-1,1)) ) + XEnnmu = numpy.concatenate( XEnnmu, axis=1 ) # - Xncm = ( XEtnnp * Wm ).sum(axis=1) + Xhmn = ( XEnnmu * Wm ).sum(axis=1) # - if selfA._parameters["EstimationOf"] == "State": Pnm = Q - elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0. + if selfA._parameters["EstimationOf"] == "State": Pmn = copy.copy(Q) + elif selfA._parameters["EstimationOf"] == "Parameters": Pmn = 0. for point in range(nbSpts): - Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm)) + dXEnnmuXhmn = XEnnmu[:,point].flat-Xhmn + Pmn += Wc[i] * numpy.outer(dXEnnmuXhmn, dXEnnmuXhmn) # - Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm)) - # - Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi]) + Pmndemi = numpy.real(scipy.linalg.sqrtm(Pmn)) + Xnnmu = numpy.hstack([Xhmn.reshape((-1,1)), Xhmn.reshape((-1,1))+Gamma*Pmndemi, Xhmn.reshape((-1,1))-Gamma*Pmndemi]) # Hm = HO["Direct"].appliedControledFormTo - Ynnp = [] + Ynnmu = [] for point in range(nbSpts): if selfA._parameters["EstimationOf"] == "State": - Ynnpi = Hm( (Xnnp[:,point], None) ) + Ynnmui = Hm( (Xnnmu[:,point], None) ) elif selfA._parameters["EstimationOf"] == "Parameters": - Ynnpi = Hm( (Xnnp[:,point], Un) ) - Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) ) - Ynnp = numpy.concatenate( Ynnp, axis=1 ) + Ynnmui = Hm( (Xnnmu[:,point], Un) ) + Ynnmu.append( numpy.ravel(Ynnmui).reshape((__p,1)) ) + Ynnmu = numpy.concatenate( Ynnmu, axis=1 ) # - Yncm = ( Ynnp * Wm ).sum(axis=1) + Yhmn = ( Ynnmu * Wm ).sum(axis=1) # - Pyyn = R + Pyyn = copy.copy(R) Pxyn = 0. for point in range(nbSpts): - Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) - Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) + dYnnmuYhmn = Ynnmu[:,point].flat-Yhmn + dXnnmuXhmn = Xnnmu[:,point].flat-Xhmn + Pyyn += Wc[i] * numpy.outer(dYnnmuYhmn, dYnnmuYhmn) + Pxyn += Wc[i] * numpy.outer(dXnnmuXhmn, dYnnmuYhmn) # if hasattr(Y,"store"): Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) else: Ynpu = numpy.ravel( Y ).reshape((__p,1)) - _Innovation = Ynpu - Yncm.reshape((-1,1)) + _Innovation = Ynpu - Yhmn.reshape((-1,1)) if selfA._parameters["EstimationOf"] == "Parameters": if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! 
_Innovation = _Innovation - Cm @ Un # - Kn = Pxyn * Pyyn.I - Xn = Xncm.reshape((-1,1)) + Kn * _Innovation - Pn = Pnm - Kn * Pyyn * Kn.T + Kn = Pxyn @ Pyyn.I + Xn = Xhmn.reshape((-1,1)) + Kn @ _Innovation + Pn = Pmn - Kn @ (Pyyn @ Kn.T) # Xa = Xn # Pointeurs #-------------------------- @@ -195,16 +195,16 @@ def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): or selfA._toStore("CurrentState"): selfA.StoredVariables["CurrentState"].store( Xn ) if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( Xncm ) + selfA.StoredVariables["ForecastState"].store( Xhmn ) if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( Pnm ) + selfA.StoredVariables["ForecastCovariance"].store( Pmn ) if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( Xncm - Xa ) + selfA.StoredVariables["BMA"].store( Xhmn - Xa ) if selfA._toStore("InnovationAtCurrentState"): selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) if selfA._toStore("SimulatedObservationAtCurrentState") \ or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm ) + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yhmn ) # ---> autres if selfA._parameters["StoreInternalVariables"] \ or selfA._toStore("CostFunctionJ") \ diff --git a/src/daComposant/daAlgorithms/Atoms/van3dvar.py b/src/daComposant/daAlgorithms/Atoms/van3dvar.py index c0d1419..14b4088 100644 --- a/src/daComposant/daAlgorithms/Atoms/van3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/van3dvar.py @@ -136,6 +136,8 @@ def van3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): import daAlgorithms.Atoms.lbfgsb19hlt as optimiseur elif vt("1.11.0") <= vt(scipy.version.version) <= vt("1.11.99"): import daAlgorithms.Atoms.lbfgsb111hlt as optimiseur + elif vt("1.12.0") <= vt(scipy.version.version) <= vt("1.12.99"): + import daAlgorithms.Atoms.lbfgsb112hlt as optimiseur else: import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( diff --git a/src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py b/src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py index 70db0d2..4c727d0 100644 --- a/src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py +++ b/src/daComposant/daAlgorithms/MeasurementsOptimalPositioningTask.py @@ -22,7 +22,7 @@ import numpy from daCore import BasicObjects -from daAlgorithms.Atoms import ecweim, ecwdeim, eosg +from daAlgorithms.Atoms import ecweim, ecwdeim, ecwubfeim, eosg # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): @@ -39,6 +39,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "DEIM", "PositioningByDEIM", "lcDEIM", "PositioningBylcDEIM", ], + listadv = [ + "UBFEIM", "PositioningByUBFEIM", + "lcUBFEIM", "PositioningBylcUBFEIM", + ], ) self.defineRequiredParameter( name = "EnsembleOfSnapshots", @@ -46,6 +50,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): typecast = numpy.array, message = "Ensemble de vecteurs d'état physique (snapshots), 1 état par colonne (Training Set)", ) + self.defineRequiredParameter( + name = "UserBasisFunctions", + default = [], + typecast = numpy.array, + message = "Ensemble de fonctions de base définis par l'utilisateur, 1 fonction de base par colonne", + ) self.defineRequiredParameter( name = "MaximumNumberOfLocations", default = 1, @@ -115,6 +125,12 @@ class 
ElementaryAlgorithm(BasicObjects.Algorithm): typecast = tuple, message = "Points de calcul définis par un hyper-cube dont les points sur chaque axe proviennent de l'échantillonnage indépendant de la variable selon la spécification ['distribution',[parametres],nombre]", ) + self.defineRequiredParameter( + name = "ReduceMemoryUse", + default = False, + typecast = bool, + message = "Réduction de l'empreinte mémoire lors de l'exécution au prix d'une augmentation du temps de calcul", + ) self.defineRequiredParameter( name = "SetDebug", default = False, @@ -162,7 +178,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): elif isinstance(HO, dict): ecweim.EIM_offline(self, eosg.eosg(self, Xb, HO)) else: - raise ValueError("Snapshots or Operator have to be given in order to launch the analysis") + raise ValueError("Snapshots or Operator have to be given in order to launch the EIM analysis") # #-------------------------- elif self._parameters["Variant"] in ["lcDEIM", "PositioningBylcDEIM", "DEIM", "PositioningByDEIM"]: @@ -173,7 +189,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): elif isinstance(HO, dict): ecwdeim.DEIM_offline(self, eosg.eosg(self, Xb, HO)) else: - raise ValueError("Snapshots or Operator have to be given in order to launch the analysis") + raise ValueError("Snapshots or Operator have to be given in order to launch the DEIM analysis") + #-------------------------- + elif self._parameters["Variant"] in ["lcUBFEIM", "PositioningBylcUBFEIM", "UBFEIM", "PositioningByUBFEIM"]: + if len(self._parameters["EnsembleOfSnapshots"]) > 0: + if self._toStore("EnsembleOfSimulations"): + self.StoredVariables["EnsembleOfSimulations"].store( self._parameters["EnsembleOfSnapshots"] ) + ecwubfeim.UBFEIM_offline(self, self._parameters["EnsembleOfSnapshots"]) + elif isinstance(HO, dict): + ecwubfeim.UBFEIM_offline(self, eosg.eosg(self, Xb, HO)) + else: + raise ValueError("Snapshots or Operator have to be given in order to launch the UBFEIM analysis") # #-------------------------- else: diff --git a/src/daComposant/daAlgorithms/ReducedModelingTest.py b/src/daComposant/daAlgorithms/ReducedModelingTest.py index 57590cd..86740f8 100644 --- a/src/daComposant/daAlgorithms/ReducedModelingTest.py +++ b/src/daComposant/daAlgorithms/ReducedModelingTest.py @@ -243,9 +243,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): else: __Ensemble = __EOS # - __sv, __svsq = SingularValuesEstimation( __Ensemble ) - __tisv = __svsq / __svsq.sum() - __qisv = 1. - __tisv.cumsum() + __sv, __svsq, __tisv, __qisv = SingularValuesEstimation( __Ensemble ) if self._parameters["MaximumNumberOfModes"] < len(__sv): __sv = __sv[:self._parameters["MaximumNumberOfModes"]] __tisv = __tisv[:self._parameters["MaximumNumberOfModes"]] @@ -286,7 +284,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): svalue = __sv[ns] rvalue = __sv[ns] / __sv[0] vsinfo = 100 * __tisv[ns] - rsinfo = 100 * __qisv[ns] + rsinfo = max(100 * __qisv[ns],0.) 
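+            # Clamp to zero for display: floating-point rounding can leave the
+            # cumulated residual information __qisv a few 1.e-16 below zero on
+            # the last modes.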
if __s: msgs += (__marge + " %0"+str(__ordre)+"i | %22."+str(__p)+"e | %22."+str(__p)+"e | %2i%s , %4.1f%s\n")%(ns,svalue,rvalue,vsinfo,"%",rsinfo,"%") if rsinfo > 10: cut1pd = ns+2 # 10% @@ -309,6 +307,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): msgs += __marge + "Representing more than 99.99%s of variance requires at least %i mode(s).\n"%("%",cut1pi) # if has_matplotlib and self._parameters["PlotAndSave"]: + # Evite les message debug de matplotlib + dL = logging.getLogger().getEffectiveLevel() + logging.getLogger().setLevel(logging.WARNING) try: msgs += ("\n") msgs += (__marge + "Plot and save results in a file named \"%s\"\n"%str(self._parameters["ResultFile"])) @@ -361,6 +362,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): msgs += ("\n") msgs += (__marge + "Saving figure fail, please update your Matplolib version.\n") msgs += ("\n") + logging.getLogger().setLevel(dL) # msgs += ("\n") msgs += (__marge + "%s\n"%("-"*75,)) diff --git a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py index 5e129ed..981cf65 100644 --- a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py +++ b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py @@ -21,7 +21,7 @@ # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D from daCore import BasicObjects -from daAlgorithms.Atoms import c2ukf, uskf +from daAlgorithms.Atoms import ecwukf, c2ukf, uskf # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): @@ -36,6 +36,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "UKF", "2UKF", ], + listadv = [ + "UKF-Std", + "UKF-MSP", + ], ) self.defineRequiredParameter( name = "EstimationOf", @@ -139,8 +143,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): #-------------------------- # Default UKF #-------------------------- - if self._parameters["Variant"] == "UKF": - uskf.uskf(self, Xb, Y, U, HO, EM, CM, R, B, Q) + if self._parameters["Variant"] in ["UKF", "UKF-Std"]: + ecwukf.ecwukf(self, Xb, Y, U, HO, EM, CM, R, B, Q) # #-------------------------- # Default 2UKF @@ -148,6 +152,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): c2ukf.c2ukf(self, Xb, Y, U, HO, EM, CM, R, B, Q) # #-------------------------- + # UKF-MSP + elif self._parameters["Variant"] == "UKF-MSP": + uskf.uskf(self, Xb, Y, U, HO, EM, CM, R, B, Q) + # + #-------------------------- else: raise ValueError("Error in Variant name: %s"%self._parameters["Variant"]) # diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py index 038c3f2..63f3abc 100644 --- a/src/daComposant/daCore/BasicObjects.py +++ b/src/daComposant/daCore/BasicObjects.py @@ -835,7 +835,7 @@ class Algorithm(object): # # Mise à jour des paramètres internes avec le contenu de Parameters, en # reprenant les valeurs par défauts pour toutes celles non définies - self.__setParameters(Parameters, reset=True) + self.__setParameters(Parameters, reset=True) # Copie for k, v in self.__variable_names_not_public.items(): if k not in self._parameters: self.__setParameters( {k:v} ) @@ -851,9 +851,13 @@ class Algorithm(object): logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol)) else: if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]: - logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size)) + logging.debug( + "%s %s vector %s is required and set, 
and its size is %i."%( + self._name,argname,symbol,numpy.array(argument).size)) elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: - logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size)) + logging.debug( + "%s %s vector %s is optional and set, and its size is %i."%( + self._name,argname,symbol,numpy.array(argument).size)) else: logging.debug( "%s %s vector %s is set although neither required nor optional, and its size is %i."%( @@ -862,7 +866,7 @@ class Algorithm(object): __test_vvalue( Xb, "Xb", "Background or initial state" ) __test_vvalue( Y, "Y", "Observation" ) __test_vvalue( U, "U", "Control" ) - + # # Corrections et compléments des covariances def __test_cvalue(argument, variable, argname, symbol=None): if symbol is None: symbol = variable @@ -879,12 +883,14 @@ class Algorithm(object): elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol)) else: - logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol)) + logging.debug( + "%s %s error covariance matrix %s is set although neither required nor optional."%( + self._name,argname,symbol)) return 0 __test_cvalue( B, "B", "Background" ) __test_cvalue( R, "R", "Observation" ) __test_cvalue( Q, "Q", "Evolution" ) - + # # Corrections et compléments des opérateurs def __test_ovalue(argument, variable, argname, symbol=None): if symbol is None: symbol = variable @@ -1158,7 +1164,9 @@ class Algorithm(object): self._parameters[k] = self.setParameterValue(k) else: pass - if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100: + if hasattr(self._parameters[k],"size") and self._parameters[k].size > 100: + logging.debug("%s %s d'une taille totale de %s", self._name, self.__required_parameters[k]["message"], self._parameters[k].size) + elif hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100: logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k])) else: logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k]) diff --git a/src/daComposant/daCore/Interfaces.py b/src/daComposant/daCore/Interfaces.py index c0cdd81..9c0b9b8 100644 --- a/src/daComposant/daCore/Interfaces.py +++ b/src/daComposant/daCore/Interfaces.py @@ -952,9 +952,9 @@ class ImportFromFile(object): if self._format == "text/csv" or Format.upper() == "CSV": self._format = "text/csv" self.__filestring = "".join(self.__header) - if self.__filestring.count(",") > 1: + if self.__filestring.count(",") > 0: self._delimiter = "," - elif self.__filestring.count(";") > 1: + elif self.__filestring.count(";") > 0: self._delimiter = ";" else: self._delimiter = None @@ -1196,6 +1196,7 @@ class ImportScalarLinesFromFile(ImportFromFile): usecols = __usecols, skiprows = self._skiprows, converters = __converters, + ndmin = 1, ) elif self._format in ["text/csv", "text/tab-separated-values"]: __content = numpy.loadtxt( @@ -1205,6 +1206,7 @@ class ImportScalarLinesFromFile(ImportFromFile): skiprows = self._skiprows, converters = __converters, delimiter = self._delimiter, + ndmin = 1, ) else: raise ValueError("Unkown file format \"%s\""%self._format) diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index e01bae7..d579846 100644 --- 
a/src/daComposant/daCore/NumericObjects.py
+++ b/src/daComposant/daCore/NumericObjects.py
@@ -656,7 +656,119 @@ def SingularValuesEstimation( __Ensemble, __Using = "SVDVALS"):
     else:
         raise ValueError("Error in requested variant name: %s"%__Using)
     #
-    return __sv, __svsq
+    __tisv = __svsq / __svsq.sum()
+    __qisv = 1. - __svsq.cumsum() / __svsq.sum()
+    # Differs only at the 1.e-16 level from: __qisv = 1. - __tisv.cumsum()
+    #
+    return __sv, __svsq, __tisv, __qisv
+
+# ==============================================================================
+def MaxL2NormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
+    "Maximum of the L2 norms computed by column"
+    if __LcCsts and len(__IncludedPoints) > 0:
+        normes = numpy.linalg.norm(
+            numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
+            axis = 0,
+        )
+    else:
+        normes = numpy.linalg.norm( __Ensemble, axis = 0)
+    nmax = numpy.max(normes)
+    imax = numpy.argmax(normes)
+    return nmax, imax, normes
+
+def MaxLinfNormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
+    "Maximum of the Linf norms computed by column"
+    if __LcCsts and len(__IncludedPoints) > 0:
+        normes = numpy.linalg.norm(
+            numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
+            axis = 0, ord=numpy.inf,
+        )
+    else:
+        normes = numpy.linalg.norm( __Ensemble, axis = 0, ord=numpy.inf)
+    nmax = numpy.max(normes)
+    imax = numpy.argmax(normes)
+    return nmax, imax, normes
+
+def InterpolationErrorByColumn(
+        __Ensemble = None, __Basis = None, __Points = None, __M = 2,  # Usage 1
+        __Differences = None,                                         # Usage 2
+        __ErrorNorm = None,                                           # Common
+        __LcCsts = False, __IncludedPoints = [],                      # Common
+        __CDM = False,  # ComputeMaxDifference                        # Common
+        __RMU = False,  # ReduceMemoryUse                             # Common
+        __FTL = False,  # ForceTril                                   # Common
+    ):
+    "Analysis of the interpolation error norms computed by column"
+    if __ErrorNorm == "L2":
+        NormByColumn = MaxL2NormByColumn
+    else:
+        NormByColumn = MaxLinfNormByColumn
+    #
+    if __Differences is None and not __RMU:  # Usage 1
+        if __FTL:
+            rBasis = numpy.tril( __Basis[__Points,:] )
+        else:
+            rBasis = __Basis[__Points,:]
+        rEnsemble = __Ensemble[__Points,:]
+        #
+        if __M > 1:
+            rBasis_inv = numpy.linalg.inv(rBasis)
+            Interpolator = numpy.dot(__Basis,numpy.dot(rBasis_inv,rEnsemble))
+        else:
+            rBasis_inv = 1. / rBasis
+            Interpolator = numpy.outer(__Basis,numpy.outer(rBasis_inv,rEnsemble))
+        #
+        differences = __Ensemble - Interpolator
+        #
+        error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints)
+        #
+        if __CDM:
+            maxDifference = differences[:,nbr]
+        #
+    elif __Differences is None and __RMU:  # Usage 1
+        if __FTL:
+            rBasis = numpy.tril( __Basis[__Points,:] )
+        else:
+            rBasis = __Basis[__Points,:]
+        rEnsemble = __Ensemble[__Points,:]
+        #
+        if __M > 1:
+            rBasis_inv = numpy.linalg.inv(rBasis)
+            rCoordinates = numpy.dot(rBasis_inv,rEnsemble)
+        else:
+            rBasis_inv = 1. / rBasis
+            rCoordinates = numpy.outer(rBasis_inv,rEnsemble)
+        #
+        error = 0.
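+        # ReduceMemoryUse path: the full matrix of differences is never
+        # assembled; each column is reconstructed from its reduced coordinates
+        # and measured in turn, keeping only the running maximum of the norm.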
+ nbr = -1 + for iCol in range(__Ensemble.shape[1]): + if __M > 1: + iDifference = __Ensemble[:,iCol] - numpy.dot(__Basis, rCoordinates[:,iCol]) + else: + iDifference = __Ensemble[:,iCol] - numpy.ravel(numpy.outer(__Basis, rCoordinates[:,iCol])) + # + normDifference, _, _ = NormByColumn(iDifference, __LcCsts, __IncludedPoints) + # + if normDifference > error: + error = normDifference + nbr = iCol + # + if __CDM: + maxDifference = __Ensemble[:,nbr] - numpy.dot(__Basis, rCoordinates[:,nbr]) + # + else: # Usage 2 + differences = __Differences + # + error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints) + # + if __CDM: + # faire cette variable intermédiaire coûte cher + maxDifference = differences[:,nbr] + # + if __CDM: + return error, nbr, maxDifference + else: + return error, nbr # ============================================================================== def EnsemblePerturbationWithGivenCovariance( diff --git a/src/daComposant/daCore/Persistence.py b/src/daComposant/daCore/Persistence.py index 97c1c6c..0b1a340 100644 --- a/src/daComposant/daCore/Persistence.py +++ b/src/daComposant/daCore/Persistence.py @@ -272,7 +272,7 @@ class Persistence(object): # --------------------------------------------------------- def means(self): """ - Renvoie la série, contenant à chaque pas, la valeur moyenne des données + Renvoie la série contenant, à chaque pas, la valeur moyenne des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ @@ -283,7 +283,7 @@ class Persistence(object): def stds(self, ddof=0): """ - Renvoie la série, contenant à chaque pas, l'écart-type des données + Renvoie la série contenant, à chaque pas, l'écart-type des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. @@ -300,7 +300,7 @@ class Persistence(object): def sums(self): """ - Renvoie la série, contenant à chaque pas, la somme des données au pas. + Renvoie la série contenant, à chaque pas, la somme des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ @@ -311,7 +311,7 @@ class Persistence(object): def mins(self): """ - Renvoie la série, contenant à chaque pas, le minimum des données au pas. + Renvoie la série contenant, à chaque pas, le minimum des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ @@ -322,7 +322,7 @@ class Persistence(object): def maxs(self): """ - Renvoie la série, contenant à chaque pas, la maximum des données au pas. + Renvoie la série contenant, à chaque pas, la maximum des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ @@ -333,7 +333,7 @@ class Persistence(object): def powers(self, x2): """ - Renvoie la série, contenant à chaque pas, la puissance "**x2" au pas. + Renvoie la série contenant, à chaque pas, la puissance "**x2" au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ @@ -346,7 +346,7 @@ class Persistence(object): """ Norm (_ord : voir numpy.linalg.norm) - Renvoie la série, contenant à chaque pas, la norme des données au pas. + Renvoie la série contenant, à chaque pas, la norme des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. 
""" @@ -355,12 +355,25 @@ class Persistence(object): except Exception: raise TypeError("Base type is incompatible with numpy") + def traces(self, offset=0): + """ + Trace + + Renvoie la série contenant, à chaque pas, la trace (avec l'offset) des + données au pas. Il faut que le type de base soit compatible avec les + types élémentaires numpy. + """ + try: + return [numpy.trace(item, offset, dtype=mfp) for item in self.__values] + except Exception: + raise TypeError("Base type is incompatible with numpy") + def maes(self, _predictor=None): """ Mean Absolute Error (MAE) mae(dX) = 1/n sum(dX_i) - Renvoie la série, contenant à chaque pas, la MAE des données au pas. + Renvoie la série contenant, à chaque pas, la MAE des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. C'est réservé aux variables d'écarts ou d'incréments si le prédicteur est None, sinon c'est appliqué à l'écart entre les données @@ -387,7 +400,7 @@ class Persistence(object): Mean-Square Error (MSE) ou Mean-Square Deviation (MSD) mse(dX) = 1/n sum(dX_i**2) - Renvoie la série, contenant à chaque pas, la MSE des données au pas. Il + Renvoie la série contenant, à chaque pas, la MSE des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. C'est réservé aux variables d'écarts ou d'incréments si le prédicteur est None, sinon c'est appliqué à l'écart entre les données @@ -418,7 +431,7 @@ class Persistence(object): Root-Mean-Square Error (RMSE) ou Root-Mean-Square Deviation (RMSD) rmse(dX) = sqrt( 1/n sum(dX_i**2) ) = sqrt( mse(dX) ) - Renvoie la série, contenant à chaque pas, la RMSE des données au pas. + Renvoie la série contenant, à chaque pas, la RMSE des données au pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. 
C'est réservé aux variables d'écarts ou d'incréments si le prédicteur est None, sinon c'est appliqué à l'écart entre les données @@ -460,14 +473,15 @@ class Persistence(object): raise ImportError("The Gnuplot module is required to plot the object.") # # Vérification et compléments sur les paramètres d'entrée + if ltitle is None: ltitle = "" + __geometry = str(geometry) + __sizespec = (__geometry.split('+')[0]).replace('x',',') + # if persist: - Gnuplot.GnuplotOpts.gnuplot_command = 'gnuplot -persist -geometry '+geometry - else: - Gnuplot.GnuplotOpts.gnuplot_command = 'gnuplot -geometry '+geometry - if ltitle is None: - ltitle = "" + Gnuplot.GnuplotOpts.gnuplot_command = 'gnuplot -persist ' + # self.__g = Gnuplot.Gnuplot() # persist=1 - self.__g('set terminal '+Gnuplot.GnuplotOpts.default_term) + self.__g('set terminal '+Gnuplot.GnuplotOpts.default_term+' size '+__sizespec) self.__g('set style data lines') self.__g('set grid') self.__g('set autoscale') @@ -544,7 +558,7 @@ class Persistence(object): i = -1 for index in indexes: self.__g('set title "'+str(title)+' (pas '+str(index)+')"') - if isinstance(steps,list) or isinstance(steps,numpy.ndarray): + if isinstance(steps, (list, numpy.ndarray)): Steps = list(steps) else: Steps = list(range(len(self.__values[index]))) @@ -690,18 +704,19 @@ class Persistence(object): raise ImportError("The Gnuplot module is required to plot the object.") # # Vérification et compléments sur les paramètres d'entrée - if persist: - Gnuplot.GnuplotOpts.gnuplot_command = 'gnuplot -persist -geometry '+geometry - else: - Gnuplot.GnuplotOpts.gnuplot_command = 'gnuplot -geometry '+geometry - if ltitle is None: - ltitle = "" - if isinstance(steps,list) or isinstance(steps, numpy.ndarray): + if ltitle is None: ltitle = "" + if isinstance(steps, (list, numpy.ndarray)): Steps = list(steps) else: Steps = list(range(len(self.__values[0]))) + __geometry = str(geometry) + __sizespec = (__geometry.split('+')[0]).replace('x',',') + # + if persist: + Gnuplot.GnuplotOpts.gnuplot_command = 'gnuplot -persist ' + # self.__g = Gnuplot.Gnuplot() # persist=1 - self.__g('set terminal '+Gnuplot.GnuplotOpts.default_term) + self.__g('set terminal '+Gnuplot.GnuplotOpts.default_term+' size '+__sizespec) self.__g('set style data lines') self.__g('set grid') self.__g('set autoscale') -- 2.39.2
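For reference, the unscented weight construction introduced above in ecwukf.py can be exercised on its own. The following standalone sketch is illustrative only and is not part of the patch; the tuning values L = 3, Alpha = 0.5, Beta = 2 and Kappa = 0 are assumed for the example. It mirrors the Lambda, Gamma, Wm and Wc formulas of the new file and checks that the mean weights sum to one (the covariance weights need not, since Wc[0] carries the extra 1. - Alpha**2 + Beta term):

    # Standalone check of the sigma-point weights used by ecwukf/uskf
    import math, numpy

    L, Alpha, Beta, Kappa = 3, 0.5, 2., 0        # assumed example tuning
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma = math.sqrt( L + Lambda )              # sigma-point spread factor

    Wm = numpy.full( 2*L + 1, 1. / (2.*(L + Lambda)) )
    Wm[0] = Lambda / (L + Lambda)                # weights for the mean
    Wc = Wm.copy()
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)  # weights for covariances

    assert abs(Wm.sum() - 1.) < 1.e-12           # mean weights are normalized
    print("Gamma = %.6f, sum(Wm) = %.2f, sum(Wc) = %.2f"%(Gamma, Wm.sum(), Wc.sum()))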