SALOME platform Git repositories - modules/adao.git/commitdiff
Code improvements, review and simplifications (2)
author Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
Mon, 7 Feb 2022 18:22:10 +0000 (19:22 +0100)
committer Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
Mon, 7 Feb 2022 18:22:10 +0000 (19:22 +0100)
28 files changed:
src/daComposant/daAlgorithms/3DVAR.py
src/daComposant/daAlgorithms/Atoms/cekf.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/ecwblue.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/ecwexblue.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/ecwlls.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/ecwnlls.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/enks.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/etkf.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/exkf.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/ienkf.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/incr3dvar.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/mlef.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/psas3dvar.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/senkf.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/std3dvar.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/std4dvar.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/stdkf.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Atoms/van3dvar.py [new file with mode: 0644]
src/daComposant/daAlgorithms/Blue.py
src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
src/daComposant/daAlgorithms/ExtendedBlue.py
src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
src/daComposant/daAlgorithms/KalmanFilter.py
src/daComposant/daAlgorithms/LinearLeastSquares.py
src/daComposant/daAlgorithms/NonLinearLeastSquares.py
src/daComposant/daAlgorithms/lbfgsbhlt.py [deleted file]
src/daComposant/daCore/NumericObjects.py
src/daComposant/daCore/Persistence.py

diff --git a/src/daComposant/daAlgorithms/3DVAR.py b/src/daComposant/daAlgorithms/3DVAR.py
index 0136e9827eef1e39d0ac0675296a20f2d0932de5..28ae3bc4e4dd97c0c22b4d99818659034d902238 100644 (file)
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging, numpy
+import numpy
 from daCore import BasicObjects, NumericObjects
+from daAlgorithms.Atoms import std3dvar, van3dvar, incr3dvar, psas3dvar
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
     def __init__(self):
         BasicObjects.Algorithm.__init__(self, "3DVAR")
-        self.defineRequiredParameter(
-            name     = "Minimizer",
-            default  = "LBFGSB",
-            typecast = str,
-            message  = "Minimiseur utilisé",
-            listval  = [
-                "LBFGSB",
-                "TNC",
-                "CG",
-                "NCG",
-                "BFGS",
-                ],
-            )
         self.defineRequiredParameter(
             name     = "Variant",
             default  = "3DVAR",
@@ -50,11 +38,25 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "3DVAR-VAN",
                 "3DVAR-Incr",
                 "3DVAR-PSAS",
+                "OneCorrection",
                 ],
             listadv  = [
                 "3DVAR-Std",
                 "Incr3DVAR",
-                "OneCycle3DVAR-Std",
+                "OneCorrection3DVAR-Std",
+                ],
+            )
+        self.defineRequiredParameter(
+            name     = "Minimizer",
+            default  = "LBFGSB",
+            typecast = str,
+            message  = "Minimiseur utilisé",
+            listval  = [
+                "LBFGSB",
+                "TNC",
+                "CG",
+                "NCG",
+                "BFGS",
                 ],
             )
         self.defineRequiredParameter(
@@ -122,6 +124,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "ForecastState",
                 "IndexOfOptimum",
                 "Innovation",
+                "InnovationAtCurrentAnalysis",
                 "InnovationAtCurrentState",
                 "JacobianMatrixAtBackground",
                 "JacobianMatrixAtOptimum",
@@ -180,6 +183,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             )
         self.requireInputArguments(
             mandatory= ("Xb", "Y", "HO", "R", "B" ),
+            optional = ("U", "EM", "CM", "Q"),
             )
         self.setAttributes(tags=(
             "DataAssimilation",
@@ -191,22 +195,21 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
         #--------------------------
-        # Default 3DVAR
         if   self._parameters["Variant"] in ["3DVAR", "3DVAR-Std"]:
-            NumericObjects.multi3dvar(self, Xb, Y, U, HO, EM, CM, R, B, Q, NumericObjects.std3dvar)
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, std3dvar.std3dvar)
         #
         elif self._parameters["Variant"] == "3DVAR-VAN":
-            NumericObjects.multi3dvar(self, Xb, Y, U, HO, EM, CM, R, B, Q, NumericObjects.van3dvar)
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, van3dvar.van3dvar)
         #
         elif self._parameters["Variant"] in ["3DVAR-Incr", "Incr3DVAR"]:
-            NumericObjects.multi3dvar(self, Xb, Y, U, HO, EM, CM, R, B, Q, NumericObjects.incr3dvar)
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, incr3dvar.incr3dvar)
         #
         elif self._parameters["Variant"] == "3DVAR-PSAS":
-            NumericObjects.multi3dvar(self, Xb, Y, U, HO, EM, CM, R, B, Q, NumericObjects.psas3dvar)
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, psas3dvar.psas3dvar)
         #
         #--------------------------
-        elif self._parameters["Variant"] == "OneCycle3DVAR-Std":
-            NumericObjects.std3dvar(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        elif self._parameters["Variant"] in ["OneCorrection", "OneCorrection3DVAR-Std"]:
+            std3dvar.std3dvar(self, Xb, Y, HO, R, B)
         #
         #--------------------------
         else:
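
The variants dispatched above (Std, VAN, Incr, PSAS) are different formulations of the same underlying 3DVAR minimization. The following standalone sketch is not part of this commit: Xb, Y, H, B and R are plain numpy placeholders rather than ADAO objects, and scipy's L-BFGS-B stands in for the default "Minimizer" declared above.

import numpy, scipy.optimize

def tdvar_sketch(Xb, Y, H, B, R):
    # Minimal 3DVAR sketch: minimize J(x) = 0.5*(x-Xb)^T B^-1 (x-Xb) + 0.5*(Y-H(x))^T R^-1 (Y-H(x))
    BI = numpy.linalg.inv(B)
    RI = numpy.linalg.inv(R)
    def J(x):
        d = (x - numpy.ravel(Xb)).reshape((-1, 1))
        i = (numpy.ravel(Y) - numpy.ravel(H(x))).reshape((-1, 1))
        return float(0.5 * d.T @ BI @ d + 0.5 * i.T @ RI @ i)
    return scipy.optimize.minimize(J, numpy.ravel(Xb), method="L-BFGS-B").x
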
diff --git a/src/daComposant/daAlgorithms/Atoms/cekf.py b/src/daComposant/daAlgorithms/Atoms/cekf.py
new file mode 100644 (file)
index 0000000..f4bc1f8
--- /dev/null
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Constrained Extended Kalman Filter
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy
+from daCore.NumericObjects import ForceNumericBounds
+from daCore.NumericObjects import ApplyBounds
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Constrained Extended Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
+        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
+        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
+            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
+            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it would be counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By principle, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it would be counted twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
+        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+        Xn = Xn_predicted + Kn * _Innovation
+        Pn = Pn_predicted - Kn * Ht * Pn_predicted
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+        #
+        Xa = Xn # Pointer assignment (no copy)
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Final additional storage of the optimum for parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
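
Once the tangent and adjoint operators are turned into plain matrices, the analysis step coded above reduces to the classical Kalman update followed by a projection onto the bounds. A minimal sketch, assuming dense numpy arrays: Xf and Pf are the forecast state and covariance, Ht the linearized observation operator, bounds an (n, 2) array of [min, max] values; these names are placeholders, not ADAO objects.

import numpy

def cekf_analysis_sketch(Xf, Pf, Ht, R, Y, bounds):
    # One constrained EKF analysis step: gain, state and covariance updates,
    # then projection of the analysis onto the bounds ("EstimateProjection" constraint)
    K  = Pf @ Ht.T @ numpy.linalg.inv(Ht @ Pf @ Ht.T + R)
    Xa = Xf + K @ (Y - Ht @ Xf)
    Pa = Pf - K @ Ht @ Pf
    Xa = numpy.clip(Xa, bounds[:, [0]], bounds[:, [1]])
    return Xa, Pa
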
diff --git a/src/daComposant/daAlgorithms/Atoms/ecwblue.py b/src/daComposant/daAlgorithms/Atoms/ecwblue.py
new file mode 100644 (file)
index 0000000..e21dcf0
--- /dev/null
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    BLUE
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import logging, numpy
+from daCore.NumericObjects import QuantilesEstimations
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+
+# ==============================================================================
+def ecwblue(selfA, Xb, Y, HO, R, B):
+    """
+    BLUE
+    """
+    #
+    # Initialisations
+    # ---------------
+    Hm = HO["Tangent"].asMatrix(Xb)
+    Hm = Hm.reshape(Y.size,Xb.size) # ADAO & check shape
+    Ha = HO["Adjoint"].asMatrix(Xb)
+    Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(HO["AppliedInX"]["HXb"])
+    else:
+        HXb = Hm @ Xb
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    BI = B.getI()
+    RI = R.getI()
+    #
+    Innovation  = Y - HXb
+    #
+    # Computation of the gain matrix and of the analysis
+    # --------------------------------------------
+    if Y.size <= Xb.size:
+        _A = R + numpy.dot(Hm, B * Ha)
+        _u = numpy.linalg.solve( _A , Innovation )
+        Xa = Xb + B * Ha * _u
+    else:
+        _A = BI + numpy.dot(Ha, RI * Hm)
+        _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
+        Xa = Xb + _u
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    # Computation of the cost function
+    # --------------------------
+    if selfA._parameters["StoreInternalVariables"] or \
+        selfA._toStore("CostFunctionJ")  or selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJb") or selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJo") or selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+        selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("SimulatedObservationAtCurrentOptimum") or \
+        selfA._toStore("SimulatedObservationAtCurrentState") or \
+        selfA._toStore("SimulatedObservationAtOptimum") or \
+        selfA._toStore("SimulationQuantiles"):
+        HXa = Hm @ Xa
+        oma = Y - HXa.reshape((-1,1))
+    if selfA._parameters["StoreInternalVariables"] or \
+        selfA._toStore("CostFunctionJ")  or selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJb") or selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJo") or selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+        selfA._toStore("MahalanobisConsistency"):
+        Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+        Jo  = float( 0.5 * oma.T * (RI * oma) )
+        J   = Jb + Jo
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( Jb )
+        selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( Jo )
+        selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( J )
+    #
+    # Computation of the analysis covariance
+    # ---------------------------------
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        if   (Y.size <= Xb.size): K  = B * Ha * (R + numpy.dot(Hm, B * Ha)).I
+        elif (Y.size >  Xb.size): K = (BI + numpy.dot(Ha, RI * Hm)).I * Ha * RI
+        A = B - K * Hm * B
+        A = (A + A.T) * 0.5 # Symmetry
+        A = A + mpr*numpy.trace( A ) * numpy.identity(Xa.size) # Positivity
+        if min(A.shape) != max(A.shape):
+            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+        if (numpy.diag(A) < 0).any():
+            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
+            try:
+                numpy.linalg.cholesky( A )
+            except:
+                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    #
+    # Additional computations and/or storage
+    # ---------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        selfA.StoredVariables["CurrentState"].store( Xa )
+    if selfA._toStore("CurrentOptimum"):
+        selfA.StoredVariables["CurrentOptimum"].store( Xa )
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( Innovation )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( Innovation )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
+    if selfA._toStore("SigmaBck2"):
+        selfA.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (numpy.ravel(Xa) - numpy.ravel(Xb))))/(Hm * (B * Hm.T)).trace() ) )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        H  = HO["Direct"].appliedTo
+        QuantilesEstimations(selfA, A, Xa, HXa, H, Hm)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtCurrentState"):
+        selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
+    if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
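
The two branches computing the analysis above solve the BLUE problem either in observation space (when there are no more observations than state variables) or in state space. A minimal dense-matrix sketch, assuming Xb and Y are column vectors and Hm is the observation operator as a plain numpy array (placeholders, not ADAO covariance objects):

import numpy

def blue_sketch(Xb, Y, Hm, B, R):
    d = Y - Hm @ Xb                          # innovation
    if Y.size <= Xb.size:                    # solve in observation space
        Xa = Xb + B @ Hm.T @ numpy.linalg.solve(R + Hm @ B @ Hm.T, d)
    else:                                    # solve in state space (information form)
        BI, RI = numpy.linalg.inv(B), numpy.linalg.inv(R)
        Xa = Xb + numpy.linalg.solve(BI + Hm.T @ RI @ Hm, Hm.T @ RI @ d)
    return Xa
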
diff --git a/src/daComposant/daAlgorithms/Atoms/ecwexblue.py b/src/daComposant/daAlgorithms/Atoms/ecwexblue.py
new file mode 100644 (file)
index 0000000..b94c4aa
--- /dev/null
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Extended BLUE
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import logging, numpy
+from daCore.NumericObjects import QuantilesEstimations
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+
+# ==============================================================================
+def ecwexblue(selfA, Xb, Y, HO, R, B):
+    """
+    Extended BLUE
+    """
+    #
+    # Initialisations
+    # ---------------
+    Hm = HO["Tangent"].asMatrix(Xb)
+    Hm = Hm.reshape(Y.size,Xb.size) # ADAO & check shape
+    Ha = HO["Adjoint"].asMatrix(Xb)
+    Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
+    H  = HO["Direct"].appliedTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(H( Xb, HO["AppliedInX"]["HXb"]))
+    else:
+        HXb = numpy.asarray(H( Xb ))
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    BI = B.getI()
+    RI = R.getI()
+    #
+    Innovation  = Y - HXb
+    #
+    # Computation of the gain matrix and of the analysis
+    # --------------------------------------------
+    if Y.size <= Xb.size:
+        _A = R + numpy.dot(Hm, B * Ha)
+        _u = numpy.linalg.solve( _A , Innovation )
+        Xa = Xb + B * Ha * _u
+    else:
+        _A = BI + numpy.dot(Ha, RI * Hm)
+        _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
+        Xa = Xb + _u
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    # Computation of the cost function
+    # --------------------------
+    if selfA._parameters["StoreInternalVariables"] or \
+        selfA._toStore("CostFunctionJ")  or selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJb") or selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJo") or selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+        selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("SimulatedObservationAtCurrentOptimum") or \
+        selfA._toStore("SimulatedObservationAtCurrentState") or \
+        selfA._toStore("SimulatedObservationAtOptimum") or \
+        selfA._toStore("SimulationQuantiles"):
+        HXa = H( Xa )
+        oma = Y - HXa.reshape((-1,1))
+    if selfA._parameters["StoreInternalVariables"] or \
+        selfA._toStore("CostFunctionJ")  or selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJb") or selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJo") or selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+        selfA._toStore("MahalanobisConsistency"):
+        Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+        Jo  = float( 0.5 * oma.T * (RI * oma) )
+        J   = Jb + Jo
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( Jb )
+        selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( Jo )
+        selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( J )
+    #
+    # Computation of the analysis covariance
+    # ---------------------------------
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        if   (Y.size <= Xb.size): K  = B * Ha * (R + numpy.dot(Hm, B * Ha)).I
+        elif (Y.size >  Xb.size): K = (BI + numpy.dot(Ha, RI * Hm)).I * Ha * RI
+        A = B - K * Hm * B
+        A = (A + A.T) * 0.5 # Symmetry
+        A = A + mpr*numpy.trace( A ) * numpy.identity(Xa.size) # Positivity
+        if min(A.shape) != max(A.shape):
+            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+        if (numpy.diag(A) < 0).any():
+            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
+            try:
+                numpy.linalg.cholesky( A )
+            except:
+                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    #
+    # Additional computations and/or storage
+    # ---------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        selfA.StoredVariables["CurrentState"].store( Xa )
+    if selfA._toStore("CurrentOptimum"):
+        selfA.StoredVariables["CurrentOptimum"].store( Xa )
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( Innovation )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( Innovation )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
+    if selfA._toStore("SigmaBck2"):
+        selfA.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (numpy.ravel(Xa) - numpy.ravel(Xb))))/(Hm * (B * Hm.T)).trace() ) )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        HtM  = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM  = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+        QuantilesEstimations(selfA, A, Xa, HXa, H, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtCurrentState"):
+        selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
+    if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
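
Both BLUE atoms build the a posteriori covariance the same way: A = B - K Hm B, forced symmetry, then a tiny diagonal inflation proportional to the trace before the optional Cholesky check. A minimal sketch with dense arrays: K is the gain, and the default mpr (machine epsilon) is an assumption standing in for PlatformInfo().MachinePrecision().

import numpy

def posterior_covariance_sketch(B, Hm, K, mpr=numpy.finfo(float).eps):
    A = B - K @ Hm @ B
    A = 0.5 * (A + A.T)                                          # enforce symmetry
    A = A + mpr * numpy.trace(A) * numpy.identity(A.shape[0])    # nudge toward positivity
    numpy.linalg.cholesky(A)   # raises LinAlgError if A is not positive definite
    return A
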
diff --git a/src/daComposant/daAlgorithms/Atoms/ecwlls.py b/src/daComposant/daAlgorithms/Atoms/ecwlls.py
new file mode 100644 (file)
index 0000000..9971ac8
--- /dev/null
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Linear Least Squares
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+# ==============================================================================
+def ecwlls(selfA, Xb, Y, HO, R, B):
+    """
+    Linear Least Squares
+    """
+    #
+    # Initialisations
+    # ---------------
+    Hm = HO["Tangent"].asMatrix(Xb)
+    Hm = Hm.reshape(Y.size,-1) # ADAO & check shape
+    Ha = HO["Adjoint"].asMatrix(Xb)
+    Ha = Ha.reshape(-1,Y.size) # ADAO & check shape
+    #
+    if R is None:
+        RI = 1.
+    else:
+        RI = R.getI()
+    #
+    # Computation of the gain matrix and of the analysis
+    # --------------------------------------------
+    K = (Ha * (RI * Hm)).I * Ha * RI
+    Xa =  K * Y
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    # Computation of the cost function
+    # --------------------------
+    if selfA._parameters["StoreInternalVariables"] or \
+        selfA._toStore("CostFunctionJ")  or selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJb") or selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJo") or selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+        selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SimulatedObservationAtCurrentOptimum") or \
+        selfA._toStore("SimulatedObservationAtCurrentState") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        HXa = Hm @ Xa
+        oma = Y - HXa.reshape((-1,1))
+    if selfA._parameters["StoreInternalVariables"] or \
+        selfA._toStore("CostFunctionJ")  or selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJb") or selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+        selfA._toStore("CostFunctionJo") or selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+        Jb  = 0.
+        Jo  = float( 0.5 * oma.T * (RI * oma) )
+        J   = Jb + Jo
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( Jb )
+        selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( Jo )
+        selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( J )
+    #
+    # Additional computations and/or storage
+    # ---------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        selfA.StoredVariables["CurrentState"].store( Xa )
+    if selfA._toStore("CurrentOptimum"):
+        selfA.StoredVariables["CurrentOptimum"].store( Xa )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("SimulatedObservationAtCurrentState"):
+        selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
+    if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
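
The gain K built above is the ordinary weighted least squares estimator, so the analysis can also be obtained by solving the normal equations directly. A minimal sketch with dense arrays (Hm and Y are numpy placeholders; when R is None the weights reduce to the identity, as in the RI = 1. branch above):

import numpy

def lls_sketch(Y, Hm, R=None):
    # Xa = (Hm^T R^-1 Hm)^-1 Hm^T R^-1 Y, via numpy.linalg.solve instead of explicit inverses
    RI = numpy.identity(Y.size) if R is None else numpy.linalg.inv(R)
    return numpy.linalg.solve(Hm.T @ RI @ Hm, Hm.T @ RI @ Y)
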
diff --git a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py
new file mode 100644 (file)
index 0000000..013745d
--- /dev/null
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Non Linear Least Squares
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy, scipy, scipy.optimize, scipy.version
+
+# ==============================================================================
+def ecwnlls(selfA, Xb, Y, HO, R, B):
+    """
+    Non Linear Least Squares
+    """
+    #
+    # Initialisations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    Ha = HO["Adjoint"].appliedInXTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
+    else:
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    RI = R.getI()
+    if selfA._parameters["Minimizer"] == "LM":
+        RdemiI = R.choleskyI()
+    #
+    Xini = selfA._parameters["InitializationPoint"]
+    #
+    # Definition of the cost function
+    # ------------------------------
+    def CostFunction(x):
+        _X  = numpy.asarray(x).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
+        _Innovation = Y - _HX
+        if selfA._toStore("SimulatedObservationAtCurrentState") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        #
+        Jb  = 0.
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+        J   = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        return J
+    #
+    def GradientOfCostFunction(x):
+        _X      = numpy.asarray(x).reshape((-1,1))
+        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
+        GradJb  = 0.
+        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
+        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+        return GradJ
+    #
+    def CostFunctionLM(x):
+        _X  = numpy.ravel( x ).reshape((-1,1))
+        _HX = Hm( _X ).reshape((-1,1))
+        _Innovation = Y - _HX
+        Jb  = 0.
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+        J   = Jb + Jo
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        #
+        return numpy.ravel( RdemiI*_Innovation )
+    #
+    def GradientOfCostFunctionLM(x):
+        _X      = x.reshape((-1,1))
+        return - RdemiI*HO["Tangent"].asMatrix( _X )
+    #
+    # Minimization of the functional
+    # --------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import daAlgorithms.Atoms.lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        # nfeval = Informations['funcalls']
+        # rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "LM":
+        Minimum, cov_x, infodict, mesg, rc = scipy.optimize.leastsq(
+            func        = CostFunctionLM,
+            x0          = Xini,
+            Dfun        = GradientOfCostFunctionLM,
+            args        = (),
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            maxfev      = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            full_output = True,
+            )
+        # nfeval = infodict['nfev']
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    #
+    # Workaround for a TNC bug in the returned Minimum
+    # ----------------------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    #
+    Xa = Minimum
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+        oma = Y - HXa.reshape((-1,1))
+    #
+    # Additional computations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("OMB"):
+        Innovation  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( Innovation )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( Innovation )
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
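
The "LM" branch above feeds scipy.optimize.leastsq with the R^-1/2-weighted residuals and their Jacobian. A minimal sketch of the same idea with the scipy.optimize.least_squares interface (H, Y, R and Xini are plain numpy placeholders, not ADAO objects; the Cholesky factor of R provides the weighting):

import numpy, scipy.optimize

def nlls_sketch(Xini, Y, H, R):
    # Minimize 0.5 * || R^(-1/2) (Y - H(x)) ||^2 with a Levenberg-Marquardt solver
    RdemiI = numpy.linalg.inv(numpy.linalg.cholesky(R))
    def residuals(x):
        return numpy.ravel(RdemiI @ (numpy.ravel(Y) - numpy.ravel(H(x))).reshape((-1, 1)))
    return scipy.optimize.least_squares(residuals, numpy.ravel(Xini), method="lm").x
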
diff --git a/src/daComposant/daAlgorithms/Atoms/enks.py b/src/daComposant/daAlgorithms/Atoms/enks.py
new file mode 100644 (file)
index 0000000..fc91920
--- /dev/null
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Ensemble Kalman Smoother
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import copy, math, numpy, scipy, scipy.optimize, scipy.version
+from daCore.NumericObjects import EnsembleErrorCovariance
+from daCore.NumericObjects import EnsembleOfAnomalies
+from daCore.NumericObjects import EnsembleOfBackgroundPerturbations
+from daCore.NumericObjects import EnsemblePerturbationWithGivenCovariance
+from daAlgorithms.Atoms import etkf
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
+    """
+    Ensemble Kalman Smoother
+    """
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Precompute the inverses of B and R
+    RIdemi = R.sqrtmI()
+    #
+    # Observation duration and sizes
+    LagL = selfA._parameters["SmootherLagL"]
+    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
+        raise ValueError("Fixed-lag smoother requires a series of observation")
+    if Y.stepnumber() < LagL:
+        raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
+    duration = Y.stepnumber()
+    __p = numpy.cumprod(Y.shape())[-1]
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+    #
+    # Initial direct computation (storage is preferred over recomputation)
+    __seed = numpy.random.get_state()
+    selfB = copy.deepcopy(selfA)
+    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
+    if VariantM == "EnKS16-KalmanFilterFormula":
+        etkf.etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
+    else:
+        raise ValueError("VariantM has to be chosen in the authorized methods list.")
+    if LagL > 0:
+        EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
+    else:
+        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
+    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
+    #
+    for step in range(LagL,duration-1):
+        #
+        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
+        sEL.append(None)
+        #
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        #--------------------------
+        if VariantM == "EnKS16-KalmanFilterFormula":
+            if selfA._parameters["EstimationOf"] == "State": # Forecast
+                EL = M( [(EL[:,i], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
+                EZ = H( [(EL[:,i], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
+                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                    EZ = EZ + Cm @ Un
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # --- > By principle, M = Id, Q = 0
+                EZ = H( [(EL[:,i], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+            #
+            vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+            vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+            #
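+            # ETKF-type update in ensemble space: mS = R^(-1/2) (EZ - vZm) / sqrt(m-1),
+            # mT = (I + mS^T mS)^(-1), vw = mT mS^T R^(-1/2) (Ynpu - vZm); the transform wTU
+            # computed here is also applied below to the L lagged ensembles (smoothing)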
+            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
+            mS    = mS.reshape((-1,__m)) # For dimension 1
+            delta = RIdemi @ ( Ynpu - vZm )
+            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+            vw    = mT @ mS.T @ delta
+            #
+            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+            mU    = numpy.identity(__m)
+            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
+            #
+            EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
+            EL    = vEm + EX @ wTU
+            #
+            sEL[LagL] = EL
+            for irl in range(LagL): # Smoothing of the L previous analyses
+                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
+                sEL[irl] = vEm + EX @ wTU
+            #
+            # Keep the order-0 retrospective analysis before rotation
+            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+            if selfA._toStore("APosterioriCovariance"):
+                EXn = sEL[0]
+            #
+            for irl in range(LagL):
+                sEL[irl] = sEL[irl+1]
+            sEL[LagL] = None
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
+    #
+    # Storage of the last analyses, which are only partially updated
+    for irl in range(LagL):
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/etkf.py b/src/daComposant/daAlgorithms/Atoms/etkf.py
new file mode 100644 (file)
index 0000000..4cbcfa8
--- /dev/null
@@ -0,0 +1,402 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Ensemble-Transform Kalman Filter
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import math, numpy, scipy, scipy.linalg, scipy.optimize, scipy.version
+from daCore.NumericObjects import Apply3DVarRecentringOnEnsemble
+from daCore.NumericObjects import CovarianceInflation
+from daCore.NumericObjects import EnsembleErrorCovariance
+from daCore.NumericObjects import EnsembleMean
+from daCore.NumericObjects import EnsembleOfAnomalies
+from daCore.NumericObjects import EnsembleOfBackgroundPerturbations
+from daCore.NumericObjects import EnsemblePerturbationWithGivenCovariance
+
+# ==============================================================================
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula",
+    Hybrid=None,
+    ):
+    """
+    Ensemble-Transform Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precomputation of the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    elif VariantM != "KalmanFilterFormula":
+        RI = R.getI()
+    if VariantM == "KalmanFilterFormula":
+        RIdemi = R.sqrtmI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > By principle, M = Id, Q = 0
+            Xn_predicted = EMX = Xn
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+        #
+        # Mean of forecast and observation of forecast
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
+        #
+        # Anomalies
+        EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
+        EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
+        #
+        #--------------------------
+        if VariantM == "KalmanFilterFormula":
+            mS    = RIdemi * EaHX / math.sqrt(__m-1)
+            mS    = mS.reshape((-1,__m)) # For dimension 1
+            delta = RIdemi * ( Ynpu - Hfm )
+            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+            vw    = mT @ mS.T @ delta
+            #
+            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+            mU    = numpy.identity(__m)
+            #
+            EaX   = EaX / math.sqrt(__m-1)
+            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
+        #--------------------------
+        elif VariantM == "Variational":
+            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * (__m-1) * w.T @ w
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = (__m-1) * w.reshape((__m,1))
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = (__m-1) * numpy.identity(__m)
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
+        #--------------------------
+        elif VariantM == "FiniteSize11": # Jauge Boc2011
+            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = __m * \
+                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
+                / (1 + 1/__m + vw.T @ vw)**2
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+        #--------------------------
+        elif VariantM == "FiniteSize15": # Jauge Boc2015
+            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = (__m+1) * \
+                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
+                / (1 + 1/__m + vw.T @ vw)**2
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+        #--------------------------
+        elif VariantM == "FiniteSize16": # Jauge Boc2016
+            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = ((__m+1) / (__m-1)) * \
+                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
+                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
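+        # Hybrid E3DVAR step: the analysis ensemble is recentred through a 3DVAR correction
+        # (Apply3DVarRecentringOnEnsemble), betaf being the HybridCovarianceEquilibrium weight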
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( EMX )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Additional final storage of the optimum for parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/exkf.py b/src/daComposant/daAlgorithms/Atoms/exkf.py
new file mode 100644 (file)
index 0000000..19e87c6
--- /dev/null
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Extended Kalman Filter
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Extended Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precomputation of the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
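+        # Tangent (Ht) and adjoint (Ha) of the observation operator, linearized at the current state Xn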
+        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
+        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
+        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
+            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
+            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > By principle, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is applied twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
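+        # Standard Kalman update with the tangent/adjoint linearizations:
+        #   Kn = Pf Ha (R + Ht Pf Ha)^(-1),  Xa = Xf + Kn * innovation,  Pa = Pf - Kn Ht Pf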
+        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+        Xn = Xn_predicted + Kn * _Innovation
+        Pn = Pn_predicted - Kn * Ht * Pn_predicted
+        #
+        Xa = Xn # Pointers (no copy)
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Additional final storage of the optimum for parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/ienkf.py b/src/daComposant/daAlgorithms/Atoms/ienkf.py
new file mode 100644 (file)
index 0000000..ca356c7
--- /dev/null
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Iterative Ensemble Kalman Filter
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import math, numpy, scipy, scipy.linalg, scipy.optimize, scipy.version
+from daCore.NumericObjects import EnsembleOfBackgroundPerturbations
+from daCore.NumericObjects import EnsembleOfAnomalies
+from daCore.NumericObjects import CovarianceInflation
+from daCore.NumericObjects import EnsembleMean
+from daCore.NumericObjects import EnsembleErrorCovariance
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
+    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+    """
+    Iterative Ensemble Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precomputation of the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+    RI = R.getI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+        else:                         Pn = B
+        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        #--------------------------
+        if VariantM == "IEnKF12":
+            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
+            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
+            __j = 0
+            Deltaw = 1
+            if not BnotT:
+                Ta  = numpy.identity(__m)
+            vw  = numpy.zeros(__m)
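+            # Gauss-Newton iterations on the ensemble weights vw: the gradient is
+            # GradJ = vw - EaY^T R^(-1) (Ynpu - vy1) and the approximate Hessian is
+            # mH = I + EaY^T R^(-1) EaY; iterate until ||Deltaw|| < _e or __j > _jmax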
+            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+                #
+                if BnotT:
+                    E1 = vx1 + _epsilon * EaX
+                else:
+                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+                #
+                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
+                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+                        argsAsSerie = True,
+                        returnSerieAsArrayMatrix = True )
+                elif selfA._parameters["EstimationOf"] == "Parameters":
+                    # --- > By principle, M = Id
+                    E2 = Xn
+                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+                vy1 = H((vx2, Un)).reshape((__p,1))
+                #
+                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+                #
+                if BnotT:
+                    EaY = (HE2 - vy2) / _epsilon
+                else:
+                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+                #
+                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
+                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
+                Deltaw = - numpy.linalg.solve(mH,GradJ)
+                #
+                vw = vw + Deltaw
+                #
+                if not BnotT:
+                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+                #
+                __j = __j + 1
+            #
+            A2 = EnsembleOfAnomalies( E2 )
+            #
+            if BnotT:
+                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
+            #
+            Xn = vx2 + A2
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( E2 )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( E2 - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Additional final storage of the optimum for parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py
new file mode 100644 (file)
index 0000000..d936901
--- /dev/null
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Incremental 3DVAR
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy, scipy, scipy.optimize, scipy.version
+from daCore.NumericObjects import HessienneEstimation, QuantilesEstimations
+from daCore.NumericObjects import RecentredBounds
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+
+# ==============================================================================
+def incr3dvar(selfA, Xb, Y, HO, R, B):
+    """
+    Incremental 3DVAR
+    """
+    #
+    # Initializations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
+    else:
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    if selfA._toStore("JacobianMatrixAtBackground"):
+        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+    #
+    BI = B.getI()
+    RI = R.getI()
+    #
+    Innovation = Y - HXb
+    #
+    # Outer Loop
+    # ----------
+    iOuter = 0
+    J      = 1./mpr
+    DeltaJ = 1./mpr
+    Xr     = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
+    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
+        #
+        # Inner Loop
+        # ----------
+        Ht = HO["Tangent"].asMatrix(Xr)
+        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
+        #
+        # Definition of the cost function
+        # ------------------------------
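+        # Inner-loop (incremental) cost, with Innovation = Y - H(Xb) computed once outside
+        # the outer loop and Ht the tangent linear of H at the current outer iterate Xr:
+        #   J(dx) = 0.5 dx^T B^(-1) dx + 0.5 (Innovation - Ht dx)^T R^(-1) (Innovation - Ht dx)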
+        def CostFunction(dx):
+            _dX  = numpy.asarray(dx).reshape((-1,1))
+            if selfA._parameters["StoreInternalVariables"] or \
+                selfA._toStore("CurrentState") or \
+                selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
+            _HdX = (Ht @ _dX).reshape((-1,1))
+            _dInnovation = Innovation - _HdX
+            if selfA._toStore("SimulatedObservationAtCurrentState") or \
+                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
+            if selfA._toStore("InnovationAtCurrentState"):
+                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
+            #
+            Jb  = float( 0.5 * _dX.T * (BI * _dX) )
+            Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
+            J   = Jb + Jo
+            #
+            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            if selfA._toStore("IndexOfOptimum") or \
+                selfA._toStore("CurrentOptimum") or \
+                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+            return J
+        #
+        def GradientOfCostFunction(dx):
+            _dX          = numpy.ravel( dx )
+            _HdX         = (Ht @ _dX).reshape((-1,1))
+            _dInnovation = Innovation - _HdX
+            GradJb       = BI @ _dX
+            GradJo       = - Ht.T @ (RI * _dInnovation)
+            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+            return GradJ
+        #
+        # Minimization of the functional
+        # --------------------------------
+        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+        #
+        if selfA._parameters["Minimizer"] == "LBFGSB":
+            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
+            if "0.19" <= scipy.version.version <= "1.1.0":
+                import daAlgorithms.Atoms.lbfgsbhlt as optimiseur
+            else:
+                import scipy.optimize as optimiseur
+            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+                func        = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
+                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+                iprint      = selfA._parameters["optiprint"],
+                )
+            # nfeval = Informations['funcalls']
+            # rc     = Informations['warnflag']
+        elif selfA._parameters["Minimizer"] == "TNC":
+            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+                func        = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
+                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+                ftol        = selfA._parameters["CostDecrementTolerance"],
+                messages    = selfA._parameters["optmessages"],
+                )
+        elif selfA._parameters["Minimizer"] == "CG":
+            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+                gtol        = selfA._parameters["GradientNormTolerance"],
+                disp        = selfA._parameters["optdisp"],
+                full_output = True,
+                )
+        elif selfA._parameters["Minimizer"] == "NCG":
+            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+                f           = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+                avextol     = selfA._parameters["CostDecrementTolerance"],
+                disp        = selfA._parameters["optdisp"],
+                full_output = True,
+                )
+        elif selfA._parameters["Minimizer"] == "BFGS":
+            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+                f           = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+                gtol        = selfA._parameters["GradientNormTolerance"],
+                disp        = selfA._parameters["optdisp"],
+                full_output = True,
+                )
+        else:
+            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+        #
+        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+        #
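+        # The minimizer works on the increment around Xb: the analysis is
+        # either read back from the stored current states at the optimal
+        # index, or rebuilt by adding the optimal increment to Xb.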
+        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+        else:
+            Minimum = Xb + Minimum.reshape((-1,1))
+        #
+        Xr     = Minimum
+        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
+        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
+    #
+    Xa = Xr
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+        oma = Y - HXa.reshape((-1,1))
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        A = HessienneEstimation(selfA, Xa.size, HaM, HtM, BI, RI)
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional calculations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        Innovation  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( Innovation )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( Innovation )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (Innovation.T @ oma) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/Innovation.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/mlef.py b/src/daComposant/daAlgorithms/Atoms/mlef.py
new file mode 100644 (file)
index 0000000..945325b
--- /dev/null
@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Maximum Likelihood Ensemble Filter
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import math, numpy, scipy, scipy.optimize, scipy.version
+from daCore.NumericObjects import Apply3DVarRecentringOnEnsemble
+from daCore.NumericObjects import CovarianceInflation
+from daCore.NumericObjects import EnsembleErrorCovariance
+from daCore.NumericObjects import EnsembleMean
+from daCore.NumericObjects import EnsembleOfAnomalies
+from daCore.NumericObjects import EnsembleOfBackgroundPerturbations
+from daCore.NumericObjects import EnsemblePerturbationWithGivenCovariance
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
+    Hybrid=None,
+    ):
+    """
+    Maximum Likelihood Ensemble Filter (MLEF)
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+    RI = R.getI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > By principle, M = Id, Q = 0
+            Xn_predicted = EMX = Xn
+        #
+        #--------------------------
+        if VariantM == "MLEF13":
+            Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
+            EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
+            Ua  = numpy.identity(__m)
+            __j = 0
+            Deltaw = 1
+            if not BnotT:
+                Ta  = numpy.identity(__m)
+            vw  = numpy.zeros(__m)
+            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+                #
+                if BnotT:
+                    E1 = vx1 + _epsilon * EaX
+                else:
+                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+                #
+                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+                #
+                if BnotT:
+                    EaY = (HE2 - vy2) / _epsilon
+                else:
+                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+                #
+                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
+                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
+                Deltaw = - numpy.linalg.solve(mH,GradJ)
+                #
+                vw = vw + Deltaw
+                #
+                if not BnotT:
+                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+                #
+                __j = __j + 1
+            #
+            if BnotT:
+                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+            #
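+            # Analysis ensemble: the anomalies EaX are transformed by the
+            # square-root matrix Ta (and Ua, here the identity) and recentred
+            # on the optimized state vx1.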
+            Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
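+        # Optional E3DVAR hybrid coupling: the analysis ensemble is recentred
+        # by a 3DVAR step, with the covariance weighting given by the
+        # HybridCovarianceEquilibrium parameter.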
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( EMX )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Additional final storage of the optimum in parameter estimation
+    # ----------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py
new file mode 100644 (file)
index 0000000..4e9b471
--- /dev/null
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    3DVAR PSAS
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy, scipy, scipy.optimize, scipy.version
+from daCore.NumericObjects import HessienneEstimation, QuantilesEstimations
+
+# ==============================================================================
+def psas3dvar(selfA, Xb, Y, HO, R, B):
+    """
+    3DVAR PSAS
+    """
+    #
+    # Initializations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
+    else:
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    if selfA._toStore("JacobianMatrixAtBackground"):
+        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+    #
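+    # PSAS works in the dual (observation) space: the control variable w has
+    # the size of Y, and the physical analysis is recovered as Xb + B Ht.T w.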
+    Ht = HO["Tangent"].asMatrix(Xb)
+    BHT = B * Ht.T
+    HBHTpR = R + Ht * BHT
+    Innovation = Y - HXb
+    #
+    Xini = numpy.zeros(Y.size)
+    #
+    # Definition of the cost function
+    # -------------------------------
+    def CostFunction(w):
+        _W = numpy.asarray(w).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
+        if selfA._toStore("SimulatedObservationAtCurrentState") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
+        #
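+        # Dual cost function: J(w) = 0.5 * w.T (H B H.T + R) w - w.T d,
+        # where d is the innovation Y - H(Xb).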
+        Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
+        Jo  = float( - _W.T @ Innovation )
+        J   = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        return J
+    #
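+    # Gradient of the dual cost: GradJ(w) = (H B H.T + R) w - d.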
+    def GradientOfCostFunction(w):
+        _W = numpy.asarray(w).reshape((-1,1))
+        GradJb  = HBHTpR @ _W
+        GradJo  = - Innovation
+        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+        return GradJ
+    #
+    # Minimization of the cost functional
+    # ------------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import daAlgorithms.Atoms.lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        # nfeval = Informations['funcalls']
+        # rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+    #
+    # Correction to work around a TNC bug on the returned Minimum
+    # ------------------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    else:
+        Minimum = Xb + BHT @ Minimum.reshape((-1,1))
+    #
+    Xa = Minimum
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+        oma = Y - HXa.reshape((-1,1))
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        BI = B.getI()
+        RI = R.getI()
+        A = HessienneEstimation(selfA, Xa.size, HaM, HtM, BI, RI)
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional calculations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        Innovation  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( Innovation )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( Innovation )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (Innovation.T @ oma) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/Innovation.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/senkf.py b/src/daComposant/daAlgorithms/Atoms/senkf.py
new file mode 100644 (file)
index 0000000..fee7075
--- /dev/null
@@ -0,0 +1,285 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Stochastic EnKF
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import math, numpy
+from daCore.NumericObjects import Apply3DVarRecentringOnEnsemble
+from daCore.NumericObjects import CovarianceInflation
+from daCore.NumericObjects import EnsembleErrorCovariance
+from daCore.NumericObjects import EnsembleMean
+from daCore.NumericObjects import EnsembleOfAnomalies
+from daCore.NumericObjects import EnsembleOfBackgroundPerturbations
+from daCore.NumericObjects import EnsembleOfCenteredPerturbations
+from daCore.NumericObjects import EnsemblePerturbationWithGivenCovariance
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula16",
+    Hybrid=None,
+    ):
+    """
+    Stochastic EnKF
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+    else:                         Rn = R
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+        else:                         Pn = B
+        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > By principle, M = Id, Q = 0
+            Xn_predicted = EMX = Xn
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+        #
+        # Mean of forecast and observation of forecast
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
+        #
+        #--------------------------
+        if VariantM == "KalmanFilterFormula05":
+            PfHT, HPfHT = 0., 0.
+            for i in range(__m):
+                Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
+                Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
+                PfHT  += Exfi * Eyfi.T
+                HPfHT += Eyfi * Eyfi.T
+            PfHT  = (1./(__m-1)) * PfHT
+            HPfHT = (1./(__m-1)) * HPfHT
+            Kn     = PfHT * ( R + HPfHT ).I
+            del PfHT, HPfHT
+            #
+            for i in range(__m):
+                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
+                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
+        #--------------------------
+        elif VariantM == "KalmanFilterFormula16":
+            EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
+            EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+            #
+            EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
+            EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
+            #
+            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
+            #
+            for i in range(__m):
+                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( EMX )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Additional final storage of the optimum in parameter estimation
+    # ----------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/std3dvar.py b/src/daComposant/daAlgorithms/Atoms/std3dvar.py
new file mode 100644 (file)
index 0000000..e6fb211
--- /dev/null
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    3DVAR
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy, scipy, scipy.optimize, scipy.version
+from daCore.NumericObjects import HessienneEstimation, QuantilesEstimations
+
+# ==============================================================================
+def std3dvar(selfA, Xb, Y, HO, R, B):
+    """
+    3DVAR
+    """
+    #
+    # Initializations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    Ha = HO["Adjoint"].appliedInXTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
+    else:
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    if selfA._toStore("JacobianMatrixAtBackground"):
+        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+    #
+    BI = B.getI()
+    RI = R.getI()
+    #
+    Xini = selfA._parameters["InitializationPoint"]
+    #
+    # Definition of the cost function
+    # -------------------------------
+    def CostFunction(x):
+        _X  = numpy.asarray(x).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
+        _Innovation = Y - _HX
+        if selfA._toStore("SimulatedObservationAtCurrentState") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        #
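+        # 3DVAR cost function:
+        # J(x) = 0.5*(x-Xb).T B^{-1} (x-Xb) + 0.5*(Y-H(x)).T R^{-1} (Y-H(x))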
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+        J   = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        return J
+    #
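+    # Gradient of the 3DVAR cost, using the adjoint operator Ha:
+    # GradJ(x) = B^{-1} (x - Xb) - H'* R^{-1} (Y - H(x))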
+    def GradientOfCostFunction(x):
+        _X      = numpy.asarray(x).reshape((-1,1))
+        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
+        GradJb  = BI * (_X - Xb)
+        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
+        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+        return GradJ
+    #
+    # Minimization of the cost functional
+    # ------------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import daAlgorithms.Atoms.lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        # nfeval = Informations['funcalls']
+        # rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+    #
+    # Correction to work around a TNC bug in the returned Minimum
+    # ------------------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    #
+    Xa = Minimum
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+        oma = Y - HXa.reshape((-1,1))
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        A = HessienneEstimation(selfA, Xa.size, HaM, HtM, BI, RI)
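+    # (the a posteriori covariance A is estimated from an approximation of the
+    # Hessian of J built with the tangent HtM and adjoint HaM at the optimum,
+    # roughly (B^{-1} + H'^T R^{-1} H')^{-1})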
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional calculations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        Innovation  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( Innovation )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( Innovation )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (Innovation.T @ oma) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/Innovation.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/std4dvar.py b/src/daComposant/daAlgorithms/Atoms/std4dvar.py
new file mode 100644 (file)
index 0000000..fbcebc1
--- /dev/null
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    4DVAR
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy, scipy, scipy.optimize, scipy.version
+from daCore.NumericObjects import ForceNumericBounds, ApplyBounds
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    4DVAR
+    """
+    #
+    # Initialisations
+    # ---------------
+    #
+    # Operators
+    Hm = HO["Direct"].appliedControledFormTo
+    Mm = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    def Un(_step):
+        if U is not None:
+            if hasattr(U,"store") and 1<=_step<len(U) :
+                _Un = numpy.ravel( U[_step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                _Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                _Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            _Un = None
+        return _Un
+    def CmUn(_xn,_un):
+        if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
+            _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
+            _CmUn = (_Cm @ _un).reshape((-1,1))
+        else:
+            _CmUn = 0.
+        return _CmUn
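+    # Un(_step) selects the control applicable at a given time step, and
+    # CmUn(_xn,_un) returns its contribution Cm @ _un to the evolved state
+    # when a control operator Cm is available (0. otherwise).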
+    #
+    # Remark: the observations are used starting from time step number 1,
+    # and are stored in Yo according to these indices. Step 0 is therefore
+    # not used, since the first stage starts with the observation of step 1.
+    #
+    # Number of time steps equal to the number of observation steps
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+    else:
+        duration = 2
+    #
+    # Precompute the inverses of B and R
+    BI = B.getI()
+    RI = R.getI()
+    #
+    # Starting point of the optimisation
+    Xini = selfA._parameters["InitializationPoint"]
+    #
+    # Definition of the cost function
+    # -------------------------------
+    selfA.DirectCalculation = [None,] # Step 0 is not observed
+    selfA.DirectInnovation  = [None,] # Step 0 is not observed
+    def CostFunction(x):
+        _X  = numpy.asarray(x).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
+        selfA.DirectCalculation = [None,]
+        selfA.DirectInnovation  = [None,]
+        Jo  = 0.
+        _Xn = _X
+        for step in range(0,duration-1):
+            if hasattr(Y,"store"):
+                _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
+            else:
+                _Ynpu = numpy.ravel( Y ).reshape((-1,1))
+            _Un = Un(step)
+            #
+            # Evolution step
+            if selfA._parameters["EstimationOf"] == "State":
+                _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                pass
+            #
+            if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+                _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
+            #
+            # Difference to the observations
+            if selfA._parameters["EstimationOf"] == "State":
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
+            #
+            # Store the state
+            selfA.DirectCalculation.append( _Xn )
+            selfA.DirectInnovation.append( _YmHMX )
+            #
+            # Addition to the observation part of the cost function
+            Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
+        J = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        return J
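+    # The functional evaluated above is the strong-constraint 4DVAR cost
+    # function, written informally with M_{0->k} the model integration from
+    # the initial time to observation time k (the optional control
+    # contribution Cm U is omitted for readability):
+    #     J(x0) = 1/2 (x0-Xb)^T B^{-1} (x0-Xb)
+    #           + 1/2 sum_k (Yk - H(M_{0->k}(x0)))^T R^{-1} (Yk - H(M_{0->k}(x0)))
+    # The forward states and innovations are kept in DirectCalculation and
+    # DirectInnovation for reuse by the adjoint sweep in GradientOfCostFunction.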
+    #
+    def GradientOfCostFunction(x):
+        _X      = numpy.asarray(x).reshape((-1,1))
+        GradJb  = BI * (_X - Xb)
+        GradJo  = 0.
+        for step in range(duration-1,0,-1):
+            # Retrieve the last stored evolution state
+            _Xn = selfA.DirectCalculation.pop()
+            # Retrieve the last stored innovation
+            _YmHMX = selfA.DirectInnovation.pop()
+            # Compute the adjoints
+            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
+            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
+            # Gradient computation by the adjoint state
+            GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
+            GradJo = Ma * GradJo                 # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
+        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
+        return GradJ
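+    # The gradient is obtained by a backward (adjoint) sweep over the stored
+    # trajectory: with d_k = Yk - H(M_{0->k}(x0)) and a null initial adjoint
+    # variable, each iteration applies  lambda <- M^T ( lambda + H^T R^{-1} d_k ),
+    # M^T and H^T being the adjoints Ma and Ha, and finally
+    #     dJ/dx0 = B^{-1} (x0-Xb) - lambda,
+    # which is what is returned.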
+    #
+    # Minimisation of the cost functional
+    # -----------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import daAlgorithms.Atoms.lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        # nfeval = Informations['funcalls']
+        # rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    #
+    # Correction to work around a TNC bug in the returned Minimum
+    # ------------------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    #
+    # Retrieval of the analysis
+    # -------------------------
+    Xa = Minimum
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    # Additional calculations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/stdkf.py b/src/daComposant/daAlgorithms/Atoms/stdkf.py
new file mode 100644 (file)
index 0000000..b383d90
--- /dev/null
@@ -0,0 +1,219 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    Standard Kalman Filter
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+mfp = PlatformInfo().MaximumPrecision()
+
+# ==============================================================================
+def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Standard Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    # ---------
+    Ht = HO["Tangent"].asMatrix(Xb)
+    Ha = HO["Adjoint"].asMatrix(Xb)
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mt = EM["Tangent"].asMatrix(Xb)
+        Ma = EM["Adjoint"].asMatrix(Xb)
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            Xn_predicted = Mt @ Xn
+            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By principle, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
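+        # Prediction (forecast) step of the Kalman filter, with Mt and Ma the
+        # tangent and adjoint of the evolution model:
+        #     Xf = M Xn [+ Cm Un],    Pf = M Pn M^T + Q
+        # (by construction M = Id and Q = 0 in the parameter-estimation case).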
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = Ht @ Xn_predicted
+            _Innovation  = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = Ht @ Xn_predicted
+            _Innovation  = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
+        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+        Xn = Xn_predicted + Kn * _Innovation
+        Pn = Pn_predicted - Kn * Ht * Pn_predicted
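+        # Analysis (correction) step, in the standard Kalman filter form:
+        #     K  = Pf H^T (H Pf H^T + R)^{-1}
+        #     Xa = Xf + K (Y - H Xf)
+        #     Pa = (I - K H) Pf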
+        #
+        Xa = Xn # Pointers
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with the analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with the current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Final additional storage of the optimum in parameter estimation
+    # ----------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
diff --git a/src/daComposant/daAlgorithms/Atoms/van3dvar.py b/src/daComposant/daAlgorithms/Atoms/van3dvar.py
new file mode 100644 (file)
index 0000000..93d608f
--- /dev/null
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2008-2022 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
+
+__doc__ = """
+    3DVAR variational analysis with no inversion of B
+"""
+__author__ = "Jean-Philippe ARGAUD"
+
+import numpy, scipy, scipy.optimize, scipy.version
+from daCore.NumericObjects import HessienneEstimation, QuantilesEstimations
+from daCore.NumericObjects import RecentredBounds
+from daCore.PlatformInfo import PlatformInfo
+mpr = PlatformInfo().MachinePrecision()
+
+# ==============================================================================
+def van3dvar(selfA, Xb, Y, HO, R, B):
+    """
+    3DVAR variational analysis with no inversion of B
+    """
+    #
+    # Initialisations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    Ha = HO["Adjoint"].appliedInXTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
+    else:
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    if selfA._toStore("JacobianMatrixAtBackground"):
+        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+    #
+    BT = B.getT()
+    RI = R.getI()
+    if ("Bounds" in selfA._parameters) and selfA._parameters["Bounds"] is not None:
+        BI = B.getI()
+    else:
+        BI = None
+    #
+    Xini = numpy.zeros(Xb.size)
+    #
+    # Definition of the cost function
+    # -------------------------------
+    def CostFunction(v):
+        _V = numpy.asarray(v).reshape((-1,1))
+        _X = Xb + (B @ _V).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
+        _Innovation = Y - _HX
+        if selfA._toStore("SimulatedObservationAtCurrentState") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        #
+        Jb  = float( 0.5 * _V.T * (BT * _V) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+        J   = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        return J
+    #
+    def GradientOfCostFunction(v):
+        _V = numpy.asarray(v).reshape((-1,1))
+        _X = Xb + (B @ _V).reshape((-1,1))
+        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
+        GradJb  = BT * _V
+        GradJo  = - BT * Ha( (_X, RI * (Y - _HX)) )
+        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+        return GradJ
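+    # This variant works on the preconditioned control variable v defined by
+    # x = Xb + B v, which avoids any explicit inversion of B:
+    #     J(v)  = 1/2 v^T B v + 1/2 (Y-H(Xb+Bv))^T R^{-1} (Y-H(Xb+Bv))
+    #     dJ/dv = B v - B H'^T R^{-1} (Y-H(Xb+Bv))
+    # (B being symmetric, B^T = B in the expressions coded with BT above).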
+    #
+    # Minimisation of the cost functional
+    # -----------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import daAlgorithms.Atoms.lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb, BI),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        # nfeval = Informations['funcalls']
+        # rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb, BI),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+    #
+    # Correction to work around a TNC bug in the returned Minimum
+    # ------------------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    else:
+        Minimum = Xb + B * Minimum.reshape((-1,1)) # Not @
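+        # (the optimiser returns the increment v, so the analysis in model
+        # space is recovered as Xa = Xb + B v)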
+    #
+    Xa = Minimum
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("InnovationAtCurrentAnalysis") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+        oma = Y - HXa.reshape((-1,1))
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        BI = B.getI()
+        A = HessienneEstimation(selfA, Xa.size, HaM, HtM, BI, RI)
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional calculations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        Innovation  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( Innovation )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( oma )
+    if selfA._toStore("InnovationAtCurrentAnalysis"):
+        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( Innovation )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (Innovation.T @ oma) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/Innovation.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+if __name__ == "__main__":
+    print('\n AUTODIAGNOSTIC\n')
index 6140935c0869629948e6090093d38a02d070c587..ccba80cf069b9f3f69162111f95ef185d4ab2737 100644 (file)
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging
-from daCore import BasicObjects, NumericObjects
 import numpy
+from daCore import BasicObjects, NumericObjects
+from daAlgorithms.Atoms import ecwblue
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
     def __init__(self):
         BasicObjects.Algorithm.__init__(self, "BLUE")
+        self.defineRequiredParameter(
+            name     = "Variant",
+            default  = "Blue",
+            typecast = str,
+            message  = "Variant ou formulation de la méthode",
+            listval  = [
+                "Blue",
+                "OneCorrection",
+                ],
+            )
+        self.defineRequiredParameter(
+            name     = "EstimationOf",
+            default  = "Parameters",
+            typecast = str,
+            message  = "Estimation d'état ou de paramètres",
+            listval  = ["State", "Parameters"],
+            )
         self.defineRequiredParameter(
             name     = "StoreInternalVariables",
             default  = False,
@@ -52,9 +69,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "CostFunctionJbAtCurrentOptimum",
                 "CostFunctionJo",
                 "CostFunctionJoAtCurrentOptimum",
+                "CurrentIterationNumber",
                 "CurrentOptimum",
                 "CurrentState",
+                "ForecastState",
                 "Innovation",
+                "InnovationAtCurrentAnalysis",
                 "MahalanobisConsistency",
                 "OMA",
                 "OMB",
@@ -101,6 +121,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             )
         self.requireInputArguments(
             mandatory= ("Xb", "Y", "HO", "R", "B"),
+            optional = ("U", "EM", "CM", "Q"),
             )
         self.setAttributes(tags=(
             "DataAssimilation",
@@ -111,118 +132,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        Hm = HO["Tangent"].asMatrix(Xb)
-        Hm = Hm.reshape(Y.size,Xb.size) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(Xb)
-        Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
-        #
-        if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-            HXb = HO["AppliedInX"]["HXb"]
-        else:
-            HXb = Hm @ Xb
-        HXb = HXb.reshape((-1,1))
-        if Y.size != HXb.size:
-            raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
-        if max(Y.shape) != max(HXb.shape):
-            raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
-        #
-        BI = B.getI()
-        RI = R.getI()
+        #--------------------------
+        if   self._parameters["Variant"] == "Blue":
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, ecwblue.ecwblue)
         #
-        Innovation  = Y - HXb
+        #--------------------------
+        elif self._parameters["Variant"] == "OneCorrection":
+            ecwblue.ecwblue(self, Xb, Y, HO, R, B)
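+            # (the "OneCorrection" variant applies the elementary BLUE analysis
+            # once to the given pair (Xb, Y), while the "Blue" variant above
+            # delegates to NumericObjects.multiXOsteps to drive the same
+            # elementary correction over the successive analysis steps)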
         #
-        # Calcul de la matrice de gain et de l'analyse
-        # --------------------------------------------
-        if Y.size <= Xb.size:
-            _A = R + numpy.dot(Hm, B * Ha)
-            _u = numpy.linalg.solve( _A , Innovation )
-            Xa = Xb + B * Ha * _u
+        #--------------------------
         else:
-            _A = BI + numpy.dot(Ha, RI * Hm)
-            _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
-            Xa = Xb + _u
-        self.StoredVariables["Analysis"].store( Xa )
-        #
-        # Calcul de la fonction coût
-        # --------------------------
-        if self._parameters["StoreInternalVariables"] or \
-            self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
-            self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
-            self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
-            self._toStore("OMA") or \
-            self._toStore("SigmaObs2") or \
-            self._toStore("MahalanobisConsistency") or \
-            self._toStore("SimulatedObservationAtCurrentOptimum") or \
-            self._toStore("SimulatedObservationAtCurrentState") or \
-            self._toStore("SimulatedObservationAtOptimum") or \
-            self._toStore("SimulationQuantiles"):
-            HXa = Hm @ Xa
-            oma = Y - HXa
-        if self._parameters["StoreInternalVariables"] or \
-            self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
-            self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
-            self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
-            self._toStore("MahalanobisConsistency"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * oma.T * (RI * oma) )
-            J   = Jb + Jo
-            self.StoredVariables["CostFunctionJb"].store( Jb )
-            self.StoredVariables["CostFunctionJo"].store( Jo )
-            self.StoredVariables["CostFunctionJ" ].store( J )
-            self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( Jb )
-            self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( Jo )
-            self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( J )
-        #
-        # Calcul de la covariance d'analyse
-        # ---------------------------------
-        if self._toStore("APosterioriCovariance") or \
-            self._toStore("SimulationQuantiles"):
-            if   (Y.size <= Xb.size): K  = B * Ha * (R + numpy.dot(Hm, B * Ha)).I
-            elif (Y.size >  Xb.size): K = (BI + numpy.dot(Ha, RI * Hm)).I * Ha * RI
-            A = B - K * Hm * B
-            if min(A.shape) != max(A.shape):
-                raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(self._name,str(A.shape)))
-            if (numpy.diag(A) < 0).any():
-                raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(self._name,))
-            if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
-                try:
-                    L = numpy.linalg.cholesky( A )
-                except:
-                    raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(self._name,))
-            self.StoredVariables["APosterioriCovariance"].store( A )
-        #
-        # Calculs et/ou stockages supplémentaires
-        # ---------------------------------------
-        if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( Xa )
-        if self._toStore("CurrentOptimum"):
-            self.StoredVariables["CurrentOptimum"].store( Xa )
-        if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( Innovation )
-        if self._toStore("BMA"):
-            self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-        if self._toStore("OMA"):
-            self.StoredVariables["OMA"].store( oma )
-        if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( Innovation )
-        if self._toStore("SigmaObs2"):
-            TraceR = R.trace(Y.size)
-            self.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
-        if self._toStore("SigmaBck2"):
-            self.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (Xa - Xb)))/(Hm * (B * Hm.T)).trace() ) )
-        if self._toStore("MahalanobisConsistency"):
-            self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
-        if self._toStore("SimulationQuantiles"):
-            H  = HO["Direct"].appliedTo
-            NumericObjects.QuantilesEstimations(self, A, Xa, HXa, H, Hm)
-        if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-        if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
-        if self._toStore("SimulatedObservationAtCurrentOptimum"):
-            self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
-        if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+            raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
         #
         self._post_run(HO)
         return 0
index 75b851faf6710635f602701c611d833d21d789bb..19be0174b453679dd47abc5aa0c09fe8570f3a05 100644 (file)
@@ -20,9 +20,9 @@
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging
-from daCore import BasicObjects, NumericObjects
 import numpy
+from daCore import BasicObjects
+from daAlgorithms.Atoms import enks, etkf, ienkf, mlef, senkf
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
@@ -179,61 +179,61 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #--------------------------
         # Default EnKF = EnKF-16 = StochasticEnKF
         if   self._parameters["Variant"] == "EnKF-05":
-            NumericObjects.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula05")
+            senkf.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula05")
         #
         elif self._parameters["Variant"] in ["EnKF-16", "StochasticEnKF", "EnKF"]:
-            NumericObjects.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16")
+            senkf.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16")
         #
         #--------------------------
         # Default ETKF = ETKF-KFF
         elif self._parameters["Variant"] in ["ETKF-KFF", "ETKF"]:
-            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula")
+            etkf.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula")
         #
         elif self._parameters["Variant"] == "ETKF-VAR":
-            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="Variational")
+            etkf.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="Variational")
         #
         #--------------------------
         # Default ETKF-N = ETKF-N-16
         elif self._parameters["Variant"] == "ETKF-N-11":
-            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="FiniteSize11")
+            etkf.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="FiniteSize11")
         #
         elif self._parameters["Variant"] == "ETKF-N-15":
-            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="FiniteSize15")
+            etkf.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="FiniteSize15")
         #
         elif self._parameters["Variant"] in ["ETKF-N-16", "ETKF-N"]:
-            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="FiniteSize16")
+            etkf.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="FiniteSize16")
         #
         #--------------------------
         # Default MLEF = MLEF-T
         elif self._parameters["Variant"] in ["MLEF-T", "MLEF"]:
-            NumericObjects.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=False)
+            mlef.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=False)
         #
         elif self._parameters["Variant"] == "MLEF-B":
-            NumericObjects.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=True)
+            mlef.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=True)
         #
         #--------------------------
         # Default IEnKF = IEnKF-T
         elif self._parameters["Variant"] in ["IEnKF-T", "IEnKF"]:
-            NumericObjects.ienkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=False)
+            ienkf.ienkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=False)
         #
         elif self._parameters["Variant"] in ["IEnKF-B", "IEKF"]:
-            NumericObjects.ienkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=True)
+            ienkf.ienkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=True)
         #
         #--------------------------
         # Default EnKS = EnKS-KFF
         elif self._parameters["Variant"] in ["EnKS-KFF", "EnKS"]:
-            NumericObjects.enks(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula")
+            enks.enks(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula")
         #
         #--------------------------
-        # Default E3DVAR = E3DVAR-EnKF
-        elif self._parameters["Variant"] in ["E3DVAR-EnKF", "E3DVAR"]:
-            NumericObjects.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+        # Default E3DVAR = E3DVAR-ETKF
+        elif self._parameters["Variant"] == "E3DVAR-EnKF":
+            senkf.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
         #
-        elif self._parameters["Variant"] == "E3DVAR-ETKF":
-            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+        elif self._parameters["Variant"] in ["E3DVAR-ETKF", "E3DVAR"]:
+            etkf.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
         #
         elif self._parameters["Variant"] == "E3DVAR-MLEF":
-            NumericObjects.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+            mlef.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
         #
         #--------------------------
         else:
index fad75ec74466fed31e9391cbb8ac552f628aef4a..cc7b569edc43c4f74d3e56ba464d63810adb1689 100644 (file)
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging
-from daCore import BasicObjects, NumericObjects
 import numpy
+from daCore import BasicObjects, NumericObjects
+from daAlgorithms.Atoms import ecwexblue
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
     def __init__(self):
         BasicObjects.Algorithm.__init__(self, "EXTENDEDBLUE")
+        self.defineRequiredParameter(
+            name     = "Variant",
+            default  = "ExtendedBlue",
+            typecast = str,
+            message  = "Variant ou formulation de la méthode",
+            listval  = [
+                "ExtendedBlue",
+                "OneCorrection",
+                ],
+            )
+        self.defineRequiredParameter(
+            name     = "EstimationOf",
+            default  = "Parameters",
+            typecast = str,
+            message  = "Estimation d'état ou de paramètres",
+            listval  = ["State", "Parameters"],
+            )
         self.defineRequiredParameter(
             name     = "StoreInternalVariables",
             default  = False,
@@ -54,7 +71,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "CostFunctionJoAtCurrentOptimum",
                 "CurrentOptimum",
                 "CurrentState",
+                "ForecastState",
                 "Innovation",
+                "InnovationAtCurrentAnalysis",
                 "MahalanobisConsistency",
                 "OMA",
                 "OMB",
@@ -101,6 +120,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             )
         self.requireInputArguments(
             mandatory= ("Xb", "Y", "HO", "R", "B"),
+            optional = ("U", "EM", "CM", "Q"),
             )
         self.setAttributes(tags=(
             "DataAssimilation",
@@ -111,120 +131,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        Hm = HO["Tangent"].asMatrix(Xb)
-        Hm = Hm.reshape(Y.size,Xb.size) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(Xb)
-        Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
-        H  = HO["Direct"].appliedTo
-        #
-        if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-            HXb = H( Xb, HO["AppliedInX"]["HXb"])
-        else:
-            HXb = H( Xb )
-        HXb = HXb.reshape((-1,1))
-        if Y.size != HXb.size:
-            raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
-        if max(Y.shape) != max(HXb.shape):
-            raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
-        #
-        BI = B.getI()
-        RI = R.getI()
+        #--------------------------
+        if   self._parameters["Variant"] == "ExtendedBlue":
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, ecwexblue.ecwexblue)
         #
-        Innovation  = Y - HXb
+        #--------------------------
+        elif self._parameters["Variant"] == "OneCorrection":
+            ecwexblue.ecwexblue(self, Xb, Y, HO, R, B)
         #
-        # Calcul de la matrice de gain et de l'analyse
-        # --------------------------------------------
-        if Y.size <= Xb.size:
-            _A = R + numpy.dot(Hm, B * Ha)
-            _u = numpy.linalg.solve( _A , Innovation )
-            Xa = Xb + B * Ha * _u
+        #--------------------------
         else:
-            _A = BI + numpy.dot(Ha, RI * Hm)
-            _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
-            Xa = Xb + _u
-        self.StoredVariables["Analysis"].store( Xa )
-        #
-        # Calcul de la fonction coût
-        # --------------------------
-        if self._parameters["StoreInternalVariables"] or \
-            self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
-            self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
-            self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
-            self._toStore("OMA") or \
-            self._toStore("SigmaObs2") or \
-            self._toStore("MahalanobisConsistency") or \
-            self._toStore("SimulatedObservationAtCurrentOptimum") or \
-            self._toStore("SimulatedObservationAtCurrentState") or \
-            self._toStore("SimulatedObservationAtOptimum") or \
-            self._toStore("SimulationQuantiles"):
-            HXa  = H( Xa ).reshape((-1,1))
-            oma = Y - HXa
-        if self._parameters["StoreInternalVariables"] or \
-            self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
-            self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
-            self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
-            self._toStore("MahalanobisConsistency"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * oma.T * (RI * oma) )
-            J   = Jb + Jo
-            self.StoredVariables["CostFunctionJb"].store( Jb )
-            self.StoredVariables["CostFunctionJo"].store( Jo )
-            self.StoredVariables["CostFunctionJ" ].store( J )
-            self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( Jb )
-            self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( Jo )
-            self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( J )
-        #
-        # Calcul de la covariance d'analyse
-        # ---------------------------------
-        if self._toStore("APosterioriCovariance") or \
-            self._toStore("SimulationQuantiles"):
-            if   (Y.size <= Xb.size): K  = B * Ha * (R + numpy.dot(Hm, B * Ha)).I
-            elif (Y.size >  Xb.size): K = (BI + numpy.dot(Ha, RI * Hm)).I * Ha * RI
-            A = B - K * Hm * B
-            if min(A.shape) != max(A.shape):
-                raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(self._name,str(A.shape)))
-            if (numpy.diag(A) < 0).any():
-                raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(self._name,))
-            if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
-                try:
-                    L = numpy.linalg.cholesky( A )
-                except:
-                    raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(self._name,))
-            self.StoredVariables["APosterioriCovariance"].store( A )
-        #
-        # Calculs et/ou stockages supplémentaires
-        # ---------------------------------------
-        if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( Xa )
-        if self._toStore("CurrentOptimum"):
-            self.StoredVariables["CurrentOptimum"].store( Xa )
-        if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( Innovation )
-        if self._toStore("BMA"):
-            self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-        if self._toStore("OMA"):
-            self.StoredVariables["OMA"].store( oma )
-        if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( Innovation )
-        if self._toStore("SigmaObs2"):
-            TraceR = R.trace(Y.size)
-            self.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
-        if self._toStore("SigmaBck2"):
-            self.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (Xa - Xb)))/(Hm * (B * Hm.T)).trace() ) )
-        if self._toStore("MahalanobisConsistency"):
-            self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
-        if self._toStore("SimulationQuantiles"):
-            HtM  = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-            HtM  = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-            NumericObjects.QuantilesEstimations(self, A, Xa, HXa, H, HtM)
-        if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-        if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
-        if self._toStore("SimulatedObservationAtCurrentOptimum"):
-            self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
-        if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+            raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
         #
         self._post_run(HO)
         return 0
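For reference, the block removed above computed the (extended) BLUE analysis inline, choosing between two algebraically equivalent gain formulas depending on whether the observation or the state dimension is larger; that computation is now expected to be delegated to the ecwexblue atom. The following generic numpy sketch shows the observation-space form K = B H^T (H B H^T + R)^-1 with illustrative data; it is not the ecwexblue routine.

    # Generic BLUE update sketch (illustrative data, not the ecwexblue atom).
    import numpy

    def blue_update(xb, y, H, B, R):
        d = y - H @ xb                                   # innovation
        K = B @ H.T @ numpy.linalg.inv(H @ B @ H.T + R)  # gain
        return xb + K @ d                                # analysis

    xb = numpy.array([0.0, 0.0])
    y  = numpy.array([1.0])
    H  = numpy.array([[1.0, 1.0]])
    B  = numpy.eye(2)
    R  = numpy.array([[0.5]])
    print(blue_update(xb, y, H, B, R))   # [0.4 0.4], pulled toward y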
index 789c771d84807fbf9e5198a8bc1a9dfb743282e3..141d9ad7348e5885a57b1dfa242a7b78a47b1bdc 100644 (file)
@@ -20,8 +20,8 @@
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging
-from daCore import BasicObjects, NumericObjects
+from daCore import BasicObjects
+from daAlgorithms.Atoms import cekf, exkf
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
@@ -110,12 +110,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         # Default EKF
         #--------------------------
         if   self._parameters["Variant"] == "EKF":
-            NumericObjects.exkf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+            exkf.exkf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
         #--------------------------
         # Default CEKF
         elif self._parameters["Variant"] == "CEKF":
-            NumericObjects.cekf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+            cekf.cekf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
         #--------------------------
         else:
index 83c096586f24cd323f6120fe796a884ba499cbf1..c06764ce62c5498e650066e1f9bbb719be6b70f5 100644 (file)
@@ -20,9 +20,8 @@
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging
-from daCore import BasicObjects, NumericObjects
-import numpy
+from daCore import BasicObjects
+from daAlgorithms.Atoms import stdkf
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
@@ -87,7 +86,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
         #--------------------------
-        NumericObjects.stdkf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        stdkf.stdkf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
         #--------------------------
         #
         self._post_run(HO)
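The Kalman filter body is now a single call to the stdkf atom. As a reminder of what such a routine computes, here is a generic one-step linear Kalman recursion in numpy, written in standard notation as an illustrative sketch and not as the stdkf.stdkf implementation: a forecast with the evolution model (M, Q) followed by a gain-based correction with (H, R).

    # One-step linear Kalman recursion (generic sketch, standard notation).
    import numpy

    def kf_step(xa, Pa, y, M, Q, H, R):
        xf = M @ xa                                         # state forecast
        Pf = M @ Pa @ M.T + Q                               # covariance forecast
        K  = Pf @ H.T @ numpy.linalg.inv(H @ Pf @ H.T + R)  # Kalman gain
        xa_new = xf + K @ (y - H @ xf)                      # analysis
        Pa_new = (numpy.eye(xa.size) - K @ H) @ Pf          # analysis covariance
        return xa_new, Pa_new

    xa, Pa = numpy.zeros(2), numpy.eye(2)
    M, Q   = numpy.eye(2), 0.1 * numpy.eye(2)
    H, R   = numpy.array([[1.0, 0.0]]), numpy.array([[0.2]])
    print(kf_step(xa, Pa, numpy.array([1.0]), M, Q, H, R))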
index f612d0efedbf34430b68907dcf9e83e355831b8d..e5baedcaeeba947631774dd2ccd320bcd7d1f8af 100644 (file)
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging
-from daCore import BasicObjects
-import numpy
+from daCore import BasicObjects, NumericObjects
+from daAlgorithms.Atoms import ecwlls
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
     def __init__(self):
         BasicObjects.Algorithm.__init__(self, "LINEARLEASTSQUARES")
+        self.defineRequiredParameter(
+            name     = "Variant",
+            default  = "LinearLeastSquares",
+            typecast = str,
+            message  = "Variant ou formulation de la méthode",
+            listval  = [
+                "LinearLeastSquares",
+                "OneCorrection",
+                ],
+            )
+        self.defineRequiredParameter(
+            name     = "EstimationOf",
+            default  = "Parameters",
+            typecast = str,
+            message  = "Estimation d'état ou de paramètres",
+            listval  = ["State", "Parameters"],
+            )
         self.defineRequiredParameter(
             name     = "StoreInternalVariables",
             default  = False,
@@ -49,6 +65,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "CostFunctionJoAtCurrentOptimum",
                 "CurrentOptimum",
                 "CurrentState",
+                "ForecastState",
+                "InnovationAtCurrentAnalysis",
                 "OMA",
                 "SimulatedObservationAtCurrentOptimum",
                 "SimulatedObservationAtCurrentState",
@@ -68,64 +86,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        Hm = HO["Tangent"].asMatrix(Xb)
-        Hm = Hm.reshape(Y.size,-1) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(Xb)
-        Ha = Ha.reshape(-1,Y.size) # ADAO & check shape
+        #--------------------------
+        if   self._parameters["Variant"] == "LinearLeastSquares":
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, ecwlls.ecwlls)
         #
-        if R is None:
-            RI = 1.
-        else:
-            RI = R.getI()
-        #
-        # Calcul de la matrice de gain et de l'analyse
-        # --------------------------------------------
-        K = (Ha * (RI * Hm)).I * Ha * RI
-        Xa =  K * Y
-        self.StoredVariables["Analysis"].store( Xa )
+        #--------------------------
+        elif self._parameters["Variant"] == "OneCorrection":
+            ecwlls.ecwlls(self, Xb, Y, HO, R, B)
         #
-        # Calcul de la fonction coût
-        # --------------------------
-        if self._parameters["StoreInternalVariables"] or \
-            self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
-            self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
-            self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
-            self._toStore("OMA") or \
-            self._toStore("SimulatedObservationAtCurrentOptimum") or \
-            self._toStore("SimulatedObservationAtCurrentState") or \
-            self._toStore("SimulatedObservationAtOptimum"):
-            HXa = Hm * Xa
-            oma = Y - HXa
-        if self._parameters["StoreInternalVariables"] or \
-            self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
-            self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
-            self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum"):
-            Jb  = 0.
-            Jo  = float( 0.5 * oma.T * (RI * oma) )
-            J   = Jb + Jo
-            self.StoredVariables["CostFunctionJb"].store( Jb )
-            self.StoredVariables["CostFunctionJo"].store( Jo )
-            self.StoredVariables["CostFunctionJ" ].store( J )
-            self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( Jb )
-            self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( Jo )
-            self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( J )
-        #
-        # Calculs et/ou stockages supplémentaires
-        # ---------------------------------------
-        if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( Xa )
-        if self._toStore("CurrentOptimum"):
-            self.StoredVariables["CurrentOptimum"].store( Xa )
-        if self._toStore("OMA"):
-            self.StoredVariables["OMA"].store( oma )
-        if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-        if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
-        if self._toStore("SimulatedObservationAtCurrentOptimum"):
-            self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
-        if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+        #--------------------------
+        else:
+            raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
         #
         self._post_run(HO)
         return 0
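The block removed above solved the weighted linear least-squares problem directly, with no background term (Jb = 0): K = (H^T R^-1 H)^-1 H^T R^-1 and Xa = K Y. A small self-contained numpy illustration of that formula follows, with arbitrary data; it is not the ecwlls atom.

    # Weighted linear least squares, as in the removed block (arbitrary data).
    import numpy

    H  = numpy.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    Y  = numpy.array([1.0, 2.0, 3.1])
    RI = numpy.eye(3)                       # R^-1 (identity when R is None)
    K  = numpy.linalg.inv(H.T @ RI @ H) @ H.T @ RI
    Xa = K @ Y
    print(Xa)                               # approx [1.033, 2.033]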
index fc0a5421720116980cbb278aa0d68c80178c425b..00e89403b0bdfd0067487f9e672a3e6e4599bdea 100644 (file)
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-import logging
-from daCore import BasicObjects
-import numpy, scipy.optimize, scipy.version
+import numpy
+from daCore import BasicObjects, NumericObjects
+from daAlgorithms.Atoms import ecwnlls
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
     def __init__(self):
         BasicObjects.Algorithm.__init__(self, "NONLINEARLEASTSQUARES")
+        self.defineRequiredParameter(
+            name     = "Variant",
+            default  = "NonLinearLeastSquares",
+            typecast = str,
+            message  = "Variant ou formulation de la méthode",
+            listval  = [
+                "NonLinearLeastSquares",
+                "OneCorrection",
+                ],
+            )
         self.defineRequiredParameter(
             name     = "Minimizer",
             default  = "LBFGSB",
@@ -42,6 +52,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "LM",
                 ],
             )
+        self.defineRequiredParameter(
+            name     = "EstimationOf",
+            default  = "Parameters",
+            typecast = str,
+            message  = "Estimation d'état ou de paramètres",
+            listval  = ["State", "Parameters"],
+            )
         self.defineRequiredParameter(
             name     = "MaximumNumberOfSteps",
             default  = 15000,
@@ -93,8 +110,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "CurrentIterationNumber",
                 "CurrentOptimum",
                 "CurrentState",
+                "ForecastState",
                 "IndexOfOptimum",
                 "Innovation",
+                "InnovationAtCurrentAnalysis",
                 "InnovationAtCurrentState",
                 "OMA",
                 "OMB",
@@ -115,6 +134,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             )
         self.requireInputArguments(
             mandatory= ("Xb", "Y", "HO", "R"),
+            optional = ("U", "EM", "CM", "Q"),
             )
         self.setAttributes(tags=(
             "Optimization",
@@ -125,221 +145,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        # Initialisations
-        # ---------------
-        Hm = HO["Direct"].appliedTo
-        Ha = HO["Adjoint"].appliedInXTo
-        #
-        if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-            HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
-        else:
-            HXb = Hm( Xb )
-        HXb = HXb.reshape((-1,1))
-        if Y.size != HXb.size:
-            raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
-        if max(Y.shape) != max(HXb.shape):
-            raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
-        #
-        RI = R.getI()
-        if self._parameters["Minimizer"] == "LM":
-            RdemiI = R.choleskyI()
-        #
-        Xini = self._parameters["InitializationPoint"]
-        #
-        # Définition de la fonction-coût
-        # ------------------------------
-        def CostFunction(x):
-            _X  = numpy.ravel( x ).reshape((-1,1))
-            if self._parameters["StoreInternalVariables"] or \
-                self._toStore("CurrentState") or \
-                self._toStore("CurrentOptimum"):
-                self.StoredVariables["CurrentState"].store( _X )
-            _HX = Hm( _X ).reshape((-1,1))
-            _Innovation = Y - _HX
-            if self._toStore("SimulatedObservationAtCurrentState") or \
-                self._toStore("SimulatedObservationAtCurrentOptimum"):
-                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
-            if self._toStore("InnovationAtCurrentState"):
-                self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-            #
-            Jb  = 0.
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            #
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["CostFunctionJ"]) )
-            self.StoredVariables["CostFunctionJb"].store( Jb )
-            self.StoredVariables["CostFunctionJo"].store( Jo )
-            self.StoredVariables["CostFunctionJ" ].store( J )
-            if self._toStore("IndexOfOptimum") or \
-                self._toStore("CurrentOptimum") or \
-                self._toStore("CostFunctionJAtCurrentOptimum") or \
-                self._toStore("CostFunctionJbAtCurrentOptimum") or \
-                self._toStore("CostFunctionJoAtCurrentOptimum") or \
-                self._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( self.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if self._toStore("IndexOfOptimum"):
-                self.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if self._toStore("CurrentOptimum"):
-                self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["CurrentState"][IndexMin] )
-            if self._toStore("SimulatedObservationAtCurrentOptimum"):
-                self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-            if self._toStore("CostFunctionJbAtCurrentOptimum"):
-                self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] )
-            if self._toStore("CostFunctionJoAtCurrentOptimum"):
-                self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJo"][IndexMin] )
-            if self._toStore("CostFunctionJAtCurrentOptimum"):
-                self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( self.StoredVariables["CostFunctionJ" ][IndexMin] )
-            return J
-        #
-        def GradientOfCostFunction(x):
-            _X      = x.reshape((-1,1))
-            _HX     = Hm( _X ).reshape((-1,1))
-            GradJb  = 0.
-            GradJo  = - Ha( (_X, RI * (Y - _HX)) )
-            GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-            return GradJ
-        #
-        def CostFunctionLM(x):
-            _X  = numpy.ravel( x ).reshape((-1,1))
-            _HX = Hm( _X ).reshape((-1,1))
-            _Innovation = Y - _HX
-            Jb  = 0.
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            if self._parameters["StoreInternalVariables"] or \
-                self._toStore("CurrentState"):
-                self.StoredVariables["CurrentState"].store( _X )
-            self.StoredVariables["CostFunctionJb"].store( Jb )
-            self.StoredVariables["CostFunctionJo"].store( Jo )
-            self.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            return numpy.ravel( RdemiI*_Innovation )
-        #
-        def GradientOfCostFunctionLM(x):
-            _X      = x.reshape((-1,1))
-            return - RdemiI*HO["Tangent"].asMatrix( _X )
-        #
-        # Minimisation de la fonctionnelle
-        # --------------------------------
-        nbPreviousSteps = self.StoredVariables["CostFunctionJ"].stepnumber()
-        #
-        if self._parameters["Minimizer"] == "LBFGSB":
-            if "0.19" <= scipy.version.version <= "1.1.0":
-                import lbfgsbhlt as optimiseur
-            else:
-                import scipy.optimize as optimiseur
-            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-                func        = CostFunction,
-                x0          = Xini,
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                bounds      = self._parameters["Bounds"],
-                maxfun      = self._parameters["MaximumNumberOfSteps"]-1,
-                factr       = self._parameters["CostDecrementTolerance"]*1.e14,
-                pgtol       = self._parameters["ProjectedGradientTolerance"],
-                iprint      = self._parameters["optiprint"],
-                )
-            nfeval = Informations['funcalls']
-            rc     = Informations['warnflag']
-        elif self._parameters["Minimizer"] == "TNC":
-            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-                func        = CostFunction,
-                x0          = Xini,
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                bounds      = self._parameters["Bounds"],
-                maxfun      = self._parameters["MaximumNumberOfSteps"],
-                pgtol       = self._parameters["ProjectedGradientTolerance"],
-                ftol        = self._parameters["CostDecrementTolerance"],
-                messages    = self._parameters["optmessages"],
-                )
-        elif self._parameters["Minimizer"] == "CG":
-            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = Xini,
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = self._parameters["MaximumNumberOfSteps"],
-                gtol        = self._parameters["GradientNormTolerance"],
-                disp        = self._parameters["optdisp"],
-                full_output = True,
-                )
-        elif self._parameters["Minimizer"] == "NCG":
-            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-                f           = CostFunction,
-                x0          = Xini,
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = self._parameters["MaximumNumberOfSteps"],
-                avextol     = self._parameters["CostDecrementTolerance"],
-                disp        = self._parameters["optdisp"],
-                full_output = True,
-                )
-        elif self._parameters["Minimizer"] == "BFGS":
-            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-                f           = CostFunction,
-                x0          = Xini,
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = self._parameters["MaximumNumberOfSteps"],
-                gtol        = self._parameters["GradientNormTolerance"],
-                disp        = self._parameters["optdisp"],
-                full_output = True,
-                )
-        elif self._parameters["Minimizer"] == "LM":
-            Minimum, cov_x, infodict, mesg, rc = scipy.optimize.leastsq(
-                func        = CostFunctionLM,
-                x0          = Xini,
-                Dfun        = GradientOfCostFunctionLM,
-                args        = (),
-                ftol        = self._parameters["CostDecrementTolerance"],
-                maxfev      = self._parameters["MaximumNumberOfSteps"],
-                gtol        = self._parameters["GradientNormTolerance"],
-                full_output = True,
-                )
-            nfeval = infodict['nfev']
-        else:
-            raise ValueError("Error in Minimizer name: %s"%self._parameters["Minimizer"])
-        #
-        IndexMin = numpy.argmin( self.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        MinJ     = self.StoredVariables["CostFunctionJ"][IndexMin]
-        #
-        # Correction pour pallier a un bug de TNC sur le retour du Minimum
-        # ----------------------------------------------------------------
-        if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
-            Minimum = self.StoredVariables["CurrentState"][IndexMin]
-        #
-        Xa = Minimum
         #--------------------------
+        if   self._parameters["Variant"] == "NonLinearLeastSquares":
+            NumericObjects.multiXOsteps(self, Xb, Y, U, HO, EM, CM, R, B, Q, ecwnlls.ecwnlls)
         #
-        self.StoredVariables["Analysis"].store( Xa )
-        #
-        if self._toStore("OMA") or \
-            self._toStore("SimulatedObservationAtOptimum"):
-            if self._toStore("SimulatedObservationAtCurrentState"):
-                HXa = self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-            elif self._toStore("SimulatedObservationAtCurrentOptimum"):
-                HXa = self.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-            else:
-                HXa = Hm( Xa )
+        #--------------------------
+        elif self._parameters["Variant"] == "OneCorrection":
+            ecwnlls.ecwnlls(self, Xb, Y, HO, R, B)
         #
-        # Calculs et/ou stockages supplémentaires
-        # ---------------------------------------
-        if self._toStore("Innovation") or \
-            self._toStore("OMB"):
-            Innovation  = Y - HXb
-        if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( Innovation )
-        if self._toStore("BMA"):
-            self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-        if self._toStore("OMA"):
-            self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-        if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( Innovation )
-        if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-        if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+        #--------------------------
+        else:
+            raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
         #
         self._post_run(HO)
         return 0
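The long block removed above built the cost function J(x) = 1/2 (Y - H(x))^T R^-1 (Y - H(x)), with Jb = 0, and minimized it with one of the optimizers listed in "Minimizer". The following minimal sketch performs the same kind of minimization through the public SciPy interface on a toy nonlinear operator; it is illustrative only and not the ecwnlls atom.

    # Toy nonlinear least squares minimized with SciPy (illustrative sketch).
    import numpy, scipy.optimize

    Y  = numpy.array([2.0, 5.0])
    RI = numpy.eye(2)                        # R^-1

    def H(x):                                # toy observation operator
        return numpy.array([x[0]**2, x[0] + x[1]])

    def J(x):                                # Jo only, Jb = 0
        d = Y - H(x)
        return 0.5 * float(d @ RI @ d)

    res = scipy.optimize.minimize(J, x0=[1.0, 1.0], method="L-BFGS-B")
    print(res.x)                             # approx [1.414, 3.586]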
diff --git a/src/daComposant/daAlgorithms/lbfgsbhlt.py b/src/daComposant/daAlgorithms/lbfgsbhlt.py
deleted file mode 100644 (file)
index f59c1ec..0000000
+++ /dev/null
@@ -1,472 +0,0 @@
-# Modification de la version 1.1.0
-"""
-Functions
----------
-.. autosummary::
-   :toctree: generated/
-
-    fmin_l_bfgs_b
-
-"""
-
-## License for the Python wrapper
-## ==============================
-
-## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca>
-
-## Permission is hereby granted, free of charge, to any person obtaining a
-## copy of this software and associated documentation files (the "Software"),
-## to deal in the Software without restriction, including without limitation
-## the rights to use, copy, modify, merge, publish, distribute, sublicense,
-## and/or sell copies of the Software, and to permit persons to whom the
-## Software is furnished to do so, subject to the following conditions:
-
-## The above copyright notice and this permission notice shall be included in
-## all copies or substantial portions of the Software.
-
-## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-## DEALINGS IN THE SOFTWARE.
-
-## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
-
-from __future__ import division, print_function, absolute_import
-
-import numpy as np
-from numpy import array, asarray, float64, int32, zeros
-from scipy.optimize import _lbfgsb
-from scipy.optimize.optimize import (approx_fprime, MemoizeJac, OptimizeResult,
-                       _check_unknown_options, wrap_function,
-                       _approx_fprime_helper)
-from scipy.sparse.linalg import LinearOperator
-
-__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
-
-
-def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
-                  approx_grad=0,
-                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
-                  epsilon=1e-8,
-                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
-                  callback=None, maxls=20):
-    """
-    Minimize a function func using the L-BFGS-B algorithm.
-
-    Parameters
-    ----------
-    func : callable f(x,*args)
-        Function to minimise.
-    x0 : ndarray
-        Initial guess.
-    fprime : callable fprime(x,*args), optional
-        The gradient of `func`.  If None, then `func` returns the function
-        value and the gradient (``f, g = func(x, *args)``), unless
-        `approx_grad` is True in which case `func` returns only ``f``.
-    args : sequence, optional
-        Arguments to pass to `func` and `fprime`.
-    approx_grad : bool, optional
-        Whether to approximate the gradient numerically (in which case
-        `func` returns only the function value).
-    bounds : list, optional
-        ``(min, max)`` pairs for each element in ``x``, defining
-        the bounds on that parameter. Use None or +-inf for one of ``min`` or
-        ``max`` when there is no bound in that direction.
-    m : int, optional
-        The maximum number of variable metric corrections
-        used to define the limited memory matrix. (The limited memory BFGS
-        method does not store the full hessian but uses this many terms in an
-        approximation to it.)
-    factr : float, optional
-        The iteration stops when
-        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
-        where ``eps`` is the machine precision, which is automatically
-        generated by the code. Typical values for `factr` are: 1e12 for
-        low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
-        high accuracy. See Notes for relationship to `ftol`, which is exposed
-        (instead of `factr`) by the `scipy.optimize.minimize` interface to
-        L-BFGS-B.
-    pgtol : float, optional
-        The iteration will stop when
-        ``max{|proj g_i | i = 1, ..., n} <= pgtol``
-        where ``pg_i`` is the i-th component of the projected gradient.
-    epsilon : float, optional
-        Step size used when `approx_grad` is True, for numerically
-        calculating the gradient
-    iprint : int, optional
-        Controls the frequency of output. ``iprint < 0`` means no output;
-        ``iprint = 0``    print only one line at the last iteration;
-        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
-        ``iprint = 99``   print details of every iteration except n-vectors;
-        ``iprint = 100``  print also the changes of active set and final x;
-        ``iprint > 100``  print details of every iteration including x and g.
-    disp : int, optional
-        If zero, then no output.  If a positive number, then this over-rides
-        `iprint` (i.e., `iprint` gets the value of `disp`).
-    maxfun : int, optional
-        Maximum number of function evaluations.
-    maxiter : int, optional
-        Maximum number of iterations.
-    callback : callable, optional
-        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
-        current parameter vector.
-    maxls : int, optional
-        Maximum number of line search steps (per iteration). Default is 20.
-
-    Returns
-    -------
-    x : array_like
-        Estimated position of the minimum.
-    f : float
-        Value of `func` at the minimum.
-    d : dict
-        Information dictionary.
-
-        * d['warnflag'] is
-
-          - 0 if converged,
-          - 1 if too many function evaluations or too many iterations,
-          - 2 if stopped for another reason, given in d['task']
-
-        * d['grad'] is the gradient at the minimum (should be 0 ish)
-        * d['funcalls'] is the number of function calls made.
-        * d['nit'] is the number of iterations.
-
-    See also
-    --------
-    minimize: Interface to minimization algorithms for multivariate
-        functions. See the 'L-BFGS-B' `method` in particular. Note that the
-        `ftol` option is made available via that interface, while `factr` is
-        provided via this interface, where `factr` is the factor multiplying
-        the default machine floating-point precision to arrive at `ftol`:
-        ``ftol = factr * numpy.finfo(float).eps``.
-
-    Notes
-    -----
-    License of L-BFGS-B (FORTRAN code):
-
-    The version included here (in fortran code) is 3.0
-    (released April 25, 2011).  It was written by Ciyou Zhu, Richard Byrd,
-    and Jorge Nocedal <nocedal@ece.nwu.edu>. It carries the following
-    condition for use:
-
-    This software is freely available, but we expect that all publications
-    describing work using this software, or all commercial products using it,
-    quote at least one of the references given below. This software is released
-    under the BSD License.
-
-    References
-    ----------
-    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
-      Constrained Optimization, (1995), SIAM Journal on Scientific and
-      Statistical Computing, 16, 5, pp. 1190-1208.
-    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
-      FORTRAN routines for large scale bound constrained optimization (1997),
-      ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
-    * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
-      FORTRAN routines for large scale bound constrained optimization (2011),
-      ACM Transactions on Mathematical Software, 38, 1.
-
-    """
-    # handle fprime/approx_grad
-    if approx_grad:
-        fun = func
-        jac = None
-    elif fprime is None:
-        fun = MemoizeJac(func)
-        jac = fun.derivative
-    else:
-        fun = func
-        jac = fprime
-
-    # build options
-    if disp is None:
-        disp = iprint
-    opts = {'disp': disp,
-            'iprint': iprint,
-            'maxcor': m,
-            'ftol': factr * np.finfo(float).eps,
-            'gtol': pgtol,
-            'eps': epsilon,
-            'maxfun': maxfun,
-            'maxiter': maxiter,
-            'callback': callback,
-            'maxls': maxls}
-
-    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
-                           **opts)
-    d = {'grad': res['jac'],
-         'task': res['message'],
-         'funcalls': res['nfev'],
-         'nit': res['nit'],
-         'warnflag': res['status']}
-    f = res['fun']
-    x = res['x']
-
-    return x, f, d
-
-
-def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
-                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
-                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
-                     iprint=-1, callback=None, maxls=20, **unknown_options):
-    """
-    Minimize a scalar function of one or more variables using the L-BFGS-B
-    algorithm.
-
-    Options
-    -------
-    disp : bool
-       Set to True to print convergence messages.
-    maxcor : int
-        The maximum number of variable metric corrections used to
-        define the limited memory matrix. (The limited memory BFGS
-        method does not store the full hessian but uses this many terms
-        in an approximation to it.)
-    ftol : float
-        The iteration stops when ``(f^k -
-        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
-    gtol : float
-        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
-        <= gtol`` where ``pg_i`` is the i-th component of the
-        projected gradient.
-    eps : float
-        Step size used for numerical approximation of the jacobian.
-    disp : int
-        Set to True to print convergence messages.
-    maxfun : int
-        Maximum number of function evaluations.
-    maxiter : int
-        Maximum number of iterations.
-    maxls : int, optional
-        Maximum number of line search steps (per iteration). Default is 20.
-
-    Notes
-    -----
-    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
-    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
-    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
-    I.e., `factr` multiplies the default machine floating-point precision to
-    arrive at `ftol`.
-
-    """
-    _check_unknown_options(unknown_options)
-    m = maxcor
-    epsilon = eps
-    pgtol = gtol
-    factr = ftol / np.finfo(float).eps
-
-    x0 = asarray(x0).ravel()
-    n, = x0.shape
-
-    if bounds is None:
-        bounds = [(None, None)] * n
-    if len(bounds) != n:
-        raise ValueError('length of x0 != length of bounds')
-    # unbounded variables must use None, not +-inf, for optimizer to work properly
-    bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds]
-
-    if disp is not None:
-        if disp == 0:
-            iprint = -1
-        else:
-            iprint = disp
-
-    n_function_evals, fun = wrap_function(fun, ())
-    if jac is None:
-        def func_and_grad(x):
-            f = fun(x, *args)
-            g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f)
-            return f, g
-    else:
-        def func_and_grad(x):
-            f = fun(x, *args)
-            g = jac(x, *args)
-            return f, g
-
-    nbd = zeros(n, int32)
-    low_bnd = zeros(n, float64)
-    upper_bnd = zeros(n, float64)
-    bounds_map = {(None, None): 0,
-                  (1, None): 1,
-                  (1, 1): 2,
-                  (None, 1): 3}
-    for i in range(0, n):
-        l, u = bounds[i]
-        if l is not None:
-            low_bnd[i] = l
-            l = 1
-        if u is not None:
-            upper_bnd[i] = u
-            u = 1
-        nbd[i] = bounds_map[l, u]
-
-    if not maxls > 0:
-        raise ValueError('maxls must be positive.')
-
-    x = array(x0, float64)
-    f = array(0.0, float64)
-    g = zeros((n,), float64)
-    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
-    iwa = zeros(3*n, int32)
-    task = zeros(1, 'S60')
-    csave = zeros(1, 'S60')
-    lsave = zeros(4, int32)
-    isave = zeros(44, int32)
-    dsave = zeros(29, float64)
-
-    task[:] = 'START'
-
-    n_iterations = 0
-
-    while 1:
-        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
-        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
-                       pgtol, wa, iwa, task, iprint, csave, lsave,
-                       isave, dsave, maxls)
-        task_str = task.tostring()
-        if task_str.startswith(b'FG'):
-            # The minimization routine wants f and g at the current x.
-            # Note that interruptions due to maxfun are postponed
-            # until the completion of the current minimization iteration.
-            # Overwrite f and g:
-            f, g = func_and_grad(x)
-            if n_function_evals[0] > maxfun:
-                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
-                           'EXCEEDS LIMIT')
-        elif task_str.startswith(b'NEW_X'):
-            # new iteration
-            n_iterations += 1
-            if callback is not None:
-                callback(np.copy(x))
-
-            if n_iterations >= maxiter:
-                task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
-            elif n_function_evals[0] > maxfun:
-                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
-                           'EXCEEDS LIMIT')
-        else:
-            break
-
-    task_str = task.tostring().strip(b'\x00').strip()
-    if task_str.startswith(b'CONV'):
-        warnflag = 0
-    elif n_function_evals[0] > maxfun or n_iterations >= maxiter:
-        warnflag = 1
-    else:
-        warnflag = 2
-
-    # These two portions of the workspace are described in the mainlb
-    # subroutine in lbfgsb.f. See line 363.
-    s = wa[0: m*n].reshape(m, n)
-    y = wa[m*n: 2*m*n].reshape(m, n)
-
-    # See lbfgsb.f line 160 for this portion of the workspace.
-    # isave(31) = the total number of BFGS updates prior the current iteration;
-    n_bfgs_updates = isave[30]
-
-    n_corrs = min(n_bfgs_updates, maxcor)
-    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
-
-    return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0],
-                          nit=n_iterations, status=warnflag, message=task_str,
-                          x=x, success=(warnflag == 0), hess_inv=hess_inv)
-
-
-class LbfgsInvHessProduct(LinearOperator):
-    """Linear operator for the L-BFGS approximate inverse Hessian.
-
-    This operator computes the product of a vector with the approximate inverse
-    of the Hessian of the objective function, using the L-BFGS limited
-    memory approximation to the inverse Hessian, accumulated during the
-    optimization.
-
-    Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
-    interface.
-
-    Parameters
-    ----------
-    sk : array_like, shape=(n_corr, n)
-        Array of `n_corr` most recent updates to the solution vector.
-        (See [1]).
-    yk : array_like, shape=(n_corr, n)
-        Array of `n_corr` most recent updates to the gradient. (See [1]).
-
-    References
-    ----------
-    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
-       storage." Mathematics of computation 35.151 (1980): 773-782.
-
-    """
-    def __init__(self, sk, yk):
-        """Construct the operator."""
-        if sk.shape != yk.shape or sk.ndim != 2:
-            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
-        n_corrs, n = sk.shape
-
-        super(LbfgsInvHessProduct, self).__init__(
-            dtype=np.float64, shape=(n, n))
-
-        self.sk = sk
-        self.yk = yk
-        self.n_corrs = n_corrs
-        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
-
-    def _matvec(self, x):
-        """Efficient matrix-vector multiply with the BFGS matrices.
-
-        This calculation is described in Section (4) of [1].
-
-        Parameters
-        ----------
-        x : ndarray
-            An array with shape (n,) or (n,1).
-
-        Returns
-        -------
-        y : ndarray
-            The matrix-vector product
-
-        """
-        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
-        q = np.array(x, dtype=self.dtype, copy=True)
-        if q.ndim == 2 and q.shape[1] == 1:
-            q = q.reshape(-1)
-
-        alpha = np.zeros(n_corrs)
-
-        for i in range(n_corrs-1, -1, -1):
-            alpha[i] = rho[i] * np.dot(s[i], q)
-            q = q - alpha[i]*y[i]
-
-        r = q
-        for i in range(n_corrs):
-            beta = rho[i] * np.dot(y[i], r)
-            r = r + s[i] * (alpha[i] - beta)
-
-        return r
-
-    def todense(self):
-        """Return a dense array representation of this operator.
-
-        Returns
-        -------
-        arr : ndarray, shape=(n, n)
-            An array with the same shape and containing
-            the same data represented by this `LinearOperator`.
-
-        """
-        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
-        I = np.eye(*self.shape, dtype=self.dtype)
-        Hk = I
-
-        for i in range(n_corrs):
-            A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
-            A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
-
-            Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
-                                                        s[i][np.newaxis, :])
-        return Hk
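The deleted lbfgsbhlt.py was a local copy of SciPy's fmin_l_bfgs_b wrapper, selected by the removed import logic only for SciPy versions between 0.19 and 1.1.0. With it gone, the same entry point is presumably taken directly from scipy.optimize. A short usage sketch with an arbitrary quadratic objective, using only keywords documented in the docstring above:

    # Direct use of SciPy's L-BFGS-B wrapper (arbitrary quadratic example).
    import numpy
    from scipy.optimize import fmin_l_bfgs_b

    def f(x):
        return float(numpy.sum((x - 3.0) ** 2))

    def g(x):
        return 2.0 * (x - 3.0)

    x, fval, info = fmin_l_bfgs_b(f, x0=numpy.zeros(2), fprime=g,
                                  bounds=[(0.0, 10.0)] * 2,
                                  factr=1e7, pgtol=1e-5, iprint=-1)
    print(x, info["warnflag"])               # approx [3. 3.], 0 on convergence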
index aa6c683592e0c1a146b1ed356675970ae5d8defd..c1feb221981cc671808b5293eb96286c7c7e3de8 100644 (file)
@@ -25,8 +25,7 @@ __doc__ = """
 """
 __author__ = "Jean-Philippe ARGAUD"
 
-import os, time, copy, types, sys, logging
-import math, numpy, scipy, scipy.optimize, scipy.version
+import os, copy, types, sys, logging, numpy
 from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
 from daCore.PlatformInfo import PlatformInfo
 mpr = PlatformInfo().MachinePrecision()
@@ -478,24 +477,24 @@ class FDApproximation(object):
             else:                return _HaY
 
 # ==============================================================================
-def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
-    "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+def EnsembleOfCenteredPerturbations( __bgcenter, __bgcovariance, __nbmembers ):
+    "Génération d'un ensemble de taille __nbmembers-1 d'états aléatoires centrés"
     #
-    _bgcenter = numpy.ravel(_bgcenter)[:,None]
-    if _nbmembers < 1:
-        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
+    __bgcenter = numpy.ravel(__bgcenter)[:,None]
+    if __nbmembers < 1:
+        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbmembers),))
     #
-    if _bgcovariance is None:
-        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+    if __bgcovariance is None:
+        _Perturbations = numpy.tile( __bgcenter, __nbmembers)
     else:
-        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
-        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
+        _Z = numpy.random.multivariate_normal(numpy.zeros(__bgcenter.size), __bgcovariance, size=__nbmembers).T
+        _Perturbations = numpy.tile( __bgcenter, __nbmembers) + _Z
     #
     return _Perturbations
 
 # ==============================================================================
-def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
-    "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+def EnsembleOfBackgroundPerturbations( __bgcenter, __bgcovariance, __nbmembers, __withSVD = True):
+    "Génération d'un ensemble de taille __nbmembers-1 d'états aléatoires centrés"
     def __CenteredRandomAnomalies(Zr, N):
         """
         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
@@ -509,31 +508,31 @@ def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _wi
         Zr = numpy.dot(Q,Zr)
         return Zr.T
     #
-    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
-    if _nbmembers < 1:
-        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
-    if _bgcovariance is None:
-        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+    __bgcenter = numpy.ravel(__bgcenter).reshape((-1,1))
+    if __nbmembers < 1:
+        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbmembers),))
+    if __bgcovariance is None:
+        _Perturbations = numpy.tile( __bgcenter, __nbmembers)
     else:
-        if _withSVD:
-            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
-            _nbctl = _bgcenter.size
-            if _nbmembers > _nbctl:
+        if __withSVD:
+            _U, _s, _V = numpy.linalg.svd(__bgcovariance, full_matrices=False)
+            _nbctl = __bgcenter.size
+            if __nbmembers > _nbctl:
                 _Z = numpy.concatenate((numpy.dot(
                     numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
-                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
+                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),__bgcovariance,__nbmembers-1-_nbctl)), axis = 0)
             else:
-                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
-            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-            _Perturbations = _bgcenter + _Zca
-        else:
-            if max(abs(_bgcovariance.flatten())) > 0:
-                _nbctl = _bgcenter.size
-                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
-                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-                _Perturbations = _bgcenter + _Zca
+                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:__nbmembers-1])), _V[:__nbmembers-1])
+            _Zca = __CenteredRandomAnomalies(_Z, __nbmembers)
+            _Perturbations = __bgcenter + _Zca
+        else:
+            if max(abs(__bgcovariance.flatten())) > 0:
+                _nbctl = __bgcenter.size
+                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),__bgcovariance,__nbmembers-1)
+                _Zca = __CenteredRandomAnomalies(_Z, __nbmembers)
+                _Perturbations = __bgcenter + _Zca
             else:
-                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+                _Perturbations = numpy.tile( __bgcenter, __nbmembers)
     #
     return _Perturbations
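The two helpers renamed above build an ensemble by tiling the background state and adding zero-mean random anomalies drawn from the background covariance, optionally through an SVD-based construction. The following generic, self-contained sketch shows the centered-perturbation idea with a made-up helper name; it is not one of the ADAO functions.

    # Centered Gaussian perturbations around a background state (generic sketch).
    import numpy

    def centered_ensemble(center, cov, n_members, seed=0):
        rng = numpy.random.default_rng(seed)
        center = numpy.ravel(center)[:, None]
        if cov is None:
            return numpy.tile(center, n_members)
        Z = rng.multivariate_normal(numpy.zeros(center.size), cov,
                                    size=n_members).T
        Z -= Z.mean(axis=1, keepdims=True)    # enforce a zero ensemble mean
        return numpy.tile(center, n_members) + Z

    E = centered_ensemble([1.0, 2.0], numpy.eye(2), 5)
    print(E.shape, E.mean(axis=1))            # (2, 5) and about [1., 2.]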
 
@@ -573,7 +572,11 @@ def EnsembleErrorCovariance( __Ensemble, __quick = False ):
     return __Covariance
 
 # ==============================================================================
-def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
+def EnsemblePerturbationWithGivenCovariance(
+        __Ensemble,
+        __Covariance,
+        __Seed = None,
+        ):
     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
     if hasattr(__Covariance,"assparsematrix"):
         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
@@ -620,97 +623,99 @@ def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=No
 
 # ==============================================================================
 def CovarianceInflation(
-        InputCovOrEns,
-        InflationType   = None,
-        InflationFactor = None,
-        BackgroundCov   = None,
+        __InputCovOrEns,
+        __InflationType   = None,
+        __InflationFactor = None,
+        __BackgroundCov   = None,
         ):
     """
     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
 
     Synthèse : Hunt 2007, section 2.3.5
     """
-    if InflationFactor is None:
-        return InputCovOrEns
+    if __InflationFactor is None:
+        return __InputCovOrEns
     else:
-        InflationFactor = float(InflationFactor)
+        __InflationFactor = float(__InflationFactor)
     #
-    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
-        if InflationFactor < 1.:
+    if __InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
+        if __InflationFactor < 1.:
             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
-        if InflationFactor < 1.+mpr:
-            return InputCovOrEns
-        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
+        if __InflationFactor < 1.+mpr:
+            return __InputCovOrEns
+        __OutputCovOrEns = __InflationFactor**2 * __InputCovOrEns
     #
-    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
-        if InflationFactor < 1.:
+    elif __InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
+        if __InflationFactor < 1.:
             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
-        if InflationFactor < 1.+mpr:
-            return InputCovOrEns
-        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
-        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
-            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
-    #
-    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
-        if InflationFactor < 0.:
+        if __InflationFactor < 1.+mpr:
+            return __InputCovOrEns
+        __InputCovOrEnsMean = __InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
+        __OutputCovOrEns = __InputCovOrEnsMean[:,numpy.newaxis] \
+            + __InflationFactor * (__InputCovOrEns - __InputCovOrEnsMean[:,numpy.newaxis])
+    #
+    elif __InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
+        if __InflationFactor < 0.:
             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
-        if InflationFactor < mpr:
-            return InputCovOrEns
+        if __InflationFactor < mpr:
+            return __InputCovOrEns
         __n, __m = numpy.asarray(__InputCovOrEns).shape
         if __n != __m:
             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
-        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
+        __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * numpy.identity(__n)
     #
-    elif InflationType == "HybridOnBackgroundCovariance":
-        if InflationFactor < 0.:
+    elif __InflationType == "HybridOnBackgroundCovariance":
+        if __InflationFactor < 0.:
             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
-        if InflationFactor < mpr:
-            return InputCovOrEns
+        if __InflationFactor < mpr:
+            return __InputCovOrEns
         __n, __m = numpy.asarray(__InputCovOrEns).shape
         if __n != __m:
             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
-        if BackgroundCov is None:
+        if __BackgroundCov is None:
             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
-        if InputCovOrEns.shape != BackgroundCov.shape:
+        if __InputCovOrEns.shape != __BackgroundCov.shape:
             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
-        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
+        __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * __BackgroundCov
     #
-    elif InflationType == "Relaxation":
+    elif __InflationType == "Relaxation":
         raise NotImplementedError("InflationType Relaxation")
     #
     else:
         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
     #
-    return OutputCovOrEns
+    return __OutputCovOrEns
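
    For readers skimming the diff, a minimal standalone sketch (illustrative names and values, outside the commit itself) of the multiplicative-on-anomalies branch above: the members are re-centred around their ensemble mean and the anomalies are stretched by the inflation factor.

    import numpy

    def multiplicative_anomaly_inflation(ensemble, factor):
        # ensemble: (n_states, n_members) array, factor >= 1.
        mean = ensemble.mean(axis=1, keepdims=True)
        return mean + factor * (ensemble - mean)

    E = numpy.array([[1.0, 1.2, 0.8],
                     [2.0, 2.1, 1.9]])
    E_inflated = multiplicative_anomaly_inflation(E, 1.1)  # anomalies stretched by 10 %
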
 
 # ==============================================================================
-def HessienneEstimation(nb, HaM, HtM, BI, RI):
+def HessienneEstimation(__selfA, __nb, __HaM, __HtM, __BI, __RI):
     "Estimation de la Hessienne"
     #
-    HessienneI = []
-    for i in range(int(nb)):
-        _ee    = numpy.zeros((nb,1))
-        _ee[i] = 1.
-        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
-        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
-    #
-    A = numpy.linalg.inv(numpy.array( HessienneI ))
-    #
-    if min(A.shape) != max(A.shape):
-        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
-    if (numpy.diag(A) < 0).any():
-        raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
-    if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
+    __HessienneI = []
+    for i in range(int(__nb)):
+        __ee    = numpy.zeros((__nb,1))
+        __ee[i] = 1.
+        __HtEE  = numpy.dot(__HtM,__ee).reshape((-1,1))
+        __HessienneI.append( numpy.ravel( __BI * __ee + __HaM * (__RI * __HtEE) ) )
+    #
+    __A = numpy.linalg.inv(numpy.array( __HessienneI ))
+    __A = (__A + __A.T) * 0.5 # Symétrie
+    __A = __A + mpr*numpy.trace( __A ) * numpy.identity(__nb) # Positivité
+    #
+    if min(__A.shape) != max(__A.shape):
+        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(__selfA._name,str(__A.shape)))
+    if (numpy.diag(__A) < 0).any():
+        raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(__selfA._name,))
+    if logging.getLogger().level < logging.WARNING: # La vérification n'a lieu qu'en debug
         try:
-            L = numpy.linalg.cholesky( A )
+            numpy.linalg.cholesky( __A )
         except:
-            raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+            raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(__selfA._name,))
     #
-    return A
+    return __A
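
    A minimal sketch (illustrative matrix, outside the commit itself) of the symmetrisation and positivity shift added above before the Cholesky check; eps stands in for the module's machine-precision constant mpr.

    import numpy

    def symmetrize_and_regularize(A, eps=1.e-14):
        A = 0.5 * (A + A.T)                                           # remove round-off asymmetry
        return A + eps * numpy.trace(A) * numpy.identity(A.shape[0])  # nudge the diagonal

    A = numpy.array([[4.0, 1.0000001],
                     [0.9999999, 3.0]])                       # slightly asymmetric input
    L = numpy.linalg.cholesky(symmetrize_and_regularize(A))   # same check as the debug branch above
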
 
 # ==============================================================================
 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
-    "Estimation des quantiles a posteriori (selfA est modifié)"
+    "Estimation des quantiles a posteriori à partir de A>0 (selfA est modifié)"
     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
     #
     # Traitement des bornes
@@ -722,22 +727,22 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
         LBounds = None
     if LBounds is not None:
         LBounds = ForceNumericBounds( LBounds )
-    _Xa = numpy.ravel(Xa)
+    __Xa = numpy.ravel(Xa)
     #
     # Échantillonnage des états
     YfQ  = None
     EXr  = None
     for i in range(nbsamples):
         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
-            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
+            dXr = (numpy.random.multivariate_normal(__Xa,A) - __Xa).reshape((-1,1))
             if LBounds is not None: # "EstimateProjection" par défaut
-                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
-                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
+                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - __Xa.reshape((-1,1))),axis=1)
+                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - __Xa.reshape((-1,1))),axis=1)
             dYr = HtM @ dXr
             Yr = HXa.reshape((-1,1)) + dYr
-            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
+            if selfA._toStore("SampledStateForQuantiles"): Xr = __Xa + numpy.ravel(dXr)
         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
-            Xr = numpy.random.multivariate_normal(_Xa,A)
+            Xr = numpy.random.multivariate_normal(__Xa,A)
             if LBounds is not None: # "EstimateProjection" par défaut
                 Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                 Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
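
    The "EstimateProjection" clipping used in the sampling loop above amounts to an elementwise projection onto the bounds; a minimal sketch with illustrative values:

    import numpy

    Xr = numpy.array([-1.0, 0.5, 9.0])                 # sampled state
    LB = numpy.array([ 0.0, 0.0, 0.0])                 # lower bounds
    UB = numpy.array([ 5.0, 5.0, 5.0])                 # upper bounds
    Xr = numpy.minimum(numpy.maximum(Xr, LB), UB)      # projected state: [0. , 0.5, 5. ]
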
@@ -781,12 +786,16 @@ def ForceNumericBounds( __Bounds ):
     return __Bounds
 
 # ==============================================================================
-def RecentredBounds( __Bounds, __Center):
+def RecentredBounds( __Bounds, __Center, __Scale = None):
     "Recentre les bornes autour de 0, sauf si globalement None"
     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
     if __Bounds is None: return None
-    # Recentre les valeurs numériques de bornes
-    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
+    if __Scale is None:
+        # Recentre les valeurs numériques de bornes
+        return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
+    else:
+        # Recentre les valeurs numériques de bornes et change l'échelle par une matrice
+        return __Scale @ (ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1)))
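
    A minimal sketch (outside the commit itself) of what the new __Scale argument does: the bounds are first recentred around the current point, then mapped through a scaling matrix. The diagonal matrix below is only an illustrative choice.

    import numpy

    Bounds = numpy.array([[0., 10.],
                          [5., 15.]])                  # one [min, max] row per variable
    Center = numpy.array([2., 7.])
    Scale  = numpy.diag([0.5, 2.0])
    RecentredScaledBounds = Scale @ (Bounds - Center.reshape((-1, 1)))
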
 
 # ==============================================================================
 def ApplyBounds( __Vector, __Bounds, __newClip = True):
@@ -821,7 +830,7 @@ def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Bet
     #
     Xf = EnsembleMean( __EnXf )
     Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
-    Pf = (1 - __Betaf) * __B + __Betaf * Pf
+    Pf = (1 - __Betaf) * __B.asfullmatrix(Xf.size) + __Betaf * Pf
     #
     selfB = PartialAlgorithm("3DVAR")
     selfB._parameters["Minimizer"] = "LBFGSB"
@@ -834,3601 +843,84 @@ def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Bet
     selfB._parameters["optdisp"] = 0
     selfB._parameters["Bounds"] = None
     selfB._parameters["InitializationPoint"] = Xf
-    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
+    from daAlgorithms.Atoms import std3dvar
+    std3dvar.std3dvar(selfB, Xf, __Ynpu, __HO, __R, Pf)
     Xa = selfB.get("Analysis")[-1].reshape((-1,1))
     del selfB
     #
     return Xa + EnsembleOfAnomalies( __EnXn )
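
    The hybrid forecast covariance built above is a convex combination of the static background covariance B and the ensemble covariance Pf, weighted by __Betaf; a minimal sketch with illustrative matrices:

    import numpy

    beta = 0.5
    B    = numpy.identity(3)                            # static background covariance
    Pf   = numpy.diag([2.0, 0.5, 1.0])                  # ensemble covariance
    Pf_hybrid = (1. - beta) * B + beta * Pf             # covariance handed to the 3DVAR correction
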
 
 # ==============================================================================
-def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+def multiXOsteps(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
     """
-    Constrained Unscented Kalman Filter
+    Prévision multi-pas avec une correction par pas en X (multi-méthodes)
     """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
-    #
-    L     = Xb.size
-    Alpha = selfA._parameters["Alpha"]
-    Beta  = selfA._parameters["Beta"]
-    if selfA._parameters["Kappa"] == 0:
-        if selfA._parameters["EstimationOf"] == "State":
-            Kappa = 0
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            Kappa = 3 - L
-    else:
-        Kappa = selfA._parameters["Kappa"]
-    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
-    Gamma  = math.sqrt( L + Lambda )
-    #
-    Ww = []
-    Ww.append( 0. )
-    for i in range(2*L):
-        Ww.append( 1. / (2.*(L + Lambda)) )
-    #
-    Wm = numpy.array( Ww )
-    Wm[0] = Lambda / (L + Lambda)
-    Wc = numpy.array( Ww )
-    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
-    #
-    # Opérateurs
-    Hm = HO["Direct"].appliedControledFormTo
     #
+    # Initialisation
+    # --------------
     if selfA._parameters["EstimationOf"] == "State":
-        Mm = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Durée d'observation et tailles
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Précalcul des inversions de B et R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        if hasattr(B,"asfullmatrix"):
-            Pn = B.asfullmatrix(__n)
-        else:
-            Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
-        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
-        nbSpts = 2*Xn.size+1
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            for point in range(nbSpts):
-                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
-        #
-        XEtnnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
-                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    XEtnnpi = XEtnnpi + Cm @ Un
-                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                # --- > Par principe, M = Id, Q = 0
-                XEtnnpi = Xnp[:,point]
-            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
-        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
-        #
-        Xncm = ( XEtnnp * Wm ).sum(axis=1)
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
-        #
-        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
-        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
-        for point in range(nbSpts):
-            Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
-        #
-        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
-            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
-        else:
-            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
-        #
-        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            for point in range(nbSpts):
-                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
-        #
-        Ynnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                Ynnpi = Hm( (Xnnp[:,point], None) )
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                Ynnpi = Hm( (Xnnp[:,point], Un) )
-            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
-        Ynnp = numpy.concatenate( Ynnp, axis=1 )
-        #
-        Yncm = ( Ynnp * Wm ).sum(axis=1)
-        #
-        Pyyn = R
-        Pxyn = 0.
-        for point in range(nbSpts):
-            Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-            Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-        #
-        _Innovation  = Ynpu - Yncm.reshape((-1,1))
-        if selfA._parameters["EstimationOf"] == "Parameters":
-            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pxyn * Pyyn.I
-        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
-        Pn = Pnm - Kn * Pyyn * Kn.T
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
-        #
-        Xa = Xn # Pointeurs
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> avec analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> avec current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xncm )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xncm - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
-        # ---> autres
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
+        if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+            Xn = numpy.asarray(Xb)
+            selfA.StoredVariables["Analysis"].store( Xn )
             if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Stockage final supplémentaire de l'optimum en estimation de paramètres
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
-# ==============================================================================
-def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Contrained Extended Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
-    #
-    # Opérateurs
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
+                if hasattr(B,"asfullmatrix"):
+                    selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
+                else:
+                    selfA.StoredVariables["APosterioriCovariance"].store( B )
+            if selfA._toStore("ForecastState"):
+                selfA.StoredVariables["ForecastState"].store( Xn )
+            selfA._setInternalState("seed", numpy.random.get_state())
+        elif selfA._parameters["nextStep"]:
+            Xn = selfA._getInternalState("Xn")
     else:
-        Cm = None
+        Xn = numpy.asarray(Xb)
     #
-    # Durée d'observation et tailles
     if hasattr(Y,"stepnumber"):
         duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
     else:
         duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Précalcul des inversions de B et R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
     #
+    # Multi-steps
+    # -----------
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+            Ynpu = numpy.asarray( Y[step+1] ).reshape((-1,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
-        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
-            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
-            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
+            Ynpu = numpy.asarray( Y ).reshape((-1,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
+                Un = numpy.asarray( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
+                Un = numpy.asarray( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.ravel( U ).reshape((-1,1))
+                Un = numpy.asarray( U ).reshape((-1,1))
         else:
             Un = None
         #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
-            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-            Pn_predicted = Q + Mt * (Pn * Ma)
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > Par principe, M = Id, Q = 0
-            Xn_predicted = Xn
-            Pn_predicted = Pn
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-        Xn = Xn_predicted + Kn * _Innovation
-        Pn = Pn_predicted - Kn * Ht * Pn_predicted
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
-        #
-        Xa = Xn # Pointeurs
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> avec analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> avec current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> autres
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Stockage final supplémentaire de l'optimum en estimation de paramètres
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
-# ==============================================================================
-def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
-    """
-    EnKS
-    """
-    #
-    # Opérateurs
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Précalcul des inversions de B et R
-    RIdemi = R.sqrtmI()
-    #
-    # Durée d'observation et tailles
-    LagL = selfA._parameters["SmootherLagL"]
-    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
-        raise ValueError("Fixed-lag smoother requires a series of observation")
-    if Y.stepnumber() < LagL:
-        raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
-    duration = Y.stepnumber()
-    __p = numpy.cumprod(Y.shape())[-1]
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-    #
-    # Calcul direct initial (on privilégie la mémorisation au recalcul)
-    __seed = numpy.random.get_state()
-    selfB = copy.deepcopy(selfA)
-    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
-    if VariantM == "EnKS16-KalmanFilterFormula":
-        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
-    else:
-        raise ValueError("VariantM has to be chosen in the authorized methods list.")
-    if LagL > 0:
-        EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
-    else:
-        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
-    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
-    #
-    for step in range(LagL,duration-1):
-        #
-        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
-        sEL.append(None)
-        #
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
+        if selfA._parameters["EstimationOf"] == "State": # Forecast
+            M = EM["Direct"].appliedControledFormTo
+            if CM is not None and "Tangent" in CM and Un is not None:
+                Cm = CM["Tangent"].asMatrix(Xn)
             else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        #--------------------------
-        if VariantM == "EnKS16-KalmanFilterFormula":
-            if selfA._parameters["EstimationOf"] == "State": # Forecast
-                EL = M( [(EL[:,i], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
-                EZ = H( [(EL[:,i], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                    EZ = EZ + Cm @ Un
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                # --- > Par principe, M = Id, Q = 0
-                EZ = H( [(EL[:,i], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-            #
-            vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-            vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+                Cm = None
             #
-            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
-            mS    = mS.reshape((-1,__m)) # Pour dimension 1
-            delta = RIdemi @ ( Ynpu - vZm )
-            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
-            vw    = mT @ mS.T @ delta
-            #
-            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
-            mU    = numpy.identity(__m)
-            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
-            #
-            EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
-            EL    = vEm + EX @ wTU
-            #
-            sEL[LagL] = EL
-            for irl in range(LagL): # Lissage des L précédentes analysis
-                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
-                sEL[irl] = vEm + EX @ wTU
-            #
-            # Conservation de l'analyse retrospective d'ordre 0 avant rotation
-            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-            if selfA._toStore("APosterioriCovariance"):
-                EXn = sEL[0]
-            #
-            for irl in range(LagL):
-                sEL[irl] = sEL[irl+1]
-            sEL[LagL] = None
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> avec analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
-    #
-    # Stockage des dernières analyses incomplètement remises à jour
-    for irl in range(LagL):
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-        selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    return 0
-
-# ==============================================================================
-def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
-    VariantM="KalmanFilterFormula",
-    Hybrid=None,
-    ):
-    """
-    Ensemble-Transform EnKF
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Opérateurs
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Durée d'observation et tailles
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Précalcul des inversions de B et R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    elif VariantM != "KalmanFilterFormula":
-        RI = R.getI()
-    if VariantM == "KalmanFilterFormula":
-        RIdemi = R.sqrtmI()
-    #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
+            Xn_predicted = M( (Xn, Un) )
+            if selfA._toStore("ForecastState"):
+                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                 Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
             # --- > Par principe, M = Id, Q = 0
-            Xn_predicted = EMX = Xn
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-        #
-        # Mean of forecast and observation of forecast
-        Xfm  = EnsembleMean( Xn_predicted )
-        Hfm  = EnsembleMean( HX_predicted )
-        #
-        # Anomalies
-        EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
-        EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
-        #
-        #--------------------------
-        if VariantM == "KalmanFilterFormula":
-            mS    = RIdemi * EaHX / math.sqrt(__m-1)
-            mS    = mS.reshape((-1,__m)) # Pour dimension 1
-            delta = RIdemi * ( Ynpu - Hfm )
-            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
-            vw    = mT @ mS.T @ delta
-            #
-            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
-            mU    = numpy.identity(__m)
-            #
-            EaX   = EaX / math.sqrt(__m-1)
-            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
-        #--------------------------
-        elif VariantM == "Variational":
-            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T @ (RI * _A)
-                _Jb = 0.5 * (__m-1) * w.T @ w
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GardJo = - EaHX.T @ (RI * _A)
-                _GradJb = (__m-1) * w.reshape((__m,1))
-                _GradJ  = _GardJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = (__m-1) * numpy.identity(__m)
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
-        #--------------------------
-        elif VariantM == "FiniteSize11": # Jauge Boc2011
-            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T @ (RI * _A)
-                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GardJo = - EaHX.T @ (RI * _A)
-                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
-                _GradJ  = _GardJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = __m * \
-                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
-                / (1 + 1/__m + vw.T @ vw)**2
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
-        #--------------------------
-        elif VariantM == "FiniteSize15": # Jauge Boc2015
-            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T * (RI * _A)
-                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GardJo = - EaHX.T @ (RI * _A)
-                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
-                _GradJ  = _GardJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = (__m+1) * \
-                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
-                / (1 + 1/__m + vw.T @ vw)**2
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
-        #--------------------------
-        elif VariantM == "FiniteSize16": # Jauge Boc2016
-            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T @ (RI * _A)
-                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GardJo = - EaHX.T @ (RI * _A)
-                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
-                _GradJ  = _GardJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = ((__m+1) / (__m-1)) * \
-                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
-                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
+            Xn_predicted = Xn
+        Xn_predicted = numpy.asarray(Xn_predicted).reshape((-1,1))
         #
-        if Hybrid == "E3DVAR":
-            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
-            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        oneCycle(selfA, Xn_predicted, Ynpu, HO, R, B) # Correct
         #
-        Xa = EnsembleMean( Xn )
+        Xn = selfA.StoredVariables["Analysis"][-1]
         #--------------------------
         selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> avec analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> avec current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( EMX )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> autres
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum in parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
-# ==============================================================================
-def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Extended Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precomputation of the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
-        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
-            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
-            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
-            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-            Pn_predicted = Q + Mt * (Pn * Ma)
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By design, M = Id, Q = 0
-            Xn_predicted = Xn
-            Pn_predicted = Pn
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-        Xn = Xn_predicted + Kn * _Innovation
-        Pn = Pn_predicted - Kn * Ht * Pn_predicted
-        #
-        Xa = Xn # Pointers
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with the analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with the current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Additional final storage of the optimum in parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
-# ==============================================================================
-def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
-    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
-    """
-    Iterative EnKF
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precomputation of the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-    RI = R.getI()
-    #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
-        else:                         Pn = B
-        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        #--------------------------
-        if VariantM == "IEnKF12":
-            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
-            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
-            __j = 0
-            Deltaw = 1
-            if not BnotT:
-                Ta  = numpy.identity(__m)
-            vw  = numpy.zeros(__m)
-            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
-                #
-                if BnotT:
-                    E1 = vx1 + _epsilon * EaX
-                else:
-                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
-                #
-                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
-                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
-                        argsAsSerie = True,
-                        returnSerieAsArrayMatrix = True )
-                elif selfA._parameters["EstimationOf"] == "Parameters":
-                    # --- > By design, M = Id
-                    E2 = Xn
-                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-                vy1 = H((vx2, Un)).reshape((__p,1))
-                #
-                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
-                #
-                if BnotT:
-                    EaY = (HE2 - vy2) / _epsilon
-                else:
-                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
-                #
-                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
-                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
-                Deltaw = - numpy.linalg.solve(mH,GradJ)
-                #
-                vw = vw + Deltaw
-                #
-                if not BnotT:
-                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-                #
-                __j = __j + 1
-            #
-            A2 = EnsembleOfAnomalies( E2 )
-            #
-            if BnotT:
-                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
-            #
-            Xn = vx2 + A2
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        Xa = EnsembleMean( Xn )
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with the analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with the current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( E2 )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( E2 - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum in parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
-# ==============================================================================
-def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Incremental 3DVAR
-    """
-    #
-    # Initialisations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    #
-    BI = B.getI()
-    RI = R.getI()
-    #
-    HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
-    Innovation = Y - HXb
-    #
-    # Outer Loop
-    # ----------
-    iOuter = 0
-    J      = 1./mpr
-    DeltaJ = 1./mpr
-    Xr     = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
-    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
-        #
-        # Inner Loop
-        # ----------
-        Ht = HO["Tangent"].asMatrix(Xr)
-        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
-        #
-        # Definition of the cost function
-        # ------------------------------
-        def CostFunction(dx):
-            _dX  = numpy.asarray(dx).reshape((-1,1))
-            if selfA._parameters["StoreInternalVariables"] or \
-                selfA._toStore("CurrentState") or \
-                selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
-            _HdX = (Ht @ _dX).reshape((-1,1))
-            _dInnovation = Innovation - _HdX
-            if selfA._toStore("SimulatedObservationAtCurrentState") or \
-                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
-            if selfA._toStore("InnovationAtCurrentState"):
-                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
-            #
-            Jb  = float( 0.5 * _dX.T * (BI * _dX) )
-            Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
-            J   = Jb + Jo
-            #
-            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            if selfA._toStore("IndexOfOptimum") or \
-                selfA._toStore("CurrentOptimum") or \
-                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-            return J
-        #
-        def GradientOfCostFunction(dx):
-            _dX          = numpy.ravel( dx )
-            _HdX         = (Ht @ _dX).reshape((-1,1))
-            _dInnovation = Innovation - _HdX
-            GradJb       = BI @ _dX
-            GradJo       = - Ht.T @ (RI * _dInnovation)
-            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-            return GradJ
-        #
-        # Minimization of the functional
-        # --------------------------------
-        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-        #
-        if selfA._parameters["Minimizer"] == "LBFGSB":
-            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
-            if "0.19" <= scipy.version.version <= "1.1.0":
-                import lbfgsbhlt as optimiseur
-            else:
-                import scipy.optimize as optimiseur
-            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-                func        = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-                iprint      = selfA._parameters["optiprint"],
-                )
-            nfeval = Informations['funcalls']
-            rc     = Informations['warnflag']
-        elif selfA._parameters["Minimizer"] == "TNC":
-            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-                func        = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-                ftol        = selfA._parameters["CostDecrementTolerance"],
-                messages    = selfA._parameters["optmessages"],
-                )
-        elif selfA._parameters["Minimizer"] == "CG":
-            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-                gtol        = selfA._parameters["GradientNormTolerance"],
-                disp        = selfA._parameters["optdisp"],
-                full_output = True,
-                )
-        elif selfA._parameters["Minimizer"] == "NCG":
-            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-                f           = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-                avextol     = selfA._parameters["CostDecrementTolerance"],
-                disp        = selfA._parameters["optdisp"],
-                full_output = True,
-                )
-        elif selfA._parameters["Minimizer"] == "BFGS":
-            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-                f           = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-                gtol        = selfA._parameters["GradientNormTolerance"],
-                disp        = selfA._parameters["optdisp"],
-                full_output = True,
-                )
-        else:
-            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-        #
-        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-        #
-        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-        else:
-            Minimum = Xb + Minimum.reshape((-1,1))
-        #
-        Xr     = Minimum
-        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
-        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
-    #
-    Xa = Xr
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional calculations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
-    #
-    return 0
-
-# ==============================================================================
-def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
-    VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
-    Hybrid=None,
-    ):
-    """
-    Maximum Likelihood Ensemble Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precomputation of the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-    RI = R.getI()
-    #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
-            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By design, M = Id, Q = 0
-            Xn_predicted = EMX = Xn
-        #
-        #--------------------------
-        if VariantM == "MLEF13":
-            Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
-            EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
-            Ua  = numpy.identity(__m)
-            __j = 0
-            Deltaw = 1
-            if not BnotT:
-                Ta  = numpy.identity(__m)
-            vw  = numpy.zeros(__m)
-            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
-                #
-                if BnotT:
-                    E1 = vx1 + _epsilon * EaX
-                else:
-                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
-                #
-                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
-                #
-                if BnotT:
-                    EaY = (HE2 - vy2) / _epsilon
-                else:
-                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
-                #
-                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
-                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
-                Deltaw = - numpy.linalg.solve(mH,GradJ)
-                #
-                vw = vw + Deltaw
-                #
-                if not BnotT:
-                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-                #
-                __j = __j + 1
-            #
-            if BnotT:
-                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-            #
-            Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if Hybrid == "E3DVAR":
-            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
-            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
-        #
-        Xa = EnsembleMean( Xn )
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with the analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with the current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( EMX )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum in parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
-# ==============================================================================
-def mmqr(
-        func     = None,
-        x0       = None,
-        fprime   = None,
-        bounds   = None,
-        quantile = 0.5,
-        maxfun   = 15000,
-        toler    = 1.e-06,
-        y        = None,
-        ):
-    """
-    Computer implementation of the MMQR algorithm, based on the publication:
-    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
-    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
-    """
-    #
-    # Retrieval of the initial data and information
-    # --------------------------------------------------
-    variables = numpy.ravel( x0 )
-    mesures   = numpy.ravel( y )
-    increment = sys.float_info[0]
-    p         = variables.size
-    n         = mesures.size
-    quantile  = float(quantile)
-    #
-    # Computation of the MM parameters
-    # ---------------------------
-    tn      = float(toler) / n
-    e0      = -tn / math.log(tn)
-    epsilon = (e0-tn)/(1+math.log(e0))
-    #
-    # Initialization calculations
-    # ------------------------
-    residus  = mesures - numpy.ravel( func( variables ) )
-    poids    = 1./(epsilon+numpy.abs(residus))
-    veps     = 1. - 2. * quantile - residus * poids
-    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
-    iteration = 0
-    #
-    # Iterative search
-    # -------------------
-    while (increment > toler) and (iteration < maxfun) :
-        iteration += 1
-        #
-        Derivees  = numpy.array(fprime(variables))
-        Derivees  = Derivees.reshape(n,p) # ADAO & check shape
-        DeriveesT = Derivees.transpose()
-        M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
-        SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
-        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
-        #
-        variables = variables + step
-        if bounds is not None:
-            # Warning: avoid an infinite loop if an interval is too small
-            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
-                step      = step/2.
-                variables = variables - step
-        residus   = mesures - numpy.ravel( func(variables) )
-        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
-        #
-        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
-            step      = step/2.
-            variables = variables - step
-            residus   = mesures - numpy.ravel( func(variables) )
-            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
-        #
-        increment     = lastsurrogate-surrogate
-        poids         = 1./(epsilon+numpy.abs(residus))
-        veps          = 1. - 2. * quantile - residus * poids
-        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
-    #
-    # Discrepancy measure
-    # --------------
-    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
-    #
-    return variables, Ecart, [n,p,iteration,increment,0]
-
-# ==============================================================================
-def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
-    """
-    Multi-step and multi-method 3DVAR
-    """
-    #
-    # Initialisation
-    # --------------
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-        if CM is not None and "Tangent" in CM and U is not None:
-            Cm = CM["Tangent"].asMatrix(Xb)
-        else:
-            Cm = None
-        #
-        if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-            Xn = numpy.ravel(Xb).reshape((-1,1))
-            selfA.StoredVariables["Analysis"].store( Xn )
-            if selfA._toStore("APosterioriCovariance"):
-                if hasattr(B,"asfullmatrix"):
-                    selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
-                else:
-                    selfA.StoredVariables["APosterioriCovariance"].store( B )
-            if selfA._toStore("ForecastState"):
-                selfA.StoredVariables["ForecastState"].store( Xn )
-        elif selfA._parameters["nextStep"]:
-            Xn = selfA._getInternalState("Xn")
-    else:
-        Xn = numpy.ravel(Xb).reshape((-1,1))
-    #
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-    else:
-        duration = 2
-    #
-    # Multi-step
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((-1,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast
-            Xn_predicted = M( (Xn, Un) )
-            if selfA._toStore("ForecastState"):
-                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
-                Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
-            # --- > By design, M = Id, Q = 0
-            Xn_predicted = Xn
-        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
-        #
-        oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
-        #
-        Xn = selfA.StoredVariables["Analysis"][-1]
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-    #
-    return 0
-
-# ==============================================================================
-def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    3DVAR PSAS
-    """
-    #
-    # Initialisations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    #
-    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
-    else:
-        HXb = numpy.asarray(Hm( Xb ))
-    HXb = numpy.ravel( HXb ).reshape((-1,1))
-    if Y.size != HXb.size:
-        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
-    if max(Y.shape) != max(HXb.shape):
-        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
-    #
-    if selfA._toStore("JacobianMatrixAtBackground"):
-        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
-        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
-        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
-    #
-    Ht = HO["Tangent"].asMatrix(Xb)
-    BHT = B * Ht.T
-    HBHTpR = R + Ht * BHT
-    Innovation = Y - HXb
-    #
-    Xini = numpy.zeros(Y.size)
-    #
-    # Definition of the cost function
-    # ------------------------------
-    def CostFunction(w):
-        _W = numpy.asarray(w).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
-        if selfA._toStore("SimulatedObservationAtCurrentState") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
-        #
-        Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
-        Jo  = float( - _W.T @ Innovation )
-        J   = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(w):
-        _W = numpy.asarray(w).reshape((-1,1))
-        GradJb  = HBHTpR @ _W
-        GradJo  = - Innovation
-        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the functional
-    # --------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Workaround for a TNC bug on the returned Minimum
-    # ----------------------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    else:
-        Minimum = Xb + BHT @ Minimum.reshape((-1,1))
-    #
-    Xa = Minimum
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        BI = B.getI()
-        RI = R.getI()
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional calculations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
-    #
-    return 0
-
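For reference, the psas3dvar routine above minimizes the dual (observation-space) cost function J(w) = 0.5 w^T (H B H^T + R) w - w^T d, with d = y - H(x_b), and reconstructs the analysis as x_a = x_b + B H^T w. A minimal sketch of that formulation with small dense NumPy matrices and scipy.optimize, not the ADAO covariance objects:

import numpy
import scipy.optimize

B  = numpy.eye(3)                               # background error covariance
R  = 0.1 * numpy.eye(2)                         # observation error covariance
H  = numpy.array([[1., 0., 0.], [0., 1., 0.]])  # linear observation operator
xb = numpy.zeros(3)                             # background state
y  = numpy.array([1., 2.])                      # observations
d  = y - H @ xb                                 # innovation
A  = H @ B @ H.T + R                            # observation-space system matrix

res = scipy.optimize.minimize(
    fun    = lambda w: 0.5 * w @ A @ w - w @ d, # dual cost function J(w)
    jac    = lambda w: A @ w - d,               # its gradient
    x0     = numpy.zeros(y.size),
    method = "L-BFGS-B",
    )
xa = xb + B @ H.T @ res.x                       # analysis back in state space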
-# ==============================================================================
-def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
-    VariantM="KalmanFilterFormula16",
-    Hybrid=None,
-    ):
-    """
-    Stochastic EnKF
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precompute the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
-    else:                         Rn = R
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
-        else:                         Pn = B
-        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            if Cm is not None and Un is not None: # Warning: if Cm is also in M, this is a duplicate!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By principle, M = Id, Q = 0
-            Xn_predicted = EMX = Xn
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-        #
-        # Mean of forecast and observation of forecast
-        Xfm  = EnsembleMean( Xn_predicted )
-        Hfm  = EnsembleMean( HX_predicted )
-        #
-        #--------------------------
-        if VariantM == "KalmanFilterFormula05":
-            PfHT, HPfHT = 0., 0.
-            for i in range(__m):
-                Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
-                Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
-                PfHT  += Exfi * Eyfi.T
-                HPfHT += Eyfi * Eyfi.T
-            PfHT  = (1./(__m-1)) * PfHT
-            HPfHT = (1./(__m-1)) * HPfHT
-            Kn     = PfHT * ( R + HPfHT ).I
-            del PfHT, HPfHT
-            #
-            for i in range(__m):
-                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
-                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
-        #--------------------------
-        elif VariantM == "KalmanFilterFormula16":
-            EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
-            EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
-            #
-            EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
-            EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
-            #
-            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
-            #
-            for i in range(__m):
-                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if Hybrid == "E3DVAR":
-            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
-            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
-        #
-        Xa = EnsembleMean( Xn )
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( EMX )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
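For reference, one analysis step of the stochastic (perturbed-observations) EnKF implemented above, in the spirit of the "KalmanFilterFormula16" branch; a minimal sketch with a toy linear observation operator and a 3-member ensemble in plain NumPy, not the ADAO ensemble helpers:

import numpy

rng = numpy.random.default_rng(0)
n, p, m = 3, 2, 3                                      # state size, obs size, members
Xf  = rng.normal(size=(n, m))                          # forecast ensemble
H   = numpy.array([[1., 0., 0.], [0., 1., 0.]])        # linear observation operator
R   = 0.1 * numpy.eye(p)
y   = numpy.array([[1.], [2.]])                        # observation vector

Ep  = rng.multivariate_normal(numpy.zeros(p), R, size=m).T   # observation perturbations (p x m)
Yp  = y + Ep                                           # perturbed observations
HXf = H @ Xf                                           # observed forecast ensemble
Ax  = (Xf  - Xf.mean(axis=1, keepdims=True)) / numpy.sqrt(m - 1)
Ay  = ((HXf - HXf.mean(axis=1, keepdims=True)) - (Yp - Yp.mean(axis=1, keepdims=True))) / numpy.sqrt(m - 1)
K   = Ax @ Ay.T @ numpy.linalg.inv(Ay @ Ay.T)          # ensemble gain
Xa  = Xf + K @ (Yp - HXf)                              # analysis ensemble
xa  = Xa.mean(axis=1)                                  # analysis estimate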
-# ==============================================================================
-def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    3DVAR
-    """
-    #
-    # Initializations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    Ha = HO["Adjoint"].appliedInXTo
-    #
-    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
-    else:
-        HXb = numpy.asarray(Hm( Xb ))
-    HXb = HXb.reshape((-1,1))
-    if Y.size != HXb.size:
-        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
-    if max(Y.shape) != max(HXb.shape):
-        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
-    #
-    if selfA._toStore("JacobianMatrixAtBackground"):
-        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
-        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
-        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
-    #
-    BI = B.getI()
-    RI = R.getI()
-    #
-    Xini = selfA._parameters["InitializationPoint"]
-    #
-    # Definition of the cost function
-    # ------------------------------
-    def CostFunction(x):
-        _X  = numpy.asarray(x).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( _X )
-        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
-        _Innovation = Y - _HX
-        if selfA._toStore("SimulatedObservationAtCurrentState") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        #
-        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
-        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-        J   = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(x):
-        _X      = numpy.asarray(x).reshape((-1,1))
-        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
-        GradJb  = BI * (_X - Xb)
-        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
-        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the functional
-    # --------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Workaround for a TNC bug on the returned Minimum
-    # ----------------------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    #
-    Xa = Minimum
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional calculations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
-    #
-    return 0
-
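For reference, the primal 3DVAR problem solved by the std3dvar routine above is J(x) = 0.5 (x - x_b)^T B^-1 (x - x_b) + 0.5 (y - H(x))^T R^-1 (y - H(x)), whose gradient for a linear H is B^-1 (x - x_b) - H^T R^-1 (y - H x). A minimal sketch with small dense NumPy matrices and scipy.optimize, not the ADAO covariance objects:

import numpy
import scipy.optimize

B  = numpy.eye(3);       BI = numpy.linalg.inv(B)
R  = 0.1 * numpy.eye(2); RI = numpy.linalg.inv(R)
H  = numpy.array([[1., 0., 0.], [0., 1., 0.]])   # linear observation operator
xb = numpy.zeros(3)                              # background state
y  = numpy.array([1., 2.])                       # observations

def J(x):
    # background term + observation term
    return 0.5 * (x - xb) @ BI @ (x - xb) + 0.5 * (y - H @ x) @ RI @ (y - H @ x)

def gradJ(x):
    return BI @ (x - xb) - H.T @ RI @ (y - H @ x)

xa = scipy.optimize.minimize(J, xb, jac=gradJ, method="L-BFGS-B").x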
-# ==============================================================================
-def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    4DVAR
-    """
-    #
-    # Initializations
-    # ---------------
-    #
-    # Operators
-    Hm = HO["Direct"].appliedControledFormTo
-    Mm = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    def Un(_step):
-        if U is not None:
-            if hasattr(U,"store") and 1<=_step<len(U) :
-                _Un = numpy.ravel( U[_step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                _Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                _Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            _Un = None
-        return _Un
-    def CmUn(_xn,_un):
-        if Cm is not None and _un is not None: # Warning: if Cm is also in M, this is a duplicate!
-            _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
-            _CmUn = (_Cm @ _un).reshape((-1,1))
-        else:
-            _CmUn = 0.
-        return _CmUn
-    #
-    # Note: the observations are used starting from time step number 1, and
-    # are stored in Yo indexed accordingly. Step 0 is therefore not used,
-    # since the first stage starts with the observation of step 1.
-    #
-    # Number of steps identical to the number of observation steps
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-    else:
-        duration = 2
-    #
-    # Precompute the inverses of B and R
-    BI = B.getI()
-    RI = R.getI()
-    #
-    # Starting point of the optimization
-    Xini = selfA._parameters["InitializationPoint"]
-    #
-    # Definition of the cost function
-    # ------------------------------
-    selfA.DirectCalculation = [None,] # Step 0 is not observed
-    selfA.DirectInnovation  = [None,] # Step 0 is not observed
-    def CostFunction(x):
-        _X  = numpy.asarray(x).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( _X )
-        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
-        selfA.DirectCalculation = [None,]
-        selfA.DirectInnovation  = [None,]
-        Jo  = 0.
-        _Xn = _X
-        for step in range(0,duration-1):
-            if hasattr(Y,"store"):
-                _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
-            else:
-                _Ynpu = numpy.ravel( Y ).reshape((-1,1))
-            _Un = Un(step)
-            #
-            # Evolution step
-            if selfA._parameters["EstimationOf"] == "State":
-                _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                pass
-            #
-            if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-                _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
-            #
-            # Difference-to-observations step
-            if selfA._parameters["EstimationOf"] == "State":
-                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
-            #
-            # Store the state
-            selfA.DirectCalculation.append( _Xn )
-            selfA.DirectInnovation.append( _YmHMX )
-            #
-            # Add to the observation functional
-            Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
-        J = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(x):
-        _X      = numpy.asarray(x).reshape((-1,1))
-        GradJb  = BI * (_X - Xb)
-        GradJo  = 0.
-        for step in range(duration-1,0,-1):
-            # Retrieve the last stored evolution state
-            _Xn = selfA.DirectCalculation.pop()
-            # Retrieve the last stored innovation
-            _YmHMX = selfA.DirectInnovation.pop()
-            # Compute the adjoints
-            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
-            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
-            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
-            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
-            # Compute the gradient via the adjoint state
-            GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
-            GradJo = Ma * GradJo                 # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
-        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the functional
-    # --------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Workaround for a TNC bug on the returned Minimum
-    # ----------------------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    #
-    # Obtaining the analysis
-    # ----------------------
-    Xa = Minimum
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    # Additional calculations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    #
-    return 0
-
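For reference, the strong-constraint 4DVAR cost minimized by std4dvar above is J(x0) = 0.5 (x0 - x_b)^T B^-1 (x0 - x_b) + 0.5 * sum_k (y_k - H M^k x0)^T R^-1 (y_k - H M^k x0), with the gradient accumulated backwards through the adjoints M^T and H^T as in GradientOfCostFunction. A minimal sketch assuming linear M and H given as NumPy matrices and SciPy available, not the ADAO operators:

import numpy
import scipy.optimize

M  = numpy.array([[1.0, 0.1], [0.0, 1.0]])     # evolution model (linear)
H  = numpy.array([[1.0, 0.0]])                 # observation operator (linear)
B  = numpy.eye(2);         BI = numpy.linalg.inv(B)
R  = numpy.array([[0.1]]); RI = numpy.linalg.inv(R)
xb = numpy.zeros(2)
Ys = [numpy.array([0.5]), numpy.array([0.9]), numpy.array([1.4])]   # y_1 .. y_3

def J(x0):
    x, Jo = x0.copy(), 0.0
    for y in Ys:
        x = M @ x                              # evolution step
        Jo += 0.5 * (y - H @ x) @ RI @ (y - H @ x)
    return 0.5 * (x0 - xb) @ BI @ (x0 - xb) + Jo

def gradJ(x0):
    # forward pass: store the innovation at each step
    x, innovations = x0.copy(), []
    for y in Ys:
        x = M @ x
        innovations.append(y - H @ x)
    # backward pass: adjoint accumulation of the observation term
    g = numpy.zeros_like(x0)
    for d in reversed(innovations):
        g = M.T @ (g + H.T @ (RI @ d))
    return BI @ (x0 - xb) - g

xa = scipy.optimize.minimize(J, xb, jac=gradJ, method="L-BFGS-B").x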
-# ==============================================================================
-def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Standard Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    # ----------
-    Ht = HO["Tangent"].asMatrix(Xb)
-    Ha = HO["Adjoint"].asMatrix(Xb)
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        Mt = EM["Tangent"].asMatrix(Xb)
-        Ma = EM["Adjoint"].asMatrix(Xb)
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precompute the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = Mt @ Xn
-            if Cm is not None and Un is not None: # Warning: if Cm is also in M, this is a duplicate!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-            Pn_predicted = Q + Mt * (Pn * Ma)
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By principle, M = Id, Q = 0
-            Xn_predicted = Xn
-            Pn_predicted = Pn
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = Ht @ Xn_predicted
-            _Innovation  = Ynpu - HX_predicted
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = Ht @ Xn_predicted
-            _Innovation  = Ynpu - HX_predicted
-            if Cm is not None and Un is not None: # Warning: if Cm is also in H, this is a duplicate!
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-        Xn = Xn_predicted + Kn * _Innovation
-        Pn = Pn_predicted - Kn * Ht * Pn_predicted
-        #
-        Xa = Xn # Pointers
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
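For reference, one forecast/analysis cycle of the standard linear Kalman filter implemented by stdkf above, written as a minimal NumPy sketch with toy matrices and none of the ADAO storage machinery:

import numpy

def kalman_step(x, P, y, M, H, Q, R):
    # forecast:  x_f = M x,  P_f = M P M^T + Q
    x_f = M @ x
    P_f = M @ P @ M.T + Q
    # analysis:  K = P_f H^T (H P_f H^T + R)^-1,  x_a = x_f + K (y - H x_f),  P_a = (I - K H) P_f
    K   = P_f @ H.T @ numpy.linalg.inv(H @ P_f @ H.T + R)
    x_a = x_f + K @ (y - H @ x_f)
    P_a = P_f - K @ H @ P_f
    return x_a, P_a

# toy usage
x0 = numpy.zeros((2, 1)); P0 = numpy.eye(2)
M  = numpy.array([[1., 0.1], [0., 1.]]); Q = 0.01 * numpy.eye(2)
H  = numpy.array([[1., 0.]]);            R = numpy.array([[0.1]])
xa, Pa = kalman_step(x0, P0, numpy.array([[0.5]]), M, H, Q, R)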
-# ==============================================================================
-def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Unscented Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    L     = Xb.size
-    Alpha = selfA._parameters["Alpha"]
-    Beta  = selfA._parameters["Beta"]
-    if selfA._parameters["Kappa"] == 0:
-        if selfA._parameters["EstimationOf"] == "State":
-            Kappa = 0
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            Kappa = 3 - L
-    else:
-        Kappa = selfA._parameters["Kappa"]
-    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
-    Gamma  = math.sqrt( L + Lambda )
-    #
-    Ww = []
-    Ww.append( 0. )
-    for i in range(2*L):
-        Ww.append( 1. / (2.*(L + Lambda)) )
-    #
-    Wm = numpy.array( Ww )
-    Wm[0] = Lambda / (L + Lambda)
-    Wc = numpy.array( Ww )
-    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
-    #
-    # Operators
-    Hm = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        Mm = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precompute the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        if hasattr(B,"asfullmatrix"):
-            Pn = B.asfullmatrix(__n)
-        else:
-            Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
-        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
-        nbSpts = 2*Xn.size+1
-        #
-        XEtnnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
-                if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
-                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    XEtnnpi = XEtnnpi + Cm @ Un
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                # ---> By principle, M = Id, Q = 0
-                XEtnnpi = Xnp[:,point]
-            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
-        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
-        #
-        Xncm = ( XEtnnp * Wm ).sum(axis=1)
-        #
-        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
-        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
-        for point in range(nbSpts):
-            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
-        #
-        Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
-        #
-        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
-        #
-        Ynnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                Ynnpi = Hm( (Xnnp[:,point], None) )
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                Ynnpi = Hm( (Xnnp[:,point], Un) )
-            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
-        Ynnp = numpy.concatenate( Ynnp, axis=1 )
-        #
-        Yncm = ( Ynnp * Wm ).sum(axis=1)
-        #
-        Pyyn = R
-        Pxyn = 0.
-        for point in range(nbSpts):
-            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-        #
-        _Innovation  = Ynpu - Yncm.reshape((-1,1))
-        if selfA._parameters["EstimationOf"] == "Parameters":
-            if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pxyn * Pyyn.I
-        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
-        Pn = Pnm - Kn * Pyyn * Kn.T
-        #
-        Xa = Xn # Pointers (reference, not a copy)
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xncm )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xncm - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
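The uskf routine above builds the standard scaled unscented-transform weights from Alpha, Beta and Kappa before propagating the 2L+1 sigma points. The following stand-alone sketch reproduces only that weight construction with plain numpy; the helper name unscented_weights and the sample values are illustrative, not part of ADAO.

import numpy

def unscented_weights(L, Alpha, Beta, Kappa):
    # Scaling parameter and sigma-point spread, as in the uskf code above
    Lambda = float(Alpha**2) * (L + Kappa) - L
    Gamma  = numpy.sqrt(L + Lambda)
    # 2L+1 weights: the central point is specific, the 2L others are identical
    Wm = numpy.full(2*L + 1, 1. / (2.*(L + Lambda)))
    Wc = Wm.copy()
    Wm[0] = Lambda / (L + Lambda)
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
    return Gamma, Wm, Wc

# Example with a 3-dimensional state: the mean weights always sum to 1
Gamma, Wm, Wc = unscented_weights(L=3, Alpha=0.5, Beta=2., Kappa=0.)
assert abs(Wm.sum() - 1.) < 1.e-12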
-# ==============================================================================
-def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    3DVAR variational analysis with no inversion of B
-    """
-    #
-    # Initializations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    Ha = HO["Adjoint"].appliedInXTo
-    #
-    BT = B.getT()
-    RI = R.getI()
-    #
-    Xini = numpy.zeros(Xb.size)
-    #
-    # Definition of the cost function
-    # ------------------------------
-    def CostFunction(v):
-        _V = numpy.asarray(v).reshape((-1,1))
-        _X = Xb + (B @ _V).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( _X )
-        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
-        _Innovation = Y - _HX
-        if selfA._toStore("SimulatedObservationAtCurrentState") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        #
-        Jb  = float( 0.5 * _V.T * (BT * _V) )
-        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-        J   = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(v):
-        _V = numpy.asarray(v).reshape((-1,1))
-        _X = Xb + (B @ _V).reshape((-1,1))
-        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
-        GradJb  = BT * _V
-        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
-        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the functional
-    # --------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Correction to work around a TNC bug on the returned Minimum
-    # ----------------------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    else:
-        Minimum = Xb + B * Minimum.reshape((-1,1)) # Not @
-    #
-    Xa = Minimum
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        BI = B.getI()
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional computations and/or storage
-    # ---------------------------------------
-    HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
     #
     return 0
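The van3dvar routine deleted above (now provided by daAlgorithms/Atoms/van3dvar.py) minimizes the 3DVAR functional under the change of variables x = xb + B v, so that only B, and never its inverse, has to be applied. Below is a minimal self-contained sketch of that formulation on a toy linear problem with dense numpy arrays and scipy.optimize; the matrices H, B, R and the vectors xb, y are illustrative placeholders, not ADAO objects, and the gradient shown is the exact gradient of this toy cost only.

import numpy
import scipy.optimize

# Toy setup: observation operator H, background xb, observation y, covariances B and R
H  = numpy.array([[1., 0., 0.], [0., 1., 1.]])
xb = numpy.array([1., 2., 3.])
y  = numpy.array([1.2, 4.9])
B  = numpy.diag([1., 1., 1.])
R  = numpy.diag([0.1, 0.1])
RI = numpy.linalg.inv(R)

def cost(v):
    # J(v) = 1/2 v^T B v + 1/2 (y - H(xb + B v))^T R^-1 (y - H(xb + B v))
    x = xb + B @ v
    d = y - H @ x
    return 0.5 * v @ (B @ v) + 0.5 * d @ (RI @ d)

def grad(v):
    # Gradient of the toy cost above with respect to the control variable v
    x = xb + B @ v
    d = y - H @ x
    return B @ v - B.T @ (H.T @ (RI @ d))

v0 = numpy.zeros(xb.size)
v_opt, J_opt, info = scipy.optimize.fmin_l_bfgs_b(cost, v0, fprime=grad)
xa = xb + B @ v_opt  # analysis expressed back in the original state variable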
 
index 8e6e3555cc418d46e0dc86c74d41a2dd1e5f3903..f155146926ee0f3295cff795c97cbd168e91ebc0 100644 (file)
@@ -326,6 +326,108 @@ class Persistence(object):
         except:
             raise TypeError("Base type is incompatible with numpy")
 
+    def norms(self, _ord=None):
+        """
+        Norm (_ord: see numpy.linalg.norm)
+
+        Returns the series containing, at each step, the norm of the data at
+        that step. The base type must be compatible with the numpy elementary
+        types.
+        """
+        try:
+            return [numpy.linalg.norm(item, _ord) for item in self.__values]
+        except:
+            raise TypeError("Base type is incompatible with numpy")
+
+    def maes(self, _predictor=None):
+        """
+        Mean Absolute Error (MAE)
+        mae(dX) = 1/n sum(|dX_i|)
+
+        Returns the series containing, at each step, the MAE of the data at
+        that step. The base type must be compatible with the numpy elementary
+        types. It is meant for deviation or increment variables when the
+        predictor is None; otherwise it is applied to the difference between
+        the data at the step and the predictor at the same step.
+        """
+        if _predictor is None:
+            try:
+                return [numpy.mean(numpy.abs(item)) for item in self.__values]
+            except:
+                raise TypeError("Base type is incompatible with numpy")
+        else:
+            if len(_predictor) != len(self.__values):
+                raise ValueError("Predictor number of steps is incompatible with the values")
+            for i, item in enumerate(self.__values):
+                if numpy.asarray(_predictor[i]).size != numpy.asarray(item).size:
+                    raise ValueError("Predictor size at step %i is incompatible with the values"%i)
+            try:
+                return [numpy.mean(numpy.abs(numpy.ravel(item) - numpy.ravel(_predictor[i]))) for i, item in enumerate(self.__values)]
+            except:
+                raise TypeError("Base type is incompatible with numpy")
+
+    def mses(self, _predictor=None):
+        """
+        Mean-Square Error (MSE) or Mean-Square Deviation (MSD)
+        mse(dX) = 1/n sum(dX_i**2)
+
+        Returns the series containing, at each step, the MSE of the data at
+        that step. The base type must be compatible with the numpy elementary
+        types. It is meant for deviation or increment variables when the
+        predictor is None; otherwise it is applied to the difference between
+        the data at the step and the predictor at the same step.
+        """
+        if _predictor is None:
+            try:
+                __n = self.shape()[0]
+                return [(numpy.linalg.norm(item)**2 / __n) for item in self.__values]
+            except:
+                raise TypeError("Base type is incompatible with numpy")
+        else:
+            if len(_predictor) != len(self.__values):
+                raise ValueError("Predictor number of steps is incompatible with the values")
+            for i, item in enumerate(self.__values):
+                if numpy.asarray(_predictor[i]).size != numpy.asarray(item).size:
+                    raise ValueError("Predictor size at step %i is incompatible with the values"%i)
+            try:
+                __n = self.shape()[0]
+                return [(numpy.linalg.norm(numpy.ravel(item) - numpy.ravel(_predictor[i]))**2 / __n) for i, item in enumerate(self.__values)]
+            except:
+                raise TypeError("Base type is incompatible with numpy")
+
+    msds = mses # Mean-Square Deviation (MSD=MSE)
+
+    def rmses(self, _predictor=None):
+        """
+        Root-Mean-Square Error (RMSE) or Root-Mean-Square Deviation (RMSD)
+        rmse(dX) = sqrt( 1/n sum(dX_i**2) ) = sqrt( mse(dX) )
+
+        Returns the series containing, at each step, the RMSE of the data at
+        that step. The base type must be compatible with the numpy elementary
+        types. It is meant for deviation or increment variables when the
+        predictor is None; otherwise it is applied to the difference between
+        the data at the step and the predictor at the same step.
+        """
+        if _predictor is None:
+            try:
+                __n = self.shape()[0]
+                return [(numpy.linalg.norm(item) / math.sqrt(__n)) for item in self.__values]
+            except:
+                raise TypeError("Base type is incompatible with numpy")
+        else:
+            if len(_predictor) != len(self.__values):
+                raise ValueError("Predictor number of steps is incompatible with the values")
+            for i, item in enumerate(self.__values):
+                if numpy.asarray(_predictor[i]).size != numpy.asarray(item).size:
+                    raise ValueError("Predictor size at step %i is incompatible with the values"%i)
+            try:
+                __n = self.shape()[0]
+                return [(numpy.linalg.norm(numpy.ravel(item) - numpy.ravel(_predictor[i])) / math.sqrt(__n)) for i, item in enumerate(self.__values)]
+            except:
+                raise TypeError("Base type is incompatible with numpy")
+
+    rmsds = rmses # Root-Mean-Square Deviation (RMSD=RMSE)
+
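The norms, maes, mses and rmses accessors added above all return one scalar per stored step, following the formulas given in their docstrings. As an illustrative stand-alone check of those formulas with plain numpy on a list of per-step vectors (independent of the Persistence class itself):

import numpy

steps     = [numpy.array([1., -2., 2.]), numpy.array([0., 3., -4.])]
predictor = [numpy.array([1.,  0., 0.]), numpy.array([0., 0.,  0.])]

# Without a predictor: statistics of the raw values at each step
norms = [numpy.linalg.norm(v) for v in steps]          # [3.0, 5.0]
maes  = [numpy.mean(numpy.abs(v)) for v in steps]      # [5/3, 7/3]
mses  = [numpy.mean(v**2) for v in steps]              # [3.0, 25/3]
rmses = [numpy.sqrt(numpy.mean(v**2)) for v in steps]  # square roots of the mses values

# With a predictor: same statistics applied to the step-wise differences
maes_p = [numpy.mean(numpy.abs(v - p)) for v, p in zip(steps, predictor)]

On a Persistence object storing the same values, one would expect maes(), mses(), rmses() and their predictor variants to return the same lists.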
     def __preplots(self,
                    title    = "",
                    xlabel   = "",