SALOME platform Git repositories - modules/adao.git/commitdiff
Improvement and extension of EnKF algorithm
author    Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
          Tue, 22 Dec 2020 20:20:51 +0000 (21:20 +0100)
committer Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
          Tue, 22 Dec 2020 20:26:47 +0000 (21:26 +0100)
src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
src/daComposant/daCore/NumericObjects.py

index d2358285fd26dd12f83525fd7bf8ea6b09bd90fa..c24bc9400226e917e17cf1c8c00764e60c236460 100644 (file)
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
 import logging
-from daCore import BasicObjects, PlatformInfo
-import numpy, math
-mfp = PlatformInfo.PlatformInfo().MaximumPrecision()
+from daCore import BasicObjects, NumericObjects
+import numpy
 
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
     def __init__(self):
         BasicObjects.Algorithm.__init__(self, "ENSEMBLEKALMANFILTER")
+        self.defineRequiredParameter(
+            name     = "Minimizer",
+            default  = "StochasticEnKF",
+            typecast = str,
+            message  = "Minimizer used",
+            listval  = ["StochasticEnKF", "DeterministicEnKF", "ETKF"],
+            )
         self.defineRequiredParameter(
             name     = "NumberOfMembers",
             default  = 100,
@@ -99,204 +105,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        if self._parameters["EstimationOf"] == "Parameters":
-            self._parameters["StoreInternalVariables"] = True
-        #
-        # Operators
-        # ---------
-        H = HO["Direct"].appliedControledFormTo
-        #
-        if self._parameters["EstimationOf"] == "State":
-            M = EM["Direct"].appliedControledFormTo
-        #
-        if CM is not None and "Tangent" in CM and U is not None:
-            Cm = CM["Tangent"].asMatrix(Xb)
-        else:
-            Cm = None
-        #
-        # Number of steps equal to the number of observation steps
-        # ---------------------------------------------------------
-        if hasattr(Y,"stepnumber"):
-            duration = Y.stepnumber()
-            __p = numpy.cumprod(Y.shape())[-1]
+        if self._parameters["Minimizer"] == "StochasticEnKF":
+            NumericObjects.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        elif self._parameters["Minimizer"] in ["DeterministicEnKF", "ETKF"]:
+            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
         else:
-            duration = 2
-            __p = numpy.array(Y).size
-        #
-        # Precompute the inverses of B and R
-        # ----------------------------------
-        if self._parameters["StoreInternalVariables"] \
-            or self._toStore("CostFunctionJ") \
-            or self._toStore("CostFunctionJb") \
-            or self._toStore("CostFunctionJo") \
-            or self._toStore("CurrentOptimum") \
-            or self._toStore("APosterioriCovariance"):
-            BI = B.getI()
-            RI = R.getI()
-        # BIdemi = B.choleskyI()
-        # RIdemi = R.choleskyI()
-        #
-        # Initialization
-        # --------------
-        __n = Xb.size
-        __m = self._parameters["NumberOfMembers"]
-        Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
-        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
-        else:                         Pn = B
-        if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
-        else:                         Rn = R
-        if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
-        else:                         Qn = Q
-        #
-        if len(self.StoredVariables["Analysis"])==0 or not self._parameters["nextStep"]:
-            self.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
-            if self._toStore("APosterioriCovariance"):
-                self.StoredVariables["APosterioriCovariance"].store( Pn )
-                covarianceXa = Pn
-        #
-        Xa               = Xb
-        XaMin            = Xb
-        previousJMinimum = numpy.finfo(float).max
-        #
-        # Pre-allocation
-        Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
-        HX_predicted = numpy.asmatrix(numpy.zeros((__p,__m)))
-        #
-        for step in range(duration-1):
-            if hasattr(Y,"store"):
-                Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
-            else:
-                Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
-            #
-            if U is not None:
-                if hasattr(U,"store") and len(U)>1:
-                    Un = numpy.asmatrix(numpy.ravel( U[step] )).T
-                elif hasattr(U,"store") and len(U)==1:
-                    Un = numpy.asmatrix(numpy.ravel( U[0] )).T
-                else:
-                    Un = numpy.asmatrix(numpy.ravel( U )).T
-            else:
-                Un = None
-            #
-            if self._parameters["EstimationOf"] == "State":
-                for i in range(__m):
-                    qi = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__n), Qn, (1,1,1))).T
-                    Xn_predicted[:,i] = numpy.asmatrix(numpy.ravel( M((Xn[:,i], Un)) )).T + qi
-                    HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
-                if Cm is not None and Un is not None: # Warning: if Cm is also inside M, it is applied twice!
-                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                    Xn_predicted = Xn_predicted + Cm * Un
-            elif self._parameters["EstimationOf"] == "Parameters":
-                # ---> By convention, M = Id, Q = 0
-                Xn_predicted = Xn
-            #
-            Xfm = numpy.asmatrix(numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))).T
-            Hfm = numpy.asmatrix(numpy.ravel(HX_predicted.mean(axis=1, dtype=mfp).astype('float'))).T
-            #
-            PfHT, HPfHT = 0., 0.
-            for i in range(__m):
-                Exfi = Xn_predicted[:,i] - Xfm
-                Eyfi = HX_predicted[:,i] - Hfm
-                PfHT  += Exfi * Eyfi.T
-                HPfHT += Eyfi * Eyfi.T
-            PfHT  = (1./(__m-1)) * PfHT
-            HPfHT = (1./(__m-1)) * HPfHT
-            K     = PfHT * ( R + HPfHT ).I
-            del PfHT, HPfHT
-            #
-            for i in range(__m):
-                ri = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__p), Rn, (1,1,1))).T
-                Xn[:,i] = Xn_predicted[:,i] + K * (Ynpu + ri - HX_predicted[:,i])
-            #
-            Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
-            #
-            if self._parameters["StoreInternalVariables"] \
-                or self._toStore("CostFunctionJ") \
-                or self._toStore("CostFunctionJb") \
-                or self._toStore("CostFunctionJo") \
-                or self._toStore("APosterioriCovariance") \
-                or self._toStore("InnovationAtCurrentAnalysis") \
-                or self._toStore("SimulatedObservationAtCurrentAnalysis") \
-                or self._toStore("SimulatedObservationAtCurrentOptimum"):
-                _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
-                _Innovation = Ynpu - _HXa
-            #
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) )
-            # ---> with analysis
-            self.StoredVariables["Analysis"].store( Xa )
-            if self._toStore("SimulatedObservationAtCurrentAnalysis"):
-                self.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-            if self._toStore("InnovationAtCurrentAnalysis"):
-                self.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-            # ---> with current state
-            if self._parameters["StoreInternalVariables"] \
-                or self._toStore("CurrentState"):
-                self.StoredVariables["CurrentState"].store( Xn )
-            if self._toStore("ForecastState"):
-                self.StoredVariables["ForecastState"].store( Xn_predicted )
-            if self._toStore("BMA"):
-                self.StoredVariables["BMA"].store( Xn_predicted - Xa )
-            if self._toStore("InnovationAtCurrentState"):
-                self.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
-            if self._toStore("SimulatedObservationAtCurrentState") \
-                or self._toStore("SimulatedObservationAtCurrentOptimum"):
-                self.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-            # ---> others
-            if self._parameters["StoreInternalVariables"] \
-                or self._toStore("CostFunctionJ") \
-                or self._toStore("CostFunctionJb") \
-                or self._toStore("CostFunctionJo") \
-                or self._toStore("CurrentOptimum") \
-                or self._toStore("APosterioriCovariance"):
-                Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-                Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
-                J   = Jb + Jo
-                self.StoredVariables["CostFunctionJb"].store( Jb )
-                self.StoredVariables["CostFunctionJo"].store( Jo )
-                self.StoredVariables["CostFunctionJ" ].store( J )
-                #
-                if self._toStore("IndexOfOptimum") \
-                    or self._toStore("CurrentOptimum") \
-                    or self._toStore("CostFunctionJAtCurrentOptimum") \
-                    or self._toStore("CostFunctionJbAtCurrentOptimum") \
-                    or self._toStore("CostFunctionJoAtCurrentOptimum") \
-                    or self._toStore("SimulatedObservationAtCurrentOptimum"):
-                    IndexMin = numpy.argmin( self.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-                if self._toStore("IndexOfOptimum"):
-                    self.StoredVariables["IndexOfOptimum"].store( IndexMin )
-                if self._toStore("CurrentOptimum"):
-                    self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["Analysis"][IndexMin] )
-                if self._toStore("SimulatedObservationAtCurrentOptimum"):
-                    self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-                if self._toStore("CostFunctionJbAtCurrentOptimum"):
-                    self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] )
-                if self._toStore("CostFunctionJoAtCurrentOptimum"):
-                    self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJo"][IndexMin] )
-                if self._toStore("CostFunctionJAtCurrentOptimum"):
-                    self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( self.StoredVariables["CostFunctionJ" ][IndexMin] )
-            if self._toStore("APosterioriCovariance"):
-                Pn = 0.
-                for i in range(__m):
-                    Eai = Xn[:,i] - Xa
-                    Pn += Eai * Eai.T
-                Pn  = (1./(__m-1)) * Pn
-                self.StoredVariables["APosterioriCovariance"].store( Pn )
-            if self._parameters["EstimationOf"] == "Parameters" \
-                and J < previousJMinimum:
-                previousJMinimum    = J
-                XaMin               = Xa
-                if self._toStore("APosterioriCovariance"):
-                    covarianceXaMin = Pn
-        #
-        # Additional final storage of the optimum for parameter estimation
-        # -----------------------------------------------------------------
-        if self._parameters["EstimationOf"] == "Parameters":
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) )
-            self.StoredVariables["Analysis"].store( XaMin )
-            if self._toStore("APosterioriCovariance"):
-                self.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-            if self._toStore("BMA"):
-                self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+            raise ValueError("Unknown value for \"Minimizer\": %s"%self._parameters["Minimizer"])
         #
         self._post_run(HO)
         return 0
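
With this change, the EnKF variant is selected through the new "Minimizer" parameter of the algorithm. A minimal usage sketch, assuming the standard adaoBuilder textual interface of ADAO; the observation operator, data and sizes below are illustrative placeholders, not part of this commit:

    # Illustrative sketch: choosing the EnKF variant via "Minimizer".
    # All data here are placeholders, not taken from this commit.
    import numpy
    from adao import adaoBuilder

    case = adaoBuilder.New()
    case.set( 'AlgorithmParameters',
        Algorithm  = 'EnsembleKalmanFilter',
        Parameters = {
            'Minimizer'       : 'ETKF', # or 'StochasticEnKF' (default), 'DeterministicEnKF'
            'NumberOfMembers' : 50,
            'EstimationOf'    : 'State',
            },
        )
    case.set( 'Background',          Vector = [0., 0., 0.] )
    case.set( 'BackgroundError',     ScalarSparseMatrix = 1. )
    case.set( 'Observation',         Vector = [0.5, 1.5, 2.5] )
    case.set( 'ObservationError',    ScalarSparseMatrix = 0.1 )
    case.set( 'ObservationOperator', Matrix = numpy.identity(3) )
    case.execute()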
index 97b0e36982ab00eff68ad093d11724b94965b446..8acdd77e8332d00420f7e64016d26140427b2e21 100644 (file)
@@ -507,6 +507,417 @@ def mmqr(
     #
     return variables, Ecart, [n,p,iteration,increment,0]
 
+# ==============================================================================
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Stochastic EnKF (Evensen 1994, Burgers 1998)
+
+    selfA is identical to the "self" of the calling algorithm and carries
+    its values.
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    # ---------
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Number of steps equal to the number of observation steps
+    # ---------------------------------------------------------
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    # ----------------------------------
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    # Initialization
+    # --------------
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
+    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+    else:                         Pn = B
+    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+    else:                         Rn = R
+    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+    else:                         Qn = Q
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+            covarianceXa = Pn
+    #
+    previousJMinimum = numpy.finfo(float).max
+    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Number of J values already stored, offset for IndexMin
+    #
+    # Pre-allocation
+    Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
+    HX_predicted = numpy.asmatrix(numpy.zeros((__p,__m)))
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+        else:
+            Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+            else:
+                Un = numpy.asmatrix(numpy.ravel( U )).T
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            for i in range(__m):
+                qi = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__n), Qn, (1,1,1))).T
+                Xn_predicted[:,i] = numpy.asmatrix(numpy.ravel( M((Xn[:,i], Un)) )).T + qi
+                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
+        if Cm is not None and Un is not None: # Warning: if Cm is also inside M, it is applied twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm * Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By convention, M = Id, Q = 0
+            Xn_predicted = Xn
+            for i in range(__m):
+                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
+        #
+        # Mean of forecast and observation of forecast
+        Xfm  = numpy.asmatrix(numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))).T
+        Hfm  = numpy.asmatrix(numpy.ravel(HX_predicted.mean(axis=1, dtype=mfp).astype('float'))).T
+        #
+        PfHT, HPfHT = 0., 0.
+        for i in range(__m):
+            Exfi = Xn_predicted[:,i] - Xfm
+            Eyfi = HX_predicted[:,i] - Hfm
+            PfHT  += Exfi * Eyfi.T
+            HPfHT += Eyfi * Eyfi.T
+        PfHT  = (1./(__m-1)) * PfHT
+        HPfHT = (1./(__m-1)) * HPfHT
+        K     = PfHT * ( R + HPfHT ).I
+        del PfHT, HPfHT
+        #
+        for i in range(__m):
+            ri = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__p), Rn, (1,1,1))).T
+            Xn[:,i] = Xn_predicted[:,i] + K * (Ynpu + ri - HX_predicted[:,i])
+        #
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
+            Pn = Eai @ Eai.T
+            Pn = 0.5 * (Pn + Pn.T)
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = Pn
+    #
+    # Additional final storage of the optimum for parameter estimation
+    # -----------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
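
For reference, the analysis step implemented by senkf above is the classical perturbed-observations update: the forecast anomalies give the cross covariance PfHT and the projected covariance HPfHT, the gain is K = PfHT (R + HPfHT)^(-1), and each member is then updated against an observation perturbed by a fresh draw of R-distributed noise. A self-contained NumPy sketch of one such update on synthetic arrays (names and sizes are illustrative, not ADAO's):

    # Illustrative sketch of one stochastic EnKF analysis step
    # (perturbed observations), on synthetic data.
    import numpy

    rng = numpy.random.default_rng(0)
    n, p, m = 4, 2, 20                       # state size, obs size, members
    H  = rng.standard_normal((p, n))         # linear observation operator
    R  = 0.1 * numpy.eye(p)                  # observation error covariance
    Xf = rng.standard_normal((n, m))         # forecast ensemble, one column per member
    y  = rng.standard_normal(p)              # observation

    HXf = H @ Xf
    Ex  = Xf  - Xf.mean(axis=1, keepdims=True)    # state anomalies
    Ey  = HXf - HXf.mean(axis=1, keepdims=True)   # observed anomalies
    PfHT  = Ex @ Ey.T / (m - 1)              # cov(Xf, H Xf)
    HPfHT = Ey @ Ey.T / (m - 1)              # cov(H Xf, H Xf)
    K = PfHT @ numpy.linalg.inv(R + HPfHT)   # Kalman gain
    # One perturbed observation per member, then the member-wise update
    Yp = y[:, None] + rng.multivariate_normal(numpy.zeros(p), R, size=m).T
    Xa = Xf + K @ (Yp - HXf)                 # analysis ensemble
    print(Xa.mean(axis=1))                   # analysis mean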
+# ==============================================================================
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
+
+    selfA is identical to the "self" of the calling algorithm and carries
+    its values.
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    # ---------
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Number of steps equal to the number of observation steps
+    # ---------------------------------------------------------
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    # ----------------------------------
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    RIdemi = R.choleskyI()
+    #
+    # Initialization
+    # --------------
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
+    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+    else:                         Pn = B
+    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+    else:                         Rn = R
+    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+    else:                         Qn = Q
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+            covarianceXa = Pn
+    #
+    previousJMinimum = numpy.finfo(float).max
+    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Number of J values already stored, offset for IndexMin
+    #
+    # Pre-allocation
+    Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
+    HX_predicted = numpy.asmatrix(numpy.zeros((__p,__m)))
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+        else:
+            Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+            else:
+                Un = numpy.asmatrix(numpy.ravel( U )).T
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            for i in range(__m):
+                qi = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__n), Qn, (1,1,1))).T
+                Xn_predicted[:,i] = numpy.asmatrix(numpy.ravel( M((Xn[:,i], Un)) )).T + qi
+                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
+        if Cm is not None and Un is not None: # Warning: if Cm is also inside M, it is applied twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm * Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By convention, M = Id, Q = 0
+            Xn_predicted = Xn
+            for i in range(__m):
+                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
+        #
+        # Mean of forecast and observation of forecast
+        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float')
+        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float')
+        #
+        EaX   = (Xn_predicted - Xfm.reshape((__n,-1))) / numpy.sqrt(__m-1)
+        EaHX  = (HX_predicted - Hfm.reshape((__p,-1))) / numpy.sqrt(__m-1)
+        #
+        mS    = RIdemi * EaHX
+        delta = RIdemi * ( Ynpu.reshape((__p,-1)) - Hfm.reshape((__p,-1)) )
+        mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
+        vw    = mT @ mS.transpose() @ delta
+        #
+        Tdemi = numpy.linalg.cholesky(mT)
+        mU    = numpy.eye(__m)
+        #
+        Xn = Xfm.reshape((__n,-1)) + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
+        #
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
+            Pn = Eai @ Eai.T
+            Pn = 0.5 * (Pn + Pn.T)
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = Pn
+    #
+    # Additional final storage of the optimum for parameter estimation
+    # -----------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
 # ==============================================================================
 if __name__ == "__main__":
     print('\n AUTODIAGNOSTIC\n')
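
The deterministic variant added in etkf avoids perturbing the observations: the update is computed in the m-dimensional ensemble space, where mS = R^(-1/2) EaHX, the transform mT = (I + mS^T mS)^(-1) yields the weights vw that shift the ensemble mean, and the Cholesky factor Tdemi of mT rescales the forecast anomalies. A standalone NumPy sketch mirroring the mS / mT / Tdemi steps above on synthetic arrays (names, sizes, and the square-root construction of R^(-1/2) are illustrative assumptions, not ADAO's objects):

    # Illustrative sketch of the ETKF analysis transform on synthetic data.
    import numpy

    rng = numpy.random.default_rng(0)
    n, p, m = 4, 2, 20                       # state size, obs size, members
    H  = rng.standard_normal((p, n))         # linear observation operator
    R  = 0.1 * numpy.eye(p)                  # observation error covariance
    Xf = rng.standard_normal((n, m))         # forecast ensemble
    y  = rng.standard_normal(p)              # observation

    HXf = H @ Xf
    xfm, hfm = Xf.mean(axis=1), HXf.mean(axis=1)
    EaX  = (Xf  - xfm[:, None]) / numpy.sqrt(m - 1)   # normalized state anomalies
    EaHX = (HXf - hfm[:, None]) / numpy.sqrt(m - 1)   # normalized observed anomalies

    RIdemi = numpy.linalg.inv(numpy.linalg.cholesky(R))   # a square root of R^(-1)
    S     = RIdemi @ EaHX
    delta = RIdemi @ (y - hfm)
    T     = numpy.linalg.inv(numpy.eye(m) + S.T @ S)  # ensemble-space transform
    w     = T @ S.T @ delta                           # weights for the mean update
    Tdemi = numpy.linalg.cholesky(T)                  # square root of T

    # Analysis ensemble: shifted mean plus transformed anomalies
    Xa = xfm[:, None] + EaX @ (w[:, None] + numpy.sqrt(m - 1) * Tdemi)
    print(Xa.mean(axis=1))                            # analysis mean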