SALOME platform Git repositories - modules/adao.git/commitdiff
Documentation and C*KF use improvements
author    Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
          Sun, 12 Sep 2021 04:55:26 +0000 (06:55 +0200)
committer Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
          Sun, 12 Sep 2021 04:55:26 +0000 (06:55 +0200)
doc/en/ref_algorithm_ExtendedKalmanFilter.rst
doc/en/ref_algorithm_UnscentedKalmanFilter.rst
doc/en/snippets/Variant_UKF.rst [new file with mode: 0644]
doc/fr/ref_algorithm_ExtendedKalmanFilter.rst
doc/fr/ref_algorithm_UnscentedKalmanFilter.rst
doc/fr/snippets/Variant_UKF.rst [new file with mode: 0644]
src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
src/daComposant/daCore/NumericObjects.py

index 900b3fa1008878e551ffe5cbbc6cf34eb886cda2..39fdb628a4b78aa8ae1566b47edf94cbca440a1e 100644 (file)
@@ -57,6 +57,15 @@ In case of really non-linear operators, one can easily use the
 adapted to non-linear behavior but more costly. One can verify the linearity of
 the operators with the help of the :ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; EKF
+    pair: Variant ; CEKF
+
+A distinction is made between the extended Kalman filter that takes bounds
+on the states into account (the variant named "CEKF", which is recommended
+and used by default), and the extended Kalman filter conducted without any
+constraint (the variant named "EKF", which is not recommended).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
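As an illustration of the new "Variant" key, a minimal case using the
adaoBuilder textual interface could look as follows (a sketch with
illustrative values, not part of this commit; it assumes the standard ADAO
Python interface and a one-component state)::

    from adao import adaoBuilder

    case = adaoBuilder.New()
    case.set( 'AlgorithmParameters',
        Algorithm  = 'ExtendedKalmanFilter',
        Parameters = {
            "Variant" : "CEKF",        # bound-constrained variant (the default)
            "Bounds"  : [[0., 10.]],   # one [min, max] pair per state component
            },
        )
    case.set( 'Background',          Vector = [5.] )
    case.set( 'BackgroundError',     ScalarSparseMatrix = 1. )
    case.set( 'Observation',         Vector = [7.2] )
    case.set( 'ObservationError',    ScalarSparseMatrix = 0.1 )
    case.set( 'ObservationOperator', Matrix = [[1.]] )
    case.set( 'EvolutionModel',      Matrix = [[1.]] )
    case.set( 'EvolutionError',      ScalarSparseMatrix = 0.01 )
    case.execute()
    print( case.get("Analysis")[-1] )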
index 828843cdc3d4d9b736bff912dffa483961f7439f..885f7f26f14cdcaeefc4c89a6712329eb513fbf3 100644 (file)
@@ -52,6 +52,15 @@ In case of linear or "slightly" non-linear operators, one can easily use the
 to evaluate on small systems. One can verify the linearity of the operators
 with the help of the :ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+A difference is made between the "unscented" Kalman filter taking into account
+bounds on the states (the variant named "CUKF", which is recommended and used
+by default), and the "unscented" Kalman filter conducted without any constraint
+(the variant named "UKF", which is not recommended).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
@@ -117,6 +126,8 @@ StoreSupplementaryCalculations
   Example :
   ``{"StoreSupplementaryCalculations":["BMA", "CurrentState"]}``
 
+.. include:: snippets/Variant_UKF.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo04.rst
 
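Since the paragraph above recommends checking the linearity of the operators
before choosing a filter, a sketch of such a check with the LinearityTest
algorithm may help (hypothetical operator and values, standard textual
interface assumed)::

    import numpy
    from adao import adaoBuilder

    def QuadFunction( x ):
        # Deliberately non-linear operator, for illustration only
        return numpy.ravel( x )**2

    case = adaoBuilder.New()
    case.set( 'CheckingPoint',       Vector = [1., 2., 3.] )
    case.set( 'ObservationOperator', OneFunction = QuadFunction )
    case.set( 'AlgorithmParameters',
        Algorithm  = 'LinearityTest',
        Parameters = {"ResiduFormula":"Taylor"},
        )
    case.execute()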
diff --git a/doc/en/snippets/Variant_UKF.rst b/doc/en/snippets/Variant_UKF.rst
new file mode 100644 (file)
index 0000000..8c22751
--- /dev/null
@@ -0,0 +1,14 @@
+.. index::
+    single: Variant
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+Variant
+  *Predefined name*. This key allows one to choose one of the possible
+  variants of the main algorithm. The default variant is the constrained
+  version "CUKF" of the original algorithm "UKF", and the possible choices are
+  "UKF" (Unscented Kalman Filter),
+  "CUKF" (Constrained Unscented Kalman Filter).
+
+  Example :
+  ``{"Variant":"CUKF"}``
index e680d49daed61aedd1feadfc47ccf37334a9a15a..9fc7dbe2a448fd07b7c0a74fe8c963f2c543225a 100644 (file)
@@ -58,6 +58,15 @@ largement plus adaptés aux comportements non-linéaires mais plus coûteux. On
 peut vérifier la linéarité des opérateurs à l'aide de
 l':ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; EKF
+    pair: Variant ; CEKF
+
+On fait une différence entre le filtre de Kalman étendu tenant compte de
+bornes sur les états (la variante nommée "CEKF", qui est recommandée et qui est
+utilisée par défaut), et le filtre de Kalman étendu conduit sans
+aucune contrainte (la variante nommée "EKF", qui n'est pas recommandée).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
index 25338d7e137abc6fafe766ad2f7d54da84a4c949..dbcb6866f1045395ab6d350e3f866fd9c24aca14 100644 (file)
@@ -53,6 +53,15 @@ l':ref:`section_ref_algorithm_KalmanFilter`, qui sont souvent largement moins
 coûteux en évaluation sur de petits systèmes. On peut vérifier la linéarité des
 opérateurs à l'aide de l':ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+On fait une différence entre le filtre de Kalman "unscented" tenant compte de
+bornes sur les états (la variante nommée "CUKF", qui est recommandée et qui est
+utilisée par défaut), et le filtre de Kalman "unscented" conduit sans
+aucune contrainte (la variante nommée "UKF", qui n'est pas recommandée).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
@@ -119,6 +128,8 @@ StoreSupplementaryCalculations
   Exemple :
   ``{"StoreSupplementaryCalculations":["BMA", "CurrentState"]}``
 
+.. include:: snippets/Variant_UKF.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo04.rst
 
diff --git a/doc/fr/snippets/Variant_UKF.rst b/doc/fr/snippets/Variant_UKF.rst
new file mode 100644 (file)
index 0000000..6f9d4a8
--- /dev/null
@@ -0,0 +1,14 @@
+.. index::
+    single: Variant
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+Variant
+  *Nom prédéfini*. Cette clé permet de choisir l'une des variantes possibles
+  pour l'algorithme principal. La variante par défaut est la version contrainte
+  "CUKF" de l'algorithme original "UKF", et les choix possibles sont
+  "UKF" (Unscented Kalman Filter),
+  "CUKF" (Constrained Unscented Kalman Filter).
+
+  Exemple :
+  ``{"Variant":"CUKF"}``
index a8904ba0261fde19c9a311c2d4fcd7ba76ed4721..addb56c935c23def2cb31e853fc0c14ebbc03e72 100644 (file)
@@ -29,13 +29,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         BasicObjects.Algorithm.__init__(self, "UNSCENTEDKALMANFILTER")
         self.defineRequiredParameter(
             name     = "Variant",
-            default  = "UKF",
+            default  = "CUKF",
             typecast = str,
             message  = "Variant ou formulation de la méthode",
             listval  = [
                 "UKF",
-                ],
-            listadv  = [
                 "CUKF",
                 ],
             )
@@ -139,8 +137,19 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
         #--------------------------
-        NumericObjects.uckf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        # Unconstrained UKF
+        #--------------------------
+        if   self._parameters["Variant"] == "UKF":
+            NumericObjects.uskf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        #
+        #--------------------------
+        # Constrained UKF (default)
+        elif self._parameters["Variant"] == "CUKF":
+            NumericObjects.uckf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        #
         #--------------------------
+        else:
+            raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
         #
         self._post_run(HO)
         return 0
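The if/elif chain keeps the two variants clearly separated. If further
variants are added later, a table-driven dispatch is a common alternative;
a hypothetical sketch (the table and helper are illustrative, only uskf and
uckf come from this commit)::

    _VARIANTS = {
        "UKF"  : NumericObjects.uskf,   # unconstrained
        "CUKF" : NumericObjects.uckf,   # bound-constrained (default)
        }

    def _runVariant( selfA, variant, *args ):
        try:
            return _VARIANTS[variant]( selfA, *args )
        except KeyError:
            raise ValueError("Error in Variant name: %s"%variant)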
index 2aa13c91d3bc3ed531a2dbe3ccba3032e34f84d2..39f96e72ce92664950a0ccc2bbecf6046acabc22 100644 (file)
@@ -3931,6 +3931,239 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     return 0
 
+# ==============================================================================
+def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Unscented Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    L     = Xb.size
+    Alpha = selfA._parameters["Alpha"]
+    Beta  = selfA._parameters["Beta"]
+    if selfA._parameters["Kappa"] == 0:
+        if selfA._parameters["EstimationOf"] == "State":
+            Kappa = 0
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            Kappa = 3 - L
+    else:
+        Kappa = selfA._parameters["Kappa"]
+    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
+    Gamma  = math.sqrt( L + Lambda )
+    #
+    Ww = []
+    Ww.append( 0. )
+    for i in range(2*L):
+        Ww.append( 1. / (2.*(L + Lambda)) )
+    #
+    Wm = numpy.array( Ww )
+    Wm[0] = Lambda / (L + Lambda)
+    Wc = numpy.array( Ww )
+    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
+    #
+    # Operators
+    Hm = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mm = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        if hasattr(B,"asfullmatrix"):
+            Pn = B.asfullmatrix(__n)
+        else:
+            Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        Pndemi = numpy.linalg.cholesky(Pn)
+        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
+        nbSpts = 2*Xn.size+1
+        #
+        XEtnnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
+                if Cm is not None and Un is not None: # Warning: if Cm is also part of M, it is counted twice!
+                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+                    XEtnnpi = XEtnnpi + Cm * Un
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # ---> By principle, M = Id, Q = 0
+                XEtnnpi = Xnp[:,point]
+            XEtnnp.append( XEtnnpi )
+        XEtnnp = numpy.hstack( XEtnnp )
+        #
+        Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
+        #
+        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
+        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
+        for point in range(nbSpts):
+            Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
+        #
+        Pnmdemi = numpy.linalg.cholesky(Pnm)
+        #
+        Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
+        #
+        Ynnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
+            Ynnp.append( Ynnpi )
+        Ynnp = numpy.hstack( Ynnp )
+        #
+        Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
+        #
+        Pyyn = R
+        Pxyn = 0.
+        for point in range(nbSpts):
+            Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
+            Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
+        #
+        _Innovation  = Ynpu - Yncm
+        if selfA._parameters["EstimationOf"] == "Parameters":
+            if Cm is not None and Un is not None: # Warning: if Cm is also part of H, it is counted twice!
+                _Innovation = _Innovation - Cm * Un
+        #
+        Kn = Pxyn * Pyyn.I
+        Xn = Xncm + Kn * _Innovation
+        Pn = Pnm - Kn * Pyyn * Kn.T
+        #
+        Xa = Xn # Pointer (no data copy)
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xncm )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xncm - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Additional final storage of the optimum, for parameter estimation
+    # ------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
 # ==============================================================================
 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     """