From c41f015518306719f3130567a3e2c949f17556af Mon Sep 17 00:00:00 2001
From: Jean-Philippe ARGAUD
Date: Sun, 12 Sep 2021 06:55:26 +0200
Subject: [PATCH] Documentation and C*KF use improvements

---
 doc/en/ref_algorithm_ExtendedKalmanFilter.rst |   9 +
 .../ref_algorithm_UnscentedKalmanFilter.rst   |  11 +
 doc/en/snippets/Variant_UKF.rst               |  14 ++
 doc/fr/ref_algorithm_ExtendedKalmanFilter.rst |   9 +
 .../ref_algorithm_UnscentedKalmanFilter.rst   |  11 +
 doc/fr/snippets/Variant_UKF.rst               |  14 ++
 .../daAlgorithms/UnscentedKalmanFilter.py     |  17 +-
 src/daComposant/daCore/NumericObjects.py      | 232 ++++++++++++++++++
 8 files changed, 313 insertions(+), 4 deletions(-)
 create mode 100644 doc/en/snippets/Variant_UKF.rst
 create mode 100644 doc/fr/snippets/Variant_UKF.rst

diff --git a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst
index 900b3fa..39fdb62 100644
--- a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst
+++ b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst
@@ -57,6 +57,15 @@ In case of really non-linear operators, one can easily use the
 adapted to non-linear behavior but more costly. One can verify the linearity
 of the operators with the help of the :ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; EKF
+    pair: Variant ; CEKF
+
+A difference is made between the extended Kalman filter taking into account
+bounds on the states (the variant named "CEKF", which is recommended and used
+by default), and the extended Kalman filter conducted without any constraint
+(the variant named "EKF", which is not recommended).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
diff --git a/doc/en/ref_algorithm_UnscentedKalmanFilter.rst b/doc/en/ref_algorithm_UnscentedKalmanFilter.rst
index 828843c..885f7f2 100644
--- a/doc/en/ref_algorithm_UnscentedKalmanFilter.rst
+++ b/doc/en/ref_algorithm_UnscentedKalmanFilter.rst
@@ -52,6 +52,15 @@ In case of linear or "slightly" non-linear operators, one can easily use the
 to evaluate on small systems. One can verify the linearity of the operators
 with the help of the :ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+A difference is made between the "unscented" Kalman filter taking into account
+bounds on the states (the variant named "CUKF", which is recommended and used
+by default), and the "unscented" Kalman filter conducted without any constraint
+(the variant named "UKF", which is not recommended).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
@@ -117,6 +126,8 @@ StoreSupplementaryCalculations
 
   Example :
   ``{"StoreSupplementaryCalculations":["BMA", "CurrentState"]}``
 
+.. include:: snippets/Variant_UKF.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo04.rst
 
diff --git a/doc/en/snippets/Variant_UKF.rst b/doc/en/snippets/Variant_UKF.rst
new file mode 100644
index 0000000..8c22751
--- /dev/null
+++ b/doc/en/snippets/Variant_UKF.rst
@@ -0,0 +1,14 @@
+.. index::
+    single: Variant
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+Variant
+  *Predefined name*. This key allows one to choose one of the possible variants
+  for the main algorithm. The default variant is the constrained version "CUKF"
+  of the original algorithm "UKF", and the possible choices are
+  "UKF" (Unscented Kalman Filter),
+  "CUKF" (Constrained Unscented Kalman Filter).
+
+  Example :
+  ``{"Variant":"CUKF"}``
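
Since the constrained variants act through bounds on the states, the "Variant"
key is typically declared together with the corresponding bounds. A minimal
sketch of such an "AlgorithmParameters" value follows; the "Bounds" pairs and
the two-component state are invented for the illustration, and "Bounds" is
assumed here to be the usual ADAO key for state constraints:

    {
        "Variant" : "CUKF",                   # constrained variant, the default
        "Bounds"  : [[0., 10.], [None, 1.]],  # one [min, max] pair per state component
    }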
diff --git a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst
index e680d49..9fc7dbe 100644
--- a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst
+++ b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst
@@ -58,6 +58,15 @@ largement plus adaptés aux comportements non-linéaires mais plus coûteux. On
 peut vérifier la linéarité des opérateurs à l'aide de
 l':ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; EKF
+    pair: Variant ; CEKF
+
+On fait une différence entre le filtre de Kalman étendu tenant compte de
+bornes sur les états (la variante nommée "CEKF", qui est recommandée et qui est
+utilisée par défaut), et le filtre de Kalman étendu conduit sans
+aucune contrainte (la variante nommée "EKF", qui n'est pas recommandée).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
diff --git a/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst b/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst
index 25338d7..dbcb686 100644
--- a/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst
+++ b/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst
@@ -53,6 +53,15 @@ l':ref:`section_ref_algorithm_KalmanFilter`, qui sont souvent largement moins
 coûteux en évaluation sur de petits systèmes. On peut vérifier la linéarité
 des opérateurs à l'aide de l':ref:`section_ref_algorithm_LinearityTest`.
 
+.. index::
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+On fait une différence entre le filtre de Kalman "unscented" tenant compte de
+bornes sur les états (la variante nommée "CUKF", qui est recommandée et qui est
+utilisée par défaut), et le filtre de Kalman "unscented" conduit sans
+aucune contrainte (la variante nommée "UKF", qui n'est pas recommandée).
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo02.rst
 
@@ -119,6 +128,8 @@ StoreSupplementaryCalculations
 
   Exemple :
   ``{"StoreSupplementaryCalculations":["BMA", "CurrentState"]}``
 
+.. include:: snippets/Variant_UKF.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo04.rst
 
diff --git a/doc/fr/snippets/Variant_UKF.rst b/doc/fr/snippets/Variant_UKF.rst
new file mode 100644
index 0000000..6f9d4a8
--- /dev/null
+++ b/doc/fr/snippets/Variant_UKF.rst
@@ -0,0 +1,14 @@
+.. index::
+    single: Variant
+    pair: Variant ; UKF
+    pair: Variant ; CUKF
+
+Variant
+  *Nom prédéfini*. Cette clé permet de choisir l'une des variantes possibles
+  pour l'algorithme principal. La variante par défaut est la version contrainte
+  "CUKF" de l'algorithme original "UKF", et les choix possibles sont
+  "UKF" (Unscented Kalman Filter),
+  "CUKF" (Constrained Unscented Kalman Filter).
+
+  Exemple :
+  ``{"Variant":"CUKF"}``
diff --git a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
index a8904ba..addb56c 100644
--- a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
@@ -29,13 +29,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         BasicObjects.Algorithm.__init__(self, "UNSCENTEDKALMANFILTER")
         self.defineRequiredParameter(
             name     = "Variant",
-            default  = "UKF",
+            default  = "CUKF",
             typecast = str,
             message  = "Variant ou formulation de la méthode",
             listval  = [
                 "UKF",
-                ],
-            listadv  = [
                 "CUKF",
                 ],
             )
@@ -139,8 +137,19 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
         #--------------------------
-        NumericObjects.uckf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        # Standard UKF, without constraints
+        #--------------------------
+        if self._parameters["Variant"] == "UKF":
+            NumericObjects.uskf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        #
+        #--------------------------
+        # Constrained UKF (the default variant)
+        elif self._parameters["Variant"] == "CUKF":
+            NumericObjects.uckf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        #
         #--------------------------
+        else:
+            raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
         #
         self._post_run(HO)
         return 0
diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py
index 2aa13c9..39f96e7 100644
--- a/src/daComposant/daCore/NumericObjects.py
+++ b/src/daComposant/daCore/NumericObjects.py
@@ -3931,6 +3931,238 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     return 0
 
+# ==============================================================================
+def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Unscented Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    L     = Xb.size
+    Alpha = selfA._parameters["Alpha"]
+    Beta  = selfA._parameters["Beta"]
+    if selfA._parameters["Kappa"] == 0:
+        if selfA._parameters["EstimationOf"] == "State":
+            Kappa = 0
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            Kappa = 3 - L
+    else:
+        Kappa = selfA._parameters["Kappa"]
+    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
+    Gamma  = math.sqrt( L + Lambda )
+    #
+    Ww = []
+    Ww.append( 0. )
+    for i in range(2*L):
+        Ww.append( 1. / (2.*(L + Lambda)) )
+    #
+    Wm    = numpy.array( Ww )
+    Wm[0] = Lambda / (L + Lambda)
+    Wc    = numpy.array( Ww )
+    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
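+    # Descriptive note: these are the standard weights of the scaled unscented
+    # transform. Wm sums exactly to 1, since Wm[0] = Lambda/(L+Lambda) and the
+    # 2L remaining weights are each 1/(2(L+Lambda)). Wc differs from Wm only
+    # through its first term, which is shifted by (1 - Alpha**2 + Beta) to
+    # improve the covariance estimate (Beta = 2 being optimal for Gaussians).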
+    #
+    # Opérateurs
+    Hm = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mm = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Durée d'observation et tailles
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Précalcul des inversions de B et R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    # Number of cost values already stored, used below to locate the optimum
+    nbPreviousSteps = len(selfA.StoredVariables["CostFunctionJ"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        if hasattr(B,"asfullmatrix"):
+            Pn = B.asfullmatrix(__n)
+        else:
+            Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        Pndemi = numpy.linalg.cholesky(Pn)
+        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
+        nbSpts = 2*Xn.size+1
+        #
+        XEtnnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
+                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+                    XEtnnpi = XEtnnpi + Cm * Un
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # --- > Par principe, M = Id, Q = 0
+                XEtnnpi = Xnp[:,point]
+            XEtnnp.append( XEtnnpi )
+        XEtnnp = numpy.hstack( XEtnnp )
+        #
+        Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
+        #
+        if   selfA._parameters["EstimationOf"] == "State":      Pnm = Q
+        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
+        for point in range(nbSpts):
+            Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
+        #
+        Pnmdemi = numpy.linalg.cholesky(Pnm)
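+        # Descriptive note: the time update (prediction) ends here. The sigma
+        # points Xnp, built from the Cholesky factor of Pn, were propagated
+        # through the evolution operator; Xncm and Pnm now hold the predicted
+        # mean and covariance (Q being included when estimating the state),
+        # and Pnmdemi is the Cholesky factor used to redraw sigma points
+        # around the prediction for the observation update below.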
+        Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
+        #
+        Ynnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
+            Ynnp.append( Ynnpi )
+        Ynnp = numpy.hstack( Ynnp )
+        #
+        Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
+        #
+        Pyyn = R
+        Pxyn = 0.
+        for point in range(nbSpts):
+            Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
+            Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
+        #
+        _Innovation  = Ynpu - Yncm
+        if selfA._parameters["EstimationOf"] == "Parameters":
+            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
+                _Innovation = _Innovation - Cm * Un
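+        # Descriptive note: the measurement update follows, written in the
+        # classical Kalman form with the unscented statistics: the gain is
+        # Kn = Pxyn * Pyyn^(-1), the analysis is the predicted mean corrected
+        # by the gain applied to the innovation, and the a posteriori
+        # covariance is Pn = Pnm - Kn * Pyyn * Kn'.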
+        Kn = Pxyn * Pyyn.I
+        Xn = Xncm + Kn * _Innovation
+        Pn = Pnm - Kn * Pyyn * Kn.T
+        #
+        Xa = Xn # Pointeurs
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> avec analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> avec current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xncm )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xncm - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
+        # ---> autres
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Stockage final supplémentaire de l'optimum en estimation de paramètres
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
 # ==============================================================================
 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     """
-- 
2.39.2
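
For reference, a minimal way to exercise the new default variant from Python;
this is only a sketch under assumptions: it relies on the usual adaoBuilder
case API, and the scalar toy system with all its numerical values is invented
for the illustration:

    # A minimal sketch, assuming the usual adaoBuilder case API; the scalar
    # toy system and every numerical value below are illustrative only.
    from adao import adaoBuilder

    case = adaoBuilder.New()
    case.set( 'AlgorithmParameters',
        Algorithm  = 'UnscentedKalmanFilter',
        Parameters = {
            "Variant" : "CUKF",  # constrained variant, now the default
            "EstimationOf" : "State",
            "StoreSupplementaryCalculations" : ["BMA", "CurrentState"],
            },
        )
    case.set( 'Background',          Vector = [0.] )
    case.set( 'BackgroundError',     ScalarSparseMatrix = 1. )
    case.set( 'Observation',         VectorSerie = [[0.5], [1.0], [1.5]] )
    case.set( 'ObservationError',    ScalarSparseMatrix = 0.1 )
    case.set( 'ObservationOperator', Matrix = [[1.]] )
    case.set( 'EvolutionModel',      Matrix = [[1.]] )
    case.set( 'EvolutionError',      ScalarSparseMatrix = 0.01 )
    case.execute()
    print( "Last analysis:", case.get('Analysis')[-1] )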