From: Jean-Philippe ARGAUD
Date: Wed, 7 Jul 2021 08:38:53 +0000 (+0200)
Subject: Fixing iterating observation use (3)
X-Git-Tag: V9_8_0a1~22
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=abce6a45a57dbd62b35e4eb8904a144bf916375f;p=modules%2Fadao.git

Fixing iterating observation use (3)
---

diff --git a/doc/en/ref_algorithm_KalmanFilter.rst b/doc/en/ref_algorithm_KalmanFilter.rst
index 81d2eab..f9c5f90 100644
--- a/doc/en/ref_algorithm_KalmanFilter.rst
+++ b/doc/en/ref_algorithm_KalmanFilter.rst
@@ -108,6 +108,7 @@ StoreSupplementaryCalculations
   "CurrentIterationNumber",
   "CurrentOptimum",
   "CurrentState",
+  "ForecastCovariance",
   "ForecastState",
   "IndexOfOptimum",
   "InnovationAtCurrentAnalysis",
@@ -158,6 +159,8 @@ StoreSupplementaryCalculations
 
 .. include:: snippets/CurrentState.rst
 
+.. include:: snippets/ForecastCovariance.rst
+
 .. include:: snippets/ForecastState.rst
 
 .. include:: snippets/IndexOfOptimum.rst
diff --git a/doc/en/scripts/simple_KalmanFilter2.py b/doc/en/scripts/simple_KalmanFilter2.py
index cb91761..f677dad 100644
--- a/doc/en/scripts/simple_KalmanFilter2.py
+++ b/doc/en/scripts/simple_KalmanFilter2.py
@@ -14,6 +14,9 @@ print("")
 from adao import adaoBuilder
 case = adaoBuilder.New('')
 #
+case.setBackground (Vector = [0.])
+case.setBackgroundError (ScalarSparseMatrix = 1.)
+#
 case.setObservationOperator(Matrix = [1.])
 case.setObservationError (ScalarSparseMatrix = 0.1**2)
 #
@@ -29,28 +32,19 @@ case.setAlgorithmParameters(
             ],
         },
     )
-case.setObserver(
-    Info=" Analyzed state at current observation:",
-    Template='ValuePrinter',
-    Variable='Analysis',
-    )
 #
 # Loop to obtain an analysis at each observation arrival
 #
-XaStep, VaStep = 0., 1.
 for i in range(1,len(Yobs)):
-    case.setBackground (Vector = "%s"%float(XaStep))
-    case.setBackgroundError (ScalarSparseMatrix = "%s"%float(VaStep))
-    case.setObservation (Vector = Yobs[i])
+    case.setObservation(Vector = Yobs[i])
    case.execute( nextStep = True )
-    XaStep = case.get("Analysis")[-1]
-    VaStep = case.get("APosterioriCovariance")[-1]
 #
 Xa = case.get("Analysis")
 Pa = case.get("APosterioriCovariance")
 #
+print(" Analyzed state at final observation:", Xa[-1])
 print("")
-print(" Final a posteriori variance:",Pa[-1])
+print(" Final a posteriori variance:", Pa[-1])
 print("")
 #
 #-------------------------------------------------------------------------------
diff --git a/doc/en/scripts/simple_KalmanFilter2.res b/doc/en/scripts/simple_KalmanFilter2.res
index b9aaf66..a5c459d 100644
--- a/doc/en/scripts/simple_KalmanFilter2.res
+++ b/doc/en/scripts/simple_KalmanFilter2.res
@@ -2,57 +2,7 @@
 Estimation of a constant variable by filtering
 ----------------------------------------------
 Noisy measurements acquired on 50 time steps
- Analyzed state at current observation: [0.]
- Analyzed state at current observation: [-0.41804504]
- Analyzed state at current observation: [-0.3114053]
- Analyzed state at current observation: [-0.31191336]
- Analyzed state at current observation: [-0.32761493]
- Analyzed state at current observation: [-0.33597167]
- Analyzed state at current observation: [-0.35629573]
- Analyzed state at current observation: [-0.36840289]
- Analyzed state at current observation: [-0.37392713]
- Analyzed state at current observation: [-0.36331937]
- Analyzed state at current observation: [-0.35750362]
- Analyzed state at current observation: [-0.37963052]
- Analyzed state at current observation: [-0.37117993]
- Analyzed state at current observation: [-0.36732985]
- Analyzed state at current observation: [-0.37148382]
- Analyzed state at current observation: [-0.36798059]
- Analyzed state at current observation: [-0.37371077]
- Analyzed state at current observation: [-0.3661228]
- Analyzed state at current observation: [-0.36777529]
- Analyzed state at current observation: [-0.37681677]
- Analyzed state at current observation: [-0.37007654]
- Analyzed state at current observation: [-0.37974517]
- Analyzed state at current observation: [-0.37964703]
- Analyzed state at current observation: [-0.37514278]
- Analyzed state at current observation: [-0.38143128]
- Analyzed state at current observation: [-0.38790654]
- Analyzed state at current observation: [-0.38880008]
- Analyzed state at current observation: [-0.38393577]
- Analyzed state at current observation: [-0.3831028]
- Analyzed state at current observation: [-0.37680097]
- Analyzed state at current observation: [-0.37891813]
- Analyzed state at current observation: [-0.38147782]
- Analyzed state at current observation: [-0.37981569]
- Analyzed state at current observation: [-0.38274266]
- Analyzed state at current observation: [-0.38418507]
- Analyzed state at current observation: [-0.38923054]
- Analyzed state at current observation: [-0.38400006]
- Analyzed state at current observation: [-0.38562502]
- Analyzed state at current observation: [-0.3840503]
- Analyzed state at current observation: [-0.38775222]
- Analyzed state at current observation: [-0.37700787]
- Analyzed state at current observation: [-0.37328191]
- Analyzed state at current observation: [-0.38024181]
- Analyzed state at current observation: [-0.3815806]
- Analyzed state at current observation: [-0.38392063]
- Analyzed state at current observation: [-0.38539266]
- Analyzed state at current observation: [-0.37856929]
- Analyzed state at current observation: [-0.37744505]
- Analyzed state at current observation: [-0.37154554]
- Analyzed state at current observation: [-0.37405773]
- Analyzed state at current observation: [-0.37725236]
+ Analyzed state at final observation: [-0.37725236]
 
 Final a posteriori variance: [[0.00033921]]
 
diff --git a/doc/en/scripts/simple_KalmanFilter2.rst b/doc/en/scripts/simple_KalmanFilter2.rst
index 117508f..f516425 100644
--- a/doc/en/scripts/simple_KalmanFilter2.rst
+++ b/doc/en/scripts/simple_KalmanFilter2.rst
@@ -8,7 +8,8 @@ execution of a Kalman step at the arrival of each observation provided
 iteratively. The keyword "*nextStep*", included in the execution order, allows
 to not store the background in duplicate of the previous analysis.
 
-In a completely similar way to the reanalysis, the estimate is made in
-displaying intermediate results during iterative filtering. Thanks to these
-intermediate information, one can also obtain the graphs illustrating the
-estimation of the state and the associated *a posteriori* error covariance.
+In an entirely similar way to the reanalysis (intermediate results can be
+displayed, but they are omitted here for simplicity), the estimation gives the
+same results during the iterative filtering. Thanks to this intermediate
+information, one can also obtain the graphs illustrating the estimation of the
+state and the associated *a posteriori* error covariance.
diff --git a/doc/fr/ref_algorithm_KalmanFilter.rst b/doc/fr/ref_algorithm_KalmanFilter.rst
index e05f8a5..2c464c6 100644
--- a/doc/fr/ref_algorithm_KalmanFilter.rst
+++ b/doc/fr/ref_algorithm_KalmanFilter.rst
@@ -109,6 +109,7 @@ StoreSupplementaryCalculations
   "CurrentIterationNumber",
   "CurrentOptimum",
   "CurrentState",
+  "ForecastCovariance",
   "ForecastState",
   "IndexOfOptimum",
   "InnovationAtCurrentAnalysis",
@@ -159,6 +160,8 @@ StoreSupplementaryCalculations
 
 .. include:: snippets/CurrentState.rst
 
+.. include:: snippets/ForecastCovariance.rst
+
 .. include:: snippets/ForecastState.rst
 
 .. include:: snippets/IndexOfOptimum.rst
diff --git a/doc/fr/scripts/simple_KalmanFilter2.py b/doc/fr/scripts/simple_KalmanFilter2.py
index 24a8558..01dc98e 100644
--- a/doc/fr/scripts/simple_KalmanFilter2.py
+++ b/doc/fr/scripts/simple_KalmanFilter2.py
@@ -14,6 +14,9 @@ print("")
 from adao import adaoBuilder
 case = adaoBuilder.New('')
 #
+case.setBackground (Vector = [0.])
+case.setBackgroundError (ScalarSparseMatrix = 1.)
+#
 case.setObservationOperator(Matrix = [1.])
 case.setObservationError (ScalarSparseMatrix = 0.1**2)
 #
@@ -29,28 +32,19 @@ case.setAlgorithmParameters(
             ],
         },
     )
-case.setObserver(
-    Info=" État analysé à l'observation courante :",
-    Template='ValuePrinter',
-    Variable='Analysis',
-    )
 #
 # Boucle pour obtenir une analyse à l'arrivée de chaque observation
 #
-XaStep, VaStep = 0., 1.
 for i in range(1,len(Yobs)):
-    case.setBackground (Vector = "%s"%float(XaStep))
-    case.setBackgroundError (ScalarSparseMatrix = "%s"%float(VaStep))
-    case.setObservation (Vector = Yobs[i])
+    case.setObservation(Vector = Yobs[i])
    case.execute( nextStep = True )
-    XaStep = case.get("Analysis")[-1]
-    VaStep = case.get("APosterioriCovariance")[-1]
 #
 Xa = case.get("Analysis")
 Pa = case.get("APosterioriCovariance")
 #
+print(" État analysé à l'observation finale :", Xa[-1])
 print("")
-print(" Variance a posteriori finale :",Pa[-1])
+print(" Variance a posteriori finale :", Pa[-1])
 print("")
 #
 #-------------------------------------------------------------------------------
diff --git a/doc/fr/scripts/simple_KalmanFilter2.res b/doc/fr/scripts/simple_KalmanFilter2.res
index ab3621b..66ec110 100644
--- a/doc/fr/scripts/simple_KalmanFilter2.res
+++ b/doc/fr/scripts/simple_KalmanFilter2.res
@@ -2,57 +2,7 @@
 Estimation par filtrage d'une variable constante
 ------------------------------------------------
 Observations bruitées acquises sur 50 pas de temps
- État analysé à l'observation courante : [0.]
- État analysé à l'observation courante : [-0.41804504]
- État analysé à l'observation courante : [-0.3114053]
- État analysé à l'observation courante : [-0.31191336]
- État analysé à l'observation courante : [-0.32761493]
- État analysé à l'observation courante : [-0.33597167]
- État analysé à l'observation courante : [-0.35629573]
- État analysé à l'observation courante : [-0.36840289]
- État analysé à l'observation courante : [-0.37392713]
- État analysé à l'observation courante : [-0.36331937]
- État analysé à l'observation courante : [-0.35750362]
- État analysé à l'observation courante : [-0.37963052]
- État analysé à l'observation courante : [-0.37117993]
- État analysé à l'observation courante : [-0.36732985]
- État analysé à l'observation courante : [-0.37148382]
- État analysé à l'observation courante : [-0.36798059]
- État analysé à l'observation courante : [-0.37371077]
- État analysé à l'observation courante : [-0.3661228]
- État analysé à l'observation courante : [-0.36777529]
- État analysé à l'observation courante : [-0.37681677]
- État analysé à l'observation courante : [-0.37007654]
- État analysé à l'observation courante : [-0.37974517]
- État analysé à l'observation courante : [-0.37964703]
- État analysé à l'observation courante : [-0.37514278]
- État analysé à l'observation courante : [-0.38143128]
- État analysé à l'observation courante : [-0.38790654]
- État analysé à l'observation courante : [-0.38880008]
- État analysé à l'observation courante : [-0.38393577]
- État analysé à l'observation courante : [-0.3831028]
- État analysé à l'observation courante : [-0.37680097]
- État analysé à l'observation courante : [-0.37891813]
- État analysé à l'observation courante : [-0.38147782]
- État analysé à l'observation courante : [-0.37981569]
- État analysé à l'observation courante : [-0.38274266]
- État analysé à l'observation courante : [-0.38418507]
- État analysé à l'observation courante : [-0.38923054]
- État analysé à l'observation courante : [-0.38400006]
- État analysé à l'observation courante : [-0.38562502]
- État analysé à l'observation courante : [-0.3840503]
- État analysé à l'observation courante : [-0.38775222]
- État analysé à l'observation courante : [-0.37700787]
- État analysé à l'observation courante : [-0.37328191]
- État analysé à l'observation courante : [-0.38024181]
- État analysé à l'observation courante : [-0.3815806]
- État analysé à l'observation courante : [-0.38392063]
- État analysé à l'observation courante : [-0.38539266]
- État analysé à l'observation courante : [-0.37856929]
- État analysé à l'observation courante : [-0.37744505]
- État analysé à l'observation courante : [-0.37154554]
- État analysé à l'observation courante : [-0.37405773]
- État analysé à l'observation courante : [-0.37725236]
+ État analysé à l'observation finale : [-0.37725236]
 
 Variance a posteriori finale : [[0.00033921]]
 
diff --git a/doc/fr/scripts/simple_KalmanFilter2.rst b/doc/fr/scripts/simple_KalmanFilter2.rst
index 29cf97d..3c62ec9 100644
--- a/doc/fr/scripts/simple_KalmanFilter2.rst
+++ b/doc/fr/scripts/simple_KalmanFilter2.rst
@@ -8,7 +8,8 @@ l'exécution d'une étape de Kalman à l'arrivée de chaque observation fournie
 itérativement. Le mot-clé "*nextStep*", inclut dans l'ordre d'exécution, permet
 de ne pas stocker l'ébauche en double de l'analyse précédente.
 
-De manière entièrement similaire à la réanalyse, l'estimation s'effectue en
-affichant des résultats intermédiaires lors du filtrage itératif. Grâce à ces
-informations intermédiaires, on peut aussi obtenir les graphiques illustrant
-l'estimation de l'état et de la covariance d'erreur *a posteriori* associée.
+De manière entièrement similaire à la réanalyse (sachant que l'on peut afficher
+des résultats intermédiaires qui sont ici omis par simplicité), l'estimation
+donne les mêmes résultats lors du filtrage itératif. Grâce à ces informations
+intermédiaires, on peut aussi obtenir les graphiques illustrant l'estimation de
+l'état et de la covariance d'erreur *a posteriori* associée.
diff --git a/src/daComposant/daAlgorithms/KalmanFilter.py b/src/daComposant/daAlgorithms/KalmanFilter.py
index b0a6a63..a2e9cd6 100644
--- a/src/daComposant/daAlgorithms/KalmanFilter.py
+++ b/src/daComposant/daAlgorithms/KalmanFilter.py
@@ -21,7 +21,7 @@
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
 import logging
-from daCore import BasicObjects
+from daCore import BasicObjects, NumericObjects
 import numpy
 
 # ==============================================================================
@@ -62,6 +62,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             "CurrentIterationNumber",
             "CurrentOptimum",
             "CurrentState",
+            "ForecastCovariance",
             "ForecastState",
             "IndexOfOptimum",
             "InnovationAtCurrentAnalysis",
@@ -85,171 +86,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        if self._parameters["EstimationOf"] == "Parameters":
-            self._parameters["StoreInternalVariables"] = True
-        #
-        # Opérateurs
-        # ----------
-        Ht = HO["Tangent"].asMatrix(Xb)
-        Ha = HO["Adjoint"].asMatrix(Xb)
-        #
-        if self._parameters["EstimationOf"] == "State":
-            Mt = EM["Tangent"].asMatrix(Xb)
-            Ma = EM["Adjoint"].asMatrix(Xb)
-        #
-        if CM is not None and "Tangent" in CM and U is not None:
-            Cm = CM["Tangent"].asMatrix(Xb)
-        else:
-            Cm = None
-        #
-        # Nombre de pas identique au nombre de pas d'observations
-        # -------------------------------------------------------
-        if hasattr(Y,"stepnumber"):
-            duration = Y.stepnumber()
-        else:
-            duration = 2
-        #
-        # Précalcul des inversions de B et R
-        # ----------------------------------
-        if self._parameters["StoreInternalVariables"] \
-            or self._toStore("CostFunctionJ") \
-            or self._toStore("CostFunctionJb") \
-            or self._toStore("CostFunctionJo") \
-            or self._toStore("CurrentOptimum") \
-            or self._toStore("APosterioriCovariance"):
-            BI = B.getI()
-            RI = R.getI()
-        #
-        # Initialisation
-        # --------------
-        Xn = Xb
-        Pn = B
-        #
-        if len(self.StoredVariables["Analysis"])==0 or not self._parameters["nextStep"]:
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) )
-            self.StoredVariables["Analysis"].store( numpy.ravel(Xn) )
-            if self._toStore("APosterioriCovariance"):
-                self.StoredVariables["APosterioriCovariance"].store( Pn.asfullmatrix(Xn.size) )
-                covarianceXa = Pn
-                if self._parameters["EstimationOf"] == "Parameters":
-                    covarianceXaMin = Pn
-        #
-        if self._parameters["EstimationOf"] == "Parameters":
-            XaMin = Xn
-            previousJMinimum = numpy.finfo(float).max
-        #
-        for step in range(duration-1):
-            if hasattr(Y,"store"):
-                Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
-            else:
-                Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
-            #
-            if U is not None:
-                if hasattr(U,"store") and len(U)>1:
-                    Un = numpy.asmatrix(numpy.ravel( U[step] )).T
-                elif hasattr(U,"store") and len(U)==1:
-                    Un = numpy.asmatrix(numpy.ravel( U[0] )).T
-                else:
-                    Un = numpy.asmatrix(numpy.ravel( U )).T
-            else:
-                Un = None
-            #
-            if self._parameters["EstimationOf"] == "State":
-                Xn_predicted = Mt * Xn
-                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    Xn_predicted = Xn_predicted + Cm * Un
-                Pn_predicted = Q + Mt * Pn * Ma
-            elif self._parameters["EstimationOf"] == "Parameters":
-                # --- > Par principe, M = Id, Q = 0
-                Xn_predicted = Xn
-                Pn_predicted = Pn
-            #
-            if self._parameters["EstimationOf"] == "State":
-                _HX = Ht * Xn_predicted
-                _Innovation = Ynpu - _HX
-            elif self._parameters["EstimationOf"] == "Parameters":
-                _HX = Ht * Xn_predicted
-                _Innovation = Ynpu - _HX
-                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                    _Innovation = _Innovation - Cm * Un
-            #
-            Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-            Xn = Xn_predicted + Kn * _Innovation
-            Pn = Pn_predicted - Kn * Ht * Pn_predicted
-            Xa = Xn # Pointeurs
-            #
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) )
-            # ---> avec analysis
-            self.StoredVariables["Analysis"].store( Xa )
-            if self._toStore("SimulatedObservationAtCurrentAnalysis"):
-                self.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xn )
-            if self._toStore("InnovationAtCurrentAnalysis"):
-                self.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-            # ---> avec current state
-            if self._parameters["StoreInternalVariables"] \
-                or self._toStore("CurrentState"):
-                self.StoredVariables["CurrentState"].store( Xn )
-            if self._toStore("ForecastState"):
-                self.StoredVariables["ForecastState"].store( Xn_predicted )
-            if self._toStore("BMA"):
-                self.StoredVariables["BMA"].store( Xn_predicted - Xa )
-            if self._toStore("InnovationAtCurrentState"):
-                self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-            if self._toStore("SimulatedObservationAtCurrentState") \
-                or self._toStore("SimulatedObservationAtCurrentOptimum"):
-                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
-            # ---> autres
-            if self._parameters["StoreInternalVariables"] \
-                or self._toStore("CostFunctionJ") \
-                or self._toStore("CostFunctionJb") \
-                or self._toStore("CostFunctionJo") \
-                or self._toStore("CurrentOptimum") \
-                or self._toStore("APosterioriCovariance"):
-                Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-                Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
-                J   = Jb + Jo
-                self.StoredVariables["CostFunctionJb"].store( Jb )
-                self.StoredVariables["CostFunctionJo"].store( Jo )
-                self.StoredVariables["CostFunctionJ" ].store( J )
-                #
-                if self._toStore("IndexOfOptimum") \
-                    or self._toStore("CurrentOptimum") \
-                    or self._toStore("CostFunctionJAtCurrentOptimum") \
-                    or self._toStore("CostFunctionJbAtCurrentOptimum") \
-                    or self._toStore("CostFunctionJoAtCurrentOptimum") \
-                    or self._toStore("SimulatedObservationAtCurrentOptimum"):
-                    IndexMin = numpy.argmin( self.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-                if self._toStore("IndexOfOptimum"):
-                    self.StoredVariables["IndexOfOptimum"].store( IndexMin )
-                if self._toStore("CurrentOptimum"):
-                    self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["Analysis"][IndexMin] )
-                if self._toStore("SimulatedObservationAtCurrentOptimum"):
-                    self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-                if self._toStore("CostFunctionJbAtCurrentOptimum"):
-                    self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] )
-                if self._toStore("CostFunctionJoAtCurrentOptimum"):
-                    self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJo"][IndexMin] )
-                if self._toStore("CostFunctionJAtCurrentOptimum"):
-                    self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( self.StoredVariables["CostFunctionJ" ][IndexMin] )
-            if self._toStore("APosterioriCovariance"):
-                self.StoredVariables["APosterioriCovariance"].store( Pn )
-            if self._parameters["EstimationOf"] == "Parameters" \
-                and J < previousJMinimum:
-                previousJMinimum = J
-                XaMin = Xa
-                if self._toStore("APosterioriCovariance"):
-                    covarianceXaMin = Pn
-        #
-        # Stockage final supplémentaire de l'optimum en estimation de paramètres
-        # ----------------------------------------------------------------------
-        if self._parameters["EstimationOf"] == "Parameters":
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) )
-            self.StoredVariables["Analysis"].store( XaMin )
-            if self._toStore("APosterioriCovariance"):
-                self.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-            if self._toStore("BMA"):
-                self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+        #--------------------------
+        NumericObjects.stdkf(self, Xb, Y, U, HO, EM, CM, R, B, Q)
+        #--------------------------
         #
         self._post_run(HO)
         return 0
diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py
index cb256b9..0c6b441 100644
--- a/src/daComposant/daCore/NumericObjects.py
+++ b/src/daComposant/daCore/NumericObjects.py
@@ -1263,7 +1263,9 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         Xn = selfA._getInternalState("Xn")
         Pn = selfA._getInternalState("Pn")
     #
-    previousJMinimum = numpy.finfo(float).max
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin = Xn
+        previousJMinimum = numpy.finfo(float).max
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
@@ -1326,21 +1328,11 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         selfA._setInternalState("Pn", Pn)
         #--------------------------
         #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
-        #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
         # ---> avec analysis
         selfA.StoredVariables["Analysis"].store( Xa )
         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
         if selfA._toStore("InnovationAtCurrentAnalysis"):
             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
         # ---> avec current state
@@ -2773,6 +2765,189 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
     #
     return 0
 
+# ==============================================================================
+def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Standard Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Opérateurs
+    # ----------
+    Ht = HO["Tangent"].asMatrix(Xb)
+    Ha = HO["Adjoint"].asMatrix(Xb)
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mt = EM["Tangent"].asMatrix(Xb)
+        Ma = EM["Adjoint"].asMatrix(Xb)
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Durée d'observation et tailles
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Précalcul des inversions de B et R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+            else:
+                Un = numpy.asmatrix(numpy.ravel( U )).T
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            Xn_predicted = Mt * Xn
+            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm * Un
+            Pn_predicted = Q + Mt * Pn * Ma
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > Par principe, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = Ht * Xn_predicted
+            _Innovation = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = Ht * Xn_predicted
+            _Innovation = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
+                _Innovation = _Innovation - Cm * Un
+        #
+        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+        Xn = Xn_predicted + Kn * _Innovation
+        Pn = Pn_predicted - Kn * Ht * Pn_predicted
+        #
+        Xa = Xn # Pointeurs
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> avec analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> avec current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> autres
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum = J
+            XaMin = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Stockage final supplémentaire de l'optimum en estimation de paramètres
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
 # ==============================================================================
 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     """
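
As a complement to the patch, the driving pattern it documents (background set once, then one execute( nextStep = True ) per incoming observation) can be exercised end to end with the sketch below. This is not part of the commit: it mirrors doc/en/scripts/simple_KalmanFilter2.py, fabricates a synthetic Yobs series locally, and additionally requests the new "ForecastCovariance" output; the exact setAlgorithmParameters payload is assumed from the ADAO documentation conventions visible above.

# Minimal sketch (not part of the patch): iterative Kalman filtering with the
# background set once and one execute(nextStep=True) per incoming observation.
# "Yobs" is a synthetic series fabricated here for illustration only.
import numpy
from adao import adaoBuilder

numpy.random.seed(1234)
Yobs = (-0.37 + 0.1*numpy.random.standard_normal(50)).tolist()  # noisy constant

case = adaoBuilder.New('')
case.setBackground         (Vector = [0.])
case.setBackgroundError    (ScalarSparseMatrix = 1.)
case.setObservationOperator(Matrix = [1.])
case.setObservationError   (ScalarSparseMatrix = 0.1**2)
case.setAlgorithmParameters(
    Algorithm="KalmanFilter",
    Parameters={
        "StoreSupplementaryCalculations":[
            "APosterioriCovariance",
            "ForecastState",
            "ForecastCovariance",   # output added by this commit
            ],
        },
    )
# One Kalman step at each observation arrival
for i in range(1, len(Yobs)):
    case.setObservation(Vector = Yobs[i])
    case.execute( nextStep = True )

Xa = case.get("Analysis")             # one analysis per step
Pf = case.get("ForecastCovariance")   # one forecast covariance per step
print(" Analyzed state at final observation:", Xa[-1])
print(" Final forecast variance:", Pf[-1])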
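The linear algebra performed by the new NumericObjects.stdkf at each step can be read off the patch: a forecast with the evolution tangent Mt and adjoint Ma, then a gain, an analysis update, and a covariance update. The following self-contained NumPy sketch restates those equations with plain local names (nothing here is ADAO API):

# Self-contained restatement of one stdkf step, with plain NumPy arrays:
#   forecast : Xf = M Xn,                 Pf = Q + M Pn M^T
#   gain     : K  = Pf H^T (R + H Pf H^T)^{-1}
#   analysis : Xa = Xf + K (Y - H Xf),    Pa = Pf - K H Pf
import numpy

def kalman_step(Xn, Pn, Y, M, H, Q, R):
    Xf = M @ Xn                      # ForecastState
    Pf = Q + M @ Pn @ M.T            # ForecastCovariance (new stored output)
    d  = Y - H @ Xf                  # innovation
    K  = Pf @ H.T @ numpy.linalg.inv(R + H @ Pf @ H.T)
    Xa = Xf + K @ d                  # Analysis
    Pa = Pf - K @ H @ Pf             # APosterioriCovariance
    return Xa, Pa

# Scalar case matching the documentation example: constant state, H = M = 1
Xn = numpy.array([[0.]]) ; Pn = numpy.array([[1.]])
M  = numpy.array([[1.]]) ; H  = numpy.array([[1.]])
Q  = numpy.array([[0.]]) ; R  = numpy.array([[0.1**2]])
Xa, Pa = kalman_step(Xn, Pn, numpy.array([[-0.4]]), M, H, Q, R)
print(Xa, Pa)   # the first analysis pulls the background toward the observation

In the parameter-estimation branch of stdkf, M is the identity and Q is zero, so the forecast reduces to Xf = Xn and Pf = Pn, as the patch comments note.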
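What makes nextStep = True work across successive execute calls is the _setInternalState("Xn", ...) / _getInternalState("Xn") pair visible in the patch: the analysis state and covariance are parked on the algorithm object between runs. A conceptual sketch of that mechanism, reusing kalman_step from the previous sketch (the class and names here are illustrative, not the ADAO implementation):

# Conceptual sketch of the nextStep carry-over (illustrative, not ADAO code).
class IterativeFilterSketch:
    def __init__(self, Xb, B, M, H, Q, R):
        self._internal = {}              # stands in for _set/_getInternalState
        self.Xb, self.B = Xb, B
        self.M, self.H, self.Q, self.R = M, H, Q, R
    def execute(self, Y, nextStep=False):
        if nextStep and "Xn" in self._internal:
            Xn, Pn = self._internal["Xn"], self._internal["Pn"]  # resume
        else:
            Xn, Pn = self.Xb, self.B     # first call: start from the background
        Xa, Pa = kalman_step(Xn, Pn, Y, self.M, self.H, self.Q, self.R)
        self._internal["Xn"], self._internal["Pn"] = Xa, Pa      # park for next call
        return Xa, Pa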
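Finally, when cost functions are requested, stdkf evaluates the background and observation terms against the precomputed inverses BI and RI, exactly as the removed run() body did. Restated with NumPy inverses (the getI calls in the patch belong to ADAO covariance objects, not NumPy):

# Cost terms stored as CostFunctionJb / CostFunctionJo / CostFunctionJ:
#   Jb = 0.5 (Xa - Xb)^T B^{-1} (Xa - Xb),  Jo = 0.5 d^T R^{-1} d,  J = Jb + Jo
import numpy

def cost_terms(Xa, Xb, d, B, R):
    BI = numpy.linalg.inv(B)    # stdkf precomputes these once per run
    RI = numpy.linalg.inv(R)
    Jb = float( 0.5 * (Xa - Xb).T @ BI @ (Xa - Xb) )
    Jo = float( 0.5 * d.T @ RI @ d )
    return Jb, Jo, Jb + Jo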