From fe8b997f658e927ade352d7e145991bcd5ed71ca Mon Sep 17 00:00:00 2001
From: Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
Date: Mon, 28 Jun 2021 19:01:20 +0200
Subject: [PATCH] Fixing iterating observation use (2)

---
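Note: the exkf function added below implements the classical extended Kalman
filter recursion, now shared through daCore/NumericObjects.py in the same way
as etkf or ienkf. As a reading aid only, here is a minimal standalone sketch of
one step of that recursion with plain NumPy arrays; the names ekf_step, model
and obs are illustrative and do not exist in ADAO:

    import numpy as np

    def ekf_step(xa, Pa, y, model, obs, Mt, Ht, Q, R):
        # Forecast: propagate the previous analysis and its covariance
        # through the model M and its tangent linear Mt.
        xf = model(xa)                 # X^f = M(X^a)
        Pf = Mt @ Pa @ Mt.T + Q        # P^f = M' P^a M'^T + Q
        # Analysis: Kalman gain, then state and covariance update.
        K = Pf @ Ht.T @ np.linalg.inv(Ht @ Pf @ Ht.T + R)
        d = y - obs(xf)                # innovation
        xa = xf + K @ d                # X^a = X^f + K d
        Pa = Pf - K @ Ht @ Pf          # P^a = (I - K H') P^f
        return xa, Pa

Each observation step starts from the (xa, Pa) produced by the previous one;
keeping Xn/Pn as internal state below is what lets successive run() calls
resume this recursion instead of restarting it from the background.
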
 .../daAlgorithms/ExtendedKalmanFilter.py  | 184 +---------------
 src/daComposant/daCore/NumericObjects.py  | 203 ++++++++++++++++++
 2 files changed, 208 insertions(+), 179 deletions(-)

diff --git a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
index 27a3a1f..20c3976 100644
--- a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
@@ -21,7 +21,7 @@
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D

 import logging
-from daCore import BasicObjects
+from daCore import BasicObjects, NumericObjects
 import numpy

 # ==============================================================================
@@ -69,6 +69,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             "CurrentIterationNumber",
             "CurrentOptimum",
             "CurrentState",
+            "ForecastCovariance",
             "ForecastState",
             "IndexOfOptimum",
             "InnovationAtCurrentAnalysis",
@@ -96,184 +97,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        if self._parameters["EstimationOf"] == "Parameters":
-            self._parameters["StoreInternalVariables"] = True
-        #
-        # Operators
-        # ----------
-        Hm = HO["Direct"].appliedControledFormTo
-        #
-        if self._parameters["EstimationOf"] == "State":
-            Mm = EM["Direct"].appliedControledFormTo
-        #
-        if CM is not None and "Tangent" in CM and U is not None:
-            Cm = CM["Tangent"].asMatrix(Xb)
-        else:
-            Cm = None
-        #
-        # Number of steps equal to the number of observation steps
-        # -------------------------------------------------------
-        if hasattr(Y,"stepnumber"):
-            duration = Y.stepnumber()
-        else:
-            duration = 2
-        #
-        # Precompute the inverses of B and R
-        # ----------------------------------
-        if self._parameters["StoreInternalVariables"] \
-            or self._toStore("CostFunctionJ") \
-            or self._toStore("CostFunctionJb") \
-            or self._toStore("CostFunctionJo") \
-            or self._toStore("CurrentOptimum") \
-            or self._toStore("APosterioriCovariance"):
-            BI = B.getI()
-            RI = R.getI()
-        #
-        # Initialization
-        # --------------
-        Xn = Xb
-        Pn = B
-        #
-        if len(self.StoredVariables["Analysis"])==0 or not self._parameters["nextStep"]:
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) )
-            self.StoredVariables["Analysis"].store( numpy.ravel(Xn) )
-            if self._toStore("APosterioriCovariance"):
-                self.StoredVariables["APosterioriCovariance"].store( Pn.asfullmatrix(Xn.size) )
-                covarianceXa = Pn
-            if self._parameters["EstimationOf"] == "Parameters":
-                covarianceXaMin = Pn
-        #
-        if self._parameters["EstimationOf"] == "Parameters":
-            XaMin = Xn
-            previousJMinimum = numpy.finfo(float).max
-        #
-        for step in range(duration-1):
-            if hasattr(Y,"store"):
-                Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
-            else:
-                Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
-            #
-            Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
-            Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
-            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-            Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
-            #
-            if self._parameters["EstimationOf"] == "State":
-                Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
-                Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
-                Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-                Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
-            #
-            if U is not None:
-                if hasattr(U,"store") and len(U)>1:
-                    Un = numpy.asmatrix(numpy.ravel( U[step] )).T
-                elif hasattr(U,"store") and len(U)==1:
-                    Un = numpy.asmatrix(numpy.ravel( U[0] )).T
-                else:
-                    Un = numpy.asmatrix(numpy.ravel( U )).T
-            else:
-                Un = None
-            #
-            if self._parameters["EstimationOf"] == "State":
-                Xn_predicted = numpy.asmatrix(numpy.ravel( Mm( (Xn, Un) ) )).T
-                if Cm is not None and Un is not None: # Warning: if Cm is also in M, it is counted twice!
-                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    Xn_predicted = Xn_predicted + Cm * Un
-                Pn_predicted = Q + Mt * Pn * Ma
-            elif self._parameters["EstimationOf"] == "Parameters":
-                # --- > By principle, M = Id, Q = 0
-                Xn_predicted = Xn
-                Pn_predicted = Pn
-            #
-            if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
-                Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
-                Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
-            #
-            if self._parameters["EstimationOf"] == "State":
-                _HX = numpy.asmatrix(numpy.ravel( Hm( (Xn_predicted, None) ) )).T
-                _Innovation = Ynpu - _HX
-            elif self._parameters["EstimationOf"] == "Parameters":
-                _HX = numpy.asmatrix(numpy.ravel( Hm( (Xn_predicted, Un) ) )).T
-                _Innovation = Ynpu - _HX
-                if Cm is not None and Un is not None: # Warning: if Cm is also in H, it is counted twice!
-                    _Innovation = _Innovation - Cm * Un
-            #
-            Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-            Xn = Xn_predicted + Kn * _Innovation
-            Pn = Pn_predicted - Kn * Ht * Pn_predicted
-            Xa = Xn # Pointers
-            #
-            self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) )
-            # ---> with analysis
-            self.StoredVariables["Analysis"].store( Xa )
-            if self._toStore("SimulatedObservationAtCurrentAnalysis"):
-                self.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xn, None)) )
-            if self._toStore("InnovationAtCurrentAnalysis"):
-                self.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-            # ---> with current state
-            if self._parameters["StoreInternalVariables"] \
-                or self._toStore("CurrentState"):
-                self.StoredVariables["CurrentState"].store( Xn )
-            if self._toStore("ForecastState"):
-                self.StoredVariables["ForecastState"].store( Xn_predicted )
-            if self._toStore("BMA"):
-                self.StoredVariables["BMA"].store( Xn_predicted - Xa )
-            if self._toStore("InnovationAtCurrentState"):
-                self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-            if self._toStore("SimulatedObservationAtCurrentState") \
-                or self._toStore("SimulatedObservationAtCurrentOptimum"):
-                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
-            # ---> others
-            if self._parameters["StoreInternalVariables"] \
-                or self._toStore("CostFunctionJ") \
-                or self._toStore("CostFunctionJb") \
-                or self._toStore("CostFunctionJo") \
-                or self._toStore("CurrentOptimum") \
-                or self._toStore("APosterioriCovariance"):
-                Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-                Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
-                J = Jb + Jo
-                self.StoredVariables["CostFunctionJb"].store( Jb )
-                self.StoredVariables["CostFunctionJo"].store( Jo )
-                self.StoredVariables["CostFunctionJ" ].store( J )
-                #
-                if self._toStore("IndexOfOptimum") \
-                    or self._toStore("CurrentOptimum") \
self._toStore("CostFunctionJAtCurrentOptimum") \ - or self._toStore("CostFunctionJbAtCurrentOptimum") \ - or self._toStore("CostFunctionJoAtCurrentOptimum") \ - or self._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( self.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if self._toStore("IndexOfOptimum"): - self.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if self._toStore("CurrentOptimum"): - self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["Analysis"][IndexMin] ) - if self._toStore("SimulatedObservationAtCurrentOptimum"): - self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if self._toStore("CostFunctionJbAtCurrentOptimum"): - self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] ) - if self._toStore("CostFunctionJoAtCurrentOptimum"): - self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJo"][IndexMin] ) - if self._toStore("CostFunctionJAtCurrentOptimum"): - self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( self.StoredVariables["CostFunctionJ" ][IndexMin] ) - if self._toStore("APosterioriCovariance"): - self.StoredVariables["APosterioriCovariance"].store( Pn ) - if self._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if self._toStore("APosterioriCovariance"): - covarianceXaMin = Pn - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if self._parameters["EstimationOf"] == "Parameters": - self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["Analysis"]) ) - self.StoredVariables["Analysis"].store( XaMin ) - if self._toStore("APosterioriCovariance"): - self.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if self._toStore("BMA"): - self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + #-------------------------- + NumericObjects.exkf(self, Xb, Y, U, HO, EM, CM, R, B, Q) + #-------------------------- # self._post_run(HO) return 0 diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index 99cc8fb..cb256b9 100644 --- a/src/daComposant/daCore/NumericObjects.py +++ b/src/daComposant/daCore/NumericObjects.py @@ -1209,6 +1209,209 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): # return 0 +# ============================================================================== +def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): + """ + Extended Kalman Filter + """ + if selfA._parameters["EstimationOf"] == "Parameters": + selfA._parameters["StoreInternalVariables"] = True + # + # Opérateurs + H = HO["Direct"].appliedControledFormTo + # + if selfA._parameters["EstimationOf"] == "State": + M = EM["Direct"].appliedControledFormTo + # + if CM is not None and "Tangent" in CM and U is not None: + Cm = CM["Tangent"].asMatrix(Xb) + else: + Cm = None + # + # Durée d'observation et tailles + if hasattr(Y,"stepnumber"): + duration = Y.stepnumber() + __p = numpy.cumprod(Y.shape())[-1] + else: + duration = 2 + __p = numpy.array(Y).size + # + # Précalcul des inversions de B et R + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or 
selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + BI = B.getI() + RI = R.getI() + # + __n = Xb.size + # + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + Xn = Xb + Pn = B + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( Xb ) + if selfA._toStore("APosterioriCovariance"): + if hasattr(B,"asfullmatrix"): + selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) ) + else: + selfA.StoredVariables["APosterioriCovariance"].store( B ) + selfA._setInternalState("seed", numpy.random.get_state()) + elif selfA._parameters["nextStep"]: + Xn = selfA._getInternalState("Xn") + Pn = selfA._getInternalState("Pn") + # + previousJMinimum = numpy.finfo(float).max + # + for step in range(duration-1): + if hasattr(Y,"store"): + Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) + else: + Ynpu = numpy.ravel( Y ).reshape((__p,1)) + # + Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn) + Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape + Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn) + Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape + # + if selfA._parameters["EstimationOf"] == "State": + Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn) + Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape + Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn) + Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape + # + if U is not None: + if hasattr(U,"store") and len(U)>1: + Un = numpy.asmatrix(numpy.ravel( U[step] )).T + elif hasattr(U,"store") and len(U)==1: + Un = numpy.asmatrix(numpy.ravel( U[0] )).T + else: + Un = numpy.asmatrix(numpy.ravel( U )).T + else: + Un = None + # + if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast + Xn_predicted = numpy.asmatrix(numpy.ravel( M( (Xn, Un) ) )).T + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! + Cm = Cm.reshape(__n,Un.size) # ADAO & check shape + Xn_predicted = Xn_predicted + Cm * Un + Pn_predicted = Q + Mt * Pn * Ma + elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast + # --- > Par principe, M = Id, Q = 0 + Xn_predicted = Xn + Pn_predicted = Pn + # + if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": + Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) + Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) + # + if selfA._parameters["EstimationOf"] == "State": + HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, None) ) )).T + _Innovation = Ynpu - HX_predicted + elif selfA._parameters["EstimationOf"] == "Parameters": + HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, Un) ) )).T + _Innovation = Ynpu - HX_predicted + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! 
+            Pn_predicted = Q + Mt * Pn * Ma
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > By principle, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
+            Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, None) ) )).T
+            _Innovation = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, Un) ) )).T
+            _Innovation = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Warning: if Cm is also in H, it is counted twice!
+                _Innovation = _Innovation - Cm * Un
+        #
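+        # Analysis update: gain K = P^f H'^T (H' P^f H'^T + R)^(-1) with Ht/Ha
+        # the tangent linear and adjoint of the observation operator, then
+        # X^a = X^f + K d (d the innovation) and P^a = (I - K H') P^f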
selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + if selfA._parameters["EstimationOf"] == "Parameters" \ + and J < previousJMinimum: + previousJMinimum = J + XaMin = Xa + if selfA._toStore("APosterioriCovariance"): + covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] + # + # Stockage final supplémentaire de l'optimum en estimation de paramètres + # ---------------------------------------------------------------------- + if selfA._parameters["EstimationOf"] == "Parameters": + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( XaMin ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + # + return 0 + # ============================================================================== def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): -- 2.39.2