# ==============================================================================
def ForceNumericBounds( __Bounds ):
- "Force les bornes à être des valeurs numériques, sauf si None"
+ "Force les bornes à être des valeurs numériques, sauf si globalement None"
# Conserve une valeur par défaut à None s'il n'y a pas de bornes
if __Bounds is None: return None
# Converti toutes les bornes individuelles None à +/- l'infini
__Bounds = numpy.asarray( __Bounds, dtype=float )
- if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0:
+ if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
raise ValueError("Incorrectly defined bounds data")
__Bounds[numpy.isnan(__Bounds)[:,0],0] = -sys.float_info.max
__Bounds[numpy.isnan(__Bounds)[:,1],1] = sys.float_info.max
return __Bounds

# ==============================================================================
def RecentredBounds( __Bounds, __Center ):
    "Recenter the bounds around the given center value, unless globally None"
    # Keep a default value of None when there are no bounds
    if __Bounds is None: return None
    # Recenter the numeric bound values around the center
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
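# ==============================================================================
# A minimal usage sketch (illustration only, with made-up values; the _sketch_*
# helper is not part of the patch) of the two functions above: individual None
# bounds become +/- the largest representable float, and recentring subtracts
# the center from both columns, row by row.
def _sketch_bounds_usage():
    "Hedged sketch: normalize, then recenter, a mixed bounds specification"
    __B = ForceNumericBounds( [[0., 10.], [None, 5.]] )
    assert __B[1, 0] == -sys.float_info.max
    __R = RecentredBounds( [[0., 10.], [2., 5.]], numpy.array([1., 2.]) )
    assert numpy.all( __R == numpy.array([[-1., 9.], [0., 3.]]) )
    return __B, __R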
+# ==============================================================================
+def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ Constrained Extended Kalman Filter
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
+ #
+ # Operators
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Observation duration and sizes
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Precompute the inverses of B and R
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ #
+ __n = Xb.size
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ Xn = Xb
+ Pn = B
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ if hasattr(B,"asfullmatrix"):
+ selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+ else:
+ selfA.StoredVariables["APosterioriCovariance"].store( B )
+ selfA._setInternalState("seed", numpy.random.get_state())
+ elif selfA._parameters["nextStep"]:
+ Xn = selfA._getInternalState("Xn")
+ Pn = selfA._getInternalState("Pn")
+ #
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ XaMin = Xn
+ previousJMinimum = numpy.finfo(float).max
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
+ Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
+ Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+ Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
+ Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
+ Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+ Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
+ else:
+ Un = numpy.ravel( U ).reshape((-1,1))
+ else:
+ Un = None
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
+ Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
+ if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm * Un
+ Pn_predicted = Q + Mt * Pn * Ma
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+ # ---> By design, M = Id, Q = 0
+ Xn_predicted = Xn
+ Pn_predicted = Pn
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
+ Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
+ _Innovation = Ynpu - HX_predicted
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
+ _Innovation = Ynpu - HX_predicted
+ if Cm is not None and Un is not None: # Caution: if Cm is also in H, it is counted twice!
+ _Innovation = _Innovation - Cm * Un
+ #
+ Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+ Xn = Xn_predicted + Kn * _Innovation
+ Pn = Pn_predicted - Kn * Ht * Pn_predicted
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
+ Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+ #
+ Xa = Xn # Pointers
+ #--------------------------
+ selfA._setInternalState("Xn", Xn)
+ selfA._setInternalState("Pn", Pn)
+ #--------------------------
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> with analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> with current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+ if selfA._toStore("ForecastCovariance"):
+ selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+ # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+ #
+ # Additional final storage of the optimum in parameter estimation
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
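# ==============================================================================
# The heart of cekf above is one EKF predict/update cycle plus a projection of
# the state onto [lower, upper] bounds after each stage; numpy.clip is the
# compact equivalent of the hstack/max/min projection used in the function. A
# self-contained numerical sketch with made-up matrices (no ADAO objects):
def _sketch_cekf_step():
    "Hedged sketch: one bounded EKF forecast/analysis cycle"
    Mt = numpy.array([[1., 0.1], [0., 1.]])  # evolution tangent
    Ht = numpy.array([[1., 0.]])             # observation tangent
    Q  = 0.01 * numpy.eye(2)                 # model error covariance
    R  = numpy.array([[0.1]])                # observation error covariance
    Bounds = numpy.array([[0., 1.], [0., 1.]])
    Xn, Pn = numpy.array([[0.5], [0.9]]), 0.1 * numpy.eye(2)
    Ynpu = numpy.array([[0.8]])
    # Forecast, then project the predicted state onto the bounds
    Xp = numpy.clip( Mt @ Xn, Bounds[:, 0:1], Bounds[:, 1:2] )
    Pp = Q + Mt @ Pn @ Mt.T
    # Gain and analysis, then project the analysed state again
    Kn = Pp @ Ht.T @ numpy.linalg.inv(R + Ht @ Pp @ Ht.T)
    Xa = numpy.clip( Xp + Kn @ (Ynpu - Ht @ Xp), Bounds[:, 0:1], Bounds[:, 1:2] )
    Pa = Pp - Kn @ Ht @ Pp
    return Xa, Pa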
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
"""
"""
if selfA._parameters["EstimationOf"] == "Parameters":
selfA._parameters["StoreInternalVariables"] = True
- selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
#
# Operators
H = HO["Direct"].appliedControledFormTo
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
- Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
- #
if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
- Xn_predicted = numpy.asmatrix(numpy.ravel( M( (Xn, Un) ) )).T
+ Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
Xn_predicted = Xn_predicted + Cm * Un
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# ---> By design, M = Id, Q = 0
Xn_predicted = Xn
Pn_predicted = Pn
#
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
- Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
- #
if selfA._parameters["EstimationOf"] == "State":
- HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, None) ) )).T
+ HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
_Innovation = Ynpu - HX_predicted
elif selfA._parameters["EstimationOf"] == "Parameters":
- HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, Un) ) )).T
+ HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
_Innovation = Ynpu - HX_predicted
if Cm is not None and Un is not None: # Caution: if Cm is also in H, it is counted twice!
_Innovation = _Innovation - Cm * Un
Xn = Xn_predicted + Kn * _Innovation
Pn = Pn_predicted - Kn * Ht * Pn_predicted
#
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
- Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
- #
Xa = Xn # Pointers
#--------------------------
selfA._setInternalState("Xn", Xn)
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
return 0
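# ==============================================================================
# The change above replaces `*` by `@` in the cost terms: on numpy matrices the
# two coincide, but on plain 2D ndarrays `*` is elementwise and only `@` is the
# matrix product. A tiny self-contained check with made-up values (not part of
# the patch):
def _sketch_matmul_vs_star():
    "Hedged sketch: why Jb and Jo switch to the @ operator"
    d  = numpy.array([[1.], [2.]])           # an innovation-like column vector
    RI = numpy.array([[2., 0.], [0., 0.5]])  # an inverse covariance
    Jo = float( 0.5 * d.T @ (RI @ d) )       # = 0.5 * (2*1 + 0.5*4) = 2.0
    assert Jo == 2.0
    return Jo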
# ==============================================================================
-def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
"""
- Standard Kalman Filter
+ 3DVAR
"""
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA._parameters["StoreInternalVariables"] = True
#
- # Operators
- # ----------
- Ht = HO["Tangent"].asMatrix(Xb)
- Ha = HO["Adjoint"].asMatrix(Xb)
+ # Initializations
+ # ---------------
#
- if selfA._parameters["EstimationOf"] == "State":
- Mt = EM["Tangent"].asMatrix(Xb)
- Ma = EM["Adjoint"].asMatrix(Xb)
+ # Operators
+ Hm = HO["Direct"].appliedTo
+ Ha = HO["Adjoint"].appliedInXTo
#
- if CM is not None and "Tangent" in CM and U is not None:
- Cm = CM["Tangent"].asMatrix(Xb)
+ # Optional use of a precomputed H(Xb) vector
+ if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+ HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
else:
- Cm = None
+ HXb = Hm( Xb )
+ HXb = numpy.ravel( HXb ).reshape((-1,1))
+ if Y.size != HXb.size:
+ raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+ if max(Y.shape) != max(HXb.shape):
+ raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
#
- # Observation duration and sizes
- if hasattr(Y,"stepnumber"):
- duration = Y.stepnumber()
- __p = numpy.cumprod(Y.shape())[-1]
- else:
- duration = 2
- __p = numpy.array(Y).size
- #
- # Precompute the inverses of B and R
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- BI = B.getI()
- RI = R.getI()
- #
- __n = Xb.size
- #
- if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
- Xn = Xb
- Pn = B
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- selfA.StoredVariables["Analysis"].store( Xb )
- if selfA._toStore("APosterioriCovariance"):
- if hasattr(B,"asfullmatrix"):
- selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
- else:
- selfA.StoredVariables["APosterioriCovariance"].store( B )
- selfA._setInternalState("seed", numpy.random.get_state())
- elif selfA._parameters["nextStep"]:
- Xn = selfA._getInternalState("Xn")
- Pn = selfA._getInternalState("Pn")
- #
- if selfA._parameters["EstimationOf"] == "Parameters":
- XaMin = Xn
- previousJMinimum = numpy.finfo(float).max
- #
- for step in range(duration-1):
- if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
- else:
- Ynpu = numpy.ravel( Y ).reshape((__p,1))
- #
- if U is not None:
- if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
- elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
- else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
- else:
- Un = None
- #
- if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
- Xn_predicted = Mt * Xn
- if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
- Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
- Pn_predicted = Q + Mt * Pn * Ma
- elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
- # ---> By design, M = Id, Q = 0
- Xn_predicted = Xn
- Pn_predicted = Pn
- #
- if selfA._parameters["EstimationOf"] == "State":
- HX_predicted = Ht * Xn_predicted
- _Innovation = Ynpu - HX_predicted
- elif selfA._parameters["EstimationOf"] == "Parameters":
- HX_predicted = Ht * Xn_predicted
- _Innovation = Ynpu - HX_predicted
- if Cm is not None and Un is not None: # Caution: if Cm is also in H, it is counted twice!
- _Innovation = _Innovation - Cm * Un
- #
- Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
- Xn = Xn_predicted + Kn * _Innovation
- Pn = Pn_predicted - Kn * Ht * Pn_predicted
- #
- Xa = Xn # Pointers
- #--------------------------
- selfA._setInternalState("Xn", Xn)
- selfA._setInternalState("Pn", Pn)
- #--------------------------
- #
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- # ---> with analysis
- selfA.StoredVariables["Analysis"].store( Xa )
- if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
- selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
- if selfA._toStore("InnovationAtCurrentAnalysis"):
- selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
- # ---> with current state
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CurrentState"):
- selfA.StoredVariables["CurrentState"].store( Xn )
- if selfA._toStore("ForecastState"):
- selfA.StoredVariables["ForecastState"].store( Xn_predicted )
- if selfA._toStore("ForecastCovariance"):
- selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
- if selfA._toStore("SimulatedObservationAtCurrentState") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
- # ---> others
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
- J = Jb + Jo
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- #
- if selfA._toStore("IndexOfOptimum") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("CostFunctionJAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( Pn )
- if selfA._parameters["EstimationOf"] == "Parameters" \
- and J < previousJMinimum:
- previousJMinimum = J
- XaMin = Xa
- if selfA._toStore("APosterioriCovariance"):
- covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
- #
- # Additional final storage of the optimum in parameter estimation
- # ----------------------------------------------------------------------
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- selfA.StoredVariables["Analysis"].store( XaMin )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
- #
- return 0
-
-# ==============================================================================
-def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
- """
- 3DVAR
- """
- #
- # Initialisations
- # ---------------
- #
- # Operators
- Hm = HO["Direct"].appliedTo
- Ha = HO["Adjoint"].appliedInXTo
- #
- # Optional use of a precomputed H(Xb) vector
- if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
- HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
- else:
- HXb = Hm( Xb )
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
- if Y.size != HXb.size:
- raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
- if max(Y.shape) != max(HXb.shape):
- raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
- #
- if selfA._toStore("JacobianMatrixAtBackground"):
- HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
- HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
- selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+ if selfA._toStore("JacobianMatrixAtBackground"):
+ HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+ HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+ selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
#
# Precompute the inverses of B and R
BI = B.getI()
#
return 0
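# ==============================================================================
# std3dvar minimises J(x) = Jb(x) + Jo(x) over the state x (the minimiser call
# itself is elided above). A hedged sketch of that cost function with plain
# numpy arrays and a linear H, the ADAO operator and covariance objects being
# replaced by made-up matrices:
def _sketch_3dvar_cost():
    "Hedged sketch: the 3DVAR cost function and its numerical minimum"
    import scipy.optimize
    H  = numpy.array([[1., 0.], [0., 1.], [1., 1.]])  # linear observation operator
    Xb = numpy.array([0., 0.])                        # background state
    Y  = numpy.array([1., 2., 3.1])                   # observations
    BI = numpy.linalg.inv(0.5 * numpy.eye(2))         # B^{-1}
    RI = numpy.linalg.inv(0.1 * numpy.eye(3))         # R^{-1}
    def J(x):
        Jb = 0.5 * (x - Xb) @ BI @ (x - Xb)           # background term
        Jo = 0.5 * (Y - H @ x) @ RI @ (Y - H @ x)     # observation term
        return Jb + Jo
    # The analysis Xa is the minimiser of J
    return scipy.optimize.minimize(J, Xb).x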
+# ==============================================================================
+def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ Standard Kalman Filter
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ #
+ # Operators
+ # ----------
+ Ht = HO["Tangent"].asMatrix(Xb)
+ Ha = HO["Adjoint"].asMatrix(Xb)
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ Mt = EM["Tangent"].asMatrix(Xb)
+ Ma = EM["Adjoint"].asMatrix(Xb)
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Observation duration and sizes
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Precompute the inverses of B and R
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ #
+ __n = Xb.size
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ Xn = Xb
+ Pn = B
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ if hasattr(B,"asfullmatrix"):
+ selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+ else:
+ selfA.StoredVariables["APosterioriCovariance"].store( B )
+ selfA._setInternalState("seed", numpy.random.get_state())
+ elif selfA._parameters["nextStep"]:
+ Xn = selfA._getInternalState("Xn")
+ Pn = selfA._getInternalState("Pn")
+ #
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ XaMin = Xn
+ previousJMinimum = numpy.finfo(float).max
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
+ else:
+ Un = numpy.ravel( U ).reshape((-1,1))
+ else:
+ Un = None
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ Xn_predicted = Mt * Xn
+ if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm * Un
+ Pn_predicted = Q + Mt * Pn * Ma
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+ # ---> By design, M = Id, Q = 0
+ Xn_predicted = Xn
+ Pn_predicted = Pn
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ HX_predicted = Ht * Xn_predicted
+ _Innovation = Ynpu - HX_predicted
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ HX_predicted = Ht * Xn_predicted
+ _Innovation = Ynpu - HX_predicted
+ if Cm is not None and Un is not None: # Caution: if Cm is also in H, it is counted twice!
+ _Innovation = _Innovation - Cm * Un
+ #
+ Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+ Xn = Xn_predicted + Kn * _Innovation
+ Pn = Pn_predicted - Kn * Ht * Pn_predicted
+ #
+ Xa = Xn # Pointers
+ #--------------------------
+ selfA._setInternalState("Xn", Xn)
+ selfA._setInternalState("Pn", Pn)
+ #--------------------------
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> with analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> with current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+ if selfA._toStore("ForecastCovariance"):
+ selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+ # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+ #
+ # Additional final storage of the optimum in parameter estimation
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
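# ==============================================================================
# In "Parameters" mode the forecast above is the identity (M = Id, Q = 0), so
# stdkf degenerates into a recursive least-squares update of the parameters. A
# hedged, self-contained sketch of that special case with made-up values (not
# part of the patch):
def _sketch_parameter_estimation():
    "Hedged sketch: the Kalman update as recursive least squares"
    Ht = numpy.array([[2.], [1.]])   # tangent mapping 1 parameter to 2 observations
    R  = 0.1 * numpy.eye(2)
    Xn, Pn = numpy.array([[0.]]), numpy.array([[1.]])
    for Ynpu in (numpy.array([[2.1], [0.9]]), numpy.array([[1.9], [1.1]])):
        # No forecast step: Xn_predicted = Xn and Pn_predicted = Pn
        Kn = Pn @ Ht.T @ numpy.linalg.inv(R + Ht @ Pn @ Ht.T)
        Xn = Xn + Kn @ (Ynpu - Ht @ Xn)
        Pn = Pn - Kn @ Ht @ Pn
    return Xn, Pn  # Xn converges towards the true parameter value, here ~1.0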
# ==============================================================================
def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
"""
if selfA._parameters["StoreInternalVariables"] \
or selfA._toStore("CostFunctionJ") \
or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo"):
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
BI = B.getI()
RI = R.getI()
#
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with analysis
selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with current state
if selfA._parameters["StoreInternalVariables"] \
or selfA._toStore("CurrentState"):
selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xncm )
+ if selfA._toStore("ForecastCovariance"):
+ selfA.StoredVariables["ForecastCovariance"].store( Pnm )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( Xncm - Xa )
if selfA._toStore("InnovationAtCurrentState"):
selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
+ # ---> others
if selfA._parameters["StoreInternalVariables"] \
or selfA._toStore("CostFunctionJ") \
or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo"):
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
if selfA._toStore("APosterioriCovariance"):
selfA.StoredVariables["APosterioriCovariance"].store( Pn )
if selfA._parameters["EstimationOf"] == "Parameters" \