+ Pn_predicted = Pn
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
+ _Innovation = Ynpu - HX_predicted
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
+ _Innovation = Ynpu - HX_predicted
+ if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
+ _Innovation = _Innovation - Cm @ Un
+ #
+ Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+ Xn = Xn_predicted + Kn * _Innovation
+ Pn = Pn_predicted - Kn * Ht * Pn_predicted
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+ #
+ Xa = Xn # Pointeurs
+ #--------------------------
+ selfA._setInternalState("Xn", Xn)
+ selfA._setInternalState("Pn", Pn)
+ #--------------------------
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> avec analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> avec current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+ if selfA._toStore("ForecastCovariance"):
+ selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+ # ---> autres
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+ #
+ # Stockage final supplémentaire de l'optimum en estimation de paramètres
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
+# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother (EnKS), fixed-lag formulation.

    First runs a full ETKF pass on a deep copy of the algorithm object to
    memorize the ensemble states, then re-plays the series with a lag-L
    window: at each step the current ensemble is analyzed and the same
    transform is applied retroactively to the L previous ensembles.

    Arguments (framework objects — types defined by the ADAO-like host, not
    visible here; presumably the same contract as the sibling filters):
      selfA : algorithm object carrying _parameters and StoredVariables
      Xb    : background state
      Y     : observation series (must expose store()/stepnumber())
      U     : control series or single control, may be None
      HO/EM/CM : observation / evolution / control operator dictionaries
      R/B/Q : observation / background / model error covariances
      VariantM : only "EnKS16-KalmanFilterFormula" is accepted

    Returns 0 on success; raises ValueError on misuse.
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Pre-computation of covariance inversions (only the square-root inverse
    # of R is needed by this variant)
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observation")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]    # total observation size (flattened)
    __n = Xb.size                         # state size
    __m = selfA._parameters["NumberOfMembers"]  # ensemble size
    #
    # First call (or no warm restart): store the background as initial analysis
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct run (memorization is preferred over recomputation):
    # a deep copy of selfA runs the full filter so its per-step ensembles
    # can be replayed below. The RNG state is saved and restored so the
    # smoother pass sees the same random stream as a direct filter run.
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    # NOTE(review): numpy.random.set_state() returns None, so this stores
    # None in "SetSeed" — presumably only the restore side effect matters.
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        # Sliding window of the L previous memorized ensembles, plus a slot
        # (None) for the ensemble analyzed at this step
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        # Control for this step: per-step series, single stored value, or raw
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Warning: if Cm is also included in H, doubling!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # ---> By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            # Ensemble means of state and of simulated observations
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            # Normalized observation anomalies and innovation
            mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi @ ( Ynpu - vZm )
            # ETKF transform in ensemble space: weights vw and transform mT
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)  # deterministic (identity) rotation
            wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            # Analyzed ensemble at the current step
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            #
            # Apply the same transform retroactively to the lag window
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Keep the order-0 retrospective analysis before shifting the window
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            # Shift the lag window by one step
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Store the last, incompletely updated, analyses remaining in the window
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
+
+# ==============================================================================
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+ VariantM="KalmanFilterFormula",
+ Hybrid=None,
+ ):
+ """
+ Ensemble-Transform EnKF
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ #
+ # Opérateurs
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Durée d'observation et tailles
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Précalcul des inversions de B et R
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ elif VariantM != "KalmanFilterFormula":
+ RI = R.getI()
+ if VariantM == "KalmanFilterFormula":
+ RIdemi = R.sqrtmI()
+ #
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
+ previousJMinimum = numpy.finfo(float).max
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ if hasattr(B,"asfullmatrix"):
+ selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+ else:
+ selfA.StoredVariables["APosterioriCovariance"].store( B )
+ selfA._setInternalState("seed", numpy.random.get_state())
+ elif selfA._parameters["nextStep"]:
+ Xn = selfA._getInternalState("Xn")
+ #
+ for step in range(duration-1):
+ numpy.random.set_state(selfA._getInternalState("seed"))
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
+ else:
+ Un = numpy.ravel( U ).reshape((-1,1))
+ else:
+ Un = None
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm @ Un
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+ # --- > Par principe, M = Id, Q = 0
+ Xn_predicted = EMX = Xn
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],