if self.__mfEnabled: return [_HaY.A1,]
else: return _HaY.A1
-# ==============================================================================
-def mmqr(
- func = None,
- x0 = None,
- fprime = None,
- bounds = None,
- quantile = 0.5,
- maxfun = 15000,
- toler = 1.e-06,
- y = None,
- ):
- """
- Computational implementation of the MMQR algorithm, based on the publication:
- David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
- Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
- """
- #
- # Recuperation des donnees et informations initiales
- # --------------------------------------------------
- variables = numpy.ravel( x0 )
- mesures = numpy.ravel( y )
- increment = sys.float_info[0]
- p = variables.size
- n = mesures.size
- quantile = float(quantile)
- #
- # Calcul des parametres du MM
- # ---------------------------
- tn = float(toler) / n
- e0 = -tn / math.log(tn)
- epsilon = (e0-tn)/(1+math.log(e0))
- #
- # Calculs d'initialisation
- # ------------------------
- residus = mesures - numpy.ravel( func( variables ) )
- poids = 1./(epsilon+numpy.abs(residus))
- veps = 1. - 2. * quantile - residus * poids
- lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
- iteration = 0
- #
- # Recherche iterative
- # -------------------
- while (increment > toler) and (iteration < maxfun) :
- iteration += 1
- #
- Derivees = numpy.array(fprime(variables))
- Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
- DeriveesT = Derivees.transpose()
- M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
- SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
- step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
- #
- variables = variables + step
- if bounds is not None:
- # Attention : boucle infinie à éviter si un intervalle est trop petit
- while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
- step = step/2.
- variables = variables - step
- residus = mesures - numpy.ravel( func(variables) )
- surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
- #
- while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
- step = step/2.
- variables = variables - step
- residus = mesures - numpy.ravel( func(variables) )
- surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
- #
- increment = lastsurrogate-surrogate
- poids = 1./(epsilon+numpy.abs(residus))
- veps = 1. - 2. * quantile - residus * poids
- lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
- #
- # Mesure d'écart
- # --------------
- Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
- #
- return variables, Ecart, [n,p,iteration,increment,0]
-
# ==============================================================================
def EnsembleErrorCovariance( Ensemble ):
"Return the empirical error covariance of the given ensemble"
__Anomalies = EnsembleOfAnomalies( Ensemble )
__n, __m = numpy.asarray(__Anomalies).shape
+ # Empirical estimate
__Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
# Enforce symmetry
__Covariance = (__Covariance + __Covariance.T) * 0.5
return __Covariance
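# Behaviour sketch (assuming EnsembleOfAnomalies subtracts the ensemble mean): for an
# n x m ensemble E with mean Em, this returns the symmetrised n x n sample covariance
# (E - Em) @ (E - Em).T / (m - 1).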
# ==============================================================================
-def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
+def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
"""
- Multi-step and multi-method 3DVAR
+ Ensemble Kalman Smoother (EnKS)
"""
#
- # Initialisation
- # --------------
- Xn = numpy.ravel(Xb).reshape((-1,1))
+ # Initialisations
+ # ---------------
+ #
+ # Operators
+ H = HO["Direct"].appliedControledFormTo
#
if selfA._parameters["EstimationOf"] == "State":
- M = EM["Direct"].appliedTo
- #
- if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
- selfA.StoredVariables["Analysis"].store( Xn )
- if selfA._toStore("APosterioriCovariance"):
- if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
- else: Pn = B
- selfA.StoredVariables["APosterioriCovariance"].store( Pn )
- if selfA._toStore("ForecastState"):
- selfA.StoredVariables["ForecastState"].store( Xn )
+ M = EM["Direct"].appliedControledFormTo
#
- if hasattr(Y,"stepnumber"):
- duration = Y.stepnumber()
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
else:
- duration = 2
+ Cm = None
#
- # Multi-pas
- # ---------
- for step in range(duration-1):
+ # Precompute the inverse square root of R
+ RIdemi = R.sqrtmI()
+ #
+ LagL = selfA._parameters["SmootherLagL"]
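+ # Fixed-lag smoother scheme implemented below: a sliding window of the LagL most recent
+ # ensemble states is kept; at each new observation the current ensemble is analysed with
+ # the ETKF formula, and the same weight transform is re-applied to every stored lagged
+ # ensemble, so that past analyses also benefit from the newly assimilated observation.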
+ if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
+ raise ValueError("Fixed-lag smoother requires a series of observations")
+ if Y.stepnumber() < LagL:
+ raise ValueError("Fixed-lag smoother requires a series of observations at least as long as the lag L")
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ #
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
+ #
+ # Initial direct run (storing the results is preferred over recomputing them)
+ __seed = numpy.random.get_state()
+ selfB = copy.deepcopy(selfA)
+ selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
+ if VariantM == "EnKS16-KalmanFilterFormula":
+ etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ if LagL > 0:
+ EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
+ else:
+ EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
+ selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
+ #
+ for step in range(LagL,duration-1):
+ #
+ sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
+ sEL.append(None)
+ #
if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
else:
- Ynpu = numpy.ravel( Y ).reshape((-1,1))
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
#
- if selfA._parameters["EstimationOf"] == "State": # Forecast
- Xn = selfA.StoredVariables["Analysis"][-1]
- Xn_predicted = M( Xn )
- if selfA._toStore("ForecastState"):
- selfA.StoredVariables["ForecastState"].store( Xn_predicted )
- elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
- # --- > Par principe, M = Id, Q = 0
- Xn_predicted = Xn
- Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
#
- oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
+ #--------------------------
+ if VariantM == "EnKS16-KalmanFilterFormula":
+ if selfA._parameters["EstimationOf"] == "State": # Forecast
+ EL = M( [(EL[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ EL = EL + numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
+ EZ = H( [(EL[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is applied twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ EZ = EZ + Cm * Un
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ # --- > By principle, M = Id, Q = 0
+ EZ = H( [(EL[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ #
+ vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
+ delta = RIdemi @ ( Ynpu - vZm )
+ mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+ vw = mT @ mS.T @ delta
+ #
+ Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+ mU = numpy.identity(__m)
+ wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
+ #
+ EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
+ EL = vEm + EX @ wTU
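+ # wTU combines the mean-update weights vw with the square-root transform sqrt(m-1)*T^{1/2};
+ # re-applying this same right-multiplying transform to the stored lagged ensembles below is
+ # what turns the filter update into a smoother update.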
+ #
+ sEL[LagL] = EL
+ for irl in range(LagL): # Smoothing of the L previous analyses
+ vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
+ sEL[irl] = vEm + EX @ wTU
+ #
+ # Keep the lag-0 retrospective analysis before rotating the window
+ Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if selfA._toStore("APosterioriCovariance"):
+ EXn = sEL[0]
+ #
+ for irl in range(LagL):
+ sEL[irl] = sEL[irl+1]
+ sEL[LagL] = None
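+ # Shift the lag window by one: the lag-0 ensemble (whose mean Xa was extracted above) is
+ # about to be archived as the definitive analysis, and slot LagL is freed for the next cycle.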
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
+ #
+ # Store the last analyses, which are not yet fully updated
+ for irl in range(LagL):
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ selfA.StoredVariables["Analysis"].store( Xa )
#
return 0
# ==============================================================================
-def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
"""
- 3DVAR
+ Ensemble-Transform EnKF
"""
- #
- # Initialisations
- # ---------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
#
# Operators
- Hm = HO["Direct"].appliedTo
- Ha = HO["Adjoint"].appliedInXTo
+ # ----------
+ H = HO["Direct"].appliedControledFormTo
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
- if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
- HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
else:
- HXb = Hm( Xb )
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
- if Y.size != HXb.size:
- raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
- if max(Y.shape) != max(HXb.shape):
- raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+ Cm = None
#
- if selfA._toStore("JacobianMatrixAtBackground"):
- HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
- HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
- selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+ # Number of steps identical to the number of observation steps
+ # -------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
#
# Precompute the inverses of B and R
- BI = B.getI()
- RI = R.getI()
+ # ----------------------------------
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ elif VariantM != "KalmanFilterFormula":
+ RI = R.getI()
+ if VariantM == "KalmanFilterFormula":
+ RIdemi = R.sqrtmI()
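+ # Note: R^{-1} (and B^{-1}) are only needed for the cost-function bookkeeping and for the
+ # variational / finite-size variants, whereas the deterministic "KalmanFilterFormula"
+ # update only requires the inverse square root R^{-1/2} computed here.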
#
- # Point de démarrage de l'optimisation
- Xini = selfA._parameters["InitializationPoint"]
+ # Initialisation
+ # --------------
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+ #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
#
- # Définition de la fonction-coût
- # ------------------------------
- def CostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
- if selfA._parameters["StoreInternalVariables"] or \
- selfA._toStore("CurrentState") or \
- selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentState"].store( _X )
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
- _Innovation = Y - _HX
- if selfA._toStore("SimulatedObservationAtCurrentState") or \
- selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
- #
- Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
- J = Jb + Jo
- #
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- if selfA._toStore("IndexOfOptimum") or \
- selfA._toStore("CurrentOptimum") or \
- selfA._toStore("CostFunctionJAtCurrentOptimum") or \
- selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
- selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
- selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- return J
- #
- def GradientOfCostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
- GradJb = BI * (_X - Xb)
- GradJo = - Ha( (_X, RI * (Y - _HX)) )
- GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
- return GradJ
- #
- # Minimisation de la fonctionnelle
- # --------------------------------
- nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
- #
- if selfA._parameters["Minimizer"] == "LBFGSB":
- if "0.19" <= scipy.version.version <= "1.1.0":
- import lbfgsbhlt as optimiseur
- else:
- import scipy.optimize as optimiseur
- Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
- func = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- bounds = selfA._parameters["Bounds"],
- maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
- factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
- pgtol = selfA._parameters["ProjectedGradientTolerance"],
- iprint = selfA._parameters["optiprint"],
- )
- nfeval = Informations['funcalls']
- rc = Informations['warnflag']
- elif selfA._parameters["Minimizer"] == "TNC":
- Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
- func = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- bounds = selfA._parameters["Bounds"],
- maxfun = selfA._parameters["MaximumNumberOfSteps"],
- pgtol = selfA._parameters["ProjectedGradientTolerance"],
- ftol = selfA._parameters["CostDecrementTolerance"],
- messages = selfA._parameters["optmessages"],
- )
- elif selfA._parameters["Minimizer"] == "CG":
- Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- gtol = selfA._parameters["GradientNormTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- elif selfA._parameters["Minimizer"] == "NCG":
- Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- avextol = selfA._parameters["CostDecrementTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- elif selfA._parameters["Minimizer"] == "BFGS":
- Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- gtol = selfA._parameters["GradientNormTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- else:
- raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
- #
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
- #
- # Correction pour pallier a un bug de TNC sur le retour du Minimum
- # ----------------------------------------------------------------
- if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
- Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
- #
- # Obtention de l'analyse
- # ----------------------
- Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
#
- selfA.StoredVariables["Analysis"].store( Xa )
+ previousJMinimum = numpy.finfo(float).max
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # assumption: offset of the cost values stored before this run
#
- if selfA._toStore("OMA") or \
- selfA._toStore("SigmaObs2") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("SimulatedObservationAtOptimum"):
- if selfA._toStore("SimulatedObservationAtCurrentState"):
- HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
- elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
else:
- HXa = Hm( Xa )
- #
- # Calcul de la covariance d'analyse
- # ---------------------------------
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("JacobianMatrixAtOptimum") or \
- selfA._toStore("KalmanGainAtOptimum"):
- HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
- HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("KalmanGainAtOptimum"):
- HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
- HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles"):
- HessienneI = []
- nb = Xa.size
- for i in range(nb):
- _ee = numpy.matrix(numpy.zeros(nb)).T
- _ee[i] = 1.
- _HtEE = numpy.dot(HtM,_ee)
- _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
- HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
- HessienneI = numpy.matrix( HessienneI )
- A = HessienneI.I
- if min(A.shape) != max(A.shape):
- raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
- if (numpy.diag(A) < 0).any():
- raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
- if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
- try:
- L = numpy.linalg.cholesky( A )
- except:
- raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( A )
- if selfA._toStore("JacobianMatrixAtOptimum"):
- selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
- if selfA._toStore("KalmanGainAtOptimum"):
- if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
- elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
- selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
- #
- # Calculs et/ou stockages supplémentaires
- # ---------------------------------------
- if selfA._toStore("Innovation") or \
- selfA._toStore("SigmaObs2") or \
- selfA._toStore("MahalanobisConsistency") or \
- selfA._toStore("OMB"):
- d = Y - HXb
- if selfA._toStore("Innovation"):
- selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
- if selfA._toStore("OMA"):
- selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
- if selfA._toStore("OMB"):
- selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
- if selfA._toStore("SigmaObs2"):
- TraceR = R.trace(Y.size)
- selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
- if selfA._toStore("MahalanobisConsistency"):
- selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
- if selfA._toStore("SimulationQuantiles"):
- nech = selfA._parameters["NumberOfSamplesForQuantiles"]
- HXa = numpy.matrix(numpy.ravel( HXa )).T
- YfQ = None
- for i in range(nech):
- if selfA._parameters["SimulationForQuantiles"] == "Linear":
- dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
- dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
- Yr = HXa + dYr
- elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
- Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
- Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
- if YfQ is None:
- YfQ = Yr
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
else:
- YfQ = numpy.hstack((YfQ,Yr))
- YfQ.sort(axis=-1)
- YQ = None
- for quantile in selfA._parameters["Quantiles"]:
- if not (0. <= float(quantile) <= 1.): continue
- indice = int(nech * float(quantile) - 1./nech)
- if YQ is None: YQ = YfQ[:,indice]
- else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
- selfA.StoredVariables["SimulationQuantiles"].store( YQ )
- if selfA._toStore("SimulatedObservationAtBackground"):
- selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
- if selfA._toStore("SimulatedObservationAtOptimum"):
- selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
- #
- return 0
-
-# ==============================================================================
-def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
- """
- 3DVAR variational analysis with no inversion of B
- """
- #
- # Initialisations
- # ---------------
- #
- # Opérateurs
- Hm = HO["Direct"].appliedTo
- Ha = HO["Adjoint"].appliedInXTo
- #
- # Précalcul des inversions de B et R
- BT = B.getT()
- RI = R.getI()
- #
- # Point de démarrage de l'optimisation
- Xini = numpy.zeros(Xb.shape)
- #
- # Définition de la fonction-coût
- # ------------------------------
- def CostFunction(v):
- _V = numpy.asmatrix(numpy.ravel( v )).T
- _X = Xb + B * _V
- if selfA._parameters["StoreInternalVariables"] or \
- selfA._toStore("CurrentState") or \
- selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentState"].store( _X )
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
- _Innovation = Y - _HX
- if selfA._toStore("SimulatedObservationAtCurrentState") or \
- selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
#
- Jb = float( 0.5 * _V.T * BT * _V )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
- J = Jb + Jo
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
#
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- if selfA._toStore("IndexOfOptimum") or \
- selfA._toStore("CurrentOptimum") or \
- selfA._toStore("CostFunctionJAtCurrentOptimum") or \
- selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
- selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
- selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- return J
- #
- def GradientOfCostFunction(v):
- _V = numpy.asmatrix(numpy.ravel( v )).T
- _X = Xb + B * _V
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
- GradJb = BT * _V
- GradJo = - Ha( (_X, RI * (Y - _HX)) )
- GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
- return GradJ
- #
- # Minimisation de la fonctionnelle
- # --------------------------------
- nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
- #
- if selfA._parameters["Minimizer"] == "LBFGSB":
- if "0.19" <= scipy.version.version <= "1.1.0":
- import lbfgsbhlt as optimiseur
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
+ Xn_predicted = EMX + qi
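+ # Stochastic forecast: each member has been propagated through M with the control Un and is
+ # perturbed with model-error noise drawn from N(0, Qn).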
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is applied twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm * Un
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+ # --- > By principle, M = Id, Q = 0
+ Xn_predicted = Xn
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ #
+ # Mean of forecast and observation of forecast
+ Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ # Anomalies
+ EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
+ EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
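+ # Notation shared by the variants below: EaX are the forecast anomalies about Xfm and EaHX
+ # the anomalies of the observed ensemble about Hfm; the 1/sqrt(m-1) scaling is applied
+ # inside each variant.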
+ #
+ #--------------------------
+ if VariantM == "KalmanFilterFormula":
+ mS = RIdemi * EaHX / math.sqrt(__m-1)
+ delta = RIdemi * ( Ynpu - Hfm )
+ mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+ vw = mT @ mS.T @ delta
+ #
+ Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+ mU = numpy.identity(__m)
+ #
+ EaX = EaX / math.sqrt(__m-1)
+ Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
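+ # In matrix form this is the deterministic ETKF square-root update:
+ #   S = R^{-1/2} EaHX / sqrt(m-1),  T = (I + S.T S)^{-1},  w = T S.T R^{-1/2} (y - Hfm),
+ #   Xa = Xfm + (EaX / sqrt(m-1)) @ (w + sqrt(m-1) * sqrtm(T)),  applied column-wise per member.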
+ #--------------------------
+ elif VariantM == "Variational":
+ HXfm = H((Xfm[:,None], Un)) # Hfm could be used instead
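+ # Weight-space variational formulation: minimise J(w) = 0.5*(d - EaHX w).T R^{-1} (d - EaHX w)
+ # + 0.5*(m-1)*w.T w with d = y - H(Xfm); the analysis anomalies are then built from the
+ # symmetric square root of (m-1) times the inverse Hessian Pta.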
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * (__m-1) * w.T @ w
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = (__m-1) * w.reshape((__m,1))
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = (__m-1) * numpy.identity(__m)
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
+ #
+ Xn = Xfm + EaX @ (vw[:,None] + EWa)
+ #--------------------------
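+ # The three "FiniteSize" variants below appear to implement finite-size (EnKF-N type) gauges,
+ # per the Boc2011/Boc2015/Boc2016 tags (presumably after Bocquet and co-authors): the Gaussian
+ # prior term 0.5*(m-1)*w.T w is replaced by a logarithmic term accounting for the sampling
+ # error of the ensemble mean and covariance.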
+ elif VariantM == "FiniteSize11": # Boc2011 gauge
+ HXfm = H((Xfm[:,None], Un)) # Hfm could be used instead
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = __m * \
+ ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) ) \
+ / (1 + 1/__m + vw.T @ vw)**2
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
+ #
+ Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+ #--------------------------
+ elif VariantM == "FiniteSize15": # Boc2015 gauge
+ HXfm = H((Xfm[:,None], Un)) # Hfm could be used instead
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = (__m+1) * \
+ ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) ) \
+ / (1 + 1/__m + vw.T @ vw)**2
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
+ #
+ Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+ #--------------------------
+ elif VariantM == "FiniteSize16": # Boc2016 gauge
+ HXfm = H((Xfm[:,None], Un)) # Hfm could be used instead
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = ((__m+1) / (__m-1)) * \
+ ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) / (__m-1) ) \
+ / (1 + 1/__m + vw.T @ vw / (__m-1))**2
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
+ #
+ Xn = Xfm + EaX @ (vw[:,None] + EWa)
+ #--------------------------
else:
- import scipy.optimize as optimiseur
- Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
- func = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- bounds = selfA._parameters["Bounds"],
- maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
- factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
- pgtol = selfA._parameters["ProjectedGradientTolerance"],
- iprint = selfA._parameters["optiprint"],
- )
- nfeval = Informations['funcalls']
- rc = Informations['warnflag']
- elif selfA._parameters["Minimizer"] == "TNC":
- Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
- func = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- bounds = selfA._parameters["Bounds"],
- maxfun = selfA._parameters["MaximumNumberOfSteps"],
- pgtol = selfA._parameters["ProjectedGradientTolerance"],
- ftol = selfA._parameters["CostDecrementTolerance"],
- messages = selfA._parameters["optmessages"],
- )
- elif selfA._parameters["Minimizer"] == "CG":
- Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- gtol = selfA._parameters["GradientNormTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- elif selfA._parameters["Minimizer"] == "NCG":
- Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- avextol = selfA._parameters["CostDecrementTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- elif selfA._parameters["Minimizer"] == "BFGS":
- Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- gtol = selfA._parameters["GradientNormTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- else:
- raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ #--------------------------
+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> with the analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> with the current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( EMX )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+ # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ # ---> For the smoothers
+ if selfA._toStore("CurrentEnsembleState"):
+ selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
#
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ # Additional final storage of the optimum in parameter estimation
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
#
- # Correction pour pallier a un bug de TNC sur le retour du Minimum
- # ----------------------------------------------------------------
- if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
- Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
- Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
- else:
- Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
+ return 0
+
+# ==============================================================================
+def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
+ BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+ """
+ Iterative EnKF
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
#
- # Obtention de l'analyse
- # ----------------------
- Xa = Minimum
+ # Operators
+ # ----------
+ H = HO["Direct"].appliedControledFormTo
#
- selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
#
- if selfA._toStore("OMA") or \
- selfA._toStore("SigmaObs2") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("SimulatedObservationAtOptimum"):
- if selfA._toStore("SimulatedObservationAtCurrentState"):
- HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
- elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
- else:
- HXa = Hm( Xa )
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
#
- # Calcul de la covariance d'analyse
- # ---------------------------------
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("JacobianMatrixAtOptimum") or \
- selfA._toStore("KalmanGainAtOptimum"):
- HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
- HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("KalmanGainAtOptimum"):
- HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
- HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles"):
+ # Number of steps identical to the number of observation steps
+ # -------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Precompute the inverses of B and R
+ # ----------------------------------
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
BI = B.getI()
- HessienneI = []
- nb = Xa.size
- for i in range(nb):
- _ee = numpy.matrix(numpy.zeros(nb)).T
- _ee[i] = 1.
- _HtEE = numpy.dot(HtM,_ee)
- _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
- HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
- HessienneI = numpy.matrix( HessienneI )
- A = HessienneI.I
- if min(A.shape) != max(A.shape):
- raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
- if (numpy.diag(A) < 0).any():
- raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
- if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
- try:
- L = numpy.linalg.cholesky( A )
- except:
- raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( A )
- if selfA._toStore("JacobianMatrixAtOptimum"):
- selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
- if selfA._toStore("KalmanGainAtOptimum"):
- if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
- elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
- selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ RI = R.getI()
#
- # Calculs et/ou stockages supplémentaires
- # ---------------------------------------
- if selfA._toStore("Innovation") or \
- selfA._toStore("SigmaObs2") or \
- selfA._toStore("MahalanobisConsistency") or \
- selfA._toStore("OMB"):
- d = Y - HXb
- if selfA._toStore("Innovation"):
- selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
- if selfA._toStore("OMA"):
- selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
- if selfA._toStore("OMB"):
- selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
- if selfA._toStore("SigmaObs2"):
- TraceR = R.trace(Y.size)
- selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
- if selfA._toStore("MahalanobisConsistency"):
- selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
- if selfA._toStore("SimulationQuantiles"):
- nech = selfA._parameters["NumberOfSamplesForQuantiles"]
- HXa = numpy.matrix(numpy.ravel( HXa )).T
- YfQ = None
- for i in range(nech):
- if selfA._parameters["SimulationForQuantiles"] == "Linear":
- dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
- dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
- Yr = HXa + dYr
- elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
- Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
- Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
- if YfQ is None:
- YfQ = Yr
+ # Initialisation
+ # --------------
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+ else: Rn = R
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
+ #
+ previousJMinimum = numpy.finfo(float).max
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # assumption: offset of the cost values stored before this run
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
else:
- YfQ = numpy.hstack((YfQ,Yr))
- YfQ.sort(axis=-1)
- YQ = None
- for quantile in selfA._parameters["Quantiles"]:
- if not (0. <= float(quantile) <= 1.): continue
- indice = int(nech * float(quantile) - 1./nech)
- if YQ is None: YQ = YfQ[:,indice]
- else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
- selfA.StoredVariables["SimulationQuantiles"].store( YQ )
- if selfA._toStore("SimulatedObservationAtBackground"):
- selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
- if selfA._toStore("SimulatedObservationAtOptimum"):
- selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ #--------------------------
+ if VariantM == "IEnKF12":
+ Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
+ EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
+ __j = 0
+ Deltaw = 1
+ if not BnotT:
+ Ta = numpy.identity(__m)
+ vw = numpy.zeros(__m)
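+ # IEnKF12 iteration sketch: a Gauss-Newton loop in weight space. At each pass the ensemble is
+ # rebuilt around the current weighted mean, either with the finite-difference "bundle" scaling
+ # _epsilon (BnotT=True) or with the transform Ta, and the increment solves
+ # mH @ Deltaw = -GradJ with the approximate Hessian mH = I + EaY.T R^{-1} EaY.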
+ while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+ vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+ #
+ if BnotT:
+ E1 = vx1 + _epsilon * EaX
+ else:
+ E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
+ E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ # --- > By principle, M = Id
+ E2 = Xn
+ vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ vy1 = H((vx2, Un)).reshape((__p,1))
+ #
+ HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ if BnotT:
+ EaY = (HE2 - vy2) / _epsilon
+ else:
+ EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+ #
+ GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
+ mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
+ Deltaw = - numpy.linalg.solve(mH,GradJ)
+ #
+ vw = vw + Deltaw
+ #
+ if not BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ #
+ __j = __j + 1
+ #
+ A2 = EnsembleOfAnomalies( E2 )
+ #
+ if BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
+ #
+ Xn = vx2 + A2
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ #--------------------------
+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> with the analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> with the current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( E2 )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( E2 - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+ # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ #
+ # Additional final storage of the optimum in parameter estimation
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
#
return 0
return 0
# ==============================================================================
-def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
+ BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
"""
- 3DVAR PSAS
+ Maximum Likelihood Ensemble Filter
"""
- #
- # Initialisations
- # ---------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
#
# Operators
- Hm = HO["Direct"].appliedTo
+ # ----------
+ H = HO["Direct"].appliedControledFormTo
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
- if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
- HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
else:
- HXb = Hm( Xb )
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
- if Y.size != HXb.size:
- raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
- if max(Y.shape) != max(HXb.shape):
- raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
- #
- if selfA._toStore("JacobianMatrixAtBackground"):
- HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
- HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
- selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
- #
- Ht = HO["Tangent"].asMatrix(Xb)
- BHT = B * Ht.T
- HBHTpR = R + Ht * BHT
- Innovation = Y - HXb
- #
- # Point de démarrage de l'optimisation
- Xini = numpy.zeros(Xb.shape)
- #
- # Définition de la fonction-coût
- # ------------------------------
- def CostFunction(w):
- _W = numpy.asmatrix(numpy.ravel( w )).T
- if selfA._parameters["StoreInternalVariables"] or \
- selfA._toStore("CurrentState") or \
- selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
- if selfA._toStore("SimulatedObservationAtCurrentState") or \
- selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
- #
- Jb = float( 0.5 * _W.T * HBHTpR * _W )
- Jo = float( - _W.T * Innovation )
- J = Jb + Jo
- #
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- if selfA._toStore("IndexOfOptimum") or \
- selfA._toStore("CurrentOptimum") or \
- selfA._toStore("CostFunctionJAtCurrentOptimum") or \
- selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
- selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
- selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- return J
- #
- def GradientOfCostFunction(w):
- _W = numpy.asmatrix(numpy.ravel( w )).T
- GradJb = HBHTpR * _W
- GradJo = - Innovation
- GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
- return GradJ
- #
- # Minimisation de la fonctionnelle
- # --------------------------------
- nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ Cm = None
#
- if selfA._parameters["Minimizer"] == "LBFGSB":
- if "0.19" <= scipy.version.version <= "1.1.0":
- import lbfgsbhlt as optimiseur
- else:
- import scipy.optimize as optimiseur
- Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
- func = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- bounds = selfA._parameters["Bounds"],
- maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
- factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
- pgtol = selfA._parameters["ProjectedGradientTolerance"],
- iprint = selfA._parameters["optiprint"],
- )
- nfeval = Informations['funcalls']
- rc = Informations['warnflag']
- elif selfA._parameters["Minimizer"] == "TNC":
- Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
- func = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- bounds = selfA._parameters["Bounds"],
- maxfun = selfA._parameters["MaximumNumberOfSteps"],
- pgtol = selfA._parameters["ProjectedGradientTolerance"],
- ftol = selfA._parameters["CostDecrementTolerance"],
- messages = selfA._parameters["optmessages"],
- )
- elif selfA._parameters["Minimizer"] == "CG":
- Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- gtol = selfA._parameters["GradientNormTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- elif selfA._parameters["Minimizer"] == "NCG":
- Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- avextol = selfA._parameters["CostDecrementTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
- elif selfA._parameters["Minimizer"] == "BFGS":
- Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
- f = CostFunction,
- x0 = Xini,
- fprime = GradientOfCostFunction,
- args = (),
- maxiter = selfA._parameters["MaximumNumberOfSteps"],
- gtol = selfA._parameters["GradientNormTolerance"],
- disp = selfA._parameters["optdisp"],
- full_output = True,
- )
+ # Nombre de pas identique au nombre de pas d'observations
+ # -------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
else:
- raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+ duration = 2
+ __p = numpy.array(Y).size
#
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ # Précalcul des inversions de B et R
+ # ----------------------------------
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
#
- # Correction pour pallier a un bug de TNC sur le retour du Minimum
- # ----------------------------------------------------------------
- if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
- Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
- Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
- else:
- Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
+ # Initialisation
+ # --------------
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+ else: Rn = R
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
#
- # Obtention de l'analyse
- # ----------------------
- Xa = Minimum
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
#
- selfA.StoredVariables["Analysis"].store( Xa )
+ previousJMinimum = numpy.finfo(float).max
#
- if selfA._toStore("OMA") or \
- selfA._toStore("SigmaObs2") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("SimulatedObservationAtOptimum"):
- if selfA._toStore("SimulatedObservationAtCurrentState"):
- HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
- elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
else:
- HXa = Hm( Xa )
- #
- # Calcul de la covariance d'analyse
- # ---------------------------------
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("JacobianMatrixAtOptimum") or \
- selfA._toStore("KalmanGainAtOptimum"):
- HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
- HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles") or \
- selfA._toStore("KalmanGainAtOptimum"):
- HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
- HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
- if selfA._toStore("APosterioriCovariance") or \
- selfA._toStore("SimulationQuantiles"):
- BI = B.getI()
- RI = R.getI()
- HessienneI = []
- nb = Xa.size
- for i in range(nb):
- _ee = numpy.matrix(numpy.zeros(nb)).T
- _ee[i] = 1.
- _HtEE = numpy.dot(HtM,_ee)
- _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
- HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
- HessienneI = numpy.matrix( HessienneI )
- A = HessienneI.I
- if min(A.shape) != max(A.shape):
- raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
- if (numpy.diag(A) < 0).any():
- raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
- if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
- try:
- L = numpy.linalg.cholesky( A )
- except:
- raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( A )
- if selfA._toStore("JacobianMatrixAtOptimum"):
- selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
- if selfA._toStore("KalmanGainAtOptimum"):
- if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
- elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
- selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
- #
- # Calculs et/ou stockages supplémentaires
- # ---------------------------------------
- if selfA._toStore("Innovation") or \
- selfA._toStore("SigmaObs2") or \
- selfA._toStore("MahalanobisConsistency") or \
- selfA._toStore("OMB"):
- d = Y - HXb
- if selfA._toStore("Innovation"):
- selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
- if selfA._toStore("OMA"):
- selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
- if selfA._toStore("OMB"):
- selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
- if selfA._toStore("SigmaObs2"):
- TraceR = R.trace(Y.size)
- selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
- if selfA._toStore("MahalanobisConsistency"):
- selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
- if selfA._toStore("SimulationQuantiles"):
- nech = selfA._parameters["NumberOfSamplesForQuantiles"]
- HXa = numpy.matrix(numpy.ravel( HXa )).T
- YfQ = None
- for i in range(nech):
- if selfA._parameters["SimulationForQuantiles"] == "Linear":
- dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
- dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
- Yr = HXa + dYr
- elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
- Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
- Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
- if YfQ is None:
- YfQ = Yr
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
else:
- YfQ = numpy.hstack((YfQ,Yr))
- YfQ.sort(axis=-1)
- YQ = None
- for quantile in selfA._parameters["Quantiles"]:
- if not (0. <= float(quantile) <= 1.): continue
- indice = int(nech * float(quantile) - 1./nech)
- if YQ is None: YQ = YfQ[:,indice]
- else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
- selfA.StoredVariables["SimulationQuantiles"].store( YQ )
- if selfA._toStore("SimulatedObservationAtBackground"):
- selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
- if selfA._toStore("SimulatedObservationAtOptimum"):
- selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
+ Xn_predicted = EMX + qi
+ if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm * Un
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+ # --- > Par principe, M = Id, Q = 0
+ Xn_predicted = Xn
+ #
+ #--------------------------
+ if VariantM == "MLEF13":
+ Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
+ EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
+ Ua = numpy.identity(__m)
+ __j = 0
+ Deltaw = 1
+ if not BnotT:
+ Ta = numpy.identity(__m)
+ vw = numpy.zeros(__m)
+ while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+ vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+ #
+ if BnotT:
+ E1 = vx1 + _epsilon * EaX
+ else:
+ E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+ #
+ HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ if BnotT:
+ EaY = (HE2 - vy2) / _epsilon
+ else:
+ EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+ #
+ GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
+ mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
+ Deltaw = - numpy.linalg.solve(mH,GradJ)
+ #
+ vw = vw + Deltaw
+ #
+ if not BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ #
+ __j = __j + 1
+ #
+ if BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ #
+ Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ #--------------------------
+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> avec analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> avec current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( EMX )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( EMX - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+ # ---> autres
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ #
+ # Stockage final supplémentaire de l'optimum en estimation de paramètres
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
#
return 0
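+
+# Illustrative sketch (hypothetical helper, not part of the ADAO API): the core
+# of the MLEF13 loop above is a Gauss-Newton step on the ensemble-space weights
+# vw, solving (I + EaY' R^-1 EaY) Deltaw = -GradJ. The toy problem below, with
+# an invented linear observation operator and R = I, mirrors that single step
+# and only assumes the module-level numpy and math imports.
+def _exemple_mlef_pas_gauss_newton():
+    "Sketch: one MLEF ensemble-space increment on a toy linear problem"
+    numpy.random.seed(0)
+    n, m, p = 4, 5, 3                        # state size, members, observations
+    Hlin = numpy.random.standard_normal((p, n))
+    Xens = numpy.random.standard_normal((n, m))
+    yobs = numpy.random.standard_normal((p, 1))
+    RI   = numpy.identity(p)                 # R = I, hence R^-1 = I
+    xf   = Xens.mean(axis=1).reshape((n, 1))
+    EaX  = (Xens - xf) / math.sqrt(m - 1)    # state anomalies
+    EaY  = Hlin @ EaX                        # observation anomalies (linear H)
+    vw   = numpy.zeros((m, 1))               # current ensemble-space weights
+    d    = yobs - Hlin @ (xf + EaX @ vw)     # innovation at the current iterate
+    GradJ  = vw - EaY.T @ (RI @ d)
+    mH     = numpy.identity(m) + EaY.T @ (RI @ EaY)
+    Deltaw = -numpy.linalg.solve(mH, GradJ)
+    return vw + Deltaw
+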
# ==============================================================================
-def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+def mmqr(
+ func = None,
+ x0 = None,
+ fprime = None,
+ bounds = None,
+ quantile = 0.5,
+ maxfun = 15000,
+ toler = 1.e-06,
+ y = None,
+ ):
"""
- 4DVAR
+ Implémentation informatique de l'algorithme MMQR, basée sur la publication :
+ David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
+ Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
"""
#
- # Initialisations
- # ---------------
+ # Recuperation des donnees et informations initiales
+ # --------------------------------------------------
+ variables = numpy.ravel( x0 )
+ mesures = numpy.ravel( y )
+ increment = sys.float_info[0]
+ p = variables.size
+ n = mesures.size
+ quantile = float(quantile)
#
- # Opérateurs
- Hm = HO["Direct"].appliedControledFormTo
- Mm = EM["Direct"].appliedControledFormTo
+ # Calcul des parametres du MM
+ # ---------------------------
+ tn = float(toler) / n
+ e0 = -tn / math.log(tn)
+ epsilon = (e0-tn)/(1+math.log(e0))
#
- if CM is not None and "Tangent" in CM and U is not None:
- Cm = CM["Tangent"].asMatrix(Xb)
- else:
- Cm = None
+ # Calculs d'initialisation
+ # ------------------------
+ residus = mesures - numpy.ravel( func( variables ) )
+ poids = 1./(epsilon+numpy.abs(residus))
+ veps = 1. - 2. * quantile - residus * poids
+ lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
+ iteration = 0
#
- def Un(_step):
- if U is not None:
- if hasattr(U,"store") and 1<=_step<len(U) :
- _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
- elif hasattr(U,"store") and len(U)==1:
- _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
- else:
- _Un = numpy.asmatrix(numpy.ravel( U )).T
- else:
- _Un = None
- return _Un
- def CmUn(_xn,_un):
- if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
- _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
- _CmUn = _Cm * _un
- else:
- _CmUn = 0.
- return _CmUn
+ # Recherche iterative
+ # -------------------
+ while (increment > toler) and (iteration < maxfun) :
+ iteration += 1
+ #
+ Derivees = numpy.array(fprime(variables))
+ Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
+ DeriveesT = Derivees.transpose()
+ M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
+ SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
+ step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
+ #
+ variables = variables + step
+ if bounds is not None:
+ # Attention : boucle infinie à éviter si un intervalle est trop petit
+ while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
+ step = step/2.
+ variables = variables - step
+ residus = mesures - numpy.ravel( func(variables) )
+ surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
+ #
+ while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
+ step = step/2.
+ variables = variables - step
+ residus = mesures - numpy.ravel( func(variables) )
+ surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
+ #
+ increment = lastsurrogate-surrogate
+ poids = 1./(epsilon+numpy.abs(residus))
+ veps = 1. - 2. * quantile - residus * poids
+ lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
#
- # Remarque : les observations sont exploitées à partir du pas de temps
- # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
- # Donc le pas 0 n'est pas utilisé puisque la première étape commence
- # avec l'observation du pas 1.
+ # Mesure d'écart
+ # --------------
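+    # Ecart is the value of the quantile-regression criterion, i.e. the sum of
+    # the check function rho_quantile(r) = r*(quantile - 1_{r<0}) over the residuals.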
+ Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
+ #
+ return variables, Ecart, [n,p,iteration,increment,0]
+
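+# Illustrative sketch (hypothetical helper, not part of the ADAO API): a median
+# regression (quantile = 0.5) of a straight line with mmqr. The data, model and
+# Jacobian below are invented for the demonstration; only the module-level
+# numpy import is assumed.
+def _exemple_mmqr_droite_mediane():
+    "Sketch: fit y = a*t + b at the 0.5 quantile (median) with mmqr"
+    t = numpy.linspace(0., 1., 50)
+    yobs = 2.0 * t + 1.0 + 0.1 * numpy.random.standard_normal(50)
+    modele = lambda coef: coef[0] * t + coef[1]                      # direct model
+    jacobien = lambda coef: numpy.vstack((t, numpy.ones_like(t))).T  # shape (n, p)
+    coef, ecart, infos = mmqr(
+        func     = modele,
+        x0       = numpy.array([0., 0.]),
+        fprime   = jacobien,
+        quantile = 0.5,
+        y        = yobs,
+        )
+    return coef, ecart, infos
+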
+# ==============================================================================
+def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
+ """
+ 3DVAR multi-pas et multi-méthodes
+ """
+ #
+ # Initialisation
+ # --------------
+ Xn = numpy.ravel(Xb).reshape((-1,1))
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedTo
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xn )
+ if selfA._toStore("APosterioriCovariance"):
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
+ else: Pn = B
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xn )
#
- # Nombre de pas identique au nombre de pas d'observations
if hasattr(Y,"stepnumber"):
duration = Y.stepnumber()
else:
duration = 2
#
- # Précalcul des inversions de B et R
- BI = B.getI()
- RI = R.getI()
+ # Multi-pas
+ # ---------
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((-1,1))
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast
+ Xn = selfA.StoredVariables["Analysis"][-1]
+ Xn_predicted = M( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+ elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
+ # --- > Par principe, M = Id, Q = 0
+ Xn_predicted = Xn
+ Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
+ #
+ oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
+ #
+ return 0
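+
+# Typical wiring (an assumption about the caller, based on the signatures in
+# this module): "oneCycle" is one of the elementary single-step kernels such as
+# std3dvar or psas3dvar, so a multi-step run reads, for instance:
+#     multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle=std3dvar)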
+
+# ==============================================================================
+def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ 3DVAR PSAS
+ """
+ #
+ # Initialisations
+ # ---------------
+ #
+ # Opérateurs
+ Hm = HO["Direct"].appliedTo
+ #
+ # Utilisation éventuelle d'un vecteur H(Xb) précalculé
+ if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+ HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ else:
+ HXb = Hm( Xb )
+ HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
+ #
+ if selfA._toStore("JacobianMatrixAtBackground"):
+ HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+ HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+ selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+ #
+ Ht = HO["Tangent"].asMatrix(Xb)
+ BHT = B * Ht.T
+ HBHTpR = R + Ht * BHT
+ Innovation = Y - HXb
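+    # Dual (PSAS) formulation: the minimisation is carried out on the
+    # observation-space vector w, with J(w) = 0.5*w'(H B H' + R)w - w'(Y - H(Xb)),
+    # and the analysis is then recovered as Xa = Xb + B H' w.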
#
# Point de démarrage de l'optimisation
- Xini = selfA._parameters["InitializationPoint"]
+ Xini = numpy.zeros(Xb.shape)
#
# Définition de la fonction-coût
# ------------------------------
- selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
- selfA.DirectInnovation = [None,] # Le pas 0 n'est pas observé
- def CostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
+ def CostFunction(w):
+ _W = numpy.asmatrix(numpy.ravel( w )).T
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentState"].store( _X )
- Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
- selfA.DirectCalculation = [None,]
- selfA.DirectInnovation = [None,]
- Jo = 0.
- _Xn = _X
- for step in range(0,duration-1):
- if hasattr(Y,"store"):
- _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
- else:
- _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
- _Un = Un(step)
- #
- # Etape d'évolution
- if selfA._parameters["EstimationOf"] == "State":
- _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
- elif selfA._parameters["EstimationOf"] == "Parameters":
- pass
- #
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
- _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
- #
- # Etape de différence aux observations
- if selfA._parameters["EstimationOf"] == "State":
- _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
- elif selfA._parameters["EstimationOf"] == "Parameters":
- _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
- #
- # Stockage de l'état
- selfA.DirectCalculation.append( _Xn )
- selfA.DirectInnovation.append( _YmHMX )
- #
- # Ajout dans la fonctionnelle d'observation
- Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
- J = Jb + Jo
+ selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
+ #
+ Jb = float( 0.5 * _W.T * HBHTpR * _W )
+ Jo = float( - _W.T * Innovation )
+ J = Jb + Jo
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA._toStore("CurrentOptimum") or \
selfA._toStore("CostFunctionJAtCurrentOptimum") or \
selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
- selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
if selfA._toStore("IndexOfOptimum"):
selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
if selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
return J
#
- def GradientOfCostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
- GradJb = BI * (_X - Xb)
- GradJo = 0.
- for step in range(duration-1,0,-1):
- # Étape de récupération du dernier stockage de l'évolution
- _Xn = selfA.DirectCalculation.pop()
- # Étape de récupération du dernier stockage de l'innovation
- _YmHMX = selfA.DirectInnovation.pop()
- # Calcul des adjoints
- Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
- Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
- Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
- Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
- # Calcul du gradient par état adjoint
- GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
- GradJo = Ma * GradJo # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
- GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
+ def GradientOfCostFunction(w):
+ _W = numpy.asmatrix(numpy.ravel( w )).T
+ GradJb = HBHTpR * _W
+ GradJo = - Innovation
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
return GradJ
#
# Minimisation de la fonctionnelle
# ----------------------------------------------------------------
if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
+ else:
+ Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
#
# Obtention de l'analyse
# ----------------------
- Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ Xa = Minimum
#
selfA.StoredVariables["Analysis"].store( Xa )
#
- # Calculs et/ou stockages supplémentaires
- # ---------------------------------------
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
- #
- return 0
-
-# ==============================================================================
-def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
- """
- Stochastic EnKF
- """
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA._parameters["StoreInternalVariables"] = True
- #
- # Opérateurs
- # ----------
- H = HO["Direct"].appliedControledFormTo
- #
- if selfA._parameters["EstimationOf"] == "State":
- M = EM["Direct"].appliedControledFormTo
- #
- if CM is not None and "Tangent" in CM and U is not None:
- Cm = CM["Tangent"].asMatrix(Xb)
- else:
- Cm = None
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ else:
+ HXa = Hm( Xa )
#
- # Nombre de pas identique au nombre de pas d'observations
- # -------------------------------------------------------
- if hasattr(Y,"stepnumber"):
+ # Calcul de la covariance d'analyse
+ # ---------------------------------
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("JacobianMatrixAtOptimum") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+ HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+ HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles"):
+ BI = B.getI()
+ RI = R.getI()
+ HessienneI = []
+ nb = Xa.size
+ for i in range(nb):
+ _ee = numpy.matrix(numpy.zeros(nb)).T
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee)
+ _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+ HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
+ HessienneI = numpy.matrix( HessienneI )
+ A = HessienneI.I
+            if min(A.shape) != max(A.shape):
+                raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator; please check it."%(selfA._name,str(A.shape)))
+            if (numpy.diag(A) < 0).any():
+                raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator; please check it."%(selfA._name,))
+ if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
+ try:
+ L = numpy.linalg.cholesky( A )
+                except numpy.linalg.LinAlgError:
+ raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( A )
+ if selfA._toStore("JacobianMatrixAtOptimum"):
+ selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+ if selfA._toStore("KalmanGainAtOptimum"):
+ if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+ elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+ selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ #
+ # Calculs et/ou stockages supplémentaires
+ # ---------------------------------------
+ if selfA._toStore("Innovation") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("MahalanobisConsistency") or \
+ selfA._toStore("OMB"):
+ d = Y - HXb
+ if selfA._toStore("Innovation"):
+ selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if selfA._toStore("OMA"):
+ selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if selfA._toStore("OMB"):
+ selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if selfA._toStore("SigmaObs2"):
+ TraceR = R.trace(Y.size)
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ if selfA._toStore("MahalanobisConsistency"):
+ selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+ if selfA._toStore("SimulationQuantiles"):
+ nech = selfA._parameters["NumberOfSamplesForQuantiles"]
+ HXa = numpy.matrix(numpy.ravel( HXa )).T
+ YfQ = None
+ for i in range(nech):
+ if selfA._parameters["SimulationForQuantiles"] == "Linear":
+ dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
+ dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
+ Yr = HXa + dYr
+ elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
+ Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
+ Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+ if YfQ is None:
+ YfQ = Yr
+ else:
+ YfQ = numpy.hstack((YfQ,Yr))
+ YfQ.sort(axis=-1)
+ YQ = None
+ for quantile in selfA._parameters["Quantiles"]:
+ if not (0. <= float(quantile) <= 1.): continue
+ indice = int(nech * float(quantile) - 1./nech)
+ if YQ is None: YQ = YfQ[:,indice]
+ else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
+ selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+ if selfA._toStore("SimulatedObservationAtBackground"):
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if selfA._toStore("SimulatedObservationAtOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ #
+ return 0
+
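+# Illustrative sketch (hypothetical helper, not part of the ADAO API): for a
+# linear observation operator the PSAS analysis Xa = Xb + B H'(H B H' + R)^-1 d
+# coincides with the model-space formula Xa = Xb + (B^-1 + H' R^-1 H)^-1 H' R^-1 d.
+# The toy check below uses invented random matrices and assumes only the
+# module-level numpy import.
+def _exemple_psas_equivalence_lineaire():
+    "Sketch: numerical check of the PSAS / model-space 3DVAR equivalence"
+    numpy.random.seed(0)
+    n, p = 5, 3
+    H  = numpy.random.standard_normal((p, n))
+    L  = numpy.random.standard_normal((n, n))
+    B  = L @ L.T + n * numpy.identity(n)      # symmetric positive definite
+    R  = numpy.identity(p)
+    xb = numpy.random.standard_normal((n, 1))
+    y  = numpy.random.standard_normal((p, 1))
+    d  = y - H @ xb                           # innovation
+    w  = numpy.linalg.solve(H @ B @ H.T + R, d)
+    xa_psas  = xb + B @ H.T @ w
+    K = numpy.linalg.solve(numpy.linalg.inv(B) + H.T @ H, H.T)   # R = I here
+    xa_model = xb + K @ d
+    assert numpy.allclose(xa_psas, xa_model)
+    return xa_psas
+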
+# ==============================================================================
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
+ """
+ Stochastic EnKF
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ #
+ # Opérateurs
+ # ----------
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Nombre de pas identique au nombre de pas d'observations
+ # -------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
duration = Y.stepnumber()
__p = numpy.cumprod(Y.shape())[-1]
else:
return 0
# ==============================================================================
-def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
+def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
"""
- Ensemble-Transform EnKF
+ 3DVAR
"""
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA._parameters["StoreInternalVariables"] = True
#
- # Opérateurs
- # ----------
- H = HO["Direct"].appliedControledFormTo
+ # Initialisations
+ # ---------------
#
- if selfA._parameters["EstimationOf"] == "State":
- M = EM["Direct"].appliedControledFormTo
+ # Opérateurs
+ Hm = HO["Direct"].appliedTo
+ Ha = HO["Adjoint"].appliedInXTo
#
- if CM is not None and "Tangent" in CM and U is not None:
- Cm = CM["Tangent"].asMatrix(Xb)
+ # Utilisation éventuelle d'un vecteur H(Xb) précalculé
+ if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+ HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
else:
- Cm = None
+ HXb = Hm( Xb )
+ HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
#
- # Nombre de pas identique au nombre de pas d'observations
- # -------------------------------------------------------
- if hasattr(Y,"stepnumber"):
- duration = Y.stepnumber()
- __p = numpy.cumprod(Y.shape())[-1]
- else:
- duration = 2
- __p = numpy.array(Y).size
+ if selfA._toStore("JacobianMatrixAtBackground"):
+ HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+ HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+ selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
#
# Précalcul des inversions de B et R
- # ----------------------------------
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- BI = B.getI()
- RI = R.getI()
- elif VariantM != "KalmanFilterFormula":
- RI = R.getI()
- if VariantM == "KalmanFilterFormula":
- RIdemi = R.sqrtmI()
- #
- # Initialisation
- # --------------
- __n = Xb.size
- __m = selfA._parameters["NumberOfMembers"]
- if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
- else: Pn = B
- if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
- else: Rn = R
- if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
- else: Qn = Q
- Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
- #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
- #
- if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
- selfA.StoredVariables["Analysis"].store( Xb )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( Pn )
- covarianceXa = Pn
+ BI = B.getI()
+ RI = R.getI()
#
- previousJMinimum = numpy.finfo(float).max
+ # Point de démarrage de l'optimisation
+ Xini = selfA._parameters["InitializationPoint"]
#
- for step in range(duration-1):
- if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
- else:
- Ynpu = numpy.ravel( Y ).reshape((__p,1))
- #
- if U is not None:
- if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
- elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
- else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
- else:
- Un = None
- #
- if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
- Xn = CovarianceInflation( Xn,
- selfA._parameters["InflationType"],
- selfA._parameters["InflationFactor"],
- )
+ # Définition de la fonction-coût
+ # ------------------------------
+ def CostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( _X )
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _Innovation = Y - _HX
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
#
- if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
- EMX = M( [(Xn[:,i], Un) for i in range(__m)],
- argsAsSerie = True,
- returnSerieAsArrayMatrix = True )
- qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
- Xn_predicted = EMX + qi
- HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
- argsAsSerie = True,
- returnSerieAsArrayMatrix = True )
- if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
- Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
- elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
- # --- > Par principe, M = Id, Q = 0
- Xn_predicted = Xn
- HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
- argsAsSerie = True,
- returnSerieAsArrayMatrix = True )
+ Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
#
- # Mean of forecast and observation of forecast
- Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
- #
- # Anomalies
- EaX = EnsembleOfAnomalies( Xn_predicted )
- EaHX = numpy.array(HX_predicted - Hfm)
- #
- #--------------------------
- if VariantM == "KalmanFilterFormula":
- mS = RIdemi * EaHX / math.sqrt(__m-1)
- delta = RIdemi * ( Ynpu - Hfm )
- mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
- vw = mT @ mS.T @ delta
- #
- Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
- mU = numpy.identity(__m)
- #
- EaX = EaX / math.sqrt(__m-1)
- Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
- #--------------------------
- elif VariantM == "Variational":
- HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
- def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _Jo = 0.5 * _A.T @ (RI * _A)
- _Jb = 0.5 * (__m-1) * w.T @ w
- _J = _Jo + _Jb
- return float(_J)
- def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _GardJo = - EaHX.T @ (RI * _A)
- _GradJb = (__m-1) * w.reshape((__m,1))
- _GradJ = _GardJo + _GradJb
- return numpy.ravel(_GradJ)
- vw = scipy.optimize.fmin_cg(
- f = CostFunction,
- x0 = numpy.zeros(__m),
- fprime = GradientOfCostFunction,
- args = (),
- disp = False,
- )
- #
- Hto = EaHX.T @ (RI * EaHX)
- Htb = (__m-1) * numpy.identity(__m)
- Hta = Hto + Htb
- #
- Pta = numpy.linalg.inv( Hta )
- EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
- #
- Xn = Xfm + EaX @ (vw[:,None] + EWa)
- #--------------------------
- elif VariantM == "FiniteSize11": # Jauge Boc2011
- HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
- def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _Jo = 0.5 * _A.T @ (RI * _A)
- _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
- _J = _Jo + _Jb
- return float(_J)
- def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _GardJo = - EaHX.T @ (RI * _A)
- _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
- _GradJ = _GardJo + _GradJb
- return numpy.ravel(_GradJ)
- vw = scipy.optimize.fmin_cg(
- f = CostFunction,
- x0 = numpy.zeros(__m),
- fprime = GradientOfCostFunction,
- args = (),
- disp = False,
- )
- #
- Hto = EaHX.T @ (RI * EaHX)
- Htb = __m * \
- ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
- / (1 + 1/__m + vw.T @ vw)**2
- Hta = Hto + Htb
- #
- Pta = numpy.linalg.inv( Hta )
- EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
- #
- Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
- #--------------------------
- elif VariantM == "FiniteSize15": # Jauge Boc2015
- HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
- def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _Jo = 0.5 * _A.T * RI * _A
- _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
- _J = _Jo + _Jb
- return float(_J)
- def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _GardJo = - EaHX.T @ (RI * _A)
- _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
- _GradJ = _GardJo + _GradJb
- return numpy.ravel(_GradJ)
- vw = scipy.optimize.fmin_cg(
- f = CostFunction,
- x0 = numpy.zeros(__m),
- fprime = GradientOfCostFunction,
- args = (),
- disp = False,
- )
- #
- Hto = EaHX.T @ (RI * EaHX)
- Htb = (__m+1) * \
- ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
- / (1 + 1/__m + vw.T @ vw)**2
- Hta = Hto + Htb
- #
- Pta = numpy.linalg.inv( Hta )
- EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
- #
- Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
- #--------------------------
- elif VariantM == "FiniteSize16": # Jauge Boc2016
- HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
- def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _Jo = 0.5 * _A.T @ (RI * _A)
- _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
- _J = _Jo + _Jb
- return float(_J)
- def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _GardJo = - EaHX.T @ (RI * _A)
- _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
- _GradJ = _GardJo + _GradJb
- return numpy.ravel(_GradJ)
- vw = scipy.optimize.fmin_cg(
- f = CostFunction,
- x0 = numpy.zeros(__m),
- fprime = GradientOfCostFunction,
- args = (),
- disp = False,
- )
- #
- Hto = EaHX.T @ (RI * EaHX)
- Htb = ((__m+1) / (__m-1)) * \
- ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
- / (1 + 1/__m + vw.T @ vw / (__m-1))**2
- Hta = Hto + Htb
- #
- Pta = numpy.linalg.inv( Hta )
- EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
- #
- Xn = Xfm + EaX @ (vw[:,None] + EWa)
- #--------------------------
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
+ #
+ def GradientOfCostFunction(x):
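+        # For the 3DVAR functional J(x) = 0.5*(x-Xb)'*B^-1*(x-Xb)
+        # + 0.5*(Y-H(x))'*R^-1*(Y-H(x)), the gradient is
+        # grad J(x) = B^-1*(x-Xb) - H_tangent(x)'*R^-1*(Y-H(x)),
+        # the observation term being evaluated through the adjoint operator Ha.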
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ GradJb = BI * (_X - Xb)
+ GradJo = - Ha( (_X, RI * (Y - _HX)) )
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
+ #
+ # Minimisation de la fonctionnelle
+ # --------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
else:
- raise ValueError("VariantM has to be chosen in the authorized methods list.")
- #
- if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
- Xn = CovarianceInflation( Xn,
- selfA._parameters["InflationType"],
- selfA._parameters["InflationFactor"],
- )
- #
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- #--------------------------
- #
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("APosterioriCovariance") \
- or selfA._toStore("InnovationAtCurrentAnalysis") \
- or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
- _Innovation = Ynpu - _HXa
- #
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- # ---> avec analysis
- selfA.StoredVariables["Analysis"].store( Xa )
- if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
- selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
- if selfA._toStore("InnovationAtCurrentAnalysis"):
- selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
- # ---> avec current state
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CurrentState"):
- selfA.StoredVariables["CurrentState"].store( Xn )
- if selfA._toStore("ForecastState"):
- selfA.StoredVariables["ForecastState"].store( EMX )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
- if selfA._toStore("SimulatedObservationAtCurrentState") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
- # ---> autres
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
- J = Jb + Jo
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- #
- if selfA._toStore("IndexOfOptimum") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("CostFunctionJAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
- if selfA._parameters["EstimationOf"] == "Parameters" \
- and J < previousJMinimum:
- previousJMinimum = J
- XaMin = Xa
- if selfA._toStore("APosterioriCovariance"):
- covarianceXaMin = Pn
- # ---> Pour les smoothers
- if selfA._toStore("CurrentEnsembleState"):
- selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
- if selfA._toStore("LastEnsembleForecastState"):
- selfA.StoredVariables["LastEnsembleForecastState"].store( EMX )
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
#
- # Stockage final supplémentaire de l'optimum en estimation de paramètres
- # ----------------------------------------------------------------------
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- selfA.StoredVariables["Analysis"].store( XaMin )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+ # Workaround for a TNC bug on the returned Minimum
+ # -------------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ #
+ # Retrieval of the analysis
+ # -------------------------
+ Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ else:
+ HXa = Hm( Xa )
+ #
+ # Computation of the analysis covariance
+ # --------------------------------------
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("JacobianMatrixAtOptimum") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+ HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+ HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles"):
+ HessienneI = []
+ nb = Xa.size
+ for i in range(nb):
+ _ee = numpy.matrix(numpy.zeros(nb)).T
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee)
+ _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+ HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
+ HessienneI = numpy.matrix( HessienneI )
+ A = HessienneI.I
+ if min(A.shape) != max(A.shape):
+ raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+ if (numpy.diag(A) < 0).any():
+ raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+ if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
+ try:
+ L = numpy.linalg.cholesky( A )
+ except:
+ raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( A )
+ if selfA._toStore("JacobianMatrixAtOptimum"):
+ selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+ if selfA._toStore("KalmanGainAtOptimum"):
+ if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+ elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+ selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ #
+ # Additional computations and/or storage
+ # ---------------------------------------
+ if selfA._toStore("Innovation") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("MahalanobisConsistency") or \
+ selfA._toStore("OMB"):
+ d = Y - HXb
+ if selfA._toStore("Innovation"):
+ selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if selfA._toStore("OMA"):
+ selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if selfA._toStore("OMB"):
+ selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if selfA._toStore("SigmaObs2"):
+ TraceR = R.trace(Y.size)
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ if selfA._toStore("MahalanobisConsistency"):
+ selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+ if selfA._toStore("SimulationQuantiles"):
+ nech = selfA._parameters["NumberOfSamplesForQuantiles"]
+ HXa = numpy.matrix(numpy.ravel( HXa )).T
+ YfQ = None
+ for i in range(nech):
+ if selfA._parameters["SimulationForQuantiles"] == "Linear":
+ dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
+ dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
+ Yr = HXa + dYr
+ elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
+ Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
+ Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+ if YfQ is None:
+ YfQ = Yr
+ else:
+ YfQ = numpy.hstack((YfQ,Yr))
+ YfQ.sort(axis=-1)
+ YQ = None
+ for quantile in selfA._parameters["Quantiles"]:
+ if not (0. <= float(quantile) <= 1.): continue
+ indice = int(nech * float(quantile) - 1./nech)
+ if YQ is None: YQ = YfQ[:,indice]
+ else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
+ selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+ if selfA._toStore("SimulatedObservationAtBackground"):
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if selfA._toStore("SimulatedObservationAtOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
#
return 0
# ==============================================================================
-def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
- BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
"""
- Maximum Likelihood Ensemble Filter
+ 4DVAR (four-dimensional variational data assimilation)
"""
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA._parameters["StoreInternalVariables"] = True
#
- # Opérateurs
- # ----------
- H = HO["Direct"].appliedControledFormTo
+ # Initialisations
+ # ---------------
#
- if selfA._parameters["EstimationOf"] == "State":
- M = EM["Direct"].appliedControledFormTo
+ # Operators
+ Hm = HO["Direct"].appliedControledFormTo
+ Mm = EM["Direct"].appliedControledFormTo
#
if CM is not None and "Tangent" in CM and U is not None:
Cm = CM["Tangent"].asMatrix(Xb)
else:
Cm = None
#
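+ # Helpers: Un(_step) returns the control for a given time step (per-step
+ # series, single stored value, or constant vector), and CmUn(_xn,_un) returns
+ # the control contribution Cm*u to be added to the propagated state.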
+ def Un(_step):
+ if U is not None:
+ if hasattr(U,"store") and 1<=_step<len(U) :
+ _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ _Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ _Un = None
+ return _Un
+ def CmUn(_xn,_un):
+ if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
+ _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
+ _CmUn = _Cm * _un
+ else:
+ _CmUn = 0.
+ return _CmUn
+ #
+ # Note: the observations are used starting from time step number 1, and are
+ # arranged in Yo according to these indices. Step 0 is therefore not used,
+ # since the first assimilation step starts with the observation of step 1.
+ #
# Nombre de pas identique au nombre de pas d'observations
- # -------------------------------------------------------
if hasattr(Y,"stepnumber"):
duration = Y.stepnumber()
- __p = numpy.cumprod(Y.shape())[-1]
else:
duration = 2
- __p = numpy.array(Y).size
#
# Précalcul des inversions de B et R
- # ----------------------------------
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- BI = B.getI()
+ BI = B.getI()
RI = R.getI()
#
- # Initialisation
- # --------------
- __n = Xb.size
- __m = selfA._parameters["NumberOfMembers"]
- if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
- else: Pn = B
- if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
- else: Rn = R
- if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
- else: Qn = Q
- Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+ # Starting point of the optimization
+ Xini = selfA._parameters["InitializationPoint"]
+ #
+ # Definition of the cost function
+ # -------------------------------
+ selfA.DirectCalculation = [None,] # Step 0 is not observed
+ selfA.DirectInnovation = [None,] # Step 0 is not observed
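+ # Strong-constraint 4DVAR cost function of the initial state x:
+ #   J(x) = 1/2 (x-xb)^T B^-1 (x-xb)
+ #        + 1/2 sum_k (y_k - H(x_k))^T R^-1 (y_k - H(x_k)),  x_k = M(x_{k-1}, u_{k-1})
+ # The forward states and innovations are stored for reuse by the adjoint
+ # computation of the gradient.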
+ def CostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( _X )
+ Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+ selfA.DirectCalculation = [None,]
+ selfA.DirectInnovation = [None,]
+ Jo = 0.
+ _Xn = _X
+ for step in range(0,duration-1):
+ if hasattr(Y,"store"):
+ _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+ else:
+ _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+ _Un = Un(step)
+ #
+ # Evolution step
+ if selfA._parameters["EstimationOf"] == "State":
+ _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ pass
+ #
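+ # Optional projection of the propagated state onto the bounds
+ # (componentwise clipping), when constrained by "EstimateProjection"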
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
+ _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+ #
+ # Difference with the observations
+ if selfA._parameters["EstimationOf"] == "State":
+ _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
+ #
+ # Storage of the state
+ selfA.DirectCalculation.append( _Xn )
+ selfA.DirectInnovation.append( _YmHMX )
+ #
+ # Contribution to the observation part of the cost function
+ Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
+ J = Jb + Jo
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ return J
#
- if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
- selfA.StoredVariables["Analysis"].store( Xb )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( Pn )
- covarianceXa = Pn
+ def GradientOfCostFunction(x):
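+ # Backward (adjoint) sweep: the states and innovations stored during the last
+ # cost function evaluation are reused, and the observation part of the
+ # gradient is accumulated backwards in time as GradJo <- Ma(Ha(R^-1 d_k) + GradJo),
+ # Ha and Ma being the adjoint operators, so that GradJ = B^-1 (x-xb) - GradJo.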
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ GradJb = BI * (_X - Xb)
+ GradJo = 0.
+ for step in range(duration-1,0,-1):
+ # Retrieve the last stored state of the evolution
+ _Xn = selfA.DirectCalculation.pop()
+ # Retrieve the last stored innovation
+ _YmHMX = selfA.DirectInnovation.pop()
+ # Computation of the adjoints
+ Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+ Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
+ Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+ Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
+ # Computation of the gradient by the adjoint state
+ GradJo = GradJo + Ha * RI * _YmHMX # Equivalent, for a linear Ha, to: Ha( (_Xn, RI * _YmHMX) )
+ GradJo = Ma * GradJo # Equivalent, for a linear Ma, to: Ma( (_Xn, GradJo) )
+ GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
+ return GradJ
#
- previousJMinimum = numpy.finfo(float).max
+ # Minimization of the cost functional
+ # -----------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
#
- for step in range(duration-1):
- if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
- else:
- Ynpu = numpy.ravel( Y ).reshape((__p,1))
- #
- if U is not None:
- if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
- elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
- else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
- else:
- Un = None
- #
- if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
- Xn = CovarianceInflation( Xn,
- selfA._parameters["InflationType"],
- selfA._parameters["InflationFactor"],
- )
- #
- if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
- EMX = M( [(Xn[:,i], Un) for i in range(__m)],
- argsAsSerie = True,
- returnSerieAsArrayMatrix = True )
- qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
- Xn_predicted = EMX + qi
- if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
- Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
- elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
- # --- > Par principe, M = Id, Q = 0
- Xn_predicted = Xn
- #
- #--------------------------
- if VariantM == "MLEF13":
- Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
- EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
- Ua = numpy.identity(__m)
- __j = 0
- Deltaw = 1
- if not BnotT:
- Ta = numpy.identity(__m)
- vw = numpy.zeros(__m)
- while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
- vx1 = (Xfm + EaX @ vw).reshape((__n,1))
- #
- if BnotT:
- E1 = vx1 + _epsilon * EaX
- else:
- E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
- #
- HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
- argsAsSerie = True,
- returnSerieAsArrayMatrix = True )
- vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
- #
- if BnotT:
- EaY = (HE2 - vy2) / _epsilon
- else:
- EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
- #
- GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
- mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
- Deltaw = - numpy.linalg.solve(mH,GradJ)
- #
- vw = vw + Deltaw
- #
- if not BnotT:
- Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
- #
- __j = __j + 1
- #
- if BnotT:
- Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
- #
- Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
- #--------------------------
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
else:
- raise ValueError("VariantM has to be chosen in the authorized methods list.")
- #
- if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
- Xn = CovarianceInflation( Xn,
- selfA._parameters["InflationType"],
- selfA._parameters["InflationFactor"],
- )
- #
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- #--------------------------
- #
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("APosterioriCovariance") \
- or selfA._toStore("InnovationAtCurrentAnalysis") \
- or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
- _Innovation = Ynpu - _HXa
- #
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- # ---> avec analysis
- selfA.StoredVariables["Analysis"].store( Xa )
- if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
- selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
- if selfA._toStore("InnovationAtCurrentAnalysis"):
- selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
- # ---> avec current state
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CurrentState"):
- selfA.StoredVariables["CurrentState"].store( Xn )
- if selfA._toStore("ForecastState"):
- selfA.StoredVariables["ForecastState"].store( EMX )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( EMX - Xa )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
- if selfA._toStore("SimulatedObservationAtCurrentState") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
- # ---> autres
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
- J = Jb + Jo
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- #
- if selfA._toStore("IndexOfOptimum") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("CostFunctionJAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
- if selfA._parameters["EstimationOf"] == "Parameters" \
- and J < previousJMinimum:
- previousJMinimum = J
- XaMin = Xa
- if selfA._toStore("APosterioriCovariance"):
- covarianceXaMin = Pn
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
#
- # Stockage final supplémentaire de l'optimum en estimation de paramètres
- # ----------------------------------------------------------------------
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- selfA.StoredVariables["Analysis"].store( XaMin )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+ # Workaround for a TNC bug on the returned Minimum
+ # -------------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ #
+ # Retrieval of the analysis
+ # -------------------------
+ Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ # Additional computations and/or storage
+ # ---------------------------------------
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
#
return 0
# ==============================================================================
-def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
- BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
"""
- Iterative EnKF
+ 3DVAR variational analysis with no inversion of B
"""
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA._parameters["StoreInternalVariables"] = True
- #
- # Opérateurs
- # ----------
- H = HO["Direct"].appliedControledFormTo
- #
- if selfA._parameters["EstimationOf"] == "State":
- M = EM["Direct"].appliedControledFormTo
#
- if CM is not None and "Tangent" in CM and U is not None:
- Cm = CM["Tangent"].asMatrix(Xb)
- else:
- Cm = None
+ # Initialisations
+ # ---------------
#
- # Nombre de pas identique au nombre de pas d'observations
- # -------------------------------------------------------
- if hasattr(Y,"stepnumber"):
- duration = Y.stepnumber()
- __p = numpy.cumprod(Y.shape())[-1]
- else:
- duration = 2
- __p = numpy.array(Y).size
+ # Operators
+ Hm = HO["Direct"].appliedTo
+ Ha = HO["Adjoint"].appliedInXTo
#
# Précalcul des inversions de B et R
- # ----------------------------------
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- BI = B.getI()
+ BT = B.getT()
RI = R.getI()
#
- # Initialisation
- # --------------
- __n = Xb.size
- __m = selfA._parameters["NumberOfMembers"]
- if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
- else: Pn = B
- if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
- else: Rn = R
- if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
- else: Qn = Q
- Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+ # Starting point of the optimization
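+ # The minimization starts from v = 0, i.e. from the background state Xb.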
+ Xini = numpy.zeros(Xb.shape)
#
- if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
- selfA.StoredVariables["Analysis"].store( Xb )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( Pn )
- covarianceXa = Pn
+ # Definition of the cost function
+ # -------------------------------
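+ # Change of control variable x = Xb + B v: the background term becomes
+ # Jb = 1/2 v^T B v (B being symmetric), so B does not need to be inverted
+ # during the minimization itself.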
+ def CostFunction(v):
+ _V = numpy.asmatrix(numpy.ravel( v )).T
+ _X = Xb + B * _V
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( _X )
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _Innovation = Y - _HX
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ #
+ Jb = float( 0.5 * _V.T * BT * _V )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
#
- previousJMinimum = numpy.finfo(float).max
+ def GradientOfCostFunction(v):
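+ # Gradient with respect to the control variable v: GradJb = B^T v for the
+ # background term, and GradJo = -H^T R^-1 (y - H x) for the observation term,
+ # the latter evaluated with the adjoint operator Ha.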
+ _V = numpy.asmatrix(numpy.ravel( v )).T
+ _X = Xb + B * _V
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ GradJb = BT * _V
+ GradJo = - Ha( (_X, RI * (Y - _HX)) )
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
#
- for step in range(duration-1):
- if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
- else:
- Ynpu = numpy.ravel( Y ).reshape((__p,1))
- #
- if U is not None:
- if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
- elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
- else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ # Minimization of the cost functional
+ # -----------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
else:
- Un = None
- #
- if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
- Xn = CovarianceInflation( Xn,
- selfA._parameters["InflationType"],
- selfA._parameters["InflationFactor"],
- )
- #
- #--------------------------
- if VariantM == "IEnKF12":
- Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
- EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
- __j = 0
- Deltaw = 1
- if not BnotT:
- Ta = numpy.identity(__m)
- vw = numpy.zeros(__m)
- while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
- vx1 = (Xfm + EaX @ vw).reshape((__n,1))
- #
- if BnotT:
- E1 = vx1 + _epsilon * EaX
- else:
- E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
- #
- if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
- E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
- argsAsSerie = True,
- returnSerieAsArrayMatrix = True )
- elif selfA._parameters["EstimationOf"] == "Parameters":
- # --- > Par principe, M = Id
- E2 = Xn
- vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- vy1 = H((vx2, Un)).reshape((__p,1))
- #
- HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
- argsAsSerie = True,
- returnSerieAsArrayMatrix = True )
- vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
- #
- if BnotT:
- EaY = (HE2 - vy2) / _epsilon
- else:
- EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
- #
- GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
- mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
- Deltaw = - numpy.linalg.solve(mH,GradJ)
- #
- vw = vw + Deltaw
- #
- if not BnotT:
- Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
- #
- __j = __j + 1
- #
- A2 = EnsembleOfAnomalies( E2 )
- #
- if BnotT:
- Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
- A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
- #
- Xn = vx2 + A2
- #--------------------------
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+ #
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+ # Workaround for a TNC bug on the returned Minimum
+ # -------------------------------------------------
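+ # If the internal states were stored, the best stored state is used directly;
+ # otherwise the minimizer returns the control variable v and the analysis is
+ # reconstructed as Xa = Xb + B v.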
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
+ else:
+ Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+ # Retrieval of the analysis
+ # -------------------------
+ Xa = Minimum
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
else:
- raise ValueError("VariantM has to be chosen in the authorized methods list.")
- #
- if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
- Xn = CovarianceInflation( Xn,
- selfA._parameters["InflationType"],
- selfA._parameters["InflationFactor"],
- )
- #
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- #--------------------------
- #
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("APosterioriCovariance") \
- or selfA._toStore("InnovationAtCurrentAnalysis") \
- or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
- _Innovation = Ynpu - _HXa
- #
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- # ---> avec analysis
- selfA.StoredVariables["Analysis"].store( Xa )
- if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
- selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
- if selfA._toStore("InnovationAtCurrentAnalysis"):
- selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
- # ---> avec current state
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CurrentState"):
- selfA.StoredVariables["CurrentState"].store( Xn )
- if selfA._toStore("ForecastState"):
- selfA.StoredVariables["ForecastState"].store( E2 )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( E2 - Xa )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
- if selfA._toStore("SimulatedObservationAtCurrentState") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
- # ---> autres
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
- J = Jb + Jo
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- #
- if selfA._toStore("IndexOfOptimum") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("CostFunctionJAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
- if selfA._parameters["EstimationOf"] == "Parameters" \
- and J < previousJMinimum:
- previousJMinimum = J
- XaMin = Xa
- if selfA._toStore("APosterioriCovariance"):
- covarianceXaMin = Pn
+ HXa = Hm( Xa )
#
- # Stockage final supplémentaire de l'optimum en estimation de paramètres
- # ----------------------------------------------------------------------
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- selfA.StoredVariables["Analysis"].store( XaMin )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ # Computation of the analysis covariance
+ # --------------------------------------
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("JacobianMatrixAtOptimum") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+ HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+ HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles"):
+ BI = B.getI()
+ HessienneI = []
+ nb = Xa.size
+ for i in range(nb):
+ _ee = numpy.matrix(numpy.zeros(nb)).T
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee)
+ _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+ HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
+ HessienneI = numpy.matrix( HessienneI )
+ A = HessienneI.I
+ if min(A.shape) != max(A.shape):
+ raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+ if (numpy.diag(A) < 0).any():
+ raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+ if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
+ try:
+ L = numpy.linalg.cholesky( A )
+ except:
+ raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( A )
+ if selfA._toStore("JacobianMatrixAtOptimum"):
+ selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+ if selfA._toStore("KalmanGainAtOptimum"):
+ if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+ elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+ selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ #
+ # Additional computations and/or storage
+ # ---------------------------------------
+ if selfA._toStore("Innovation") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("MahalanobisConsistency") or \
+ selfA._toStore("OMB"):
+ d = Y - HXb
+ if selfA._toStore("Innovation"):
+ selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if selfA._toStore("OMA"):
+ selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if selfA._toStore("OMB"):
+ selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if selfA._toStore("SigmaObs2"):
+ TraceR = R.trace(Y.size)
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ if selfA._toStore("MahalanobisConsistency"):
+ selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+ if selfA._toStore("SimulationQuantiles"):
+ nech = selfA._parameters["NumberOfSamplesForQuantiles"]
+ HXa = numpy.matrix(numpy.ravel( HXa )).T
+ YfQ = None
+ for i in range(nech):
+ if selfA._parameters["SimulationForQuantiles"] == "Linear":
+ dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
+ dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
+ Yr = HXa + dYr
+ elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
+ Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
+ Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+ if YfQ is None:
+ YfQ = Yr
+ else:
+ YfQ = numpy.hstack((YfQ,Yr))
+ YfQ.sort(axis=-1)
+ YQ = None
+ for quantile in selfA._parameters["Quantiles"]:
+ if not (0. <= float(quantile) <= 1.): continue
+ indice = int(nech * float(quantile) - 1./nech)
+ if YQ is None: YQ = YfQ[:,indice]
+ else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
+ selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+ if selfA._toStore("SimulatedObservationAtBackground"):
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if selfA._toStore("SimulatedObservationAtOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
#
return 0