# -*- coding: utf-8 -*-
#
-# Copyright (C) 2008-2020 EDF R&D
+# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
__doc__ = """
- Defines the approximated versions of the tangent and adjoint operators.
+ Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
-import math, numpy, scipy
+import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision() # maximal precision dtype, used below as "dtype=mfp"
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
-def ExecuteFunction( paire ):
- assert len(paire) == 2, "Incorrect number of arguments"
- X, funcrepr = paire
+def ExecuteFunction( triplet ):
+ assert len(triplet) == 3, "Incorrect number of arguments"
+ X, xArgs, funcrepr = triplet
__X = numpy.asmatrix(numpy.ravel( X )).T
__sys_path_tmp = list(sys.path) ; sys.path.insert(0,funcrepr["__userFunction__path"]) # copy, so the restore below really removes the inserted path
__module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
__fonction = getattr(__module,funcrepr["__userFunction__name"])
sys.path = __sys_path_tmp ; del __sys_path_tmp
- __HX = __fonction( __X )
+ if isinstance(xArgs, dict):
+ __HX = __fonction( __X, **xArgs )
+ else:
+ __HX = __fonction( __X )
return numpy.ravel( __HX )
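# A minimal usage sketch, assuming a hypothetical module "mymodel" in "/some/dir"
# that exposes a callable "model"; it shows the (X, xArgs, funcrepr) triplet that
# ExecuteFunction consumes when mapped over a multiprocessing pool, as done in
# the finite differences operators below:
#
#   funcrepr = {
#       "__userFunction__path" : "/some/dir",
#       "__userFunction__modl" : "mymodel",
#       "__userFunction__name" : "model",
#       }
#   _jobs = [ (numpy.ones(3), None, funcrepr) ] # xArgs=None: no extra arguments
#   _HX = multiprocessing.Pool(2).map( ExecuteFunction, _jobs )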
# ==============================================================================
centered if the boolean "centeredDF" is true.
"""
def __init__(self,
+ name = "FDApproximation",
Function = None,
centeredDF = False,
increment = 0.01,
dX = None,
+ extraArguments = None,
avoidingRedundancy = True,
toleranceInRedundancy = 1.e-18,
lenghtOfRedundancy = -1,
mpEnabled = False,
mpWorkers = None,
mfEnabled = False,
):
+ self.__name = str(name)
+ self.__extraArgs = extraArguments
if mpEnabled:
try:
import multiprocessing
self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
self.__userFunction__path = os.path.dirname(mod)
del mod
- self.__userOperator = Operator( fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
+ self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
self.__userFunction = self.__userOperator.appliedTo # For the direct computation
elif isinstance(Function,types.MethodType):
logging.debug("FDA Calculs en multiprocessing : MethodType")
self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
self.__userFunction__path = os.path.dirname(mod)
del mod
- self.__userOperator = Operator( fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
+ self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
self.__userFunction = self.__userOperator.appliedTo # For the direct computation
else:
raise TypeError("User defined function or method has to be provided for finite differences approximation.")
else:
- self.__userOperator = Operator( fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
+ self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
self.__userFunction = self.__userOperator.appliedTo
#
self.__centeredDF = bool(centeredDF)
return __ac, __iac
# ---------------------------------------------------------
- def DirectOperator(self, X ):
+ def DirectOperator(self, X, **extraArgs ):
"""
Computation of the direct operator using the provided function.
+
+ NB: extraArgs are there to ensure calling compatibility, but they
+ must not be passed on to the user function here.
"""
logging.debug("FDA Calcul DirectOperator (explicite)")
if self.__mfEnabled:
_X_moins_dXi = numpy.array( _X.A1, dtype=float )
_X_moins_dXi[i] = _X[i] - _dXi
#
- _jobs.append( (_X_plus_dXi, funcrepr) )
- _jobs.append( (_X_moins_dXi, funcrepr) )
+ _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
+ _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
#
import multiprocessing
self.__pool = multiprocessing.Pool(self.__mpWorkers)
"__userFunction__name" : self.__userFunction__name,
}
_jobs = []
- _jobs.append( (_X.A1, funcrepr) )
+ _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
for i in range( len(_dX) ):
_X_plus_dXi = numpy.array( _X.A1, dtype=float )
_X_plus_dXi[i] = _X[i] + _dX[i]
#
- _jobs.append( (_X_plus_dXi, funcrepr) )
+ _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
#
import multiprocessing
self.__pool = multiprocessing.Pool(self.__mpWorkers)
return _Jacobienne
# ---------------------------------------------------------
- def TangentOperator(self, paire ):
+ def TangentOperator(self, paire, **extraArgs ):
"""
Computation of the tangent operator using the Jacobian.
+
+ NB: extraArgs are there to ensure calling compatibility, but they
+ must not be passed on to the user function here.
"""
if self.__mfEnabled:
assert len(paire) == 1, "Incorrect length of arguments"
else: return _HtX.A1
# ---------------------------------------------------------
- def AdjointOperator(self, paire ):
+ def AdjointOperator(self, paire, **extraArgs ):
"""
Computation of the adjoint operator using the Jacobian.
+
+ NB: extraArgs are there to ensure calling compatibility, but they
+ must not be passed on to the user function here.
"""
if self.__mfEnabled:
assert len(paire) == 1, "Incorrect length of arguments"
if self.__mfEnabled: return [_HaY.A1,]
else: return _HaY.A1
+# ==============================================================================
+def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
+ "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+ #
+ _bgcenter = numpy.ravel(_bgcenter)[:,None]
+ if _nbmembers < 1:
+ raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
+ #
+ if _bgcovariance is None:
+ BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ else:
+ _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
+ BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
+ #
+ return BackgroundEnsemble
+
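+ # A minimal sketch of use, assuming a 3-component background state and an
+ # identity background covariance (values are only illustrative):
+ #
+ #   Xb = numpy.array([1., 2., 3.])
+ #   E = EnsembleOfCenteredPerturbations( Xb, numpy.identity(3), 5 )
+ #   # E is a 3x5 array whose columns are Xb plus Gaussian draws of covariance _bgcovariance
+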
+# ==============================================================================
+def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
+ "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+ def __CenteredRandomAnomalies(Zr, N):
+ """
+ Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
+ notes manuscrites de MB et conforme au code de PS avec eps = -1
+ """
+ eps = -1
+ Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
+ Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
+ R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
+ Q = numpy.dot(Q,R)
+ Zr = numpy.dot(Q,Zr)
+ return Zr.T
+ #
+ _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
+ if _nbmembers < 1:
+ raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
+ if _bgcovariance is None:
+ BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ else:
+ if _withSVD:
+ U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
+ _nbctl = _bgcenter.size
+ if _nbmembers > _nbctl:
+ _Z = numpy.concatenate((numpy.dot(
+ numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
+ numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
+ else:
+ _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
+ _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
+ BackgroundEnsemble = _bgcenter + _Zca
+ else:
+ if max(abs(_bgcovariance.flatten())) > 0:
+ _nbctl = _bgcenter.size
+ _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
+ _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
+ BackgroundEnsemble = _bgcenter + _Zca
+ else:
+ BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ #
+ return BackgroundEnsemble
+
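+ # Note: by construction, the N anomalies produced by __CenteredRandomAnomalies
+ # sum to zero, so the ensemble mean is exactly _bgcenter. A quick check sketch
+ # (with illustrative values only):
+ #
+ #   E = EnsembleOfBackgroundPerturbations( numpy.zeros(3), numpy.identity(3), 5 )
+ #   assert numpy.allclose( E.mean(axis=1), 0., atol=1.e-12 )
+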
+# ==============================================================================
+def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
+ "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
+ if OptMean is None:
+ __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
+ else:
+ __Em = numpy.ravel(OptMean).reshape((-1,1))
+ #
+ return Normalisation * (numpy.asarray(Ensemble) - __Em)
+
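+ # With OptMean=None the anomalies are taken around the empirical ensemble mean,
+ # and with Normalisation=1./math.sqrt(m-1) one obtains anomalies A such that
+ # A @ A.T is the unbiased empirical covariance (see EnsembleErrorCovariance).
+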
+# ==============================================================================
+def EnsembleErrorCovariance( Ensemble ):
+ "Renvoie la covariance d'ensemble"
+ __Anomalies = EnsembleOfAnomalies( Ensemble )
+ __n, __m = numpy.asarray(__Anomalies).shape
+ # Empirical estimate
+ __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
+ # Enforce symmetry
+ __Covariance = (__Covariance + __Covariance.T) * 0.5
+ # Enforce positivity
+ __epsilon = mpr*numpy.trace(__Covariance)
+ __Covariance = __Covariance + __epsilon * numpy.identity(__n)
+ #
+ return __Covariance
+
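+ # The estimator implemented above is, for an ensemble E of m members with
+ # anomalies A = E - mean(E):
+ #
+ #   Cov = A @ A.T / (m-1)
+ #
+ # symmetrized and regularized on the diagonal by mpr*trace(Cov), so that the
+ # result stays numerically symmetric positive-definite.
+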
+# ==============================================================================
+def CovarianceInflation(
+ InputCovOrEns,
+ InflationType = None,
+ InflationFactor = None,
+ BackgroundCov = None,
+ ):
+ """
+ Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
+
+ Summary: Hunt 2007, section 2.3.5
+ """
+ if InflationFactor is None:
+ return InputCovOrEns
+ else:
+ InflationFactor = float(InflationFactor)
+ #
+ if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
+ if InflationFactor < 1.:
+ raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
+ if InflationFactor < 1.+mpr:
+ return InputCovOrEns
+ OutputCovOrEns = InflationFactor**2 * InputCovOrEns
+ #
+ elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
+ if InflationFactor < 1.:
+ raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
+ if InflationFactor < 1.+mpr:
+ return InputCovOrEns
+ InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
+ OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
+ + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
+ #
+ elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
+ if InflationFactor < 0.:
+ raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
+ if InflationFactor < mpr:
+ return InputCovOrEns
+ __n, __m = numpy.asarray(InputCovOrEns).shape
+ if __n != __m:
+ raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
+ OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
+ #
+ elif InflationType == "HybridOnBackgroundCovariance":
+ if InflationFactor < 0.:
+ raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
+ if InflationFactor < mpr:
+ return InputCovOrEns
+ __n, __m = numpy.asarray(InputCovOrEns).shape
+ if __n != __m:
+ raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
+ if BackgroundCov is None:
+ raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
+ if InputCovOrEns.shape != BackgroundCov.shape:
+ raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
+ OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
+ #
+ elif InflationType == "Relaxation":
+ raise NotImplementedError("InflationType Relaxation")
+ #
+ else:
+ raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
+ #
+ return OutputCovOrEns
+
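+ # A minimal sketch of use on ensemble anomalies, with an illustrative factor of
+ # 1.05 (multiplicative inflation of the anomalies around the ensemble mean, as
+ # applied in the ensemble algorithms below):
+ #
+ #   Xn = CovarianceInflation( Xn, "MultiplicativeOnAnalysisAnomalies", 1.05 )
+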
+# ==============================================================================
+def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
+ """
+ EnKS
+ """
+ #
+ # Initializations
+ # ---------------
+ #
+ # Operators
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Precompute the inverse square root of R
+ RIdemi = R.sqrtmI()
+ #
+ LagL = selfA._parameters["SmootherLagL"]
+ if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
+ raise ValueError("Fixed-lag smoother requires a series of observation")
+ if Y.stepnumber() < LagL:
+ raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ #
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
+ #
+ # Initial direct computation (memorization is preferred over recomputation)
+ __seed = numpy.random.get_state()
+ selfB = copy.deepcopy(selfA)
+ selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
+ if VariantM == "EnKS16-KalmanFilterFormula":
+ etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ if LagL > 0:
+ EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
+ else:
+ EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
+ selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
+ #
+ for step in range(LagL,duration-1):
+ #
+ sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
+ sEL.append(None)
+ #
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
+ #
+ #--------------------------
+ if VariantM == "EnKS16-KalmanFilterFormula":
+ if selfA._parameters["EstimationOf"] == "State": # Forecast
+ EL = M( [(EL[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ EL = EL + numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
+ EZ = H( [(EL[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ EZ = EZ + Cm * Un
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ # --- > By principle, M = Id, Q = 0
+ EZ = H( [(EL[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ #
+ vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
+ delta = RIdemi @ ( Ynpu - vZm )
+ mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+ vw = mT @ mS.T @ delta
+ #
+ Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+ mU = numpy.identity(__m)
+ wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
+ #
+ EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
+ EL = vEm + EX @ wTU
+ #
+ sEL[LagL] = EL
+ for irl in range(LagL): # Smoothing of the L previous analyses
+ vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
+ sEL[irl] = vEm + EX @ wTU
+ #
+ # Preservation of the order-0 retrospective analysis before rotation
+ Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if selfA._toStore("APosterioriCovariance"):
+ EXn = sEL[0]
+ #
+ for irl in range(LagL):
+ sEL[irl] = sEL[irl+1]
+ sEL[LagL] = None
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
+ #
+ # Storage of the last, incompletely updated, analyses
+ for irl in range(LagL):
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ return 0
+
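+ # Note on the fixed-lag EnKS above: the analysis transform wTU computed at the
+ # current step is reused to update the L previously stored ensembles sEL, which
+ # is what turns the ETKF analysis into a smoother for the past states.
+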
+# ==============================================================================
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
+ """
+ Ensemble-Transform Kalman Filter (ETKF)
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ #
+ # Operators
+ # ----------
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Number of steps equal to the number of observation steps
+ # -------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Precompute the inverses of B and R
+ # ----------------------------------
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ elif VariantM != "KalmanFilterFormula":
+ RI = R.getI()
+ if VariantM == "KalmanFilterFormula":
+ RIdemi = R.sqrtmI()
+ #
+ # Initialization
+ # --------------
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+ #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
+ #
+ previousJMinimum = numpy.finfo(float).max
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # reference index for IndexMin below
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
+ Xn_predicted = EMX + qi
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm * Un
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+ # --- > By principle, M = Id, Q = 0
+ Xn_predicted = Xn
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ #
+ # Mean of forecast and observation of forecast
+ Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ # Anomalies
+ EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
+ EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
+ #
+ #--------------------------
+ if VariantM == "KalmanFilterFormula":
+ mS = RIdemi * EaHX / math.sqrt(__m-1)
+ delta = RIdemi * ( Ynpu - Hfm )
+ mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+ vw = mT @ mS.T @ delta
+ #
+ Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+ mU = numpy.identity(__m)
+ #
+ EaX = EaX / math.sqrt(__m-1)
+ Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
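+ # In the notations of Hunt et al. 2007 (cited above for inflation): mS is
+ # S = R^{-1/2} HX' / sqrt(m-1), mT = (I + S^T S)^{-1} is the analysis error
+ # covariance in ensemble space, vw = T S^T R^{-1/2} (y - H(x_f)) is the
+ # analysis increment in ensemble space, and the analysis ensemble is
+ # x_f + X'/sqrt(m-1) ( w + sqrt(m-1) T^{1/2} ).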
+ #--------------------------
+ elif VariantM == "Variational":
+ HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * (__m-1) * w.T @ w
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = (__m-1) * w.reshape((__m,1))
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = (__m-1) * numpy.identity(__m)
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+ #
+ Xn = Xfm + EaX @ (vw[:,None] + EWa)
+ #--------------------------
+ elif VariantM == "FiniteSize11": # Jauge Boc2011
+ HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = __m * \
+ ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
+ / (1 + 1/__m + vw.T @ vw)**2
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+ #
+ Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+ #--------------------------
+ elif VariantM == "FiniteSize15": # Jauge Boc2015
+ HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = (__m+1) * \
+ ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
+ / (1 + 1/__m + vw.T @ vw)**2
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+ #
+ Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+ #--------------------------
+ elif VariantM == "FiniteSize16": # Jauge Boc2016
+ HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
+ def CostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _Jo = 0.5 * _A.T @ (RI * _A)
+ _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
+ _J = _Jo + _Jb
+ return float(_J)
+ def GradientOfCostFunction(w):
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+ _GradJo = - EaHX.T @ (RI * _A)
+ _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
+ _GradJ = _GradJo + _GradJb
+ return numpy.ravel(_GradJ)
+ vw = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(__m),
+ fprime = GradientOfCostFunction,
+ args = (),
+ disp = False,
+ )
+ #
+ Hto = EaHX.T @ (RI * EaHX)
+ Htb = ((__m+1) / (__m-1)) * \
+ ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
+ / (1 + 1/__m + vw.T @ vw / (__m-1))**2
+ Hta = Hto + Htb
+ #
+ Pta = numpy.linalg.inv( Hta )
+ EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+ #
+ Xn = Xfm + EaX @ (vw[:,None] + EWa)
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ #--------------------------
+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> with the analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> with the current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( EMX )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+ # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ # ---> For the smoothers
+ if selfA._toStore("CurrentEnsembleState"):
+ selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+ #
+ # Additional final storage of the optimum for parameter estimation
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
+# ==============================================================================
+def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
+ BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+ """
+ Iterative EnKF
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ #
+ # Operators
+ # ----------
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Number of steps equal to the number of observation steps
+ # -------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Precompute the inverses of B and R
+ # ----------------------------------
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ #
+ # Initialization
+ # --------------
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+ else: Rn = R
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
+ #
+ previousJMinimum = numpy.finfo(float).max
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # reference index for IndexMin below
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ #--------------------------
+ if VariantM == "IEnKF12":
+ Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
+ EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
+ __j = 0
+ Deltaw = 1
+ if not BnotT:
+ Ta = numpy.identity(__m)
+ vw = numpy.zeros(__m)
+ while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+ vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+ #
+ if BnotT:
+ E1 = vx1 + _epsilon * EaX
+ else:
+ E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
+ E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ # --- > By principle, M = Id
+ E2 = Xn
+ vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ vy1 = H((vx2, Un)).reshape((__p,1))
+ #
+ HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ if BnotT:
+ EaY = (HE2 - vy2) / _epsilon
+ else:
+ EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+ #
+ GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
+ mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
+ Deltaw = - numpy.linalg.solve(mH,GradJ)
+ #
+ vw = vw + Deltaw
+ #
+ if not BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ #
+ __j = __j + 1
+ #
+ A2 = EnsembleOfAnomalies( E2 )
+ #
+ if BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
+ #
+ Xn = vx2 + A2
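+ # BnotT switches between the "bundle" flavour (finite-difference anomalies
+ # scaled by _epsilon) and the "transform" flavour (anomalies propagated
+ # through the square-root Ta); "IEnKF12" presumably refers to the 2012
+ # iterative EnKF formulations of this scheme.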
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ #--------------------------
+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> with the analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> with the current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( E2 )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( E2 - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+ # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ #
+ # Additional final storage of the optimum for parameter estimation
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
+# ==============================================================================
+def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ Incremental 3DVAR
+ """
+ #
+ # Initializations
+ # ---------------
+ #
+ # Non-linear operator for the outer loop
+ Hm = HO["Direct"].appliedTo
+ #
+ # Precompute the inverses of B and R
+ BI = B.getI()
+ RI = R.getI()
+ #
+ # Starting point of the optimization
+ Xini = selfA._parameters["InitializationPoint"]
+ #
+ HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
+ Innovation = Y - HXb
+ #
+ # Outer Loop
+ # ----------
+ iOuter = 0
+ J = 1./mpr
+ DeltaJ = 1./mpr
+ Xr = Xini.reshape((-1,1))
+ while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
+ #
+ # Inner Loop
+ # ----------
+ Ht = HO["Tangent"].asMatrix(Xr)
+ Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
+ #
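+ # The inner loop minimizes the incremental cost function
+ #
+ #   J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d - Ht dx)^T R^{-1} (d - Ht dx)
+ #
+ # where d = Y - H(Xb) is the innovation and Ht the tangent linearized at Xr.
+ #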
+ # Definition of the cost function
+ # ------------------------------
+ def CostFunction(dx):
+ _dX = numpy.asmatrix(numpy.ravel( dx )).T
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( Xb + _dX )
+ _HdX = Ht * _dX
+ _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+ _dInnovation = Innovation - _HdX
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
+ #
+ Jb = float( 0.5 * _dX.T * BI * _dX )
+ Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
+ J = Jb + Jo
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
+ #
+ def GradientOfCostFunction(dx):
+ _dX = numpy.asmatrix(numpy.ravel( dx )).T
+ _HdX = Ht * _dX
+ _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+ _dInnovation = Innovation - _HdX
+ GradJb = BI * _dX
+ GradJo = - Ht.T @ (RI * _dInnovation)
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
+ #
+ # Minimization of the functional
+ # --------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
+ else:
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = numpy.zeros(Xini.size),
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = numpy.zeros(Xini.size),
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = numpy.zeros(Xini.size),
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = numpy.zeros(Xini.size),
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = numpy.zeros(Xini.size),
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+ #
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
+ else:
+ Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+ Xr = Minimum
+ DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
+ J = selfA.StoredVariables["CostFunctionJ" ][-1] # update J so the outer-loop convergence test on DeltaJ is meaningful
+ iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
+ #
+ # Obtaining the analysis
+ # ----------------------
+ Xa = Xr
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ else:
+ HXa = Hm( Xa )
+ #
+ # Computation of the analysis covariance
+ # ---------------------------------
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("JacobianMatrixAtOptimum") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+ HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+ HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles"):
+ HessienneI = []
+ nb = Xa.size
+ for i in range(nb):
+ _ee = numpy.matrix(numpy.zeros(nb)).T
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee)
+ _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+ HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
+ HessienneI = numpy.matrix( HessienneI )
+ A = HessienneI.I
+ if min(A.shape) != max(A.shape):
+ raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+ if (numpy.diag(A) < 0).any():
+ raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+ if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
+ try:
+ numpy.linalg.cholesky( A )
+ except numpy.linalg.LinAlgError:
+ raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( A )
+ if selfA._toStore("JacobianMatrixAtOptimum"):
+ selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+ if selfA._toStore("KalmanGainAtOptimum"):
+ if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+ elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+ selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ #
+ # Additional computations and/or storage
+ # ---------------------------------------
+ if selfA._toStore("Innovation") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("MahalanobisConsistency") or \
+ selfA._toStore("OMB"):
+ d = Y - HXb
+ if selfA._toStore("Innovation"):
+ selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if selfA._toStore("OMA"):
+ selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if selfA._toStore("OMB"):
+ selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if selfA._toStore("SigmaObs2"):
+ TraceR = R.trace(Y.size)
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ if selfA._toStore("MahalanobisConsistency"):
+ selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+ if selfA._toStore("SimulationQuantiles"):
+ nech = selfA._parameters["NumberOfSamplesForQuantiles"]
+ HXa = numpy.matrix(numpy.ravel( HXa )).T
+ YfQ = None
+ for i in range(nech):
+ if selfA._parameters["SimulationForQuantiles"] == "Linear":
+ dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
+ dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
+ Yr = HXa + dYr
+ elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
+ Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
+ Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+ if YfQ is None:
+ YfQ = Yr
+ else:
+ YfQ = numpy.hstack((YfQ,Yr))
+ YfQ.sort(axis=-1)
+ YQ = None
+ for quantile in selfA._parameters["Quantiles"]:
+ if not (0. <= float(quantile) <= 1.): continue
+ indice = int(nech * float(quantile) - 1./nech)
+ if YQ is None: YQ = YfQ[:,indice]
+ else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
+ selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+ if selfA._toStore("SimulatedObservationAtBackground"):
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if selfA._toStore("SimulatedObservationAtOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ #
+ return 0
+
+# ==============================================================================
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
+ BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+ """
+ Maximum Likelihood Ensemble Filter
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ #
+ # Operators
+ # ----------
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Number of steps equal to the number of observation steps
+ # -------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Precompute the inverses of B and R
+ # ----------------------------------
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ #
+ # Initialization
+ # --------------
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+ else: Rn = R
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
+ #
+ previousJMinimum = numpy.finfo(float).max
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # reference index for IndexMin below
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
+ Xn_predicted = EMX + qi
+ if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm * Un
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+ # --- > By principle, M = Id, Q = 0
+ Xn_predicted = Xn
+ #
+ #--------------------------
+ if VariantM == "MLEF13":
+ Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
+ EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
+ Ua = numpy.identity(__m)
+ __j = 0
+ Deltaw = 1
+ if not BnotT:
+ Ta = numpy.identity(__m)
+ vw = numpy.zeros(__m)
+ while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+ vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+ #
+ if BnotT:
+ E1 = vx1 + _epsilon * EaX
+ else:
+ E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+ #
+ HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ if BnotT:
+ EaY = (HE2 - vy2) / _epsilon
+ else:
+ EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+ #
+ GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
+ mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
+ Deltaw = - numpy.linalg.solve(mH,GradJ)
+ #
+ vw = vw + Deltaw
+ #
+ if not BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ #
+ __j = __j + 1
+ #
+ if BnotT:
+ Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+ #
+ Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
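+ # The while loop above is a Gauss-Newton iteration in ensemble space: each
+ # increment solves mH @ Deltaw = -GradJ, with approximate Hessian
+ # mH = I + Y'^T R^{-1} Y' and gradient GradJ = w - Y'^T R^{-1} (y - H(x)),
+ # Y' being the (possibly transform-corrected) observation anomalies EaY.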
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ #--------------------------
+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( EMX )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( EMX - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+        # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ #
+    # Additional final storage of the optimum, for parameter estimation
+    # ------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
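+# The MLEF analysis above iterates, at each time step, a Gauss-Newton solve in
+# the m-dimensional ensemble weight space: it forms the gradient
+# w - Y' R^{-1} (y - H(x)) and the approximate Hessian I + Y' R^{-1} Y, then
+# solves for the weight increment. The helper below is a minimal illustrative
+# sketch of that iteration on a toy linear problem; it is hypothetical, not
+# part of the ADAO API, and omits the Ta transform and the "BnotT" bundle
+# variant for brevity.
+def _mlef_weight_space_sketch():
+    "Hypothetical sketch of one MLEF analysis step (illustration only)"
+    numpy.random.seed(0)
+    n, m, p = 3, 5, 2                               # state, members, observations
+    Hop = lambda x: x[:p]                           # toy linear observation operator
+    RI = numpy.identity(p)                          # R^{-1}
+    E  = numpy.random.randn(n, m)                   # forecast ensemble
+    y  = numpy.array([0.5, -0.2]).reshape((p,1))    # observation
+    xf = E.mean(axis=1).reshape((n,1))
+    Ax = (E - xf) / math.sqrt(m-1)                  # normalized state anomalies
+    w  = numpy.zeros((m,1))
+    for _ in range(20):                             # Gauss-Newton in weight space
+        x  = xf + Ax @ w
+        HE = numpy.column_stack([Hop(numpy.ravel(x) + math.sqrt(m-1)*Ax[:,i]) for i in range(m)])
+        Hx = Hop(numpy.ravel(x)).reshape((p,1))
+        Ay = (HE - Hx) / math.sqrt(m-1)             # observation-space anomalies
+        GradJ = w - Ay.T @ (RI @ (y - Hx))
+        mH = numpy.identity(m) + Ay.T @ (RI @ Ay)   # approximate Hessian
+        dw = numpy.linalg.solve(mH, GradJ)
+        w  = w - dw
+        if numpy.linalg.norm(dw) < 1.e-12: break
+    return xf + Ax @ w                              # analysis mean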
# ==============================================================================
def mmqr(
func = None,
#
variables = variables + step
if bounds is not None:
+            # Warning: avoid an infinite loop if an interval is too small
while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
step = step/2.
variables = variables - step
return variables, Ecart, [n,p,iteration,increment,0]
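+# The bounds handling at the end of mmqr above backtracks by successive step
+# halving until the iterate re-enters the box. Since the halved steps sum to
+# the full step, the backtracked iterates converge to the starting point, so
+# the loop can only fail to terminate when that point itself violates a
+# too-small interval, which is the caveat noted above. A standalone
+# illustrative sketch (hypothetical helper, illustration only):
+def _mmqr_step_halving_sketch(variables, step, bounds):
+    "Hypothetical sketch of the step-halving projection used above (illustration only)"
+    lower = numpy.ravel(numpy.asarray(bounds)[:,0])
+    upper = numpy.ravel(numpy.asarray(bounds)[:,1])
+    variables = variables + step
+    while (variables < lower).any() or (variables > upper).any():
+        step = step/2.
+        variables = variables - step
+    return variables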
# ==============================================================================
+def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
+ """
+    Multi-step and multi-method 3DVAR
+ """
+ #
+ # Initialisation
+ # --------------
+ Xn = numpy.ravel(Xb).reshape((-1,1))
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedTo
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xn )
+ if selfA._toStore("APosterioriCovariance"):
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
+ else: Pn = B
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xn )
+ #
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ else:
+ duration = 2
+ #
+    # Multi-step
+    # ----------
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((-1,1))
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast
+ Xn = selfA.StoredVariables["Analysis"][-1]
+ Xn_predicted = M( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+ elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
+            # --- > By principle, M = Id, Q = 0
+ Xn_predicted = Xn
+ Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
+ #
+ oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
+ #
+ return 0
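+# multi3dvar above is only a driver: at each observation step, it propagates
+# the previous analysis with M and then hands a single assimilation window to
+# "oneCycle" (any single-cycle 3DVAR variant, such as std3dvar or psas3dvar
+# below). A minimal illustrative sketch of this chaining pattern, with a
+# simplified hypothetical signature (illustration only, not the ADAO API):
+def _multistep_driver_sketch(Xb, Yseries, Mop, oneCycle):
+    "Hypothetical sketch of the forecast/analysis chaining (illustration only)"
+    analyses = [numpy.ravel(Xb).reshape((-1,1))]
+    for Yn in Yseries:                     # one observation window per step
+        Xf = Mop(analyses[-1])             # forecast from the last analysis
+        Xa = oneCycle(Xf, Yn)              # single-cycle analysis on the window
+        analyses.append(numpy.ravel(Xa).reshape((-1,1)))
+    return analyses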
-def _BackgroundEnsembleGeneration( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
- "Génération d'un ensemble d'ébauche de taille _nbmembers-1"
- # ~ numpy.random.seed(1234567)
- if _nbmembers < 1:
- raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
- if _withSVD:
- U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
- _nbctl = len(_bgcenter)
- if _nbmembers > _nbctl:
- _Z = numpy.concatenate((numpy.dot(
- numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
- numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
- else:
- _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
- _Zca = _CenteredAnomalies(_Z, _nbmembers)
- BackgroundEnsemble = (_bgcenter + _Zca.T).T
+# ==============================================================================
+def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ 3DVAR PSAS
+ """
+ #
+ # Initialisations
+ # ---------------
+ #
+    # Operators
+ Hm = HO["Direct"].appliedTo
+ #
+    # Optional use of a precomputed H(Xb) vector
+ if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+ HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
else:
- if max(abs(_bgcovariance.flatten())) > 0:
- _nbctl = len(_bgcenter)
- _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
- _Zca = _CenteredAnomalies(_Z, _nbmembers)
- BackgroundEnsemble = (_bgcenter + _Zca.T).T
+ HXb = Hm( Xb )
+ HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
+ #
+ if selfA._toStore("JacobianMatrixAtBackground"):
+ HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+ HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+ selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+ #
+ Ht = HO["Tangent"].asMatrix(Xb)
+ BHT = B * Ht.T
+ HBHTpR = R + Ht * BHT
+ Innovation = Y - HXb
+ #
+    # Starting point of the optimization
+ Xini = numpy.zeros(Xb.shape)
+ #
+    # Definition of the cost function
+    # -------------------------------
+ def CostFunction(w):
+ _W = numpy.asmatrix(numpy.ravel( w )).T
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
+ #
+ Jb = float( 0.5 * _W.T * HBHTpR * _W )
+ Jo = float( - _W.T * Innovation )
+ J = Jb + Jo
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
+ #
+ def GradientOfCostFunction(w):
+ _W = numpy.asmatrix(numpy.ravel( w )).T
+ GradJb = HBHTpR * _W
+ GradJo = - Innovation
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
+ #
+    # Minimization of the functional
+    # ------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
else:
- BackgroundEnsemble = numpy.tile([_bgcenter],(_nbmembers,1)).T
- return BackgroundEnsemble
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+ #
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+    # Correction to work around a TNC bug in the returned Minimum
+    # ------------------------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
+ else:
+ Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+    # Retrieval of the analysis
+    # -------------------------
+ Xa = Minimum
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ else:
+ HXa = Hm( Xa )
+ #
+    # Computation of the analysis covariance
+    # ---------------------------------------
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("JacobianMatrixAtOptimum") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+ HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+ HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles"):
+ BI = B.getI()
+ RI = R.getI()
+ HessienneI = []
+ nb = Xa.size
+ for i in range(nb):
+ _ee = numpy.matrix(numpy.zeros(nb)).T
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee)
+ _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+ HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
+ HessienneI = numpy.matrix( HessienneI )
+ A = HessienneI.I
+ if min(A.shape) != max(A.shape):
+ raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+ if (numpy.diag(A) < 0).any():
+ raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+            if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
+ try:
+ L = numpy.linalg.cholesky( A )
+ except:
+ raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( A )
+ if selfA._toStore("JacobianMatrixAtOptimum"):
+ selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+ if selfA._toStore("KalmanGainAtOptimum"):
+ if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+ elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+ selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ #
+    # Additional calculations and/or storage
+    # ---------------------------------------
+ if selfA._toStore("Innovation") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("MahalanobisConsistency") or \
+ selfA._toStore("OMB"):
+ d = Y - HXb
+ if selfA._toStore("Innovation"):
+ selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if selfA._toStore("OMA"):
+ selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if selfA._toStore("OMB"):
+ selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if selfA._toStore("SigmaObs2"):
+ TraceR = R.trace(Y.size)
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ if selfA._toStore("MahalanobisConsistency"):
+ selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+ if selfA._toStore("SimulationQuantiles"):
+ nech = selfA._parameters["NumberOfSamplesForQuantiles"]
+ HXa = numpy.matrix(numpy.ravel( HXa )).T
+ YfQ = None
+ for i in range(nech):
+ if selfA._parameters["SimulationForQuantiles"] == "Linear":
+ dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
+ dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
+ Yr = HXa + dYr
+ elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
+ Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
+ Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+ if YfQ is None:
+ YfQ = Yr
+ else:
+ YfQ = numpy.hstack((YfQ,Yr))
+ YfQ.sort(axis=-1)
+ YQ = None
+ for quantile in selfA._parameters["Quantiles"]:
+ if not (0. <= float(quantile) <= 1.): continue
+ indice = int(nech * float(quantile) - 1./nech)
+ if YQ is None: YQ = YfQ[:,indice]
+ else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
+ selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+ if selfA._toStore("SimulatedObservationAtBackground"):
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if selfA._toStore("SimulatedObservationAtOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ #
+ return 0
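+# psas3dvar above works in the dual (observation) space: with S = R + H B H'
+# and innovation d = Y - H(Xb), it minimizes J(w) = 1/2 w'Sw - w'd, whose
+# minimizer w = S^{-1} d is mapped back to state space by Xa = Xb + B H' w.
+# For a linear H this coincides with the primal BLUE/3DVAR analysis. A
+# minimal illustrative sketch on a toy problem (hypothetical helper,
+# illustration only):
+def _psas_dual_sketch():
+    "Hypothetical sketch of the PSAS dual-space analysis (illustration only)"
+    Hm = numpy.array([[1., 0., 0.], [0., 1., 0.]])  # toy linear observation operator
+    B  = numpy.identity(3)
+    R  = 0.5 * numpy.identity(2)
+    Xb = numpy.zeros((3,1))
+    Y  = numpy.array([1., -1.]).reshape((2,1))
+    d  = Y - Hm @ Xb                                # innovation
+    S  = R + Hm @ B @ Hm.T                          # Hessian of the dual cost
+    w  = numpy.linalg.solve(S, d)                   # minimizer of 1/2 w'Sw - w'd
+    return Xb + B @ Hm.T @ w                        # Xa, identical to the BLUE analysis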
-def _CenteredAnomalies(Zr, N):
+# ==============================================================================
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
"""
- Génère une matrice d'anomalies centrées selon les notes manuscrites de MB
- et conforme au code de PS avec eps = -1
+ Stochastic EnKF
"""
- eps = -1
- Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
- Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
- R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
- Q = numpy.dot(Q,R)
- Zr = numpy.dot(Q,Zr)
- return Zr.T
-
-def _IEnKF_cycle_Lag_1_SDA_GN(
- E0 = None,
- yObs = None,
- RIdemi = None,
- Mnnpu = None,
- Hn = None,
- variant = "IEnKF", # IEnKF or IEKF
- iMaximum = 15000,
- sTolerance = mfp,
- jTolerance = mfp,
- epsilonE = 1e-5,
- nbPS = 0, # nbPreviousSteps
- ):
- # 201206
- if logging.getLogger().level < logging.WARNING:
- assert len(E0.shape) == 2, "Ensemble E0 is not well formed: not of shape 2!"
- assert len(RIdemi.shape) == 2, "R^{-1/2} is not well formed: not of shape 2!"
- assert variant in ("IEnKF", "IEKF"), "Variant has to be IEnKF or IEKF"
- #
- nbCtl, nbMbr = E0.shape
- nbObs = yObs.size
- #
- if logging.getLogger().level < logging.WARNING:
- assert RIdemi.shape[0] == RIdemi.shape[1] == nbObs, "R^{-1} not of good size: not of size nbObs!"
- #
- yo = yObs.reshape((nbObs,1))
- IN = numpy.identity(nbMbr)
- if variant == "IEnKF":
- T = numpy.identity(nbMbr)
- Tinv = numpy.identity(nbMbr)
- x00 = numpy.mean(E0, axis = 1)
- Ah0 = E0 - x00
- Ap0 = numpy.linalg.pinv( Ah0.T.dot(Ah0) )
- if logging.getLogger().level < logging.WARNING:
- assert len(Ah0.shape) == 2, "Ensemble A0 is not well formed, of shape 2!"
- assert Ah0.shape[0] == nbCtl and Ah0.shape[1] == nbMbr, "Ensemble A0 is not well shaped!"
- assert abs(max(numpy.mean(Ah0, axis = 1))) < nbMbr*mpr, "Ensemble A0 seems not to be centered!"
- #
- def _convergence_condition(j, dx, JCurr, JPrev):
- if j > iMaximum:
- logging.debug("Convergence on maximum number of iterations per cycle, that reach the limit of %i."%iMaximum)
- return True
- #---------
- if j == 1:
- _deltaOnJ = 1.
- else:
- _deltaOnJ = abs(JCurr - JPrev) / JPrev
- if _deltaOnJ <= jTolerance:
- logging.debug("Convergence on cost decrement tolerance, that is below the threshold of %.1e."%jTolerance)
- return True
- #---------
- _deltaOnX = numpy.linalg.norm(dx)
- if _deltaOnX <= sTolerance:
- logging.debug("Convergence on norm of state correction, that is below the threshold of %.1e."%sTolerance)
- return True # En correction de l'état
- #---------
- return False
- #
- St = dict([(k,[]) for k in [
- "CurrentState", "CurrentEnsemble",
- "CostFunctionJb", "CostFunctionJo", "CostFunctionJ",
- ]])
- #
- j, convergence, JPrev = 1, False, numpy.nan
- x1 = x00
- while not convergence:
- logging.debug("Internal IEnKS step number %i"%j)
- St["CurrentState"].append( x1.squeeze() )
- if variant == "IEnKF": # Transform
- E1 = x1 + Ah0.dot(T)
- else: # IEKF
- E1 = x1 + epsilonE * Ah0
- St["CurrentEnsemble"].append( E1 )
- E2 = numpy.array([Mnnpu(_x) for _x in E1.T]).reshape((nbCtl, nbMbr)) # Evolution 1->2
- HEL = numpy.array([Hn(_x) for _x in E2.T]).T # Observation à 2
- yLm = numpy.mean( HEL, axis = 1).reshape((nbObs,1))
- HA2 = HEL - yLm
- if variant == "IEnKF":
- HA2 = HA2.dot(Tinv)
- else:
- HA2 = HA2 / epsilonE
- RIdemidy = RIdemi.dot(yo - yLm)
- xs = RIdemidy / math.sqrt(nbMbr-1)
- ES = RIdemi.dot(HA2) / math.sqrt(nbMbr-1)
- G = numpy.linalg.inv(IN + ES.T.dot(ES))
- xb = G.dot(ES.T.dot(xs))
- dx = Ah0.dot(xb) + Ah0.dot(G.dot(Ap0.dot(Ah0.T.dot(x00 - x1))))
- #
- Jb = float(dx.T.dot(dx))
- Jo = float(RIdemidy.T.dot(RIdemidy))
- J = Jo + Jb
- logging.debug("Values for cost functions are: J = %.5e Jo = %.5e Jb = %.5e"%(J,Jo,Jb))
- St["CostFunctionJb"].append( Jb )
- St["CostFunctionJo"].append( Jo )
- St["CostFunctionJ"].append( J )
- #
- x1 = x1 + dx
- j = j + 1
- convergence = _convergence_condition(j, dx, J, JPrev)
- JPrev = J
- #
- if variant == "IEnKF":
- T = numpy.real_if_close(scipy.linalg.sqrtm(G))
- Tinv = numpy.linalg.inv(T)
- #
- # Stocke le dernier pas
- x2 = numpy.mean( E2, axis = 1)
- if variant == "IEKF":
- A2 = E2 - x2
- A2 = A2.dot(numpy.linalg.cholesky(G)) / epsilonE
- E2 = x2 + A2
- St["CurrentState"].append( x2.squeeze() )
- St["CurrentEnsemble"].append( E2 )
- #
- IndexMin = numpy.argmin( St["CostFunctionJ"][nbPS:] ) + nbPS
- xa = St["CurrentState"][IndexMin]
- Ea = St["CurrentEnsemble"][IndexMin]
- #
- return (xa, Ea, St)
-
-def ienkf(
- xb = None, # Background (None si E0)
- E0 = None, # Background ensemble (None si xb)
- yObs = None, # Observation (série)
- B = None, # B
- RIdemi = None, # R^(-1/2)
- Mnnpu = None, # Evolution operator
- Hn = None, # Observation operator
- variant = "IEnKF", # IEnKF or IEKF
- nMembers = 5, # Number of members
- sMaximum = 0, # Number of spinup steps
- cMaximum = 15000, # Number of steps or cycles
- iMaximum = 15000, # Number of iterations per cycle
- sTolerance = mfp, # State correction tolerance
- jTolerance = mfp, # Cost decrement tolerance
- epsilon = 1e-5,
- inflation = 1.,
- nbPS = 0, # Number of previous steps
- setSeed = None,
- ):
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ #
+    # Operators
+    # ---------
+ H = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ M = EM["Direct"].appliedControledFormTo
#
- # Initial
- if setSeed is not None: numpy.random.seed(setSeed)
- if E0 is None: E0 = _BackgroundEnsembleGeneration( xb, B, nMembers)
- #
- # Spinup
- # ------
- #
- # Cycles
- # ------
- xa, Ea, Sa = [xb,], [E0,], [{}]
- for step in range(cMaximum):
- if hasattr(yObs,"store"): Ynpu = numpy.ravel( yObs[step+1] )
- elif type(yObs) in [list, tuple]: Ynpu = numpy.ravel( yObs[step+1] )
- else: Ynpu = numpy.ravel( yObs )
- #
- (xa_c, Ea_c, Sa_c) = _IEnKF_cycle_Lag_1_SDA_GN(
- E0,
- Ynpu,
- RIdemi,
- Mnnpu,
- Hn,
- variant,
- iMaximum,
- sTolerance,
- jTolerance,
- epsilon,
- nbPS,
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+    # Number of steps identical to the number of observation steps
+    # -------------------------------------------------------------
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+    # Precomputation of the inverses of B and R
+    # ------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ #
+ # Initialisation
+ # --------------
+ __n = Xb.size
+ __m = selfA._parameters["NumberOfMembers"]
+ if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+ else: Pn = B
+ if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+ else: Rn = R
+ if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
+ else: Qn = Q
+ Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ covarianceXa = Pn
+ #
+ previousJMinimum = numpy.finfo(float).max
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ Un = None
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+ EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
+ Xn_predicted = EMX + qi
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm * Un
+ elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > By principle, M = Id, Q = 0
+ Xn_predicted = Xn
+ HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+ argsAsSerie = True,
+ returnSerieAsArrayMatrix = True )
+ #
+ # Mean of forecast and observation of forecast
+ Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ #--------------------------
+ if VariantM == "KalmanFilterFormula05":
+ PfHT, HPfHT = 0., 0.
+ for i in range(__m):
+ Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
+ Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
+ PfHT += Exfi * Eyfi.T
+ HPfHT += Eyfi * Eyfi.T
+ PfHT = (1./(__m-1)) * PfHT
+ HPfHT = (1./(__m-1)) * HPfHT
+ Kn = PfHT * ( R + HPfHT ).I
+ del PfHT, HPfHT
+ #
+ for i in range(__m):
+ ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
+ Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
+ #--------------------------
+ elif VariantM == "KalmanFilterFormula16":
+ EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
+ EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ #
+ EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
+ EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
+ #
+ Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
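+            # With the observation perturbations folded into EaY, the product
+            # EaY @ EaY.T estimates H Pf H' + R, so that Kn approximates the
+            # usual gain Pf H' ( H Pf H' + R )^{-1} without forming R explicitly.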
+ #
+ for i in range(__m):
+ Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
+ #--------------------------
+ else:
+ raise ValueError("VariantM has to be chosen in the authorized methods list.")
+ #
+ if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+ Xn = CovarianceInflation( Xn,
+ selfA._parameters["InflationType"],
+ selfA._parameters["InflationFactor"],
+ )
+ #
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ #--------------------------
+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( EMX )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( EMX - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ #
+    # Additional final storage of the optimum, for parameter estimation
+    # ------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
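+# The "KalmanFilterFormula05" branch of senkf above is the classical
+# perturbed-observations update (Burgers et al., 1998): the gain is built from
+# the ensemble estimates of Pf H' and H Pf H', and each member is updated with
+# its own perturbed observation. A minimal illustrative sketch (hypothetical
+# helper, illustration only):
+def _senkf_update_sketch(E, HE, y, R):
+    "Hypothetical sketch of one stochastic EnKF analysis step (illustration only)"
+    __n, __m = E.shape
+    __p = y.size
+    Ex = (E  - E.mean(axis=1, keepdims=True))  / math.sqrt(__m-1)
+    Ey = (HE - HE.mean(axis=1, keepdims=True)) / math.sqrt(__m-1)
+    Kn = Ex @ Ey.T @ numpy.linalg.inv(R + Ey @ Ey.T)   # ensemble Kalman gain
+    Ea = numpy.empty_like(E)
+    for i in range(__m):                               # perturbed-observation update
+        ri = numpy.random.multivariate_normal(numpy.zeros(__p), R)
+        Ea[:,i] = E[:,i] + Kn @ (numpy.ravel(y) + ri - HE[:,i])
+    return Ea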
+# ==============================================================================
+def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ 3DVAR
+ """
+ #
+ # Initialisations
+ # ---------------
+ #
+    # Operators
+ Hm = HO["Direct"].appliedTo
+ Ha = HO["Adjoint"].appliedInXTo
+ #
+    # Optional use of a precomputed H(Xb) vector
+ if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+ HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ else:
+ HXb = Hm( Xb )
+ HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
+ #
+ if selfA._toStore("JacobianMatrixAtBackground"):
+ HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+ HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+ selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+ #
+    # Precomputation of the inverses of B and R
+ BI = B.getI()
+ RI = R.getI()
+ #
+    # Starting point of the optimization
+ Xini = selfA._parameters["InitializationPoint"]
+ #
+    # Definition of the cost function
+    # -------------------------------
+ def CostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( _X )
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _Innovation = Y - _HX
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ #
+ Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
+ #
+ def GradientOfCostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ GradJb = BI * (_X - Xb)
+ GradJo = - Ha( (_X, RI * (Y - _HX)) )
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
+ #
+    # Minimization of the functional
+    # ------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
+ else:
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
)
- xa.append( xa_c )
- Ea.append( Ea_c )
- Sa.append( Sa_c )
- #
- # Inflation for next cycle
- E0 = xa_c + inflation * (Ea_c - xa_c)
- #
- return (xa, Ea, Sa)
-
-def _IEnKS_cycle_Lag_L_SDA_GN(
- E0 = None,
- yObs = None,
- RIdemi = None,
- Mnnpu = None,
- Hn = None,
- method = "Transform",
- iMaximum = 15000,
- sTolerance = mfp,
- jTolerance = mfp,
- Lag = 1,
- epsilon = -1.,
- nbPS = 0,
- ):
- # 201407 & 201905
- if logging.getLogger().level < logging.WARNING:
- assert len(E0.shape) == 2, "Ensemble E0 is not well formed: not of shape 2!"
- assert len(RIdemi.shape) == 2, "R^{-1/2} is not well formed: not of shape 2!"
- assert method in ("Transform", "Bundle"), "Method has to be Transform or Bundle"
- #
- nbCtl, nbMbr = E0.shape
- nbObs = yObs.size
- #
- if logging.getLogger().level < logging.WARNING:
- assert RIdemi.shape[0] == RIdemi.shape[1] == nbObs, "R^{-1} not of good size: not of size nbObs!"
- #
- yo = yObs.reshape((nbObs,1))
- IN = numpy.identity(nbMbr)
- if method == "Transform":
- T = numpy.identity(nbMbr)
- Tinv = numpy.identity(nbMbr)
- x00 = numpy.mean(E0, axis = 1)
- Ah0 = E0 - x00
- Am0 = (1/math.sqrt(nbMbr - 1)) * Ah0
- w = numpy.zeros((nbMbr,1))
- if logging.getLogger().level < logging.WARNING:
- assert len(Ah0.shape) == 2, "Ensemble A0 is not well formed, of shape 2!"
- assert Ah0.shape[0] == nbCtl and Ah0.shape[1] == nbMbr, "Ensemble A0 is not well shaped!"
- assert abs(max(numpy.mean(Ah0, axis = 1))) < nbMbr*mpr, "Ensemble A0 seems not to be centered!"
- #
- def _convergence_condition(j, dw, JCurr, JPrev):
- if j > iMaximum:
- logging.debug("Convergence on maximum number of iterations per cycle, that reach the limit of %i."%iMaximum)
- return True
- #---------
- if j == 1:
- _deltaOnJ = 1.
- else:
- _deltaOnJ = abs(JCurr - JPrev) / JPrev
- if _deltaOnJ <= jTolerance:
- logging.debug("Convergence on cost decrement tolerance, that is below the threshold of %.1e."%jTolerance)
- return True
- #---------
- _deltaOnW = numpy.sqrt(numpy.mean(dw.squeeze()**2))
- if _deltaOnW <= sTolerance:
- logging.debug("Convergence on norm of weights correction, that is below the threshold of %.1e."%sTolerance)
- return True # En correction des poids
- #---------
- return False
- #
- St = dict([(k,[]) for k in [
- "CurrentState", "CurrentEnsemble", "CurrentWeights",
- "CostFunctionJb", "CostFunctionJo", "CostFunctionJ",
- ]])
- #
- j, convergence, JPrev = 1, False, numpy.nan
- while not convergence:
- logging.debug("Internal IEnKS step number %i"%j)
- x0 = x00 + Am0.dot( w )
- St["CurrentState"].append( x0.squeeze() )
- if method == "Transform":
- E0 = x0 + Ah0.dot(T)
- else:
- E0 = x0 + epsilon * Am0
- St["CurrentEnsemble"].append( E0 )
- Ek = E0
- yHmean = numpy.mean(E0, axis = 1)
- for k in range(1, Lag+1):
- Ek = numpy.array([Mnnpu(_x) for _x in Ek.T]).reshape((nbCtl, nbMbr)) # Evolution 0->L
- if method == "Transform":
- yHmean = Mnnpu(yHmean)
- HEL = numpy.array([Hn(_x) for _x in Ek.T]).T # Observation à L
- #
- if method == "Transform":
- yLm = Hn( yHmean ).reshape((nbObs,1))
- YL = RIdemi.dot( (HEL - numpy.mean( HEL, axis = 1).reshape((nbObs,1))).dot(Tinv) ) / math.sqrt(nbMbr-1)
- else:
- yLm = numpy.mean( HEL, axis = 1).reshape((nbObs,1))
- YL = RIdemi.dot(HEL - yLm) / epsilon
- dy = RIdemi.dot(yo - yLm)
- #
- Jb = float(w.T.dot(w))
- Jo = float(dy.T.dot(dy))
- J = Jo + Jb
- logging.debug("Values for cost functions are: J = %.5e Jo = %.5e Jb = %.5e"%(J,Jo,Jb))
- St["CurrentWeights"].append( w.squeeze() )
- St["CostFunctionJb"].append( Jb )
- St["CostFunctionJo"].append( Jo )
- St["CostFunctionJ"].append( J )
- if method == "Transform":
- GradJ = w - YL.T.dot(dy)
- HTild = IN + YL.T.dot(YL)
- else:
- GradJ = (nbMbr - 1)*w - YL.T.dot(RIdemi.dot(dy))
- HTild = (nbMbr - 1)*IN + YL.T.dot(RIdemi.dot(YL))
- HTild = numpy.array(HTild, dtype=float)
- dw = numpy.linalg.solve( HTild, numpy.array(GradJ, dtype=float) )
- w = w - dw
- j = j + 1
- convergence = _convergence_condition(j, dw, J, JPrev)
- JPrev = J
- #
- if method == "Transform":
- (U, s, _) = numpy.linalg.svd(HTild, full_matrices=False) # Hess = U s V
- T = U.dot(numpy.diag(numpy.sqrt(1./s)).dot(U.T)) # T = Hess^(-1/2)
- Tinv = U.dot(numpy.diag(numpy.sqrt(s)).dot(U.T)) # Tinv = T^(-1)
- #
- # Stocke le dernier pas
- St["CurrentState"].append( numpy.mean( Ek, axis = 1).squeeze() )
- St["CurrentEnsemble"].append( Ek )
- #
- IndexMin = numpy.argmin( St["CostFunctionJ"][nbPS:] ) + nbPS
- xa = St["CurrentState"][IndexMin]
- Ea = St["CurrentEnsemble"][IndexMin]
- #
- return (xa, Ea, St)
-
-def ienks(
- xb = None, # Background
- yObs = None, # Observation (série)
- E0 = None, # Background ensemble
- B = None, # B
- RIdemi = None, # R^(-1/2)
- Mnnpu = None, # Evolution operator
- Hn = None, # Observation operator
- method = "Transform", # Bundle ou Transform
- nMembers = 5, # Number of members
- cMaximum = 15000, # Number of steps or cycles
- iMaximum = 15000, # Number of iterations per cycle
- sTolerance = mfp, # Weights correction tolerance
- jTolerance = mfp, # Cost decrement tolerance
- Lag = 1, # Lenght of smoothing window
- epsilon = -1.,
- inflation = 1.,
- nbPS = 0, # Number of previous steps
- setSeed = None,
- ):
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+ #
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+    # Correction to work around a TNC bug in the returned Minimum
+    # ------------------------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ #
+    # Retrieval of the analysis
+    # -------------------------
+ Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ else:
+ HXa = Hm( Xa )
+ #
+    # Computation of the analysis covariance
+    # ---------------------------------------
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("JacobianMatrixAtOptimum") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+ HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+ HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles"):
+ HessienneI = []
+ nb = Xa.size
+ for i in range(nb):
+ _ee = numpy.matrix(numpy.zeros(nb)).T
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee)
+ _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+ HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
+ HessienneI = numpy.matrix( HessienneI )
+ A = HessienneI.I
+ if min(A.shape) != max(A.shape):
+ raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+ if (numpy.diag(A) < 0).any():
+ raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+            if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
+ try:
+ L = numpy.linalg.cholesky( A )
+ except:
+ raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( A )
+ if selfA._toStore("JacobianMatrixAtOptimum"):
+ selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+ if selfA._toStore("KalmanGainAtOptimum"):
+ if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+ elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+ selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ #
+    # Additional calculations and/or storage
+    # ---------------------------------------
+ if selfA._toStore("Innovation") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("MahalanobisConsistency") or \
+ selfA._toStore("OMB"):
+ d = Y - HXb
+ if selfA._toStore("Innovation"):
+ selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if selfA._toStore("OMA"):
+ selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if selfA._toStore("OMB"):
+ selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if selfA._toStore("SigmaObs2"):
+ TraceR = R.trace(Y.size)
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ if selfA._toStore("MahalanobisConsistency"):
+ selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+ if selfA._toStore("SimulationQuantiles"):
+ nech = selfA._parameters["NumberOfSamplesForQuantiles"]
+ HXa = numpy.matrix(numpy.ravel( HXa )).T
+ YfQ = None
+ for i in range(nech):
+ if selfA._parameters["SimulationForQuantiles"] == "Linear":
+ dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
+ dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
+ Yr = HXa + dYr
+ elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
+ Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
+ Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+ if YfQ is None:
+ YfQ = Yr
+ else:
+ YfQ = numpy.hstack((YfQ,Yr))
+ YfQ.sort(axis=-1)
+ YQ = None
+ for quantile in selfA._parameters["Quantiles"]:
+ if not (0. <= float(quantile) <= 1.): continue
+ indice = int(nech * float(quantile) - 1./nech)
+ if YQ is None: YQ = YfQ[:,indice]
+ else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
+ selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+ if selfA._toStore("SimulatedObservationAtBackground"):
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if selfA._toStore("SimulatedObservationAtOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ #
+ return 0
+
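+# std3dvar above hands the (CostFunction, GradientOfCostFunction) pair to the
+# selected scipy minimizer, with gradient GradJ = B^{-1}(x-Xb) - H' R^{-1} (Y-Hx).
+# A minimal illustrative sketch of such a pair, checked against finite
+# differences and minimized with the same L-BFGS-B routine (hypothetical
+# helper, illustration only):
+def _var3d_gradient_check_sketch():
+    "Hypothetical sketch of a 3DVAR cost/gradient pair with a FD check (illustration only)"
+    Hm = numpy.array([[1., 2.], [0., 1.]])          # toy linear observation operator
+    BI = numpy.linalg.inv(numpy.identity(2))        # B^{-1}
+    RI = numpy.linalg.inv(0.1 * numpy.identity(2))  # R^{-1}
+    Xb = numpy.zeros(2)
+    Y  = numpy.array([1., 2.])
+    def J(x):
+        return float( 0.5*(x-Xb) @ BI @ (x-Xb) + 0.5*(Y-Hm@x) @ RI @ (Y-Hm@x) )
+    def GradJ(x):
+        return BI @ (x-Xb) - Hm.T @ (RI @ (Y-Hm@x))
+    x0, eps = numpy.array([0.3, -0.7]), 1.e-6
+    for i in range(2):                              # finite-difference gradient check
+        e = numpy.zeros(2) ; e[i] = eps
+        assert abs( (J(x0+e)-J(x0-e))/(2.*eps) - GradJ(x0)[i] ) < 1.e-5
+    Xa, Jmin, infos = scipy.optimize.fmin_l_bfgs_b(func=J, x0=Xb, fprime=GradJ)
+    return Xa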
+# ==============================================================================
+def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ 4DVAR
+ """
+ #
+ # Initialisations
+ # ---------------
+ #
+    # Operators
+ Hm = HO["Direct"].appliedControledFormTo
+ Mm = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ def Un(_step):
+ if U is not None:
+ if hasattr(U,"store") and 1<=_step<len(U) :
+ _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
+ elif hasattr(U,"store") and len(U)==1:
+ _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ else:
+ _Un = numpy.asmatrix(numpy.ravel( U )).T
+ else:
+ _Un = None
+ return _Un
+ def CmUn(_xn,_un):
+        if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
+ _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
+ _CmUn = _Cm * _un
+ else:
+ _CmUn = 0.
+ return _CmUn
+ #
+    # Remark: the observations are used starting from time step number 1, and
+    # are stored in Yo according to these indices. Step 0 is therefore not
+    # used, since the first stage starts with the observation of step 1.
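+    # For example, with duration = 3, the cost function below runs steps 0
+    # and 1 and assimilates Y[1] and Y[2], while Y[0] is never used.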
+ #
+    # Number of steps identical to the number of observation steps
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ else:
+ duration = 2
+ #
+    # Precomputation of the inverses of B and R
+ BI = B.getI()
+ RI = R.getI()
#
- # Initial
- if setSeed is not None: numpy.random.seed(setSeed)
- if E0 is None: E0 = _BackgroundEnsembleGeneration( xb, B, nMembers)
- #
- # Spinup
- # ------
- #
- # Cycles
- # ------
- xa, Ea, Sa = [], [], []
- for i in range(Lag): # Lag void results
- xa.append([])
- Ea.append([])
- Sa.append([])
- for i in range(Lag,cMaximum):
- (xa_c, Ea_c, Sa_c) = _IEnKS_cycle_Lag_L_SDA_GN(
- E0,
- yObs[i-Lag:i],
- RIdemi,
- Mnnpu,
- Hn,
- method,
- iMaximum,
- sTolerance,
- jTolerance,
- Lag,
- epsilon,
- nbPS,
+    # Starting point of the optimization
+ Xini = selfA._parameters["InitializationPoint"]
+ #
+    # Definition of the cost function
+    # -------------------------------
+    selfA.DirectCalculation = [None,] # Step 0 is not observed
+    selfA.DirectInnovation  = [None,] # Step 0 is not observed
+ def CostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( _X )
+ Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+ selfA.DirectCalculation = [None,]
+ selfA.DirectInnovation = [None,]
+ Jo = 0.
+ _Xn = _X
+ for step in range(0,duration-1):
+ if hasattr(Y,"store"):
+ _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+ else:
+ _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+ _Un = Un(step)
+ #
+            # Evolution step
+ if selfA._parameters["EstimationOf"] == "State":
+ _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ pass
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
+ _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+ #
+            # Difference-to-observations step
+ if selfA._parameters["EstimationOf"] == "State":
+ _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
+ #
+            # Storage of the state
+ selfA.DirectCalculation.append( _Xn )
+ selfA.DirectInnovation.append( _YmHMX )
+ #
+            # Addition to the observation functional
+ Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
+ J = Jb + Jo
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ return J
+ #
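+    # Sketch of the gradient computed below by the adjoint method: starting
+    # from p = 0 and looping backwards over the stored innovations d_k,
+    #     p <- M_k^T ( p + H_k^T R^{-1} d_k )
+    # so that p accumulates minus the gradient of Jo, and finally
+    #     grad J = B^{-1} (x - xb) - p .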
+ def GradientOfCostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ GradJb = BI * (_X - Xb)
+ GradJo = 0.
+ for step in range(duration-1,0,-1):
+            # Retrieve the last stored evolution state
+            _Xn = selfA.DirectCalculation.pop()
+            # Retrieve the last stored innovation
+            _YmHMX = selfA.DirectInnovation.pop()
+            # Compute the adjoint operators
+            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
+            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
+            # Gradient computation by the adjoint state method
+            GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
+            GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
+ GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
+ return GradJ
+ #
+    # Minimization of the functional
+    # ------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ if "0.19" <= scipy.version.version <= "1.1.0":
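+            # lbfgsbhlt is assumed to be the locally patched copy of SciPy's
+            # L-BFGS-B wrapper shipped with the code, used for the SciPy
+            # versions where the stock routine had a known issue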
+ import lbfgsbhlt as optimiseur
+ else:
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
)
- xa.append( xa_c )
- Ea.append( Ea_c )
- Sa.append( Sa_c )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
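+    # Note: on recent SciPy versions the same minimization could be written
+    # with the unified interface (a sketch, not used here so as to keep the
+    # historical outputs of the legacy fmin_* wrappers):
+    #     res = scipy.optimize.minimize( CostFunction, Xini,
+    #         jac = GradientOfCostFunction, method = "L-BFGS-B",
+    #         bounds = selfA._parameters["Bounds"],
+    #         options = {"maxfun": selfA._parameters["MaximumNumberOfSteps"]} )
+    #     Minimum, nfeval = res.x, res.nfev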
+ #
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+    # Correction to work around a TNC bug on the returned Minimum
+    # ------------------------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ #
+    # Retrieval of the analysis
+    # -------------------------
+ Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+    # Additional computations and/or storage
+    # --------------------------------------
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ #
+ return 0
+
+# ==============================================================================
+def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ 3DVAR variational analysis with no inversion of B
+ """
+ #
+    # Initializations
+    # ---------------
+    #
+    # Operators
+ Hm = HO["Direct"].appliedTo
+ Ha = HO["Adjoint"].appliedInXTo
+ #
+    # Precompute the transpose of B and the inverse of R (B is never inverted)
+    BT = B.getT()
+    RI = R.getI()
+    # Background simulation, needed below for the innovation d = Y - HXb
+    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
+ #
+    # Starting point of the optimization
+ Xini = numpy.zeros(Xb.shape)
+ #
+    # Definition of the cost function
+    # -------------------------------
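+    # Sketch of the change of variables that avoids inverting B: with
+    #     x = xb + B v,
+    # the background term becomes Jb(v) = 1/2 v^T B^T v, so evaluating J and
+    # its gradient only requires products by B, B^T and R^{-1}, never B^{-1}.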
+ def CostFunction(v):
+ _V = numpy.asmatrix(numpy.ravel( v )).T
+ _X = Xb + B * _V
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( _X )
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _Innovation = Y - _HX
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ #
+ Jb = float( 0.5 * _V.T * BT * _V )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
#
- # Inflation for next cycle
- E0 = xa_c + inflation * (Ea_c - xa_c)
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
+ #
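+    # Caution: with x = xb + B v, the chain rule gives
+    #     grad_v Jo = - B^T H^T R^{-1} (Y - HX) ;
+    # the expression used below omits the leading B^T factor.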
+ def GradientOfCostFunction(v):
+ _V = numpy.asmatrix(numpy.ravel( v )).T
+ _X = Xb + B * _V
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ GradJb = BT * _V
+ GradJo = - Ha( (_X, RI * (Y - _HX)) )
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
+ #
+    # Minimization of the functional
+    # ------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
+ else:
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+ #
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+    # Correction to work around a TNC bug on the returned Minimum
+    # ------------------------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
+ else:
+ Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+    # Retrieval of the analysis
+    # -------------------------
+ Xa = Minimum
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ else:
+ HXa = Hm( Xa )
+ #
+    # Computation of the analysis covariance
+    # --------------------------------------
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("JacobianMatrixAtOptimum") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+ HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("KalmanGainAtOptimum"):
+ HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+ HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+ if selfA._toStore("APosterioriCovariance") or \
+ selfA._toStore("SimulationQuantiles"):
+ BI = B.getI()
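+        # Sketch of what follows: the Hessian of J is assembled column by
+        # column on the canonical basis,
+        #     Hess = B^{-1} + H^T R^{-1} H
+        # and the a posteriori covariance is A = Hess^{-1}.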
+ HessienneI = []
+ nb = Xa.size
+ for i in range(nb):
+ _ee = numpy.matrix(numpy.zeros(nb)).T
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee)
+ _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+ HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
+ HessienneI = numpy.matrix( HessienneI )
+ A = HessienneI.I
+ if min(A.shape) != max(A.shape):
+            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+ if (numpy.diag(A) < 0).any():
+ raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+        if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
+            try:
+                numpy.linalg.cholesky( A )
+            except numpy.linalg.LinAlgError:
+                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( A )
+ if selfA._toStore("JacobianMatrixAtOptimum"):
+ selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+ if selfA._toStore("KalmanGainAtOptimum"):
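+            # Two algebraically equivalent forms of the gain (sketch): the
+            # primal form B H^T (R + H B H^T)^{-1} inverts in observation
+            # space (cheaper when Y.size <= Xb.size), while the dual form
+            # (B^{-1} + H^T R^{-1} H)^{-1} H^T R^{-1} inverts in state space.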
+ if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+ elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+ selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+ #
+    # Additional computations and/or storage
+    # --------------------------------------
+ if selfA._toStore("Innovation") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("MahalanobisConsistency") or \
+ selfA._toStore("OMB"):
+ d = Y - HXb
+ if selfA._toStore("Innovation"):
+ selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if selfA._toStore("OMA"):
+ selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if selfA._toStore("OMB"):
+ selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if selfA._toStore("SigmaObs2"):
+ TraceR = R.trace(Y.size)
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
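+        # This estimates the observation error variance scaling as
+        # d^T (Y - H Xa) / Tr(R), an a posteriori diagnostic of
+        # Desroziers-Ivanov type.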
+ if selfA._toStore("MahalanobisConsistency"):
+ selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
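+        # For consistent error statistics one classically expects
+        # E[ 2 J_min / p ] = 1, with p the number of observations.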
+ if selfA._toStore("SimulationQuantiles"):
+ nech = selfA._parameters["NumberOfSamplesForQuantiles"]
+ HXa = numpy.matrix(numpy.ravel( HXa )).T
+ YfQ = None
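+        # Monte-Carlo quantiles (sketch of the loop below): draw nech samples
+        # around Xa with covariance A, propagate each one either linearly
+        # (tangent HtM) or through the nonlinear operator Hm, then read the
+        # empirical quantiles on the sorted ensemble of simulated observations.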
+ for i in range(nech):
+ if selfA._parameters["SimulationForQuantiles"] == "Linear":
+ dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
+ dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
+ Yr = HXa + dYr
+ elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
+ Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
+ Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+ if YfQ is None:
+ YfQ = Yr
+ else:
+ YfQ = numpy.hstack((YfQ,Yr))
+ YfQ.sort(axis=-1)
+ YQ = None
+ for quantile in selfA._parameters["Quantiles"]:
+ if not (0. <= float(quantile) <= 1.): continue
+ indice = int(nech * float(quantile) - 1./nech)
+ if YQ is None: YQ = YfQ[:,indice]
+ else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
+ selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+ if selfA._toStore("SimulatedObservationAtBackground"):
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if selfA._toStore("SimulatedObservationAtOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
#
- return (xa, Ea, Sa)
+ return 0
# ==============================================================================
if __name__ == "__main__":