X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FdaComposant%2FdaCore%2FNumericObjects.py;h=b6be0670dc6adf7c3843e68a6f7569973417e824;hb=069f0f7fe7697e40e3535257789ee3168210748f;hp=ebd3d358baf40acd0fd8a172af7706ebae986d8c;hpb=dc39aa3db9de38f52a68299e97b2ed35c0ed1c16;p=modules%2Fadao.git diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index ebd3d35..b6be067 100644 --- a/src/daComposant/daCore/NumericObjects.py +++ b/src/daComposant/daCore/NumericObjects.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2008-2021 EDF R&D +# Copyright (C) 2008-2022 EDF R&D # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -25,9 +25,8 @@ __doc__ = """ """ __author__ = "Jean-Philippe ARGAUD" -import os, time, copy, types, sys, logging -import math, numpy, scipy, scipy.optimize, scipy.version -from daCore.BasicObjects import Operator +import os, copy, types, sys, logging, numpy +from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm from daCore.PlatformInfo import PlatformInfo mpr = PlatformInfo().MachinePrecision() mfp = PlatformInfo().MaximumPrecision() @@ -37,7 +36,7 @@ mfp = PlatformInfo().MaximumPrecision() def ExecuteFunction( triplet ): assert len(triplet) == 3, "Incorrect number of arguments" X, xArgs, funcrepr = triplet - __X = numpy.asmatrix(numpy.ravel( X )).T + __X = numpy.ravel( X ).reshape((-1,1)) __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"]) __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), []) __fonction = getattr(__module,funcrepr["__userFunction__name"]) @@ -66,6 +65,7 @@ class FDApproximation(object): increment = 0.01, dX = None, extraArguments = None, + reducingMemoryUse = False, avoidingRedundancy = True, toleranceInRedundancy = 1.e-18, lenghtOfRedundancy = -1, @@ -75,6 +75,7 @@ class FDApproximation(object): ): self.__name = str(name) self.__extraArgs = extraArguments + # if mpEnabled: try: import multiprocessing @@ -88,12 +89,12 @@ class FDApproximation(object): self.__mpWorkers = None logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers)) # - if mfEnabled: - self.__mfEnabled = True - else: - self.__mfEnabled = False + self.__mfEnabled = bool(mfEnabled) logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,)) # + self.__rmEnabled = bool(reducingMemoryUse) + logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,)) + # if avoidingRedundancy: self.__avoidRC = True self.__tolerBP = float(toleranceInRedundancy) @@ -105,6 +106,9 @@ class FDApproximation(object): self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms else: self.__avoidRC = False + logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC) + if self.__avoidRC: + logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP) # if self.__mpEnabled: if isinstance(Function,types.FunctionType): @@ -149,10 +153,7 @@ class FDApproximation(object): if dX is None: self.__dX = None else: - self.__dX = numpy.asmatrix(numpy.ravel( dX )).T - logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC) - if self.__avoidRC: - logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP) + self.__dX = numpy.ravel( dX ) # --------------------------------------------------------- def __doublon__(self, e, l, n, v=None): @@ -164,6 
+165,29 @@ class FDApproximation(object): break return __ac, __iac + # --------------------------------------------------------- + def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None): + "Produit incrémental d'une matrice liste de colonnes avec un vecteur" + if not isinstance(__LMatrix, (list,tuple)): + raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix)) + if __dotWith is not None: + __Idwx = numpy.ravel( __dotWith ) + assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements" + __Produit = numpy.zeros(__LMatrix[0].size) + for i, col in enumerate(__LMatrix): + __Produit += float(__Idwx[i]) * col + return __Produit + elif __dotTWith is not None: + _Idwy = numpy.ravel( __dotTWith ).T + assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements" + __Produit = numpy.zeros(len(__LMatrix)) + for i, col in enumerate(__LMatrix): + __Produit[i] = float( _Idwy @ col) + return __Produit + else: + __Produit = None + return __Produit + # --------------------------------------------------------- def DirectOperator(self, X, **extraArgs ): """ @@ -176,17 +200,16 @@ class FDApproximation(object): if self.__mfEnabled: _HX = self.__userFunction( X, argsAsSerie = True ) else: - _X = numpy.asmatrix(numpy.ravel( X )).T - _HX = numpy.ravel(self.__userFunction( _X )) + _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) )) # return _HX # --------------------------------------------------------- - def TangentMatrix(self, X ): + def TangentMatrix(self, X, dotWith = None, dotTWith = None ): """ Calcul de l'opérateur tangent comme la Jacobienne par différences finies, c'est-à-dire le gradient de H en X. On utilise des différences finies - directionnelles autour du point X. X est un numpy.matrix. + directionnelles autour du point X. X est un numpy.ndarray. 
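Aside on the hunk above: the new __listdotwith__ helper applies a Jacobian stored as a list of columns to a vector (or its transpose) without ever assembling the full matrix. A minimal standalone sketch of the same idea, with illustrative names that are not part of the ADAO API:

import numpy

def listdotwith(columns, dot_with=None, dot_t_with=None):
    # "columns" is a list of 1-D arrays, each one being a Jacobian column J[:, i]
    if dot_with is not None:
        x = numpy.ravel(dot_with)                    # J @ x, accumulated column by column
        result = numpy.zeros(columns[0].size)
        for i, col in enumerate(columns):
            result += float(x[i]) * col
        return result
    if dot_t_with is not None:
        y = numpy.ravel(dot_t_with)                  # J.T @ y, one scalar product per column
        return numpy.array([float(y @ col) for col in columns])
    return None

# Tiny check against the assembled matrix
cols = [numpy.array([1., 0., 2.]), numpy.array([0., 3., 1.])]   # J is 3x2
J = numpy.column_stack(cols)
assert numpy.allclose(listdotwith(cols, dot_with=[2., -1.]), J @ [2., -1.])
assert numpy.allclose(listdotwith(cols, dot_t_with=[1., 1., 1.]), J.T @ [1., 1., 1.])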
Différences finies centrées (approximation d'ordre 2): 1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation @@ -214,12 +237,14 @@ class FDApproximation(object): if X is None or len(X)==0: raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),)) # - _X = numpy.asmatrix(numpy.ravel( X )).T + _X = numpy.ravel( X ) # if self.__dX is None: _dX = self.__increment * _X else: - _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T + _dX = numpy.ravel( self.__dX ) + assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one" + assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one" # if (_dX == 0.).any(): moyenne = _dX.mean() @@ -234,11 +259,16 @@ class FDApproximation(object): __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None) if __alreadyCalculatedP == __alreadyCalculatedI > -1: __alreadyCalculated, __i = True, __alreadyCalculatedP - logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i) + logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i) # if __alreadyCalculated: logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i) _Jacobienne = self.__listJPCR[__i] + logging.debug("FDA Fin du calcul de la Jacobienne") + if dotWith is not None: + return numpy.dot(_Jacobienne, numpy.ravel( dotWith )) + elif dotTWith is not None: + return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith )) else: logging.debug("FDA Calcul Jacobienne (explicite)") if self.__centeredDF: @@ -252,9 +282,9 @@ class FDApproximation(object): _jobs = [] for i in range( len(_dX) ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi - _X_moins_dXi = numpy.array( _X.A1, dtype=float ) + _X_moins_dXi = numpy.array( _X, dtype=float ) _X_moins_dXi[i] = _X[i] - _dXi # _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) ) @@ -274,9 +304,9 @@ class FDApproximation(object): _xserie = [] for i in range( len(_dX) ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi - _X_moins_dXi = numpy.array( _X.A1, dtype=float ) + _X_moins_dXi = numpy.array( _X, dtype=float ) _X_moins_dXi[i] = _X[i] - _dXi # _xserie.append( _X_plus_dXi ) @@ -292,9 +322,9 @@ class FDApproximation(object): _Jacobienne = [] for i in range( _dX.size ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi - _X_moins_dXi = numpy.array( _X.A1, dtype=float ) + _X_moins_dXi = numpy.array( _X, dtype=float ) _X_moins_dXi[i] = _X[i] - _dXi # _HX_plus_dXi = self.DirectOperator( _X_plus_dXi ) @@ -311,9 +341,9 @@ class FDApproximation(object): "__userFunction__name" : self.__userFunction__name, } _jobs = [] - _jobs.append( (_X.A1, self.__extraArgs, funcrepr) ) + _jobs.append( (_X, self.__extraArgs, funcrepr) ) for i in range( len(_dX) ): - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dX[i] # _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) ) @@ -332,9 +362,9 @@ class FDApproximation(object): # elif self.__mfEnabled: _xserie = [] - _xserie.append( _X.A1 ) + _xserie.append( _X ) for i in range( len(_dX) ): - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = 
_X[i] + _dX[i] # _xserie.append( _X_plus_dXi ) @@ -352,30 +382,35 @@ class FDApproximation(object): _HX = self.DirectOperator( _X ) for i in range( _dX.size ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi # _HX_plus_dXi = self.DirectOperator( _X_plus_dXi ) # _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) ) - # # - _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T - if self.__avoidRC: - if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size - while len(self.__listJPCP) > self.__lenghtRJ: - self.__listJPCP.pop(0) - self.__listJPCI.pop(0) - self.__listJPCR.pop(0) - self.__listJPPN.pop(0) - self.__listJPIN.pop(0) - self.__listJPCP.append( copy.copy(_X) ) - self.__listJPCI.append( copy.copy(_dX) ) - self.__listJPCR.append( copy.copy(_Jacobienne) ) - self.__listJPPN.append( numpy.linalg.norm(_X) ) - self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) ) - # - logging.debug("FDA Fin du calcul de la Jacobienne") + if (dotWith is not None) or (dotTWith is not None): + __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith) + else: + __Produit = None + if __Produit is None or self.__avoidRC: + _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) ) + if self.__avoidRC: + if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size + while len(self.__listJPCP) > self.__lenghtRJ: + self.__listJPCP.pop(0) + self.__listJPCI.pop(0) + self.__listJPCR.pop(0) + self.__listJPPN.pop(0) + self.__listJPIN.pop(0) + self.__listJPCP.append( copy.copy(_X) ) + self.__listJPCI.append( copy.copy(_dX) ) + self.__listJPCR.append( copy.copy(_Jacobienne) ) + self.__listJPPN.append( numpy.linalg.norm(_X) ) + self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) ) + logging.debug("FDA Fin du calcul de la Jacobienne") + if __Produit is not None: + return __Produit # return _Jacobienne @@ -388,28 +423,27 @@ class FDApproximation(object): ne doivent pas être données ici à la fonction utilisateur. """ if self.__mfEnabled: - assert len(paire) == 1, "Incorrect lenght of arguments" + assert len(paire) == 1, "Incorrect length of arguments" _paire = paire[0] assert len(_paire) == 2, "Incorrect number of arguments" else: assert len(paire) == 2, "Incorrect number of arguments" _paire = paire X, dX = _paire - _Jacobienne = self.TangentMatrix( X ) if dX is None or len(dX) == 0: # # Calcul de la forme matricielle si le second argument est None # ------------------------------------------------------------- + _Jacobienne = self.TangentMatrix( X ) if self.__mfEnabled: return [_Jacobienne,] else: return _Jacobienne else: # # Calcul de la valeur linéarisée de H en X appliqué à dX # ------------------------------------------------------ - _dX = numpy.asmatrix(numpy.ravel( dX )).T - _HtX = numpy.dot(_Jacobienne, _dX) - if self.__mfEnabled: return [_HtX.A1,] - else: return _HtX.A1 + _HtX = self.TangentMatrix( X, dotWith = dX ) + if self.__mfEnabled: return [_HtX,] + else: return _HtX # --------------------------------------------------------- def AdjointOperator(self, paire, **extraArgs ): @@ -420,48 +454,47 @@ class FDApproximation(object): ne doivent pas être données ici à la fonction utilisateur. 
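For reference, TangentMatrix above builds the Jacobian column by column from directional finite differences, and can now return J @ dX or J.T @ Y directly through dotWith/dotTWith instead of the assembled matrix. A minimal sketch of the non-centered (first-order forward difference) variant, using illustrative names only:

import numpy

def fd_jacobian(func, x, increment=0.01):
    # Forward differences: column i is (H(x + dx_i e_i) - H(x)) / dx_i
    x = numpy.ravel(numpy.asarray(x, dtype=float))
    dx = increment * x
    dx[dx == 0.] = increment          # avoid a zero perturbation on null components
    hx = numpy.ravel(func(x))
    columns = []
    for i in range(x.size):
        xp = x.copy()
        xp[i] = x[i] + dx[i]
        columns.append(numpy.ravel(func(xp) - hx) / dx[i])
    return numpy.column_stack(columns)   # shape (len(H(x)), len(x))

# Example on a simple nonlinear operator H(x) = (x0*x1, x1**2)
H = lambda x: numpy.array([x[0] * x[1], x[1]**2])
J = fd_jacobian(H, [1., 2.])
# Exact Jacobian at (1, 2) is [[2, 1], [0, 4]]; J approximates it to O(increment)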
""" if self.__mfEnabled: - assert len(paire) == 1, "Incorrect lenght of arguments" + assert len(paire) == 1, "Incorrect length of arguments" _paire = paire[0] assert len(_paire) == 2, "Incorrect number of arguments" else: assert len(paire) == 2, "Incorrect number of arguments" _paire = paire X, Y = _paire - _JacobienneT = self.TangentMatrix( X ).T if Y is None or len(Y) == 0: # # Calcul de la forme matricielle si le second argument est None # ------------------------------------------------------------- + _JacobienneT = self.TangentMatrix( X ).T if self.__mfEnabled: return [_JacobienneT,] else: return _JacobienneT else: # # Calcul de la valeur de l'adjoint en X appliqué à Y # -------------------------------------------------- - _Y = numpy.asmatrix(numpy.ravel( Y )).T - _HaY = numpy.dot(_JacobienneT, _Y) - if self.__mfEnabled: return [_HaY.A1,] - else: return _HaY.A1 + _HaY = self.TangentMatrix( X, dotTWith = Y ) + if self.__mfEnabled: return [_HaY,] + else: return _HaY # ============================================================================== -def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ): - "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés" +def EnsembleOfCenteredPerturbations( __bgCenter, __bgCovariance, __nbMembers ): + "Génération d'un ensemble de taille __nbMembers-1 d'états aléatoires centrés" # - _bgcenter = numpy.ravel(_bgcenter)[:,None] - if _nbmembers < 1: - raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),)) + __bgCenter = numpy.ravel(__bgCenter)[:,None] + if __nbMembers < 1: + raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),)) # - if _bgcovariance is None: - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + if __bgCovariance is None: + _Perturbations = numpy.tile( __bgCenter, __nbMembers) else: - _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z + _Z = numpy.random.multivariate_normal(numpy.zeros(__bgCenter.size), __bgCovariance, size=__nbMembers).T + _Perturbations = numpy.tile( __bgCenter, __nbMembers) + _Z # - return BackgroundEnsemble + return _Perturbations # ============================================================================== -def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True): - "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés" +def EnsembleOfBackgroundPerturbations( __bgCenter, __bgCovariance, __nbMembers, __withSVD = True): + "Génération d'un ensemble de taille __nbMembers-1 d'états aléatoires centrés" def __CenteredRandomAnomalies(Zr, N): """ Génère une matrice de N anomalies aléatoires centrées sur Zr selon les @@ -475,33 +508,33 @@ def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _wi Zr = numpy.dot(Q,Zr) return Zr.T # - _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1)) - if _nbmembers < 1: - raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),)) - if _bgcovariance is None: - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + __bgCenter = numpy.ravel(__bgCenter).reshape((-1,1)) + if __nbMembers < 1: + raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),)) + if __bgCovariance is None: + _Perturbations = numpy.tile( __bgCenter, __nbMembers) else: - if 
_withSVD: - U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False) - _nbctl = _bgcenter.size - if _nbmembers > _nbctl: + if __withSVD: + _U, _s, _V = numpy.linalg.svd(__bgCovariance, full_matrices=False) + _nbctl = __bgCenter.size + if __nbMembers > _nbctl: _Z = numpy.concatenate((numpy.dot( - numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]), - numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0) + numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]), + numpy.random.multivariate_normal(numpy.zeros(_nbctl),__bgCovariance,__nbMembers-1-_nbctl)), axis = 0) else: - _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1]) - _Zca = __CenteredRandomAnomalies(_Z, _nbmembers) - BackgroundEnsemble = _bgcenter + _Zca - else: - if max(abs(_bgcovariance.flatten())) > 0: - _nbctl = _bgcenter.size - _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1) - _Zca = __CenteredRandomAnomalies(_Z, _nbmembers) - BackgroundEnsemble = _bgcenter + _Zca + _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:__nbMembers-1])), _V[:__nbMembers-1]) + _Zca = __CenteredRandomAnomalies(_Z, __nbMembers) + _Perturbations = __bgCenter + _Zca + else: + if max(abs(__bgCovariance.flatten())) > 0: + _nbctl = __bgCenter.size + _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),__bgCovariance,__nbMembers-1) + _Zca = __CenteredRandomAnomalies(_Z, __nbMembers) + _Perturbations = __bgCenter + _Zca else: - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Perturbations = numpy.tile( __bgCenter, __nbMembers) # - return BackgroundEnsemble + return _Perturbations # ============================================================================== def EnsembleMean( __Ensemble ): @@ -519,9 +552,9 @@ def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.): return __Normalisation * (numpy.asarray( __Ensemble ) - __Em) # ============================================================================== -def EnsembleErrorCovariance( __Ensemble, __quick = False ): +def EnsembleErrorCovariance( __Ensemble, __Quick = False ): "Renvoie l'estimation empirique de la covariance d'ensemble" - if __quick: + if __Quick: # Covariance rapide mais rarement définie positive __Covariance = numpy.cov( __Ensemble ) else: @@ -539,7 +572,11 @@ def EnsembleErrorCovariance( __Ensemble, __quick = False ): return __Covariance # ============================================================================== -def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ): +def EnsemblePerturbationWithGivenCovariance( + __Ensemble, + __Covariance, + __Seed = None, + ): "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite" if hasattr(__Covariance,"assparsematrix"): if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all(): @@ -586,72 +623,105 @@ def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=No # ============================================================================== def CovarianceInflation( - InputCovOrEns, - InflationType = None, - InflationFactor = None, - BackgroundCov = None, + __InputCovOrEns, + __InflationType = None, + __InflationFactor = None, + __BackgroundCov = None, ): """ Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa Synthèse : Hunt 2007, section 2.3.5 """ - if InflationFactor is None: - return InputCovOrEns + if __InflationFactor is None: + return __InputCovOrEns else: - 
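As a small illustration of the ensemble generation done by EnsembleOfCenteredPerturbations above (random members drawn around a center with a prescribed covariance), a standalone sketch with hypothetical names:

import numpy

def centered_perturbations(center, covariance, members):
    # Draws "members" states around "center" with the prescribed covariance
    center = numpy.ravel(center).reshape((-1, 1))
    if covariance is None:
        return numpy.tile(center, members)
    Z = numpy.random.multivariate_normal(
        numpy.zeros(center.size), covariance, size=members).T
    return numpy.tile(center, members) + Z

E = centered_perturbations([1., 2.], numpy.diag([0.1, 0.2]), 50)
# The empirical column mean tends to the center, the empirical covariance to the prescribed one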
InflationFactor = float(InflationFactor) + __InflationFactor = float(__InflationFactor) # - if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]: - if InflationFactor < 1.: - raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.") - if InflationFactor < 1.+mpr: - return InputCovOrEns - OutputCovOrEns = InflationFactor**2 * InputCovOrEns + __InputCovOrEns = numpy.asarray(__InputCovOrEns) + if __InputCovOrEns.size == 0: return __InputCovOrEns # - elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]: - if InflationFactor < 1.: + if __InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]: + if __InflationFactor < 1.: raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.") - if InflationFactor < 1.+mpr: - return InputCovOrEns - InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float') - OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \ - + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis]) + if __InflationFactor < 1.+mpr: # No inflation = 1 + return __InputCovOrEns + __OutputCovOrEns = __InflationFactor**2 * __InputCovOrEns # - elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]: - if InflationFactor < 0.: + elif __InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]: + if __InflationFactor < 1.: + raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.") + if __InflationFactor < 1.+mpr: # No inflation = 1 + return __InputCovOrEns + __InputCovOrEnsMean = __InputCovOrEns.mean(axis=1, dtype=mfp).astype('float') + __OutputCovOrEns = __InputCovOrEnsMean[:,numpy.newaxis] \ + + __InflationFactor * (__InputCovOrEns - __InputCovOrEnsMean[:,numpy.newaxis]) + # + elif __InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]: + if __InflationFactor < 0.: raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.") - if InflationFactor < mpr: - return InputCovOrEns - __n, __m = numpy.asarray(InputCovOrEns).shape + if __InflationFactor < mpr: # No inflation = 0 + return __InputCovOrEns + __n, __m = __InputCovOrEns.shape if __n != __m: raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.") - OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n) + __tr = __InputCovOrEns.trace()/__n + if __InflationFactor > __tr: + raise ValueError("Inflation factor for additive inflation has to be small over %.0e."%__tr) + __OutputCovOrEns = (1. 
- __InflationFactor)*__InputCovOrEns + __InflationFactor * numpy.identity(__n) # - elif InflationType == "HybridOnBackgroundCovariance": - if InflationFactor < 0.: + elif __InflationType == "HybridOnBackgroundCovariance": + if __InflationFactor < 0.: raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.") - if InflationFactor < mpr: - return InputCovOrEns - __n, __m = numpy.asarray(InputCovOrEns).shape + if __InflationFactor < mpr: # No inflation = 0 + return __InputCovOrEns + __n, __m = __InputCovOrEns.shape if __n != __m: raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.") - if BackgroundCov is None: + if __BackgroundCov is None: raise ValueError("Background covariance matrix B has to be given for hybrid inflation.") - if InputCovOrEns.shape != BackgroundCov.shape: + if __InputCovOrEns.shape != __BackgroundCov.shape: raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.") - OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov + __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * __BackgroundCov # - elif InflationType == "Relaxation": - raise NotImplementedError("InflationType Relaxation") + elif __InflationType == "Relaxation": + raise NotImplementedError("Relaxation inflation type not implemented") # else: - raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType) + raise ValueError("Error in inflation type, '%s' is not a valid keyword."%__InflationType) # - return OutputCovOrEns + return __OutputCovOrEns + +# ============================================================================== +def HessienneEstimation(__selfA, __nb, __HaM, __HtM, __BI, __RI): + "Estimation de la Hessienne" + # + __HessienneI = [] + for i in range(int(__nb)): + __ee = numpy.zeros((__nb,1)) + __ee[i] = 1. + __HtEE = numpy.dot(__HtM,__ee).reshape((-1,1)) + __HessienneI.append( numpy.ravel( __BI * __ee + __HaM * (__RI * __HtEE) ) ) + # + __A = numpy.linalg.inv(numpy.array( __HessienneI )) + __A = (__A + __A.T) * 0.5 # Symétrie + __A = __A + mpr*numpy.trace( __A ) * numpy.identity(__nb) # Positivité + # + if min(__A.shape) != max(__A.shape): + raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(__selfA._name,str(__A.shape))) + if (numpy.diag(__A) < 0).any(): + raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(__selfA._name,)) + if logging.getLogger().level < logging.WARNING: # La vérification n'a lieu qu'en debug + try: + numpy.linalg.cholesky( __A ) + except: + raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
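To illustrate the "MultiplicativeOn...Anomalies" branch of CovarianceInflation above (members are spread around their mean by a constant factor, cf. Hunt 2007, section 2.3.5), a short sketch with names that are not the ADAO ones:

import numpy

def inflate_anomalies(ensemble, factor):
    # ensemble: (n, m) array, one member per column; factor >= 1
    mean = ensemble.mean(axis=1, keepdims=True)
    return mean + factor * (ensemble - mean)       # anomalies rescaled, mean unchanged

E = numpy.array([[1., 2., 3.],
                 [0., 1., -1.]])
Ei = inflate_anomalies(E, 1.5)
# The ensemble mean is preserved and the empirical covariance is multiplied by factor**2
assert numpy.allclose(Ei.mean(axis=1), E.mean(axis=1))
assert numpy.allclose(numpy.cov(Ei), 1.5**2 * numpy.cov(E))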
Please check your a priori covariances and your observation operator."%(__selfA._name,)) + # + return __A # ============================================================================== def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None): - "Estimation des quantiles a posteriori (selfA est modifié)" + "Estimation des quantiles a posteriori à partir de A>0 (selfA est modifié)" nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"] # # Traitement des bornes @@ -663,26 +733,26 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None): LBounds = None if LBounds is not None: LBounds = ForceNumericBounds( LBounds ) - _Xa = numpy.ravel(Xa) + __Xa = numpy.ravel(Xa) # # Échantillonnage des états YfQ = None EXr = None for i in range(nbsamples): if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None: - dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1)) + dXr = (numpy.random.multivariate_normal(__Xa,A) - __Xa).reshape((-1,1)) if LBounds is not None: # "EstimateProjection" par défaut - dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1) - dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1) + dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - __Xa.reshape((-1,1))),axis=1) + dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - __Xa.reshape((-1,1))),axis=1) dYr = HtM @ dXr Yr = HXa.reshape((-1,1)) + dYr - if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr) + if selfA._toStore("SampledStateForQuantiles"): Xr = __Xa + numpy.ravel(dXr) elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None: - Xr = numpy.random.multivariate_normal(_Xa,A) + Xr = numpy.random.multivariate_normal(__Xa,A) if LBounds is not None: # "EstimateProjection" par défaut Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1) Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1) - Yr = Hm( Xr ) + Yr = numpy.asarray(Hm( Xr )) else: raise ValueError("Quantile simulations has only to be Linear or NonLinear.") # @@ -716,3449 +786,173 @@ def ForceNumericBounds( __Bounds ): # Converti toutes les bornes individuelles None à +/- l'infini __Bounds = numpy.asarray( __Bounds, dtype=float ) if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2: - raise ValueError("Incorrectly defined bounds data") - __Bounds[numpy.isnan(__Bounds)[:,0],0] = -sys.float_info.max - __Bounds[numpy.isnan(__Bounds)[:,1],1] = sys.float_info.max + raise ValueError("Incorrectly shaped bounds data") + __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max + __Bounds[numpy.isnan(__Bounds[:,1]),1] = sys.float_info.max return __Bounds # ============================================================================== -def RecentredBounds( __Bounds, __Center): +def RecentredBounds( __Bounds, __Center, __Scale = None): "Recentre les bornes autour de 0, sauf si globalement None" # Conserve une valeur par défaut à None s'il n'y a pas de bornes if __Bounds is None: return None - # Recentre les valeurs numériques de bornes - return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).transpose((-1,1)) + if __Scale is None: + # Recentre les valeurs numériques de bornes + return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1)) + else: + # Recentre les valeurs numériques de bornes et change l'échelle par une matrice + return 
__Scale @ (ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))) # ============================================================================== -def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - Contrained Extended Kalman Filter - """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True - selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] ) - # - # Opérateurs - H = HO["Direct"].appliedControledFormTo - # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None +def ApplyBounds( __Vector, __Bounds, __newClip = True): + "Applique des bornes numériques à un point" + # Conserve une valeur par défaut s'il n'y a pas de bornes + if __Bounds is None: return __Vector + # + if not isinstance(__Vector, numpy.ndarray): # Is an array + raise ValueError("Incorrect array definition of vector data") + if not isinstance(__Bounds, numpy.ndarray): # Is an array + raise ValueError("Incorrect array definition of bounds data") + if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght + raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size)) + if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2: + raise ValueError("Incorrectly shaped bounds data") # - # Durée d'observation et tailles - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] + if __newClip: + __Vector = __Vector.clip( + __Bounds[:,0].reshape(__Vector.shape), + __Bounds[:,1].reshape(__Vector.shape), + ) else: - duration = 2 - __p = numpy.array(Y).size - # - # Précalcul des inversions de B et R - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() - # - __n = Xb.size - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - Xn = Xb - Pn = B - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - if hasattr(B,"asfullmatrix"): - selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) ) - else: - selfA.StoredVariables["APosterioriCovariance"].store( B ) - selfA._setInternalState("seed", numpy.random.get_state()) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - Pn = selfA._getInternalState("Pn") - # - if selfA._parameters["EstimationOf"] == "Parameters": - XaMin = Xn - previousJMinimum = numpy.finfo(float).max - # - for step in range(duration-1): - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) - else: - Ynpu = numpy.ravel( Y ).reshape((__p,1)) - # - Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn) - Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape - Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn) - Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape - # - if selfA._parameters["EstimationOf"] == "State": - Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn) - Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape - Ma = 
EM["Adjoint"].asMatrix(ValueForMethodForm = Xn) - Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape - # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.ravel( U[step] ).reshape((-1,1)) - elif hasattr(U,"store") and len(U)==1: - Un = numpy.ravel( U[0] ).reshape((-1,1)) - else: - Un = numpy.ravel( U ).reshape((-1,1)) - else: - Un = None - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1)) - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! - Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - Pn_predicted = Q + Mt * Pn * Ma - elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast - # --- > Par principe, M = Id, Q = 0 - Xn_predicted = Xn - Pn_predicted = Pn - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - # - if selfA._parameters["EstimationOf"] == "State": - HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1)) - _Innovation = Ynpu - HX_predicted - elif selfA._parameters["EstimationOf"] == "Parameters": - HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1)) - _Innovation = Ynpu - HX_predicted - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! 
- _Innovation = _Innovation - Cm * Un - # - Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha)) - Xn = Xn_predicted + Kn * _Innovation - Pn = Pn_predicted - Kn * Ht * Pn_predicted - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - # - Xa = Xn # Pointeurs - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("Pn", Pn) - #-------------------------- - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( Xn_predicted ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( Xn_predicted - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) ) - Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - 
selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] + __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1) + __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1) + __Vector = numpy.asarray(__Vector) # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 + return __Vector # ============================================================================== -def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"): - """ - EnKS - """ - # - # Opérateurs - H = HO["Direct"].appliedControledFormTo - # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None - # - # Précalcul des inversions de B et R - RIdemi = R.sqrtmI() - # - # Durée d'observation et tailles - LagL = selfA._parameters["SmootherLagL"] - if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")): - raise ValueError("Fixed-lag smoother requires a series of observation") - if Y.stepnumber() < LagL: - raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L") - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] - __n = Xb.size - __m = selfA._parameters["NumberOfMembers"] - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - if hasattr(B,"asfullmatrix"): - selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) ) - else: - selfA.StoredVariables["APosterioriCovariance"].store( B ) - # - # Calcul direct initial (on privilégie la mémorisation au recalcul) - __seed = numpy.random.get_state() - selfB = copy.deepcopy(selfA) - selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"] - if VariantM == "EnKS16-KalmanFilterFormula": - etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula") - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - if LagL > 0: - EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1] - else: - EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. 
etkf - selfA._parameters["SetSeed"] = numpy.random.set_state(__seed) - # - for step in range(LagL,duration-1): - # - sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1] - sEL.append(None) - # - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) - else: - Ynpu = numpy.ravel( Y ).reshape((__p,1)) - # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T - elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T - else: - Un = numpy.asmatrix(numpy.ravel( U )).T - else: - Un = None - # - #-------------------------- - if VariantM == "EnKS16-KalmanFilterFormula": - if selfA._parameters["EstimationOf"] == "State": # Forecast - EL = M( [(EL[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - EL = EnsemblePerturbationWithGivenCovariance( EL, Q ) - EZ = H( [(EL[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! - Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - EZ = EZ + Cm * Un - elif selfA._parameters["EstimationOf"] == "Parameters": - # --- > Par principe, M = Id, Q = 0 - EZ = H( [(EL[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - # - vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) - # - mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) ) - mS = mS.reshape((-1,__m)) # Pour dimension 1 - delta = RIdemi @ ( Ynpu - vZm ) - mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS ) - vw = mT @ mS.T @ delta - # - Tdemi = numpy.real(scipy.linalg.sqrtm(mT)) - mU = numpy.identity(__m) - wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU) - # - EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) ) - EL = vEm + EX @ wTU - # - sEL[LagL] = EL - for irl in range(LagL): # Lissage des L précédentes analysis - vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) ) - sEL[irl] = vEm + EX @ wTU - # - # Conservation de l'analyse retrospective d'ordre 0 avant rotation - Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - if selfA._toStore("APosterioriCovariance"): - EXn = sEL[0] - # - for irl in range(LagL): - sEL[irl] = sEL[irl+1] - sEL[LagL] = None - #-------------------------- - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) ) - # - # Stockage des dernières analyses incomplètement remises à jour - for irl in range(LagL): - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - selfA.StoredVariables["Analysis"].store( Xa ) - # - return 0 +def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf): + "Recentre l'ensemble Xn autour de l'analyse 3DVAR" + # + Xf = EnsembleMean( __EnXf ) + Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) ) + Pf = (1 - __Betaf) * __B.asfullmatrix(Xf.size) + 
__Betaf * Pf + # + selfB = PartialAlgorithm("3DVAR") + selfB._parameters["Minimizer"] = "LBFGSB" + selfB._parameters["MaximumNumberOfSteps"] = 15000 + selfB._parameters["CostDecrementTolerance"] = 1.e-7 + selfB._parameters["ProjectedGradientTolerance"] = -1 + selfB._parameters["GradientNormTolerance"] = 1.e-05 + selfB._parameters["StoreInternalVariables"] = False + selfB._parameters["optiprint"] = -1 + selfB._parameters["optdisp"] = 0 + selfB._parameters["Bounds"] = None + selfB._parameters["InitializationPoint"] = Xf + from daAlgorithms.Atoms import std3dvar + std3dvar.std3dvar(selfB, Xf, __Ynpu, None, __HO, None, __R, Pf) + Xa = selfB.get("Analysis")[-1].reshape((-1,1)) + del selfB + # + return Xa + EnsembleOfAnomalies( __EnXn ) # ============================================================================== -def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): +def multiXOsteps(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle, + __CovForecast = False, __LinEvolution = False, + ): """ - Ensemble-Transform EnKF + Prévision multi-pas avec une correction par pas (multi-méthodes) """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True - # - # Opérateurs - H = HO["Direct"].appliedControledFormTo # + # Initialisation + # -------------- if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + Xn = numpy.asarray(Xb) + if __CovForecast: Pn = B + selfA.StoredVariables["Analysis"].store( Xn ) + if selfA._toStore("APosterioriCovariance"): + if hasattr(B,"asfullmatrix"): + selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) ) + else: + selfA.StoredVariables["APosterioriCovariance"].store( B ) + selfA._setInternalState("seed", numpy.random.get_state()) + elif selfA._parameters["nextStep"]: + Xn = selfA._getInternalState("Xn") + if __CovForecast: Pn = selfA._getInternalState("Pn") else: - Cm = None + Xn = numpy.asarray(Xb) + if __CovForecast: Pn = B # - # Durée d'observation et tailles if hasattr(Y,"stepnumber"): duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] else: duration = 2 - __p = numpy.array(Y).size - # - # Précalcul des inversions de B et R - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() - elif VariantM != "KalmanFilterFormula": - RI = R.getI() - if VariantM == "KalmanFilterFormula": - RIdemi = R.sqrtmI() - # - __n = Xb.size - __m = selfA._parameters["NumberOfMembers"] - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m ) - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - if hasattr(B,"asfullmatrix"): - selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) ) - else: - selfA.StoredVariables["APosterioriCovariance"].store( B ) - selfA._setInternalState("seed", numpy.random.get_state()) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - # - previousJMinimum = numpy.finfo(float).max # + # Multi-steps + # ----------- for step in range(duration-1): - 
numpy.random.set_state(selfA._getInternalState("seed")) + selfA.StoredVariables["CurrentStepNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + # if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) + Ynpu = numpy.asarray( Y[step+1] ).reshape((-1,1)) else: - Ynpu = numpy.ravel( Y ).reshape((__p,1)) + Ynpu = numpy.asarray( Y ).reshape((-1,1)) # if U is not None: if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T + Un = numpy.asarray( U[step] ).reshape((-1,1)) elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + Un = numpy.asarray( U[0] ).reshape((-1,1)) else: - Un = numpy.asmatrix(numpy.ravel( U )).T + Un = numpy.asarray( U ).reshape((-1,1)) else: Un = None # - if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - EMX = M( [(Xn[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q ) - HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! - Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast - # --- > Par principe, M = Id, Q = 0 - Xn_predicted = EMX = Xn - HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - # - # Mean of forecast and observation of forecast - Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) - # - # Anomalies - EaX = EnsembleOfAnomalies( Xn_predicted, Xfm ) - EaHX = EnsembleOfAnomalies( HX_predicted, Hfm) - # - #-------------------------- - if VariantM == "KalmanFilterFormula": - mS = RIdemi * EaHX / math.sqrt(__m-1) - mS = mS.reshape((-1,__m)) # Pour dimension 1 - delta = RIdemi * ( Ynpu - Hfm ) - mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS ) - vw = mT @ mS.T @ delta - # - Tdemi = numpy.real(scipy.linalg.sqrtm(mT)) - mU = numpy.identity(__m) - # - EaX = EaX / math.sqrt(__m-1) - Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU ) - #-------------------------- - elif VariantM == "Variational": - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _Jo = 0.5 * _A.T @ (RI * _A) - _Jb = 0.5 * (__m-1) * w.T @ w - _J = _Jo + _Jb - return float(_J) - def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = (__m-1) * w.reshape((__m,1)) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) - # - Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m)) - Htb = (__m-1) * numpy.identity(__m) - Hta = Hto + Htb - # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 - # - Xn = Xfm + EaX @ (vw[:,None] + EWa) - 
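The "KalmanFilterFormula" branch of the etkf code above computes the deterministic ensemble-transform update: weights vw = T mS.T delta for the mean and sqrt(m-1) T^(1/2) for the anomalies. A compact sketch of that transform, assuming a diagonal observation error covariance and using illustrative names, not the ADAO API:

import numpy, scipy.linalg

def etkf_update(E, HE, y, R_diag):
    # E: (n, m) forecast ensemble, HE: (p, m) observed ensemble,
    # y: (p,) observation, R_diag: (p,) observation error variances
    n, m = E.shape
    xf = E.mean(axis=1, keepdims=True)
    hf = HE.mean(axis=1, keepdims=True)
    Ax  = (E - xf) / numpy.sqrt(m - 1)                 # state anomalies
    Ay  = (HE - hf) / numpy.sqrt(m - 1)                # observation anomalies
    Rsi = numpy.diag(1. / numpy.sqrt(R_diag))          # R^{-1/2}
    S   = Rsi @ Ay
    d   = Rsi @ (y.reshape((-1, 1)) - hf)
    T   = numpy.linalg.inv(numpy.identity(m) + S.T @ S)
    w   = T @ S.T @ d                                  # weights for the analysis mean
    Th  = numpy.real(scipy.linalg.sqrtm(T))            # transform for the anomalies
    return xf + Ax @ (w + numpy.sqrt(m - 1) * Th)      # analysis ensemble, shape (n, m)

# Example: 3 members, 2-dimensional state, the observation selects the first component
E  = numpy.array([[1., 2., 3.], [0., 1., 2.]])
Ea = etkf_update(E, E[:1, :], numpy.array([2.5]), numpy.array([0.1]))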
#-------------------------- - elif VariantM == "FiniteSize11": # Jauge Boc2011 - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _Jo = 0.5 * _A.T @ (RI * _A) - _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w) - _J = _Jo + _Jb - return float(_J) - def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) - # - Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m)) - Htb = __m * \ - ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \ - / (1 + 1/__m + vw.T @ vw)**2 - Hta = Hto + Htb - # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 - # - Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa) - #-------------------------- - elif VariantM == "FiniteSize15": # Jauge Boc2015 - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _Jo = 0.5 * _A.T * RI * _A - _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w) - _J = _Jo + _Jb - return float(_J) - def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) - # - Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m)) - Htb = (__m+1) * \ - ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \ - / (1 + 1/__m + vw.T @ vw)**2 - Hta = Hto + Htb - # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 - # - Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa) - #-------------------------- - elif VariantM == "FiniteSize16": # Jauge Boc2016 - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _Jo = 0.5 * _A.T @ (RI * _A) - _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1)) - _J = _Jo + _Jb - return float(_J) - def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1)) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) - # - Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m)) - Htb = ((__m+1) / (__m-1)) * \ - ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \ - / (1 + 1/__m + vw.T @ vw / (__m-1))**2 - Hta = Hto + Htb - # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 - # - Xn = Xfm + EaX @ (vw[:,None] + EWa) - #-------------------------- - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": - 
Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("seed", numpy.random.get_state()) - #-------------------------- - # - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("APosterioriCovariance") \ - or selfA._toStore("InnovationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T - _Innovation = Ynpu - _HXa - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( EMX ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( EMX - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - 
selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] - # ---> Pour les smoothers - if selfA._toStore("CurrentEnsembleState"): - selfA.StoredVariables["CurrentEnsembleState"].store( Xn ) - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 - -# ============================================================================== -def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - Extended Kalman Filter - """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True - # - # Opérateurs - H = HO["Direct"].appliedControledFormTo - # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None - # - # Durée d'observation et tailles - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] - else: - duration = 2 - __p = numpy.array(Y).size - # - # Précalcul des inversions de B et R - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() - # - __n = Xb.size - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - Xn = Xb - Pn = B - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - if hasattr(B,"asfullmatrix"): - selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) ) - else: - selfA.StoredVariables["APosterioriCovariance"].store( B ) - selfA._setInternalState("seed", numpy.random.get_state()) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - Pn = selfA._getInternalState("Pn") - # - if selfA._parameters["EstimationOf"] == "Parameters": - XaMin = Xn - previousJMinimum = numpy.finfo(float).max - # - for step in range(duration-1): - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) - else: - Ynpu = 
numpy.ravel( Y ).reshape((__p,1)) - # - Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn) - Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape - Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn) - Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape - # + # Predict (Time Update) + # --------------------- if selfA._parameters["EstimationOf"] == "State": - Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn) - Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape - Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn) - Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape - # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.ravel( U[step] ).reshape((-1,1)) - elif hasattr(U,"store") and len(U)==1: - Un = numpy.ravel( U[0] ).reshape((-1,1)) + if __CovForecast or __LinEvolution: + Mt = EM["Tangent"].asMatrix(Xn) + Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape + if __CovForecast: + Ma = EM["Adjoint"].asMatrix(Xn) + Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape + Pn_predicted = Q + Mt @ (Pn @ Ma) + if __LinEvolution: + Xn_predicted = Mt @ Xn else: - Un = numpy.ravel( U ).reshape((-1,1)) - else: - Un = None - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1)) - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! - Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - Pn_predicted = Q + Mt * Pn * Ma - elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast + M = EM["Direct"].appliedControledFormTo + Xn_predicted = M( (Xn, Un) ) + if CM is not None and "Tangent" in CM and Un is not None: # Attention : si Cm est aussi dans M, doublon ! + Cm = CM["Tangent"].asMatrix(Xn_predicted) + Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape + Xn_predicted = Xn_predicted + (Cm @ Un).reshape((-1,1)) + elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = Xn - Pn_predicted = Pn - # - if selfA._parameters["EstimationOf"] == "State": - HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1)) - _Innovation = Ynpu - HX_predicted - elif selfA._parameters["EstimationOf"] == "Parameters": - HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1)) - _Innovation = Ynpu - HX_predicted - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! 
- _Innovation = _Innovation - Cm * Un - # - Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha)) - Xn = Xn_predicted + Kn * _Innovation - Pn = Pn_predicted - Kn * Ht * Pn_predicted - # - Xa = Xn # Pointeurs - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("Pn", Pn) - #-------------------------- - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) + if __CovForecast: Pn_predicted = Pn + Xn_predicted = numpy.asarray(Xn_predicted).reshape((-1,1)) if selfA._toStore("ForecastState"): selfA.StoredVariables["ForecastState"].store( Xn_predicted ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( Xn_predicted - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) ) - Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - 
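The removed exkf update above is the standard extended Kalman filter algebra. As a reading aid, here is the same predict/correct sequence written with plain ndarrays on invented toy matrices; it is a sketch, not the ADAO implementation.

import numpy

n, p = 3, 2
Mt = numpy.eye(n); Ma = Mt.T                  # tangent and adjoint of the evolution model (toy)
Ht = numpy.ones((p, n)); Ha = Ht.T            # tangent and adjoint of the observation operator (toy)
Q  = 0.01 * numpy.eye(n); R = 0.1 * numpy.eye(p)
Xn = numpy.zeros((n, 1)); Pn = numpy.eye(n)
Ynpu = numpy.ones((p, 1))

# Predict (time update)
X_pred = Mt @ Xn
P_pred = Q + Mt @ (Pn @ Ma)

# Correct (measurement update)
d  = Ynpu - Ht @ X_pred                       # innovation
Kn = P_pred @ Ha @ numpy.linalg.inv(R + Ht @ (P_pred @ Ha))
Xn = X_pred + Kn @ d
Pn = P_pred - Kn @ (Ht @ P_pred)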
selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 - -# ============================================================================== -def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", - BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): - """ - Iterative EnKF - """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True - # - # Opérateurs - H = HO["Direct"].appliedControledFormTo - # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None - # - # Durée d'observation et tailles - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] - else: - duration = 2 - __p = numpy.array(Y).size - # - # Précalcul des inversions de B et R - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() - # - __n = Xb.size - __m = selfA._parameters["NumberOfMembers"] - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n) - else: Pn = B - Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m ) - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - if hasattr(B,"asfullmatrix"): - selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) ) + if __CovForecast: + if hasattr(Pn_predicted,"asfullmatrix"): + Pn_predicted = Pn_predicted.asfullmatrix(Xn.size) else: - selfA.StoredVariables["APosterioriCovariance"].store( B ) - selfA._setInternalState("seed", numpy.random.get_state()) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - # - previousJMinimum = numpy.finfo(float).max - # - for step in range(duration-1): - numpy.random.set_state(selfA._getInternalState("seed")) - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) - else: - Ynpu = numpy.ravel( Y ).reshape((__p,1)) + Pn_predicted = numpy.asarray(Pn_predicted).reshape((Xn.size,Xn.size)) + if selfA._toStore("ForecastCovariance"): + selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted ) # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] 
)).T - elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T - else: - Un = numpy.asmatrix(numpy.ravel( U )).T + # Correct (Measurement Update) + # ---------------------------- + if __CovForecast: + oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, Pn_predicted, True) else: - Un = None - # - if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) + oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, B, True) # #-------------------------- - if VariantM == "IEnKF12": - Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float')) - EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1) - __j = 0 - Deltaw = 1 - if not BnotT: - Ta = numpy.identity(__m) - vw = numpy.zeros(__m) - while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax: - vx1 = (Xfm + EaX @ vw).reshape((__n,1)) - # - if BnotT: - E1 = vx1 + _epsilon * EaX - else: - E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q - E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - elif selfA._parameters["EstimationOf"] == "Parameters": - # --- > Par principe, M = Id - E2 = Xn - vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - vy1 = H((vx2, Un)).reshape((__p,1)) - # - HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) - # - if BnotT: - EaY = (HE2 - vy2) / _epsilon - else: - EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1) - # - GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 ))) - mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m)) - Deltaw = - numpy.linalg.solve(mH,GradJ) - # - vw = vw + Deltaw - # - if not BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) - # - __j = __j + 1 - # - A2 = EnsembleOfAnomalies( E2 ) - # - if BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) - A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon - # - Xn = vx2 + A2 - #-------------------------- - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("seed", numpy.random.get_state()) - #-------------------------- - # - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("APosterioriCovariance") \ - or selfA._toStore("InnovationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T - _Innovation = Ynpu - _HXa - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - 
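The heart of the removed IEnKF12 loop is a Gauss-Newton step on the ensemble weights. The fragment below isolates that single step with invented toy inputs (EaY, RI, dy); the real loop also re-propagates the ensemble through M and H at every iteration.

import numpy
import scipy.linalg

m, p = 5, 3
rng = numpy.random.default_rng(2)
EaY = rng.normal(size=(p, m))                 # observation-space anomalies, assumed given
RI  = numpy.eye(p)                            # inverse observation-error covariance (toy)
dy  = rng.normal(size=(p, 1))                 # innovation y - H(current weighted mean)
vw  = numpy.zeros(m)                          # current weights

GradJ  = vw - (EaY.T @ (RI @ dy)).ravel()     # gradient of the weight-space cost
mH     = numpy.eye(m) + EaY.T @ (RI @ EaY)    # approximate (Gauss-Newton) Hessian
Deltaw = -numpy.linalg.solve(mH, GradJ)       # Newton increment on the weights
vw     = vw + Deltaw
Ta     = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv(mH)))  # analysis transform matrix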
selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( E2 ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( E2 - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] - # ---> Pour les smoothers - if selfA._toStore("CurrentEnsembleState"): - selfA.StoredVariables["CurrentEnsembleState"].store( Xn ) - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # 
---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 - -# ============================================================================== -def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - 3DVAR incrémental - """ - # - # Initialisations - # --------------- - # - # Opérateur non-linéaire pour la boucle externe - Hm = HO["Direct"].appliedTo - # - # Précalcul des inversions de B et R - BI = B.getI() - RI = R.getI() - # - # Point de démarrage de l'optimisation - Xini = selfA._parameters["InitializationPoint"] - # - HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T - Innovation = Y - HXb - # - # Outer Loop - # ---------- - iOuter = 0 - J = 1./mpr - DeltaJ = 1./mpr - Xr = Xini.reshape((-1,1)) - while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]: - # - # Inner Loop - # ---------- - Ht = HO["Tangent"].asMatrix(Xr) - Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape - # - # Définition de la fonction-coût - # ------------------------------ - def CostFunction(dx): - _dX = numpy.asmatrix(numpy.ravel( dx )).T - if selfA._parameters["StoreInternalVariables"] or \ - selfA._toStore("CurrentState") or \ - selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( Xb + _dX ) - _HdX = Ht * _dX - _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T - _dInnovation = Innovation - _HdX - if selfA._toStore("SimulatedObservationAtCurrentState") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation ) - # - Jb = float( 0.5 * _dX.T * BI * _dX ) - Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation ) - J = Jb + Jo - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - if selfA._toStore("IndexOfOptimum") or \ - selfA._toStore("CurrentOptimum") or \ - selfA._toStore("CostFunctionJAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( 
selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - return J - # - def GradientOfCostFunction(dx): - _dX = numpy.asmatrix(numpy.ravel( dx )).T - _HdX = Ht * _dX - _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T - _dInnovation = Innovation - _HdX - GradJb = BI * _dX - GradJo = - Ht.T @ (RI * _dInnovation) - GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) - return GradJ - # - # Minimisation de la fonctionnelle - # -------------------------------- - nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() - # - if selfA._parameters["Minimizer"] == "LBFGSB": - # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b( - if "0.19" <= scipy.version.version <= "1.1.0": - import lbfgsbhlt as optimiseur - else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], - ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) - # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] 
- # - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T - else: - Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T - # - Xr = Minimum - DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J - iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1] - # - # Obtention de l'analyse - # ---------------------- - Xa = Xr - # - selfA.StoredVariables["Analysis"].store( Xa ) - # - if selfA._toStore("OMA") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("SimulatedObservationAtOptimum"): - if selfA._toStore("SimulatedObservationAtCurrentState"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] - elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] - else: - HXa = Hm( Xa ) - # - # Calcul de la covariance d'analyse - # --------------------------------- - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("JacobianMatrixAtOptimum") or \ - selfA._toStore("KalmanGainAtOptimum"): - HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) - HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("KalmanGainAtOptimum"): - HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) - HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles"): - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( A ) - if selfA._toStore("JacobianMatrixAtOptimum"): - selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) - if selfA._toStore("KalmanGainAtOptimum"): - if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I - elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI - selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) - # - # Calculs et/ou stockages supplémentaires - # --------------------------------------- - if selfA._toStore("Innovation") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("MahalanobisConsistency") or \ - selfA._toStore("OMB"): - d = Y - HXb - if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) - if selfA._toStore("OMA"): - selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) - if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) - if selfA._toStore("SigmaObs2"): - TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) - if selfA._toStore("MahalanobisConsistency"): - selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) - if selfA._toStore("SimulationQuantiles"): - QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM) - if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) - if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) - # - return 0 - -# ============================================================================== -def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", - BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): - """ - Maximum Likelihood Ensemble Filter - """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True - # - # Opérateurs - H = HO["Direct"].appliedControledFormTo - # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None - # - # Durée d'observation et tailles - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] - else: - duration = 2 - __p = numpy.array(Y).size - # - # Précalcul des inversions de B et R - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() - # - __n = Xb.size - __m = selfA._parameters["NumberOfMembers"] - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m ) - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - if hasattr(B,"asfullmatrix"): - selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) ) - else: - selfA.StoredVariables["APosterioriCovariance"].store( B 
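Both the incremental 3DVAR above and the PSAS variant further down assemble the a posteriori covariance as A = (B^-1 + H^T R^-1 H)^-1, column by column from the tangent and adjoint operators, and validate it with a Cholesky factorisation. A compact sketch with small invented matrices:

import numpy

n, p = 3, 2
BI  = numpy.eye(n)                             # inverse background covariance (toy)
RI  = numpy.eye(p)                             # inverse observation covariance (toy)
HtM = numpy.ones((p, n)); HaM = HtM.T          # tangent and adjoint observation operators (toy)

cols = []
for i in range(n):
    e = numpy.zeros((n, 1)); e[i] = 1.
    cols.append(numpy.ravel(BI @ e + HaM @ (RI @ (HtM @ e))))
Hessian = numpy.array(cols)                    # B^-1 + H^T R^-1 H (symmetric, so columnwise assembly is fine)
A = numpy.linalg.inv(Hessian)

if (numpy.diag(A) < 0).any():
    raise ValueError("A has a negative diagonal term: check the observation operator")
numpy.linalg.cholesky(A)                       # raises LinAlgError if A is not symmetric positive-definite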
) - selfA._setInternalState("seed", numpy.random.get_state()) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - # - previousJMinimum = numpy.finfo(float).max - # - for step in range(duration-1): - numpy.random.set_state(selfA._getInternalState("seed")) - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) - else: - Ynpu = numpy.ravel( Y ).reshape((__p,1)) - # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T - elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T - else: - Un = numpy.asmatrix(numpy.ravel( U )).T - else: - Un = None - # - if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - EMX = M( [(Xn[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q ) - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! - Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast - # --- > Par principe, M = Id, Q = 0 - Xn_predicted = EMX = Xn - # - #-------------------------- - if VariantM == "MLEF13": - Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float')) - EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) ) - Ua = numpy.identity(__m) - __j = 0 - Deltaw = 1 - if not BnotT: - Ta = numpy.identity(__m) - vw = numpy.zeros(__m) - while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax: - vx1 = (Xfm + EaX @ vw).reshape((__n,1)) - # - if BnotT: - E1 = vx1 + _epsilon * EaX - else: - E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta - # - HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) - # - if BnotT: - EaY = (HE2 - vy2) / _epsilon - else: - EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1) - # - GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 ))) - mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m)) - Deltaw = - numpy.linalg.solve(mH,GradJ) - # - vw = vw + Deltaw - # - if not BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) - # - __j = __j + 1 - # - if BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) - # - Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua - #-------------------------- - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("seed", numpy.random.get_state()) - #-------------------------- - # - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("APosterioriCovariance") \ - or 
selfA._toStore("InnovationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T - _Innovation = Ynpu - _HXa - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( EMX ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( EMX - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) - if 
selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] - # ---> Pour les smoothers - if selfA._toStore("CurrentEnsembleState"): - selfA.StoredVariables["CurrentEnsembleState"].store( Xn ) - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 - -# ============================================================================== -def mmqr( - func = None, - x0 = None, - fprime = None, - bounds = None, - quantile = 0.5, - maxfun = 15000, - toler = 1.e-06, - y = None, - ): - """ - Implémentation informatique de l'algorithme MMQR, basée sur la publication : - David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm", - Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000. - """ - # - # Recuperation des donnees et informations initiales - # -------------------------------------------------- - variables = numpy.ravel( x0 ) - mesures = numpy.ravel( y ) - increment = sys.float_info[0] - p = variables.size - n = mesures.size - quantile = float(quantile) - # - # Calcul des parametres du MM - # --------------------------- - tn = float(toler) / n - e0 = -tn / math.log(tn) - epsilon = (e0-tn)/(1+math.log(e0)) - # - # Calculs d'initialisation - # ------------------------ - residus = mesures - numpy.ravel( func( variables ) ) - poids = 1./(epsilon+numpy.abs(residus)) - veps = 1. - 2. * quantile - residus * poids - lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus) - iteration = 0 - # - # Recherche iterative - # ------------------- - while (increment > toler) and (iteration < maxfun) : - iteration += 1 - # - Derivees = numpy.array(fprime(variables)) - Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS - DeriveesT = Derivees.transpose() - M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) ) - SM = numpy.transpose(numpy.dot( DeriveesT , veps )) - step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0] - # - variables = variables + step - if bounds is not None: - # Attention : boucle infinie à éviter si un intervalle est trop petit - while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ): - step = step/2. - variables = variables - step - residus = mesures - numpy.ravel( func(variables) ) - surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus) - # - while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) : - step = step/2. - variables = variables - step - residus = mesures - numpy.ravel( func(variables) ) - surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus) - # - increment = lastsurrogate-surrogate - poids = 1./(epsilon+numpy.abs(residus)) - veps = 1. - 2. 
* quantile - residus * poids - lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus) - # - # Mesure d'écart - # -------------- - Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] ) - # - return variables, Ecart, [n,p,iteration,increment,0] - -# ============================================================================== -def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle): - """ - 3DVAR multi-pas et multi-méthodes - """ - # - # Initialisation - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedTo - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - Xn = numpy.ravel(Xb).reshape((-1,1)) - selfA.StoredVariables["Analysis"].store( Xn ) - if selfA._toStore("APosterioriCovariance"): - if hasattr(B,"asfullmatrix"): - selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) ) - else: - selfA.StoredVariables["APosterioriCovariance"].store( B ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( Xn ) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - else: - Xn = numpy.ravel(Xb).reshape((-1,1)) - # - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - else: - duration = 2 - # - # Multi-pas - for step in range(duration-1): - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1)) - else: - Ynpu = numpy.ravel( Y ).reshape((-1,1)) - # - if selfA._parameters["EstimationOf"] == "State": # Forecast - Xn_predicted = M( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( Xn_predicted ) - elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast - # --- > Par principe, M = Id, Q = 0 - Xn_predicted = Xn - Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1)) - # - oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None) - # - Xn = selfA.StoredVariables["Analysis"][-1] - #-------------------------- - selfA._setInternalState("Xn", Xn) - # - return 0 - -# ============================================================================== -def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - 3DVAR PSAS - """ - # - # Initialisations - # --------------- - # - # Opérateurs - Hm = HO["Direct"].appliedTo - # - # Utilisation éventuelle d'un vecteur H(Xb) précalculé - if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: - HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) - else: - HXb = Hm( Xb ) - HXb = numpy.asmatrix(numpy.ravel( HXb )).T - if Y.size != HXb.size: - raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) - if max(Y.shape) != max(HXb.shape): - raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) - # - if selfA._toStore("JacobianMatrixAtBackground"): - HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb) - HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape - selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb ) - # - Ht = HO["Tangent"].asMatrix(Xb) - BHT = B * Ht.T - HBHTpR = R + Ht * BHT - Innovation = Y - HXb - # - # Point de démarrage de l'optimisation - Xini = numpy.zeros(Xb.shape) - # - # Définition de la fonction-coût - # ------------------------------ - def CostFunction(w): - _W = numpy.asmatrix(numpy.ravel( w )).T - if selfA._parameters["StoreInternalVariables"] or \ - selfA._toStore("CurrentState") or \ - 
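The mmqr routine above implements the MM algorithm of Hunter and Lange (2000): residuals define weights 1/(epsilon + |r|) and each pass solves a weighted least-squares problem (the step-halving safeguards of the original are omitted here). A toy linear quantile-regression illustration, with invented data and model:

import numpy

rng = numpy.random.default_rng(3)
X = rng.normal(size=(50, 2))                         # invented design matrix
y = X @ numpy.array([1.0, -2.0]) + rng.normal(scale=0.1, size=50)

quantile, epsilon = 0.5, 1.e-6
beta = numpy.zeros(2)
for _ in range(50):
    r = y - X @ beta                                 # residuals
    w = 1. / (epsilon + numpy.abs(r))                # MM weights
    veps = 1. - 2.*quantile - r*w
    M  = X.T @ (w[:, None] * X)                      # weighted normal matrix
    SM = X.T @ veps
    step = -numpy.linalg.lstsq(M, SM, rcond=None)[0]
    beta = beta + step
    if numpy.abs(step).max() < 1.e-10:
        break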
selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W ) - if selfA._toStore("SimulatedObservationAtCurrentState") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation ) - # - Jb = float( 0.5 * _W.T * HBHTpR * _W ) - Jo = float( - _W.T * Innovation ) - J = Jb + Jo - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - if selfA._toStore("IndexOfOptimum") or \ - selfA._toStore("CurrentOptimum") or \ - selfA._toStore("CostFunctionJAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - return J - # - def GradientOfCostFunction(w): - _W = numpy.asmatrix(numpy.ravel( w )).T - GradJb = HBHTpR * _W - GradJo = - Innovation - GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) - return GradJ - # - # Minimisation de la fonctionnelle - # -------------------------------- - nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() - # - if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": - import lbfgsbhlt as optimiseur - else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], 
- ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) - # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] - # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - # ---------------------------------------------------------------- - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T - else: - Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T - # - # Obtention de l'analyse - # ---------------------- - Xa = Minimum - # - selfA.StoredVariables["Analysis"].store( Xa ) - # - if selfA._toStore("OMA") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("SimulatedObservationAtOptimum"): - if selfA._toStore("SimulatedObservationAtCurrentState"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] - elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] - else: - HXa = Hm( Xa ) - # - # Calcul de la covariance d'analyse - # --------------------------------- - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("JacobianMatrixAtOptimum") or \ - selfA._toStore("KalmanGainAtOptimum"): - HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) - HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("KalmanGainAtOptimum"): - HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) - HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles"): - BI = B.getI() - RI = R.getI() - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. 
- _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,)) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( A ) - if selfA._toStore("JacobianMatrixAtOptimum"): - selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) - if selfA._toStore("KalmanGainAtOptimum"): - if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I - elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI - selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) - # - # Calculs et/ou stockages supplémentaires - # --------------------------------------- - if selfA._toStore("Innovation") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("MahalanobisConsistency") or \ - selfA._toStore("OMB"): - d = Y - HXb - if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) - if selfA._toStore("OMA"): - selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) - if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) - if selfA._toStore("SigmaObs2"): - TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) - if selfA._toStore("MahalanobisConsistency"): - selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) - if selfA._toStore("SimulationQuantiles"): - QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM) - if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) - if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) - # - return 0 - -# ============================================================================== -def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"): - """ - Stochastic EnKF - """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True - # - # Opérateurs - H = HO["Direct"].appliedControledFormTo - # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None - # - # Durée d'observation et tailles - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] - else: - 
duration = 2 - __p = numpy.array(Y).size - # - # Précalcul des inversions de B et R - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() - # - __n = Xb.size - __m = selfA._parameters["NumberOfMembers"] - # - if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p) - else: Rn = R - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n) - else: Pn = B - Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m ) - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - selfA._setInternalState("seed", numpy.random.get_state()) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - # - previousJMinimum = numpy.finfo(float).max - # - for step in range(duration-1): - numpy.random.set_state(selfA._getInternalState("seed")) - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) - else: - Ynpu = numpy.ravel( Y ).reshape((__p,1)) - # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T - elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T - else: - Un = numpy.asmatrix(numpy.ravel( U )).T - else: - Un = None - # - if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - EMX = M( [(Xn[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q ) - HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! - Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast - # --- > Par principe, M = Id, Q = 0 - Xn_predicted = EMX = Xn - HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - # - # Mean of forecast and observation of forecast - Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) - # - #-------------------------- - if VariantM == "KalmanFilterFormula05": - PfHT, HPfHT = 0., 0. 
- for i in range(__m): - Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm - Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm - PfHT += Exfi * Eyfi.T - HPfHT += Eyfi * Eyfi.T - PfHT = (1./(__m-1)) * PfHT - HPfHT = (1./(__m-1)) * HPfHT - Kn = PfHT * ( R + HPfHT ).I - del PfHT, HPfHT - # - for i in range(__m): - ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn) - Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i]) - #-------------------------- - elif VariantM == "KalmanFilterFormula16": - EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m) - EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) - # - EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1) - EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1) - # - Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T) - # - for i in range(__m): - Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i]) - #-------------------------- - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("seed", numpy.random.get_state()) - #-------------------------- - # - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("APosterioriCovariance") \ - or selfA._toStore("InnovationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T - _Innovation = Ynpu - _HXa - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( EMX ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( EMX - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) 
) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] - # ---> Pour les smoothers - if selfA._toStore("CurrentEnsembleState"): - selfA.StoredVariables["CurrentEnsembleState"].store( Xn ) - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 - -# ============================================================================== -def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - 3DVAR - """ - # - # Initialisations - # --------------- - # - # Opérateurs - Hm = HO["Direct"].appliedTo - Ha = HO["Adjoint"].appliedInXTo - # - # Utilisation éventuelle d'un vecteur H(Xb) précalculé - if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: - HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) - else: - HXb = Hm( Xb ) - HXb = HXb.reshape((-1,1)) - if Y.size != HXb.size: - raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) - if max(Y.shape) != max(HXb.shape): - raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be 
identical."%(Y.shape,HXb.shape)) - # - if selfA._toStore("JacobianMatrixAtBackground"): - HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb) - HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape - selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb ) - # - # Précalcul des inversions de B et R - BI = B.getI() - RI = R.getI() - # - # Point de démarrage de l'optimisation - Xini = selfA._parameters["InitializationPoint"] - # - # Définition de la fonction-coût - # ------------------------------ - def CostFunction(x): - _X = numpy.ravel( x ).reshape((-1,1)) - if selfA._parameters["StoreInternalVariables"] or \ - selfA._toStore("CurrentState") or \ - selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( _X ) - _HX = Hm( _X ).reshape((-1,1)) - _Innovation = Y - _HX - if selfA._toStore("SimulatedObservationAtCurrentState") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) - # - Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) ) - Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) - J = Jb + Jo - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - if selfA._toStore("IndexOfOptimum") or \ - selfA._toStore("CurrentOptimum") or \ - selfA._toStore("CostFunctionJAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - return J - # - def GradientOfCostFunction(x): - _X = numpy.ravel( x ).reshape((-1,1)) - _HX = Hm( _X ) - _HX = numpy.ravel( _HX ).reshape((-1,1)) - GradJb = BI * (_X - Xb) - GradJo = - Ha( (_X, RI * (Y - _HX)) ) - GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) - return GradJ - # - # Minimisation de la fonctionnelle - # -------------------------------- - nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() - # - if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": - import lbfgsbhlt as optimiseur - else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = 
optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], - ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) - # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] - # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - # ---------------------------------------------------------------- - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - # - Xa = Minimum - #-------------------------- - # - selfA.StoredVariables["Analysis"].store( Xa ) - # - if selfA._toStore("OMA") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("SimulatedObservationAtOptimum"): - if selfA._toStore("SimulatedObservationAtCurrentState"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] - elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] - else: - HXa = Hm( Xa ) - # - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("JacobianMatrixAtOptimum") or \ - selfA._toStore("KalmanGainAtOptimum"): - HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) - HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - 
selfA._toStore("KalmanGainAtOptimum"): - HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) - HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles"): - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.zeros(nb) - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) ) - A = numpy.linalg.inv(numpy.array( HessienneI )) - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,)) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( A ) - if selfA._toStore("JacobianMatrixAtOptimum"): - selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) - if selfA._toStore("KalmanGainAtOptimum"): - if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I - elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI - selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) - # - if selfA._toStore("Innovation") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("MahalanobisConsistency") or \ - selfA._toStore("OMB"): - d = Y - HXb - if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( d ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) - if selfA._toStore("OMA"): - selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) - if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( d ) - if selfA._toStore("SigmaObs2"): - TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR ) - if selfA._toStore("MahalanobisConsistency"): - selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) - if selfA._toStore("SimulationQuantiles"): - QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM) - if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb ) - if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa ) - # - return 0 - -# ============================================================================== -def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - 4DVAR - """ - # - # Initialisations - # --------------- - # - # Opérateurs - Hm = HO["Direct"].appliedControledFormTo - Mm = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None - # - def Un(_step): - if U is not None: - if hasattr(U,"store") and 1<=_step1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T - elif hasattr(U,"store") and len(U)==1: - Un 
= numpy.asmatrix(numpy.ravel( U[0] )).T - else: - Un = numpy.asmatrix(numpy.ravel( U )).T - else: - Un = None - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - Xn_predicted = Mt * Xn - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! - Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - Pn_predicted = Q + Mt * Pn * Ma - elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast - # --- > Par principe, M = Id, Q = 0 - Xn_predicted = Xn - Pn_predicted = Pn - # - if selfA._parameters["EstimationOf"] == "State": - HX_predicted = Ht * Xn_predicted - _Innovation = Ynpu - HX_predicted - elif selfA._parameters["EstimationOf"] == "Parameters": - HX_predicted = Ht * Xn_predicted - _Innovation = Ynpu - HX_predicted - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! - _Innovation = _Innovation - Cm * Un - # - Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha)) - Xn = Xn_predicted + Kn * _Innovation - Pn = Pn_predicted - Kn * Ht * Pn_predicted - # - Xa = Xn # Pointeurs - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("Pn", Pn) - #-------------------------- - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( Xn_predicted ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( Xn_predicted - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if 
selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 - -# ============================================================================== -def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - Constrained Unscented Kalman Filter - """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True - selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] ) - # - L = Xb.size - Alpha = selfA._parameters["Alpha"] - Beta = selfA._parameters["Beta"] - if selfA._parameters["Kappa"] == 0: - if selfA._parameters["EstimationOf"] == "State": - Kappa = 0 - elif selfA._parameters["EstimationOf"] == "Parameters": - Kappa = 3 - L - else: - Kappa = selfA._parameters["Kappa"] - Lambda = float( Alpha**2 ) * ( L + Kappa ) - L - Gamma = math.sqrt( L + Lambda ) - # - Ww = [] - Ww.append( 0. ) - for i in range(2*L): - Ww.append( 1. / (2.*(L + Lambda)) ) - # - Wm = numpy.array( Ww ) - Wm[0] = Lambda / (L + Lambda) - Wc = numpy.array( Ww ) - Wc[0] = Lambda / (L + Lambda) + (1. 
- Alpha**2 + Beta) - # - # Opérateurs - Hm = HO["Direct"].appliedControledFormTo - # - if selfA._parameters["EstimationOf"] == "State": - Mm = EM["Direct"].appliedControledFormTo - # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) - else: - Cm = None - # - # Durée d'observation et tailles - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] - else: - duration = 2 - __p = numpy.array(Y).size - # - # Précalcul des inversions de B et R - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() - # - __n = Xb.size - # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - Xn = Xb - if hasattr(B,"asfullmatrix"): - Pn = B.asfullmatrix(__n) - else: - Pn = B - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( Xb ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - elif selfA._parameters["nextStep"]: - Xn = selfA._getInternalState("Xn") - Pn = selfA._getInternalState("Pn") - # - if selfA._parameters["EstimationOf"] == "Parameters": - XaMin = Xn - previousJMinimum = numpy.finfo(float).max - # - for step in range(duration-1): - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) - else: - Ynpu = numpy.ravel( Y ).reshape((__p,1)) - # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.ravel( U[step] ).reshape((-1,1)) - elif hasattr(U,"store") and len(U)==1: - Un = numpy.ravel( U[0] ).reshape((-1,1)) - else: - Un = numpy.ravel( U ).reshape((-1,1)) - else: - Un = None - # - Pndemi = numpy.linalg.cholesky(Pn) - Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi]) - nbSpts = 2*Xn.size+1 - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - for point in range(nbSpts): - Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - # - XEtnnp = [] - for point in range(nbSpts): - if selfA._parameters["EstimationOf"] == "State": - XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
- Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape - XEtnnpi = XEtnnpi + Cm * Un - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - elif selfA._parameters["EstimationOf"] == "Parameters": - # --- > Par principe, M = Id, Q = 0 - XEtnnpi = Xnp[:,point] - XEtnnp.append( XEtnnpi ) - XEtnnp = numpy.hstack( XEtnnp ) - # - Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1) - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - # - if selfA._parameters["EstimationOf"] == "State": Pnm = Q - elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0. - for point in range(nbSpts): - Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T - # - if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None: - Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm) - else: - Pnmdemi = numpy.linalg.cholesky(Pnm) - # - Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi]) - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - for point in range(nbSpts): - Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - # - Ynnp = [] - for point in range(nbSpts): - if selfA._parameters["EstimationOf"] == "State": - Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T - elif selfA._parameters["EstimationOf"] == "Parameters": - Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T - Ynnp.append( Ynnpi ) - Ynnp = numpy.hstack( Ynnp ) - # - Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1) - # - Pyyn = R - Pxyn = 0. - for point in range(nbSpts): - Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T - Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T - # - _Innovation = Ynpu - Yncm - if selfA._parameters["EstimationOf"] == "Parameters": - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! 
- _Innovation = _Innovation - Cm * Un - # - Kn = Pxyn * Pyyn.I - Xn = Xncm + Kn * _Innovation - Pn = Pnm - Kn * Pyyn * Kn.T - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) - Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) - # - Xa = Xn # Pointeurs - #-------------------------- - selfA._setInternalState("Xn", Xn) - selfA._setInternalState("Pn", Pn) - #-------------------------- - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( Xncm ) - if selfA._toStore("ForecastCovariance"): - selfA.StoredVariables["ForecastCovariance"].store( Pnm ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( Xncm - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if 
selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) - # - return 0 - -# ============================================================================== -def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - 3DVAR variational analysis with no inversion of B - """ - # - # Initialisations - # --------------- - # - # Opérateurs - Hm = HO["Direct"].appliedTo - Ha = HO["Adjoint"].appliedInXTo - # - # Précalcul des inversions de B et R - BT = B.getT() - RI = R.getI() - # - # Point de démarrage de l'optimisation - Xini = numpy.zeros(Xb.shape) - # - # Définition de la fonction-coût - # ------------------------------ - def CostFunction(v): - _V = numpy.asmatrix(numpy.ravel( v )).T - _X = Xb + B * _V - if selfA._parameters["StoreInternalVariables"] or \ - selfA._toStore("CurrentState") or \ - selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( _X ) - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T - _Innovation = Y - _HX - if selfA._toStore("SimulatedObservationAtCurrentState") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) - # - Jb = float( 0.5 * _V.T * BT * _V ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - if selfA._toStore("IndexOfOptimum") or \ - selfA._toStore("CurrentOptimum") or \ - selfA._toStore("CostFunctionJAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( 
selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - return J - # - def GradientOfCostFunction(v): - _V = numpy.asmatrix(numpy.ravel( v )).T - _X = Xb + B * _V - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T - GradJb = BT * _V - GradJo = - Ha( (_X, RI * (Y - _HX)) ) - GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) - return GradJ - # - # Minimisation de la fonctionnelle - # -------------------------------- - nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() - # - if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": - import lbfgsbhlt as optimiseur - else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], - ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) - # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = 
selfA.StoredVariables["CostFunctionJ"][IndexMin] - # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - # ---------------------------------------------------------------- - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T - else: - Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T - # - # Obtention de l'analyse - # ---------------------- - Xa = Minimum - # - selfA.StoredVariables["Analysis"].store( Xa ) - # - if selfA._toStore("OMA") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("SimulatedObservationAtOptimum"): - if selfA._toStore("SimulatedObservationAtCurrentState"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] - elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] - else: - HXa = Hm( Xa ) - # - # Calcul de la covariance d'analyse - # --------------------------------- - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("JacobianMatrixAtOptimum") or \ - selfA._toStore("KalmanGainAtOptimum"): - HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) - HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("KalmanGainAtOptimum"): - HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) - HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles"): - BI = B.getI() - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( A ) - if selfA._toStore("JacobianMatrixAtOptimum"): - selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) - if selfA._toStore("KalmanGainAtOptimum"): - if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I - elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI - selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) - # - # Calculs et/ou stockages supplémentaires - # --------------------------------------- - if selfA._toStore("Innovation") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("MahalanobisConsistency") or \ - selfA._toStore("OMB"): - d = Y - HXb - if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) - if selfA._toStore("OMA"): - selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) - if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) - if selfA._toStore("SigmaObs2"): - TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) - if selfA._toStore("MahalanobisConsistency"): - selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) - if selfA._toStore("SimulationQuantiles"): - QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM) - if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) - if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) + if __CovForecast: Pn = selfA._getInternalState("Pn") # return 0
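
The a posteriori covariance computed at the optimum in the 3DVAR code above is the inverse of the cost-function Hessian, A = (B^-1 + H^T R^-1 H)^-1, built column by column by applying the inverse covariances and the tangent/adjoint operators to unit vectors. A minimal NumPy sketch of that assembly, assuming dense matrices Binv and Rinv and a dense tangent matrix Ht; the function name is illustrative and not part of ADAO:

    import numpy

    def a_posteriori_covariance(Binv, Rinv, Ht):
        # A = (B^-1 + H^T R^-1 H)^-1, Ht being the (p, n) tangent matrix at the optimum
        n = Binv.shape[0]
        Hess = numpy.empty((n, n))
        for i in range(n):
            e = numpy.zeros(n)
            e[i] = 1.0
            # i-th column of the Hessian: B^-1 e_i + H^T R^-1 (H e_i)
            Hess[:, i] = Binv @ e + Ht.T @ (Rinv @ (Ht @ e))
        A = numpy.linalg.inv(Hess)
        # same sanity checks as in the code above
        if (numpy.diag(A) < 0).any():
            raise ValueError("negative variance on the diagonal of A")
        numpy.linalg.cholesky(A)  # raises LinAlgError if A is not SPD
        return A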
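
The optional "KalmanGainAtOptimum" output above switches between two algebraically equivalent expressions of the gain depending on whether there are fewer observations than state variables, so that the smaller system is inverted. In dense form, with H the (p, n) tangent matrix (the helper name is an illustration, not the ADAO API):

    import numpy

    def kalman_gain(B, R, H):
        # K = B H^T (R + H B H^T)^-1           : one (p, p) inversion, cheaper when p <= n
        # K = (B^-1 + H^T R^-1 H)^-1 H^T R^-1  : one (n, n) inversion, cheaper when p > n
        p, n = H.shape
        if p <= n:
            return B @ H.T @ numpy.linalg.inv(R + H @ B @ H.T)
        Binv, Rinv = numpy.linalg.inv(B), numpy.linalg.inv(R)
        return numpy.linalg.inv(Binv + H.T @ Rinv @ H) @ H.T @ Rinv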
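
The senkf loop above implements the stochastic (perturbed observations) EnKF: each member assimilates its own noisy copy of the observation, and the gain is estimated from ensemble anomalies. A self-contained sketch of one analysis step follows; the helper name, the callable H and the random-generator handling are assumptions for illustration, and the ADAO version additionally handles inflation, control inputs and result storage:

    import numpy

    def enkf_analysis(Xf, y, R, H, rng=None):
        # Xf : (n, m) forecast ensemble, y : (p,) observation, R : (p, p), H : callable
        rng = numpy.random.default_rng() if rng is None else rng
        n, m = Xf.shape
        HXf = numpy.column_stack([numpy.ravel(H(Xf[:, i])) for i in range(m)])  # (p, m)
        Ex = Xf  - Xf.mean(axis=1, keepdims=True)         # state anomalies
        Ey = HXf - HXf.mean(axis=1, keepdims=True)        # observed anomalies
        PfHT  = Ex @ Ey.T / (m - 1)                       # ~ Pf H^T
        HPfHT = Ey @ Ey.T / (m - 1)                       # ~ H Pf H^T
        K = PfHT @ numpy.linalg.inv(HPfHT + R)            # ensemble Kalman gain
        # perturbed observations: one independent draw of the observation noise per member
        Yp = y.reshape((-1, 1)) + rng.multivariate_normal(numpy.zeros(y.size), R, size=m).T
        return Xf + K @ (Yp - HXf)                        # (n, m) analysis ensemble

This corresponds to the "KalmanFilterFormula05" branch; the "KalmanFilterFormula16" branch builds the same gain from the scaled anomaly matrices, using the sampled observation perturbations in place of R.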
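
The std3dvar cost function and gradient above are the classical ones, J(x) = 0.5 (x - xb)^T B^-1 (x - xb) + 0.5 (y - H(x))^T R^-1 (y - H(x)) and grad J(x) = B^-1 (x - xb) - H'(x)^T R^-1 (y - H(x)). As a compact reference, here is a sketch of the same minimization written against scipy.optimize.minimize instead of the low-level fmin_l_bfgs_b/fmin_tnc interfaces used above; solve_3dvar, H and Ht_adj are illustrative assumptions, not ADAO names:

    import numpy, scipy.optimize

    def solve_3dvar(xb, y, Binv, Rinv, H, Ht_adj):
        # H(x) is the observation operator; Ht_adj(x, w) applies the adjoint of its
        # Jacobian at x to the vector w (the role played by HO["Adjoint"] above)
        def J(x):
            dxb = x - xb
            dy  = y - numpy.ravel(H(x))
            return 0.5 * dxb @ (Binv @ dxb) + 0.5 * dy @ (Rinv @ dy)
        def gradJ(x):
            dy = y - numpy.ravel(H(x))
            return Binv @ (x - xb) - Ht_adj(x, Rinv @ dy)
        res = scipy.optimize.minimize(J, numpy.ravel(xb), jac=gradJ, method="L-BFGS-B")
        return res.x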
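
In uckf above, the sigma points and weights follow the standard scaled unscented transform: Lambda = Alpha^2 (L + Kappa) - L, Gamma = sqrt(L + Lambda), Wm[0] = Lambda / (L + Lambda), Wc[0] = Wm[0] + (1 - Alpha^2 + Beta), and every other weight equals 1 / (2 (L + Lambda)). Note that the removed covariance loops index the weights as Wc[i] while iterating over point, so the central weight Wc[0] appears never to be applied there. A small sketch of the sigma-point generation, with default parameter values that are illustrative only:

    import numpy

    def unscented_points(x, P, Alpha=1.0, Beta=2.0, Kappa=0.0):
        # returns the 2n+1 sigma points as an (n, 2n+1) array plus mean/covariance weights
        n = x.size
        Lambda = Alpha**2 * (n + Kappa) - n
        Gamma  = numpy.sqrt(n + Lambda)
        S = numpy.linalg.cholesky(P)                  # matrix square root of P, as above
        xc = x.reshape((-1, 1))
        pts = numpy.hstack([xc, xc + Gamma * S, xc - Gamma * S])
        Wm = numpy.full(2 * n + 1, 1.0 / (2.0 * (n + Lambda)))
        Wc = Wm.copy()
        Wm[0] = Lambda / (n + Lambda)
        Wc[0] = Lambda / (n + Lambda) + (1.0 - Alpha**2 + Beta)
        return pts, Wm, Wc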
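
Finally, van3dvar above avoids inverting B through the change of variable x = xb + B v: since B is symmetric, the background term (x - xb)^T B^-1 (x - xb) reduces to v^T B v, so only products by B are needed. The cost evaluated in that code, in compact dense form (illustrative helper, not an ADAO name):

    import numpy

    def van3dvar_cost(v, xb, B, Rinv, H, y):
        # with x = xb + B v and B symmetric, (x - xb)^T B^-1 (x - xb) equals v^T B v
        x  = numpy.ravel(xb) + B @ numpy.ravel(v)
        dy = numpy.ravel(y) - numpy.ravel(H(x))
        return 0.5 * numpy.ravel(v) @ (B @ numpy.ravel(v)) + 0.5 * dy @ (Rinv @ dy)

The analysis is then recovered as Xa = xb + B v* from the minimizing increment v*, exactly as in the TNC-workaround block above.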