X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FdaComposant%2FdaCore%2FNumericObjects.py;h=aa6c683592e0c1a146b1ed356675970ae5d8defd;hb=ba52f8705ad24049d6fa5957da1cbe00d7bcb4d6;hp=2aa13c91d3bc3ed531a2dbe3ccba3032e34f84d2;hpb=460cf7f4df541b72a55f3ac6ffac15310d318274;p=modules%2Fadao.git diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index 2aa13c9..aa6c683 100644 --- a/src/daComposant/daCore/NumericObjects.py +++ b/src/daComposant/daCore/NumericObjects.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2008-2021 EDF R&D +# Copyright (C) 2008-2022 EDF R&D # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -27,7 +27,7 @@ __author__ = "Jean-Philippe ARGAUD" import os, time, copy, types, sys, logging import math, numpy, scipy, scipy.optimize, scipy.version -from daCore.BasicObjects import Operator +from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm from daCore.PlatformInfo import PlatformInfo mpr = PlatformInfo().MachinePrecision() mfp = PlatformInfo().MaximumPrecision() @@ -37,7 +37,7 @@ mfp = PlatformInfo().MaximumPrecision() def ExecuteFunction( triplet ): assert len(triplet) == 3, "Incorrect number of arguments" X, xArgs, funcrepr = triplet - __X = numpy.asmatrix(numpy.ravel( X )).T + __X = numpy.ravel( X ).reshape((-1,1)) __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"]) __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), []) __fonction = getattr(__module,funcrepr["__userFunction__name"]) @@ -66,6 +66,7 @@ class FDApproximation(object): increment = 0.01, dX = None, extraArguments = None, + reducingMemoryUse = False, avoidingRedundancy = True, toleranceInRedundancy = 1.e-18, lenghtOfRedundancy = -1, @@ -75,6 +76,7 @@ class FDApproximation(object): ): self.__name = str(name) self.__extraArgs = extraArguments + # if mpEnabled: try: import multiprocessing @@ -88,12 +90,12 @@ class FDApproximation(object): self.__mpWorkers = None logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers)) # - if mfEnabled: - self.__mfEnabled = True - else: - self.__mfEnabled = False + self.__mfEnabled = bool(mfEnabled) logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,)) # + self.__rmEnabled = bool(reducingMemoryUse) + logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,)) + # if avoidingRedundancy: self.__avoidRC = True self.__tolerBP = float(toleranceInRedundancy) @@ -105,6 +107,9 @@ class FDApproximation(object): self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms else: self.__avoidRC = False + logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC) + if self.__avoidRC: + logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP) # if self.__mpEnabled: if isinstance(Function,types.FunctionType): @@ -149,10 +154,7 @@ class FDApproximation(object): if dX is None: self.__dX = None else: - self.__dX = numpy.asmatrix(numpy.ravel( dX )).T - logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC) - if self.__avoidRC: - logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP) + self.__dX = numpy.ravel( dX ) # --------------------------------------------------------- def __doublon__(self, e, l, n, v=None): @@ -164,6 +166,29 @@ class FDApproximation(object): break return __ac, __iac + 
# --------------------------------------------------------- + def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None): + "Produit incrémental d'une matrice liste de colonnes avec un vecteur" + if not isinstance(__LMatrix, (list,tuple)): + raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix)) + if __dotWith is not None: + __Idwx = numpy.ravel( __dotWith ) + assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements" + __Produit = numpy.zeros(__LMatrix[0].size) + for i, col in enumerate(__LMatrix): + __Produit += float(__Idwx[i]) * col + return __Produit + elif __dotTWith is not None: + _Idwy = numpy.ravel( __dotTWith ).T + assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements" + __Produit = numpy.zeros(len(__LMatrix)) + for i, col in enumerate(__LMatrix): + __Produit[i] = float( _Idwy @ col) + return __Produit + else: + __Produit = None + return __Produit + # --------------------------------------------------------- def DirectOperator(self, X, **extraArgs ): """ @@ -176,17 +201,16 @@ class FDApproximation(object): if self.__mfEnabled: _HX = self.__userFunction( X, argsAsSerie = True ) else: - _X = numpy.asmatrix(numpy.ravel( X )).T - _HX = numpy.ravel(self.__userFunction( _X )) + _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) )) # return _HX # --------------------------------------------------------- - def TangentMatrix(self, X ): + def TangentMatrix(self, X, dotWith = None, dotTWith = None ): """ Calcul de l'opérateur tangent comme la Jacobienne par différences finies, c'est-à-dire le gradient de H en X. On utilise des différences finies - directionnelles autour du point X. X est un numpy.matrix. + directionnelles autour du point X. X est un numpy.ndarray. Différences finies centrées (approximation d'ordre 2): 1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation @@ -214,12 +238,14 @@ class FDApproximation(object): if X is None or len(X)==0: raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),)) # - _X = numpy.asmatrix(numpy.ravel( X )).T + _X = numpy.ravel( X ) # if self.__dX is None: _dX = self.__increment * _X else: - _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T + _dX = numpy.ravel( self.__dX ) + assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one" + assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one" # if (_dX == 0.).any(): moyenne = _dX.mean() @@ -234,11 +260,16 @@ class FDApproximation(object): __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None) if __alreadyCalculatedP == __alreadyCalculatedI > -1: __alreadyCalculated, __i = True, __alreadyCalculatedP - logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i) + logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i) # if __alreadyCalculated: logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i) _Jacobienne = self.__listJPCR[__i] + logging.debug("FDA Fin du calcul de la Jacobienne") + if dotWith is not None: + return numpy.dot(_Jacobienne, numpy.ravel( dotWith )) + elif dotTWith is not None: + return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith )) else: logging.debug("FDA Calcul Jacobienne (explicite)") if self.__centeredDF: @@ -252,9 +283,9 @@ class FDApproximation(object): _jobs = [] for i in range( len(_dX) ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, 
dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi - _X_moins_dXi = numpy.array( _X.A1, dtype=float ) + _X_moins_dXi = numpy.array( _X, dtype=float ) _X_moins_dXi[i] = _X[i] - _dXi # _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) ) @@ -274,9 +305,9 @@ class FDApproximation(object): _xserie = [] for i in range( len(_dX) ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi - _X_moins_dXi = numpy.array( _X.A1, dtype=float ) + _X_moins_dXi = numpy.array( _X, dtype=float ) _X_moins_dXi[i] = _X[i] - _dXi # _xserie.append( _X_plus_dXi ) @@ -292,9 +323,9 @@ class FDApproximation(object): _Jacobienne = [] for i in range( _dX.size ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi - _X_moins_dXi = numpy.array( _X.A1, dtype=float ) + _X_moins_dXi = numpy.array( _X, dtype=float ) _X_moins_dXi[i] = _X[i] - _dXi # _HX_plus_dXi = self.DirectOperator( _X_plus_dXi ) @@ -311,9 +342,9 @@ class FDApproximation(object): "__userFunction__name" : self.__userFunction__name, } _jobs = [] - _jobs.append( (_X.A1, self.__extraArgs, funcrepr) ) + _jobs.append( (_X, self.__extraArgs, funcrepr) ) for i in range( len(_dX) ): - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dX[i] # _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) ) @@ -332,9 +363,9 @@ class FDApproximation(object): # elif self.__mfEnabled: _xserie = [] - _xserie.append( _X.A1 ) + _xserie.append( _X ) for i in range( len(_dX) ): - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dX[i] # _xserie.append( _X_plus_dXi ) @@ -352,30 +383,35 @@ class FDApproximation(object): _HX = self.DirectOperator( _X ) for i in range( _dX.size ): _dXi = _dX[i] - _X_plus_dXi = numpy.array( _X.A1, dtype=float ) + _X_plus_dXi = numpy.array( _X, dtype=float ) _X_plus_dXi[i] = _X[i] + _dXi # _HX_plus_dXi = self.DirectOperator( _X_plus_dXi ) # _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) ) - # # - _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T - if self.__avoidRC: - if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size - while len(self.__listJPCP) > self.__lenghtRJ: - self.__listJPCP.pop(0) - self.__listJPCI.pop(0) - self.__listJPCR.pop(0) - self.__listJPPN.pop(0) - self.__listJPIN.pop(0) - self.__listJPCP.append( copy.copy(_X) ) - self.__listJPCI.append( copy.copy(_dX) ) - self.__listJPCR.append( copy.copy(_Jacobienne) ) - self.__listJPPN.append( numpy.linalg.norm(_X) ) - self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) ) - # - logging.debug("FDA Fin du calcul de la Jacobienne") + if (dotWith is not None) or (dotTWith is not None): + __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith) + else: + __Produit = None + if __Produit is None or self.__avoidRC: + _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) ) + if self.__avoidRC: + if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size + while len(self.__listJPCP) > self.__lenghtRJ: + self.__listJPCP.pop(0) + self.__listJPCI.pop(0) + self.__listJPCR.pop(0) + self.__listJPPN.pop(0) + self.__listJPIN.pop(0) + self.__listJPCP.append( copy.copy(_X) ) + self.__listJPCI.append( copy.copy(_dX) ) + self.__listJPCR.append( copy.copy(_Jacobienne) ) + self.__listJPPN.append( numpy.linalg.norm(_X) ) + self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) 
) + logging.debug("FDA Fin du calcul de la Jacobienne") + if __Produit is not None: + return __Produit # return _Jacobienne @@ -388,28 +424,27 @@ class FDApproximation(object): ne doivent pas être données ici à la fonction utilisateur. """ if self.__mfEnabled: - assert len(paire) == 1, "Incorrect lenght of arguments" + assert len(paire) == 1, "Incorrect length of arguments" _paire = paire[0] assert len(_paire) == 2, "Incorrect number of arguments" else: assert len(paire) == 2, "Incorrect number of arguments" _paire = paire X, dX = _paire - _Jacobienne = self.TangentMatrix( X ) if dX is None or len(dX) == 0: # # Calcul de la forme matricielle si le second argument est None # ------------------------------------------------------------- + _Jacobienne = self.TangentMatrix( X ) if self.__mfEnabled: return [_Jacobienne,] else: return _Jacobienne else: # # Calcul de la valeur linéarisée de H en X appliqué à dX # ------------------------------------------------------ - _dX = numpy.asmatrix(numpy.ravel( dX )).T - _HtX = numpy.dot(_Jacobienne, _dX) - if self.__mfEnabled: return [_HtX.A1,] - else: return _HtX.A1 + _HtX = self.TangentMatrix( X, dotWith = dX ) + if self.__mfEnabled: return [_HtX,] + else: return _HtX # --------------------------------------------------------- def AdjointOperator(self, paire, **extraArgs ): @@ -420,28 +455,27 @@ class FDApproximation(object): ne doivent pas être données ici à la fonction utilisateur. """ if self.__mfEnabled: - assert len(paire) == 1, "Incorrect lenght of arguments" + assert len(paire) == 1, "Incorrect length of arguments" _paire = paire[0] assert len(_paire) == 2, "Incorrect number of arguments" else: assert len(paire) == 2, "Incorrect number of arguments" _paire = paire X, Y = _paire - _JacobienneT = self.TangentMatrix( X ).T if Y is None or len(Y) == 0: # # Calcul de la forme matricielle si le second argument est None # ------------------------------------------------------------- + _JacobienneT = self.TangentMatrix( X ).T if self.__mfEnabled: return [_JacobienneT,] else: return _JacobienneT else: # # Calcul de la valeur de l'adjoint en X appliqué à Y # -------------------------------------------------- - _Y = numpy.asmatrix(numpy.ravel( Y )).T - _HaY = numpy.dot(_JacobienneT, _Y) - if self.__mfEnabled: return [_HaY.A1,] - else: return _HaY.A1 + _HaY = self.TangentMatrix( X, dotTWith = Y ) + if self.__mfEnabled: return [_HaY,] + else: return _HaY # ============================================================================== def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ): @@ -452,12 +486,12 @@ def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ): raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),)) # if _bgcovariance is None: - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Perturbations = numpy.tile( _bgcenter, _nbmembers) else: _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z + _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z # - return BackgroundEnsemble + return _Perturbations # ============================================================================== def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True): @@ -479,29 +513,29 @@ def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _wi if _nbmembers < 1: raise ValueError("Number of members has 
to be strictly more than 1 (given number: %s)."%(str(_nbmembers),)) if _bgcovariance is None: - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Perturbations = numpy.tile( _bgcenter, _nbmembers) else: if _withSVD: - U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False) + _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False) _nbctl = _bgcenter.size if _nbmembers > _nbctl: _Z = numpy.concatenate((numpy.dot( - numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]), + numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]), numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0) else: - _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1]) + _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1]) _Zca = __CenteredRandomAnomalies(_Z, _nbmembers) - BackgroundEnsemble = _bgcenter + _Zca + _Perturbations = _bgcenter + _Zca else: if max(abs(_bgcovariance.flatten())) > 0: _nbctl = _bgcenter.size _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1) _Zca = __CenteredRandomAnomalies(_Z, _nbmembers) - BackgroundEnsemble = _bgcenter + _Zca + _Perturbations = _bgcenter + _Zca else: - BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Perturbations = numpy.tile( _bgcenter, _nbmembers) # - return BackgroundEnsemble + return _Perturbations # ============================================================================== def EnsembleMean( __Ensemble ): @@ -649,6 +683,31 @@ def CovarianceInflation( # return OutputCovOrEns +# ============================================================================== +def HessienneEstimation(nb, HaM, HtM, BI, RI): + "Estimation de la Hessienne" + # + HessienneI = [] + for i in range(int(nb)): + _ee = numpy.zeros((nb,1)) + _ee[i] = 1. + _HtEE = numpy.dot(HtM,_ee).reshape((-1,1)) + HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) ) + # + A = numpy.linalg.inv(numpy.array( HessienneI )) + # + if min(A.shape) != max(A.shape): + raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) + if (numpy.diag(A) < 0).any(): + raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) + if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug + try: + L = numpy.linalg.cholesky( A ) + except: + raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) + # + return A + # ============================================================================== def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None): "Estimation des quantiles a posteriori (selfA est modifié)" @@ -682,7 +741,7 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None): if LBounds is not None: # "EstimateProjection" par défaut Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1) Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1) - Yr = Hm( Xr ) + Yr = numpy.asarray(Hm( Xr )) else: raise ValueError("Quantile simulations has only to be Linear or NonLinear.") # @@ -727,7 +786,7 @@ def RecentredBounds( __Bounds, __Center): # Conserve une valeur par défaut à None s'il n'y a pas de bornes if __Bounds is None: return None # Recentre les valeurs numériques de bornes - return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).transpose((-1,1)) + return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1)) # ============================================================================== def ApplyBounds( __Vector, __Bounds, __newClip = True): @@ -740,7 +799,7 @@ def ApplyBounds( __Vector, __Bounds, __newClip = True): if not isinstance(__Bounds, numpy.ndarray): # Is an array raise ValueError("Incorrect array definition of bounds data") if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght - raise ValueError("Incorrect bounds number to be applied for this vector") + raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size)) if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2: raise ValueError("Incorrectly shaped bounds data") # @@ -756,6 +815,284 @@ def ApplyBounds( __Vector, __Bounds, __newClip = True): # return __Vector +# ============================================================================== +def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf): + "Recentre l'ensemble Xn autour de l'analyse 3DVAR" + # + Xf = EnsembleMean( __EnXf ) + Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) ) + Pf = (1 - __Betaf) * __B + __Betaf * Pf + # + selfB = PartialAlgorithm("3DVAR") + selfB._parameters["Minimizer"] = "LBFGSB" + selfB._parameters["MaximumNumberOfSteps"] = 15000 + selfB._parameters["CostDecrementTolerance"] = 1.e-7 + selfB._parameters["ProjectedGradientTolerance"] = -1 + selfB._parameters["GradientNormTolerance"] = 1.e-05 + selfB._parameters["StoreInternalVariables"] = False + selfB._parameters["optiprint"] = -1 + selfB._parameters["optdisp"] = 0 + selfB._parameters["Bounds"] = None + selfB._parameters["InitializationPoint"] = Xf + std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None) + Xa = selfB.get("Analysis")[-1].reshape((-1,1)) + del selfB + # + return Xa + EnsembleOfAnomalies( __EnXn ) + +# ============================================================================== +def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): + """ + Constrained Unscented Kalman Filter + """ + if selfA._parameters["EstimationOf"] == "Parameters": + selfA._parameters["StoreInternalVariables"] = True + selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] ) + # + L = Xb.size + Alpha = selfA._parameters["Alpha"] + Beta = selfA._parameters["Beta"] + if selfA._parameters["Kappa"] == 0: + if 
selfA._parameters["EstimationOf"] == "State": + Kappa = 0 + elif selfA._parameters["EstimationOf"] == "Parameters": + Kappa = 3 - L + else: + Kappa = selfA._parameters["Kappa"] + Lambda = float( Alpha**2 ) * ( L + Kappa ) - L + Gamma = math.sqrt( L + Lambda ) + # + Ww = [] + Ww.append( 0. ) + for i in range(2*L): + Ww.append( 1. / (2.*(L + Lambda)) ) + # + Wm = numpy.array( Ww ) + Wm[0] = Lambda / (L + Lambda) + Wc = numpy.array( Ww ) + Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta) + # + # Opérateurs + Hm = HO["Direct"].appliedControledFormTo + # + if selfA._parameters["EstimationOf"] == "State": + Mm = EM["Direct"].appliedControledFormTo + # + if CM is not None and "Tangent" in CM and U is not None: + Cm = CM["Tangent"].asMatrix(Xb) + else: + Cm = None + # + # Durée d'observation et tailles + if hasattr(Y,"stepnumber"): + duration = Y.stepnumber() + __p = numpy.cumprod(Y.shape())[-1] + else: + duration = 2 + __p = numpy.array(Y).size + # + # Précalcul des inversions de B et R + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + BI = B.getI() + RI = R.getI() + # + __n = Xb.size + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) + # + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + Xn = Xb + if hasattr(B,"asfullmatrix"): + Pn = B.asfullmatrix(__n) + else: + Pn = B + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( Xb ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + elif selfA._parameters["nextStep"]: + Xn = selfA._getInternalState("Xn") + Pn = selfA._getInternalState("Pn") + # + if selfA._parameters["EstimationOf"] == "Parameters": + XaMin = Xn + previousJMinimum = numpy.finfo(float).max + # + for step in range(duration-1): + if hasattr(Y,"store"): + Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) + else: + Ynpu = numpy.ravel( Y ).reshape((__p,1)) + # + if U is not None: + if hasattr(U,"store") and len(U)>1: + Un = numpy.ravel( U[step] ).reshape((-1,1)) + elif hasattr(U,"store") and len(U)==1: + Un = numpy.ravel( U[0] ).reshape((-1,1)) + else: + Un = numpy.ravel( U ).reshape((-1,1)) + else: + Un = None + # + Pndemi = numpy.real(scipy.linalg.sqrtm(Pn)) + Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi]) + nbSpts = 2*Xn.size+1 + # + if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": + for point in range(nbSpts): + Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] ) + # + XEtnnp = [] + for point in range(nbSpts): + if selfA._parameters["EstimationOf"] == "State": + XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1)) + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
+ Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape + XEtnnpi = XEtnnpi + Cm @ Un + if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": + XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] ) + elif selfA._parameters["EstimationOf"] == "Parameters": + # --- > Par principe, M = Id, Q = 0 + XEtnnpi = Xnp[:,point] + XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) ) + XEtnnp = numpy.concatenate( XEtnnp, axis=1 ) + # + Xncm = ( XEtnnp * Wm ).sum(axis=1) + # + if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": + Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] ) + # + if selfA._parameters["EstimationOf"] == "State": Pnm = Q + elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0. + for point in range(nbSpts): + Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm)) + # + if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None: + Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm)) + else: + Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm)) + # + Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi]) + # + if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": + for point in range(nbSpts): + Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] ) + # + Ynnp = [] + for point in range(nbSpts): + if selfA._parameters["EstimationOf"] == "State": + Ynnpi = Hm( (Xnnp[:,point], None) ) + elif selfA._parameters["EstimationOf"] == "Parameters": + Ynnpi = Hm( (Xnnp[:,point], Un) ) + Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) ) + Ynnp = numpy.concatenate( Ynnp, axis=1 ) + # + Yncm = ( Ynnp * Wm ).sum(axis=1) + # + Pyyn = R + Pxyn = 0. + for point in range(nbSpts): + Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) + Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) + # + _Innovation = Ynpu - Yncm.reshape((-1,1)) + if selfA._parameters["EstimationOf"] == "Parameters": + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! 
+ _Innovation = _Innovation - Cm @ Un + # + Kn = Pxyn * Pyyn.I + Xn = Xncm.reshape((-1,1)) + Kn * _Innovation + Pn = Pnm - Kn * Pyyn * Kn.T + # + if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": + Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] ) + # + Xa = Xn # Pointeurs + #-------------------------- + selfA._setInternalState("Xn", Xn) + selfA._setInternalState("Pn", Pn) + #-------------------------- + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + # ---> avec analysis + selfA.StoredVariables["Analysis"].store( Xa ) + if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): + selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) ) + if selfA._toStore("InnovationAtCurrentAnalysis"): + selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) + # ---> avec current state + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CurrentState"): + selfA.StoredVariables["CurrentState"].store( Xn ) + if selfA._toStore("ForecastState"): + selfA.StoredVariables["ForecastState"].store( Xncm ) + if selfA._toStore("ForecastCovariance"): + selfA.StoredVariables["ForecastCovariance"].store( Pnm ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( Xncm - Xa ) + if selfA._toStore("InnovationAtCurrentState"): + selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) + if selfA._toStore("SimulatedObservationAtCurrentState") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm ) + # ---> autres + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) + J = Jb + Jo + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + # + if selfA._toStore("IndexOfOptimum") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("CostFunctionJAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) + if selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( 
selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + if selfA._parameters["EstimationOf"] == "Parameters" \ + and J < previousJMinimum: + previousJMinimum = J + XaMin = Xa + if selfA._toStore("APosterioriCovariance"): + covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1] + # + # Stockage final supplémentaire de l'optimum en estimation de paramètres + # ---------------------------------------------------------------------- + if selfA._parameters["EstimationOf"] == "Parameters": + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( XaMin ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + # + return 0 + # ============================================================================== def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): """ @@ -795,6 +1132,7 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): RI = R.getI() # __n = Xb.size + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: Xn = Xb @@ -849,8 +1187,8 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1)) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - Pn_predicted = Q + Mt * Pn * Ma + Xn_predicted = Xn_predicted + Cm @ Un + Pn_predicted = Q + Mt * (Pn * Ma) elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = Xn @@ -866,7 +1204,7 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1)) _Innovation = Ynpu - HX_predicted if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! - _Innovation = _Innovation - Cm * Un + _Innovation = _Innovation - Cm @ Un # Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha)) Xn = Xn_predicted + Kn * _Innovation @@ -1022,11 +1360,11 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm # if U is not None: if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T + Un = numpy.ravel( U[step] ).reshape((-1,1)) elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + Un = numpy.ravel( U[0] ).reshape((-1,1)) else: - Un = numpy.asmatrix(numpy.ravel( U )).T + Un = numpy.ravel( U ).reshape((-1,1)) else: Un = None # @@ -1042,7 +1380,7 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm returnSerieAsArrayMatrix = True ) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - EZ = EZ + Cm * Un + EZ = EZ + Cm @ Un elif selfA._parameters["EstimationOf"] == "Parameters": # --- > Par principe, M = Id, Q = 0 EZ = H( [(EL[:,i], Un) for i in range(__m)], @@ -1098,7 +1436,10 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm return 0 # ============================================================================== -def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): +def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, + VariantM="KalmanFilterFormula", + Hybrid=None, + ): """ Ensemble-Transform EnKF """ @@ -1140,6 +1481,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): # __n = Xb.size __m = selfA._parameters["NumberOfMembers"] + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) + previousJMinimum = numpy.finfo(float).max # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m ) @@ -1153,8 +1496,6 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): elif selfA._parameters["nextStep"]: Xn = selfA._getInternalState("Xn") # - previousJMinimum = numpy.finfo(float).max - # for step in range(duration-1): numpy.random.set_state(selfA._getInternalState("seed")) if hasattr(Y,"store"): @@ -1164,11 +1505,11 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): # if U is not None: if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T + Un = numpy.ravel( U[step] ).reshape((-1,1)) elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + Un = numpy.ravel( U[0] ).reshape((-1,1)) else: - Un = numpy.asmatrix(numpy.ravel( U )).T + Un = numpy.ravel( U ).reshape((-1,1)) else: Un = None # @@ -1188,7 +1529,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): returnSerieAsArrayMatrix = True ) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un + Xn_predicted = Xn_predicted + Cm @ Un elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = EMX = Xn @@ -1197,8 +1538,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): returnSerieAsArrayMatrix = True ) # # Mean of forecast and observation of forecast - Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) + Xfm = EnsembleMean( Xn_predicted ) + Hfm = EnsembleMean( HX_predicted ) # # Anomalies EaX = EnsembleOfAnomalies( Xn_predicted, Xfm ) @@ -1286,7 +1627,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm def CostFunction(w): _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) - _Jo = 0.5 * _A.T * RI * _A + _Jo = 0.5 * _A.T * (RI * _A) _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w) _J = _Jo + _Jb return float(_J) @@ -1357,7 +1698,11 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): selfA._parameters["InflationFactor"], ) # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + if Hybrid == "E3DVAR": + betaf = selfA._parameters["HybridCovarianceEquilibrium"] + Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf) + # + Xa = EnsembleMean( Xn ) #-------------------------- selfA._setInternalState("Xn", Xn) selfA._setInternalState("seed", numpy.random.get_state()) @@ -1371,7 +1716,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): or selfA._toStore("InnovationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T + _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1)) _Innovation = Ynpu - _HXa # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) @@ -1403,8 +1748,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): or selfA._toStore("CostFunctionJo") \ or selfA._toStore("CurrentOptimum") \ or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) J = Jb + Jo selfA.StoredVariables["CostFunctionJb"].store( Jb ) selfA.StoredVariables["CostFunctionJo"].store( Jo ) @@ -1491,6 +1836,7 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): RI = R.getI() # __n = Xb.size + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: Xn = Xb @@ -1542,8 +1888,8 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1)) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - Pn_predicted = Q + Mt * Pn * Ma + Xn_predicted = Xn_predicted + Cm @ Un + Pn_predicted = Q + Mt * (Pn * Ma) elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = Xn @@ -1556,7 +1902,7 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1)) _Innovation = Ynpu - HX_predicted if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! - _Innovation = _Innovation - Cm * Un + _Innovation = _Innovation - Cm @ Un # Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha)) Xn = Xn_predicted + Kn * _Innovation @@ -1684,6 +2030,8 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", # __n = Xb.size __m = selfA._parameters["NumberOfMembers"] + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) + previousJMinimum = numpy.finfo(float).max # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n) @@ -1699,8 +2047,6 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", elif selfA._parameters["nextStep"]: Xn = selfA._getInternalState("Xn") # - previousJMinimum = numpy.finfo(float).max - # for step in range(duration-1): numpy.random.set_state(selfA._getInternalState("seed")) if hasattr(Y,"store"): @@ -1710,11 +2056,11 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", # if U is not None: if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T + Un = numpy.ravel( U[step] ).reshape((-1,1)) elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + Un = numpy.ravel( U[0] ).reshape((-1,1)) else: - Un = numpy.asmatrix(numpy.ravel( U )).T + Un = numpy.ravel( U ).reshape((-1,1)) else: Un = None # @@ -1789,7 +2135,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", selfA._parameters["InflationFactor"], ) # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + Xa = EnsembleMean( Xn ) #-------------------------- selfA._setInternalState("Xn", Xn) selfA._setInternalState("seed", numpy.random.get_state()) @@ -1803,7 +2149,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", or selfA._toStore("InnovationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T + _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1)) _Innovation = Ynpu - _HXa # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) @@ -1835,8 +2181,8 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", or selfA._toStore("CostFunctionJo") \ or selfA._toStore("CurrentOptimum") \ or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) J = Jb + Jo selfA.StoredVariables["CostFunctionJb"].store( Jb ) selfA.StoredVariables["CostFunctionJo"].store( Jo ) @@ -1893,18 +2239,12 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # # Initialisations # --------------- - # - # Opérateur non-linéaire pour la boucle externe Hm = HO["Direct"].appliedTo 
# - # Précalcul des inversions de B et R BI = B.getI() RI = R.getI() # - # Point de démarrage de l'optimisation - Xini = selfA._parameters["InitializationPoint"] - # - HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T + HXb = numpy.asarray(Hm( Xb )).reshape((-1,1)) Innovation = Y - HXb # # Outer Loop @@ -1912,7 +2252,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): iOuter = 0 J = 1./mpr DeltaJ = 1./mpr - Xr = Xini.reshape((-1,1)) + Xr = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1)) while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]: # # Inner Loop @@ -1923,13 +2263,12 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # Définition de la fonction-coût # ------------------------------ def CostFunction(dx): - _dX = numpy.asmatrix(numpy.ravel( dx )).T + _dX = numpy.asarray(dx).reshape((-1,1)) if selfA._parameters["StoreInternalVariables"] or \ selfA._toStore("CurrentState") or \ selfA._toStore("CurrentOptimum"): selfA.StoredVariables["CurrentState"].store( Xb + _dX ) - _HdX = Ht * _dX - _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T + _HdX = (Ht @ _dX).reshape((-1,1)) _dInnovation = Innovation - _HdX if selfA._toStore("SimulatedObservationAtCurrentState") or \ selfA._toStore("SimulatedObservationAtCurrentOptimum"): @@ -1937,8 +2276,8 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): if selfA._toStore("InnovationAtCurrentState"): selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation ) # - Jb = float( 0.5 * _dX.T * BI * _dX ) - Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation ) + Jb = float( 0.5 * _dX.T * (BI * _dX) ) + Jo = float( 0.5 * _dInnovation.T * (RI * _dInnovation) ) J = Jb + Jo # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) @@ -1967,11 +2306,10 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): return J # def GradientOfCostFunction(dx): - _dX = numpy.asmatrix(numpy.ravel( dx )).T - _HdX = Ht * _dX - _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T + _dX = numpy.ravel( dx ) + _HdX = (Ht @ _dX).reshape((-1,1)) _dInnovation = Innovation - _HdX - GradJb = BI * _dX + GradJb = BI @ _dX GradJo = - Ht.T @ (RI * _dInnovation) GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) return GradJ @@ -1988,7 +2326,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): import scipy.optimize as optimiseur Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( func = CostFunction, - x0 = numpy.zeros(Xini.size), + x0 = numpy.zeros(Xb.size), fprime = GradientOfCostFunction, args = (), bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), @@ -2002,7 +2340,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): elif selfA._parameters["Minimizer"] == "TNC": Minimum, nfeval, rc = scipy.optimize.fmin_tnc( func = CostFunction, - x0 = numpy.zeros(Xini.size), + x0 = numpy.zeros(Xb.size), fprime = GradientOfCostFunction, args = (), bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), @@ -2014,7 +2352,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): elif selfA._parameters["Minimizer"] == "CG": Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( f = CostFunction, - x0 = numpy.zeros(Xini.size), + x0 = numpy.zeros(Xb.size), fprime = GradientOfCostFunction, args = (), maxiter = selfA._parameters["MaximumNumberOfSteps"], @@ -2025,7 +2363,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): elif selfA._parameters["Minimizer"] == "NCG": Minimum, fopt, nfeval, grad_calls, hcalls, rc = 
scipy.optimize.fmin_ncg( f = CostFunction, - x0 = numpy.zeros(Xini.size), + x0 = numpy.zeros(Xb.size), fprime = GradientOfCostFunction, args = (), maxiter = selfA._parameters["MaximumNumberOfSteps"], @@ -2036,7 +2374,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): elif selfA._parameters["Minimizer"] == "BFGS": Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( f = CostFunction, - x0 = numpy.zeros(Xini.size), + x0 = numpy.zeros(Xb.size), fprime = GradientOfCostFunction, args = (), maxiter = selfA._parameters["MaximumNumberOfSteps"], @@ -2052,17 +2390,15 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T else: - Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T + Minimum = Xb + Minimum.reshape((-1,1)) # Xr = Minimum DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1] # - # Obtention de l'analyse - # ---------------------- Xa = Xr + #-------------------------- # selfA.StoredVariables["Analysis"].store( Xa ) # @@ -2077,8 +2413,6 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): else: HXa = Hm( Xa ) # - # Calcul de la covariance d'analyse - # --------------------------------- if selfA._toStore("APosterioriCovariance") or \ selfA._toStore("SimulationQuantiles") or \ selfA._toStore("JacobianMatrixAtOptimum") or \ @@ -2092,25 +2426,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape if selfA._toStore("APosterioriCovariance") or \ selfA._toStore("SimulationQuantiles"): - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) + A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI) if selfA._toStore("APosterioriCovariance"): selfA.StoredVariables["APosterioriCovariance"].store( A ) if selfA._toStore("JacobianMatrixAtOptimum"): @@ -2128,30 +2444,32 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): selfA._toStore("OMB"): d = Y - HXb if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) + selfA.StoredVariables["Innovation"].store( d ) if selfA._toStore("BMA"): selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) if selfA._toStore("OMA"): selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) + selfA.StoredVariables["OMB"].store( d ) if selfA._toStore("SigmaObs2"): TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) + selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR ) if selfA._toStore("MahalanobisConsistency"): selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) if selfA._toStore("SimulationQuantiles"): QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM) if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) + selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb ) if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) + selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa ) # return 0 # ============================================================================== -def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", - BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): +def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, + VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000, + Hybrid=None, + ): """ Maximum Likelihood Ensemble Filter """ @@ -2189,6 +2507,8 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", # __n = Xb.size __m = selfA._parameters["NumberOfMembers"] + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) + previousJMinimum = numpy.finfo(float).max # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m ) @@ -2202,8 +2522,6 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", elif selfA._parameters["nextStep"]: Xn = selfA._getInternalState("Xn") # - previousJMinimum = numpy.finfo(float).max - # for step in range(duration-1): numpy.random.set_state(selfA._getInternalState("seed")) if hasattr(Y,"store"): @@ -2213,11 +2531,11 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", # if U is not None: if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T + Un = numpy.ravel( U[step] ).reshape((-1,1)) elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + Un = numpy.ravel( U[0] ).reshape((-1,1)) else: - Un = numpy.asmatrix(numpy.ravel( U )).T + Un = numpy.ravel( U ).reshape((-1,1)) else: Un = None # @@ -2234,7 +2552,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q ) if Cm is not 
None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un + Xn_predicted = Xn_predicted + Cm @ Un elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = EMX = Xn @@ -2292,7 +2610,11 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", selfA._parameters["InflationFactor"], ) # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + if Hybrid == "E3DVAR": + betaf = selfA._parameters["HybridCovarianceEquilibrium"] + Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf) + # + Xa = EnsembleMean( Xn ) #-------------------------- selfA._setInternalState("Xn", Xn) selfA._setInternalState("seed", numpy.random.get_state()) @@ -2306,7 +2628,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", or selfA._toStore("InnovationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T + _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1)) _Innovation = Ynpu - _HXa # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) @@ -2338,8 +2660,8 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", or selfA._toStore("CostFunctionJo") \ or selfA._toStore("CurrentOptimum") \ or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) J = Jb + Jo selfA.StoredVariables["CostFunctionJb"].store( Jb ) selfA.StoredVariables["CostFunctionJo"].store( Jo ) @@ -2434,7 +2756,7 @@ def mmqr( iteration += 1 # Derivees = numpy.array(fprime(variables)) - Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS + Derivees = Derivees.reshape(n,p) # ADAO & check shape DeriveesT = Derivees.transpose() M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) ) SM = numpy.transpose(numpy.dot( DeriveesT , veps )) @@ -2473,8 +2795,13 @@ def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle): """ # # Initialisation + # -------------- if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedTo + M = EM["Direct"].appliedControledFormTo + if CM is not None and "Tangent" in CM and U is not None: + Cm = CM["Tangent"].asMatrix(Xb) + else: + Cm = None # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: Xn = numpy.ravel(Xb).reshape((-1,1)) @@ -2503,16 +2830,29 @@ def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle): else: Ynpu = numpy.ravel( Y ).reshape((-1,1)) # + if U is not None: + if hasattr(U,"store") and len(U)>1: + Un = numpy.ravel( U[step] ).reshape((-1,1)) + elif hasattr(U,"store") and len(U)==1: + Un = numpy.ravel( U[0] ).reshape((-1,1)) + else: + Un = numpy.ravel( U ).reshape((-1,1)) + else: + Un = None + # if selfA._parameters["EstimationOf"] == "State": # Forecast - Xn_predicted = M( Xn ) + Xn_predicted = M( (Xn, Un) ) if selfA._toStore("ForecastState"): selfA.StoredVariables["ForecastState"].store( Xn_predicted ) + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape + Xn_predicted = Xn_predicted + Cm @ Un elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = Xn Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1)) # - oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None) + oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None) # Xn = selfA.StoredVariables["Analysis"][-1] #-------------------------- @@ -2528,16 +2868,13 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # # Initialisations # --------------- - # - # Opérateurs Hm = HO["Direct"].appliedTo # - # Utilisation éventuelle d'un vecteur H(Xb) précalculé if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: - HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) + HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] )) else: - HXb = Hm( Xb ) - HXb = numpy.asmatrix(numpy.ravel( HXb )).T + HXb = numpy.asarray(Hm( Xb )) + HXb = numpy.ravel( HXb ).reshape((-1,1)) if Y.size != HXb.size: raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) if max(Y.shape) != max(HXb.shape): @@ -2553,25 +2890,24 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): HBHTpR = R + Ht * BHT Innovation = Y - HXb # - # Point de démarrage de l'optimisation - Xini = numpy.zeros(Xb.shape) + Xini = numpy.zeros(Y.size) # # Définition de la fonction-coût # ------------------------------ def CostFunction(w): - _W = numpy.asmatrix(numpy.ravel( w )).T + _W = numpy.asarray(w).reshape((-1,1)) if selfA._parameters["StoreInternalVariables"] or \ selfA._toStore("CurrentState") or \ selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W ) + selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W ) if selfA._toStore("SimulatedObservationAtCurrentState") or \ selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) ) + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) ) if selfA._toStore("InnovationAtCurrentState"): selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation ) # - Jb = float( 0.5 * _W.T * HBHTpR * _W ) - Jo = float( - _W.T * Innovation ) + Jb = float( 0.5 * _W.T @ (HBHTpR @ _W) ) + Jo = float( - _W.T @ Innovation ) J = Jb + Jo # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) @@ -2600,8 +2936,8 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): return J # def GradientOfCostFunction(w): - _W = numpy.asmatrix(numpy.ravel( w )).T - GradJb = HBHTpR * _W + _W = numpy.asarray(w).reshape((-1,1)) + GradJb = HBHTpR @ _W GradJo = - Innovation GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) return GradJ @@ -2620,7 +2956,6 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): x0 = Xini, fprime = GradientOfCostFunction, args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, factr = selfA._parameters["CostDecrementTolerance"]*1.e14, pgtol = selfA._parameters["ProjectedGradientTolerance"], @@ -2634,7 +2969,6 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): x0 = Xini, fprime = GradientOfCostFunction, args = (), - bounds = RecentredBounds(selfA._parameters["Bounds"], Xb), maxfun = selfA._parameters["MaximumNumberOfSteps"], pgtol = selfA._parameters["ProjectedGradientTolerance"], ftol = 
selfA._parameters["CostDecrementTolerance"], @@ -2683,13 +3017,11 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # ---------------------------------------------------------------- if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T else: - Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T + Minimum = Xb + BHT @ Minimum.reshape((-1,1)) # - # Obtention de l'analyse - # ---------------------- Xa = Minimum + #-------------------------- # selfA.StoredVariables["Analysis"].store( Xa ) # @@ -2704,8 +3036,6 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): else: HXa = Hm( Xa ) # - # Calcul de la covariance d'analyse - # --------------------------------- if selfA._toStore("APosterioriCovariance") or \ selfA._toStore("SimulationQuantiles") or \ selfA._toStore("JacobianMatrixAtOptimum") or \ @@ -2721,25 +3051,7 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): selfA._toStore("SimulationQuantiles"): BI = B.getI() RI = R.getI() - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) + A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI) if selfA._toStore("APosterioriCovariance"): selfA.StoredVariables["APosterioriCovariance"].store( A ) if selfA._toStore("JacobianMatrixAtOptimum"): @@ -2757,29 +3069,32 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): selfA._toStore("OMB"): d = Y - HXb if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) + selfA.StoredVariables["Innovation"].store( d ) if selfA._toStore("BMA"): selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) if selfA._toStore("OMA"): selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) + selfA.StoredVariables["OMB"].store( d ) if selfA._toStore("SigmaObs2"): TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) + selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR ) if selfA._toStore("MahalanobisConsistency"): selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) if selfA._toStore("SimulationQuantiles"): QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM) if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) + selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb ) if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) + selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa ) # return 0 # ============================================================================== -def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"): +def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, + VariantM="KalmanFilterFormula16", + Hybrid=None, + ): """ Stochastic EnKF """ @@ -2817,6 +3132,8 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" # __n = Xb.size __m = selfA._parameters["NumberOfMembers"] + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) + previousJMinimum = numpy.finfo(float).max # if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p) else: Rn = R @@ -2832,8 +3149,6 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" elif selfA._parameters["nextStep"]: Xn = selfA._getInternalState("Xn") # - previousJMinimum = numpy.finfo(float).max - # for step in range(duration-1): numpy.random.set_state(selfA._getInternalState("seed")) if hasattr(Y,"store"): @@ -2843,11 +3158,11 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" # if U is not None: if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T + Un = numpy.ravel( U[step] ).reshape((-1,1)) elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + Un = numpy.ravel( U[0] ).reshape((-1,1)) else: - Un = numpy.asmatrix(numpy.ravel( U )).T + Un = numpy.ravel( U ).reshape((-1,1)) else: Un = None # @@ -2867,7 +3182,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" returnSerieAsArrayMatrix = True ) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un + Xn_predicted = Xn_predicted + Cm @ Un elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = EMX = Xn @@ -2876,8 +3191,8 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" returnSerieAsArrayMatrix = True ) # # Mean of forecast and observation of forecast - Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) - Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) + Xfm = EnsembleMean( Xn_predicted ) + Hfm = EnsembleMean( HX_predicted ) # #-------------------------- if VariantM == "KalmanFilterFormula05": @@ -2917,7 +3232,11 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" selfA._parameters["InflationFactor"], ) # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + if Hybrid == "E3DVAR": + betaf = selfA._parameters["HybridCovarianceEquilibrium"] + Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf) + # + Xa = EnsembleMean( Xn ) #-------------------------- selfA._setInternalState("Xn", Xn) selfA._setInternalState("seed", numpy.random.get_state()) @@ -2931,7 +3250,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" or selfA._toStore("InnovationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T + _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1)) _Innovation = Ynpu - _HXa # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) @@ -2963,8 +3282,8 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16" or selfA._toStore("CostFunctionJo") \ or selfA._toStore("CurrentOptimum") \ or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) J = Jb + Jo selfA.StoredVariables["CostFunctionJb"].store( Jb ) selfA.StoredVariables["CostFunctionJo"].store( Jo ) @@ -3021,16 +3340,13 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # # Initialisations # --------------- - # - # Opérateurs Hm = HO["Direct"].appliedTo Ha = HO["Adjoint"].appliedInXTo # - # Utilisation éventuelle d'un vecteur H(Xb) précalculé if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: - HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) + HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] )) else: - HXb = Hm( Xb ) + HXb = numpy.asarray(Hm( Xb )) HXb = HXb.reshape((-1,1)) if Y.size != HXb.size: raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) @@ -3042,22 +3358,20 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb ) # - # Précalcul des inversions de B et R BI = B.getI() RI = R.getI() # - # Point de démarrage de l'optimisation Xini = selfA._parameters["InitializationPoint"] # # Définition de la fonction-coût # ------------------------------ def CostFunction(x): - _X = numpy.ravel( x ).reshape((-1,1)) + _X = numpy.asarray(x).reshape((-1,1)) if 
selfA._parameters["StoreInternalVariables"] or \ selfA._toStore("CurrentState") or \ selfA._toStore("CurrentOptimum"): selfA.StoredVariables["CurrentState"].store( _X ) - _HX = Hm( _X ).reshape((-1,1)) + _HX = numpy.asarray(Hm( _X )).reshape((-1,1)) _Innovation = Y - _HX if selfA._toStore("SimulatedObservationAtCurrentState") or \ selfA._toStore("SimulatedObservationAtCurrentOptimum"): @@ -3095,9 +3409,8 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): return J # def GradientOfCostFunction(x): - _X = numpy.ravel( x ).reshape((-1,1)) - _HX = Hm( _X ) - _HX = numpy.ravel( _HX ).reshape((-1,1)) + _X = numpy.asarray(x).reshape((-1,1)) + _HX = numpy.asarray(Hm( _X )).reshape((-1,1)) GradJb = BI * (_X - Xb) GradJo = - Ha( (_X, RI * (Y - _HX)) ) GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) @@ -3210,23 +3523,7 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape if selfA._toStore("APosterioriCovariance") or \ selfA._toStore("SimulationQuantiles"): - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.zeros(nb) - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) ) - A = numpy.linalg.inv(numpy.array( HessienneI )) - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,)) + A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI) if selfA._toStore("APosterioriCovariance"): selfA.StoredVariables["APosterioriCovariance"].store( A ) if selfA._toStore("JacobianMatrixAtOptimum"): @@ -3236,6 +3533,8 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) # + # Calculs et/ou stockages supplémentaires + # --------------------------------------- if selfA._toStore("Innovation") or \ selfA._toStore("SigmaObs2") or \ selfA._toStore("MahalanobisConsistency") or \ @@ -3284,18 +3583,18 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): def Un(_step): if U is not None: if hasattr(U,"store") and 1<=_step1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T + Un = numpy.ravel( U[step] ).reshape((-1,1)) elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + Un = numpy.ravel( U[0] ).reshape((-1,1)) else: - Un = numpy.asmatrix(numpy.ravel( U )).T + Un = numpy.ravel( U ).reshape((-1,1)) else: Un = None # if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - Xn_predicted = Mt * Xn + Xn_predicted = Mt @ Xn if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - Pn_predicted = Q + Mt * Pn * Ma + Xn_predicted = Xn_predicted + Cm @ Un + Pn_predicted = Q + Mt * (Pn * Ma) elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = Xn Pn_predicted = Pn # if selfA._parameters["EstimationOf"] == "State": - HX_predicted = Ht * Xn_predicted + HX_predicted = Ht @ Xn_predicted _Innovation = Ynpu - HX_predicted elif selfA._parameters["EstimationOf"] == "Parameters": - HX_predicted = Ht * Xn_predicted + HX_predicted = Ht @ Xn_predicted _Innovation = Ynpu - HX_predicted if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! - _Innovation = _Innovation - Cm * Un + _Innovation = _Innovation - Cm @ Un # Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha)) Xn = Xn_predicted + Kn * _Innovation @@ -3632,8 +3932,8 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): or selfA._toStore("CostFunctionJo") \ or selfA._toStore("CurrentOptimum") \ or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) J = Jb + Jo selfA.StoredVariables["CostFunctionJb"].store( Jb ) selfA.StoredVariables["CostFunctionJo"].store( Jo ) @@ -3680,13 +3980,12 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): return 0 # ============================================================================== -def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): +def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): """ - Constrained Unscented Kalman Filter + Unscented Kalman Filter """ if selfA._parameters["EstimationOf"] == "Parameters": selfA._parameters["StoreInternalVariables"] = True - selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] ) # L = Xb.size Alpha = selfA._parameters["Alpha"] @@ -3741,6 +4040,7 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): RI = R.getI() # __n = Xb.size + nbPreviousSteps = len(selfA.StoredVariables["Analysis"]) # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: Xn = Xb @@ -3776,79 +4076,60 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): else: Un = None # - Pndemi = numpy.linalg.cholesky(Pn) + Pndemi = numpy.real(scipy.linalg.sqrtm(Pn)) Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi]) nbSpts = 2*Xn.size+1 # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - for point in range(nbSpts): - Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] ) - # XEtnnp = [] for point in range(nbSpts): if selfA._parameters["EstimationOf"] == "State": - XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T + XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1)) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape - XEtnnpi = XEtnnpi + Cm * Un - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] ) + XEtnnpi = XEtnnpi + Cm @ Un elif selfA._parameters["EstimationOf"] == "Parameters": # --- > Par principe, M = Id, Q = 0 XEtnnpi = Xnp[:,point] - XEtnnp.append( XEtnnpi ) - XEtnnp = numpy.hstack( XEtnnp ) + XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) ) + XEtnnp = numpy.concatenate( XEtnnp, axis=1 ) # - Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1) - # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] ) + Xncm = ( XEtnnp * Wm ).sum(axis=1) # if selfA._parameters["EstimationOf"] == "State": Pnm = Q elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0. for point in range(nbSpts): - Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T + Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm)) # - if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None: - Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm) - else: - Pnmdemi = numpy.linalg.cholesky(Pnm) - # - Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi]) + Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm)) # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - for point in range(nbSpts): - Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] ) + Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi]) # Ynnp = [] for point in range(nbSpts): if selfA._parameters["EstimationOf"] == "State": - Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T + Ynnpi = Hm( (Xnnp[:,point], None) ) elif selfA._parameters["EstimationOf"] == "Parameters": - Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T - Ynnp.append( Ynnpi ) - Ynnp = numpy.hstack( Ynnp ) + Ynnpi = Hm( (Xnnp[:,point], Un) ) + Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) ) + Ynnp = numpy.concatenate( Ynnp, axis=1 ) # - Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1) + Yncm = ( Ynnp * Wm ).sum(axis=1) # Pyyn = R Pxyn = 0. for point in range(nbSpts): - Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T - Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T + Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) + Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) # - _Innovation = Ynpu - Yncm + _Innovation = Ynpu - Yncm.reshape((-1,1)) if selfA._parameters["EstimationOf"] == "Parameters": if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon ! 
- _Innovation = _Innovation - Cm * Un + _Innovation = _Innovation - Cm @ Un # Kn = Pxyn * Pyyn.I - Xn = Xncm + Kn * _Innovation + Xn = Xncm.reshape((-1,1)) + Kn * _Innovation Pn = Pnm - Kn * Pyyn * Kn.T # - if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": - Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] ) - # Xa = Xn # Pointeurs #-------------------------- selfA._setInternalState("Xn", Xn) @@ -3884,8 +4165,8 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): or selfA._toStore("CostFunctionJo") \ or selfA._toStore("CurrentOptimum") \ or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) J = Jb + Jo selfA.StoredVariables["CostFunctionJb"].store( Jb ) selfA.StoredVariables["CostFunctionJo"].store( Jo ) @@ -3939,29 +4220,24 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # # Initialisations # --------------- - # - # Opérateurs Hm = HO["Direct"].appliedTo Ha = HO["Adjoint"].appliedInXTo # - # Précalcul des inversions de B et R BT = B.getT() RI = R.getI() # - # Point de démarrage de l'optimisation - Xini = numpy.zeros(Xb.shape) + Xini = numpy.zeros(Xb.size) # # Définition de la fonction-coût # ------------------------------ def CostFunction(v): - _V = numpy.asmatrix(numpy.ravel( v )).T - _X = Xb + B * _V + _V = numpy.asarray(v).reshape((-1,1)) + _X = Xb + (B @ _V).reshape((-1,1)) if selfA._parameters["StoreInternalVariables"] or \ selfA._toStore("CurrentState") or \ selfA._toStore("CurrentOptimum"): selfA.StoredVariables["CurrentState"].store( _X ) - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T + _HX = numpy.asarray(Hm( _X )).reshape((-1,1)) _Innovation = Y - _HX if selfA._toStore("SimulatedObservationAtCurrentState") or \ selfA._toStore("SimulatedObservationAtCurrentOptimum"): @@ -3969,8 +4245,8 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): if selfA._toStore("InnovationAtCurrentState"): selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) # - Jb = float( 0.5 * _V.T * BT * _V ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + Jb = float( 0.5 * _V.T * (BT * _V) ) + Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) ) J = Jb + Jo # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) @@ -3999,10 +4275,9 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): return J # def GradientOfCostFunction(v): - _V = numpy.asmatrix(numpy.ravel( v )).T - _X = Xb + B * _V - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T + _V = numpy.asarray(v).reshape((-1,1)) + _X = Xb + (B @ _V).reshape((-1,1)) + _HX = numpy.asarray(Hm( _X )).reshape((-1,1)) GradJb = BT * _V GradJo = - Ha( (_X, RI * (Y - _HX)) ) GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) @@ -4085,13 +4360,11 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): # ---------------------------------------------------------------- if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T else: - Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T + Minimum = Xb + B * Minimum.reshape((-1,1)) # Pas @ # - # Obtention de l'analyse - # ---------------------- Xa = Minimum + #-------------------------- # 
selfA.StoredVariables["Analysis"].store( Xa ) # @@ -4106,8 +4379,6 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): else: HXa = Hm( Xa ) # - # Calcul de la covariance d'analyse - # --------------------------------- if selfA._toStore("APosterioriCovariance") or \ selfA._toStore("SimulationQuantiles") or \ selfA._toStore("JacobianMatrixAtOptimum") or \ @@ -4122,25 +4393,7 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): if selfA._toStore("APosterioriCovariance") or \ selfA._toStore("SimulationQuantiles"): BI = B.getI() - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,)) + A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI) if selfA._toStore("APosterioriCovariance"): selfA.StoredVariables["APosterioriCovariance"].store( A ) if selfA._toStore("JacobianMatrixAtOptimum"): @@ -4158,24 +4411,24 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): selfA._toStore("OMB"): d = Y - HXb if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) + selfA.StoredVariables["Innovation"].store( d ) if selfA._toStore("BMA"): selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) if selfA._toStore("OMA"): selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) + selfA.StoredVariables["OMB"].store( d ) if selfA._toStore("SigmaObs2"): TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) + selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR ) if selfA._toStore("MahalanobisConsistency"): selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) if selfA._toStore("SimulationQuantiles"): QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM) if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) + selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb ) if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) + selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa ) # return 0