# -*- coding: utf-8 -*-
#
-# Copyright (C) 2008-2021 EDF R&D
+# Copyright (C) 2008-2022 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.linalg, scipy.optimize, scipy.version
-from daCore.BasicObjects import Operator
+from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
increment = 0.01,
dX = None,
extraArguments = None,
+ reducingMemoryUse = False,
avoidingRedundancy = True,
toleranceInRedundancy = 1.e-18,
lenghtOfRedundancy = -1,
):
self.__name = str(name)
self.__extraArgs = extraArguments
+ #
if mpEnabled:
try:
import multiprocessing
self.__mpWorkers = None
logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
#
- if mfEnabled:
- self.__mfEnabled = True
- else:
- self.__mfEnabled = False
+ self.__mfEnabled = bool(mfEnabled)
logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
#
+ self.__rmEnabled = bool(reducingMemoryUse)
+ logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
+ #
if avoidingRedundancy:
self.__avoidRC = True
self.__tolerBP = float(toleranceInRedundancy)
self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
else:
self.__avoidRC = False
+ logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
+ if self.__avoidRC:
+ logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
#
if self.__mpEnabled:
if isinstance(Function,types.FunctionType):
self.__dX = None
else:
self.__dX = numpy.ravel( dX )
- logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
- if self.__avoidRC:
- logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
# ---------------------------------------------------------
def __doublon__(self, e, l, n, v=None):
break
return __ac, __iac
+ # ---------------------------------------------------------
+ def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
+ "Produit incrémental d'une matrice liste de colonnes avec un vecteur"
+ if not isinstance(__LMatrix, (list,tuple)):
+ raise TypeError("Columnwise list matrix does not have the proper type: %s"%type(__LMatrix))
+ if __dotWith is not None:
+ __Idwx = numpy.ravel( __dotWith )
+ assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
+ __Produit = numpy.zeros(__LMatrix[0].size)
+ for i, col in enumerate(__LMatrix):
+ __Produit += float(__Idwx[i]) * col
+ return __Produit
+ elif __dotTWith is not None:
+ __Idwy = numpy.ravel( __dotTWith )
+ assert __LMatrix[0].size == __Idwy.size, "Incorrect size of elements"
+ __Produit = numpy.zeros(len(__LMatrix))
+ for i, col in enumerate(__LMatrix):
+ __Produit[i] = float( __Idwy @ col )
+ return __Produit
+ else:
+ __Produit = None
+ return __Produit
+
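+ # A minimal sketch (editorial illustration, not part of the ADAO API) of what
+ # __listdotwith__ computes when the Jacobian is kept as a list of its columns
+ # to reduce memory use. With J stored as the column list [c0, c1, ...]:
+ #   __listdotwith__([c0, c1], __dotWith  = v) == numpy.vstack((c0, c1)).T @ v
+ #   __listdotwith__([c0, c1], __dotTWith = y) == numpy.vstack((c0, c1)) @ y
+ # that is, the products J @ v and J.T @ y without ever assembling J itself.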
# ---------------------------------------------------------
def DirectOperator(self, X, **extraArgs ):
"""
return _HX
# ---------------------------------------------------------
- def TangentMatrix(self, X ):
+ def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
"""
Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
c'est-à-dire le gradient de H en X. On utilise des différences finies
- directionnelles autour du point X. X est un numpy.matrix.
+ directionnelles autour du point X. X est un numpy.ndarray.
Différences finies centrées (approximation d'ordre 2):
1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
if __alreadyCalculated:
logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
_Jacobienne = self.__listJPCR[__i]
+ logging.debug("FDA Fin du calcul de la Jacobienne")
+ if dotWith is not None:
+ return numpy.dot(_Jacobienne, numpy.ravel( dotWith ))
+ elif dotTWith is not None:
+ return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
else:
logging.debug("FDA Calcul Jacobienne (explicite)")
if self.__centeredDF:
_HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
#
_Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
- #
#
- _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
- if self.__avoidRC:
- if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
- while len(self.__listJPCP) > self.__lenghtRJ:
- self.__listJPCP.pop(0)
- self.__listJPCI.pop(0)
- self.__listJPCR.pop(0)
- self.__listJPPN.pop(0)
- self.__listJPIN.pop(0)
- self.__listJPCP.append( copy.copy(_X) )
- self.__listJPCI.append( copy.copy(_dX) )
- self.__listJPCR.append( copy.copy(_Jacobienne) )
- self.__listJPPN.append( numpy.linalg.norm(_X) )
- self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
- #
- logging.debug("FDA Fin du calcul de la Jacobienne")
+ if (dotWith is not None) or (dotTWith is not None):
+ __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
+ else:
+ __Produit = None
+ if __Produit is None or self.__avoidRC:
+ _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
+ if self.__avoidRC:
+ if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
+ while len(self.__listJPCP) > self.__lenghtRJ:
+ self.__listJPCP.pop(0)
+ self.__listJPCI.pop(0)
+ self.__listJPCR.pop(0)
+ self.__listJPPN.pop(0)
+ self.__listJPIN.pop(0)
+ self.__listJPCP.append( copy.copy(_X) )
+ self.__listJPCI.append( copy.copy(_dX) )
+ self.__listJPCR.append( copy.copy(_Jacobienne) )
+ self.__listJPPN.append( numpy.linalg.norm(_X) )
+ self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
+ logging.debug("FDA Fin du calcul de la Jacobienne")
+ if __Produit is not None:
+ return __Produit
#
return _Jacobienne
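# A small numerical sketch (illustration only) of the centered scheme above:
# for H(x) = (x0**2, x0*x1) at X = (1, 2), column i of the Jacobian is
# approximated by ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi), which gives
# [[2., 0.], [2., 1.]], i.e. the exact [[2*x0, 0.], [x1, x0]] up to terms of
# order dXi**2.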
ne doivent pas être données ici à la fonction utilisateur.
"""
if self.__mfEnabled:
- assert len(paire) == 1, "Incorrect lenght of arguments"
+ assert len(paire) == 1, "Incorrect length of arguments"
_paire = paire[0]
assert len(_paire) == 2, "Incorrect number of arguments"
else:
assert len(paire) == 2, "Incorrect number of arguments"
_paire = paire
X, dX = _paire
- _Jacobienne = self.TangentMatrix( X )
if dX is None or len(dX) == 0:
#
# Calcul de la forme matricielle si le second argument est None
# -------------------------------------------------------------
+ _Jacobienne = self.TangentMatrix( X )
if self.__mfEnabled: return [_Jacobienne,]
else: return _Jacobienne
else:
#
# Calcul de la valeur linéarisée de H en X appliqué à dX
# ------------------------------------------------------
- _dX = numpy.ravel( dX )
- _HtX = numpy.dot(_Jacobienne, _dX)
+ _HtX = self.TangentMatrix( X, dotWith = dX )
if self.__mfEnabled: return [_HtX,]
else: return _HtX
ne doivent pas être données ici à la fonction utilisateur.
"""
if self.__mfEnabled:
- assert len(paire) == 1, "Incorrect lenght of arguments"
+ assert len(paire) == 1, "Incorrect length of arguments"
_paire = paire[0]
assert len(_paire) == 2, "Incorrect number of arguments"
else:
assert len(paire) == 2, "Incorrect number of arguments"
_paire = paire
X, Y = _paire
- _JacobienneT = self.TangentMatrix( X ).T
if Y is None or len(Y) == 0:
#
# Calcul de la forme matricielle si le second argument est None
# -------------------------------------------------------------
+ _JacobienneT = self.TangentMatrix( X ).T
if self.__mfEnabled: return [_JacobienneT,]
else: return _JacobienneT
else:
#
# Calcul de la valeur de l'adjoint en X appliqué à Y
# --------------------------------------------------
- _Y = numpy.ravel( Y )
- _HaY = numpy.dot(_JacobienneT, _Y)
+ _HaY = self.TangentMatrix( X, dotTWith = Y )
if self.__mfEnabled: return [_HaY,]
else: return _HaY
raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
#
if _bgcovariance is None:
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers)
else:
_Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
#
- return BackgroundEnsemble
+ return _Perturbations
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
if _nbmembers < 2:
raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
if _bgcovariance is None:
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers)
else:
if _withSVD:
- U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
+ _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
_nbctl = _bgcenter.size
if _nbmembers > _nbctl:
_Z = numpy.concatenate((numpy.dot(
- numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
+ numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
else:
- _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
+ _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
_Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
- BackgroundEnsemble = _bgcenter + _Zca
+ _Perturbations = _bgcenter + _Zca
else:
if max(abs(_bgcovariance.flatten())) > 0:
_nbctl = _bgcenter.size
_Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
_Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
- BackgroundEnsemble = _bgcenter + _Zca
+ _Perturbations = _bgcenter + _Zca
else:
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers)
#
- return BackgroundEnsemble
+ return _Perturbations
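+# A short usage sketch (illustration only, on a hypothetical small case) of what
+# EnsembleOfBackgroundPerturbations builds: _nbmembers states whose anomalies
+# are centered and whose sample covariance approximates _bgcovariance, e.g.
+#   E = EnsembleOfBackgroundPerturbations(numpy.zeros((3,1)), numpy.eye(3), 100)
+#   E.shape             # (3, 100)
+#   numpy.cov(E)        # roughly numpy.eye(3) for a large enough ensemble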
# ==============================================================================
def EnsembleMean( __Ensemble ):
#
return OutputCovOrEns
+# ==============================================================================
+def HessienneEstimation(selfA, nb, HaM, HtM, BI, RI):
+ "Estimation de la Hessienne"
+ #
+ HessienneI = []
+ for i in range(int(nb)):
+ _ee = numpy.zeros((nb,1))
+ _ee[i] = 1.
+ _HtEE = numpy.dot(HtM,_ee).reshape((-1,1))
+ HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
+ #
+ A = numpy.linalg.inv(numpy.array( HessienneI ))
+ #
+ if min(A.shape) != max(A.shape):
+ raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+ if (numpy.diag(A) < 0).any():
+ raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+ if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
+ try:
+ numpy.linalg.cholesky( A )
+ except numpy.linalg.LinAlgError:
+ raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ #
+ return A
+
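+# A brief editorial note: HessienneEstimation applies B^-1 + H^T R^-1 H to each
+# canonical unit vector _ee, so that the inverted result A is the classical
+# 3DVAR a posteriori covariance A = (B^-1 + H^T R^-1 H)^-1. As a scalar
+# illustration, with B = R = 2 and H = 1, each diagonal term is 1/(1/2 + 1/2) = 1.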
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
"Estimation des quantiles a posteriori (selfA est modifié)"
if LBounds is not None: # "EstimateProjection" par défaut
Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
- Yr = Hm( Xr )
+ Yr = numpy.asarray(Hm( Xr ))
else:
raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
#
# Conserve une valeur par défaut à None s'il n'y a pas de bornes
if __Bounds is None: return None
# Recentre les valeurs numériques de bornes
- return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).transpose((-1,1))
+ return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True):
if not isinstance(__Bounds, numpy.ndarray): # Has to be an array
raise ValueError("Incorrect array definition of bounds data")
if 2*__Vector.size != __Bounds.size: # Has to be a 2 column array of vector length
- raise ValueError("Incorrect bounds number to be applied for this vector")
+ raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
raise ValueError("Incorrectly shaped bounds data")
#
#
return __Vector
+# ==============================================================================
+def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
+ "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
+ #
+ Xf = EnsembleMean( __EnXf )
+ Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
+ Pf = (1 - __Betaf) * __B + __Betaf * Pf
+ #
+ selfB = PartialAlgorithm("3DVAR")
+ selfB._parameters["Minimizer"] = "LBFGSB"
+ selfB._parameters["MaximumNumberOfSteps"] = 15000
+ selfB._parameters["CostDecrementTolerance"] = 1.e-7
+ selfB._parameters["ProjectedGradientTolerance"] = -1
+ selfB._parameters["GradientNormTolerance"] = 1.e-05
+ selfB._parameters["StoreInternalVariables"] = False
+ selfB._parameters["optiprint"] = -1
+ selfB._parameters["optdisp"] = 0
+ selfB._parameters["Bounds"] = None
+ selfB._parameters["InitializationPoint"] = Xf
+ std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
+ Xa = selfB.get("Analysis")[-1].reshape((-1,1))
+ del selfB
+ #
+ return Xa + EnsembleOfAnomalies( __EnXn )
+
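+# A summary sketch (illustration only) of the hybrid step above: the forecast
+# covariance is taken as the convex combination
+#   Pf = (1 - Betaf) * B + Betaf * Cov(EnXf),
+# a 3DVAR analysis Xa is computed against this Pf, and the returned ensemble is
+#   Xa + (EnXn - mean(EnXn)),
+# i.e. the input anomalies are preserved but recentred on the 3DVAR analysis.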
+# ==============================================================================
+def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ Constrained Unscented Kalman Filter
+ """
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA._parameters["StoreInternalVariables"] = True
+ selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
+ #
+ L = Xb.size
+ Alpha = selfA._parameters["Alpha"]
+ Beta = selfA._parameters["Beta"]
+ if selfA._parameters["Kappa"] == 0:
+ if selfA._parameters["EstimationOf"] == "State":
+ Kappa = 0
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ Kappa = 3 - L
+ else:
+ Kappa = selfA._parameters["Kappa"]
+ Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
+ Gamma = math.sqrt( L + Lambda )
+ #
+ Ww = []
+ Ww.append( 0. )
+ for i in range(2*L):
+ Ww.append( 1. / (2.*(L + Lambda)) )
+ #
+ Wm = numpy.array( Ww )
+ Wm[0] = Lambda / (L + Lambda)
+ Wc = numpy.array( Ww )
+ Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
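+ # Weight sanity sketch (illustration only): the mean weights Wm sum to 1. For
+ # instance, with L = 2, Alpha = 1, Beta = 2 and Kappa = 1, one gets Lambda = 1,
+ # Gamma = sqrt(3), Wm = [1/3, 1/6, 1/6, 1/6, 1/6], and
+ # Wc[0] = 1/3 + (1 - 1 + 2) = 7/3 with the four other Wc equal to 1/6.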
+ #
+ # Opérateurs
+ Hm = HO["Direct"].appliedControledFormTo
+ #
+ if selfA._parameters["EstimationOf"] == "State":
+ Mm = EM["Direct"].appliedControledFormTo
+ #
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
+ #
+ # Durée d'observation et tailles
+ if hasattr(Y,"stepnumber"):
+ duration = Y.stepnumber()
+ __p = numpy.cumprod(Y.shape())[-1]
+ else:
+ duration = 2
+ __p = numpy.array(Y).size
+ #
+ # Précalcul des inversions de B et R
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ BI = B.getI()
+ RI = R.getI()
+ #
+ __n = Xb.size
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
+ #
+ if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+ Xn = Xb
+ if hasattr(B,"asfullmatrix"):
+ Pn = B.asfullmatrix(__n)
+ else:
+ Pn = B
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( Xb )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ elif selfA._parameters["nextStep"]:
+ Xn = selfA._getInternalState("Xn")
+ Pn = selfA._getInternalState("Pn")
+ #
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ XaMin = Xn
+ previousJMinimum = numpy.finfo(float).max
+ #
+ for step in range(duration-1):
+ if hasattr(Y,"store"):
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+ else:
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
+ #
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
+ else:
+ Un = numpy.ravel( U ).reshape((-1,1))
+ else:
+ Un = None
+ #
+ Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
+ Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
+ nbSpts = 2*Xn.size+1
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ for point in range(nbSpts):
+ Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
+ #
+ XEtnnp = []
+ for point in range(nbSpts):
+ if selfA._parameters["EstimationOf"] == "State":
+ XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
+ if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+ Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+ XEtnnpi = XEtnnpi + Cm @ Un
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ # --- > Par principe, M = Id, Q = 0
+ XEtnnpi = Xnp[:,point]
+ XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
+ XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
+ #
+ Xncm = ( XEtnnp * Wm ).sum(axis=1)
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
+ #
+ if selfA._parameters["EstimationOf"] == "State": Pnm = Q
+ elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
+ for point in range(nbSpts):
+ Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
+ #
+ if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
+ Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
+ else:
+ Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
+ #
+ Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ for point in range(nbSpts):
+ Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
+ #
+ Ynnp = []
+ for point in range(nbSpts):
+ if selfA._parameters["EstimationOf"] == "State":
+ Ynnpi = Hm( (Xnnp[:,point], None) )
+ elif selfA._parameters["EstimationOf"] == "Parameters":
+ Ynnpi = Hm( (Xnnp[:,point], Un) )
+ Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
+ Ynnp = numpy.concatenate( Ynnp, axis=1 )
+ #
+ Yncm = ( Ynnp * Wm ).sum(axis=1)
+ #
+ Pyyn = R
+ Pxyn = 0.
+ for point in range(nbSpts):
+ Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+ Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+ #
+ _Innovation = Ynpu - Yncm.reshape((-1,1))
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
+ _Innovation = _Innovation - Cm @ Un
+ #
+ Kn = Pxyn @ numpy.linalg.inv(Pyyn)
+ Xn = Xncm.reshape((-1,1)) + Kn @ _Innovation
+ Pn = Pnm - Kn @ Pyyn @ Kn.T
+ #
+ if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+ #
+ Xa = Xn # Pointeurs
+ #--------------------------
+ selfA._setInternalState("Xn", Xn)
+ selfA._setInternalState("Pn", Pn)
+ #--------------------------
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> avec analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> avec current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( Xncm )
+ if selfA._toStore("ForecastCovariance"):
+ selfA.StoredVariables["ForecastCovariance"].store( Pnm )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( Xncm - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
+ # ---> autres
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+ #
+ # Stockage final supplémentaire de l'optimum en estimation de paramètres
+ # ----------------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
"""
RI = R.getI()
#
__n = Xb.size
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = Xb
Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
Pn_predicted = Q + Mt * (Pn * Ma)
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
_Innovation = Ynpu - HX_predicted
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
Xn = Xn_predicted + Kn * _Innovation
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- EZ = EZ + Cm * Un
+ EZ = EZ + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > Par principe, M = Id, Q = 0
EZ = H( [(EL[:,i], Un) for i in range(__m)],
return 0
# ==============================================================================
-def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+ VariantM="KalmanFilterFormula",
+ Hybrid=None,
+ ):
"""
Ensemble-Transform EnKF
"""
#
__n = Xb.size
__m = selfA._parameters["NumberOfMembers"]
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
+ previousJMinimum = numpy.finfo(float).max
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
elif selfA._parameters["nextStep"]:
Xn = selfA._getInternalState("Xn")
#
- previousJMinimum = numpy.finfo(float).max
- #
for step in range(duration-1):
numpy.random.set_state(selfA._getInternalState("seed"))
if hasattr(Y,"store"):
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
Xn_predicted = EMX = Xn
returnSerieAsArrayMatrix = True )
#
# Mean of forecast and observation of forecast
- Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ Xfm = EnsembleMean( Xn_predicted )
+ Hfm = EnsembleMean( HX_predicted )
#
# Anomalies
EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
def CostFunction(w):
_A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _Jo = 0.5 * _A.T * RI * _A
+ _Jo = 0.5 * _A.T * (RI * _A)
_Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
_J = _Jo + _Jb
return float(_J)
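# Editorial note: this cost is minimized in the ensemble weight space, w being
# of size __m. The term _Jb is the finite-size ("EnKF-N" like) prior of this
# variant, _Jo the usual misfit of the innovation projected on the observation
# anomalies EaHX, and the analysis mean is rebuilt from the optimal weights
# applied to the state anomalies EaX.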
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if Hybrid == "E3DVAR":
+ betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+ Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+ #
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
RI = R.getI()
#
__n = Xb.size
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = Xb
Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
Pn_predicted = Q + Mt * (Pn * Ma)
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
_Innovation = Ynpu - HX_predicted
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
Xn = Xn_predicted + Kn * _Innovation
#
__n = Xb.size
__m = selfA._parameters["NumberOfMembers"]
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
+ previousJMinimum = numpy.finfo(float).max
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
elif selfA._parameters["nextStep"]:
Xn = selfA._getInternalState("Xn")
#
- previousJMinimum = numpy.finfo(float).max
- #
for step in range(duration-1):
numpy.random.set_state(selfA._getInternalState("seed"))
if hasattr(Y,"store"):
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
#
# Initialisations
# ---------------
- #
- # Opérateur non-linéaire pour la boucle externe
Hm = HO["Direct"].appliedTo
#
- # Précalcul des inversions de B et R
BI = B.getI()
RI = R.getI()
#
- # Point de démarrage de l'optimisation
- Xini = selfA._parameters["InitializationPoint"]
- #
- HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
+ HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
Innovation = Y - HXb
#
# Outer Loop
iOuter = 0
J = 1./mpr
DeltaJ = 1./mpr
- Xr = Xini.reshape((-1,1))
+ Xr = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
#
# Inner Loop
# Définition de la fonction-coût
# ------------------------------
def CostFunction(dx):
- _dX = numpy.asmatrix(numpy.ravel( dx )).T
+ _dX = numpy.asarray(dx).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( Xb + _dX )
- _HdX = Ht * _dX
- _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+ _HdX = (Ht @ _dX).reshape((-1,1))
_dInnovation = Innovation - _HdX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
if selfA._toStore("InnovationAtCurrentState"):
selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
#
- Jb = float( 0.5 * _dX.T * BI * _dX )
- Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
+ Jb = float( 0.5 * _dX.T * (BI * _dX) )
+ Jo = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
J = Jb + Jo
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
return J
#
def GradientOfCostFunction(dx):
- _dX = numpy.asmatrix(numpy.ravel( dx )).T
- _HdX = Ht * _dX
- _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+ _dX = numpy.ravel( dx )
+ _HdX = (Ht @ _dX).reshape((-1,1))
_dInnovation = Innovation - _HdX
- GradJb = BI * _dX
+ GradJb = BI @ _dX
GradJo = - Ht.T @ (RI * _dInnovation)
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
return GradJ
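# Gradient sketch (illustration only): with d = Y - H(Xb), the inner loop
# minimizes J(dx) = 0.5*dx.T B^-1 dx + 0.5*(d - H dx).T R^-1 (d - H dx), whose
# gradient is exactly the quantity assembled above:
#   grad J = B^-1 dx - H.T R^-1 (d - H dx)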
import scipy.optimize as optimiseur
Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
func = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
elif selfA._parameters["Minimizer"] == "TNC":
Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
func = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
elif selfA._parameters["Minimizer"] == "CG":
Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
f = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
elif selfA._parameters["Minimizer"] == "NCG":
Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
f = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
elif selfA._parameters["Minimizer"] == "BFGS":
Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
f = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
#
if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
- Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
else:
- Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
+ Minimum = Xb + Minimum.reshape((-1,1))
#
Xr = Minimum
DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
#
- # Obtention de l'analyse
- # ----------------------
Xa = Xr
+ #--------------------------
#
selfA.StoredVariables["Analysis"].store( Xa )
#
else:
HXa = Hm( Xa )
#
- # Calcul de la covariance d'analyse
- # ---------------------------------
if selfA._toStore("APosterioriCovariance") or \
selfA._toStore("SimulationQuantiles") or \
selfA._toStore("JacobianMatrixAtOptimum") or \
HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
if selfA._toStore("APosterioriCovariance") or \
selfA._toStore("SimulationQuantiles"):
- HessienneI = []
- nb = Xa.size
- for i in range(nb):
- _ee = numpy.matrix(numpy.zeros(nb)).T
- _ee[i] = 1.
- _HtEE = numpy.dot(HtM,_ee)
- _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
- HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
- HessienneI = numpy.matrix( HessienneI )
- A = HessienneI.I
- if min(A.shape) != max(A.shape):
- raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
- if (numpy.diag(A) < 0).any():
- raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
- if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
- try:
- L = numpy.linalg.cholesky( A )
- except:
- raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ A = HessienneEstimation(selfA, Xa.size, HaM, HtM, BI, RI)
if selfA._toStore("APosterioriCovariance"):
selfA.StoredVariables["APosterioriCovariance"].store( A )
if selfA._toStore("JacobianMatrixAtOptimum"):
selfA._toStore("OMB"):
d = Y - HXb
if selfA._toStore("Innovation"):
- selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ selfA.StoredVariables["Innovation"].store( d )
if selfA._toStore("BMA"):
selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if selfA._toStore("OMA"):
selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
if selfA._toStore("OMB"):
- selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ selfA.StoredVariables["OMB"].store( d )
if selfA._toStore("SigmaObs2"):
TraceR = R.trace(Y.size)
- selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
if selfA._toStore("MahalanobisConsistency"):
selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
if selfA._toStore("SimulationQuantiles"):
QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
if selfA._toStore("SimulatedObservationAtBackground"):
- selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if selfA._toStore("SimulatedObservationAtOptimum"):
- selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
return 0
# ==============================================================================
-def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
- BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+ VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
+ Hybrid=None,
+ ):
"""
Maximum Likelihood Ensemble Filter
"""
#
__n = Xb.size
__m = selfA._parameters["NumberOfMembers"]
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
+ previousJMinimum = numpy.finfo(float).max
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
elif selfA._parameters["nextStep"]:
Xn = selfA._getInternalState("Xn")
#
- previousJMinimum = numpy.finfo(float).max
- #
for step in range(duration-1):
numpy.random.set_state(selfA._getInternalState("seed"))
if hasattr(Y,"store"):
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
Xn_predicted = EMX = Xn
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if Hybrid == "E3DVAR":
+ betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+ Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+ #
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
iteration += 1
#
Derivees = numpy.array(fprime(variables))
- Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
+ Derivees = Derivees.reshape(n,p) # ADAO & check shape
DeriveesT = Derivees.transpose()
M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
"""
#
# Initialisation
+ # --------------
if selfA._parameters["EstimationOf"] == "State":
- M = EM["Direct"].appliedTo
+ M = EM["Direct"].appliedControledFormTo
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = numpy.ravel(Xb).reshape((-1,1))
else:
Ynpu = numpy.ravel( Y ).reshape((-1,1))
#
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
+ else:
+ Un = numpy.ravel( U ).reshape((-1,1))
+ else:
+ Un = None
+ #
if selfA._parameters["EstimationOf"] == "State": # Forecast
- Xn_predicted = M( Xn )
+ Xn_predicted = M( (Xn, Un) )
if selfA._toStore("ForecastState"):
selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+ if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
# --- > Par principe, M = Id, Q = 0
Xn_predicted = Xn
Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
#
- oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
+ oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
#
Xn = selfA.StoredVariables["Analysis"][-1]
#--------------------------
#
# Initialisations
# ---------------
- #
- # Opérateurs
Hm = HO["Direct"].appliedTo
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
- HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
else:
- HXb = Hm( Xb )
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ HXb = numpy.asarray(Hm( Xb ))
+ HXb = numpy.ravel( HXb ).reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
if max(Y.shape) != max(HXb.shape):
HBHTpR = R + Ht * BHT
Innovation = Y - HXb
#
- # Point de démarrage de l'optimisation
- Xini = numpy.zeros(Xb.shape)
+ Xini = numpy.zeros(Y.size)
#
# Définition de la fonction-coût
# ------------------------------
def CostFunction(w):
- _W = numpy.asmatrix(numpy.ravel( w )).T
+ _W = numpy.asarray(w).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
+ selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
if selfA._toStore("InnovationAtCurrentState"):
selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
#
- Jb = float( 0.5 * _W.T * HBHTpR * _W )
- Jo = float( - _W.T * Innovation )
+ Jb = float( 0.5 * _W.T @ (HBHTpR @ _W) )
+ Jo = float( - _W.T @ Innovation )
J = Jb + Jo
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
return J
#
def GradientOfCostFunction(w):
- _W = numpy.asmatrix(numpy.ravel( w )).T
- GradJb = HBHTpR * _W
+ _W = numpy.asarray(w).reshape((-1,1))
+ GradJb = HBHTpR @ _W
GradJo = - Innovation
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
return GradJ
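# Editorial sketch: this is the dual ("PSAS" like) form, minimized in the
# observation space with w of size Y.size. The cost
#   J(w) = 0.5 * w.T (H B H.T + R) w - w.T (Y - H(Xb))
# has the gradient (H B H.T + R) w - Innovation assembled above, and the primal
# analysis is recovered afterwards as Xa = Xb + B H.T w.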
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
pgtol = selfA._parameters["ProjectedGradientTolerance"],
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
maxfun = selfA._parameters["MaximumNumberOfSteps"],
pgtol = selfA._parameters["ProjectedGradientTolerance"],
ftol = selfA._parameters["CostDecrementTolerance"],
# ----------------------------------------------------------------
if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
- Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
else:
- Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
+ Minimum = Xb + BHT @ Minimum.reshape((-1,1))
#
- # Obtention de l'analyse
- # ----------------------
Xa = Minimum
+ #--------------------------
#
selfA.StoredVariables["Analysis"].store( Xa )
#
else:
HXa = Hm( Xa )
#
- # Calcul de la covariance d'analyse
- # ---------------------------------
if selfA._toStore("APosterioriCovariance") or \
selfA._toStore("SimulationQuantiles") or \
selfA._toStore("JacobianMatrixAtOptimum") or \
selfA._toStore("SimulationQuantiles"):
BI = B.getI()
RI = R.getI()
- HessienneI = []
- nb = Xa.size
- for i in range(nb):
- _ee = numpy.matrix(numpy.zeros(nb)).T
- _ee[i] = 1.
- _HtEE = numpy.dot(HtM,_ee)
- _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
- HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
- HessienneI = numpy.matrix( HessienneI )
- A = HessienneI.I
- if min(A.shape) != max(A.shape):
- raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
- if (numpy.diag(A) < 0).any():
- raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
- if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
- try:
- L = numpy.linalg.cholesky( A )
- except:
- raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ A = HessienneEstimation(selfA, Xa.size, HaM, HtM, BI, RI)
if selfA._toStore("APosterioriCovariance"):
selfA.StoredVariables["APosterioriCovariance"].store( A )
if selfA._toStore("JacobianMatrixAtOptimum"):
selfA._toStore("OMB"):
d = Y - HXb
if selfA._toStore("Innovation"):
- selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ selfA.StoredVariables["Innovation"].store( d )
if selfA._toStore("BMA"):
selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if selfA._toStore("OMA"):
selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
if selfA._toStore("OMB"):
- selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ selfA.StoredVariables["OMB"].store( d )
if selfA._toStore("SigmaObs2"):
TraceR = R.trace(Y.size)
- selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
if selfA._toStore("MahalanobisConsistency"):
selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
if selfA._toStore("SimulationQuantiles"):
QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
if selfA._toStore("SimulatedObservationAtBackground"):
- selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if selfA._toStore("SimulatedObservationAtOptimum"):
- selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
return 0
# ==============================================================================
-def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+ VariantM="KalmanFilterFormula16",
+ Hybrid=None,
+ ):
"""
Stochastic EnKF
"""
#
__n = Xb.size
__m = selfA._parameters["NumberOfMembers"]
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
+ previousJMinimum = numpy.finfo(float).max
#
if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
else: Rn = R
elif selfA._parameters["nextStep"]:
Xn = selfA._getInternalState("Xn")
#
- previousJMinimum = numpy.finfo(float).max
- #
for step in range(duration-1):
numpy.random.set_state(selfA._getInternalState("seed"))
if hasattr(Y,"store"):
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
Xn_predicted = EMX = Xn
returnSerieAsArrayMatrix = True )
#
# Mean of forecast and observation of forecast
- Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ Xfm = EnsembleMean( Xn_predicted )
+ Hfm = EnsembleMean( HX_predicted )
#
#--------------------------
if VariantM == "KalmanFilterFormula05":
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if Hybrid == "E3DVAR":
+ betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+ Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+ #
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
#
# Initialisations
# ---------------
- #
- # Opérateurs
Hm = HO["Direct"].appliedTo
Ha = HO["Adjoint"].appliedInXTo
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
- HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
else:
- HXb = Hm( Xb )
+ HXb = numpy.asarray(Hm( Xb ))
HXb = HXb.reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
#
- # Précalcul des inversions de B et R
BI = B.getI()
RI = R.getI()
#
- # Point de démarrage de l'optimisation
Xini = selfA._parameters["InitializationPoint"]
#
# Définition de la fonction-coût
# ------------------------------
def CostFunction(x):
- _X = numpy.ravel( x ).reshape((-1,1))
+ _X = numpy.asarray(x).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( _X )
- _HX = Hm( _X ).reshape((-1,1))
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
_Innovation = Y - _HX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
return J
#
def GradientOfCostFunction(x):
- _X = numpy.ravel( x ).reshape((-1,1))
- _HX = Hm( _X )
- _HX = numpy.ravel( _HX ).reshape((-1,1))
+ _X = numpy.asarray(x).reshape((-1,1))
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
GradJb = BI * (_X - Xb)
GradJo = - Ha( (_X, RI * (Y - _HX)) )
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
if selfA._toStore("APosterioriCovariance") or \
selfA._toStore("SimulationQuantiles"):
- HessienneI = []
- nb = Xa.size
- for i in range(nb):
- _ee = numpy.zeros(nb)
- _ee[i] = 1.
- _HtEE = numpy.dot(HtM,_ee)
- HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) )
- A = numpy.linalg.inv(numpy.array( HessienneI ))
- if min(A.shape) != max(A.shape):
- raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
- if (numpy.diag(A) < 0).any():
- raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
- if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
- try:
- L = numpy.linalg.cholesky( A )
- except:
- raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
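+            # A posteriori covariance as the inverse of the Gauss-Newton
+            # Hessian of the cost: A ~ (B^{-1} + H'^T R^{-1} H')^{-1}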
if selfA._toStore("APosterioriCovariance"):
selfA.StoredVariables["APosterioriCovariance"].store( A )
if selfA._toStore("JacobianMatrixAtOptimum"):
elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
#
+    # Additional calculations and/or storage
+    # --------------------------------------
if selfA._toStore("Innovation") or \
selfA._toStore("SigmaObs2") or \
selfA._toStore("MahalanobisConsistency") or \
def Un(_step):
if U is not None:
if hasattr(U,"store") and 1<=_step<len(U) :
- _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
+ _Un = numpy.ravel( U[_step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ _Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- _Un = numpy.asmatrix(numpy.ravel( U )).T
+ _Un = numpy.ravel( U ).reshape((-1,1))
else:
_Un = None
return _Un
def CmUn(_xn,_un):
        if Cm is not None and _un is not None: # Caution: if Cm is also embedded in M, it is counted twice!
_Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
- _CmUn = _Cm * _un
+ _CmUn = (_Cm @ _un).reshape((-1,1))
else:
_CmUn = 0.
return _CmUn
    selfA.DirectCalculation = [None,] # Step 0 is not observed
    selfA.DirectInnovation  = [None,] # Step 0 is not observed
def CostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
+ _X = numpy.asarray(x).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( _X )
- Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+ Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
selfA.DirectCalculation = [None,]
selfA.DirectInnovation = [None,]
Jo = 0.
_Xn = _X
for step in range(0,duration-1):
if hasattr(Y,"store"):
- _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+ _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
else:
- _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+ _Ynpu = numpy.ravel( Y ).reshape((-1,1))
_Un = Un(step)
#
            # Evolution step
if selfA._parameters["EstimationOf"] == "State":
- _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
+ _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
elif selfA._parameters["EstimationOf"] == "Parameters":
pass
#
#
            # Observation discrepancy step
if selfA._parameters["EstimationOf"] == "State":
- _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
+ _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
elif selfA._parameters["EstimationOf"] == "Parameters":
- _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
+ _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
#
            # State storage
selfA.DirectCalculation.append( _Xn )
selfA.DirectInnovation.append( _YmHMX )
#
            # Accumulation into the observation functional
- Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
+ Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
J = Jb + Jo
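+        # 4DVAR cost: J(x0) = 0.5 (x0-Xb)^T B^{-1} (x0-Xb)
+        #                   + 0.5 sum_k (Yk - Hk(Xk))^T R^{-1} (Yk - Hk(Xk))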
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
return J
#
def GradientOfCostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
+ _X = numpy.asarray(x).reshape((-1,1))
GradJb = BI * (_X - Xb)
GradJo = 0.
for step in range(duration-1,0,-1):
RI = R.getI()
#
__n = Xb.size
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = Xb
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
- Xn_predicted = Mt * Xn
+ Xn_predicted = Mt @ Xn
            if Cm is not None and Un is not None: # Caution: if Cm is also embedded in M, it is counted twice!
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
Pn_predicted = Q + Mt * (Pn * Ma)
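+            # i.e. forecast covariance Pf = M Pn M^T + Q, Ma being the adjoint of M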
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
Pn_predicted = Pn
#
if selfA._parameters["EstimationOf"] == "State":
- HX_predicted = Ht * Xn_predicted
+ HX_predicted = Ht @ Xn_predicted
_Innovation = Ynpu - HX_predicted
elif selfA._parameters["EstimationOf"] == "Parameters":
- HX_predicted = Ht * Xn_predicted
+ HX_predicted = Ht @ Xn_predicted
_Innovation = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Caution: if Cm is also embedded in H, it is counted twice!
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
Xn = Xn_predicted + Kn * _Innovation
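+        # Standard Kalman update: Kn = Pf H^T (R + H Pf H^T)^{-1}, Xa = Xf + Kn d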
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
- J = Jb + Jo
- selfA.StoredVariables["CostFunctionJb"].store( Jb )
- selfA.StoredVariables["CostFunctionJo"].store( Jo )
- selfA.StoredVariables["CostFunctionJ" ].store( J )
- #
- if selfA._toStore("IndexOfOptimum") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("CostFunctionJAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
- or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
- if selfA._toStore("IndexOfOptimum"):
- selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
- if selfA._toStore("CurrentOptimum"):
- selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
- if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
- if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
- if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
- if selfA._toStore("CostFunctionJAtCurrentOptimum"):
- selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( Pn )
- if selfA._parameters["EstimationOf"] == "Parameters" \
- and J < previousJMinimum:
- previousJMinimum = J
- XaMin = Xa
- if selfA._toStore("APosterioriCovariance"):
- covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
- #
- # Stockage final supplémentaire de l'optimum en estimation de paramètres
- # ----------------------------------------------------------------------
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- selfA.StoredVariables["Analysis"].store( XaMin )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
- #
- return 0
-
-# ==============================================================================
-def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
- """
- Constrained Unscented Kalman Filter
- """
- if selfA._parameters["EstimationOf"] == "Parameters":
- selfA._parameters["StoreInternalVariables"] = True
- selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
- #
- L = Xb.size
- Alpha = selfA._parameters["Alpha"]
- Beta = selfA._parameters["Beta"]
- if selfA._parameters["Kappa"] == 0:
- if selfA._parameters["EstimationOf"] == "State":
- Kappa = 0
- elif selfA._parameters["EstimationOf"] == "Parameters":
- Kappa = 3 - L
- else:
- Kappa = selfA._parameters["Kappa"]
- Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
- Gamma = math.sqrt( L + Lambda )
- #
- Ww = []
- Ww.append( 0. )
- for i in range(2*L):
- Ww.append( 1. / (2.*(L + Lambda)) )
- #
- Wm = numpy.array( Ww )
- Wm[0] = Lambda / (L + Lambda)
- Wc = numpy.array( Ww )
- Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
- #
- # Opérateurs
- Hm = HO["Direct"].appliedControledFormTo
- #
- if selfA._parameters["EstimationOf"] == "State":
- Mm = EM["Direct"].appliedControledFormTo
- #
- if CM is not None and "Tangent" in CM and U is not None:
- Cm = CM["Tangent"].asMatrix(Xb)
- else:
- Cm = None
- #
- # Durée d'observation et tailles
- if hasattr(Y,"stepnumber"):
- duration = Y.stepnumber()
- __p = numpy.cumprod(Y.shape())[-1]
- else:
- duration = 2
- __p = numpy.array(Y).size
- #
- # Précalcul des inversions de B et R
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- BI = B.getI()
- RI = R.getI()
- #
- __n = Xb.size
- #
- if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
- Xn = Xb
- if hasattr(B,"asfullmatrix"):
- Pn = B.asfullmatrix(__n)
- else:
- Pn = B
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- selfA.StoredVariables["Analysis"].store( Xb )
- if selfA._toStore("APosterioriCovariance"):
- selfA.StoredVariables["APosterioriCovariance"].store( Pn )
- elif selfA._parameters["nextStep"]:
- Xn = selfA._getInternalState("Xn")
- Pn = selfA._getInternalState("Pn")
- #
- if selfA._parameters["EstimationOf"] == "Parameters":
- XaMin = Xn
- previousJMinimum = numpy.finfo(float).max
- #
- for step in range(duration-1):
- if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
- else:
- Ynpu = numpy.ravel( Y ).reshape((__p,1))
- #
- if U is not None:
- if hasattr(U,"store") and len(U)>1:
- Un = numpy.ravel( U[step] ).reshape((-1,1))
- elif hasattr(U,"store") and len(U)==1:
- Un = numpy.ravel( U[0] ).reshape((-1,1))
- else:
- Un = numpy.ravel( U ).reshape((-1,1))
- else:
- Un = None
- #
- Pndemi = numpy.linalg.cholesky(Pn)
- Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
- nbSpts = 2*Xn.size+1
- #
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- for point in range(nbSpts):
- Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
- #
- XEtnnp = []
- for point in range(nbSpts):
- if selfA._parameters["EstimationOf"] == "State":
- XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
- if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
- Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
- XEtnnpi = XEtnnpi + Cm * Un
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
- elif selfA._parameters["EstimationOf"] == "Parameters":
- # --- > Par principe, M = Id, Q = 0
- XEtnnpi = Xnp[:,point]
- XEtnnp.append( XEtnnpi )
- XEtnnp = numpy.hstack( XEtnnp )
- #
- Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
- #
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
- #
- if selfA._parameters["EstimationOf"] == "State": Pnm = Q
- elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
- for point in range(nbSpts):
- Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
- #
- if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
- Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
- else:
- Pnmdemi = numpy.linalg.cholesky(Pnm)
- #
- Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
- #
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- for point in range(nbSpts):
- Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
- #
- Ynnp = []
- for point in range(nbSpts):
- if selfA._parameters["EstimationOf"] == "State":
- Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
- elif selfA._parameters["EstimationOf"] == "Parameters":
- Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
- Ynnp.append( Ynnpi )
- Ynnp = numpy.hstack( Ynnp )
- #
- Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
- #
- Pyyn = R
- Pxyn = 0.
- for point in range(nbSpts):
- Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
- Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
- #
- _Innovation = Ynpu - Yncm
- if selfA._parameters["EstimationOf"] == "Parameters":
- if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
- _Innovation = _Innovation - Cm * Un
- #
- Kn = Pxyn * Pyyn.I
- Xn = Xncm + Kn * _Innovation
- Pn = Pnm - Kn * Pyyn * Kn.T
- #
- if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
- Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
- #
- Xa = Xn # Pointeurs
- #--------------------------
- selfA._setInternalState("Xn", Xn)
- selfA._setInternalState("Pn", Pn)
- #--------------------------
- #
- selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
- # ---> avec analysis
- selfA.StoredVariables["Analysis"].store( Xa )
- if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
- selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
- if selfA._toStore("InnovationAtCurrentAnalysis"):
- selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
- # ---> avec current state
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CurrentState"):
- selfA.StoredVariables["CurrentState"].store( Xn )
- if selfA._toStore("ForecastState"):
- selfA.StoredVariables["ForecastState"].store( Xncm )
- if selfA._toStore("ForecastCovariance"):
- selfA.StoredVariables["ForecastCovariance"].store( Pnm )
- if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( Xncm - Xa )
- if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
- if selfA._toStore("SimulatedObservationAtCurrentState") \
- or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
- # ---> autres
- if selfA._parameters["StoreInternalVariables"] \
- or selfA._toStore("CostFunctionJ") \
- or selfA._toStore("CostFunctionJb") \
- or selfA._toStore("CostFunctionJo") \
- or selfA._toStore("CurrentOptimum") \
- or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
RI = R.getI()
#
__n = Xb.size
+ nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = Xb
else:
Un = None
#
- Pndemi = numpy.linalg.cholesky(Pn)
+ Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
nbSpts = 2*Xn.size+1
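+        # Unscented sampling: the 2L+1 sigma points are the columns of
+        # [Xn, Xn+Gamma*sqrt(Pn), Xn-Gamma*sqrt(Pn)]; sqrtm is used rather
+        # than a Cholesky factor, presumably to tolerate semi-definite Pn.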
#
XEtnnp = []
for point in range(nbSpts):
if selfA._parameters["EstimationOf"] == "State":
- XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
+ XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                if Cm is not None and Un is not None: # Caution: if Cm is also embedded in M, it is counted twice!
Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
- XEtnnpi = XEtnnpi + Cm * Un
+ XEtnnpi = XEtnnpi + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
XEtnnpi = Xnp[:,point]
- XEtnnp.append( XEtnnpi )
- XEtnnp = numpy.hstack( XEtnnp )
+ XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
+ XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
#
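+        # Weighted empirical mean of the propagated sigma points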
- Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
+ Xncm = ( XEtnnp * Wm ).sum(axis=1)
#
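+        # Weighted empirical covariance of the propagated points, plus the
+        # model noise Q in state estimation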
if selfA._parameters["EstimationOf"] == "State": Pnm = Q
elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
for point in range(nbSpts):
- Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
+                Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
#
- Pnmdemi = numpy.linalg.cholesky(Pnm)
+ Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
#
- Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
+ Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
#
Ynnp = []
for point in range(nbSpts):
if selfA._parameters["EstimationOf"] == "State":
- Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
+ Ynnpi = Hm( (Xnnp[:,point], None) )
elif selfA._parameters["EstimationOf"] == "Parameters":
- Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
- Ynnp.append( Ynnpi )
- Ynnp = numpy.hstack( Ynnp )
+ Ynnpi = Hm( (Xnnp[:,point], Un) )
+ Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
+ Ynnp = numpy.concatenate( Ynnp, axis=1 )
#
- Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
+ Yncm = ( Ynnp * Wm ).sum(axis=1)
#
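+        # Innovation covariance Pyy (including R) and state-observation
+        # cross covariance Pxy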
Pyyn = R
Pxyn = 0.
for point in range(nbSpts):
- Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
- Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
+                Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+                Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
#
- _Innovation = Ynpu - Yncm
+ _Innovation = Ynpu - Yncm.reshape((-1,1))
if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Caution: if Cm is also embedded in H, it is counted twice!
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
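+        # Unscented update: Kn = Pxy Pyy^{-1}, Xa = Xmean + Kn d,
+        # Pa = Pf - Kn Pyy Kn^T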
-        Kn = Pxyn * Pyyn.I
-        Xn = Xncm + Kn * _Innovation
-        Pn = Pnm - Kn * Pyyn * Kn.T
+        Kn = Pxyn @ numpy.linalg.inv(Pyyn)
+        Xn = Xncm.reshape((-1,1)) + Kn @ _Innovation
+        Pn = Pnm - Kn @ (Pyyn @ Kn.T)
#
        Xa = Xn # Pointers
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
#
# Initialisations
# ---------------
- #
- # Opérateurs
Hm = HO["Direct"].appliedTo
Ha = HO["Adjoint"].appliedInXTo
#
- # Précalcul des inversions de B et R
BT = B.getT()
RI = R.getI()
#
- # Point de démarrage de l'optimisation
- Xini = numpy.zeros(Xb.shape)
+ Xini = numpy.zeros(Xb.size)
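+    # i.e. the minimisation starts from v = 0, which corresponds to X = Xb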
#
    # Cost function definition
    # ------------------------
def CostFunction(v):
- _V = numpy.asmatrix(numpy.ravel( v )).T
- _X = Xb + B * _V
+ _V = numpy.asarray(v).reshape((-1,1))
+ _X = Xb + (B @ _V).reshape((-1,1))
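+        # B-preconditioned change of variables: the state is X = Xb + B v,
+        # so the background term is evaluated in v-space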
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( _X )
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
_Innovation = Y - _HX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
if selfA._toStore("InnovationAtCurrentState"):
selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
#
- Jb = float( 0.5 * _V.T * BT * _V )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * _V.T * (BT * _V) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
return J
#
def GradientOfCostFunction(v):
- _V = numpy.asmatrix(numpy.ravel( v )).T
- _X = Xb + B * _V
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _V = numpy.asarray(v).reshape((-1,1))
+ _X = Xb + (B @ _V).reshape((-1,1))
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
GradJb = BT * _V
GradJo = - Ha( (_X, RI * (Y - _HX)) )
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
# ----------------------------------------------------------------
if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
- Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
else:
- Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
+            Minimum = Xb + B * Minimum.reshape((-1,1)) # Not @: B may be a Covariance object
#
- # Obtention de l'analyse
- # ----------------------
Xa = Minimum
+ #--------------------------
#
selfA.StoredVariables["Analysis"].store( Xa )
#
else:
HXa = Hm( Xa )
#
- # Calcul de la covariance d'analyse
- # ---------------------------------
if selfA._toStore("APosterioriCovariance") or \
selfA._toStore("SimulationQuantiles") or \
selfA._toStore("JacobianMatrixAtOptimum") or \
if selfA._toStore("APosterioriCovariance") or \
selfA._toStore("SimulationQuantiles"):
BI = B.getI()
- HessienneI = []
- nb = Xa.size
- for i in range(nb):
- _ee = numpy.matrix(numpy.zeros(nb)).T
- _ee[i] = 1.
- _HtEE = numpy.dot(HtM,_ee)
- _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
- HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
- HessienneI = numpy.matrix( HessienneI )
- A = HessienneI.I
- if min(A.shape) != max(A.shape):
- raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
- if (numpy.diag(A) < 0).any():
- raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
- if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
- try:
- L = numpy.linalg.cholesky( A )
- except:
- raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+ A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
if selfA._toStore("APosterioriCovariance"):
selfA.StoredVariables["APosterioriCovariance"].store( A )
if selfA._toStore("JacobianMatrixAtOptimum"):
selfA._toStore("OMB"):
d = Y - HXb
if selfA._toStore("Innovation"):
- selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ selfA.StoredVariables["Innovation"].store( d )
if selfA._toStore("BMA"):
selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if selfA._toStore("OMA"):
selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
if selfA._toStore("OMB"):
- selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+ selfA.StoredVariables["OMB"].store( d )
if selfA._toStore("SigmaObs2"):
TraceR = R.trace(Y.size)
- selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+ selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
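+        # A posteriori consistency estimate of the observation error variance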
if selfA._toStore("MahalanobisConsistency"):
selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
if selfA._toStore("SimulationQuantiles"):
QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
if selfA._toStore("SimulatedObservationAtBackground"):
- selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if selfA._toStore("SimulatedObservationAtOptimum"):
- selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
return 0