# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
__doc__ = """
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.ravel( X ).reshape((-1,1))
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
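# A minimal sketch of the "triplet" protocol expected by ExecuteFunction, as
# used by the multiprocessing Jacobian computation further below (the path and
# module names here are purely hypothetical):
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/path", # directory added to sys.path
#       "__userFunction__modl" : "usermodule",      # module name, without extension
#       "__userFunction__name" : "someFunction",    # function name inside the module
#   }
#   HX = ExecuteFunction( (numpy.arange(3.), None, funcrepr) )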
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When
    an object is created by providing a function "Function", one gets an
    object that has the 3 methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD approximation is controlled either by the
    multiplicative increment "increment", worth 1% by default, or by the fixed
    increment "dX", which is then multiplied by "increment" (hence in %), and
    centered FD are computed if the boolean "centeredDF" is true.
    """
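    # Illustrative usage sketch (the user function "H" below is an assumption,
    # not part of the library):
    #
    #   def H(x): return numpy.array([x[0]**2, x[0]*x[1]])
    #   FDA = FDApproximation( Function = H, increment = 0.01 )
    #   HX  = FDA.DirectOperator( numpy.array([1., 2.]) )
    #   Jac = FDA.TangentOperator( (numpy.array([1., 2.]), None) )              # full Jacobian
    #   HdX = FDA.TangentOperator( (numpy.array([1., 2.]), numpy.array([0.1, 0.])) )  # Jacobian times dX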
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            reducingMemoryUse     = False,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        #
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Calculations in multiprocessing: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        self.__mfEnabled = bool(mfEnabled)
        logging.debug("FDA Calculations in multifunction mode: %s"%(self.__mfEnabled,))
        #
        self.__rmEnabled = bool(reducingMemoryUse)
        logging.debug("FDA Calculations with memory reduction: %s"%(self.__rmEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC  = True
            self.__tolerBP  = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        logging.debug("FDA Calculations with duplicates reduction: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection: %.2e"%self.__tolerBP)
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Calculations in multiprocessing: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Calculations in multiprocessing: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.ravel( dX )
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already computed, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
        "Incremental product of a columnwise list matrix with a vector"
        if not isinstance(__LMatrix, (list,tuple)):
            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
        if __dotWith is not None:
            __Idwx = numpy.ravel( __dotWith )
            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
            __Produit = numpy.zeros(__LMatrix[0].size)
            for i, col in enumerate(__LMatrix):
                __Produit += float(__Idwx[i]) * col
            return __Produit
        elif __dotTWith is not None:
            _Idwy = numpy.ravel( __dotTWith ).T
            assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
            __Produit = numpy.zeros(len(__LMatrix))
            for i, col in enumerate(__LMatrix):
                __Produit[i] = float( _Idwy @ col )
            return __Produit
        else:
            return None
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct computation using the given function.

        NB: the extraArgs are present to ensure the compatibility of calls,
        but must not be passed here to the user function.
        """
        logging.debug("FDA Computation of DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
        """
        Computation of the tangent operator as the Jacobian matrix by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the nominal point X, which is a
        numpy.ndarray.

        Centered finite differences (2nd order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], building X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by (2*dX[i])
        3/ Each result, component by component, becomes a column of the
           Jacobian matrix

        Non-centered finite differences (1st order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], building X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by dX[i]
        4/ Each result, component by component, becomes a column of the
           Jacobian matrix
        """
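        # As a concrete instance of the centered scheme: for H(x) = x**2 at
        # X = (2.,) with dX = (0.01,), the single Jacobian column is
        #   ( H(2.01) - H(1.99) ) / (2*0.01) = (4.0401 - 3.9601) / 0.02 = 4.0,
        # which here equals the exact derivative 2*X.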
        logging.debug("FDA Start of the Jacobian matrix computation")
        logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.ravel( X )
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.ravel( self.__dX )
        assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already computed, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Jacobian matrix computation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
            logging.debug("FDA End of the Jacobian matrix computation")
            if dotWith is not None:
                return numpy.dot(_Jacobienne, numpy.ravel( dotWith ))
            elif dotTWith is not None:
                return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
        else:
            logging.debug("FDA   Jacobian matrix computation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi  = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            if (dotWith is not None) or (dotTWith is not None):
                __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
            else:
                __Produit = None
            if __Produit is None or self.__avoidRC:
                _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
                if self.__avoidRC:
                    if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                    while len(self.__listJPCP) > self.__lenghtRJ:
                        self.__listJPCP.pop(0)
                        self.__listJPCI.pop(0)
                        self.__listJPCR.pop(0)
                        self.__listJPPN.pop(0)
                        self.__listJPIN.pop(0)
                    self.__listJPCP.append( copy.copy(_X) )
                    self.__listJPCI.append( copy.copy(_dX) )
                    self.__listJPCR.append( copy.copy(_Jacobienne) )
                    self.__listJPPN.append( numpy.linalg.norm(_X) )
                    self.__listJPIN.append( numpy.linalg.norm(_dX) ) # Increment norm, consistent with the duplicate detection scale
            logging.debug("FDA End of the Jacobian matrix computation")
            if __Produit is not None:
                return __Produit
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Computation of the tangent operator using the Jacobian matrix.

        NB: the extraArgs are present to ensure the compatibility of calls,
        but must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            _Jacobienne = self.TangentMatrix( X )
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X, applied to dX
            # ------------------------------------------------------------
            _HtX = self.TangentMatrix( X, dotWith = dX )
            if self.__mfEnabled: return [_HtX,]
            else:                return _HtX
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Computation of the adjoint operator using the Jacobian matrix.

        NB: the extraArgs are present to ensure the compatibility of calls,
        but must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            _JacobienneT = self.TangentMatrix( X ).T
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the value of the adjoint at X, applied to Y
            # ----------------------------------------------------------
            _HaY = self.TangentMatrix( X, dotTWith = Y )
            if self.__mfEnabled: return [_HaY,]
            else:                return _HaY
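# A classical consistency check of the TangentOperator/AdjointOperator pair of
# an FDApproximation instance "FDA" (assumed already built) is the dot-product
# test < H'dX, Y > == < dX, H'*.Y >, which holds up to round-off here because
# both operators share the same finite-difference Jacobian matrix (the
# tolerance below is indicative):
#
#   HtdX = FDA.TangentOperator( (X, dX) )
#   HaY  = FDA.AdjointOperator( (X, Y) )
#   assert abs( numpy.ravel(HtdX) @ numpy.ravel(Y)
#             - numpy.ravel(dX) @ numpy.ravel(HaY) ) < 1.e-10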
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return _Perturbations
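# Illustrative sketch: drawing 100 members around a 2D center with a unit
# covariance (states are stored columnwise):
#
#   E = EnsembleOfCenteredPerturbations( [0., 0.], numpy.identity(2), 100 )
#   # E.shape == (2, 100), and E.mean(axis=1) is close to the center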
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N centered random anomalies from Zr, according
        to the handwritten notes of MB, consistent with the code of PS, with
        eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            _Perturbations = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                _Perturbations = _bgcenter + _Zca
            else:
                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    #
    return _Perturbations
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
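# For example, with the states stored columnwise:
#
#   EnsembleMean( numpy.array([[1., 2., 3.], [4., 5., 6.]]) )
#   # -> array([[2.], [5.]])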
# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
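# For example, with the default mean and no normalisation:
#
#   EnsembleOfAnomalies( numpy.array([[1., 2., 3.], [4., 5., 6.]]) )
#   # -> array([[-1., 0., 1.], [-1., 0., 1.]])
#
# The ensemble algorithms below often pass __Normalisation = 1/sqrt(m-1), so
# that the anomalies matrix A directly satisfies A @ A.T ~ covariance.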
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimate
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Ensure symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Ensure positivity
        __epsilon    = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
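# For example, on the columnwise ensemble [[1., 2., 3.], [4., 5., 6.]], the
# anomalies are [[-1., 0., 1.], [-1., 0., 1.]], so the robust branch gives
# ( A @ A.T ) / (m-1) = [[1., 1.], [1., 1.]], plus the tiny diagonal shift
# mpr * trace ensuring positivity.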
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Case of a null or nearly null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Case of a null or nearly null covariance
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Case of a null or nearly null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Case of a null or nearly null covariance
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Case of a covariance that is a multiple of the identity
        __zero = 0.
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
        #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Case of a diagonal covariance with non identical variances
        __zero = numpy.zeros(__n)
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
        #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Case of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
        #
    elif isinstance(__Covariance, numpy.ndarray):
        # Case of a full numpy covariance, knowing that this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
        #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
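# Illustrative sketch with a plain numpy covariance (the last dispatch case):
#
#   E = numpy.zeros((2,100))
#   E = EnsemblePerturbationWithGivenCovariance( E, 0.01*numpy.identity(2), __Seed=1234 )
#   # each of the 100 members now carries an independent N(0, 0.01*I) perturbation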
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either on Pb or Pa, or on the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
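# For example, multiplicative inflation of ensemble anomalies by a factor a
# rescales each member around the ensemble mean, Xi <- Xm + a*(Xi - Xm):
#
#   CovarianceInflation( numpy.array([[1., 2., 3.]]),
#                        "MultiplicativeOnBackgroundAnomalies", 2. )
#   # -> array([[0., 2., 4.]])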
# ==============================================================================
def HessienneEstimation(selfA, nb, HaM, HtM, BI, RI):
    "Estimation of the Hessian"
    #
    HessienneI = []
    for i in range(int(nb)):
        _ee    = numpy.zeros((nb,1))
        _ee[i] = 1.
        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
    #
    A = numpy.linalg.inv(numpy.array( HessienneI ))
    #
    if min(A.shape) != max(A.shape):
        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, although it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
    if (numpy.diag(A) < 0).any():
        raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
    if logging.getLogger().level < logging.WARNING: # This verification is done only in debug mode
        try:
            numpy.linalg.cholesky( A )
        except Exception:
            raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    #
    return A
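# The function above assembles, column by column, the matrix B^{-1} + H^T R^{-1} H
# (the Gauss-Newton approximation of the cost function Hessian, built here from
# the adjoint matrix HaM and the tangent matrix HtM), and returns its inverse A,
# the classical a posteriori covariance estimate.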
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of the bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Has priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,(LBounds[:,0]-_Xa).reshape((-1,1)))),axis=1).reshape((-1,1))
                dXr = numpy.min(numpy.hstack((dXr,(LBounds[:,1]-_Xa).reshape((-1,1)))),axis=1).reshape((-1,1))
            dYr = (HtM @ dXr).reshape((-1,1))
            Yr = HXa.reshape((-1,1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.ravel( Hm( Xr ) )
        else:
            raise ValueError("Quantile simulations have to be either Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Extraction of the quantiles
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0
# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Force the bounds to be numerical values, except if they are globally None"
    # Keep the default None value when there are no bounds at all
    if __Bounds is None: return None
    # Convert every individual None bound to +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
    return __Bounds
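# For example, None bounds become the largest representable floats:
#
#   ForceNumericBounds( [[None, 10.], [0., None]] )
#   # -> array([[-sys.float_info.max, 10.], [0., sys.float_info.max]])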
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenter the bounds around 0, except if they are globally None"
    # Keep the default None value when there are no bounds at all
    if __Bounds is None: return None
    # Recenter the numerical values of the bounds
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True):
    "Apply numerical bounds to a point"
    # Keep the default value when there are no bounds at all
    if __Bounds is None: return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:,0].reshape(__Vector.shape),
            __Bounds[:,1].reshape(__Vector.shape),
            )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
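# For example, clipping a point componentwise into [0, 1] bounds:
#
#   ApplyBounds( numpy.array([-1., 0.5, 2.]), numpy.array([[0., 1.]]*3) )
#   # -> array([0. , 0.5, 1. ])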
# ==============================================================================
def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
    "Recenter the ensemble Xn around the 3DVAR analysis"
    #
    Xf = EnsembleMean( __EnXf )
    Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
    Pf = (1 - __Betaf) * __B + __Betaf * Pf
    #
    selfB = PartialAlgorithm("3DVAR")
    selfB._parameters["Minimizer"] = "LBFGSB"
    selfB._parameters["MaximumNumberOfSteps"] = 15000
    selfB._parameters["CostDecrementTolerance"] = 1.e-7
    selfB._parameters["ProjectedGradientTolerance"] = -1
    selfB._parameters["GradientNormTolerance"] = 1.e-05
    selfB._parameters["StoreInternalVariables"] = False
    selfB._parameters["optiprint"] = -1
    selfB._parameters["optdisp"] = 0
    selfB._parameters["Bounds"] = None
    selfB._parameters["InitializationPoint"] = Xf
    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
    Xa = selfB.get("Analysis")[-1].reshape((-1,1))
    del selfB
    #
    return Xa + EnsembleOfAnomalies( __EnXn )
# ==============================================================================
def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Unscented Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    L     = Xb.size
    Alpha = selfA._parameters["Alpha"]
    Beta  = selfA._parameters["Beta"]
    if selfA._parameters["Kappa"] == 0:
        if selfA._parameters["EstimationOf"] == "State":
            Kappa = 0
        elif selfA._parameters["EstimationOf"] == "Parameters":
            Kappa = 3 - L
    else:
        Kappa = selfA._parameters["Kappa"]
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma  = math.sqrt( L + Lambda )
    #
    Ww = []
    Ww.append( 0. )
    for i in range(2*L):
        Ww.append( 1. / (2.*(L + Lambda)) )
    #
    Wm = numpy.array( Ww )
    Wm[0] = Lambda / (L + Lambda)
    Wc = numpy.array( Ww )
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
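    # These are the classical unscented weights (Julier & Uhlmann). For
    # instance, with L = 3, Alpha = 1, Beta = 2, Kappa = 0, one gets
    # Lambda = 0, hence Wm = [0, 1/6, ..., 1/6] and Wc = [2, 1/6, ..., 1/6]
    # over the 2L+1 = 7 sigma points.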
    #
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        if hasattr(B,"asfullmatrix"):
            Pn = B.asfullmatrix(__n)
        else:
            Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
        nbSpts = 2*Xn.size+1
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
        #
        XEtnnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                    XEtnnpi = XEtnnpi + Cm @ Un
                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                XEtnnpi = Xnp[:,point]
            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
        #
        Xncm = ( XEtnnp * Wm ).sum(axis=1)
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
        for point in range(nbSpts):
            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
        #
        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
        else:
            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
        #
        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
        #
        Ynnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                Ynnpi = Hm( (Xnnp[:,point], None) )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                Ynnpi = Hm( (Xnnp[:,point], Un) )
            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
        Ynnp = numpy.concatenate( Ynnp, axis=1 )
        #
        Yncm = ( Ynnp * Wm ).sum(axis=1)
        #
        Pyyn = R
        Pxyn = 0.
        for point in range(nbSpts):
            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
        #
        _Innovation = Ynpu - Yncm.reshape((-1,1))
        if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is applied twice!
                _Innovation = _Innovation - Cm @ Un
        #
        Kn = Pxyn * Pyyn.I
        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
        Pn = Pnm - Kn * Pyyn * Kn.T
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xncm )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xncm - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in the parameter estimation case
    # ------------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
1095 # ==============================================================================
1096 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1098 Contrained Extended Kalman Filter
1100 if selfA._parameters["EstimationOf"] == "Parameters":
1101 selfA._parameters["StoreInternalVariables"] = True
1102 selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
1105 H = HO["Direct"].appliedControledFormTo
1107 if selfA._parameters["EstimationOf"] == "State":
1108 M = EM["Direct"].appliedControledFormTo
1110 if CM is not None and "Tangent" in CM and U is not None:
1111 Cm = CM["Tangent"].asMatrix(Xb)
1115 # Durée d'observation et tailles
1116 if hasattr(Y,"stepnumber"):
1117 duration = Y.stepnumber()
1118 __p = numpy.cumprod(Y.shape())[-1]
1121 __p = numpy.array(Y).size
1123 # Précalcul des inversions de B et R
1124 if selfA._parameters["StoreInternalVariables"] \
1125 or selfA._toStore("CostFunctionJ") \
1126 or selfA._toStore("CostFunctionJb") \
1127 or selfA._toStore("CostFunctionJo") \
1128 or selfA._toStore("CurrentOptimum") \
1129 or selfA._toStore("APosterioriCovariance"):
1135 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1138 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1139 selfA.StoredVariables["Analysis"].store( Xb )
1140 if selfA._toStore("APosterioriCovariance"):
1141 if hasattr(B,"asfullmatrix"):
1142 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1144 selfA.StoredVariables["APosterioriCovariance"].store( B )
1145 selfA._setInternalState("seed", numpy.random.get_state())
1146 elif selfA._parameters["nextStep"]:
1147 Xn = selfA._getInternalState("Xn")
1148 Pn = selfA._getInternalState("Pn")
1150 if selfA._parameters["EstimationOf"] == "Parameters":
1152 previousJMinimum = numpy.finfo(float).max
1154 for step in range(duration-1):
1155 if hasattr(Y,"store"):
1156 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1158 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1160 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1161 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1162 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1163 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1165 if selfA._parameters["EstimationOf"] == "State":
1166 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1167 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1168 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1169 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1172 if hasattr(U,"store") and len(U)>1:
1173 Un = numpy.ravel( U[step] ).reshape((-1,1))
1174 elif hasattr(U,"store") and len(U)==1:
1175 Un = numpy.ravel( U[0] ).reshape((-1,1))
1177 Un = numpy.ravel( U ).reshape((-1,1))
1181 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1182 Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1184 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1185 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1186 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1187 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1188 Xn_predicted = Xn_predicted + Cm @ Un
1189 Pn_predicted = Q + Mt * (Pn * Ma)
1190 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1191 # --- > Par principe, M = Id, Q = 0
1195 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1196 Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
1198 if selfA._parameters["EstimationOf"] == "State":
1199 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1200 _Innovation = Ynpu - HX_predicted
1201 elif selfA._parameters["EstimationOf"] == "Parameters":
1202 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1203 _Innovation = Ynpu - HX_predicted
1204 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1205 _Innovation = _Innovation - Cm @ Un
1207 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1208 Xn = Xn_predicted + Kn * _Innovation
1209 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1211 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1212 Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1215 #--------------------------
1216 selfA._setInternalState("Xn", Xn)
1217 selfA._setInternalState("Pn", Pn)
1218 #--------------------------
1220 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1221 # ---> avec analysis
1222 selfA.StoredVariables["Analysis"].store( Xa )
1223 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1224 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1225 if selfA._toStore("InnovationAtCurrentAnalysis"):
1226 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1227 # ---> avec current state
1228 if selfA._parameters["StoreInternalVariables"] \
1229 or selfA._toStore("CurrentState"):
1230 selfA.StoredVariables["CurrentState"].store( Xn )
1231 if selfA._toStore("ForecastState"):
1232 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1233 if selfA._toStore("ForecastCovariance"):
1234 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1235 if selfA._toStore("BMA"):
1236 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1237 if selfA._toStore("InnovationAtCurrentState"):
1238 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1239 if selfA._toStore("SimulatedObservationAtCurrentState") \
1240 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1241 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1243 if selfA._parameters["StoreInternalVariables"] \
1244 or selfA._toStore("CostFunctionJ") \
1245 or selfA._toStore("CostFunctionJb") \
1246 or selfA._toStore("CostFunctionJo") \
1247 or selfA._toStore("CurrentOptimum") \
1248 or selfA._toStore("APosterioriCovariance"):
1249 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1250 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1252 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1253 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1254 selfA.StoredVariables["CostFunctionJ" ].store( J )
1256 if selfA._toStore("IndexOfOptimum") \
1257 or selfA._toStore("CurrentOptimum") \
1258 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1259 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1260 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1261 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1262 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1263 if selfA._toStore("IndexOfOptimum"):
1264 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1265 if selfA._toStore("CurrentOptimum"):
1266 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1267 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1268 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1269 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1270 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1271 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1272 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1273 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1274 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1275 if selfA._toStore("APosterioriCovariance"):
1276 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1277 if selfA._parameters["EstimationOf"] == "Parameters" \
1278 and J < previousJMinimum:
1279 previousJMinimum = J
1281 if selfA._toStore("APosterioriCovariance"):
1282 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1284 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1285 # ----------------------------------------------------------------------
1286 if selfA._parameters["EstimationOf"] == "Parameters":
1287 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1288 selfA.StoredVariables["Analysis"].store( XaMin )
1289 if selfA._toStore("APosterioriCovariance"):
1290 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1291 if selfA._toStore("BMA"):
1292 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct calculation (memorization is preferred over recalculation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS    = mS.reshape((-1,__m)) # For the 1D case
            delta = RIdemi @ ( Ynpu - vZm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            sEL[LagL] = EL
            #
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Preservation of the order 0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last, incompletely updated, analyses
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
1436 # ==============================================================================
1437 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
1438 VariantM="KalmanFilterFormula",
1442 Ensemble-Transform EnKF
1444 if selfA._parameters["EstimationOf"] == "Parameters":
1445 selfA._parameters["StoreInternalVariables"] = True
1448 H = HO["Direct"].appliedControledFormTo
1450 if selfA._parameters["EstimationOf"] == "State":
1451 M = EM["Direct"].appliedControledFormTo
1453 if CM is not None and "Tangent" in CM and U is not None:
1454 Cm = CM["Tangent"].asMatrix(Xb)
1458 # Observation duration and sizes
1459 if hasattr(Y,"stepnumber"):
1460 duration = Y.stepnumber()
1461 __p = numpy.cumprod(Y.shape())[-1]
1462 else:
1463 duration = 2
1464 __p = numpy.array(Y).size
1466 # Precompute the inverses of B and R
1467 if selfA._parameters["StoreInternalVariables"] \
1468 or selfA._toStore("CostFunctionJ") \
1469 or selfA._toStore("CostFunctionJb") \
1470 or selfA._toStore("CostFunctionJo") \
1471 or selfA._toStore("CurrentOptimum") \
1472 or selfA._toStore("APosterioriCovariance"):
1473 BI = B.getI()
1474 RI = R.getI()
1475 elif VariantM != "KalmanFilterFormula":
1476 RI = R.getI()
1477 if VariantM == "KalmanFilterFormula":
1478 RIdemi = R.sqrtmI()
1480 __n = Xb.size
1481 __m = selfA._parameters["NumberOfMembers"]
1482 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
1483 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1484 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1485 selfA.StoredVariables["Analysis"].store( Xb )
1486 if selfA._toStore("APosterioriCovariance"):
1487 if hasattr(B,"asfullmatrix"):
1488 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1489 else:
1490 selfA.StoredVariables["APosterioriCovariance"].store( B )
1491 selfA._setInternalState("seed", numpy.random.get_state())
1492 elif selfA._parameters["nextStep"]:
1493 Xn = selfA._getInternalState("Xn")
1495 previousJMinimum = numpy.finfo(float).max
1497 for step in range(duration-1):
1498 numpy.random.set_state(selfA._getInternalState("seed"))
1499 if hasattr(Y,"store"):
1500 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1501 else:
1502 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1505 if hasattr(U,"store") and len(U)>1:
1506 Un = numpy.ravel( U[step] ).reshape((-1,1))
1507 elif hasattr(U,"store") and len(U)==1:
1508 Un = numpy.ravel( U[0] ).reshape((-1,1))
1509 else:
1510 Un = numpy.ravel( U ).reshape((-1,1))
1514 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1515 Xn = CovarianceInflation( Xn,
1516 selfA._parameters["InflationType"],
1517 selfA._parameters["InflationFactor"],
1520 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1521 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1523 returnSerieAsArrayMatrix = True )
1524 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1525 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1527 returnSerieAsArrayMatrix = True )
1528 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is duplicated!
1529 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1530 Xn_predicted = Xn_predicted + Cm @ Un
1531 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1532 # --- > By principle, M = Id, Q = 0
1533 Xn_predicted = EMX = Xn
1534 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1536 returnSerieAsArrayMatrix = True )
1538 # Mean of forecast and observation of forecast
1539 Xfm = EnsembleMean( Xn_predicted )
1540 Hfm = EnsembleMean( HX_predicted )
1543 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
1544 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
1546 #--------------------------
1547 if VariantM == "KalmanFilterFormula":
1548 mS = RIdemi * EaHX / math.sqrt(__m-1)
1549 mS = mS.reshape((-1,__m)) # Pour dimension 1
1550 delta = RIdemi * ( Ynpu - Hfm )
1551 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1552 vw = mT @ mS.T @ delta
1554 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1555 mU = numpy.identity(__m)
1557 EaX = EaX / math.sqrt(__m-1)
1558 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
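# The deterministic square-root update above reads
#   Xa = Xfm + EaX @ ( vw + sqrt(m-1) * T^{1/2} ),  with T = (I + mS^T mS)^{-1}:
# the vw term shifts the ensemble mean, while the T^{1/2} term rescales the
# anomalies so that the analysis covariance remains consistent.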
1559 #--------------------------
1560 elif VariantM == "Variational":
1561 HXfm = H((Xfm[:,None], Un)) # Hfm may be used instead
1562 def CostFunction(w):
1563 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1564 _Jo = 0.5 * _A.T @ (RI * _A)
1565 _Jb = 0.5 * (__m-1) * w.T @ w
1566 _J = _Jo + _Jb
1567 return float(_J)
1568 def GradientOfCostFunction(w):
1569 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1570 _GradJo = - EaHX.T @ (RI * _A)
1571 _GradJb = (__m-1) * w.reshape((__m,1))
1572 _GradJ = _GradJo + _GradJb
1573 return numpy.ravel(_GradJ)
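# In this "Variational" variant, the weights minimize in ensemble space
#   J(w) = 0.5*(m-1)*w^T w + 0.5*(d - EaHX w)^T R^{-1} (d - EaHX w),
# with d = Ynpu - HXfm: the 3DVAR cost restricted to the ensemble subspace,
# minimized by CG below instead of the closed-form transform of the
# "KalmanFilterFormula" branch.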
1574 vw = scipy.optimize.fmin_cg(
1575 f = CostFunction,
1576 x0 = numpy.zeros(__m),
1577 fprime = GradientOfCostFunction,
1582 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1583 Htb = (__m-1) * numpy.identity(__m)
1584 Hta = Hto + Htb
1586 Pta = numpy.linalg.inv( Hta )
1587 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1589 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1590 #--------------------------
1591 elif VariantM == "FiniteSize11": # Jauge Boc2011
1592 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1593 def CostFunction(w):
1594 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1595 _Jo = 0.5 * _A.T @ (RI * _A)
1596 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1597 _J = _Jo + _Jb
1598 return float(_J)
1599 def GradientOfCostFunction(w):
1600 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1601 _GradJo = - EaHX.T @ (RI * _A)
1602 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1603 _GradJ = _GradJo + _GradJb
1604 return numpy.ravel(_GradJ)
1605 vw = scipy.optimize.fmin_cg(
1606 f = CostFunction,
1607 x0 = numpy.zeros(__m),
1608 fprime = GradientOfCostFunction,
1613 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1614 Htb = __m * \
1615 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1616 / (1 + 1/__m + vw.T @ vw)**2
1617 Hta = Hto + Htb
1619 Pta = numpy.linalg.inv( Hta )
1620 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1622 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1623 #--------------------------
1624 elif VariantM == "FiniteSize15": # Jauge Boc2015
1625 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1626 def CostFunction(w):
1627 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1628 _Jo = 0.5 * _A.T @ (RI * _A)
1629 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1630 _J = _Jo + _Jb
1631 return float(_J)
1632 def GradientOfCostFunction(w):
1633 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1634 _GradJo = - EaHX.T @ (RI * _A)
1635 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1636 _GradJ = _GradJo + _GradJb
1637 return numpy.ravel(_GradJ)
1638 vw = scipy.optimize.fmin_cg(
1639 f = CostFunction,
1640 x0 = numpy.zeros(__m),
1641 fprime = GradientOfCostFunction,
1646 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1647 Htb = (__m+1) * \
1648 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1649 / (1 + 1/__m + vw.T @ vw)**2
1650 Hta = Hto + Htb
1652 Pta = numpy.linalg.inv( Hta )
1653 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1655 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1656 #--------------------------
1657 elif VariantM == "FiniteSize16": # Jauge Boc2016
1658 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1659 def CostFunction(w):
1660 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1661 _Jo = 0.5 * _A.T @ (RI * _A)
1662 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1663 _J = _Jo + _Jb
1664 return float(_J)
1665 def GradientOfCostFunction(w):
1666 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1667 _GradJo = - EaHX.T @ (RI * _A)
1668 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1669 _GradJ = _GradJo + _GradJb
1670 return numpy.ravel(_GradJ)
1671 vw = scipy.optimize.fmin_cg(
1672 f = CostFunction,
1673 x0 = numpy.zeros(__m),
1674 fprime = GradientOfCostFunction,
1679 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1680 Htb = ((__m+1) / (__m-1)) * \
1681 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1682 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1683 Hta = Hto + Htb
1685 Pta = numpy.linalg.inv( Hta )
1686 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1688 Xn = Xfm + EaX @ (vw[:,None] + EWa)
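# The "FiniteSizeNN" branches above implement the finite-size (EnKF-N)
# analysis in the gauges of Bocquet (2011) and later refinements (2015,
# 2016): the Gaussian prior on the weights is replaced by a predictive prior
# accounting for the sampling error of a size-m ensemble, which modifies _Jb
# and the Hessian Htb but keeps the same ensemble-subspace minimization
# structure as the "Variational" branch.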
1689 #--------------------------
1690 else:
1691 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1693 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1694 Xn = CovarianceInflation( Xn,
1695 selfA._parameters["InflationType"],
1696 selfA._parameters["InflationFactor"],
1699 if Hybrid == "E3DVAR":
1700 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
1701 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
1703 Xa = EnsembleMean( Xn )
1704 #--------------------------
1705 selfA._setInternalState("Xn", Xn)
1706 selfA._setInternalState("seed", numpy.random.get_state())
1707 #--------------------------
1709 if selfA._parameters["StoreInternalVariables"] \
1710 or selfA._toStore("CostFunctionJ") \
1711 or selfA._toStore("CostFunctionJb") \
1712 or selfA._toStore("CostFunctionJo") \
1713 or selfA._toStore("APosterioriCovariance") \
1714 or selfA._toStore("InnovationAtCurrentAnalysis") \
1715 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1716 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1717 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
1718 _Innovation = Ynpu - _HXa
1720 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1721 # ---> with analysis
1722 selfA.StoredVariables["Analysis"].store( Xa )
1723 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1724 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1725 if selfA._toStore("InnovationAtCurrentAnalysis"):
1726 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1727 # ---> with current state
1728 if selfA._parameters["StoreInternalVariables"] \
1729 or selfA._toStore("CurrentState"):
1730 selfA.StoredVariables["CurrentState"].store( Xn )
1731 if selfA._toStore("ForecastState"):
1732 selfA.StoredVariables["ForecastState"].store( EMX )
1733 if selfA._toStore("ForecastCovariance"):
1734 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1735 if selfA._toStore("BMA"):
1736 selfA.StoredVariables["BMA"].store( EMX - Xa )
1737 if selfA._toStore("InnovationAtCurrentState"):
1738 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1739 if selfA._toStore("SimulatedObservationAtCurrentState") \
1740 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1741 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1743 if selfA._parameters["StoreInternalVariables"] \
1744 or selfA._toStore("CostFunctionJ") \
1745 or selfA._toStore("CostFunctionJb") \
1746 or selfA._toStore("CostFunctionJo") \
1747 or selfA._toStore("CurrentOptimum") \
1748 or selfA._toStore("APosterioriCovariance"):
1749 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1750 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1751 J = Jb + Jo
1752 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1753 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1754 selfA.StoredVariables["CostFunctionJ" ].store( J )
1756 if selfA._toStore("IndexOfOptimum") \
1757 or selfA._toStore("CurrentOptimum") \
1758 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1759 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1760 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1761 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1762 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1763 if selfA._toStore("IndexOfOptimum"):
1764 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1765 if selfA._toStore("CurrentOptimum"):
1766 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1767 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1768 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1769 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1770 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1771 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1772 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1773 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1774 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1775 if selfA._toStore("APosterioriCovariance"):
1776 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1777 if selfA._parameters["EstimationOf"] == "Parameters" \
1778 and J < previousJMinimum:
1779 previousJMinimum = J
1780 XaMin = Xa
1781 if selfA._toStore("APosterioriCovariance"):
1782 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1783 # ---> For the smoothers
1784 if selfA._toStore("CurrentEnsembleState"):
1785 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1787 # Additional final storage of the optimum, for parameter estimation
1788 # ----------------------------------------------------------------------
1789 if selfA._parameters["EstimationOf"] == "Parameters":
1790 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1791 selfA.StoredVariables["Analysis"].store( XaMin )
1792 if selfA._toStore("APosterioriCovariance"):
1793 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1794 if selfA._toStore("BMA"):
1795 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1799 # ==============================================================================
1800 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1801 """
1802 Extended Kalman Filter
1803 """
1804 if selfA._parameters["EstimationOf"] == "Parameters":
1805 selfA._parameters["StoreInternalVariables"] = True
1808 H = HO["Direct"].appliedControledFormTo
1810 if selfA._parameters["EstimationOf"] == "State":
1811 M = EM["Direct"].appliedControledFormTo
1813 if CM is not None and "Tangent" in CM and U is not None:
1814 Cm = CM["Tangent"].asMatrix(Xb)
1818 # Observation duration and sizes
1819 if hasattr(Y,"stepnumber"):
1820 duration = Y.stepnumber()
1821 __p = numpy.cumprod(Y.shape())[-1]
1822 else:
1823 duration = 2
1824 __p = numpy.array(Y).size
1826 # Precompute the inverses of B and R
1827 if selfA._parameters["StoreInternalVariables"] \
1828 or selfA._toStore("CostFunctionJ") \
1829 or selfA._toStore("CostFunctionJb") \
1830 or selfA._toStore("CostFunctionJo") \
1831 or selfA._toStore("CurrentOptimum") \
1832 or selfA._toStore("APosterioriCovariance"):
1838 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1841 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1842 selfA.StoredVariables["Analysis"].store( Xb )
1843 if selfA._toStore("APosterioriCovariance"):
1844 if hasattr(B,"asfullmatrix"):
1845 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1846 else:
1847 selfA.StoredVariables["APosterioriCovariance"].store( B )
1848 selfA._setInternalState("seed", numpy.random.get_state())
1849 elif selfA._parameters["nextStep"]:
1850 Xn = selfA._getInternalState("Xn")
1851 Pn = selfA._getInternalState("Pn")
1853 if selfA._parameters["EstimationOf"] == "Parameters":
1854 XaMin = Xn
1855 previousJMinimum = numpy.finfo(float).max
1857 for step in range(duration-1):
1858 if hasattr(Y,"store"):
1859 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1860 else:
1861 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1863 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1864 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1865 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1866 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1868 if selfA._parameters["EstimationOf"] == "State":
1869 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1870 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1871 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1872 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1875 if hasattr(U,"store") and len(U)>1:
1876 Un = numpy.ravel( U[step] ).reshape((-1,1))
1877 elif hasattr(U,"store") and len(U)==1:
1878 Un = numpy.ravel( U[0] ).reshape((-1,1))
1879 else:
1880 Un = numpy.ravel( U ).reshape((-1,1))
1884 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1885 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1886 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is duplicated!
1887 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1888 Xn_predicted = Xn_predicted + Cm @ Un
1889 Pn_predicted = Q + Mt * (Pn * Ma)
1890 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1891 # --- > By principle, M = Id, Q = 0
1892 Xn_predicted = Xn
1893 Pn_predicted = Pn
1895 if selfA._parameters["EstimationOf"] == "State":
1896 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1897 _Innovation = Ynpu - HX_predicted
1898 elif selfA._parameters["EstimationOf"] == "Parameters":
1899 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1900 _Innovation = Ynpu - HX_predicted
1901 if Cm is not None and Un is not None: # Beware: if Cm is also included in H, it is duplicated!
1902 _Innovation = _Innovation - Cm @ Un
1904 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1905 Xn = Xn_predicted + Kn * _Innovation
1906 Pn = Pn_predicted - Kn * Ht * Pn_predicted
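# The three statements above are the standard Kalman analysis equations,
# written with the tangent (Ht) and adjoint (Ha) operator matrices:
#   K  = Pf Ha (R + Ht Pf Ha)^{-1}
#   xa = xf + K (y - H(xf))
#   Pa = Pf - K Ht Pf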
1908 Xa = Xn
1909 #--------------------------
1910 selfA._setInternalState("Xn", Xn)
1911 selfA._setInternalState("Pn", Pn)
1912 #--------------------------
1914 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1915 # ---> with analysis
1916 selfA.StoredVariables["Analysis"].store( Xa )
1917 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1918 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1919 if selfA._toStore("InnovationAtCurrentAnalysis"):
1920 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1921 # ---> with current state
1922 if selfA._parameters["StoreInternalVariables"] \
1923 or selfA._toStore("CurrentState"):
1924 selfA.StoredVariables["CurrentState"].store( Xn )
1925 if selfA._toStore("ForecastState"):
1926 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1927 if selfA._toStore("ForecastCovariance"):
1928 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1929 if selfA._toStore("BMA"):
1930 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1931 if selfA._toStore("InnovationAtCurrentState"):
1932 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1933 if selfA._toStore("SimulatedObservationAtCurrentState") \
1934 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1935 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1937 if selfA._parameters["StoreInternalVariables"] \
1938 or selfA._toStore("CostFunctionJ") \
1939 or selfA._toStore("CostFunctionJb") \
1940 or selfA._toStore("CostFunctionJo") \
1941 or selfA._toStore("CurrentOptimum") \
1942 or selfA._toStore("APosterioriCovariance"):
1943 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1944 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1945 J = Jb + Jo
1946 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1947 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1948 selfA.StoredVariables["CostFunctionJ" ].store( J )
1950 if selfA._toStore("IndexOfOptimum") \
1951 or selfA._toStore("CurrentOptimum") \
1952 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1953 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1954 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1955 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1956 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1957 if selfA._toStore("IndexOfOptimum"):
1958 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1959 if selfA._toStore("CurrentOptimum"):
1960 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1961 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1962 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1963 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1964 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1965 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1966 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1967 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1968 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1969 if selfA._toStore("APosterioriCovariance"):
1970 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1971 if selfA._parameters["EstimationOf"] == "Parameters" \
1972 and J < previousJMinimum:
1973 previousJMinimum = J
1974 XaMin = Xa
1975 if selfA._toStore("APosterioriCovariance"):
1976 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1978 # Additional final storage of the optimum, for parameter estimation
1979 # ----------------------------------------------------------------------
1980 if selfA._parameters["EstimationOf"] == "Parameters":
1981 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1982 selfA.StoredVariables["Analysis"].store( XaMin )
1983 if selfA._toStore("APosterioriCovariance"):
1984 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1985 if selfA._toStore("BMA"):
1986 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1990 # ==============================================================================
1991 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1992 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1993 """
1994 Iterative EnKF
1995 """
1996 if selfA._parameters["EstimationOf"] == "Parameters":
1997 selfA._parameters["StoreInternalVariables"] = True
2000 H = HO["Direct"].appliedControledFormTo
2002 if selfA._parameters["EstimationOf"] == "State":
2003 M = EM["Direct"].appliedControledFormTo
2005 if CM is not None and "Tangent" in CM and U is not None:
2006 Cm = CM["Tangent"].asMatrix(Xb)
2010 # Observation duration and sizes
2011 if hasattr(Y,"stepnumber"):
2012 duration = Y.stepnumber()
2013 __p = numpy.cumprod(Y.shape())[-1]
2014 else:
2015 duration = 2
2016 __p = numpy.array(Y).size
2018 # Precompute the inverses of B and R
2019 if selfA._parameters["StoreInternalVariables"] \
2020 or selfA._toStore("CostFunctionJ") \
2021 or selfA._toStore("CostFunctionJb") \
2022 or selfA._toStore("CostFunctionJo") \
2023 or selfA._toStore("CurrentOptimum") \
2024 or selfA._toStore("APosterioriCovariance"):
2029 __m = selfA._parameters["NumberOfMembers"]
2031 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2032 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2034 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2035 selfA.StoredVariables["Analysis"].store( Xb )
2036 if selfA._toStore("APosterioriCovariance"):
2037 if hasattr(B,"asfullmatrix"):
2038 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2039 else:
2040 selfA.StoredVariables["APosterioriCovariance"].store( B )
2041 selfA._setInternalState("seed", numpy.random.get_state())
2042 elif selfA._parameters["nextStep"]:
2043 Xn = selfA._getInternalState("Xn")
2045 previousJMinimum = numpy.finfo(float).max
2047 for step in range(duration-1):
2048 numpy.random.set_state(selfA._getInternalState("seed"))
2049 if hasattr(Y,"store"):
2050 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2051 else:
2052 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2055 if hasattr(U,"store") and len(U)>1:
2056 Un = numpy.ravel( U[step] ).reshape((-1,1))
2057 elif hasattr(U,"store") and len(U)==1:
2058 Un = numpy.ravel( U[0] ).reshape((-1,1))
2059 else:
2060 Un = numpy.ravel( U ).reshape((-1,1))
2064 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2065 Xn = CovarianceInflation( Xn,
2066 selfA._parameters["InflationType"],
2067 selfA._parameters["InflationFactor"],
2070 #--------------------------
2071 if VariantM == "IEnKF12":
2072 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2073 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2074 __j = 0
2075 Deltaw = 1
2076 if not BnotT:
2077 Ta = numpy.identity(__m)
2078 vw = numpy.zeros(__m)
2079 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2080 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2082 if BnotT:
2083 E1 = vx1 + _epsilon * EaX
2084 else:
2085 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2087 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2088 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2090 returnSerieAsArrayMatrix = True )
2091 elif selfA._parameters["EstimationOf"] == "Parameters":
2092 # --- > By principle, M = Id
2093 E2 = E1
2094 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2095 vy1 = H((vx2, Un)).reshape((__p,1))
2097 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2099 returnSerieAsArrayMatrix = True )
2100 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2102 if BnotT:
2103 EaY = (HE2 - vy2) / _epsilon
2104 else:
2105 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2107 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2108 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2109 Deltaw = - numpy.linalg.solve(mH,GradJ)
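# The loop above is a Gauss-Newton iteration in ensemble weight space, in
# the spirit of the iterative EnKF of Sakov et al. (2012) ("IEnKF12"):
# GradJ is the gradient of
#   J(w) = 0.5*w^T w + 0.5*(y - H(x(w)))^T R^{-1} (y - H(x(w))),
# mH its Gauss-Newton approximation of the Hessian, and the step
# Deltaw = -mH^{-1} GradJ is applied until |Deltaw| < _e or _jmax iterations.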
2110 vw = vw + Deltaw
2113 if not BnotT:
2114 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2116 __j = __j + 1
2118 A2 = EnsembleOfAnomalies( E2 )
2120 if BnotT:
2121 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2122 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2124 Xn = vx1 + A2
2125 #--------------------------
2126 else:
2127 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2129 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2130 Xn = CovarianceInflation( Xn,
2131 selfA._parameters["InflationType"],
2132 selfA._parameters["InflationFactor"],
2135 Xa = EnsembleMean( Xn )
2136 #--------------------------
2137 selfA._setInternalState("Xn", Xn)
2138 selfA._setInternalState("seed", numpy.random.get_state())
2139 #--------------------------
2141 if selfA._parameters["StoreInternalVariables"] \
2142 or selfA._toStore("CostFunctionJ") \
2143 or selfA._toStore("CostFunctionJb") \
2144 or selfA._toStore("CostFunctionJo") \
2145 or selfA._toStore("APosterioriCovariance") \
2146 or selfA._toStore("InnovationAtCurrentAnalysis") \
2147 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2148 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2149 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2150 _Innovation = Ynpu - _HXa
2152 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2153 # ---> with analysis
2154 selfA.StoredVariables["Analysis"].store( Xa )
2155 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2156 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2157 if selfA._toStore("InnovationAtCurrentAnalysis"):
2158 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2159 # ---> with current state
2160 if selfA._parameters["StoreInternalVariables"] \
2161 or selfA._toStore("CurrentState"):
2162 selfA.StoredVariables["CurrentState"].store( Xn )
2163 if selfA._toStore("ForecastState"):
2164 selfA.StoredVariables["ForecastState"].store( E2 )
2165 if selfA._toStore("ForecastCovariance"):
2166 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2167 if selfA._toStore("BMA"):
2168 selfA.StoredVariables["BMA"].store( E2 - Xa )
2169 if selfA._toStore("InnovationAtCurrentState"):
2170 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2171 if selfA._toStore("SimulatedObservationAtCurrentState") \
2172 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2173 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2175 if selfA._parameters["StoreInternalVariables"] \
2176 or selfA._toStore("CostFunctionJ") \
2177 or selfA._toStore("CostFunctionJb") \
2178 or selfA._toStore("CostFunctionJo") \
2179 or selfA._toStore("CurrentOptimum") \
2180 or selfA._toStore("APosterioriCovariance"):
2181 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2182 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2183 J = Jb + Jo
2184 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2185 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2186 selfA.StoredVariables["CostFunctionJ" ].store( J )
2188 if selfA._toStore("IndexOfOptimum") \
2189 or selfA._toStore("CurrentOptimum") \
2190 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2191 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2192 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2193 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2194 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2195 if selfA._toStore("IndexOfOptimum"):
2196 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2197 if selfA._toStore("CurrentOptimum"):
2198 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2199 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2200 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2201 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2202 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2203 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2204 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2205 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2206 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2207 if selfA._toStore("APosterioriCovariance"):
2208 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2209 if selfA._parameters["EstimationOf"] == "Parameters" \
2210 and J < previousJMinimum:
2211 previousJMinimum = J
2212 XaMin = Xa
2213 if selfA._toStore("APosterioriCovariance"):
2214 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2215 # ---> For the smoothers
2216 if selfA._toStore("CurrentEnsembleState"):
2217 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2219 # Additional final storage of the optimum, for parameter estimation
2220 # ----------------------------------------------------------------------
2221 if selfA._parameters["EstimationOf"] == "Parameters":
2222 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2223 selfA.StoredVariables["Analysis"].store( XaMin )
2224 if selfA._toStore("APosterioriCovariance"):
2225 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2226 if selfA._toStore("BMA"):
2227 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2231 # ==============================================================================
2232 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2239 Hm = HO["Direct"].appliedTo
2244 Xini = selfA._parameters["InitializationPoint"]
2246 HXb = numpy.ravel( Hm( Xb ) ).reshape((-1,1))
2247 Innovation = Y - HXb
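# Incremental formulation: each outer iteration relinearizes the observation
# operator around the current point Xr and minimizes, over the increment dx,
# the quadratic inner-loop cost
#   J(dx) = 0.5*dx^T B^{-1} dx + 0.5*(d - Ht dx)^T R^{-1} (d - Ht dx),
# the outer loop stopping when the cost decrease DeltaJ falls below the
# CostDecrementTolerance parameter.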
2254 Xr = Xini.reshape((-1,1))
2255 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2259 Ht = HO["Tangent"].asMatrix(Xr)
2260 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2262 # Definition of the cost function
2263 # ------------------------------
2264 def CostFunction(dx):
2265 _dX = numpy.ravel( dx ).reshape((-1,1))
2266 if selfA._parameters["StoreInternalVariables"] or \
2267 selfA._toStore("CurrentState") or \
2268 selfA._toStore("CurrentOptimum"):
2269 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2271 _HdX = numpy.ravel( _HdX ).reshape((-1,1))
2272 _dInnovation = Innovation - _HdX
2273 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2274 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2275 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2276 if selfA._toStore("InnovationAtCurrentState"):
2277 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2279 Jb = float( 0.5 * _dX.T * (BI * _dX) )
2280 Jo = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
2281 J = Jb + Jo
2283 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2284 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2285 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2286 selfA.StoredVariables["CostFunctionJ" ].store( J )
2287 if selfA._toStore("IndexOfOptimum") or \
2288 selfA._toStore("CurrentOptimum") or \
2289 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2290 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2291 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2292 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2293 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2294 if selfA._toStore("IndexOfOptimum"):
2295 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2296 if selfA._toStore("CurrentOptimum"):
2297 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2298 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2299 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2300 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2301 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2302 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2303 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2304 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2305 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2308 def GradientOfCostFunction(dx):
2309 _dX = numpy.ravel( dx )
2310 _HdX = Ht @ _dX
2311 _HdX = numpy.ravel( _HdX ).reshape((-1,1))
2312 _dInnovation = Innovation - _HdX
2313 GradJb = BI * _dX
2314 GradJo = - Ht.T @ (RI * _dInnovation)
2315 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2316 return GradJ
2318 # Minimization of the functional
2319 # --------------------------------
2320 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2322 if selfA._parameters["Minimizer"] == "LBFGSB":
2323 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2324 if "0.19" <= scipy.version.version <= "1.1.0":
2325 import lbfgsbhlt as optimiseur
2326 else:
2327 import scipy.optimize as optimiseur
2328 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2329 func = CostFunction,
2330 x0 = numpy.zeros(Xini.size),
2331 fprime = GradientOfCostFunction,
2333 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2334 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2335 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2336 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2337 iprint = selfA._parameters["optiprint"],
2339 nfeval = Informations['funcalls']
2340 rc = Informations['warnflag']
2341 elif selfA._parameters["Minimizer"] == "TNC":
2342 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2343 func = CostFunction,
2344 x0 = numpy.zeros(Xini.size),
2345 fprime = GradientOfCostFunction,
2347 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2348 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2349 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2350 ftol = selfA._parameters["CostDecrementTolerance"],
2351 messages = selfA._parameters["optmessages"],
2353 elif selfA._parameters["Minimizer"] == "CG":
2354 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2355 f = CostFunction,
2356 x0 = numpy.zeros(Xini.size),
2357 fprime = GradientOfCostFunction,
2359 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2360 gtol = selfA._parameters["GradientNormTolerance"],
2361 disp = selfA._parameters["optdisp"],
2364 elif selfA._parameters["Minimizer"] == "NCG":
2365 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2366 f = CostFunction,
2367 x0 = numpy.zeros(Xini.size),
2368 fprime = GradientOfCostFunction,
2370 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2371 avextol = selfA._parameters["CostDecrementTolerance"],
2372 disp = selfA._parameters["optdisp"],
2375 elif selfA._parameters["Minimizer"] == "BFGS":
2376 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2377 f = CostFunction,
2378 x0 = numpy.zeros(Xini.size),
2379 fprime = GradientOfCostFunction,
2381 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2382 gtol = selfA._parameters["GradientNormTolerance"],
2383 disp = selfA._parameters["optdisp"],
2387 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2389 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2390 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2392 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2393 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2394 else:
2395 Minimum = Xb + Minimum.reshape((-1,1))
2397 Xr = Minimum
2398 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2399 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2400 J = selfA.StoredVariables["CostFunctionJ" ][-1]
2402 #--------------------------
2403 Xa = Xr
2404 selfA.StoredVariables["Analysis"].store( Xa )
2406 if selfA._toStore("OMA") or \
2407 selfA._toStore("SigmaObs2") or \
2408 selfA._toStore("SimulationQuantiles") or \
2409 selfA._toStore("SimulatedObservationAtOptimum"):
2410 if selfA._toStore("SimulatedObservationAtCurrentState"):
2411 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2412 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2413 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2417 if selfA._toStore("APosterioriCovariance") or \
2418 selfA._toStore("SimulationQuantiles") or \
2419 selfA._toStore("JacobianMatrixAtOptimum") or \
2420 selfA._toStore("KalmanGainAtOptimum"):
2421 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2422 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2423 if selfA._toStore("APosterioriCovariance") or \
2424 selfA._toStore("SimulationQuantiles") or \
2425 selfA._toStore("KalmanGainAtOptimum"):
2426 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2427 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2428 if selfA._toStore("APosterioriCovariance") or \
2429 selfA._toStore("SimulationQuantiles"):
2430 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2431 if selfA._toStore("APosterioriCovariance"):
2432 selfA.StoredVariables["APosterioriCovariance"].store( A )
2433 if selfA._toStore("JacobianMatrixAtOptimum"):
2434 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2435 if selfA._toStore("KalmanGainAtOptimum"):
2436 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2437 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2438 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2440 # Additional computations and/or storage
2441 # ---------------------------------------
2442 if selfA._toStore("Innovation") or \
2443 selfA._toStore("SigmaObs2") or \
2444 selfA._toStore("MahalanobisConsistency") or \
2445 selfA._toStore("OMB"):
2447 if selfA._toStore("Innovation"):
2448 selfA.StoredVariables["Innovation"].store( d )
2449 if selfA._toStore("BMA"):
2450 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2451 if selfA._toStore("OMA"):
2452 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2453 if selfA._toStore("OMB"):
2454 selfA.StoredVariables["OMB"].store( d )
2455 if selfA._toStore("SigmaObs2"):
2456 TraceR = R.trace(Y.size)
2457 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2458 if selfA._toStore("MahalanobisConsistency"):
2459 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2460 if selfA._toStore("SimulationQuantiles"):
2461 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2462 if selfA._toStore("SimulatedObservationAtBackground"):
2463 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2464 if selfA._toStore("SimulatedObservationAtOptimum"):
2465 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2469 # ==============================================================================
2470 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
2471 VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
2475 Maximum Likelihood Ensemble Filter
2477 if selfA._parameters["EstimationOf"] == "Parameters":
2478 selfA._parameters["StoreInternalVariables"] = True
2481 H = HO["Direct"].appliedControledFormTo
2483 if selfA._parameters["EstimationOf"] == "State":
2484 M = EM["Direct"].appliedControledFormTo
2486 if CM is not None and "Tangent" in CM and U is not None:
2487 Cm = CM["Tangent"].asMatrix(Xb)
2491 # Observation duration and sizes
2492 if hasattr(Y,"stepnumber"):
2493 duration = Y.stepnumber()
2494 __p = numpy.cumprod(Y.shape())[-1]
2495 else:
2496 duration = 2
2497 __p = numpy.array(Y).size
2499 # Precompute the inverses of B and R
2500 if selfA._parameters["StoreInternalVariables"] \
2501 or selfA._toStore("CostFunctionJ") \
2502 or selfA._toStore("CostFunctionJb") \
2503 or selfA._toStore("CostFunctionJo") \
2504 or selfA._toStore("CurrentOptimum") \
2505 or selfA._toStore("APosterioriCovariance"):
2510 __m = selfA._parameters["NumberOfMembers"]
2512 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2513 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2514 selfA.StoredVariables["Analysis"].store( Xb )
2515 if selfA._toStore("APosterioriCovariance"):
2516 if hasattr(B,"asfullmatrix"):
2517 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2518 else:
2519 selfA.StoredVariables["APosterioriCovariance"].store( B )
2520 selfA._setInternalState("seed", numpy.random.get_state())
2521 elif selfA._parameters["nextStep"]:
2522 Xn = selfA._getInternalState("Xn")
2524 previousJMinimum = numpy.finfo(float).max
2526 for step in range(duration-1):
2527 numpy.random.set_state(selfA._getInternalState("seed"))
2528 if hasattr(Y,"store"):
2529 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2530 else:
2531 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2534 if hasattr(U,"store") and len(U)>1:
2535 Un = numpy.ravel( U[step] ).reshape((-1,1))
2536 elif hasattr(U,"store") and len(U)==1:
2537 Un = numpy.ravel( U[0] ).reshape((-1,1))
2538 else:
2539 Un = numpy.ravel( U ).reshape((-1,1))
2543 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2544 Xn = CovarianceInflation( Xn,
2545 selfA._parameters["InflationType"],
2546 selfA._parameters["InflationFactor"],
2549 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2550 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2552 returnSerieAsArrayMatrix = True )
2553 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2554 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is duplicated!
2555 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2556 Xn_predicted = Xn_predicted + Cm @ Un
2557 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2558 # --- > By principle, M = Id, Q = 0
2559 Xn_predicted = EMX = Xn
2561 #--------------------------
2562 if VariantM == "MLEF13":
2563 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2564 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2565 Ua = numpy.identity(__m)
2566 __j = 0
2567 Deltaw = 1
2568 if not BnotT:
2569 Ta = numpy.identity(__m)
2570 vw = numpy.zeros(__m)
2571 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2572 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2574 if BnotT:
2575 E1 = vx1 + _epsilon * EaX
2576 else:
2577 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2579 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2581 returnSerieAsArrayMatrix = True )
2582 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2584 if BnotT:
2585 EaY = (HE2 - vy2) / _epsilon
2586 else:
2587 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2589 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2590 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2591 Deltaw = - numpy.linalg.solve(mH,GradJ)
2592 vw = vw + Deltaw
2595 if not BnotT:
2596 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2598 __j = __j + 1
2600 if BnotT:
2601 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2603 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
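# The "MLEF13" update above follows the Maximum Likelihood Ensemble Filter
# scheme: a Gauss-Newton iteration in weight space determines the analysis
# mean vx1, then the analysis ensemble is rebuilt around it by rotating the
# forecast anomalies with the square-root Ta (and Ua, the identity here),
# without re-propagating the ensemble members.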
2604 #--------------------------
2605 else:
2606 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2608 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2609 Xn = CovarianceInflation( Xn,
2610 selfA._parameters["InflationType"],
2611 selfA._parameters["InflationFactor"],
2614 if Hybrid == "E3DVAR":
2615 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
2616 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
2618 Xa = EnsembleMean( Xn )
2619 #--------------------------
2620 selfA._setInternalState("Xn", Xn)
2621 selfA._setInternalState("seed", numpy.random.get_state())
2622 #--------------------------
2624 if selfA._parameters["StoreInternalVariables"] \
2625 or selfA._toStore("CostFunctionJ") \
2626 or selfA._toStore("CostFunctionJb") \
2627 or selfA._toStore("CostFunctionJo") \
2628 or selfA._toStore("APosterioriCovariance") \
2629 or selfA._toStore("InnovationAtCurrentAnalysis") \
2630 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2631 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2632 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2633 _Innovation = Ynpu - _HXa
2635 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2636 # ---> with analysis
2637 selfA.StoredVariables["Analysis"].store( Xa )
2638 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2639 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2640 if selfA._toStore("InnovationAtCurrentAnalysis"):
2641 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2642 # ---> with current state
2643 if selfA._parameters["StoreInternalVariables"] \
2644 or selfA._toStore("CurrentState"):
2645 selfA.StoredVariables["CurrentState"].store( Xn )
2646 if selfA._toStore("ForecastState"):
2647 selfA.StoredVariables["ForecastState"].store( EMX )
2648 if selfA._toStore("ForecastCovariance"):
2649 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2650 if selfA._toStore("BMA"):
2651 selfA.StoredVariables["BMA"].store( EMX - Xa )
2652 if selfA._toStore("InnovationAtCurrentState"):
2653 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2654 if selfA._toStore("SimulatedObservationAtCurrentState") \
2655 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2656 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2658 if selfA._parameters["StoreInternalVariables"] \
2659 or selfA._toStore("CostFunctionJ") \
2660 or selfA._toStore("CostFunctionJb") \
2661 or selfA._toStore("CostFunctionJo") \
2662 or selfA._toStore("CurrentOptimum") \
2663 or selfA._toStore("APosterioriCovariance"):
2664 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2665 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2666 J = Jb + Jo
2667 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2668 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2669 selfA.StoredVariables["CostFunctionJ" ].store( J )
2671 if selfA._toStore("IndexOfOptimum") \
2672 or selfA._toStore("CurrentOptimum") \
2673 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2674 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2675 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2676 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2677 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2678 if selfA._toStore("IndexOfOptimum"):
2679 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2680 if selfA._toStore("CurrentOptimum"):
2681 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2682 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2683 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2684 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2685 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2686 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2687 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2688 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2689 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2690 if selfA._toStore("APosterioriCovariance"):
2691 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2692 if selfA._parameters["EstimationOf"] == "Parameters" \
2693 and J < previousJMinimum:
2694 previousJMinimum = J
2695 XaMin = Xa
2696 if selfA._toStore("APosterioriCovariance"):
2697 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2698 # ---> For the smoothers
2699 if selfA._toStore("CurrentEnsembleState"):
2700 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2702 # Additional final storage of the optimum, for parameter estimation
2703 # ----------------------------------------------------------------------
2704 if selfA._parameters["EstimationOf"] == "Parameters":
2705 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2706 selfA.StoredVariables["Analysis"].store( XaMin )
2707 if selfA._toStore("APosterioriCovariance"):
2708 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2709 if selfA._toStore("BMA"):
2710 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2714 # ==============================================================================
2726 Computer implementation of the MMQR algorithm, based on the publication:
2727 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2728 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2731 # Retrieval of initial data and information
2732 # --------------------------------------------------
2733 variables = numpy.ravel( x0 )
2734 mesures = numpy.ravel( y )
2735 increment = sys.float_info.max
2736 p = variables.size
2737 n = mesures.size
2738 quantile = float(quantile)
2740 # Computation of the MM parameters
2741 # ---------------------------
2742 tn = float(toler) / n
2743 e0 = -tn / math.log(tn)
2744 epsilon = (e0-tn)/(1+math.log(e0))
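# The epsilon above smooths the non-differentiable quantile check function,
# following the MM scheme of the publication cited in the docstring:
# residuals are reweighted by 1/(epsilon+|r|), and epsilon is sized from the
# requested tolerance (through tn and e0) so that the perturbation of the
# objective stays within toler.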
2746 # Initialization computations
2747 # ------------------------
2748 residus = mesures - numpy.ravel( func( variables ) )
2749 poids = 1./(epsilon+numpy.abs(residus))
2750 veps = 1. - 2. * quantile - residus * poids
2751 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2752 iteration = 0
2754 # Iterative search
2755 # -------------------
2756 while (increment > toler) and (iteration < maxfun) :
2757 iteration += 1
2759 Derivees = numpy.array(fprime(variables))
2760 Derivees = Derivees.reshape(n,p) # ADAO & check shape
2761 DeriveesT = Derivees.transpose()
2762 M = numpy.dot( DeriveesT , (poids.reshape((n,1)) * Derivees) )
2763 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2764 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2766 variables = variables + step
2767 if bounds is not None:
2768 # Caution: avoid an infinite loop if an interval is too small
2769 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2770 step = step/2.
2771 variables = variables - step
2772 residus = mesures - numpy.ravel( func(variables) )
2773 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2775 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2776 step = step/2.
2777 variables = variables - step
2778 residus = mesures - numpy.ravel( func(variables) )
2779 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2781 increment = lastsurrogate-surrogate
2782 poids = 1./(epsilon+numpy.abs(residus))
2783 veps = 1. - 2. * quantile - residus * poids
2784 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2788 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
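# Ecart is the quantile (check) loss at the solution: sum_i rho_q(r_i) with
# rho_q(r) = q*r if r >= 0 and (q-1)*r if r < 0, which indeed equals
# q*sum(r) - sum(negative r) as computed above.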
2790 return variables, Ecart, [n,p,iteration,increment,0]
2792 # ==============================================================================
2793 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2794 """
2795 Multi-step and multi-method 3DVAR
2796 """
2800 if selfA._parameters["EstimationOf"] == "State":
2801 M = EM["Direct"].appliedControledFormTo
2802 if CM is not None and "Tangent" in CM and U is not None:
2803 Cm = CM["Tangent"].asMatrix(Xb)
2807 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2808 Xn = numpy.ravel(Xb).reshape((-1,1))
2809 selfA.StoredVariables["Analysis"].store( Xn )
2810 if selfA._toStore("APosterioriCovariance"):
2811 if hasattr(B,"asfullmatrix"):
2812 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2813 else:
2814 selfA.StoredVariables["APosterioriCovariance"].store( B )
2815 if selfA._toStore("ForecastState"):
2816 selfA.StoredVariables["ForecastState"].store( Xn )
2817 elif selfA._parameters["nextStep"]:
2818 Xn = selfA._getInternalState("Xn")
2819 else:
2820 Xn = numpy.ravel(Xb).reshape((-1,1))
2822 if hasattr(Y,"stepnumber"):
2823 duration = Y.stepnumber()
2824 else:
2825 duration = 2
2828 for step in range(duration-1):
2829 if hasattr(Y,"store"):
2830 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2831 else:
2832 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2835 if hasattr(U,"store") and len(U)>1:
2836 Un = numpy.ravel( U[step] ).reshape((-1,1))
2837 elif hasattr(U,"store") and len(U)==1:
2838 Un = numpy.ravel( U[0] ).reshape((-1,1))
2839 else:
2840 Un = numpy.ravel( U ).reshape((-1,1))
2844 if selfA._parameters["EstimationOf"] == "State": # Forecast
2845 Xn_predicted = M( (Xn, Un) )
2846 if selfA._toStore("ForecastState"):
2847 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2848 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is duplicated!
2849 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2850 Xn_predicted = Xn_predicted + Cm @ Un
2851 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2852 # --- > By principle, M = Id, Q = 0
2853 Xn_predicted = Xn
2854 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2856 oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
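# Each cycle delegates the analysis to the supplied oneCycle routine (for
# instance a mono-step 3DVAR), with the propagated forecast Xn_predicted as
# background and B kept constant over cycles; the result is then read back
# from the "Analysis" storage to serve as the next background.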
2858 Xn = selfA.StoredVariables["Analysis"][-1]
2859 #--------------------------
2860 selfA._setInternalState("Xn", Xn)
2864 # ==============================================================================
2865 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2872 Hm = HO["Direct"].appliedTo
2874 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2875 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2876 else:
2877 HXb = Hm( Xb )
2878 HXb = numpy.ravel( HXb ).reshape((-1,1))
2879 if Y.size != HXb.size:
2880 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2881 if max(Y.shape) != max(HXb.shape):
2882 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2884 if selfA._toStore("JacobianMatrixAtBackground"):
2885 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2886 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2887 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2889 Ht = HO["Tangent"].asMatrix(Xb)
2891 HBHTpR = R + Ht * BHT
2892 Innovation = Y - HXb
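# PSAS dual formulation: the minimization below is carried out in
# observation space over w, with the state recovered as x = Xb + B H^T w,
# and cost F(w) = 0.5*w^T (H B H^T + R) w - w^T d, whose gradient is
# (H B H^T + R) w - d, with d the innovation.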
2894 Xini = numpy.zeros(Y.size)
2896 # Definition of the cost function
2897 # ------------------------------
2898 def CostFunction(w):
2899 _W = w.reshape((-1,1))
2900 if selfA._parameters["StoreInternalVariables"] or \
2901 selfA._toStore("CurrentState") or \
2902 selfA._toStore("CurrentOptimum"):
2903 selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2904 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2905 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2906 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2907 if selfA._toStore("InnovationAtCurrentState"):
2908 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2910 Jb = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2911 Jo = float( - _W.T @ Innovation )
2912 J = Jb + Jo
2914 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2915 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2916 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2917 selfA.StoredVariables["CostFunctionJ" ].store( J )
2918 if selfA._toStore("IndexOfOptimum") or \
2919 selfA._toStore("CurrentOptimum") or \
2920 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2921 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2922 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2923 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2924 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2925 if selfA._toStore("IndexOfOptimum"):
2926 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2927 if selfA._toStore("CurrentOptimum"):
2928 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2929 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2930 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2931 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2932 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2933 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2934 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2935 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2936 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2937 return J
2939 def GradientOfCostFunction(w):
2940 _W = w.reshape((-1,1))
2941 GradJb = HBHTpR @ _W
2942 GradJo = - Innovation
2943 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2944 return GradJ
2946 # Minimisation de la fonctionnelle
2947 # --------------------------------
2948 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2950 if selfA._parameters["Minimizer"] == "LBFGSB":
2951 if "0.19" <= scipy.version.version <= "1.1.0":
2952 import lbfgsbhlt as optimiseur
2953 else:
2954 import scipy.optimize as optimiseur
2955 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2956 func = CostFunction,
2957 x0 = Xini,
2958 fprime = GradientOfCostFunction,
2959 args = (),
2960 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2961 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2962 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2963 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2964 iprint = selfA._parameters["optiprint"],
2965 )
2966 nfeval = Informations['funcalls']
2967 rc = Informations['warnflag']
2968 elif selfA._parameters["Minimizer"] == "TNC":
2969 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2970 func = CostFunction,
2971 x0 = Xini,
2972 fprime = GradientOfCostFunction,
2973 args = (),
2974 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2975 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2976 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2977 ftol = selfA._parameters["CostDecrementTolerance"],
2978 messages = selfA._parameters["optmessages"],
2979 )
2980 elif selfA._parameters["Minimizer"] == "CG":
2981 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2982 f = CostFunction,
2983 x0 = Xini,
2984 fprime = GradientOfCostFunction,
2985 args = (),
2986 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2987 gtol = selfA._parameters["GradientNormTolerance"],
2988 disp = selfA._parameters["optdisp"],
2989 full_output = True,
2990 )
2991 elif selfA._parameters["Minimizer"] == "NCG":
2992 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2993 f = CostFunction,
2994 x0 = Xini,
2995 fprime = GradientOfCostFunction,
2996 args = (),
2997 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2998 avextol = selfA._parameters["CostDecrementTolerance"],
2999 disp = selfA._parameters["optdisp"],
3000 full_output = True,
3001 )
3002 elif selfA._parameters["Minimizer"] == "BFGS":
3003 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3004 f = CostFunction,
3005 x0 = Xini,
3006 fprime = GradientOfCostFunction,
3007 args = (),
3008 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3009 gtol = selfA._parameters["GradientNormTolerance"],
3010 disp = selfA._parameters["optdisp"],
3011 full_output = True,
3012 )
3013 else:
3014 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3016 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3017 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3019 # Correction pour pallier a un bug de TNC sur le retour du Minimum
3020 # ----------------------------------------------------------------
3021 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3022 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3023 else:
3024 Minimum = Xb + BHT @ Minimum.reshape((-1,1))
3027 #--------------------------
3028 Xa = Minimum
3029 selfA.StoredVariables["Analysis"].store( Xa )
3031 if selfA._toStore("OMA") or \
3032 selfA._toStore("SigmaObs2") or \
3033 selfA._toStore("SimulationQuantiles") or \
3034 selfA._toStore("SimulatedObservationAtOptimum"):
3035 if selfA._toStore("SimulatedObservationAtCurrentState"):
3036 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3037 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3038 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3039 else:
3040 HXa = Hm( Xa )
3041 HXa = HXa.reshape((-1,1))
3042 if selfA._toStore("APosterioriCovariance") or \
3043 selfA._toStore("SimulationQuantiles") or \
3044 selfA._toStore("JacobianMatrixAtOptimum") or \
3045 selfA._toStore("KalmanGainAtOptimum"):
3046 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3047 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3048 if selfA._toStore("APosterioriCovariance") or \
3049 selfA._toStore("SimulationQuantiles") or \
3050 selfA._toStore("KalmanGainAtOptimum"):
3051 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3052 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3053 if selfA._toStore("APosterioriCovariance") or \
3054 selfA._toStore("SimulationQuantiles"):
3055 BI = B.getI()
3056 RI = R.getI()
3057 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3058 if selfA._toStore("APosterioriCovariance"):
3059 selfA.StoredVariables["APosterioriCovariance"].store( A )
3060 if selfA._toStore("JacobianMatrixAtOptimum"):
3061 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3062 if selfA._toStore("KalmanGainAtOptimum"):
3063 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3064 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3065 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3067 # Calculs et/ou stockages supplémentaires
3068 # ---------------------------------------
3069 if selfA._toStore("Innovation") or \
3070 selfA._toStore("SigmaObs2") or \
3071 selfA._toStore("MahalanobisConsistency") or \
3072 selfA._toStore("OMB"):
3073 d = Y - HXb
3074 if selfA._toStore("Innovation"):
3075 selfA.StoredVariables["Innovation"].store( d )
3076 if selfA._toStore("BMA"):
3077 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3078 if selfA._toStore("OMA"):
3079 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3080 if selfA._toStore("OMB"):
3081 selfA.StoredVariables["OMB"].store( d )
3082 if selfA._toStore("SigmaObs2"):
3083 TraceR = R.trace(Y.size)
3084 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3085 if selfA._toStore("MahalanobisConsistency"):
3086 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3087 if selfA._toStore("SimulationQuantiles"):
3088 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3089 if selfA._toStore("SimulatedObservationAtBackground"):
3090 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3091 if selfA._toStore("SimulatedObservationAtOptimum"):
3092 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3094 return 0
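# ------------------------------------------------------------------------------
# Editorial sketch (added for illustration, not part of the original source):
# the PSAS dual analysis of psas3dvar on a toy problem. The helper name
# _demo_psas_dual_analysis and the numerical values are hypothetical. The dual
# cost J(w) = 0.5 * w'(HBH'+R)w - w'd is quadratic, so its exact minimizer is
# w* = (HBH'+R)^-1 d, and the state-space analysis is Xa = Xb + BH' w*.
def _demo_psas_dual_analysis():
    import numpy
    Xb = numpy.array([[1.], [2.]])            # background
    B  = numpy.eye(2)                         # background error covariance
    H  = numpy.array([[1., 0.]])              # linear observation operator
    R  = numpy.array([[0.25]])                # observation error covariance
    Y  = numpy.array([[1.8]])                 # observation
    d      = Y - H @ Xb                       # innovation
    BHT    = B @ H.T
    HBHTpR = H @ BHT + R                      # Hessian of the dual cost
    w  = numpy.linalg.solve(HBHTpR, d)        # exact minimizer of J(w)
    Xa = Xb + BHT @ w                         # back to state space
    return Xa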
3096 # ==============================================================================
3097 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
3098 VariantM="KalmanFilterFormula16",
3099 Hybrid=None,
3100 ):
3101 """
3102 Stochastic EnKF
3103 """
3104 if selfA._parameters["EstimationOf"] == "Parameters":
3105 selfA._parameters["StoreInternalVariables"] = True
3108 H = HO["Direct"].appliedControledFormTo
3110 if selfA._parameters["EstimationOf"] == "State":
3111 M = EM["Direct"].appliedControledFormTo
3113 if CM is not None and "Tangent" in CM and U is not None:
3114 Cm = CM["Tangent"].asMatrix(Xb)
3115 else:
3116 Cm = None
3118 # Durée d'observation et tailles
3119 if hasattr(Y,"stepnumber"):
3120 duration = Y.stepnumber()
3121 __p = numpy.cumprod(Y.shape())[-1]
3122 else:
3123 duration = 2
3124 __p = numpy.array(Y).size
3126 # Précalcul des inversions de B et R
3127 if selfA._parameters["StoreInternalVariables"] \
3128 or selfA._toStore("CostFunctionJ") \
3129 or selfA._toStore("CostFunctionJb") \
3130 or selfA._toStore("CostFunctionJo") \
3131 or selfA._toStore("CurrentOptimum") \
3132 or selfA._toStore("APosterioriCovariance"):
3133 BI = B.getI()
3134 RI = R.getI()
3137 __m = selfA._parameters["NumberOfMembers"]
3138 __n = Xb.size
3139 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3140 else: Rn = R
3142 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3143 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3144 else: Pn = B
3145 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3146 selfA.StoredVariables["Analysis"].store( Xb )
3147 if selfA._toStore("APosterioriCovariance"):
3148 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3149 selfA._setInternalState("seed", numpy.random.get_state())
3150 elif selfA._parameters["nextStep"]:
3151 Xn = selfA._getInternalState("Xn")
3153 previousJMinimum = numpy.finfo(float).max
3155 for step in range(duration-1):
3156 numpy.random.set_state(selfA._getInternalState("seed"))
3157 if hasattr(Y,"store"):
3158 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3159 else:
3160 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3162 if U is not None:
3163 if hasattr(U,"store") and len(U)>1:
3164 Un = numpy.ravel( U[step] ).reshape((-1,1))
3165 elif hasattr(U,"store") and len(U)==1:
3166 Un = numpy.ravel( U[0] ).reshape((-1,1))
3167 else:
3168 Un = numpy.ravel( U ).reshape((-1,1))
3169 else:
3170 Un = None
3172 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3173 Xn = CovarianceInflation( Xn,
3174 selfA._parameters["InflationType"],
3175 selfA._parameters["InflationFactor"],
3176 )
3178 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3179 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3180 argsAsSerie = True,
3181 returnSerieAsArrayMatrix = True )
3182 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3183 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3184 argsAsSerie = True,
3185 returnSerieAsArrayMatrix = True )
3186 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3187 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3188 Xn_predicted = Xn_predicted + Cm @ Un
3189 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3190 # --- > Par principe, M = Id, Q = 0
3191 Xn_predicted = EMX = Xn
3192 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3193 argsAsSerie = True,
3194 returnSerieAsArrayMatrix = True )
3196 # Mean of forecast and observation of forecast
3197 Xfm = EnsembleMean( Xn_predicted )
3198 Hfm = EnsembleMean( HX_predicted )
3200 #--------------------------
3201 if VariantM == "KalmanFilterFormula05":
3202 PfHT, HPfHT = 0., 0.
3203 for i in range(__m):
3204 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3205 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3206 PfHT += Exfi * Eyfi.T
3207 HPfHT += Eyfi * Eyfi.T
3208 PfHT = (1./(__m-1)) * PfHT
3209 HPfHT = (1./(__m-1)) * HPfHT
3210 Kn = PfHT * ( R + HPfHT ).I
3213 for i in range(__m):
3214 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3215 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3216 #--------------------------
3217 elif VariantM == "KalmanFilterFormula16":
3218 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3219 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3221 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3222 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3224 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3226 for i in range(__m):
3227 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
3228 #--------------------------
3229 else:
3230 raise ValueError("VariantM has to be chosen in the authorized methods list.")
3232 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3233 Xn = CovarianceInflation( Xn,
3234 selfA._parameters["InflationType"],
3235 selfA._parameters["InflationFactor"],
3236 )
3238 if Hybrid == "E3DVAR":
3239 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
3240 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
3242 Xa = EnsembleMean( Xn )
3243 #--------------------------
3244 selfA._setInternalState("Xn", Xn)
3245 selfA._setInternalState("seed", numpy.random.get_state())
3246 #--------------------------
3248 if selfA._parameters["StoreInternalVariables"] \
3249 or selfA._toStore("CostFunctionJ") \
3250 or selfA._toStore("CostFunctionJb") \
3251 or selfA._toStore("CostFunctionJo") \
3252 or selfA._toStore("APosterioriCovariance") \
3253 or selfA._toStore("InnovationAtCurrentAnalysis") \
3254 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3255 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3256 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
3257 _Innovation = Ynpu - _HXa
3259 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3260 # ---> avec analysis
3261 selfA.StoredVariables["Analysis"].store( Xa )
3262 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3263 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3264 if selfA._toStore("InnovationAtCurrentAnalysis"):
3265 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3266 # ---> avec current state
3267 if selfA._parameters["StoreInternalVariables"] \
3268 or selfA._toStore("CurrentState"):
3269 selfA.StoredVariables["CurrentState"].store( Xn )
3270 if selfA._toStore("ForecastState"):
3271 selfA.StoredVariables["ForecastState"].store( EMX )
3272 if selfA._toStore("ForecastCovariance"):
3273 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3274 if selfA._toStore("BMA"):
3275 selfA.StoredVariables["BMA"].store( EMX - Xa )
3276 if selfA._toStore("InnovationAtCurrentState"):
3277 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3278 if selfA._toStore("SimulatedObservationAtCurrentState") \
3279 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3280 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3282 if selfA._parameters["StoreInternalVariables"] \
3283 or selfA._toStore("CostFunctionJ") \
3284 or selfA._toStore("CostFunctionJb") \
3285 or selfA._toStore("CostFunctionJo") \
3286 or selfA._toStore("CurrentOptimum") \
3287 or selfA._toStore("APosterioriCovariance"):
3288 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3289 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3290 J = Jb + Jo
3291 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3292 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3293 selfA.StoredVariables["CostFunctionJ" ].store( J )
3295 if selfA._toStore("IndexOfOptimum") \
3296 or selfA._toStore("CurrentOptimum") \
3297 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3298 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3299 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3300 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3301 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3302 if selfA._toStore("IndexOfOptimum"):
3303 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3304 if selfA._toStore("CurrentOptimum"):
3305 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3306 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3307 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3308 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3309 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3310 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3311 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3312 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3313 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3314 if selfA._toStore("APosterioriCovariance"):
3315 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3316 if selfA._parameters["EstimationOf"] == "Parameters" \
3317 and J < previousJMinimum:
3318 previousJMinimum = J
3319 XaMin = Xa
3320 if selfA._toStore("APosterioriCovariance"):
3321 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3322 # ---> Pour les smoothers
3323 if selfA._toStore("CurrentEnsembleState"):
3324 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3326 # Stockage final supplémentaire de l'optimum en estimation de paramètres
3327 # ----------------------------------------------------------------------
3328 if selfA._parameters["EstimationOf"] == "Parameters":
3329 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3330 selfA.StoredVariables["Analysis"].store( XaMin )
3331 if selfA._toStore("APosterioriCovariance"):
3332 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3333 if selfA._toStore("BMA"):
3334 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3336 return 0
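# ------------------------------------------------------------------------------
# Editorial sketch (added for illustration, not part of the original source):
# one stochastic EnKF analysis step with perturbed observations, mirroring the
# "KalmanFilterFormula05" branch above. The helper name and all values are
# hypothetical; the gain is built from ensemble anomaly covariances.
def _demo_stochastic_enkf_analysis(seed=123):
    import numpy
    rng = numpy.random.default_rng(seed)
    n, p, m = 2, 1, 50                        # state size, obs size, members
    H  = numpy.array([[1., 0.]])
    R  = numpy.array([[0.25]])
    Y  = numpy.array([1.8])
    Xf  = rng.normal([1., 2.], 1., size=(m, n)).T   # forecast ensemble (n x m)
    HXf = H @ Xf
    Ax  = Xf  - Xf.mean(axis=1, keepdims=True)      # state anomalies
    Ay  = HXf - HXf.mean(axis=1, keepdims=True)     # observed anomalies
    PfHT  = Ax @ Ay.T / (m - 1)
    HPfHT = Ay @ Ay.T / (m - 1)
    Kn = PfHT @ numpy.linalg.inv(HPfHT + R)
    Xa = numpy.empty_like(Xf)
    for i in range(m):                        # one observation draw per member
        yi = Y + rng.multivariate_normal(numpy.zeros(p), R)
        Xa[:, i] = Xf[:, i] + Kn @ (yi - HXf[:, i])
    return Xa.mean(axis=1)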
3338 # ==============================================================================
3339 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3340 """
3341 3DVAR
3342 """
3346 Hm = HO["Direct"].appliedTo
3347 Ha = HO["Adjoint"].appliedInXTo
3349 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3350 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3351 else:
3352 HXb = Hm( Xb )
3353 HXb = HXb.reshape((-1,1))
3354 if Y.size != HXb.size:
3355 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3356 if max(Y.shape) != max(HXb.shape):
3357 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3359 if selfA._toStore("JacobianMatrixAtBackground"):
3360 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3361 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3362 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3364 # Précalcul des inversions de B et R
3365 BI = B.getI()
3366 RI = R.getI()
3367 Xini = selfA._parameters["InitializationPoint"]
3369 # Définition de la fonction-coût
3370 # ------------------------------
3371 def CostFunction(x):
3372 _X = numpy.ravel( x ).reshape((-1,1))
3373 if selfA._parameters["StoreInternalVariables"] or \
3374 selfA._toStore("CurrentState") or \
3375 selfA._toStore("CurrentOptimum"):
3376 selfA.StoredVariables["CurrentState"].store( _X )
3377 _HX = Hm( _X ).reshape((-1,1))
3378 _Innovation = Y - _HX
3379 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3380 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3381 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3382 if selfA._toStore("InnovationAtCurrentState"):
3383 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3385 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3386 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3387 J = Jb + Jo
3389 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3390 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3391 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3392 selfA.StoredVariables["CostFunctionJ" ].store( J )
3393 if selfA._toStore("IndexOfOptimum") or \
3394 selfA._toStore("CurrentOptimum") or \
3395 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3396 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3397 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3398 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3399 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3400 if selfA._toStore("IndexOfOptimum"):
3401 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3402 if selfA._toStore("CurrentOptimum"):
3403 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3404 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3405 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3406 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3407 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3408 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3409 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3410 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3411 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3412 return J
3414 def GradientOfCostFunction(x):
3415 _X = x.reshape((-1,1))
3416 _HX = Hm( _X ).reshape((-1,1))
3417 GradJb = BI * (_X - Xb)
3418 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3419 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3420 return GradJ
3422 # Minimisation de la fonctionnelle
3423 # --------------------------------
3424 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3426 if selfA._parameters["Minimizer"] == "LBFGSB":
3427 if "0.19" <= scipy.version.version <= "1.1.0":
3428 import lbfgsbhlt as optimiseur
3429 else:
3430 import scipy.optimize as optimiseur
3431 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3432 func = CostFunction,
3433 x0 = Xini,
3434 fprime = GradientOfCostFunction,
3435 args = (),
3436 bounds = selfA._parameters["Bounds"],
3437 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3438 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3439 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3440 iprint = selfA._parameters["optiprint"],
3441 )
3442 nfeval = Informations['funcalls']
3443 rc = Informations['warnflag']
3444 elif selfA._parameters["Minimizer"] == "TNC":
3445 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3446 func = CostFunction,
3447 x0 = Xini,
3448 fprime = GradientOfCostFunction,
3449 args = (),
3450 bounds = selfA._parameters["Bounds"],
3451 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3452 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3453 ftol = selfA._parameters["CostDecrementTolerance"],
3454 messages = selfA._parameters["optmessages"],
3455 )
3456 elif selfA._parameters["Minimizer"] == "CG":
3457 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3458 f = CostFunction,
3459 x0 = Xini,
3460 fprime = GradientOfCostFunction,
3461 args = (),
3462 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3463 gtol = selfA._parameters["GradientNormTolerance"],
3464 disp = selfA._parameters["optdisp"],
3465 full_output = True,
3466 )
3467 elif selfA._parameters["Minimizer"] == "NCG":
3468 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3469 f = CostFunction,
3470 x0 = Xini,
3471 fprime = GradientOfCostFunction,
3472 args = (),
3473 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3474 avextol = selfA._parameters["CostDecrementTolerance"],
3475 disp = selfA._parameters["optdisp"],
3476 full_output = True,
3477 )
3478 elif selfA._parameters["Minimizer"] == "BFGS":
3479 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3480 f = CostFunction,
3481 x0 = Xini,
3482 fprime = GradientOfCostFunction,
3483 args = (),
3484 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3485 gtol = selfA._parameters["GradientNormTolerance"],
3486 disp = selfA._parameters["optdisp"],
3487 full_output = True,
3488 )
3489 else:
3490 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3492 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3493 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3495 # Correction pour pallier a un bug de TNC sur le retour du Minimum
3496 # ----------------------------------------------------------------
3497 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3498 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3501 #--------------------------
3502 Xa = Minimum
3503 selfA.StoredVariables["Analysis"].store( Xa )
3505 if selfA._toStore("OMA") or \
3506 selfA._toStore("SigmaObs2") or \
3507 selfA._toStore("SimulationQuantiles") or \
3508 selfA._toStore("SimulatedObservationAtOptimum"):
3509 if selfA._toStore("SimulatedObservationAtCurrentState"):
3510 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3511 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3512 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3513 else:
3514 HXa = Hm( Xa ).reshape((-1,1))
3516 if selfA._toStore("APosterioriCovariance") or \
3517 selfA._toStore("SimulationQuantiles") or \
3518 selfA._toStore("JacobianMatrixAtOptimum") or \
3519 selfA._toStore("KalmanGainAtOptimum"):
3520 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3521 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3522 if selfA._toStore("APosterioriCovariance") or \
3523 selfA._toStore("SimulationQuantiles") or \
3524 selfA._toStore("KalmanGainAtOptimum"):
3525 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3526 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3527 if selfA._toStore("APosterioriCovariance") or \
3528 selfA._toStore("SimulationQuantiles"):
3529 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3530 if selfA._toStore("APosterioriCovariance"):
3531 selfA.StoredVariables["APosterioriCovariance"].store( A )
3532 if selfA._toStore("JacobianMatrixAtOptimum"):
3533 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3534 if selfA._toStore("KalmanGainAtOptimum"):
3535 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3536 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3537 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3539 # Calculs et/ou stockages supplémentaires
3540 # ---------------------------------------
3541 if selfA._toStore("Innovation") or \
3542 selfA._toStore("SigmaObs2") or \
3543 selfA._toStore("MahalanobisConsistency") or \
3544 selfA._toStore("OMB"):
3545 d = Y - HXb
3546 if selfA._toStore("Innovation"):
3547 selfA.StoredVariables["Innovation"].store( d )
3548 if selfA._toStore("BMA"):
3549 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3550 if selfA._toStore("OMA"):
3551 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3552 if selfA._toStore("OMB"):
3553 selfA.StoredVariables["OMB"].store( d )
3554 if selfA._toStore("SigmaObs2"):
3555 TraceR = R.trace(Y.size)
3556 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3557 if selfA._toStore("MahalanobisConsistency"):
3558 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3559 if selfA._toStore("SimulationQuantiles"):
3560 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3561 if selfA._toStore("SimulatedObservationAtBackground"):
3562 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3563 if selfA._toStore("SimulatedObservationAtOptimum"):
3564 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3566 return 0
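# ------------------------------------------------------------------------------
# Editorial sketch (added for illustration, not part of the original source):
# the 3DVAR cost and gradient of std3dvar on a small linear problem, minimized
# here with scipy.optimize.minimize instead of the legacy fmin_* wrappers used
# above. The helper name and all numerical values are hypothetical.
def _demo_std3dvar():
    import numpy, scipy.optimize
    Xb = numpy.array([0.8, 2.2])
    BI = numpy.linalg.inv(numpy.eye(2))                 # B^-1
    H  = numpy.array([[1., 0.], [1., 1.]])
    RI = numpy.linalg.inv(0.1 * numpy.eye(2))           # R^-1
    Y  = numpy.array([1.0, 3.0])
    def cost(x):
        db, do = x - Xb, Y - H @ x
        return 0.5 * db @ BI @ db + 0.5 * do @ RI @ do  # Jb + Jo
    def grad(x):
        return BI @ (x - Xb) - H.T @ (RI @ (Y - H @ x))
    res = scipy.optimize.minimize(cost, Xb, jac=grad, method="L-BFGS-B")
    return res.x                                        # the analysis Xa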
3568 # ==============================================================================
3569 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3570 """
3571 4DVAR
3572 """
3578 Hm = HO["Direct"].appliedControledFormTo
3579 Mm = EM["Direct"].appliedControledFormTo
3581 if CM is not None and "Tangent" in CM and U is not None:
3582 Cm = CM["Tangent"].asMatrix(Xb)
3583 else:
3584 Cm = None
3586 def Un(_step):
3587 if U is not None:
3588 if hasattr(U,"store") and 1<=_step<len(U) :
3589 _Un = numpy.ravel( U[_step] ).reshape((-1,1))
3590 elif hasattr(U,"store") and len(U)==1:
3591 _Un = numpy.ravel( U[0] ).reshape((-1,1))
3592 else:
3593 _Un = numpy.ravel( U ).reshape((-1,1))
3594 else:
3595 _Un = None
3596 return _Un
3597 def CmUn(_xn,_un):
3598 if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
3599 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3600 _CmUn = (_Cm @ _un).reshape((-1,1))
3601 else:
3602 _CmUn = 0.
3603 return _CmUn
3605 # Remarque : les observations sont exploitées à partir du pas de temps
3606 # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
3607 # Donc le pas 0 n'est pas utilisé puisque la première étape commence
3608 # avec l'observation du pas 1.
3610 # Nombre de pas identique au nombre de pas d'observations
3611 if hasattr(Y,"stepnumber"):
3612 duration = Y.stepnumber()
3613 else:
3614 duration = 2
3616 # Précalcul des inversions de B et R
3617 BI = B.getI()
3618 RI = R.getI()
3620 # Point de démarrage de l'optimisation
3621 Xini = selfA._parameters["InitializationPoint"]
3623 # Définition de la fonction-coût
3624 # ------------------------------
3625 selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
3626 selfA.DirectInnovation = [None,] # Le pas 0 n'est pas observé
3627 def CostFunction(x):
3628 _X = numpy.ravel( x ).reshape((-1,1))
3629 if selfA._parameters["StoreInternalVariables"] or \
3630 selfA._toStore("CurrentState") or \
3631 selfA._toStore("CurrentOptimum"):
3632 selfA.StoredVariables["CurrentState"].store( _X )
3633 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3634 selfA.DirectCalculation = [None,]
3635 selfA.DirectInnovation = [None,]
3636 Jo = 0.
3637 _Xn = _X
3638 for step in range(0,duration-1):
3639 if hasattr(Y,"store"):
3640 _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
3641 else:
3642 _Ynpu = numpy.ravel( Y ).reshape((-1,1))
3644 _Un = Un(step)
3646 if selfA._parameters["EstimationOf"] == "State":
3647 _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
3648 elif selfA._parameters["EstimationOf"] == "Parameters":
3649 pass
3651 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3652 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3654 # Etape de différence aux observations
3655 if selfA._parameters["EstimationOf"] == "State":
3656 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
3657 elif selfA._parameters["EstimationOf"] == "Parameters":
3658 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
3660 # Stockage de l'état
3661 selfA.DirectCalculation.append( _Xn )
3662 selfA.DirectInnovation.append( _YmHMX )
3664 # Ajout dans la fonctionnelle d'observation
3665 Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
3667 J = Jb + Jo
3668 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3669 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3670 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3671 selfA.StoredVariables["CostFunctionJ" ].store( J )
3672 if selfA._toStore("IndexOfOptimum") or \
3673 selfA._toStore("CurrentOptimum") or \
3674 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3675 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3676 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3677 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3678 if selfA._toStore("IndexOfOptimum"):
3679 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3680 if selfA._toStore("CurrentOptimum"):
3681 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3682 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3683 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3684 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3685 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3686 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3687 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3688 return J
3690 def GradientOfCostFunction(x):
3691 _X = numpy.ravel( x ).reshape((-1,1))
3692 GradJb = BI * (_X - Xb)
3693 GradJo = 0.
3694 for step in range(duration-1,0,-1):
3695 # Étape de récupération du dernier stockage de l'évolution
3696 _Xn = selfA.DirectCalculation.pop()
3697 # Étape de récupération du dernier stockage de l'innovation
3698 _YmHMX = selfA.DirectInnovation.pop()
3699 # Calcul des adjoints
3700 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3701 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3702 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3703 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3704 # Calcul du gradient par état adjoint
3705 GradJo = GradJo + Ha * (RI * _YmHMX) # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
3706 GradJo = Ma * GradJo # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
3707 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3708 return GradJ
3710 # Minimisation de la fonctionnelle
3711 # --------------------------------
3712 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3714 if selfA._parameters["Minimizer"] == "LBFGSB":
3715 if "0.19" <= scipy.version.version <= "1.1.0":
3716 import lbfgsbhlt as optimiseur
3717 else:
3718 import scipy.optimize as optimiseur
3719 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3720 func = CostFunction,
3721 x0 = Xini,
3722 fprime = GradientOfCostFunction,
3723 args = (),
3724 bounds = selfA._parameters["Bounds"],
3725 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3726 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3727 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3728 iprint = selfA._parameters["optiprint"],
3729 )
3730 nfeval = Informations['funcalls']
3731 rc = Informations['warnflag']
3732 elif selfA._parameters["Minimizer"] == "TNC":
3733 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3734 func = CostFunction,
3735 x0 = Xini,
3736 fprime = GradientOfCostFunction,
3737 args = (),
3738 bounds = selfA._parameters["Bounds"],
3739 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3740 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3741 ftol = selfA._parameters["CostDecrementTolerance"],
3742 messages = selfA._parameters["optmessages"],
3743 )
3744 elif selfA._parameters["Minimizer"] == "CG":
3745 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3746 f = CostFunction,
3747 x0 = Xini,
3748 fprime = GradientOfCostFunction,
3749 args = (),
3750 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3751 gtol = selfA._parameters["GradientNormTolerance"],
3752 disp = selfA._parameters["optdisp"],
3753 full_output = True,
3754 )
3755 elif selfA._parameters["Minimizer"] == "NCG":
3756 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3757 f = CostFunction,
3758 x0 = Xini,
3759 fprime = GradientOfCostFunction,
3760 args = (),
3761 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3762 avextol = selfA._parameters["CostDecrementTolerance"],
3763 disp = selfA._parameters["optdisp"],
3764 full_output = True,
3765 )
3766 elif selfA._parameters["Minimizer"] == "BFGS":
3767 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3768 f = CostFunction,
3769 x0 = Xini,
3770 fprime = GradientOfCostFunction,
3771 args = (),
3772 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3773 gtol = selfA._parameters["GradientNormTolerance"],
3774 disp = selfA._parameters["optdisp"],
3775 full_output = True,
3776 )
3777 else:
3778 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3780 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3781 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3783 # Correction pour pallier a un bug de TNC sur le retour du Minimum
3784 # ----------------------------------------------------------------
3785 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3786 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3788 # Obtention de l'analyse
3789 # ----------------------
3790 Xa = Minimum
3792 selfA.StoredVariables["Analysis"].store( Xa )
3794 # Calculs et/ou stockages supplémentaires
3795 # ---------------------------------------
3796 if selfA._toStore("BMA"):
3797 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3799 return 0
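# ------------------------------------------------------------------------------
# Editorial sketch (added for illustration, not part of the original source):
# the forward/backward structure of std4dvar's gradient on a linear toy case.
# The helper name and values are hypothetical. The backward sweep mirrors
# GradientOfCostFunction: accumulate Ha(RI d), then propagate with Ma.
def _demo_std4dvar_gradient():
    import numpy
    M  = numpy.array([[1., 0.1], [0., 1.]])  # assumed linear evolution model
    H  = numpy.array([[1., 0.]])              # assumed linear observation
    RI = numpy.array([[4.]])
    BI = numpy.eye(2)
    Xb = numpy.array([[1.], [2.]])
    Yo = [numpy.array([[1.1]]), numpy.array([[1.3]])]  # obs at steps 1 and 2
    X0 = numpy.array([[0.9], [2.1]])          # current iterate
    innovations, Xn = [], X0                  # forward sweep: store innovations
    for Y in Yo:
        Xn = M @ Xn
        innovations.append(Y - H @ Xn)
    GradJo = numpy.zeros((2, 1))              # backward (adjoint) sweep
    for d in reversed(innovations):
        GradJo = M.T @ (GradJo + H.T @ (RI @ d))
    return BI @ (X0 - Xb) - GradJo            # GradJb - GradJo, as above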
3801 # ==============================================================================
3802 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3803 """
3804 Standard Kalman Filter
3805 """
3806 if selfA._parameters["EstimationOf"] == "Parameters":
3807 selfA._parameters["StoreInternalVariables"] = True
3811 Ht = HO["Tangent"].asMatrix(Xb)
3812 Ha = HO["Adjoint"].asMatrix(Xb)
3814 if selfA._parameters["EstimationOf"] == "State":
3815 Mt = EM["Tangent"].asMatrix(Xb)
3816 Ma = EM["Adjoint"].asMatrix(Xb)
3818 if CM is not None and "Tangent" in CM and U is not None:
3819 Cm = CM["Tangent"].asMatrix(Xb)
3820 else:
3821 Cm = None
3823 # Durée d'observation et tailles
3824 if hasattr(Y,"stepnumber"):
3825 duration = Y.stepnumber()
3826 __p = numpy.cumprod(Y.shape())[-1]
3827 else:
3828 duration = 2
3829 __p = numpy.array(Y).size
3831 # Précalcul des inversions de B et R
3832 if selfA._parameters["StoreInternalVariables"] \
3833 or selfA._toStore("CostFunctionJ") \
3834 or selfA._toStore("CostFunctionJb") \
3835 or selfA._toStore("CostFunctionJo") \
3836 or selfA._toStore("CurrentOptimum") \
3837 or selfA._toStore("APosterioriCovariance"):
3838 BI = B.getI()
3839 RI = R.getI()
3843 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3844 Xn = Xb
3845 Pn = B
3846 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3847 selfA.StoredVariables["Analysis"].store( Xb )
3848 if selfA._toStore("APosterioriCovariance"):
3849 if hasattr(B,"asfullmatrix"):
3850 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3851 else:
3852 selfA.StoredVariables["APosterioriCovariance"].store( B )
3853 selfA._setInternalState("seed", numpy.random.get_state())
3854 elif selfA._parameters["nextStep"]:
3855 Xn = selfA._getInternalState("Xn")
3856 Pn = selfA._getInternalState("Pn")
3858 if selfA._parameters["EstimationOf"] == "Parameters":
3859 XaMin = Xn
3860 previousJMinimum = numpy.finfo(float).max
3862 for step in range(duration-1):
3863 if hasattr(Y,"store"):
3864 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3865 else:
3866 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3868 if U is not None:
3869 if hasattr(U,"store") and len(U)>1:
3870 Un = numpy.ravel( U[step] ).reshape((-1,1))
3871 elif hasattr(U,"store") and len(U)==1:
3872 Un = numpy.ravel( U[0] ).reshape((-1,1))
3873 else:
3874 Un = numpy.ravel( U ).reshape((-1,1))
3875 else:
3876 Un = None
3878 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3879 Xn_predicted = Mt @ Xn
3880 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3881 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3882 Xn_predicted = Xn_predicted + Cm @ Un
3883 Pn_predicted = Q + Mt * (Pn * Ma)
3884 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3885 # --- > Par principe, M = Id, Q = 0
3886 Xn_predicted = Xn
3887 Pn_predicted = Pn
3889 if selfA._parameters["EstimationOf"] == "State":
3890 HX_predicted = Ht @ Xn_predicted
3891 _Innovation = Ynpu - HX_predicted
3892 elif selfA._parameters["EstimationOf"] == "Parameters":
3893 HX_predicted = Ht @ Xn_predicted
3894 _Innovation = Ynpu - HX_predicted
3895 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
3896 _Innovation = _Innovation - Cm @ Un
3898 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3899 Xn = Xn_predicted + Kn * _Innovation
3900 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3902 Xa = Xn # Pointeurs
3903 #--------------------------
3904 selfA._setInternalState("Xn", Xn)
3905 selfA._setInternalState("Pn", Pn)
3906 #--------------------------
3908 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3909 # ---> avec analysis
3910 selfA.StoredVariables["Analysis"].store( Xa )
3911 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3912 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3913 if selfA._toStore("InnovationAtCurrentAnalysis"):
3914 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3915 # ---> avec current state
3916 if selfA._parameters["StoreInternalVariables"] \
3917 or selfA._toStore("CurrentState"):
3918 selfA.StoredVariables["CurrentState"].store( Xn )
3919 if selfA._toStore("ForecastState"):
3920 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3921 if selfA._toStore("ForecastCovariance"):
3922 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3923 if selfA._toStore("BMA"):
3924 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3925 if selfA._toStore("InnovationAtCurrentState"):
3926 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3927 if selfA._toStore("SimulatedObservationAtCurrentState") \
3928 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3929 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3931 if selfA._parameters["StoreInternalVariables"] \
3932 or selfA._toStore("CostFunctionJ") \
3933 or selfA._toStore("CostFunctionJb") \
3934 or selfA._toStore("CostFunctionJo") \
3935 or selfA._toStore("CurrentOptimum") \
3936 or selfA._toStore("APosterioriCovariance"):
3937 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3938 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3939 J = Jb + Jo
3940 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3941 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3942 selfA.StoredVariables["CostFunctionJ" ].store( J )
3944 if selfA._toStore("IndexOfOptimum") \
3945 or selfA._toStore("CurrentOptimum") \
3946 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3947 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3948 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3949 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3950 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3951 if selfA._toStore("IndexOfOptimum"):
3952 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3953 if selfA._toStore("CurrentOptimum"):
3954 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3955 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3956 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3957 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3958 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3959 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3960 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3961 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3962 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3963 if selfA._toStore("APosterioriCovariance"):
3964 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3965 if selfA._parameters["EstimationOf"] == "Parameters" \
3966 and J < previousJMinimum:
3967 previousJMinimum = J
3968 XaMin = Xa
3969 if selfA._toStore("APosterioriCovariance"):
3970 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3972 # Stockage final supplémentaire de l'optimum en estimation de paramètres
3973 # ----------------------------------------------------------------------
3974 if selfA._parameters["EstimationOf"] == "Parameters":
3975 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3976 selfA.StoredVariables["Analysis"].store( XaMin )
3977 if selfA._toStore("APosterioriCovariance"):
3978 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3979 if selfA._toStore("BMA"):
3980 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3982 return 0
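# ------------------------------------------------------------------------------
# Editorial sketch (added for illustration, not part of the original source):
# one predict/update cycle of the linear Kalman filter implemented in stdkf.
# The helper name and all numerical values are hypothetical.
def _demo_kalman_step():
    import numpy
    Mt = numpy.array([[1., 0.1], [0., 1.]])  # tangent of the evolution model
    Ht = numpy.array([[1., 0.]])              # tangent of the observation operator
    Q  = 0.01 * numpy.eye(2)
    R  = numpy.array([[0.25]])
    Xn, Pn = numpy.array([[1.], [2.]]), numpy.eye(2)
    Ynpu = numpy.array([[1.3]])
    Xn_predicted = Mt @ Xn                    # forecast of the state
    Pn_predicted = Mt @ Pn @ Mt.T + Q         # forecast of the covariance
    Kn = Pn_predicted @ Ht.T @ numpy.linalg.inv(R + Ht @ Pn_predicted @ Ht.T)
    Xa = Xn_predicted + Kn @ (Ynpu - Ht @ Xn_predicted)
    Pa = Pn_predicted - Kn @ Ht @ Pn_predicted
    return Xa, Pa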
3984 # ==============================================================================
3985 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3986 """
3987 Unscented Kalman Filter
3988 """
3989 if selfA._parameters["EstimationOf"] == "Parameters":
3990 selfA._parameters["StoreInternalVariables"] = True
3992 L = Xb.size
3993 Alpha = selfA._parameters["Alpha"]
3994 Beta = selfA._parameters["Beta"]
3995 if selfA._parameters["Kappa"] == 0:
3996 if selfA._parameters["EstimationOf"] == "State":
3997 Kappa = 0
3998 elif selfA._parameters["EstimationOf"] == "Parameters":
3999 Kappa = 3 - L
4000 else:
4001 Kappa = selfA._parameters["Kappa"]
4002 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
4003 Gamma = math.sqrt( L + Lambda )
4005 Ww = []
4006 Ww.append( 0. )
4007 for i in range(2*L):
4008 Ww.append( 1. / (2.*(L + Lambda)) )
4010 Wm = numpy.array( Ww )
4011 Wm[0] = Lambda / (L + Lambda)
4012 Wc = numpy.array( Ww )
4013 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
4016 Hm = HO["Direct"].appliedControledFormTo
4018 if selfA._parameters["EstimationOf"] == "State":
4019 Mm = EM["Direct"].appliedControledFormTo
4021 if CM is not None and "Tangent" in CM and U is not None:
4022 Cm = CM["Tangent"].asMatrix(Xb)
4023 else:
4024 Cm = None
4026 # Durée d'observation et tailles
4027 if hasattr(Y,"stepnumber"):
4028 duration = Y.stepnumber()
4029 __p = numpy.cumprod(Y.shape())[-1]
4030 else:
4031 duration = 2
4032 __p = numpy.array(Y).size
4034 # Précalcul des inversions de B et R
4035 if selfA._parameters["StoreInternalVariables"] \
4036 or selfA._toStore("CostFunctionJ") \
4037 or selfA._toStore("CostFunctionJb") \
4038 or selfA._toStore("CostFunctionJo") \
4039 or selfA._toStore("CurrentOptimum") \
4040 or selfA._toStore("APosterioriCovariance"):
4041 BI = B.getI()
4042 RI = R.getI()
4046 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
4047 Xn = Xb
4048 if hasattr(B,"asfullmatrix"):
4049 Pn = B.asfullmatrix(__n)
4050 else:
4051 Pn = B
4052 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4053 selfA.StoredVariables["Analysis"].store( Xb )
4054 if selfA._toStore("APosterioriCovariance"):
4055 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4056 elif selfA._parameters["nextStep"]:
4057 Xn = selfA._getInternalState("Xn")
4058 Pn = selfA._getInternalState("Pn")
4060 if selfA._parameters["EstimationOf"] == "Parameters":
4061 XaMin = Xn
4062 previousJMinimum = numpy.finfo(float).max
4064 for step in range(duration-1):
4065 if hasattr(Y,"store"):
4066 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4067 else:
4068 Ynpu = numpy.ravel( Y ).reshape((__p,1))
4070 if U is not None:
4071 if hasattr(U,"store") and len(U)>1:
4072 Un = numpy.ravel( U[step] ).reshape((-1,1))
4073 elif hasattr(U,"store") and len(U)==1:
4074 Un = numpy.ravel( U[0] ).reshape((-1,1))
4075 else:
4076 Un = numpy.ravel( U ).reshape((-1,1))
4077 else:
4078 Un = None
4080 Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
4081 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4082 nbSpts = 2*Xn.size+1
4084 XEtnnp = []
4085 for point in range(nbSpts):
4086 if selfA._parameters["EstimationOf"] == "State":
4087 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4088 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
4089 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4090 XEtnnpi = XEtnnpi + Cm @ Un
4091 elif selfA._parameters["EstimationOf"] == "Parameters":
4092 # --- > Par principe, M = Id, Q = 0
4093 XEtnnpi = Xnp[:,point]
4094 XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4095 XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4097 Xncm = ( XEtnnp * Wm ).sum(axis=1)
4099 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
4100 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4101 for point in range(nbSpts):
4102 Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm)) # Wc of the current point (was Wc[i], a leftover loop index)
4104 Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4106 Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4108 Ynnp = []
4109 for point in range(nbSpts):
4110 if selfA._parameters["EstimationOf"] == "State":
4111 Ynnpi = Hm( (Xnnp[:,point], None) )
4112 elif selfA._parameters["EstimationOf"] == "Parameters":
4113 Ynnpi = Hm( (Xnnp[:,point], Un) )
4114 Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4115 Ynnp = numpy.concatenate( Ynnp, axis=1 )
4117 Yncm = ( Ynnp * Wm ).sum(axis=1)
4119 Pyyn = R
4120 Pxyn = 0.
4121 for point in range(nbSpts):
4122 Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) # Wc of the current point (was Wc[i])
4123 Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4125 _Innovation = Ynpu - Yncm.reshape((-1,1))
4126 if selfA._parameters["EstimationOf"] == "Parameters":
4127 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
4128 _Innovation = _Innovation - Cm @ Un
4130 Kn = Pxyn * Pyyn.I
4131 Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4132 Pn = Pnm - Kn * Pyyn * Kn.T
4134 Xa = Xn # Pointeurs
4135 #--------------------------
4136 selfA._setInternalState("Xn", Xn)
4137 selfA._setInternalState("Pn", Pn)
4138 #--------------------------
4140 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4141 # ---> avec analysis
4142 selfA.StoredVariables["Analysis"].store( Xa )
4143 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4144 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4145 if selfA._toStore("InnovationAtCurrentAnalysis"):
4146 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4147 # ---> avec current state
4148 if selfA._parameters["StoreInternalVariables"] \
4149 or selfA._toStore("CurrentState"):
4150 selfA.StoredVariables["CurrentState"].store( Xn )
4151 if selfA._toStore("ForecastState"):
4152 selfA.StoredVariables["ForecastState"].store( Xncm )
4153 if selfA._toStore("ForecastCovariance"):
4154 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4155 if selfA._toStore("BMA"):
4156 selfA.StoredVariables["BMA"].store( Xncm - Xa )
4157 if selfA._toStore("InnovationAtCurrentState"):
4158 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4159 if selfA._toStore("SimulatedObservationAtCurrentState") \
4160 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4161 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4163 if selfA._parameters["StoreInternalVariables"] \
4164 or selfA._toStore("CostFunctionJ") \
4165 or selfA._toStore("CostFunctionJb") \
4166 or selfA._toStore("CostFunctionJo") \
4167 or selfA._toStore("CurrentOptimum") \
4168 or selfA._toStore("APosterioriCovariance"):
4169 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
4170 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4171 J = Jb + Jo
4172 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4173 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4174 selfA.StoredVariables["CostFunctionJ" ].store( J )
4176 if selfA._toStore("IndexOfOptimum") \
4177 or selfA._toStore("CurrentOptimum") \
4178 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4179 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4180 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4181 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4182 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4183 if selfA._toStore("IndexOfOptimum"):
4184 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4185 if selfA._toStore("CurrentOptimum"):
4186 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4187 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4188 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4189 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4190 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4191 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4192 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4193 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4194 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4195 if selfA._toStore("APosterioriCovariance"):
4196 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4197 if selfA._parameters["EstimationOf"] == "Parameters" \
4198 and J < previousJMinimum:
4199 previousJMinimum = J
4200 XaMin = Xa
4201 if selfA._toStore("APosterioriCovariance"):
4202 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4204 # Stockage final supplémentaire de l'optimum en estimation de paramètres
4205 # ----------------------------------------------------------------------
4206 if selfA._parameters["EstimationOf"] == "Parameters":
4207 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4208 selfA.StoredVariables["Analysis"].store( XaMin )
4209 if selfA._toStore("APosterioriCovariance"):
4210 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4211 if selfA._toStore("BMA"):
4212 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4214 return 0
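# ------------------------------------------------------------------------------
# Editorial sketch (added for illustration, not part of the original source):
# the unscented weights and sigma points used by uskf. The helper name and
# values are hypothetical. With Lambda = Alpha^2 (L+Kappa) - L, the weighted
# sigma points reproduce the mean exactly since Wm[0] + 2L/(2(L+Lambda)) = 1.
def _demo_unscented_sigma_points(Alpha=0.5, Beta=2., Kappa=0):
    import math, numpy
    X = numpy.array([[1.], [2.]])             # mean state
    P = numpy.diag([0.5, 0.2])                # its covariance
    L = X.size
    Lambda = float(Alpha**2) * (L + Kappa) - L
    Gamma  = math.sqrt(L + Lambda)
    Wm = numpy.full(2*L + 1, 1. / (2.*(L + Lambda)))
    Wc = Wm.copy()
    Wm[0] = Lambda / (L + Lambda)
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
    Pdemi = numpy.linalg.cholesky(P)          # any matrix square root works
    Xnp = numpy.hstack([X, X + Gamma*Pdemi, X - Gamma*Pdemi])
    Xm  = (Xnp * Wm).sum(axis=1)              # recovers the mean X exactly
    return Xnp, Wm, Wc, Xm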
4216 # ==============================================================================
4217 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4218 """
4219 3DVAR variational analysis with no inversion of B
4220 """
4224 Hm = HO["Direct"].appliedTo
4225 Ha = HO["Adjoint"].appliedInXTo
4227 BT = B.getT()
4228 RI = R.getI()
4230 Xini = numpy.zeros(Xb.shape)
4232 # Définition de la fonction-coût
4233 # ------------------------------
4234 def CostFunction(v):
4235 _V = numpy.ravel( v ).reshape((-1,1))
4236 _X = Xb + (B @ _V).reshape((-1,1))
4237 if selfA._parameters["StoreInternalVariables"] or \
4238 selfA._toStore("CurrentState") or \
4239 selfA._toStore("CurrentOptimum"):
4240 selfA.StoredVariables["CurrentState"].store( _X )
4241 _HX = Hm( _X )
4242 _HX = numpy.ravel( _HX ).reshape((-1,1))
4243 _Innovation = Y - _HX
4244 if selfA._toStore("SimulatedObservationAtCurrentState") or \
4245 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4246 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4247 if selfA._toStore("InnovationAtCurrentState"):
4248 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4250 Jb = float( 0.5 * _V.T * (BT * _V) )
4251 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4252 J = Jb + Jo
4254 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4255 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4256 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4257 selfA.StoredVariables["CostFunctionJ" ].store( J )
4258 if selfA._toStore("IndexOfOptimum") or \
4259 selfA._toStore("CurrentOptimum") or \
4260 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4261 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4262 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4263 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4264 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4265 if selfA._toStore("IndexOfOptimum"):
4266 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4267 if selfA._toStore("CurrentOptimum"):
4268 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4269 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4270 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4271 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4272 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4273 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4274 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4275 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4276 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4277 return J
4279 def GradientOfCostFunction(v):
4280 _V = v.reshape((-1,1))
4281 _X = Xb + (B @ _V).reshape((-1,1))
4282 _HX = Hm( _X ).reshape((-1,1))
4283 GradJb = BT * _V
4284 GradJo = - Ha( (_X, RI * (Y - _HX)) )
4285 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4286 return GradJ
4288 # Minimisation de la fonctionnelle
4289 # --------------------------------
4290 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4292 if selfA._parameters["Minimizer"] == "LBFGSB":
4293 if "0.19" <= scipy.version.version <= "1.1.0":
4294 import lbfgsbhlt as optimiseur
4296 import scipy.optimize as optimiseur
4297 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4298 func = CostFunction,
4300 fprime = GradientOfCostFunction,
4302 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4303 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
4304 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
4305 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4306 iprint = selfA._parameters["optiprint"],
4308 nfeval = Informations['funcalls']
4309 rc = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = numpy.zeros(Xb.size),
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = numpy.zeros(Xb.size),
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = numpy.zeros(Xb.size),
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = numpy.zeros(Xb.size),
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    else:
        Minimum = Xb + B * Minimum.reshape((-1,1)) # Not @
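    # In the first case the optimum is retrieved directly as a stored state;
    # in the second, the optimal increment v returned by the minimizer is
    # mapped back to state space as Xb + B v, consistently with the change
    # of variable used in CostFunction.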
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
        HXa = HXa.reshape((-1,1))
    #
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
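        # For a quadratic cost, the a posteriori covariance A is the
        # inverse Hessian of J at the optimum, i.e. (B^{-1} + H^T R^{-1} H)^{-1};
        # HessienneEstimation, defined elsewhere in this module, builds this
        # estimate from HaM, HtM, BI and RI.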
        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
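    # Two algebraically equivalent expressions of the gain are used: the
    # state-space form when there are fewer observations than state
    # variables, and the dual form based on the inverse covariances
    # otherwise, so that the smaller of the two matrices is inverted.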
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( d )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( d )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
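    # MahalanobisConsistency stores 2*J(Xa)/p, where p is the number of
    # observations: it should be close to 1 when B and R are statistically
    # consistent with the actual background and observation errors.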
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
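    #
    # Minimal illustrative sketch, not part of the library: the same
    # preconditioned 3D-Var minimization as in CostFunction above, on a
    # tiny linear toy problem with plain numpy arrays. All names below
    # (Xb, Y, B, RI, H, J, GradJ) are local to this demonstration, and an
    # identity observation operator is assumed.
    Xb = numpy.array([[0.],[0.]])   # toy background state
    Y  = numpy.array([[1.],[2.]])   # toy observations
    B  = numpy.eye(2)               # toy background error covariance
    RI = numpy.eye(2)               # toy inverse observation error covariance
    H  = numpy.eye(2)               # toy linear observation operator
    def J(v):
        # Cost in the preconditioned variable v, with X = Xb + B v
        _V = v.reshape((-1,1))
        _d = Y - H @ (Xb + B @ _V)
        return float( 0.5 * _V.T @ (B.T @ _V) + 0.5 * _d.T @ (RI @ _d) )
    def GradJ(v):
        # Exact gradient of J with respect to v
        _V = v.reshape((-1,1))
        _d = Y - H @ (Xb + B @ _V)
        return numpy.ravel( B.T @ _V - B.T @ (H.T @ (RI @ _d)) )
    vopt, jopt, infos = scipy.optimize.fmin_l_bfgs_b( J, numpy.zeros(2), fprime = GradJ )
    # With B = R, the analysis lies halfway between Xb and Y: [0.5, 1.]
    print(' Sketch analysis state =', numpy.ravel( Xb + B @ vopt.reshape((-1,1)) ))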