# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
__doc__ = """
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)

# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.ravel( X ).reshape((-1,1))
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
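
# Editorial note: ExecuteFunction is the worker used by the multiprocessing
# pools below; it re-imports the user function from its module path. A minimal
# sketch of a standalone call, with purely hypothetical path, module and
# function names:
#
#   funcrepr = {
#       "__userFunction__path" : "/path/to/user/module",  # hypothetical
#       "__userFunction__modl" : "usermodule",            # hypothetical
#       "__userFunction__name" : "DirectOperator",        # hypothetical
#       }
#   HX = ExecuteFunction( (X, None, funcrepr) )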

# ==============================================================================
class FDApproximation(object):
    """
    This class provides an interface to define approximated operators. When
    creating an object by supplying a function "Function", one obtains an
    object with three methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    by the multiplicative increment "increment" (1% by default), or by the
    fixed increment "dX" which is then multiplied by "increment" (hence in %),
    and centered finite differences are computed when the boolean "centeredDF"
    is True.
    """
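    # A minimal usage sketch (the callable below is a hypothetical user
    # operator; only the methods documented above are assumed):
    #
    #   FDA = FDApproximation( Function = lambda x: x**2, increment = 0.01 )
    #   HX  = FDA.DirectOperator( X )           # direct value H(X)
    #   HtX = FDA.TangentOperator( (X, dX) )    # tangent of H at X applied to dX
    #   HaY = FDA.AdjointOperator( (X, Y) )     # adjoint of H at X applied to Y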
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            reducingMemoryUse     = False,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        #
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing computations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        self.__mfEnabled = bool(mfEnabled)
        logging.debug("FDA Multifunction computations: %s"%(self.__mfEnabled,))
        #
        self.__rmEnabled = bool(reducingMemoryUse)
        logging.debug("FDA Computations with memory reduction: %s"%(self.__rmEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        logging.debug("FDA Computations with duplicates reduction: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection: %.2e"%self.__tolerBP)
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing computations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing computations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.ravel( dX )

    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already computed, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac

    # ---------------------------------------------------------
    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
        "Incremental product of a columnwise-list matrix with a vector"
        if not isinstance(__LMatrix, (list,tuple)):
            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
        if __dotWith is not None:
            __Idwx = numpy.ravel( __dotWith )
            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
            __Produit = numpy.zeros(__LMatrix[0].size)
            for i, col in enumerate(__LMatrix):
                __Produit += float(__Idwx[i]) * col
            return __Produit
        elif __dotTWith is not None:
            _Idwy = numpy.ravel( __dotTWith ).T
            assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
            __Produit = numpy.zeros(len(__LMatrix))
            for i, col in enumerate(__LMatrix):
                __Produit[i] = float( _Idwy @ col)
            return __Produit
        else:
            __Produit = None
        return __Produit

    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Computation of the direct value using the provided function.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        logging.debug("FDA DirectOperator computation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
        #
        return _HX

    # ---------------------------------------------------------
    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, given as a numpy.ndarray.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i] to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by (2*dXi)
        3/ Each per-component result becomes a column of the Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by dXi
        4/ Each per-component result becomes a column of the Jacobian
        """
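        # Editorial note: written as formulas, column i of the Jacobian is
        #   centered case    : J[:,i] ~ ( H(X + dX[i] e_i) - H(X - dX[i] e_i) ) / (2 dX[i])
        #   non-centered case: J[:,i] ~ ( H(X + dX[i] e_i) - H(X) ) / dX[i]
        # where e_i denotes the i-th canonical basis vector.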
        logging.debug("FDA Start of Jacobian computation")
        logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.ravel( X )
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.ravel( self.__dX )
        assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already computed, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Jacobian computation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
            logging.debug("FDA End of Jacobian computation")
            if dotWith is not None:
                return numpy.dot(_Jacobienne, numpy.ravel( dotWith ))
            elif dotTWith is not None:
                return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
        else:
            logging.debug("FDA   Jacobian computation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            if (dotWith is not None) or (dotTWith is not None):
                __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
            else:
                __Produit = None
            if __Produit is None or self.__avoidRC:
                _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
                if self.__avoidRC:
                    if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                    while len(self.__listJPCP) > self.__lenghtRJ:
                        self.__listJPCP.pop(0)
                        self.__listJPCI.pop(0)
                        self.__listJPCR.pop(0)
                        self.__listJPPN.pop(0)
                        self.__listJPIN.pop(0)
                    self.__listJPCP.append( copy.copy(_X) )
                    self.__listJPCI.append( copy.copy(_dX) )
                    self.__listJPCR.append( copy.copy(_Jacobienne) )
                    self.__listJPPN.append( numpy.linalg.norm(_X) )
                    self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
            logging.debug("FDA End of Jacobian computation")
            if __Produit is not None:
                return __Produit
            else:
                return _Jacobienne

    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Computation of the tangent using the Jacobian.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form when the second argument is None
            # ---------------------------------------------------------------
            _Jacobienne = self.TangentMatrix( X )
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X applied to dX
            # -----------------------------------------------------------
            _HtX = self.TangentMatrix( X, dotWith = dX )
            if self.__mfEnabled: return [_HtX,]
            else:                return _HtX

    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Computation of the adjoint using the Jacobian.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form when the second argument is None
            # ---------------------------------------------------------------
            _JacobienneT = self.TangentMatrix( X ).T
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X applied to Y
            # --------------------------------------------------
            _HaY = self.TangentMatrix( X, dotTWith = Y )
            if self.__mfEnabled: return [_HaY,]
            else:                return _HaY

# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return _Perturbations
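
# A minimal usage sketch (assumed sizes: state of dimension 3, 10 members):
#   Xb = numpy.zeros(3)
#   B  = numpy.eye(3)
#   E  = EnsembleOfCenteredPerturbations( Xb, B, 10 )   # E.shape == (3, 10)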

# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB and consistent with the code of PS with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            _Perturbations = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                _Perturbations = _bgcenter + _Zca
            else:
                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    #
    return _Perturbations

# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))

# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
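
# A minimal usage sketch: for an ensemble E of shape (n, m), the call
# EnsembleOfAnomalies( E ) subtracts the ensemble mean from every column, and
# __Normalisation can carry e.g. a 1/sqrt(m-1) scaling when needed.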

# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimate
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Ensures symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Ensures positivity
        __epsilon = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
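
# A minimal usage sketch (a hypothetical 3-variable, 20-member ensemble):
#   E = numpy.random.randn(3, 20)
#   C = EnsembleErrorCovariance( E )   # symmetric (3, 3) estimate
#   # C is close to numpy.cov(E), up to the small positivity shift above.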

# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handling of a zero or nearly zero covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handling of a zero or nearly zero covariance
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handling of a zero or nearly zero covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handling of a zero or nearly zero covariance
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Handling of a covariance that is a multiple of the identity
        __zero = 0.
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
    #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Handling of a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
    #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Handling of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
    #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handling of a full numpy covariance, knowing that this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
    #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble

# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
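
# A minimal usage sketch (hypothetical 2x2 covariance, multiplicative case):
#   P  = numpy.eye(2)
#   Pi = CovarianceInflation( P, "MultiplicativeOnBackgroundCovariance", 1.1 )
#   # Pi == 1.1**2 * P, per the first branch above.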

# ==============================================================================
def HessienneEstimation(selfA, nb, HaM, HtM, BI, RI):
    "Estimation of the Hessian"
    #
    HessienneI = []
    for i in range(int(nb)):
        _ee = numpy.zeros((nb,1))
        _ee[i] = 1.
        _HtEE = numpy.dot(HtM,_ee).reshape((-1,1))
        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
    #
    A = numpy.linalg.inv(numpy.array( HessienneI ))
    #
    if min(A.shape) != max(A.shape):
        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
    if (numpy.diag(A) < 0).any():
        raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
    if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
        try:
            L = numpy.linalg.cholesky( A )
        except Exception:
            raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    #
    return A
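
# Editorial note: the loop above assembles, column by column, the matrix
# B^{-1} + H^T R^{-1} H (BI and RI being the inverses, HtM/HaM the tangent and
# adjoint forms of H), whose inverse A is the classical 3DVAR a posteriori
# covariance.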

# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of the bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
            dYr = HtM @ dXr.reshape((-1,1))
            Yr = HXa.reshape((-1,1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.asarray(Hm( Xr ))
        else:
            raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Extraction of the quantiles
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else: YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0

# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Forces the bounds to be numeric values, unless globally None"
    # Keeps a default value of None when there are no bounds
    if __Bounds is None: return None
    # Converts every individual None bound to +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds[:,1]),1] = sys.float_info.max
    return __Bounds
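
# A minimal usage sketch (hypothetical bounds with missing values):
#   B = ForceNumericBounds( [[0., None], [None, 1.]] )
#   # -> [[0., +max], [-max, 1.]] with max = sys.float_info.max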

# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenters the bounds around 0, unless globally None"
    # Keeps a default value of None when there are no bounds
    if __Bounds is None: return None
    # Recenters the numeric values of the bounds
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))

# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True):
    "Applies numeric bounds to a point"
    # Keeps a default value when there are no bounds
    if __Bounds is None: return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:,0].reshape(__Vector.shape),
            __Bounds[:,1].reshape(__Vector.shape),
            )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
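
# A minimal usage sketch (hypothetical 2-component vector and bounds):
#   x = ApplyBounds( numpy.array([-1., 2.]), numpy.array([[0., 1.], [0., 1.]]) )
#   # -> array([0., 1.]), each component clipped into its [lower, upper] row.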

# ==============================================================================
def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
    "Recenters the ensemble Xn around the 3DVAR analysis"
    #
    Xf = EnsembleMean( __EnXf )
    Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
    Pf = (1 - __Betaf) * __B + __Betaf * Pf
    #
    selfB = PartialAlgorithm("3DVAR")
    selfB._parameters["Minimizer"] = "LBFGSB"
    selfB._parameters["MaximumNumberOfSteps"] = 15000
    selfB._parameters["CostDecrementTolerance"] = 1.e-7
    selfB._parameters["ProjectedGradientTolerance"] = -1
    selfB._parameters["GradientNormTolerance"] = 1.e-05
    selfB._parameters["StoreInternalVariables"] = False
    selfB._parameters["optiprint"] = -1
    selfB._parameters["optdisp"] = 0
    selfB._parameters["Bounds"] = None
    selfB._parameters["InitializationPoint"] = Xf
    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
    Xa = selfB.get("Analysis")[-1].reshape((-1,1))
    del selfB
    #
    return Xa + EnsembleOfAnomalies( __EnXn )

# ==============================================================================
def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Unscented Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    L = Xb.size
    Alpha = selfA._parameters["Alpha"]
    Beta = selfA._parameters["Beta"]
    if selfA._parameters["Kappa"] == 0:
        if selfA._parameters["EstimationOf"] == "State":
            Kappa = 0
        elif selfA._parameters["EstimationOf"] == "Parameters":
            Kappa = 3 - L
    else:
        Kappa = selfA._parameters["Kappa"]
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma = math.sqrt( L + Lambda )
    #
    Ww = []
    Ww.append( 0. )
    for i in range(2*L):
        Ww.append( 1. / (2.*(L + Lambda)) )
    #
    Wm = numpy.array( Ww )
    Wm[0] = Lambda / (L + Lambda)
    Wc = numpy.array( Ww )
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
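    # Editorial note: Wm and Wc are the mean and covariance weights of the
    # 2L+1 sigma points in the usual UKF scaling (Alpha, Beta, Kappa), with
    # Lambda = Alpha**2 (L+Kappa) - L and spread factor Gamma = sqrt(L+Lambda).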
    #
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        if hasattr(B,"asfullmatrix"):
            Pn = B.asfullmatrix(__n)
        else:
            Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
        nbSpts = 2*Xn.size+1
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
        #
        XEtnnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                if Cm is not None and Un is not None: # Caution: if Cm is also contained in M, it is counted twice!
                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                    XEtnnpi = XEtnnpi + Cm @ Un
                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # ---> By principle, M = Id, Q = 0
                XEtnnpi = Xnp[:,point]
            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
        #
        Xncm = ( XEtnnp * Wm ).sum(axis=1)
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State": Pnm = Q
        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
        for point in range(nbSpts):
            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
        #
        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
        else:
            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
        #
        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
        #
        Ynnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                Ynnpi = Hm( (Xnnp[:,point], None) )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                Ynnpi = Hm( (Xnnp[:,point], Un) )
            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
        Ynnp = numpy.concatenate( Ynnp, axis=1 )
        #
        Yncm = ( Ynnp * Wm ).sum(axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State": Pyyn = R
        elif selfA._parameters["EstimationOf"] == "Parameters": Pyyn = 0.
        Pxyn = 0.
        for point in range(nbSpts):
            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
        #
        _Innovation = Ynpu - Yncm.reshape((-1,1))
        if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Caution: if Cm is also contained in H, it is counted twice!
                _Innovation = _Innovation - Cm @ Un
        #
        Kn = Pxyn * Pyyn.I
        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
        Pn = Pnm - Kn * Pyyn * Kn.T
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xncm )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xncm - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum, for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
            if Cm is not None and Un is not None: # Caution: if Cm is also contained in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm @ Un
            Pn_predicted = Q + Mt * (Pn * Ma)
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
            _Innovation = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
            _Innovation = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Caution: if Cm is also contained in H, it is counted twice!
                _Innovation = _Innovation - Cm @ Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum, for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    EnKS (Ensemble Kalman Smoother)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observation")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observation greater than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct computation (memorization is preferred over recomputation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Caution: if Cm is also contained in M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # ---> By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS = mS.reshape((-1,__m)) # For the 1D case
            delta = RIdemi @ ( Ynpu - vZm )
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)
            wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            sEL[LagL] = EL
            #
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Keep the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last analyses, incompletely updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
1438 # ==============================================================================
1439 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
1440 VariantM="KalmanFilterFormula",
1441 Hybrid=None,
1442 ):
1443 """
1444 Ensemble-Transform EnKF
1445 """
1446 if selfA._parameters["EstimationOf"] == "Parameters":
1447 selfA._parameters["StoreInternalVariables"] = True
1450 H = HO["Direct"].appliedControledFormTo
1452 if selfA._parameters["EstimationOf"] == "State":
1453 M = EM["Direct"].appliedControledFormTo
1455 if CM is not None and "Tangent" in CM and U is not None:
1456 Cm = CM["Tangent"].asMatrix(Xb)
1457 else:
1458 Cm = None
1460 # Observation duration and sizes
1461 if hasattr(Y,"stepnumber"):
1462 duration = Y.stepnumber()
1463 __p = numpy.cumprod(Y.shape())[-1]
1464 else:
1465 duration = 2
1466 __p = numpy.array(Y).size
1468 # Precompute the inverses of B and R
1469 if selfA._parameters["StoreInternalVariables"] \
1470 or selfA._toStore("CostFunctionJ") \
1471 or selfA._toStore("CostFunctionJb") \
1472 or selfA._toStore("CostFunctionJo") \
1473 or selfA._toStore("CurrentOptimum") \
1474 or selfA._toStore("APosterioriCovariance"):
1475 BI = B.getI()
1476 RI = R.getI()
1477 elif VariantM != "KalmanFilterFormula":
1478 RI = R.getI()
1479 if VariantM == "KalmanFilterFormula":
1480 RIdemi = R.sqrtmI()
1482 __n = Xb.size
1483 __m = selfA._parameters["NumberOfMembers"]
1484 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
1485 previousJMinimum = numpy.finfo(float).max
1487 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1488 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1489 selfA.StoredVariables["Analysis"].store( Xb )
1490 if selfA._toStore("APosterioriCovariance"):
1491 if hasattr(B,"asfullmatrix"):
1492 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1494 selfA.StoredVariables["APosterioriCovariance"].store( B )
1495 selfA._setInternalState("seed", numpy.random.get_state())
1496 elif selfA._parameters["nextStep"]:
1497 Xn = selfA._getInternalState("Xn")
1499 for step in range(duration-1):
1500 numpy.random.set_state(selfA._getInternalState("seed"))
1501 if hasattr(Y,"store"):
1502 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1503 else:
1504 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1506 if U is not None:
1507 if hasattr(U,"store") and len(U)>1:
1508 Un = numpy.ravel( U[step] ).reshape((-1,1))
1509 elif hasattr(U,"store") and len(U)==1:
1510 Un = numpy.ravel( U[0] ).reshape((-1,1))
1511 else:
1512 Un = numpy.ravel( U ).reshape((-1,1))
1513 else:
1514 Un = None
1516 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1517 Xn = CovarianceInflation( Xn,
1518 selfA._parameters["InflationType"],
1519 selfA._parameters["InflationFactor"],
1520 )
1522 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1523 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1524 argsAsSerie = True,
1525 returnSerieAsArrayMatrix = True )
1526 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1527 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1528 argsAsSerie = True,
1529 returnSerieAsArrayMatrix = True )
1530 if Cm is not None and Un is not None: # Caution: counted twice if Cm is also included in M!
1531 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1532 Xn_predicted = Xn_predicted + Cm @ Un
1533 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1534 # --- > By principle, M = Id, Q = 0
1535 Xn_predicted = EMX = Xn
1536 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1537 argsAsSerie = True,
1538 returnSerieAsArrayMatrix = True )
1540 # Mean of forecast and observation of forecast
1541 Xfm = EnsembleMean( Xn_predicted )
1542 Hfm = EnsembleMean( HX_predicted )
1545 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
1546 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
1548 #--------------------------
1549 if VariantM == "KalmanFilterFormula":
1550 mS = RIdemi * EaHX / math.sqrt(__m-1)
1551 mS = mS.reshape((-1,__m)) # For the 1-dimensional case
1552 delta = RIdemi * ( Ynpu - Hfm )
1553 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1554 vw = mT @ mS.T @ delta
1556 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1557 mU = numpy.identity(__m)
1559 EaX = EaX / math.sqrt(__m-1)
1560 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1561 #--------------------------
1562 elif VariantM == "Variational":
1563 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1564 def CostFunction(w):
1565 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1566 _Jo = 0.5 * _A.T @ (RI * _A)
1567 _Jb = 0.5 * (__m-1) * w.T @ w
1568 _J = _Jo + _Jb
1569 return float(_J)
1570 def GradientOfCostFunction(w):
1571 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1572 _GradJo = - EaHX.T @ (RI * _A)
1573 _GradJb = (__m-1) * w.reshape((__m,1))
1574 _GradJ = _GradJo + _GradJb
1575 return numpy.ravel(_GradJ)
1576 vw = scipy.optimize.fmin_cg(
1577 f = CostFunction,
1578 x0 = numpy.zeros(__m),
1579 fprime = GradientOfCostFunction,
1580 args = (),
1581 disp = False,
1582 )
1584 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1585 Htb = (__m-1) * numpy.identity(__m)
1586 Hta = Hto + Htb
1588 Pta = numpy.linalg.inv( Hta )
1589 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1591 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1592 #--------------------------
1593 elif VariantM == "FiniteSize11": # Boc2011 gauge
1594 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1595 def CostFunction(w):
1596 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1597 _Jo = 0.5 * _A.T @ (RI * _A)
1598 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1599 _J = _Jo + _Jb
1600 return float(_J)
1601 def GradientOfCostFunction(w):
1602 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1603 _GradJo = - EaHX.T @ (RI * _A)
1604 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1605 _GradJ = _GradJo + _GradJb
1606 return numpy.ravel(_GradJ)
1607 vw = scipy.optimize.fmin_cg(
1608 f = CostFunction,
1609 x0 = numpy.zeros(__m),
1610 fprime = GradientOfCostFunction,
1611 args = (),
1612 disp = False,
1613 )
1615 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1616 Htb = __m * \
1617 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1618 / (1 + 1/__m + vw.T @ vw)**2
1619 Hta = Hto + Htb
1621 Pta = numpy.linalg.inv( Hta )
1622 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1624 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1625 #--------------------------
1626 elif VariantM == "FiniteSize15": # Boc2015 gauge
1627 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1628 def CostFunction(w):
1629 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1630 _Jo = 0.5 * _A.T @ (RI * _A)
1631 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1632 _J = _Jo + _Jb
1633 return float(_J)
1634 def GradientOfCostFunction(w):
1635 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1636 _GradJo = - EaHX.T @ (RI * _A)
1637 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1638 _GradJ = _GradJo + _GradJb
1639 return numpy.ravel(_GradJ)
1640 vw = scipy.optimize.fmin_cg(
1641 f = CostFunction,
1642 x0 = numpy.zeros(__m),
1643 fprime = GradientOfCostFunction,
1644 args = (),
1645 disp = False,
1646 )
1648 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1649 Htb = (__m+1) * \
1650 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1651 / (1 + 1/__m + vw.T @ vw)**2
1652 Hta = Hto + Htb
1654 Pta = numpy.linalg.inv( Hta )
1655 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1657 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1658 #--------------------------
1659 elif VariantM == "FiniteSize16": # Boc2016 gauge
1660 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1661 def CostFunction(w):
1662 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1663 _Jo = 0.5 * _A.T @ (RI * _A)
1664 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1665 _J = _Jo + _Jb
1666 return float(_J)
1667 def GradientOfCostFunction(w):
1668 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1669 _GradJo = - EaHX.T @ (RI * _A)
1670 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1671 _GradJ = _GradJo + _GradJb
1672 return numpy.ravel(_GradJ)
1673 vw = scipy.optimize.fmin_cg(
1674 f = CostFunction,
1675 x0 = numpy.zeros(__m),
1676 fprime = GradientOfCostFunction,
1677 args = (),
1678 disp = False,
1679 )
1681 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1682 Htb = ((__m+1) / (__m-1)) * \
1683 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1684 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1685 Hta = Hto + Htb
1687 Pta = numpy.linalg.inv( Hta )
1688 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1690 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1691 #--------------------------
1692 else:
1693 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1695 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1696 Xn = CovarianceInflation( Xn,
1697 selfA._parameters["InflationType"],
1698 selfA._parameters["InflationFactor"],
1699 )
1701 if Hybrid == "E3DVAR":
1702 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
1703 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
1705 Xa = EnsembleMean( Xn )
1706 #--------------------------
1707 selfA._setInternalState("Xn", Xn)
1708 selfA._setInternalState("seed", numpy.random.get_state())
1709 #--------------------------
1711 if selfA._parameters["StoreInternalVariables"] \
1712 or selfA._toStore("CostFunctionJ") \
1713 or selfA._toStore("CostFunctionJb") \
1714 or selfA._toStore("CostFunctionJo") \
1715 or selfA._toStore("APosterioriCovariance") \
1716 or selfA._toStore("InnovationAtCurrentAnalysis") \
1717 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1718 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1719 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
1720 _Innovation = Ynpu - _HXa
1722 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1723 # ---> with analysis
1724 selfA.StoredVariables["Analysis"].store( Xa )
1725 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1726 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1727 if selfA._toStore("InnovationAtCurrentAnalysis"):
1728 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1729 # ---> with current state
1730 if selfA._parameters["StoreInternalVariables"] \
1731 or selfA._toStore("CurrentState"):
1732 selfA.StoredVariables["CurrentState"].store( Xn )
1733 if selfA._toStore("ForecastState"):
1734 selfA.StoredVariables["ForecastState"].store( EMX )
1735 if selfA._toStore("ForecastCovariance"):
1736 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1737 if selfA._toStore("BMA"):
1738 selfA.StoredVariables["BMA"].store( EMX - Xa )
1739 if selfA._toStore("InnovationAtCurrentState"):
1740 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1741 if selfA._toStore("SimulatedObservationAtCurrentState") \
1742 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1743 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1745 if selfA._parameters["StoreInternalVariables"] \
1746 or selfA._toStore("CostFunctionJ") \
1747 or selfA._toStore("CostFunctionJb") \
1748 or selfA._toStore("CostFunctionJo") \
1749 or selfA._toStore("CurrentOptimum") \
1750 or selfA._toStore("APosterioriCovariance"):
1751 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1752 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1753 J = Jb + Jo
1754 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1755 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1756 selfA.StoredVariables["CostFunctionJ" ].store( J )
1758 if selfA._toStore("IndexOfOptimum") \
1759 or selfA._toStore("CurrentOptimum") \
1760 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1761 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1762 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1763 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1764 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1765 if selfA._toStore("IndexOfOptimum"):
1766 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1767 if selfA._toStore("CurrentOptimum"):
1768 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1769 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1770 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1771 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1772 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1773 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1774 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1775 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1776 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1777 if selfA._toStore("APosterioriCovariance"):
1778 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1779 if selfA._parameters["EstimationOf"] == "Parameters" \
1780 and J < previousJMinimum:
1781 previousJMinimum = J
1782 XaMin = Xa
1783 if selfA._toStore("APosterioriCovariance"):
1784 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1785 # ---> For the smoothers
1786 if selfA._toStore("CurrentEnsembleState"):
1787 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1789 # Additional final storage of the optimum, for parameter estimation
1790 # ----------------------------------------------------------------------
1791 if selfA._parameters["EstimationOf"] == "Parameters":
1792 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1793 selfA.StoredVariables["Analysis"].store( XaMin )
1794 if selfA._toStore("APosterioriCovariance"):
1795 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1796 if selfA._toStore("BMA"):
1797 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1799 return 0
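# Editorial sketch (assumption: not part of the original module): the
# "KalmanFilterFormula" branch above obtains the analysis weights from the
# closed form (I + S^T S)^{-1} S^T d, while the "Variational" branch minimizes
# an equivalent quadratic cost. With unit R and rescaled weights the two
# solutions coincide, which a few lines of numpy/scipy can check (recent
# numpy assumed for default_rng):
def _sketch_etkf_weight_equivalence(seed=0, m=5, p=3):
    rng = numpy.random.default_rng(seed)
    S = rng.standard_normal((p, m))                    # normalized obs anomalies
    d = rng.standard_normal(p)                         # normalized innovation
    w_kf = numpy.linalg.solve(numpy.identity(m) + S.T @ S, S.T @ d)
    J = lambda w: 0.5 * ((d - S @ w) ** 2).sum() + 0.5 * (w ** 2).sum()
    w_var = scipy.optimize.minimize(J, numpy.zeros(m)).x
    return numpy.allclose(w_kf, w_var, atol=1.e-5)     # expected: True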
1801 # ==============================================================================
1802 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1803 """
1804 Extended Kalman Filter
1805 """
1806 if selfA._parameters["EstimationOf"] == "Parameters":
1807 selfA._parameters["StoreInternalVariables"] = True
1810 H = HO["Direct"].appliedControledFormTo
1812 if selfA._parameters["EstimationOf"] == "State":
1813 M = EM["Direct"].appliedControledFormTo
1815 if CM is not None and "Tangent" in CM and U is not None:
1816 Cm = CM["Tangent"].asMatrix(Xb)
1817 else:
1818 Cm = None
1820 # Observation duration and sizes
1821 if hasattr(Y,"stepnumber"):
1822 duration = Y.stepnumber()
1823 __p = numpy.cumprod(Y.shape())[-1]
1824 else:
1825 duration = 2
1826 __p = numpy.array(Y).size
1828 # Precompute the inverses of B and R
1829 if selfA._parameters["StoreInternalVariables"] \
1830 or selfA._toStore("CostFunctionJ") \
1831 or selfA._toStore("CostFunctionJb") \
1832 or selfA._toStore("CostFunctionJo") \
1833 or selfA._toStore("CurrentOptimum") \
1834 or selfA._toStore("APosterioriCovariance"):
1835 BI = B.getI()
1836 RI = R.getI()
1838 __n = Xb.size
1839 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
1841 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1842 Xn = Xb
1843 Pn = B
1844 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1845 selfA.StoredVariables["Analysis"].store( Xb )
1846 if selfA._toStore("APosterioriCovariance"):
1847 if hasattr(B,"asfullmatrix"):
1848 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1850 selfA.StoredVariables["APosterioriCovariance"].store( B )
1851 selfA._setInternalState("seed", numpy.random.get_state())
1852 elif selfA._parameters["nextStep"]:
1853 Xn = selfA._getInternalState("Xn")
1854 Pn = selfA._getInternalState("Pn")
1856 if selfA._parameters["EstimationOf"] == "Parameters":
1857 XaMin = Xn
1858 previousJMinimum = numpy.finfo(float).max
1860 for step in range(duration-1):
1861 if hasattr(Y,"store"):
1862 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1863 else:
1864 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1866 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1867 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1868 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1869 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1871 if selfA._parameters["EstimationOf"] == "State":
1872 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1873 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1874 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1875 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1877 if U is not None:
1878 if hasattr(U,"store") and len(U)>1:
1879 Un = numpy.ravel( U[step] ).reshape((-1,1))
1880 elif hasattr(U,"store") and len(U)==1:
1881 Un = numpy.ravel( U[0] ).reshape((-1,1))
1882 else:
1883 Un = numpy.ravel( U ).reshape((-1,1))
1884 else:
1885 Un = None
1887 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1888 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1889 if Cm is not None and Un is not None: # Caution: counted twice if Cm is also included in M!
1890 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1891 Xn_predicted = Xn_predicted + Cm @ Un
1892 Pn_predicted = Q + Mt * (Pn * Ma)
1893 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1894 # --- > By principle, M = Id, Q = 0
1895 Xn_predicted = Xn
1896 Pn_predicted = Pn
1899 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1900 _Innovation = Ynpu - HX_predicted
1901 elif selfA._parameters["EstimationOf"] == "Parameters":
1902 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1903 _Innovation = Ynpu - HX_predicted
1904 if Cm is not None and Un is not None: # Caution: counted twice if Cm is also included in H!
1905 _Innovation = _Innovation - Cm @ Un
1907 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1908 Xn = Xn_predicted + Kn * _Innovation
1909 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1910 Xa = Xn # Pointers
1912 #--------------------------
1913 selfA._setInternalState("Xn", Xn)
1914 selfA._setInternalState("Pn", Pn)
1915 #--------------------------
1917 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1918 # ---> with analysis
1919 selfA.StoredVariables["Analysis"].store( Xa )
1920 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1921 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1922 if selfA._toStore("InnovationAtCurrentAnalysis"):
1923 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1924 # ---> with current state
1925 if selfA._parameters["StoreInternalVariables"] \
1926 or selfA._toStore("CurrentState"):
1927 selfA.StoredVariables["CurrentState"].store( Xn )
1928 if selfA._toStore("ForecastState"):
1929 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1930 if selfA._toStore("ForecastCovariance"):
1931 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1932 if selfA._toStore("BMA"):
1933 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1934 if selfA._toStore("InnovationAtCurrentState"):
1935 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1936 if selfA._toStore("SimulatedObservationAtCurrentState") \
1937 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1938 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1940 if selfA._parameters["StoreInternalVariables"] \
1941 or selfA._toStore("CostFunctionJ") \
1942 or selfA._toStore("CostFunctionJb") \
1943 or selfA._toStore("CostFunctionJo") \
1944 or selfA._toStore("CurrentOptimum") \
1945 or selfA._toStore("APosterioriCovariance"):
1946 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1947 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1948 J = Jb + Jo
1949 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1950 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1951 selfA.StoredVariables["CostFunctionJ" ].store( J )
1953 if selfA._toStore("IndexOfOptimum") \
1954 or selfA._toStore("CurrentOptimum") \
1955 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1956 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1957 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1958 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1959 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1960 if selfA._toStore("IndexOfOptimum"):
1961 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1962 if selfA._toStore("CurrentOptimum"):
1963 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1964 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1965 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1966 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1967 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1968 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1969 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1970 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1971 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1972 if selfA._toStore("APosterioriCovariance"):
1973 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1974 if selfA._parameters["EstimationOf"] == "Parameters" \
1975 and J < previousJMinimum:
1976 previousJMinimum = J
1977 XaMin = Xa
1978 if selfA._toStore("APosterioriCovariance"):
1979 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1981 # Additional final storage of the optimum, for parameter estimation
1982 # ----------------------------------------------------------------------
1983 if selfA._parameters["EstimationOf"] == "Parameters":
1984 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1985 selfA.StoredVariables["Analysis"].store( XaMin )
1986 if selfA._toStore("APosterioriCovariance"):
1987 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1988 if selfA._toStore("BMA"):
1989 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1991 return 0
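# Editorial sketch (not part of the original module): one forecast/analysis
# cycle of the extended Kalman filter above, written for plain dense numpy
# arrays (the module code works on ADAO Covariance objects instead). M and H
# stand here for the tangent matrices Mt and Ht evaluated at the current state:
def _sketch_ekf_step(x, P, y, M, H, Q, R):
    xf = M @ x                                          # state forecast
    Pf = M @ P @ M.T + Q                                # covariance forecast
    K = Pf @ H.T @ numpy.linalg.inv(R + H @ Pf @ H.T)   # Kalman gain, as Kn above
    xa = xf + K @ (y - H @ xf)                          # analysis, as Xn above
    Pa = Pf - K @ H @ Pf                                # a posteriori covariance, as Pn above
    return xa, Pa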
1993 # ==============================================================================
1994 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1995 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1996 """
1997 Iterative EnKF
1998 """
1999 if selfA._parameters["EstimationOf"] == "Parameters":
2000 selfA._parameters["StoreInternalVariables"] = True
2003 H = HO["Direct"].appliedControledFormTo
2005 if selfA._parameters["EstimationOf"] == "State":
2006 M = EM["Direct"].appliedControledFormTo
2008 if CM is not None and "Tangent" in CM and U is not None:
2009 Cm = CM["Tangent"].asMatrix(Xb)
2010 else:
2011 Cm = None
2013 # Observation duration and sizes
2014 if hasattr(Y,"stepnumber"):
2015 duration = Y.stepnumber()
2016 __p = numpy.cumprod(Y.shape())[-1]
2017 else:
2018 duration = 2
2019 __p = numpy.array(Y).size
2021 # Precompute the inverses of B and R
2022 if selfA._parameters["StoreInternalVariables"] \
2023 or selfA._toStore("CostFunctionJ") \
2024 or selfA._toStore("CostFunctionJb") \
2025 or selfA._toStore("CostFunctionJo") \
2026 or selfA._toStore("CurrentOptimum") \
2027 or selfA._toStore("APosterioriCovariance"):
2028 BI = B.getI()
2029 RI = R.getI()
2031 __n = Xb.size
2032 __m = selfA._parameters["NumberOfMembers"]
2033 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
2034 previousJMinimum = numpy.finfo(float).max
2036 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2037 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2038 else: Pn = B
2039 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2040 selfA.StoredVariables["Analysis"].store( Xb )
2041 if selfA._toStore("APosterioriCovariance"):
2042 if hasattr(B,"asfullmatrix"):
2043 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2045 selfA.StoredVariables["APosterioriCovariance"].store( B )
2046 selfA._setInternalState("seed", numpy.random.get_state())
2047 elif selfA._parameters["nextStep"]:
2048 Xn = selfA._getInternalState("Xn")
2050 for step in range(duration-1):
2051 numpy.random.set_state(selfA._getInternalState("seed"))
2052 if hasattr(Y,"store"):
2053 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2054 else:
2055 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2057 if U is not None:
2058 if hasattr(U,"store") and len(U)>1:
2059 Un = numpy.ravel( U[step] ).reshape((-1,1))
2060 elif hasattr(U,"store") and len(U)==1:
2061 Un = numpy.ravel( U[0] ).reshape((-1,1))
2062 else:
2063 Un = numpy.ravel( U ).reshape((-1,1))
2064 else:
2065 Un = None
2067 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2068 Xn = CovarianceInflation( Xn,
2069 selfA._parameters["InflationType"],
2070 selfA._parameters["InflationFactor"],
2071 )
2073 #--------------------------
2074 if VariantM == "IEnKF12":
2075 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2076 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2077 __j = 0
2078 Deltaw = 1
2079 if not BnotT:
2080 Ta = numpy.identity(__m)
2081 vw = numpy.zeros(__m)
2082 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2083 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2085 if BnotT:
2086 E1 = vx1 + _epsilon * EaX
2087 else:
2088 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2090 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2091 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2092 argsAsSerie = True,
2093 returnSerieAsArrayMatrix = True )
2094 elif selfA._parameters["EstimationOf"] == "Parameters":
2095 # --- > By principle, M = Id
2096 E2 = Xn
2097 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2098 vy1 = H((vx2, Un)).reshape((__p,1))
2100 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2101 argsAsSerie = True,
2102 returnSerieAsArrayMatrix = True )
2103 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2105 if BnotT:
2106 EaY = (HE2 - vy2) / _epsilon
2107 else:
2108 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2110 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2111 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2112 Deltaw = - numpy.linalg.solve(mH,GradJ)
2114 vw = vw + Deltaw
2116 if not BnotT:
2117 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2119 __j = __j + 1
2121 A2 = EnsembleOfAnomalies( E2 )
2123 if BnotT:
2124 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2125 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2127 Xn = vx2 + A2
2128 #--------------------------
2129 else:
2130 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2132 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2133 Xn = CovarianceInflation( Xn,
2134 selfA._parameters["InflationType"],
2135 selfA._parameters["InflationFactor"],
2136 )
2138 Xa = EnsembleMean( Xn )
2139 #--------------------------
2140 selfA._setInternalState("Xn", Xn)
2141 selfA._setInternalState("seed", numpy.random.get_state())
2142 #--------------------------
2144 if selfA._parameters["StoreInternalVariables"] \
2145 or selfA._toStore("CostFunctionJ") \
2146 or selfA._toStore("CostFunctionJb") \
2147 or selfA._toStore("CostFunctionJo") \
2148 or selfA._toStore("APosterioriCovariance") \
2149 or selfA._toStore("InnovationAtCurrentAnalysis") \
2150 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2151 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2152 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2153 _Innovation = Ynpu - _HXa
2155 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2156 # ---> with analysis
2157 selfA.StoredVariables["Analysis"].store( Xa )
2158 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2159 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2160 if selfA._toStore("InnovationAtCurrentAnalysis"):
2161 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2162 # ---> with current state
2163 if selfA._parameters["StoreInternalVariables"] \
2164 or selfA._toStore("CurrentState"):
2165 selfA.StoredVariables["CurrentState"].store( Xn )
2166 if selfA._toStore("ForecastState"):
2167 selfA.StoredVariables["ForecastState"].store( E2 )
2168 if selfA._toStore("ForecastCovariance"):
2169 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2170 if selfA._toStore("BMA"):
2171 selfA.StoredVariables["BMA"].store( E2 - Xa )
2172 if selfA._toStore("InnovationAtCurrentState"):
2173 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2174 if selfA._toStore("SimulatedObservationAtCurrentState") \
2175 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2176 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2178 if selfA._parameters["StoreInternalVariables"] \
2179 or selfA._toStore("CostFunctionJ") \
2180 or selfA._toStore("CostFunctionJb") \
2181 or selfA._toStore("CostFunctionJo") \
2182 or selfA._toStore("CurrentOptimum") \
2183 or selfA._toStore("APosterioriCovariance"):
2184 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2185 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2186 J = Jb + Jo
2187 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2188 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2189 selfA.StoredVariables["CostFunctionJ" ].store( J )
2191 if selfA._toStore("IndexOfOptimum") \
2192 or selfA._toStore("CurrentOptimum") \
2193 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2194 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2195 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2196 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2197 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2198 if selfA._toStore("IndexOfOptimum"):
2199 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2200 if selfA._toStore("CurrentOptimum"):
2201 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2202 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2203 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2204 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2205 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2206 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2207 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2208 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2209 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2210 if selfA._toStore("APosterioriCovariance"):
2211 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2212 if selfA._parameters["EstimationOf"] == "Parameters" \
2213 and J < previousJMinimum:
2214 previousJMinimum = J
2215 XaMin = Xa
2216 if selfA._toStore("APosterioriCovariance"):
2217 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2218 # ---> For the smoothers
2219 if selfA._toStore("CurrentEnsembleState"):
2220 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2222 # Additional final storage of the optimum, for parameter estimation
2223 # ----------------------------------------------------------------------
2224 if selfA._parameters["EstimationOf"] == "Parameters":
2225 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2226 selfA.StoredVariables["Analysis"].store( XaMin )
2227 if selfA._toStore("APosterioriCovariance"):
2228 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2229 if selfA._toStore("BMA"):
2230 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2232 return 0
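# Editorial sketch (not part of the original module): the IEnKF12 inner loop
# above is a Gauss-Newton iteration on the ensemble weights w. Here hx_of_w is
# a user-supplied callback (an assumption of this sketch) returning the mean
# observation prediction vy and the observation anomalies EaY for a given w;
# ri is the diagonal of R^-1 as a vector:
def _sketch_ienkf_gauss_newton(y, hx_of_w, m, ri, e=1.e-7, jmax=100):
    w = numpy.zeros((m, 1))
    dw = numpy.full((m, 1), numpy.inf)
    j = 0
    while numpy.linalg.norm(dw) >= e and j <= jmax:
        vy, EaY = hx_of_w(w)
        grad = w - EaY.T @ (ri.reshape((-1, 1)) * (y.reshape((-1, 1)) - vy))  # as GradJ above
        mH = numpy.identity(m) + EaY.T @ (ri.reshape((-1, 1)) * EaY)          # as mH above
        dw = -numpy.linalg.solve(mH, numpy.ravel(grad)).reshape((m, 1))       # as Deltaw above
        w = w + dw
        j += 1
    return w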
2234 # ==============================================================================
2235 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2236 """
2237 Incremental 3DVAR
2238 """
2242 Hm = HO["Direct"].appliedTo
2244 BI = B.getI()
2245 RI = R.getI()
2247 Xini = selfA._parameters["InitializationPoint"]
2249 HXb = numpy.ravel( Hm( Xb ) ).reshape((-1,1))
2250 Innovation = Y - HXb
2252 # Outer loop initialization
2253 iOuter = 0
2254 J = 1./mpr
2255 DeltaJ = 1./mpr
2257 Xr = Xini.reshape((-1,1))
2258 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2262 Ht = HO["Tangent"].asMatrix(Xr)
2263 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2265 # Cost function definition
2266 # ------------------------------
2267 def CostFunction(dx):
2268 _dX = numpy.ravel( dx ).reshape((-1,1))
2269 if selfA._parameters["StoreInternalVariables"] or \
2270 selfA._toStore("CurrentState") or \
2271 selfA._toStore("CurrentOptimum"):
2272 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2273 _HdX = Ht @ _dX
2274 _HdX = numpy.ravel( _HdX ).reshape((-1,1))
2275 _dInnovation = Innovation - _HdX
2276 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2277 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2278 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2279 if selfA._toStore("InnovationAtCurrentState"):
2280 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2282 Jb = float( 0.5 * _dX.T * (BI * _dX) )
2283 Jo = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
2284 J = Jb + Jo
2286 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2287 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2288 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2289 selfA.StoredVariables["CostFunctionJ" ].store( J )
2290 if selfA._toStore("IndexOfOptimum") or \
2291 selfA._toStore("CurrentOptimum") or \
2292 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2293 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2294 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2295 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2296 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2297 if selfA._toStore("IndexOfOptimum"):
2298 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2299 if selfA._toStore("CurrentOptimum"):
2300 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2301 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2302 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2303 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2304 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2305 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2306 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2307 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2308 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2309 return J
2311 def GradientOfCostFunction(dx):
2312 _dX = numpy.ravel( dx )
2313 _HdX = Ht @ _dX
2314 _HdX = numpy.ravel( _HdX ).reshape((-1,1))
2315 _dInnovation = Innovation - _HdX
2316 GradJb = BI @ _dX
2317 GradJo = - Ht.T @ (RI * _dInnovation)
2318 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2319 return GradJ
2321 # Minimization of the functional
2322 # --------------------------------
2323 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2325 if selfA._parameters["Minimizer"] == "LBFGSB":
2326 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2327 if "0.19" <= scipy.version.version <= "1.1.0":
2328 import lbfgsbhlt as optimiseur
2329 else:
2330 import scipy.optimize as optimiseur
2331 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2332 func = CostFunction,
2333 x0 = numpy.zeros(Xini.size),
2334 fprime = GradientOfCostFunction,
2335 args = (),
2336 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2337 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2338 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2339 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2340 iprint = selfA._parameters["optiprint"],
2341 )
2342 nfeval = Informations['funcalls']
2343 rc = Informations['warnflag']
2344 elif selfA._parameters["Minimizer"] == "TNC":
2345 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2346 func = CostFunction,
2347 x0 = numpy.zeros(Xini.size),
2348 fprime = GradientOfCostFunction,
2349 args = (),
2350 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2351 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2352 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2353 ftol = selfA._parameters["CostDecrementTolerance"],
2354 messages = selfA._parameters["optmessages"],
2355 )
2356 elif selfA._parameters["Minimizer"] == "CG":
2357 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2358 f = CostFunction,
2359 x0 = numpy.zeros(Xini.size),
2360 fprime = GradientOfCostFunction,
2361 args = (),
2362 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2363 gtol = selfA._parameters["GradientNormTolerance"],
2364 disp = selfA._parameters["optdisp"],
2365 full_output = True,
2366 )
2367 elif selfA._parameters["Minimizer"] == "NCG":
2368 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2369 f = CostFunction,
2370 x0 = numpy.zeros(Xini.size),
2371 fprime = GradientOfCostFunction,
2372 args = (),
2373 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2374 avextol = selfA._parameters["CostDecrementTolerance"],
2375 disp = selfA._parameters["optdisp"],
2376 full_output = True,
2377 )
2378 elif selfA._parameters["Minimizer"] == "BFGS":
2379 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2380 f = CostFunction,
2381 x0 = numpy.zeros(Xini.size),
2382 fprime = GradientOfCostFunction,
2383 args = (),
2384 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2385 gtol = selfA._parameters["GradientNormTolerance"],
2386 disp = selfA._parameters["optdisp"],
2387 full_output = True,
2388 )
2389 else:
2390 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2392 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2393 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2395 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2396 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2397 else:
2398 Minimum = Xb + Minimum.reshape((-1,1))
2400 Xr = Minimum
2401 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2402 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2403 J = selfA.StoredVariables["CostFunctionJ" ][-1]
2405 #--------------------------
2406 Xa = Xr
2407 selfA.StoredVariables["Analysis"].store( Xa )
2409 if selfA._toStore("OMA") or \
2410 selfA._toStore("SigmaObs2") or \
2411 selfA._toStore("SimulationQuantiles") or \
2412 selfA._toStore("SimulatedObservationAtOptimum"):
2413 if selfA._toStore("SimulatedObservationAtCurrentState"):
2414 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2415 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2416 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2417 else:
2418 HXa = Hm( Xa )
2420 if selfA._toStore("APosterioriCovariance") or \
2421 selfA._toStore("SimulationQuantiles") or \
2422 selfA._toStore("JacobianMatrixAtOptimum") or \
2423 selfA._toStore("KalmanGainAtOptimum"):
2424 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2425 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2426 if selfA._toStore("APosterioriCovariance") or \
2427 selfA._toStore("SimulationQuantiles") or \
2428 selfA._toStore("KalmanGainAtOptimum"):
2429 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2430 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2431 if selfA._toStore("APosterioriCovariance") or \
2432 selfA._toStore("SimulationQuantiles"):
2433 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2434 if selfA._toStore("APosterioriCovariance"):
2435 selfA.StoredVariables["APosterioriCovariance"].store( A )
2436 if selfA._toStore("JacobianMatrixAtOptimum"):
2437 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2438 if selfA._toStore("KalmanGainAtOptimum"):
2439 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2440 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2441 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2443 # Additional computations and/or storage
2444 # ---------------------------------------
2445 if selfA._toStore("Innovation") or \
2446 selfA._toStore("SigmaObs2") or \
2447 selfA._toStore("MahalanobisConsistency") or \
2448 selfA._toStore("OMB"):
2449 d = Y - HXb
2450 if selfA._toStore("Innovation"):
2451 selfA.StoredVariables["Innovation"].store( d )
2452 if selfA._toStore("BMA"):
2453 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2454 if selfA._toStore("OMA"):
2455 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2456 if selfA._toStore("OMB"):
2457 selfA.StoredVariables["OMB"].store( d )
2458 if selfA._toStore("SigmaObs2"):
2459 TraceR = R.trace(Y.size)
2460 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2461 if selfA._toStore("MahalanobisConsistency"):
2462 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2463 if selfA._toStore("SimulationQuantiles"):
2464 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2465 if selfA._toStore("SimulatedObservationAtBackground"):
2466 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2467 if selfA._toStore("SimulatedObservationAtOptimum"):
2468 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2470 return 0
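# Editorial sketch (not part of the original module): the outer/inner structure
# of the incremental 3DVAR above, with the inner quadratic solved exactly
# instead of iteratively. All arguments are plain numpy arrays and h/tangent_of
# are user callbacks, which is an assumption of this sketch:
def _sketch_incremental_3dvar(xb, y, h, tangent_of, B, R, outer=3):
    BI = numpy.linalg.inv(B)
    RI = numpy.linalg.inv(R)
    d = y - h(xb)                                   # innovation, computed once at xb
    xr = numpy.array(xb, dtype=float)
    for _ in range(outer):
        Ht = tangent_of(xr)                         # relinearize at the outer iterate
        # inner problem: min_dx 0.5 dx'BI dx + 0.5 (d - Ht dx)'RI (d - Ht dx)
        dx = numpy.linalg.solve(BI + Ht.T @ RI @ Ht, Ht.T @ RI @ d)
        xr = xb + dx                                # increments stay relative to xb
    return xr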
2472 # ==============================================================================
2473 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
2474 VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
2475 Hybrid=None,
2476 ):
2477 """
2478 Maximum Likelihood Ensemble Filter
2479 """
2480 if selfA._parameters["EstimationOf"] == "Parameters":
2481 selfA._parameters["StoreInternalVariables"] = True
2484 H = HO["Direct"].appliedControledFormTo
2486 if selfA._parameters["EstimationOf"] == "State":
2487 M = EM["Direct"].appliedControledFormTo
2489 if CM is not None and "Tangent" in CM and U is not None:
2490 Cm = CM["Tangent"].asMatrix(Xb)
2491 else:
2492 Cm = None
2494 # Observation duration and sizes
2495 if hasattr(Y,"stepnumber"):
2496 duration = Y.stepnumber()
2497 __p = numpy.cumprod(Y.shape())[-1]
2498 else:
2499 duration = 2
2500 __p = numpy.array(Y).size
2502 # Precompute the inverses of B and R
2503 if selfA._parameters["StoreInternalVariables"] \
2504 or selfA._toStore("CostFunctionJ") \
2505 or selfA._toStore("CostFunctionJb") \
2506 or selfA._toStore("CostFunctionJo") \
2507 or selfA._toStore("CurrentOptimum") \
2508 or selfA._toStore("APosterioriCovariance"):
2509 BI = B.getI()
2510 RI = R.getI()
2512 __n = Xb.size
2513 __m = selfA._parameters["NumberOfMembers"]
2514 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
2515 previousJMinimum = numpy.finfo(float).max
2517 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2518 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2519 selfA.StoredVariables["Analysis"].store( Xb )
2520 if selfA._toStore("APosterioriCovariance"):
2521 if hasattr(B,"asfullmatrix"):
2522 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2524 selfA.StoredVariables["APosterioriCovariance"].store( B )
2525 selfA._setInternalState("seed", numpy.random.get_state())
2526 elif selfA._parameters["nextStep"]:
2527 Xn = selfA._getInternalState("Xn")
2529 for step in range(duration-1):
2530 numpy.random.set_state(selfA._getInternalState("seed"))
2531 if hasattr(Y,"store"):
2532 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2533 else:
2534 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2536 if U is not None:
2537 if hasattr(U,"store") and len(U)>1:
2538 Un = numpy.ravel( U[step] ).reshape((-1,1))
2539 elif hasattr(U,"store") and len(U)==1:
2540 Un = numpy.ravel( U[0] ).reshape((-1,1))
2541 else:
2542 Un = numpy.ravel( U ).reshape((-1,1))
2543 else:
2544 Un = None
2546 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2547 Xn = CovarianceInflation( Xn,
2548 selfA._parameters["InflationType"],
2549 selfA._parameters["InflationFactor"],
2550 )
2552 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2553 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2554 argsAsSerie = True,
2555 returnSerieAsArrayMatrix = True )
2556 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2557 if Cm is not None and Un is not None: # Caution: counted twice if Cm is also included in M!
2558 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2559 Xn_predicted = Xn_predicted + Cm @ Un
2560 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2561 # --- > By principle, M = Id, Q = 0
2562 Xn_predicted = EMX = Xn
2564 #--------------------------
2565 if VariantM == "MLEF13":
2566 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2567 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2568 Ua = numpy.identity(__m)
2569 __j = 0
2570 Deltaw = 1
2571 if not BnotT:
2572 Ta = numpy.identity(__m)
2573 vw = numpy.zeros(__m)
2574 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2575 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2577 if BnotT:
2578 E1 = vx1 + _epsilon * EaX
2579 else:
2580 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2582 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2583 argsAsSerie = True,
2584 returnSerieAsArrayMatrix = True )
2585 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2587 if BnotT:
2588 EaY = (HE2 - vy2) / _epsilon
2589 else:
2590 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2592 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2593 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2594 Deltaw = - numpy.linalg.solve(mH,GradJ)
2596 vw = vw + Deltaw
2598 if not BnotT:
2599 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2601 __j = __j + 1
2603 if BnotT:
2604 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2606 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2607 #--------------------------
2608 else:
2609 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2611 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2612 Xn = CovarianceInflation( Xn,
2613 selfA._parameters["InflationType"],
2614 selfA._parameters["InflationFactor"],
2615 )
2617 if Hybrid == "E3DVAR":
2618 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
2619 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
2621 Xa = EnsembleMean( Xn )
2622 #--------------------------
2623 selfA._setInternalState("Xn", Xn)
2624 selfA._setInternalState("seed", numpy.random.get_state())
2625 #--------------------------
2627 if selfA._parameters["StoreInternalVariables"] \
2628 or selfA._toStore("CostFunctionJ") \
2629 or selfA._toStore("CostFunctionJb") \
2630 or selfA._toStore("CostFunctionJo") \
2631 or selfA._toStore("APosterioriCovariance") \
2632 or selfA._toStore("InnovationAtCurrentAnalysis") \
2633 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2634 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2635 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2636 _Innovation = Ynpu - _HXa
2638 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2639 # ---> with analysis
2640 selfA.StoredVariables["Analysis"].store( Xa )
2641 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2642 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2643 if selfA._toStore("InnovationAtCurrentAnalysis"):
2644 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2645 # ---> with current state
2646 if selfA._parameters["StoreInternalVariables"] \
2647 or selfA._toStore("CurrentState"):
2648 selfA.StoredVariables["CurrentState"].store( Xn )
2649 if selfA._toStore("ForecastState"):
2650 selfA.StoredVariables["ForecastState"].store( EMX )
2651 if selfA._toStore("ForecastCovariance"):
2652 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2653 if selfA._toStore("BMA"):
2654 selfA.StoredVariables["BMA"].store( EMX - Xa )
2655 if selfA._toStore("InnovationAtCurrentState"):
2656 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2657 if selfA._toStore("SimulatedObservationAtCurrentState") \
2658 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2659 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2661 if selfA._parameters["StoreInternalVariables"] \
2662 or selfA._toStore("CostFunctionJ") \
2663 or selfA._toStore("CostFunctionJb") \
2664 or selfA._toStore("CostFunctionJo") \
2665 or selfA._toStore("CurrentOptimum") \
2666 or selfA._toStore("APosterioriCovariance"):
2667 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2668 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2669 J = Jb + Jo
2670 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2671 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2672 selfA.StoredVariables["CostFunctionJ" ].store( J )
2674 if selfA._toStore("IndexOfOptimum") \
2675 or selfA._toStore("CurrentOptimum") \
2676 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2677 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2678 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2679 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2680 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2681 if selfA._toStore("IndexOfOptimum"):
2682 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2683 if selfA._toStore("CurrentOptimum"):
2684 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2685 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2686 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2687 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2688 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2689 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2690 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2691 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2692 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2693 if selfA._toStore("APosterioriCovariance"):
2694 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2695 if selfA._parameters["EstimationOf"] == "Parameters" \
2696 and J < previousJMinimum:
2697 previousJMinimum = J
2698 XaMin = Xa
2699 if selfA._toStore("APosterioriCovariance"):
2700 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2701 # ---> For the smoothers
2702 if selfA._toStore("CurrentEnsembleState"):
2703 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2705 # Additional final storage of the optimum, for parameter estimation
2706 # ----------------------------------------------------------------------
2707 if selfA._parameters["EstimationOf"] == "Parameters":
2708 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2709 selfA.StoredVariables["Analysis"].store( XaMin )
2710 if selfA._toStore("APosterioriCovariance"):
2711 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2712 if selfA._toStore("BMA"):
2713 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2715 return 0
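# Editorial sketch (not part of the original module): the "BnotT" switch used
# above selects between a bundle and a transform linearization when building
# the observation anomalies. A compact restatement, with h a user-supplied
# member-wise observation operator (an assumption of this sketch):
def _sketch_mlef_obs_anomalies(h, x1, EaX, Ta, m, bundle, eps=1.e-3):
    if bundle:
        E1 = x1 + eps * EaX                               # finite-difference ensemble
    else:
        E1 = x1 + math.sqrt(m - 1) * EaX @ Ta             # transform-propagated ensemble
    HE = numpy.column_stack([numpy.ravel(h(E1[:, i])) for i in range(m)])
    vy = HE.mean(axis=1, keepdims=True)
    if bundle:
        EaY = (HE - vy) / eps                             # rescale the finite differences
    else:
        EaY = (HE - vy) @ numpy.linalg.inv(Ta) / math.sqrt(m - 1)
    return vy, EaY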
2717 # ==============================================================================
2718 def mmqr(
2719 func = None,
2720 x0 = None,
2721 fprime = None,
2722 bounds = None,
2723 quantile = 0.5,
2724 maxfun = 15000,
2725 toler = 1.e-06,
2726 y = None,
2727 ):
2728 """
2729 Computer implementation of the MMQR algorithm, based on the publication:
2730 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2731 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2732 """
2734 # Retrieval of the data and initial information
2735 # --------------------------------------------------
2736 variables = numpy.ravel( x0 )
2737 mesures = numpy.ravel( y )
2738 increment = sys.float_info.max
2739 p = variables.size
2740 n = mesures.size
2741 quantile = float(quantile)
2743 # Computation of the MM parameters
2744 # ---------------------------
2745 tn = float(toler) / n
2746 e0 = -tn / math.log(tn)
2747 epsilon = (e0-tn)/(1+math.log(e0))
2749 # Initialization computations
2750 # ------------------------
2751 residus = mesures - numpy.ravel( func( variables ) )
2752 poids = 1./(epsilon+numpy.abs(residus))
2753 veps = 1. - 2. * quantile - residus * poids
2754 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2755 iteration = 0
2757 # Iterative search
2758 # -------------------
2759 while (increment > toler) and (iteration < maxfun) :
2760 iteration += 1
2762 Derivees = numpy.array(fprime(variables))
2763 Derivees = Derivees.reshape(n,p) # ADAO & check shape
2764 DeriveesT = Derivees.transpose()
2765 M = numpy.dot( DeriveesT , (numpy.tile( poids, (p,1) ).T * Derivees) )
2766 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2767 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2769 variables = variables + step
2770 if bounds is not None:
2771 # Caution: avoid an infinite loop if an interval is too small
2772 while( (variables < numpy.ravel(numpy.asarray(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asarray(bounds)[:,1])).any() ):
2773 step = step/2.
2774 variables = variables - step
2775 residus = mesures - numpy.ravel( func(variables) )
2776 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2778 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2779 step = step/2.
2780 variables = variables - step
2781 residus = mesures - numpy.ravel( func(variables) )
2782 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2784 increment = lastsurrogate-surrogate
2785 poids = 1./(epsilon+numpy.abs(residus))
2786 veps = 1. - 2. * quantile - residus * poids
2787 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2789 # Discrepancy measurement
2791 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2793 return variables, Ecart, [n,p,iteration,increment,0]
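# Editorial usage sketch with hypothetical data (not part of the original
# module): median regression of a straight line with the mmqr routine above;
# func and fprime follow its calling convention (model values and Jacobian at
# the current coefficients):
def _sketch_mmqr_median_line():
    t = numpy.linspace(0., 1., 50)
    def func(coeffs):
        a, b = numpy.ravel(coeffs)
        return a * t + b                            # model evaluated at the sample points
    def fprime(coeffs):
        return numpy.column_stack((t, numpy.ones_like(t)))  # d(model)/d(a,b), shape (50, 2)
    y = 2. * t + 1.                                 # synthetic, noise-free observations
    coeffs, ecart, infos = mmqr(func=func, x0=[0., 0.], fprime=fprime, quantile=0.5, y=y)
    return coeffs                                   # should approach (2., 1.)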
2795 # ==============================================================================
2796 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2797 """
2798 Multi-step and multi-method 3DVAR
2799 """
2803 if selfA._parameters["EstimationOf"] == "State":
2804 M = EM["Direct"].appliedControledFormTo
2805 if CM is not None and "Tangent" in CM and U is not None:
2806 Cm = CM["Tangent"].asMatrix(Xb)
2807 else:
2808 Cm = None
2810 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2811 Xn = numpy.ravel(Xb).reshape((-1,1))
2812 selfA.StoredVariables["Analysis"].store( Xn )
2813 if selfA._toStore("APosterioriCovariance"):
2814 if hasattr(B,"asfullmatrix"):
2815 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2817 selfA.StoredVariables["APosterioriCovariance"].store( B )
2818 if selfA._toStore("ForecastState"):
2819 selfA.StoredVariables["ForecastState"].store( Xn )
2820 elif selfA._parameters["nextStep"]:
2821 Xn = selfA._getInternalState("Xn")
2822 else:
2823 Xn = numpy.ravel(Xb).reshape((-1,1))
2825 if hasattr(Y,"stepnumber"):
2826 duration = Y.stepnumber()
2827 else:
2828 duration = 2
2831 for step in range(duration-1):
2832 if hasattr(Y,"store"):
2833 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2834 else:
2835 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2837 if U is not None:
2838 if hasattr(U,"store") and len(U)>1:
2839 Un = numpy.ravel( U[step] ).reshape((-1,1))
2840 elif hasattr(U,"store") and len(U)==1:
2841 Un = numpy.ravel( U[0] ).reshape((-1,1))
2842 else:
2843 Un = numpy.ravel( U ).reshape((-1,1))
2844 else:
2845 Un = None
2847 if selfA._parameters["EstimationOf"] == "State": # Forecast
2848 Xn_predicted = M( (Xn, Un) )
2849 if selfA._toStore("ForecastState"):
2850 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2851 if Cm is not None and Un is not None: # Caution: counted twice if Cm is also included in M!
2852 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2853 Xn_predicted = Xn_predicted + Cm @ Un
2854 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2855 # --- > By principle, M = Id, Q = 0
2856 Xn_predicted = Xn
2857 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2859 oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
2861 Xn = selfA.StoredVariables["Analysis"][-1]
2862 #--------------------------
2863 selfA._setInternalState("Xn", Xn)
2865 return 0
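# Editorial sketch (not part of the original module): the multi-step driver
# pattern implemented above, reduced to its skeleton. forecast and one_cycle
# are user callbacks (an assumption of this sketch); one_cycle plays the role
# of the "oneCycle" argument, e.g. a single-step 3DVAR:
def _sketch_multistep_driver(x0, observations, forecast, one_cycle):
    xn = numpy.ravel(x0).reshape((-1, 1))
    analyses = [xn]
    for y in observations:                          # one assimilation cycle per observation
        xf = forecast(xn)                           # forecast step (identity for parameters)
        xn = one_cycle(xf, numpy.ravel(y).reshape((-1, 1)))
        analyses.append(xn)
    return analyses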
2867 # ==============================================================================
2868 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2869 """
2870 3DVAR PSAS
2871 """
2875 Hm = HO["Direct"].appliedTo
2877 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2878 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2879 else:
2880 HXb = Hm( Xb )
2881 HXb = numpy.ravel( HXb ).reshape((-1,1))
2882 if Y.size != HXb.size:
2883 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2884 if max(Y.shape) != max(HXb.shape):
2885 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2887 if selfA._toStore("JacobianMatrixAtBackground"):
2888 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2889 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2890 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2892 Ht = HO["Tangent"].asMatrix(Xb)
2893 BHT = B * Ht.T
2894 HBHTpR = R + Ht * BHT
2895 Innovation = Y - HXb
2897 Xini = numpy.zeros(Y.size) # the PSAS control variable lives in observation space
2899 # Cost function definition
2900 # ------------------------------
2901 def CostFunction(w):
2902 _W = w.reshape((-1,1))
2903 if selfA._parameters["StoreInternalVariables"] or \
2904 selfA._toStore("CurrentState") or \
2905 selfA._toStore("CurrentOptimum"):
2906 selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2907 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2908 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2909 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2910 if selfA._toStore("InnovationAtCurrentState"):
2911 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2913 Jb = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2914 Jo = float( - _W.T @ Innovation )
2917 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2918 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2919 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2920 selfA.StoredVariables["CostFunctionJ" ].store( J )
2921 if selfA._toStore("IndexOfOptimum") or \
2922 selfA._toStore("CurrentOptimum") or \
2923 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2924 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2925 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2926 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2927 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2928 if selfA._toStore("IndexOfOptimum"):
2929 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2930 if selfA._toStore("CurrentOptimum"):
2931 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2932 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2933 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2934 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2935 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2936 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2937 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2938 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2939 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2942 def GradientOfCostFunction(w):
2943 _W = w.reshape((-1,1))
2944 GradJb = HBHTpR @ _W
2945 GradJo = - Innovation
2946 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2949 # Minimization of the functional
2950 # --------------------------------
2951 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2953 if selfA._parameters["Minimizer"] == "LBFGSB":
2954 if "0.19" <= scipy.version.version <= "1.1.0":
2955 import lbfgsbhlt as optimiseur
2957 import scipy.optimize as optimiseur
2958 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2959 func = CostFunction,
2961 fprime = GradientOfCostFunction,
2963 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2964 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2965 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2966 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2967 iprint = selfA._parameters["optiprint"],
2969 nfeval = Informations['funcalls']
2970 rc = Informations['warnflag']
2971 elif selfA._parameters["Minimizer"] == "TNC":
2972 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2973 func = CostFunction,
2975 fprime = GradientOfCostFunction,
2977 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2978 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2979 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2980 ftol = selfA._parameters["CostDecrementTolerance"],
2981 messages = selfA._parameters["optmessages"],
2983 elif selfA._parameters["Minimizer"] == "CG":
2984 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2987 fprime = GradientOfCostFunction,
2989 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2990 gtol = selfA._parameters["GradientNormTolerance"],
2991 disp = selfA._parameters["optdisp"],
2994 elif selfA._parameters["Minimizer"] == "NCG":
2995 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2998 fprime = GradientOfCostFunction,
3000 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3001 avextol = selfA._parameters["CostDecrementTolerance"],
3002 disp = selfA._parameters["optdisp"],
3005 elif selfA._parameters["Minimizer"] == "BFGS":
3006 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3009 fprime = GradientOfCostFunction,
3011 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3012 gtol = selfA._parameters["GradientNormTolerance"],
3013 disp = selfA._parameters["optdisp"],
3017 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3019 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3020 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3022 # Workaround for a TNC bug affecting the returned Minimum
3023 # ----------------------------------------------------------------
3024 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3025 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3027 Minimum = Xb + BHT @ Minimum.reshape((-1,1))
3030 #--------------------------
3032 selfA.StoredVariables["Analysis"].store( Xa )
3034 if selfA._toStore("OMA") or \
3035 selfA._toStore("SigmaObs2") or \
3036 selfA._toStore("SimulationQuantiles") or \
3037 selfA._toStore("SimulatedObservationAtOptimum"):
3038 if selfA._toStore("SimulatedObservationAtCurrentState"):
3039 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3040 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3041 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3045 if selfA._toStore("APosterioriCovariance") or \
3046 selfA._toStore("SimulationQuantiles") or \
3047 selfA._toStore("JacobianMatrixAtOptimum") or \
3048 selfA._toStore("KalmanGainAtOptimum"):
3049 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3050 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3051 if selfA._toStore("APosterioriCovariance") or \
3052 selfA._toStore("SimulationQuantiles") or \
3053 selfA._toStore("KalmanGainAtOptimum"):
3054 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3055 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3056 if selfA._toStore("APosterioriCovariance") or \
3057 selfA._toStore("SimulationQuantiles"):
3060 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3061 if selfA._toStore("APosterioriCovariance"):
3062 selfA.StoredVariables["APosterioriCovariance"].store( A )
3063 if selfA._toStore("JacobianMatrixAtOptimum"):
3064 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3065 if selfA._toStore("KalmanGainAtOptimum"):
3066 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3067 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3068 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3070 # Additional computations and/or storage
3071 # ---------------------------------------
3072 if selfA._toStore("Innovation") or \
3073 selfA._toStore("SigmaObs2") or \
3074 selfA._toStore("MahalanobisConsistency") or \
3075 selfA._toStore("OMB"):
3077 if selfA._toStore("Innovation"):
3078 selfA.StoredVariables["Innovation"].store( d )
3079 if selfA._toStore("BMA"):
3080 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3081 if selfA._toStore("OMA"):
3082 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3083 if selfA._toStore("OMB"):
3084 selfA.StoredVariables["OMB"].store( d )
3085 if selfA._toStore("SigmaObs2"):
3086 TraceR = R.trace(Y.size)
3087 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3088 if selfA._toStore("MahalanobisConsistency"):
3089 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3090 if selfA._toStore("SimulationQuantiles"):
3091 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3092 if selfA._toStore("SimulatedObservationAtBackground"):
3093 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3094 if selfA._toStore("SimulatedObservationAtOptimum"):
3095 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
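# --- Illustrative sketch (not part of the library) ---------------------------
# The PSAS formulation above minimizes, in observation space, the dual
# functional J(w) = 1/2 w^T (H B H^T + R) w - w^T d and maps back through
# Xa = Xb + B H^T w. For a linear H the stationary point solves a linear
# system, and the result coincides with the primal analysis computed with
# explicit inverses of B and R. Everything below is hypothetical data; a
# direct solve replaces the iterative minimizers used above.
def _demo_psas_dual():
    import numpy
    H = numpy.array([[1., 0.], [0., 1.], [1., 1.]])   # linear observation operator
    B = numpy.diag([1.0, 2.0])
    R = 0.5 * numpy.eye(3)
    xb = numpy.zeros(2)
    y = numpy.array([1.0, 2.0, 3.5])
    d = y - H @ xb                                    # innovation
    BHT = B @ H.T
    HBHTpR = H @ BHT + R
    # The gradient (H B H^T + R) w - d vanishes at the optimum of J(w)
    w = numpy.linalg.solve(HBHTpR, d)
    xa_dual = xb + BHT @ w
    # Primal analysis for comparison: xa = xb + (B^-1 + H^T R^-1 H)^-1 H^T R^-1 d
    BI, RI = numpy.linalg.inv(B), numpy.linalg.inv(R)
    xa_primal = xb + numpy.linalg.solve(BI + H.T @ RI @ H, H.T @ RI @ d)
    assert numpy.allclose(xa_dual, xa_primal)
    return xa_dual
# ------------------------------------------------------------------------------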
3099 # ==============================================================================
3100 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
3101 VariantM="KalmanFilterFormula16",
3107 if selfA._parameters["EstimationOf"] == "Parameters":
3108 selfA._parameters["StoreInternalVariables"] = True
3111 H = HO["Direct"].appliedControledFormTo
3113 if selfA._parameters["EstimationOf"] == "State":
3114 M = EM["Direct"].appliedControledFormTo
3116 if CM is not None and "Tangent" in CM and U is not None:
3117 Cm = CM["Tangent"].asMatrix(Xb)
3121 # Observation duration and sizes
3122 if hasattr(Y,"stepnumber"):
3123 duration = Y.stepnumber()
3124 __p = numpy.cumprod(Y.shape())[-1]
3127 __p = numpy.array(Y).size
3129 # Precompute the inverses of B and R
3130 if selfA._parameters["StoreInternalVariables"] \
3131 or selfA._toStore("CostFunctionJ") \
3132 or selfA._toStore("CostFunctionJb") \
3133 or selfA._toStore("CostFunctionJo") \
3134 or selfA._toStore("CurrentOptimum") \
3135 or selfA._toStore("APosterioriCovariance"):
3140 __m = selfA._parameters["NumberOfMembers"]
3141 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
3142 previousJMinimum = numpy.finfo(float).max
3144 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3147 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3148 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3150 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3151 selfA.StoredVariables["Analysis"].store( Xb )
3152 if selfA._toStore("APosterioriCovariance"):
3153 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3154 selfA._setInternalState("seed", numpy.random.get_state())
3155 elif selfA._parameters["nextStep"]:
3156 Xn = selfA._getInternalState("Xn")
3158 for step in range(duration-1):
3159 numpy.random.set_state(selfA._getInternalState("seed"))
3160 if hasattr(Y,"store"):
3161 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3163 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3166 if hasattr(U,"store") and len(U)>1:
3167 Un = numpy.ravel( U[step] ).reshape((-1,1))
3168 elif hasattr(U,"store") and len(U)==1:
3169 Un = numpy.ravel( U[0] ).reshape((-1,1))
3171 Un = numpy.ravel( U ).reshape((-1,1))
3175 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3176 Xn = CovarianceInflation( Xn,
3177 selfA._parameters["InflationType"],
3178 selfA._parameters["InflationFactor"],
3181 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3182 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3184 returnSerieAsArrayMatrix = True )
3185 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3186 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3188 returnSerieAsArrayMatrix = True )
3189 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, this term is duplicated!
3190 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3191 Xn_predicted = Xn_predicted + Cm @ Un
3192 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3193 # ---> By principle, M = Id, Q = 0
3194 Xn_predicted = EMX = Xn
3195 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3197 returnSerieAsArrayMatrix = True )
3199 # Mean of forecast and observation of forecast
3200 Xfm = EnsembleMean( Xn_predicted )
3201 Hfm = EnsembleMean( HX_predicted )
3203 #--------------------------
3204 if VariantM == "KalmanFilterFormula05":
3205 PfHT, HPfHT = 0., 0.
3206 for i in range(__m):
3207 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3208 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3209 PfHT += Exfi * Eyfi.T
3210 HPfHT += Eyfi * Eyfi.T
3211 PfHT = (1./(__m-1)) * PfHT
3212 HPfHT = (1./(__m-1)) * HPfHT
3213 Kn = PfHT * ( R + HPfHT ).I
3216 for i in range(__m):
3217 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3218 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3219 #--------------------------
3220 elif VariantM == "KalmanFilterFormula16":
3221 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3222 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3224 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3225 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3227 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3229 for i in range(__m):
3230 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
3231 #--------------------------
3233 raise ValueError("VariantM has to be chosen in the authorized methods list.")
3235 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3236 Xn = CovarianceInflation( Xn,
3237 selfA._parameters["InflationType"],
3238 selfA._parameters["InflationFactor"],
3241 if Hybrid == "E3DVAR":
3242 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
3243 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
3245 Xa = EnsembleMean( Xn )
3246 #--------------------------
3247 selfA._setInternalState("Xn", Xn)
3248 selfA._setInternalState("seed", numpy.random.get_state())
3249 #--------------------------
3251 if selfA._parameters["StoreInternalVariables"] \
3252 or selfA._toStore("CostFunctionJ") \
3253 or selfA._toStore("CostFunctionJb") \
3254 or selfA._toStore("CostFunctionJo") \
3255 or selfA._toStore("APosterioriCovariance") \
3256 or selfA._toStore("InnovationAtCurrentAnalysis") \
3257 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3258 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3259 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
3260 _Innovation = Ynpu - _HXa
3262 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3263 # ---> with analysis
3264 selfA.StoredVariables["Analysis"].store( Xa )
3265 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3266 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3267 if selfA._toStore("InnovationAtCurrentAnalysis"):
3268 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3269 # ---> with current state
3270 if selfA._parameters["StoreInternalVariables"] \
3271 or selfA._toStore("CurrentState"):
3272 selfA.StoredVariables["CurrentState"].store( Xn )
3273 if selfA._toStore("ForecastState"):
3274 selfA.StoredVariables["ForecastState"].store( EMX )
3275 if selfA._toStore("ForecastCovariance"):
3276 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3277 if selfA._toStore("BMA"):
3278 selfA.StoredVariables["BMA"].store( EMX - Xa )
3279 if selfA._toStore("InnovationAtCurrentState"):
3280 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3281 if selfA._toStore("SimulatedObservationAtCurrentState") \
3282 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3283 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3285 if selfA._parameters["StoreInternalVariables"] \
3286 or selfA._toStore("CostFunctionJ") \
3287 or selfA._toStore("CostFunctionJb") \
3288 or selfA._toStore("CostFunctionJo") \
3289 or selfA._toStore("CurrentOptimum") \
3290 or selfA._toStore("APosterioriCovariance"):
3291 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3292 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3294 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3295 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3296 selfA.StoredVariables["CostFunctionJ" ].store( J )
3298 if selfA._toStore("IndexOfOptimum") \
3299 or selfA._toStore("CurrentOptimum") \
3300 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3301 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3302 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3303 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3304 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3305 if selfA._toStore("IndexOfOptimum"):
3306 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3307 if selfA._toStore("CurrentOptimum"):
3308 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3309 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3310 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3311 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3312 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3313 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3314 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3315 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3316 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3317 if selfA._toStore("APosterioriCovariance"):
3318 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3319 if selfA._parameters["EstimationOf"] == "Parameters" \
3320 and J < previousJMinimum:
3321 previousJMinimum = J
3323 if selfA._toStore("APosterioriCovariance"):
3324 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3325 # ---> For smoothers
3326 if selfA._toStore("CurrentEnsembleState"):
3327 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3329 # Final additional storage of the optimum in parameter estimation
3330 # ----------------------------------------------------------------------
3331 if selfA._parameters["EstimationOf"] == "Parameters":
3332 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3333 selfA.StoredVariables["Analysis"].store( XaMin )
3334 if selfA._toStore("APosterioriCovariance"):
3335 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3336 if selfA._toStore("BMA"):
3337 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
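# --- Illustrative sketch (not part of the library) ---------------------------
# A hedged, self-contained example of the anomaly-based stochastic EnKF gain
# of the "KalmanFilterFormula16" branch above: with EaX and EaY the state and
# perturbed-observation anomalies scaled by 1/sqrt(m-1), the gain is
# Kn = EaX EaY^T (EaY EaY^T)^-1 and each member is pulled toward its own
# perturbed observation. Operator, sizes and values are hypothetical.
def _demo_enkf_formula16_gain():
    import math
    import numpy
    rng = numpy.random.default_rng(0)
    n, p, m = 3, 2, 50                                # state, obs, members
    H = numpy.hstack([numpy.eye(p), numpy.zeros((p, n - p))])
    R = 0.25 * numpy.eye(p)
    Xf = 1.0 + rng.standard_normal((n, m))            # forecast ensemble
    Yobs = numpy.array([1.2, 0.8]).reshape((p, 1))
    EpY = Yobs + rng.multivariate_normal(numpy.zeros(p), R, size=m).T
    HXf = H @ Xf
    EaX = (Xf - Xf.mean(axis=1, keepdims=True)) / math.sqrt(m - 1)
    EaY = ((HXf - HXf.mean(axis=1, keepdims=True))
           - (EpY - EpY.mean(axis=1, keepdims=True))) / math.sqrt(m - 1)
    Kn = EaX @ EaY.T @ numpy.linalg.inv(EaY @ EaY.T)
    Xa = Xf + Kn @ (EpY - HXf)                        # member-wise update
    return Xa.mean(axis=1)
# ------------------------------------------------------------------------------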
3341 # ==============================================================================
3342 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3349 Hm = HO["Direct"].appliedTo
3350 Ha = HO["Adjoint"].appliedInXTo
3352 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3353 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3356 HXb = HXb.reshape((-1,1))
3357 if Y.size != HXb.size:
3358 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
3359 if max(Y.shape) != max(HXb.shape):
3360 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
3362 if selfA._toStore("JacobianMatrixAtBackground"):
3363 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3364 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3365 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3370 Xini = selfA._parameters["InitializationPoint"]
3372 # Definition of the cost function
3373 # ------------------------------
3374 def CostFunction(x):
3375 _X = numpy.ravel( x ).reshape((-1,1))
3376 if selfA._parameters["StoreInternalVariables"] or \
3377 selfA._toStore("CurrentState") or \
3378 selfA._toStore("CurrentOptimum"):
3379 selfA.StoredVariables["CurrentState"].store( _X )
3380 _HX = Hm( _X ).reshape((-1,1))
3381 _Innovation = Y - _HX
3382 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3383 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3384 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3385 if selfA._toStore("InnovationAtCurrentState"):
3386 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3388 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3389 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3392 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3393 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3394 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3395 selfA.StoredVariables["CostFunctionJ" ].store( J )
3396 if selfA._toStore("IndexOfOptimum") or \
3397 selfA._toStore("CurrentOptimum") or \
3398 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3399 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3400 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3401 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3402 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3403 if selfA._toStore("IndexOfOptimum"):
3404 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3405 if selfA._toStore("CurrentOptimum"):
3406 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3407 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3408 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3409 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3410 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3411 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3412 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3413 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3414 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3417 def GradientOfCostFunction(x):
3418 _X = x.reshape((-1,1))
3419 _HX = Hm( _X ).reshape((-1,1))
3420 GradJb = BI * (_X - Xb)
3421 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3422 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3425 # Minimization of the functional
3426 # --------------------------------
3427 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3429 if selfA._parameters["Minimizer"] == "LBFGSB":
3430 if "0.19" <= scipy.version.version <= "1.1.0":
3431 import lbfgsbhlt as optimiseur
3433 import scipy.optimize as optimiseur
3434 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3435 func = CostFunction,
3437 fprime = GradientOfCostFunction,
3439 bounds = selfA._parameters["Bounds"],
3440 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3441 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3442 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3443 iprint = selfA._parameters["optiprint"],
3445 nfeval = Informations['funcalls']
3446 rc = Informations['warnflag']
3447 elif selfA._parameters["Minimizer"] == "TNC":
3448 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3449 func = CostFunction,
3451 fprime = GradientOfCostFunction,
3453 bounds = selfA._parameters["Bounds"],
3454 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3455 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3456 ftol = selfA._parameters["CostDecrementTolerance"],
3457 messages = selfA._parameters["optmessages"],
3459 elif selfA._parameters["Minimizer"] == "CG":
3460 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3463 fprime = GradientOfCostFunction,
3465 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3466 gtol = selfA._parameters["GradientNormTolerance"],
3467 disp = selfA._parameters["optdisp"],
3470 elif selfA._parameters["Minimizer"] == "NCG":
3471 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3474 fprime = GradientOfCostFunction,
3476 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3477 avextol = selfA._parameters["CostDecrementTolerance"],
3478 disp = selfA._parameters["optdisp"],
3481 elif selfA._parameters["Minimizer"] == "BFGS":
3482 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3485 fprime = GradientOfCostFunction,
3487 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3488 gtol = selfA._parameters["GradientNormTolerance"],
3489 disp = selfA._parameters["optdisp"],
3493 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3495 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3496 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3498 # Workaround for a TNC bug affecting the returned Minimum
3499 # ----------------------------------------------------------------
3500 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3501 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3504 #--------------------------
3506 selfA.StoredVariables["Analysis"].store( Xa )
3508 if selfA._toStore("OMA") or \
3509 selfA._toStore("SigmaObs2") or \
3510 selfA._toStore("SimulationQuantiles") or \
3511 selfA._toStore("SimulatedObservationAtOptimum"):
3512 if selfA._toStore("SimulatedObservationAtCurrentState"):
3513 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3514 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3515 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3519 if selfA._toStore("APosterioriCovariance") or \
3520 selfA._toStore("SimulationQuantiles") or \
3521 selfA._toStore("JacobianMatrixAtOptimum") or \
3522 selfA._toStore("KalmanGainAtOptimum"):
3523 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3524 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3525 if selfA._toStore("APosterioriCovariance") or \
3526 selfA._toStore("SimulationQuantiles") or \
3527 selfA._toStore("KalmanGainAtOptimum"):
3528 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3529 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3530 if selfA._toStore("APosterioriCovariance") or \
3531 selfA._toStore("SimulationQuantiles"):
3532 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3533 if selfA._toStore("APosterioriCovariance"):
3534 selfA.StoredVariables["APosterioriCovariance"].store( A )
3535 if selfA._toStore("JacobianMatrixAtOptimum"):
3536 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3537 if selfA._toStore("KalmanGainAtOptimum"):
3538 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3539 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3540 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3542 # Additional computations and/or storage
3543 # ---------------------------------------
3544 if selfA._toStore("Innovation") or \
3545 selfA._toStore("SigmaObs2") or \
3546 selfA._toStore("MahalanobisConsistency") or \
3547 selfA._toStore("OMB"):
3549 if selfA._toStore("Innovation"):
3550 selfA.StoredVariables["Innovation"].store( d )
3551 if selfA._toStore("BMA"):
3552 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3553 if selfA._toStore("OMA"):
3554 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3555 if selfA._toStore("OMB"):
3556 selfA.StoredVariables["OMB"].store( d )
3557 if selfA._toStore("SigmaObs2"):
3558 TraceR = R.trace(Y.size)
3559 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3560 if selfA._toStore("MahalanobisConsistency"):
3561 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3562 if selfA._toStore("SimulationQuantiles"):
3563 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3564 if selfA._toStore("SimulatedObservationAtBackground"):
3565 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3566 if selfA._toStore("SimulatedObservationAtOptimum"):
3567 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
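# --- Illustrative sketch (not part of the library) ---------------------------
# The functional minimized by std3dvar above is, for a linear H,
# J(x) = 1/2 (x-xb)^T B^-1 (x-xb) + 1/2 (y-Hx)^T R^-1 (y-Hx). The hedged
# sketch below feeds J and its analytic gradient to scipy.optimize.minimize
# (L-BFGS-B, a close relative of the "LBFGSB" branch above) and checks the
# result against the closed-form linear analysis. Data are hypothetical.
def _demo_std3dvar_linear():
    import numpy
    import scipy.optimize
    H = numpy.array([[1., 0.], [1., 1.]])
    BI = numpy.linalg.inv(numpy.diag([1.0, 4.0]))     # B^-1
    RI = numpy.linalg.inv(0.5 * numpy.eye(2))         # R^-1
    xb = numpy.zeros(2)
    y = numpy.array([1.0, 3.0])
    def J(x):
        dxb, dy = x - xb, y - H @ x
        return 0.5 * dxb @ BI @ dxb + 0.5 * dy @ RI @ dy
    def gradJ(x):
        return BI @ (x - xb) - H.T @ RI @ (y - H @ x)
    res = scipy.optimize.minimize(J, xb, jac=gradJ, method="L-BFGS-B",
                                  options={"ftol": 1e-14, "gtol": 1e-12})
    # Closed-form analysis: xa = xb + (B^-1 + H^T R^-1 H)^-1 H^T R^-1 (y - H xb)
    xa = xb + numpy.linalg.solve(BI + H.T @ RI @ H, H.T @ RI @ (y - H @ xb))
    assert numpy.allclose(res.x, xa, atol=1e-6)
    return res.x
# ------------------------------------------------------------------------------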
3571 # ==============================================================================
3572 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3581 Hm = HO["Direct"].appliedControledFormTo
3582 Mm = EM["Direct"].appliedControledFormTo
3584 if CM is not None and "Tangent" in CM and U is not None:
3585 Cm = CM["Tangent"].asMatrix(Xb)
3591 if hasattr(U,"store") and 1<=_step<len(U) :
3592 _Un = numpy.ravel( U[_step] ).reshape((-1,1))
3593 elif hasattr(U,"store") and len(U)==1:
3594 _Un = numpy.ravel( U[0] ).reshape((-1,1))
3596 _Un = numpy.ravel( U ).reshape((-1,1))
3601 if Cm is not None and _un is not None: # Beware: if Cm is also included in M, this term is duplicated!
3602 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3603 _CmUn = (_Cm @ _un).reshape((-1,1))
3608 # Remark: observations are used starting from time step number 1, and
3609 # they are stored in Yo as indexed by these step numbers. Step 0 is
3610 # therefore not used, since the first stage starts with the
3611 # observation of step 1.
3613 # Number of steps identical to the number of observation steps
3614 if hasattr(Y,"stepnumber"):
3615 duration = Y.stepnumber()
3619 # Precompute the inverses of B and R
3623 # Starting point of the optimization
3624 Xini = selfA._parameters["InitializationPoint"]
3626 # Definition of the cost function
3627 # ------------------------------
3628 selfA.DirectCalculation = [None,] # Step 0 is not observed
3629 selfA.DirectInnovation = [None,] # Step 0 is not observed
3630 def CostFunction(x):
3631 _X = numpy.ravel( x ).reshape((-1,1))
3632 if selfA._parameters["StoreInternalVariables"] or \
3633 selfA._toStore("CurrentState") or \
3634 selfA._toStore("CurrentOptimum"):
3635 selfA.StoredVariables["CurrentState"].store( _X )
3636 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3637 selfA.DirectCalculation = [None,]
3638 selfA.DirectInnovation = [None,]
3641 for step in range(0,duration-1):
3642 if hasattr(Y,"store"):
3643 _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
3645 _Ynpu = numpy.ravel( Y ).reshape((-1,1))
3649 if selfA._parameters["EstimationOf"] == "State":
3650 _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
3651 elif selfA._parameters["EstimationOf"] == "Parameters":
3654 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3655 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3657 # Difference-to-observations step
3658 if selfA._parameters["EstimationOf"] == "State":
3659 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
3660 elif selfA._parameters["EstimationOf"] == "Parameters":
3661 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
3663 # Store the state
3664 selfA.DirectCalculation.append( _Xn )
3665 selfA.DirectInnovation.append( _YmHMX )
3667 # Accumulation into the observation functional
3668 Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
3671 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3672 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3673 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3674 selfA.StoredVariables["CostFunctionJ" ].store( J )
3675 if selfA._toStore("IndexOfOptimum") or \
3676 selfA._toStore("CurrentOptimum") or \
3677 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3678 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3679 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3680 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3681 if selfA._toStore("IndexOfOptimum"):
3682 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3683 if selfA._toStore("CurrentOptimum"):
3684 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3685 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3686 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3687 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3688 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3689 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3690 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3693 def GradientOfCostFunction(x):
3694 _X = numpy.ravel( x ).reshape((-1,1))
3695 GradJb = BI * (_X - Xb)
3697 for step in range(duration-1,0,-1):
3698 # Retrieve the last stored evolution state
3699 _Xn = selfA.DirectCalculation.pop()
3700 # Retrieve the last stored innovation
3701 _YmHMX = selfA.DirectInnovation.pop()
3702 # Compute the adjoint operators
3703 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3704 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3705 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3706 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3707 # Gradient computation by adjoint state (see the sketch after this function)
3708 GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3709 GradJo = Ma * GradJo # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
3710 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3713 # Minimization of the functional
3714 # --------------------------------
3715 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3717 if selfA._parameters["Minimizer"] == "LBFGSB":
3718 if "0.19" <= scipy.version.version <= "1.1.0":
3719 import lbfgsbhlt as optimiseur
3721 import scipy.optimize as optimiseur
3722 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3723 func = CostFunction,
3725 fprime = GradientOfCostFunction,
3727 bounds = selfA._parameters["Bounds"],
3728 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3729 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3730 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3731 iprint = selfA._parameters["optiprint"],
3733 nfeval = Informations['funcalls']
3734 rc = Informations['warnflag']
3735 elif selfA._parameters["Minimizer"] == "TNC":
3736 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3737 func = CostFunction,
3739 fprime = GradientOfCostFunction,
3741 bounds = selfA._parameters["Bounds"],
3742 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3743 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3744 ftol = selfA._parameters["CostDecrementTolerance"],
3745 messages = selfA._parameters["optmessages"],
3747 elif selfA._parameters["Minimizer"] == "CG":
3748 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3751 fprime = GradientOfCostFunction,
3753 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3754 gtol = selfA._parameters["GradientNormTolerance"],
3755 disp = selfA._parameters["optdisp"],
3758 elif selfA._parameters["Minimizer"] == "NCG":
3759 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3762 fprime = GradientOfCostFunction,
3764 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3765 avextol = selfA._parameters["CostDecrementTolerance"],
3766 disp = selfA._parameters["optdisp"],
3769 elif selfA._parameters["Minimizer"] == "BFGS":
3770 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3773 fprime = GradientOfCostFunction,
3775 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3776 gtol = selfA._parameters["GradientNormTolerance"],
3777 disp = selfA._parameters["optdisp"],
3781 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3783 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3784 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3786 # Workaround for a TNC bug affecting the returned Minimum
3787 # ----------------------------------------------------------------
3788 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3789 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3791 # Obtaining the analysis
3792 # ----------------------
3795 selfA.StoredVariables["Analysis"].store( Xa )
3797 # Additional computations and/or storage
3798 # ---------------------------------------
3799 if selfA._toStore("BMA"):
3800 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
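# --- Illustrative sketch (not part of the library) ---------------------------
# A minimal example of the adjoint recursion used by GradientOfCostFunction
# in std4dvar above, for linear M and H: the forward pass stores the
# innovations d_k, and the backward pass accumulates
# GradJo <- M^T (GradJo + H^T R^-1 d_k), giving the gradient of the 4DVAR
# cost at the initial state. Checked here against finite differences; all
# values are hypothetical.
def _demo_adjoint_gradient():
    import numpy
    M = numpy.array([[1.0, 0.1], [0.0, 0.9]])         # linear evolution model
    H = numpy.eye(2)                                  # observation operator
    RI = numpy.eye(2)                                 # R^-1
    BI = numpy.eye(2)                                 # B^-1
    xb = numpy.zeros(2)
    Y = [numpy.array([1.0, 0.5]), numpy.array([0.8, 0.4])]
    def cost(x0):
        x, Jo = x0.copy(), 0.0
        for yk in Y:
            x = M @ x
            d = yk - H @ x
            Jo += 0.5 * d @ RI @ d
        return 0.5 * (x0 - xb) @ BI @ (x0 - xb) + Jo
    def grad(x0):
        x, innov = x0.copy(), []
        for yk in Y:                                  # forward pass
            x = M @ x
            innov.append(yk - H @ x)
        gJo = numpy.zeros(2)
        for d in reversed(innov):                     # backward (adjoint) pass
            gJo = M.T @ (gJo + H.T @ (RI @ d))
        return BI @ (x0 - xb) - gJo
    x0 = numpy.array([0.3, -0.2])
    eps, g_fd = 1e-6, numpy.zeros(2)
    for i in range(2):
        e = numpy.zeros(2); e[i] = eps
        g_fd[i] = (cost(x0 + e) - cost(x0 - e)) / (2 * eps)
    assert numpy.allclose(grad(x0), g_fd, atol=1e-5)
    return grad(x0)
# ------------------------------------------------------------------------------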
3804 # ==============================================================================
3805 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3807 Standard Kalman Filter
3809 if selfA._parameters["EstimationOf"] == "Parameters":
3810 selfA._parameters["StoreInternalVariables"] = True
3814 Ht = HO["Tangent"].asMatrix(Xb)
3815 Ha = HO["Adjoint"].asMatrix(Xb)
3817 if selfA._parameters["EstimationOf"] == "State":
3818 Mt = EM["Tangent"].asMatrix(Xb)
3819 Ma = EM["Adjoint"].asMatrix(Xb)
3821 if CM is not None and "Tangent" in CM and U is not None:
3822 Cm = CM["Tangent"].asMatrix(Xb)
3826 # Observation duration and sizes
3827 if hasattr(Y,"stepnumber"):
3828 duration = Y.stepnumber()
3829 __p = numpy.cumprod(Y.shape())[-1]
3832 __p = numpy.array(Y).size
3834 # Precompute the inverses of B and R
3835 if selfA._parameters["StoreInternalVariables"] \
3836 or selfA._toStore("CostFunctionJ") \
3837 or selfA._toStore("CostFunctionJb") \
3838 or selfA._toStore("CostFunctionJo") \
3839 or selfA._toStore("CurrentOptimum") \
3840 or selfA._toStore("APosterioriCovariance"):
3845 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
3847 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3850 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3851 selfA.StoredVariables["Analysis"].store( Xb )
3852 if selfA._toStore("APosterioriCovariance"):
3853 if hasattr(B,"asfullmatrix"):
3854 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3856 selfA.StoredVariables["APosterioriCovariance"].store( B )
3857 selfA._setInternalState("seed", numpy.random.get_state())
3858 elif selfA._parameters["nextStep"]:
3859 Xn = selfA._getInternalState("Xn")
3860 Pn = selfA._getInternalState("Pn")
3862 if selfA._parameters["EstimationOf"] == "Parameters":
3864 previousJMinimum = numpy.finfo(float).max
3866 for step in range(duration-1):
3867 if hasattr(Y,"store"):
3868 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3870 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3873 if hasattr(U,"store") and len(U)>1:
3874 Un = numpy.ravel( U[step] ).reshape((-1,1))
3875 elif hasattr(U,"store") and len(U)==1:
3876 Un = numpy.ravel( U[0] ).reshape((-1,1))
3878 Un = numpy.ravel( U ).reshape((-1,1))
3882 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3883 Xn_predicted = Mt @ Xn
3884 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, this term is duplicated!
3885 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3886 Xn_predicted = Xn_predicted + Cm @ Un
3887 Pn_predicted = Q + Mt * (Pn * Ma)
3888 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3889 # ---> By principle, M = Id, Q = 0
3893 if selfA._parameters["EstimationOf"] == "State":
3894 HX_predicted = Ht @ Xn_predicted
3895 _Innovation = Ynpu - HX_predicted
3896 elif selfA._parameters["EstimationOf"] == "Parameters":
3897 HX_predicted = Ht @ Xn_predicted
3898 _Innovation = Ynpu - HX_predicted
3899 if Cm is not None and Un is not None: # Beware: if Cm is also included in H, this term is duplicated!
3900 _Innovation = _Innovation - Cm @ Un
3902 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3903 Xn = Xn_predicted + Kn * _Innovation
3904 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3907 #--------------------------
3908 selfA._setInternalState("Xn", Xn)
3909 selfA._setInternalState("Pn", Pn)
3910 #--------------------------
3912 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3913 # ---> with analysis
3914 selfA.StoredVariables["Analysis"].store( Xa )
3915 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3916 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3917 if selfA._toStore("InnovationAtCurrentAnalysis"):
3918 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3919 # ---> with current state
3920 if selfA._parameters["StoreInternalVariables"] \
3921 or selfA._toStore("CurrentState"):
3922 selfA.StoredVariables["CurrentState"].store( Xn )
3923 if selfA._toStore("ForecastState"):
3924 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3925 if selfA._toStore("ForecastCovariance"):
3926 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3927 if selfA._toStore("BMA"):
3928 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3929 if selfA._toStore("InnovationAtCurrentState"):
3930 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3931 if selfA._toStore("SimulatedObservationAtCurrentState") \
3932 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3933 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3935 if selfA._parameters["StoreInternalVariables"] \
3936 or selfA._toStore("CostFunctionJ") \
3937 or selfA._toStore("CostFunctionJb") \
3938 or selfA._toStore("CostFunctionJo") \
3939 or selfA._toStore("CurrentOptimum") \
3940 or selfA._toStore("APosterioriCovariance"):
3941 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3942 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3944 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3945 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3946 selfA.StoredVariables["CostFunctionJ" ].store( J )
3948 if selfA._toStore("IndexOfOptimum") \
3949 or selfA._toStore("CurrentOptimum") \
3950 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3951 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3952 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3953 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3954 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3955 if selfA._toStore("IndexOfOptimum"):
3956 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3957 if selfA._toStore("CurrentOptimum"):
3958 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3959 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3960 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3961 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3962 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3963 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3964 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3965 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3966 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3967 if selfA._toStore("APosterioriCovariance"):
3968 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3969 if selfA._parameters["EstimationOf"] == "Parameters" \
3970 and J < previousJMinimum:
3971 previousJMinimum = J
3973 if selfA._toStore("APosterioriCovariance"):
3974 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3976 # Final additional storage of the optimum in parameter estimation
3977 # ----------------------------------------------------------------------
3978 if selfA._parameters["EstimationOf"] == "Parameters":
3979 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3980 selfA.StoredVariables["Analysis"].store( XaMin )
3981 if selfA._toStore("APosterioriCovariance"):
3982 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3983 if selfA._toStore("BMA"):
3984 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
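# --- Illustrative sketch (not part of the library) ---------------------------
# One predict/update step of the linear Kalman filter implemented by stdkf
# above, written with plain numpy arrays and the same gain formula,
# Kn = Pf Ht^T (R + Ht Pf Ht^T)^-1. All matrices and values are hypothetical.
def _demo_kalman_step():
    import numpy
    Mt = numpy.array([[1.0, 1.0], [0.0, 1.0]])        # evolution tangent
    Ht = numpy.array([[1.0, 0.0]])                    # observation tangent
    Q = 0.01 * numpy.eye(2)
    R = numpy.array([[0.25]])
    xn = numpy.array([[0.0], [1.0]])
    Pn = numpy.eye(2)
    y = numpy.array([[1.2]])
    xf = Mt @ xn                                      # forecast state
    Pf = Q + Mt @ Pn @ Mt.T                           # forecast covariance
    Kn = Pf @ Ht.T @ numpy.linalg.inv(R + Ht @ Pf @ Ht.T)
    xa = xf + Kn @ (y - Ht @ xf)                      # analysis state
    Pa = Pf - Kn @ Ht @ Pf                            # analysis covariance
    return xa, Pa
# ------------------------------------------------------------------------------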
3988 # ==============================================================================
3989 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3991 Unscented Kalman Filter
3993 if selfA._parameters["EstimationOf"] == "Parameters":
3994 selfA._parameters["StoreInternalVariables"] = True
3997 Alpha = selfA._parameters["Alpha"]
3998 Beta = selfA._parameters["Beta"]
3999 if selfA._parameters["Kappa"] == 0:
4000 if selfA._parameters["EstimationOf"] == "State":
4002 elif selfA._parameters["EstimationOf"] == "Parameters":
4005 Kappa = selfA._parameters["Kappa"]
4006 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
4007 Gamma = math.sqrt( L + Lambda )
4011 for i in range(2*L):
4012 Ww.append( 1. / (2.*(L + Lambda)) )
4014 Wm = numpy.array( Ww )
4015 Wm[0] = Lambda / (L + Lambda)
4016 Wc = numpy.array( Ww )
4017 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
4020 Hm = HO["Direct"].appliedControledFormTo
4022 if selfA._parameters["EstimationOf"] == "State":
4023 Mm = EM["Direct"].appliedControledFormTo
4025 if CM is not None and "Tangent" in CM and U is not None:
4026 Cm = CM["Tangent"].asMatrix(Xb)
4030 # Observation duration and sizes
4031 if hasattr(Y,"stepnumber"):
4032 duration = Y.stepnumber()
4033 __p = numpy.cumprod(Y.shape())[-1]
4036 __p = numpy.array(Y).size
4038 # Precompute the inverses of B and R
4039 if selfA._parameters["StoreInternalVariables"] \
4040 or selfA._toStore("CostFunctionJ") \
4041 or selfA._toStore("CostFunctionJb") \
4042 or selfA._toStore("CostFunctionJo") \
4043 or selfA._toStore("CurrentOptimum") \
4044 or selfA._toStore("APosterioriCovariance"):
4049 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
4051 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
4053 if hasattr(B,"asfullmatrix"):
4054 Pn = B.asfullmatrix(__n)
4057 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4058 selfA.StoredVariables["Analysis"].store( Xb )
4059 if selfA._toStore("APosterioriCovariance"):
4060 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4061 elif selfA._parameters["nextStep"]:
4062 Xn = selfA._getInternalState("Xn")
4063 Pn = selfA._getInternalState("Pn")
4065 if selfA._parameters["EstimationOf"] == "Parameters":
4067 previousJMinimum = numpy.finfo(float).max
4069 for step in range(duration-1):
4070 if hasattr(Y,"store"):
4071 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4073 Ynpu = numpy.ravel( Y ).reshape((__p,1))
4076 if hasattr(U,"store") and len(U)>1:
4077 Un = numpy.ravel( U[step] ).reshape((-1,1))
4078 elif hasattr(U,"store") and len(U)==1:
4079 Un = numpy.ravel( U[0] ).reshape((-1,1))
4081 Un = numpy.ravel( U ).reshape((-1,1))
4085 Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
4086 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4087 nbSpts = 2*Xn.size+1
4090 for point in range(nbSpts):
4091 if selfA._parameters["EstimationOf"] == "State":
4092 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4093 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, this term is duplicated!
4094 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4095 XEtnnpi = XEtnnpi + Cm @ Un
4096 elif selfA._parameters["EstimationOf"] == "Parameters":
4097 # ---> By principle, M = Id, Q = 0
4098 XEtnnpi = Xnp[:,point]
4099 XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4100 XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4102 Xncm = ( XEtnnp * Wm ).sum(axis=1)
4104 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
4105 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4106 for point in range(nbSpts):
4107 Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm)) # weight indexed by sigma point, not by the stale loop index i
4109 Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4111 Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4114 for point in range(nbSpts):
4115 if selfA._parameters["EstimationOf"] == "State":
4116 Ynnpi = Hm( (Xnnp[:,point], None) )
4117 elif selfA._parameters["EstimationOf"] == "Parameters":
4118 Ynnpi = Hm( (Xnnp[:,point], Un) )
4119 Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4120 Ynnp = numpy.concatenate( Ynnp, axis=1 )
4122 Yncm = ( Ynnp * Wm ).sum(axis=1)
4126 for point in range(nbSpts):
4127 Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) # weight indexed by sigma point
4128 Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm)) # weight indexed by sigma point
4130 _Innovation = Ynpu - Yncm.reshape((-1,1))
4131 if selfA._parameters["EstimationOf"] == "Parameters":
4132 if Cm is not None and Un is not None: # Beware: if Cm is also included in H, this term is duplicated!
4133 _Innovation = _Innovation - Cm @ Un
4136 Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4137 Pn = Pnm - Kn * Pyyn * Kn.T
4140 #--------------------------
4141 selfA._setInternalState("Xn", Xn)
4142 selfA._setInternalState("Pn", Pn)
4143 #--------------------------
4145 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4146 # ---> with analysis
4147 selfA.StoredVariables["Analysis"].store( Xa )
4148 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4149 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4150 if selfA._toStore("InnovationAtCurrentAnalysis"):
4151 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4152 # ---> with current state
4153 if selfA._parameters["StoreInternalVariables"] \
4154 or selfA._toStore("CurrentState"):
4155 selfA.StoredVariables["CurrentState"].store( Xn )
4156 if selfA._toStore("ForecastState"):
4157 selfA.StoredVariables["ForecastState"].store( Xncm )
4158 if selfA._toStore("ForecastCovariance"):
4159 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4160 if selfA._toStore("BMA"):
4161 selfA.StoredVariables["BMA"].store( Xncm - Xa )
4162 if selfA._toStore("InnovationAtCurrentState"):
4163 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4164 if selfA._toStore("SimulatedObservationAtCurrentState") \
4165 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4166 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4168 if selfA._parameters["StoreInternalVariables"] \
4169 or selfA._toStore("CostFunctionJ") \
4170 or selfA._toStore("CostFunctionJb") \
4171 or selfA._toStore("CostFunctionJo") \
4172 or selfA._toStore("CurrentOptimum") \
4173 or selfA._toStore("APosterioriCovariance"):
4174 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
4175 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4177 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4178 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4179 selfA.StoredVariables["CostFunctionJ" ].store( J )
4181 if selfA._toStore("IndexOfOptimum") \
4182 or selfA._toStore("CurrentOptimum") \
4183 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4184 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4185 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4186 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4187 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4188 if selfA._toStore("IndexOfOptimum"):
4189 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4190 if selfA._toStore("CurrentOptimum"):
4191 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4192 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4193 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4194 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4195 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4196 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4197 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4198 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4199 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4200 if selfA._toStore("APosterioriCovariance"):
4201 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4202 if selfA._parameters["EstimationOf"] == "Parameters" \
4203 and J < previousJMinimum:
4204 previousJMinimum = J
4206 if selfA._toStore("APosterioriCovariance"):
4207 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4209 # Final additional storage of the optimum in parameter estimation
4210 # ----------------------------------------------------------------------
4211 if selfA._parameters["EstimationOf"] == "Parameters":
4212 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4213 selfA.StoredVariables["Analysis"].store( XaMin )
4214 if selfA._toStore("APosterioriCovariance"):
4215 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4216 if selfA._toStore("BMA"):
4217 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
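# --- Illustrative sketch (not part of the library) ---------------------------
# The scaled unscented transform used by uskf above: weights Wm/Wc and sigma
# points X, X +/- Gamma*sqrt(P) are built as in the function, and for a
# linear map the weighted statistics reproduce the mean and covariance
# exactly (note the per-point weight Wc[point], as in the corrected loops
# above). Dimensions and values are hypothetical.
def _demo_unscented_transform():
    import math
    import numpy
    import scipy.linalg
    L_dim, Alpha, Beta, Kappa = 2, 0.5, 2.0, 0.0
    Lambda = Alpha**2 * (L_dim + Kappa) - L_dim
    Gamma = math.sqrt(L_dim + Lambda)
    Wm = numpy.full(2 * L_dim + 1, 1.0 / (2.0 * (L_dim + Lambda)))
    Wc = Wm.copy()
    Wm[0] = Lambda / (L_dim + Lambda)
    Wc[0] = Lambda / (L_dim + Lambda) + (1.0 - Alpha**2 + Beta)
    x = numpy.array([[1.0], [2.0]])
    P = numpy.array([[1.0, 0.3], [0.3, 0.5]])
    Pdemi = numpy.real(scipy.linalg.sqrtm(P))         # matrix square root of P
    Xnp = numpy.hstack([x, x + Gamma * Pdemi, x - Gamma * Pdemi])
    A = numpy.array([[2.0, 0.0], [1.0, 1.0]])         # a linear map
    Ysig = A @ Xnp
    ym = (Ysig * Wm).sum(axis=1).reshape((-1, 1))     # weighted mean
    Py = numpy.zeros((2, 2))
    for point in range(2 * L_dim + 1):                # weighted covariance
        dy = Ysig[:, point].reshape((-1, 1)) - ym
        Py += Wc[point] * (dy @ dy.T)
    assert numpy.allclose(ym, A @ x)
    assert numpy.allclose(Py, A @ P @ A.T)
    return ym, Py
# ------------------------------------------------------------------------------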
4221 # ==============================================================================
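# --- Illustrative sketch (not part of the library) ---------------------------
# The van3dvar function below avoids inverting B through the change of
# variables x = Xb + B v, for which the background term becomes
# Jb(v) = 1/2 v^T B v. The hedged sketch below minimizes the transformed
# cost with the exact chain-rule gradient (dJo/dv = B^T dJo/dx) and checks
# that the analysis matches classical 3DVAR computed with an explicit B^-1.
# All data are hypothetical.
def _demo_van3dvar_preconditioning():
    import numpy
    import scipy.optimize
    H = numpy.array([[1., 0.], [1., 1.]])
    B = numpy.array([[2.0, 0.5], [0.5, 1.0]])
    RI = numpy.linalg.inv(0.5 * numpy.eye(2))         # R^-1
    xb = numpy.zeros(2)
    y = numpy.array([1.0, 3.0])
    def J(v):
        dy = y - H @ (xb + B @ v)
        return 0.5 * v @ B @ v + 0.5 * dy @ RI @ dy
    def gradJ(v):
        dy = y - H @ (xb + B @ v)
        return B @ v - B.T @ (H.T @ (RI @ dy))        # no B^-1 anywhere
    res = scipy.optimize.minimize(J, numpy.zeros(2), jac=gradJ,
                                  method="BFGS", options={"gtol": 1e-10})
    xa = xb + B @ res.x
    BI = numpy.linalg.inv(B)                          # only for the reference
    xa_ref = xb + numpy.linalg.solve(BI + H.T @ RI @ H, H.T @ RI @ (y - H @ xb))
    assert numpy.allclose(xa, xa_ref, atol=1e-6)
    return xa
# ------------------------------------------------------------------------------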
4222 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4224 3DVAR variational analysis with no inversion of B
4229 Hm = HO["Direct"].appliedTo
4230 Ha = HO["Adjoint"].appliedInXTo
4235 Xini = numpy.zeros(Xb.shape)
4237 # Definition of the cost function
4238 # ------------------------------
4239 def CostFunction(v):
4240 _V = numpy.ravel( v ).reshape((-1,1))
4242 if selfA._parameters["StoreInternalVariables"] or \
4243 selfA._toStore("CurrentState") or \
4244 selfA._toStore("CurrentOptimum"):
4245 selfA.StoredVariables["CurrentState"].store( _X )
4247 _HX = numpy.ravel( _HX ).reshape((-1,1))
4248 _Innovation = Y - _HX
4249 if selfA._toStore("SimulatedObservationAtCurrentState") or \
4250 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4251 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4252 if selfA._toStore("InnovationAtCurrentState"):
4253 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4255 Jb = float( 0.5 * _V.T * (BT * _V) )
4256 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4259 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4260 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4261 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4262 selfA.StoredVariables["CostFunctionJ" ].store( J )
4263 if selfA._toStore("IndexOfOptimum") or \
4264 selfA._toStore("CurrentOptimum") or \
4265 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4266 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4267 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4268 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4269 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4270 if selfA._toStore("IndexOfOptimum"):
4271 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4272 if selfA._toStore("CurrentOptimum"):
4273 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4274 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4275 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4276 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4277 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4278 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4279 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4280 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4281 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4284 def GradientOfCostFunction(v):
4285 _V = v.reshape((-1,1))
4286 _X = Xb + (B @ _V).reshape((-1,1))
4287 _HX = Hm( _X ).reshape((-1,1))
4289 GradJo = - Ha( (_X, RI * (Y - _HX)) )
4290 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4293 # Minimization of the functional
4294 # --------------------------------
4295 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
            maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            iprint = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
            maxfun = selfA._parameters["MaximumNumberOfSteps"],
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            ftol = selfA._parameters["CostDecrementTolerance"],
            messages = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            avextol = selfA._parameters["CostDecrementTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    else:
        Minimum = Xb + B * Minimum.reshape((-1,1)) # Not @
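    # Whichever branch was taken, "Minimum" now holds the optimal state:
    # either read back from the stored current states, or rebuilt from the
    # optimal control variable v* through the change of variable Xb + B v*.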
    Xa = Minimum
    #--------------------------
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
        HXa = HXa.reshape((-1,1))
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI() # The inverse of B is needed only for these final diagnostics
        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
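        # A approximates the a posteriori analysis covariance, i.e. the
        # inverse of the Hessian of J at the optimum for the linearized
        # observation operator: A = (B^{-1} + H'^* R^{-1} H')^{-1}.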
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
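        # The two branches above are algebraically equivalent gain formulas,
        # each inverting the smaller of the two candidate matrices:
        #     K = B H'^* (R + H' B H'^*)^{-1}                  (observation space)
        #     K = (B^{-1} + H'^* R^{-1} H')^{-1} H'^* R^{-1}   (state space)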
    #
    # Supplementary calculations and/or storage
    # -----------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( d )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( d )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
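        # For consistent B and R error statistics, this indicator 2*J(xa)/p
        # (p being the number of observations) is expected to be close to 1.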
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
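    #
    # A minimal self-contained sketch of the preconditioned 3DVAR idea used
    # above, on a toy linear problem. All names below (Bd, Rd, Hd, xb, yo,
    # demoJ, demoGradJ, Kd) are illustrative and are not part of the ADAO
    # API. With a linear observation operator H, the minimum of
    #     J(v) = 1/2 v^T B v + 1/2 (yo - H(xb + B v))^T R^{-1} (yo - H(xb + B v)),
    # mapped back through xa = xb + B v*, should match the BLUE analysis.
    Bd = numpy.array([[2., 0.], [0., 1.]])      # toy background covariance
    Rd = numpy.array([[0.5]])                   # toy observation covariance
    Hd = numpy.array([[1., 1.]])                # toy linear observation operator
    xb = numpy.array([[1.], [2.]])              # toy background state
    yo = numpy.array([[4.]])                    # toy observation
    Ri = numpy.linalg.inv(Rd)
    def demoJ(v):
        _v = v.reshape((-1,1))
        _d = yo - Hd @ (xb + Bd @ _v)
        return float( 0.5 * _v.T @ Bd @ _v + 0.5 * _d.T @ Ri @ _d )
    def demoGradJ(v):
        _v = v.reshape((-1,1))
        _d = yo - Hd @ (xb + Bd @ _v)
        return numpy.ravel( Bd @ _v - Bd @ Hd.T @ (Ri @ _d) ) # exact gradient in v
    vopt, jopt, infos = scipy.optimize.fmin_l_bfgs_b( demoJ, numpy.zeros(2), fprime = demoGradJ )
    xa = xb + Bd @ vopt.reshape((-1,1))
    # Reference BLUE analysis: xa = xb + B H^T (H B H^T + R)^{-1} (yo - H xb)
    Kd = Bd @ Hd.T @ numpy.linalg.inv(Hd @ Bd @ Hd.T + Rd)
    print("Preconditioned 3DVAR analysis..:", numpy.ravel(xa))
    print("BLUE reference analysis........:", numpy.ravel(xb + Kd @ (yo - Hd @ xb)))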