# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.ravel( X ).reshape((-1,1))
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
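# Illustrative sketch (not part of the original module): ExecuteFunction is
# meant to be mapped over a list of such triplets by multiprocessing.Pool, as
# done in FDApproximation.TangentMatrix below. Assuming a hypothetical module
# "mymodel.py" on disk that defines a function "H", a job list would look like:
#
#   funcrepr = {
#       "__userFunction__path" : "/path/to/model/directory",  # hypothetical
#       "__userFunction__modl" : "mymodel",                   # hypothetical
#       "__userFunction__name" : "H",                         # hypothetical
#       }
#   jobs = [ (X1, None, funcrepr), (X2, None, funcrepr) ]
#   with multiprocessing.Pool() as pool:
#       results = pool.map( ExecuteFunction, jobs )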
# ==============================================================================
class FDApproximation(object):
    """
    This class provides an interface to define approximated operators. When
    an object is built from a given function "Function", it exposes the three
    methods "DirectOperator", "TangentOperator" and "AdjointOperator". The
    finite-difference (FD) approximation is controlled either through the
    multiplicative increment "increment" (1% by default), or through the
    fixed increment "dX" which is then multiplied by "increment" (hence in %),
    and centered finite differences are used when the boolean "centeredDF"
    is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            reducingMemoryUse     = False,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        #
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        self.__mfEnabled = bool(mfEnabled)
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        self.__rmEnabled = bool(reducingMemoryUse)
        logging.debug("FDA Calculations with memory reduction: %s"%(self.__rmEnabled,))
        if avoidingRedundancy:
            self.__avoidRC  = True
            self.__tolerBP  = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increments
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        logging.debug("FDA Calculations with redundancy reduction: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for redundancy detection: %.2e"%self.__tolerBP)
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.ravel( dX )
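    # Illustrative usage sketch (not part of the original class), assuming a
    # hypothetical user function "H" mapping a vector to a vector:
    #
    #   def H( x ): return numpy.ravel( x )**2
    #   FDA = FDApproximation( name="H_FD", Function=H, increment=0.01 )
    #   HX  = FDA.DirectOperator( [1., 2.] )               # H evaluated at X
    #   J   = FDA.TangentMatrix( [1., 2.] )                # FD Jacobian at X
    #   HtX = FDA.TangentOperator( ([1., 2.], [1., 0.]) )  # J @ dX
    #   HaY = FDA.AdjointOperator( ([1., 2.], HX) )        # J.T @ Y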
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
        "Incremental product of a columnwise list matrix with a vector"
        if not isinstance(__LMatrix, (list,tuple)):
            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
        if __dotWith is not None:
            __Idwx = numpy.ravel( __dotWith )
            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
            __Produit = numpy.zeros(__LMatrix[0].size)
            for i, col in enumerate(__LMatrix):
                __Produit += float(__Idwx[i]) * col
            return __Produit
        elif __dotTWith is not None:
            _Idwy = numpy.ravel( __dotTWith ).T
            assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
            __Produit = numpy.zeros(len(__LMatrix))
            for i, col in enumerate(__LMatrix):
                __Produit[i] = float( _Idwy @ col )
            return __Produit
        else:
            __Produit = None
        #
        return __Produit
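    # Minimal numeric sketch (illustration only): the columnwise list [c1, c2]
    # stands for the dense matrix numpy.column_stack((c1, c2)), so that
    #
    #   c1 = numpy.array([1., 0.]) ; c2 = numpy.array([0., 2.])
    #   self.__listdotwith__([c1, c2], __dotWith=[3., 4.])
    #   # -> numpy.array([3., 8.]) == numpy.column_stack((c1, c2)) @ [3., 4.]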
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct computation using the user-provided function.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        logging.debug("FDA Computing the DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, given as a numpy.ndarray.

        Centered finite differences (2nd-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i] to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by (2*dXi)
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (1st-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by (dXi)
        4/ Each result, component by component, becomes a column of the
           Jacobian
        """
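        # Self-contained sketch of the centered scheme described above
        # (illustration only, independent of this class): column i of the
        # Jacobian can be written as
        #
        #   import numpy
        #   def fd_column( H, X, dX, i ):
        #       Xp, Xm = numpy.array(X, float), numpy.array(X, float)
        #       Xp[i] += dX[i] ; Xm[i] -= dX[i]
        #       return ( numpy.ravel(H(Xp)) - numpy.ravel(H(Xm)) ) / (2.*dX[i])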
        logging.debug("FDA Starting the Jacobian calculation")
        logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.ravel( X )
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.ravel( self.__dX )
        assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Jacobian case already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Computing the Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
            logging.debug("FDA Jacobian calculation done")
            if dotWith is not None:
                return numpy.dot(_Jacobienne,   numpy.ravel( dotWith ))
            elif dotTWith is not None:
                return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
        else:
            logging.debug("FDA   Computing the Jacobian (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi  = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            if (dotWith is not None) or (dotTWith is not None):
                __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
            else:
                __Produit = None
            if __Produit is None or self.__avoidRC:
                _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
                if self.__avoidRC:
                    if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                    while len(self.__listJPCP) > self.__lenghtRJ:
                        self.__listJPCP.pop(0)
                        self.__listJPCI.pop(0)
                        self.__listJPCR.pop(0)
                        self.__listJPPN.pop(0)
                        self.__listJPIN.pop(0)
                    self.__listJPCP.append( copy.copy(_X) )
                    self.__listJPCI.append( copy.copy(_dX) )
                    self.__listJPCR.append( copy.copy(_Jacobienne) )
                    self.__listJPPN.append( numpy.linalg.norm(_X) )
                    self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
            logging.debug("FDA Jacobian calculation done")
            if __Produit is not None:
                return __Produit
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Tangent computation using the Jacobian.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        if dX is None or len(dX) == 0:
            #
            # Matrix form when the second argument is None
            # --------------------------------------------
            _Jacobienne = self.TangentMatrix( X )
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Linearized value of H at X applied to dX
            # ----------------------------------------
            _HtX = self.TangentMatrix( X, dotWith = dX )
            if self.__mfEnabled: return [_HtX,]
            else:                return _HtX
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Adjoint computation using the Jacobian.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        if Y is None or len(Y) == 0:
            #
            # Matrix form when the second argument is None
            # --------------------------------------------
            _JacobienneT = self.TangentMatrix( X ).T
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Value of the adjoint at X applied to Y
            # --------------------------------------
            _HaY = self.TangentMatrix( X, dotTWith = Y )
            if self.__mfEnabled: return [_HaY,]
            else:                return _HaY
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return _Perturbations
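# Illustration (sketch only): with a size 3 state and 100 members,
#
#   E = EnsembleOfCenteredPerturbations( [0., 0., 0.], numpy.eye(3), 100 )
#   # E.shape == (3, 100), and E.mean(axis=1) is close to _bgcenter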
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        manuscript notes of MB and conforming to the code of PS with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            _Perturbations = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                _Perturbations = _bgcenter + _Zca
            else:
                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
    #
    return _Perturbations
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Return the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Return the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
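# Illustration (sketch only): anomalies are the member-wise deviations from
# the ensemble mean, so they average to zero across members:
#
#   E  = EnsembleOfCenteredPerturbations( [0., 0.], numpy.eye(2), 50 )
#   An = EnsembleOfAnomalies( E )
#   # An.mean(axis=1) is close to numpy.zeros(2)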
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Return the empirical estimation of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result most often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimation
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Enforce symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Enforce positivity
        __epsilon    = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
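# Illustration (sketch only): both estimation paths agree on well-conditioned
# ensembles, up to the symmetrization and the mpr-scaled diagonal shift:
#
#   E = EnsembleOfCenteredPerturbations( [0., 0.], numpy.eye(2), 500 )
#   # numpy.allclose( EnsembleErrorCovariance(E), numpy.cov(E), atol=1.e-8 )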
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Add a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Null or quasi-null covariance: nothing to add
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Null or quasi-null covariance: nothing to add
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Null or quasi-null covariance: nothing to add
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Null or quasi-null covariance: nothing to add
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Covariance being a multiple of the identity
        __zero = 0.
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
    elif isinstance(__Covariance, numpy.ndarray):
        # Full numpy covariance, knowing this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
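# Illustration (sketch only): multiplicative inflation on anomalies keeps the
# ensemble mean and stretches the spread by the given factor:
#
#   E  = numpy.array([[1., 2., 3.]])             # one variable, three members
#   Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 2. )
#   # Ei -> [[0., 2., 4.]] : same mean (2.), anomalies (-1, 0, 1) doubled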
# ==============================================================================
def HessienneEstimation(selfA, nb, HaM, HtM, BI, RI):
    "A posteriori Hessian estimation"
    #
    HessienneI = []
    for i in range(int(nb)):
        _ee    = numpy.zeros((nb,1))
        _ee[i] = 1.
        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
    #
    A = numpy.linalg.inv(numpy.array( HessienneI ))
    #
    if min(A.shape) != max(A.shape):
        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
    if (numpy.diag(A) < 0).any():
        raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
    if logging.getLogger().level < logging.WARNING: # This verification is done only in debug mode
        try:
            L = numpy.linalg.cholesky( A )
        except Exception:
            raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    #
    return A
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Bounds treatment
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Has priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # States sampling
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
            dYr = HtM @ dXr
            Yr = HXa.reshape((-1,1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.asarray(Hm( Xr ))
        else:
            raise ValueError("Quantile simulation has to be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Quantiles extraction
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0
# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Force the bounds to be numerical values, except if globally None"
    # Keep the default None value when there are no bounds at all
    if __Bounds is None: return None
    # Convert every individual None bound into +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
    return __Bounds
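# Illustration (sketch only): individual None bounds become +/- the largest
# representable float:
#
#   ForceNumericBounds( [[None, 1.], [0., None]] )
#   # -> [[-1.7976931348623157e+308, 1.], [0., 1.7976931348623157e+308]]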
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenter the bounds around 0, except if globally None"
    # Keep the default None value when there are no bounds at all
    if __Bounds is None: return None
    # Recenter the numerical bound values
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True):
    "Apply numerical bounds to a point"
    # Keep the default value when there are no bounds
    if __Bounds is None: return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:,0].reshape(__Vector.shape),
            __Bounds[:,1].reshape(__Vector.shape),
            )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
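# Illustration (sketch only): with the default clip mode,
#
#   ApplyBounds( numpy.array([-1., 0.5, 2.]), numpy.array([[0., 1.]]*3) )
#   # -> numpy.array([0., 0.5, 1.])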
# ==============================================================================
def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
    "Recenter the ensemble Xn around the 3DVAR analysis"
    #
    Xf = EnsembleMean( __EnXf )
    Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
    Pf = (1 - __Betaf) * __B + __Betaf * Pf
    #
    selfB = PartialAlgorithm("3DVAR")
    selfB._parameters["Minimizer"] = "LBFGSB"
    selfB._parameters["MaximumNumberOfSteps"] = 15000
    selfB._parameters["CostDecrementTolerance"] = 1.e-7
    selfB._parameters["ProjectedGradientTolerance"] = -1
    selfB._parameters["GradientNormTolerance"] = 1.e-05
    selfB._parameters["StoreInternalVariables"] = False
    selfB._parameters["optiprint"] = -1
    selfB._parameters["optdisp"] = 0
    selfB._parameters["Bounds"] = None
    selfB._parameters["InitializationPoint"] = Xf
    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
    Xa = selfB.get("Analysis")[-1].reshape((-1,1))
    del selfB
    #
    return Xa + EnsembleOfAnomalies( __EnXn )
# ==============================================================================
def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Unscented Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    L     = Xb.size
    Alpha = selfA._parameters["Alpha"]
    Beta  = selfA._parameters["Beta"]
    if selfA._parameters["Kappa"] == 0:
        if selfA._parameters["EstimationOf"] == "State":
            Kappa = 0
        elif selfA._parameters["EstimationOf"] == "Parameters":
            Kappa = 3 - L
    else:
        Kappa = selfA._parameters["Kappa"]
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma  = math.sqrt( L + Lambda )
    #
    Ww = []
    Ww.append( 0. )
    for i in range(2*L):
        Ww.append( 1. / (2.*(L + Lambda)) )
    #
    Wm = numpy.array( Ww )
    Wm[0] = Lambda / (L + Lambda)
    Wc = numpy.array( Ww )
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
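    # Sanity sketch (illustration only): with these definitions the mean
    # weights sum to one, since
    #   Wm[0] + 2*L * 1/(2*(L+Lambda)) = Lambda/(L+Lambda) + L/(L+Lambda) = 1.
    # and Wc differs from Wm only through the 0th weight, which carries the
    # (1 - Alpha**2 + Beta) correction term.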
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        if hasattr(B,"asfullmatrix"):
            Pn = B.asfullmatrix(__n)
        else:
            Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
        nbSpts = 2*Xn.size+1
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
        #
        XEtnnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is counted twice!
                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                    XEtnnpi = XEtnnpi + Cm @ Un
                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                XEtnnpi = Xnp[:,point]
            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
        #
        Xncm = ( XEtnnp * Wm ).sum(axis=1)
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
        for point in range(nbSpts):
            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
        #
        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
        else:
            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
        #
        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
        #
        Ynnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                Ynnpi = Hm( (Xnnp[:,point], None) )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                Ynnpi = Hm( (Xnnp[:,point], Un) )
            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
        Ynnp = numpy.concatenate( Ynnp, axis=1 )
        #
        Yncm = ( Ynnp * Wm ).sum(axis=1)
        #
        Pyyn = R
        Pxyn = 0.
        for point in range(nbSpts):
            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
        #
        _Innovation = Ynpu - Yncm.reshape((-1,1))
        if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Beware: if Cm is also in H, it is counted twice!
                _Innovation = _Innovation - Cm @ Un
        #
        Kn = Pxyn * Pyyn.I
        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
        Pn = Pnm - Kn * Pyyn * Kn.T
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        Xa = Xn # Pointer
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xncm )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xncm - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum in the parameter estimation case
    # ------------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
            if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm @ Un
            Pn_predicted = Q + Mt * (Pn * Ma)
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Beware: if Cm is also in H, it is counted twice!
                _Innovation = _Innovation - Cm @ Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
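        # The update above implements the standard EKF correction (sketch of
        # the underlying formulas, in the notations of this function):
        #   K_n = P_f Ha (R + Ht P_f Ha)^{-1}
        #   x_a = x_f + K_n (y - H(x_f))
        #   P_a = P_f - K_n Ht P_f
        # followed by the optional "EstimateProjection" bound clipping of x_a.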
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        Xa = Xn # Pointer
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum in the parameter estimation case
    # ------------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother (EnKS)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations greater than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        #
        # Initial direct calculation (memorization is preferred over recomputation)
        __seed = numpy.random.get_state()
        selfB = copy.deepcopy(selfA)
        selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
        if VariantM == "EnKS16-KalmanFilterFormula":
            etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        if LagL > 0:
            EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
        else:
            EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
        selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS = mS.reshape((-1,__m)) # For the dimension 1 case
            delta = RIdemi @ ( Ynpu - vZm )
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)
            wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
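            # Sketch of the transform used here (illustration only): with
            # S = R^{-1/2} (HZ anomalies)/sqrt(m-1), the update weights are
            #   T = (I_m + S^T S)^{-1},  w = T S^T R^{-1/2} (y - H(x_f)),
            # and wTU combines the mean update w with the square-root
            # rotation sqrt(m-1) T^{1/2}, applied to each lagged ensemble.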
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            sEL[LagL] = EL
            #
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Conservation of the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last, incompletely updated, analyses
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
1438 # ==============================================================================
1439 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
1440 VariantM="KalmanFilterFormula",
1441 Hybrid=None,
1442 ):
1443 """
1444 Ensemble-Transform EnKF
1445 """
1446 if selfA._parameters["EstimationOf"] == "Parameters":
1447 selfA._parameters["StoreInternalVariables"] = True
1450 H = HO["Direct"].appliedControledFormTo
1452 if selfA._parameters["EstimationOf"] == "State":
1453 M = EM["Direct"].appliedControledFormTo
1455 if CM is not None and "Tangent" in CM and U is not None:
1456 Cm = CM["Tangent"].asMatrix(Xb)
1457 else:
1458 Cm = None
1460 # Observation duration and sizes
1461 if hasattr(Y,"stepnumber"):
1462 duration = Y.stepnumber()
1463 __p = numpy.cumprod(Y.shape())[-1]
1464 else:
1465 duration = 2
1466 __p = numpy.array(Y).size
1468 # Precompute the inverses of B and R
1469 if selfA._parameters["StoreInternalVariables"] \
1470 or selfA._toStore("CostFunctionJ") \
1471 or selfA._toStore("CostFunctionJb") \
1472 or selfA._toStore("CostFunctionJo") \
1473 or selfA._toStore("CurrentOptimum") \
1474 or selfA._toStore("APosterioriCovariance"):
1477 elif VariantM != "KalmanFilterFormula":
1479 if VariantM == "KalmanFilterFormula":
1483 __m = selfA._parameters["NumberOfMembers"]
1484 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
1485 previousJMinimum = numpy.finfo(float).max
1487 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1488 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1489 selfA.StoredVariables["Analysis"].store( Xb )
1490 if selfA._toStore("APosterioriCovariance"):
1491 if hasattr(B,"asfullmatrix"):
1492 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1493 else:
1494 selfA.StoredVariables["APosterioriCovariance"].store( B )
1495 selfA._setInternalState("seed", numpy.random.get_state())
1496 elif selfA._parameters["nextStep"]:
1497 Xn = selfA._getInternalState("Xn")
1499 for step in range(duration-1):
1500 numpy.random.set_state(selfA._getInternalState("seed"))
1501 if hasattr(Y,"store"):
1502 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1503 else:
1504 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1506 if U is not None:
1507 if hasattr(U,"store") and len(U)>1:
1508 Un = numpy.ravel( U[step] ).reshape((-1,1))
1509 elif hasattr(U,"store") and len(U)==1:
1510 Un = numpy.ravel( U[0] ).reshape((-1,1))
1511 else:
1512 Un = numpy.ravel( U ).reshape((-1,1))
1513 else:
1514 Un = None
1516 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1517 Xn = CovarianceInflation( Xn,
1518 selfA._parameters["InflationType"],
1519 selfA._parameters["InflationFactor"],
1522 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1523 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1524 argsAsSerie = True,
1525 returnSerieAsArrayMatrix = True )
1526 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1527 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1528 argsAsSerie = True,
1529 returnSerieAsArrayMatrix = True )
1530 if Cm is not None and Un is not None: # Warning: if Cm is also applied in M, this term is duplicated!
1531 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1532 Xn_predicted = Xn_predicted + Cm @ Un
1533 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1534 # ---> By principle, M = Id, Q = 0
1535 Xn_predicted = EMX = Xn
1536 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1537 argsAsSerie = True,
1538 returnSerieAsArrayMatrix = True )
1540 # Mean of forecast and observation of forecast
1541 Xfm = EnsembleMean( Xn_predicted )
1542 Hfm = EnsembleMean( HX_predicted )
1545 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
1546 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
1548 #--------------------------
1549 if VariantM == "KalmanFilterFormula":
1550 mS = RIdemi * EaHX / math.sqrt(__m-1)
1551 mS = mS.reshape((-1,__m)) # For the 1-D case
1552 delta = RIdemi * ( Ynpu - Hfm )
1553 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1554 vw = mT @ mS.T @ delta
1556 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1557 mU = numpy.identity(__m)
1559 EaX = EaX / math.sqrt(__m-1)
1560 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
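#
# Shape sketch for the deterministic transform above (a reading aid, assuming
# m members and p observations): mS is (p,m), mT = inv(I + mS.T@mS) is (m,m),
# vw = mT@mS.T@delta is (m,1), Tdemi = sqrtm(mT) is (m,m); the analysis
# Xn = Xfm + EaX @ ( vw + sqrt(m-1)*Tdemi@mU ) shifts the ensemble mean by
# the Kalman update and rotates the anomalies; mU = I gives the usual
# symmetric square-root ETKF.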
1561 #--------------------------
1562 elif VariantM == "Variational":
1563 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1564 def CostFunction(w):
1565 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1566 _Jo = 0.5 * _A.T @ (RI * _A)
1567 _Jb = 0.5 * (__m-1) * w.T @ w
1568 _J = _Jo + _Jb
1569 return float(_J)
1570 def GradientOfCostFunction(w):
1571 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1572 _GradJo = - EaHX.T @ (RI * _A)
1573 _GradJb = (__m-1) * w.reshape((__m,1))
1574 _GradJ = _GradJo + _GradJb
1575 return numpy.ravel(_GradJ)
1576 vw = scipy.optimize.fmin_cg(
1577 f = CostFunction,
1578 x0 = numpy.zeros(__m),
1579 fprime = GradientOfCostFunction,
1580 args = (),
1581 disp = False,
1582 )
1584 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1585 Htb = (__m-1) * numpy.identity(__m)
1586 Hta = Hto + Htb
1588 Pta = numpy.linalg.inv( Hta )
1589 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1591 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1592 #--------------------------
1593 elif VariantM == "FiniteSize11": # Jauge Boc2011
1594 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1595 def CostFunction(w):
1596 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1597 _Jo = 0.5 * _A.T @ (RI * _A)
1598 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1599 _J = _Jo + _Jb
1600 return float(_J)
1601 def GradientOfCostFunction(w):
1602 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1603 _GradJo = - EaHX.T @ (RI * _A)
1604 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1605 _GradJ = _GradJo + _GradJb
1606 return numpy.ravel(_GradJ)
1607 vw = scipy.optimize.fmin_cg(
1608 f = CostFunction,
1609 x0 = numpy.zeros(__m),
1610 fprime = GradientOfCostFunction,
1611 args = (),
1612 disp = False,
1613 )
1615 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1616 Htb = __m * \
1617 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1618 / (1 + 1/__m + vw.T @ vw)**2
1619 Hta = Hto + Htb
1621 Pta = numpy.linalg.inv( Hta )
1622 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1624 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1625 #--------------------------
1626 elif VariantM == "FiniteSize15": # Jauge Boc2015
1627 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1628 def CostFunction(w):
1629 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1630 _Jo = 0.5 * _A.T @ (RI * _A)
1631 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1632 _J = _Jo + _Jb
1633 return float(_J)
1634 def GradientOfCostFunction(w):
1635 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1636 _GradJo = - EaHX.T @ (RI * _A)
1637 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1638 _GradJ = _GradJo + _GradJb
1639 return numpy.ravel(_GradJ)
1640 vw = scipy.optimize.fmin_cg(
1641 f = CostFunction,
1642 x0 = numpy.zeros(__m),
1643 fprime = GradientOfCostFunction,
1644 args = (),
1645 disp = False,
1646 )
1648 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1649 Htb = (__m+1) * \
1650 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1651 / (1 + 1/__m + vw.T @ vw)**2
1652 Hta = Hto + Htb
1654 Pta = numpy.linalg.inv( Hta )
1655 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1657 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1658 #--------------------------
1659 elif VariantM == "FiniteSize16": # Jauge Boc2016
1660 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1661 def CostFunction(w):
1662 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1663 _Jo = 0.5 * _A.T @ (RI * _A)
1664 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1665 _J = _Jo + _Jb
1666 return float(_J)
1667 def GradientOfCostFunction(w):
1668 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1669 _GradJo = - EaHX.T @ (RI * _A)
1670 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1671 _GradJ = _GradJo + _GradJb
1672 return numpy.ravel(_GradJ)
1673 vw = scipy.optimize.fmin_cg(
1674 f = CostFunction,
1675 x0 = numpy.zeros(__m),
1676 fprime = GradientOfCostFunction,
1677 args = (),
1678 disp = False,
1679 )
1681 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1682 Htb = ((__m+1) / (__m-1)) * \
1683 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1684 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1685 Hta = Hto + Htb
1687 Pta = numpy.linalg.inv( Hta )
1688 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1690 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1691 #--------------------------
1692 else:
1693 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1695 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1696 Xn = CovarianceInflation( Xn,
1697 selfA._parameters["InflationType"],
1698 selfA._parameters["InflationFactor"],
1701 if Hybrid == "E3DVAR":
1702 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
1703 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
1705 Xa = EnsembleMean( Xn )
1706 #--------------------------
1707 selfA._setInternalState("Xn", Xn)
1708 selfA._setInternalState("seed", numpy.random.get_state())
1709 #--------------------------
1711 if selfA._parameters["StoreInternalVariables"] \
1712 or selfA._toStore("CostFunctionJ") \
1713 or selfA._toStore("CostFunctionJb") \
1714 or selfA._toStore("CostFunctionJo") \
1715 or selfA._toStore("APosterioriCovariance") \
1716 or selfA._toStore("InnovationAtCurrentAnalysis") \
1717 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1718 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1719 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
1720 _Innovation = Ynpu - _HXa
1722 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1723 # ---> with analysis
1724 selfA.StoredVariables["Analysis"].store( Xa )
1725 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1726 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1727 if selfA._toStore("InnovationAtCurrentAnalysis"):
1728 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1729 # ---> with current state
1730 if selfA._parameters["StoreInternalVariables"] \
1731 or selfA._toStore("CurrentState"):
1732 selfA.StoredVariables["CurrentState"].store( Xn )
1733 if selfA._toStore("ForecastState"):
1734 selfA.StoredVariables["ForecastState"].store( EMX )
1735 if selfA._toStore("ForecastCovariance"):
1736 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1737 if selfA._toStore("BMA"):
1738 selfA.StoredVariables["BMA"].store( EMX - Xa )
1739 if selfA._toStore("InnovationAtCurrentState"):
1740 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1741 if selfA._toStore("SimulatedObservationAtCurrentState") \
1742 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1743 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1745 if selfA._parameters["StoreInternalVariables"] \
1746 or selfA._toStore("CostFunctionJ") \
1747 or selfA._toStore("CostFunctionJb") \
1748 or selfA._toStore("CostFunctionJo") \
1749 or selfA._toStore("CurrentOptimum") \
1750 or selfA._toStore("APosterioriCovariance"):
1751 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1752 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1753 J = Jb + Jo
1754 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1755 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1756 selfA.StoredVariables["CostFunctionJ" ].store( J )
1758 if selfA._toStore("IndexOfOptimum") \
1759 or selfA._toStore("CurrentOptimum") \
1760 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1761 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1762 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1763 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1764 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1765 if selfA._toStore("IndexOfOptimum"):
1766 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1767 if selfA._toStore("CurrentOptimum"):
1768 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1769 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1770 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1771 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1772 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1773 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1774 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1775 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1776 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1777 if selfA._toStore("APosterioriCovariance"):
1778 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1779 if selfA._parameters["EstimationOf"] == "Parameters" \
1780 and J < previousJMinimum:
1781 previousJMinimum = J
1782 XaMin = Xa
1783 if selfA._toStore("APosterioriCovariance"):
1784 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1785 # ---> For the smoothers
1786 if selfA._toStore("CurrentEnsembleState"):
1787 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1789 # Additional final storage of the optimum in parameter estimation
1790 # ----------------------------------------------------------------------
1791 if selfA._parameters["EstimationOf"] == "Parameters":
1792 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1793 selfA.StoredVariables["Analysis"].store( XaMin )
1794 if selfA._toStore("APosterioriCovariance"):
1795 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1796 if selfA._toStore("BMA"):
1797 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1801 # ==============================================================================
1802 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1803 """
1804 Extended Kalman Filter
1805 """
1806 if selfA._parameters["EstimationOf"] == "Parameters":
1807 selfA._parameters["StoreInternalVariables"] = True
1810 H = HO["Direct"].appliedControledFormTo
1812 if selfA._parameters["EstimationOf"] == "State":
1813 M = EM["Direct"].appliedControledFormTo
1815 if CM is not None and "Tangent" in CM and U is not None:
1816 Cm = CM["Tangent"].asMatrix(Xb)
1820 # Durée d'observation et tailles
1821 if hasattr(Y,"stepnumber"):
1822 duration = Y.stepnumber()
1823 __p = numpy.cumprod(Y.shape())[-1]
1824 else:
1825 duration = 2
1826 __p = numpy.array(Y).size
1828 # Precompute the inverses of B and R
1829 if selfA._parameters["StoreInternalVariables"] \
1830 or selfA._toStore("CostFunctionJ") \
1831 or selfA._toStore("CostFunctionJb") \
1832 or selfA._toStore("CostFunctionJo") \
1833 or selfA._toStore("CurrentOptimum") \
1834 or selfA._toStore("APosterioriCovariance"):
1835 BI = B.getI()
1836 RI = R.getI()
1838 __n = Xb.size
1839 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
1841 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1842 Xn = Xb
1843 Pn = B
1844 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1845 selfA.StoredVariables["Analysis"].store( Xb )
1846 if selfA._toStore("APosterioriCovariance"):
1847 if hasattr(B,"asfullmatrix"):
1848 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1849 else:
1850 selfA.StoredVariables["APosterioriCovariance"].store( B )
1851 selfA._setInternalState("seed", numpy.random.get_state())
1852 elif selfA._parameters["nextStep"]:
1853 Xn = selfA._getInternalState("Xn")
1854 Pn = selfA._getInternalState("Pn")
1856 if selfA._parameters["EstimationOf"] == "Parameters":
1858 previousJMinimum = numpy.finfo(float).max
1860 for step in range(duration-1):
1861 if hasattr(Y,"store"):
1862 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1863 else:
1864 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1866 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1867 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1868 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1869 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1871 if selfA._parameters["EstimationOf"] == "State":
1872 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1873 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1874 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1875 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1877 if U is not None:
1878 if hasattr(U,"store") and len(U)>1:
1879 Un = numpy.ravel( U[step] ).reshape((-1,1))
1880 elif hasattr(U,"store") and len(U)==1:
1881 Un = numpy.ravel( U[0] ).reshape((-1,1))
1882 else:
1883 Un = numpy.ravel( U ).reshape((-1,1))
1884 else:
1885 Un = None
1887 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1888 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1889 if Cm is not None and Un is not None: # Warning: if Cm is also applied in M, this term is duplicated!
1890 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1891 Xn_predicted = Xn_predicted + Cm @ Un
1892 Pn_predicted = Q + Mt * (Pn * Ma)
1893 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1894 # ---> By principle, M = Id, Q = 0
1895 Xn_predicted = Xn
1896 Pn_predicted = Pn
1898 if selfA._parameters["EstimationOf"] == "State":
1899 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1900 _Innovation = Ynpu - HX_predicted
1901 elif selfA._parameters["EstimationOf"] == "Parameters":
1902 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1903 _Innovation = Ynpu - HX_predicted
1904 if Cm is not None and Un is not None: # Warning: if Cm is also applied in H, this term is duplicated!
1905 _Innovation = _Innovation - Cm @ Un
1907 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1908 Xn = Xn_predicted + Kn * _Innovation
1909 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1910 Xa = Xn # Pointers
1912 #--------------------------
1913 selfA._setInternalState("Xn", Xn)
1914 selfA._setInternalState("Pn", Pn)
1915 #--------------------------
1917 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1918 # ---> with analysis
1919 selfA.StoredVariables["Analysis"].store( Xa )
1920 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1921 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1922 if selfA._toStore("InnovationAtCurrentAnalysis"):
1923 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1924 # ---> with current state
1925 if selfA._parameters["StoreInternalVariables"] \
1926 or selfA._toStore("CurrentState"):
1927 selfA.StoredVariables["CurrentState"].store( Xn )
1928 if selfA._toStore("ForecastState"):
1929 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1930 if selfA._toStore("ForecastCovariance"):
1931 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1932 if selfA._toStore("BMA"):
1933 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1934 if selfA._toStore("InnovationAtCurrentState"):
1935 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1936 if selfA._toStore("SimulatedObservationAtCurrentState") \
1937 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1938 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1940 if selfA._parameters["StoreInternalVariables"] \
1941 or selfA._toStore("CostFunctionJ") \
1942 or selfA._toStore("CostFunctionJb") \
1943 or selfA._toStore("CostFunctionJo") \
1944 or selfA._toStore("CurrentOptimum") \
1945 or selfA._toStore("APosterioriCovariance"):
1946 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1947 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1948 J = Jb + Jo
1949 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1950 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1951 selfA.StoredVariables["CostFunctionJ" ].store( J )
1953 if selfA._toStore("IndexOfOptimum") \
1954 or selfA._toStore("CurrentOptimum") \
1955 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1956 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1957 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1958 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1959 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1960 if selfA._toStore("IndexOfOptimum"):
1961 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1962 if selfA._toStore("CurrentOptimum"):
1963 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1964 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1965 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1966 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1967 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1968 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1969 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1970 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1971 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1972 if selfA._toStore("APosterioriCovariance"):
1973 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1974 if selfA._parameters["EstimationOf"] == "Parameters" \
1975 and J < previousJMinimum:
1976 previousJMinimum = J
1977 XaMin = Xa
1978 if selfA._toStore("APosterioriCovariance"):
1979 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1981 # Additional final storage of the optimum in parameter estimation
1982 # ----------------------------------------------------------------------
1983 if selfA._parameters["EstimationOf"] == "Parameters":
1984 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1985 selfA.StoredVariables["Analysis"].store( XaMin )
1986 if selfA._toStore("APosterioriCovariance"):
1987 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1988 if selfA._toStore("BMA"):
1989 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1993 # ==============================================================================
1994 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1995 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1996 """
1997 Iterative Ensemble Kalman Filter (IEnKF)
1998 """
1999 if selfA._parameters["EstimationOf"] == "Parameters":
2000 selfA._parameters["StoreInternalVariables"] = True
2003 H = HO["Direct"].appliedControledFormTo
2005 if selfA._parameters["EstimationOf"] == "State":
2006 M = EM["Direct"].appliedControledFormTo
2008 if CM is not None and "Tangent" in CM and U is not None:
2009 Cm = CM["Tangent"].asMatrix(Xb)
2010 else:
2011 Cm = None
2013 # Observation duration and sizes
2014 if hasattr(Y,"stepnumber"):
2015 duration = Y.stepnumber()
2016 __p = numpy.cumprod(Y.shape())[-1]
2017 else:
2018 duration = 2
2019 __p = numpy.array(Y).size
2021 # Precompute the inverses of B and R
2022 if selfA._parameters["StoreInternalVariables"] \
2023 or selfA._toStore("CostFunctionJ") \
2024 or selfA._toStore("CostFunctionJb") \
2025 or selfA._toStore("CostFunctionJo") \
2026 or selfA._toStore("CurrentOptimum") \
2027 or selfA._toStore("APosterioriCovariance"):
2028 BI = B.getI()
2029 RI = R.getI()
2031 __n = Xb.size
2032 __m = selfA._parameters["NumberOfMembers"]
2033 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
2034 previousJMinimum = numpy.finfo(float).max
2036 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2037 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2038 else: Pn = B
2039 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2040 selfA.StoredVariables["Analysis"].store( Xb )
2041 if selfA._toStore("APosterioriCovariance"):
2042 if hasattr(B,"asfullmatrix"):
2043 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2044 else:
2045 selfA.StoredVariables["APosterioriCovariance"].store( B )
2046 selfA._setInternalState("seed", numpy.random.get_state())
2047 elif selfA._parameters["nextStep"]:
2048 Xn = selfA._getInternalState("Xn")
2050 for step in range(duration-1):
2051 numpy.random.set_state(selfA._getInternalState("seed"))
2052 if hasattr(Y,"store"):
2053 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2054 else:
2055 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2057 if U is not None:
2058 if hasattr(U,"store") and len(U)>1:
2059 Un = numpy.ravel( U[step] ).reshape((-1,1))
2060 elif hasattr(U,"store") and len(U)==1:
2061 Un = numpy.ravel( U[0] ).reshape((-1,1))
2062 else:
2063 Un = numpy.ravel( U ).reshape((-1,1))
2064 else:
2065 Un = None
2067 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2068 Xn = CovarianceInflation( Xn,
2069 selfA._parameters["InflationType"],
2070 selfA._parameters["InflationFactor"],
2073 #--------------------------
2074 if VariantM == "IEnKF12":
2075 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2076 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2077 __j = 0
2078 Deltaw = 1
2079 if not BnotT:
2080 Ta = numpy.identity(__m)
2081 vw = numpy.zeros(__m)
2082 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2083 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2085 if BnotT:
2086 E1 = vx1 + _epsilon * EaX
2087 else:
2088 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2090 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2091 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2092 argsAsSerie = True,
2093 returnSerieAsArrayMatrix = True )
2094 elif selfA._parameters["EstimationOf"] == "Parameters":
2095 # ---> By principle, M = Id
2096 E2 = Xn
2097 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2098 vy1 = H((vx2, Un)).reshape((__p,1))
2100 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2101 argsAsSerie = True,
2102 returnSerieAsArrayMatrix = True )
2103 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2105 if BnotT:
2106 EaY = (HE2 - vy2) / _epsilon
2107 else:
2108 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2110 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2111 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2112 Deltaw = - numpy.linalg.solve(mH,GradJ)
2113 vw = vw + Deltaw
2115 if not BnotT:
2117 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2119 __j = __j + 1
2121 A2 = EnsembleOfAnomalies( E2 )
2123 if BnotT:
2124 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2125 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2127 Xn = vx1 + A2
2128 #--------------------------
2129 else:
2130 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2132 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2133 Xn = CovarianceInflation( Xn,
2134 selfA._parameters["InflationType"],
2135 selfA._parameters["InflationFactor"],
2138 Xa = EnsembleMean( Xn )
2139 #--------------------------
2140 selfA._setInternalState("Xn", Xn)
2141 selfA._setInternalState("seed", numpy.random.get_state())
2142 #--------------------------
2144 if selfA._parameters["StoreInternalVariables"] \
2145 or selfA._toStore("CostFunctionJ") \
2146 or selfA._toStore("CostFunctionJb") \
2147 or selfA._toStore("CostFunctionJo") \
2148 or selfA._toStore("APosterioriCovariance") \
2149 or selfA._toStore("InnovationAtCurrentAnalysis") \
2150 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2151 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2152 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2153 _Innovation = Ynpu - _HXa
2155 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2156 # ---> with analysis
2157 selfA.StoredVariables["Analysis"].store( Xa )
2158 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2159 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2160 if selfA._toStore("InnovationAtCurrentAnalysis"):
2161 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2162 # ---> with current state
2163 if selfA._parameters["StoreInternalVariables"] \
2164 or selfA._toStore("CurrentState"):
2165 selfA.StoredVariables["CurrentState"].store( Xn )
2166 if selfA._toStore("ForecastState"):
2167 selfA.StoredVariables["ForecastState"].store( E2 )
2168 if selfA._toStore("ForecastCovariance"):
2169 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2170 if selfA._toStore("BMA"):
2171 selfA.StoredVariables["BMA"].store( E2 - Xa )
2172 if selfA._toStore("InnovationAtCurrentState"):
2173 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2174 if selfA._toStore("SimulatedObservationAtCurrentState") \
2175 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2176 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2178 if selfA._parameters["StoreInternalVariables"] \
2179 or selfA._toStore("CostFunctionJ") \
2180 or selfA._toStore("CostFunctionJb") \
2181 or selfA._toStore("CostFunctionJo") \
2182 or selfA._toStore("CurrentOptimum") \
2183 or selfA._toStore("APosterioriCovariance"):
2184 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2185 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2186 J = Jb + Jo
2187 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2188 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2189 selfA.StoredVariables["CostFunctionJ" ].store( J )
2191 if selfA._toStore("IndexOfOptimum") \
2192 or selfA._toStore("CurrentOptimum") \
2193 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2194 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2195 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2196 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2197 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2198 if selfA._toStore("IndexOfOptimum"):
2199 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2200 if selfA._toStore("CurrentOptimum"):
2201 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2202 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2203 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2204 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2205 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2206 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2207 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2208 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2209 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2210 if selfA._toStore("APosterioriCovariance"):
2211 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2212 if selfA._parameters["EstimationOf"] == "Parameters" \
2213 and J < previousJMinimum:
2214 previousJMinimum = J
2215 XaMin = Xa
2216 if selfA._toStore("APosterioriCovariance"):
2217 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2218 # ---> For the smoothers
2219 if selfA._toStore("CurrentEnsembleState"):
2220 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2222 # Additional final storage of the optimum in parameter estimation
2223 # ----------------------------------------------------------------------
2224 if selfA._parameters["EstimationOf"] == "Parameters":
2225 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2226 selfA.StoredVariables["Analysis"].store( XaMin )
2227 if selfA._toStore("APosterioriCovariance"):
2228 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2229 if selfA._toStore("BMA"):
2230 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2234 # ==============================================================================
2235 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2236 """
2237 Incremental 3DVAR
2238 """
2242 Hm = HO["Direct"].appliedTo
2244 # Precompute the inverses of B and R
2245 BI = B.getI()
2246 RI = R.getI()
2247 HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
2248 Innovation = Y - HXb
2250 # Outer loop
2252 iOuter = 0
2253 J = 1./mpr
2254 DeltaJ = 1./mpr
2255 Xr = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
2256 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2260 Ht = HO["Tangent"].asMatrix(Xr)
2261 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2263 # Definition of the cost function
2264 # -------------------------------
2265 def CostFunction(dx):
2266 _dX = numpy.asarray(dx).reshape((-1,1))
2267 if selfA._parameters["StoreInternalVariables"] or \
2268 selfA._toStore("CurrentState") or \
2269 selfA._toStore("CurrentOptimum"):
2270 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2271 _HdX = (Ht @ _dX).reshape((-1,1))
2272 _dInnovation = Innovation - _HdX
2273 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2274 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2275 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2276 if selfA._toStore("InnovationAtCurrentState"):
2277 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2279 Jb = float( 0.5 * _dX.T * (BI * _dX) )
2280 Jo = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
2281 J = Jb + Jo
2283 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2284 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2285 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2286 selfA.StoredVariables["CostFunctionJ" ].store( J )
2287 if selfA._toStore("IndexOfOptimum") or \
2288 selfA._toStore("CurrentOptimum") or \
2289 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2290 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2291 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2292 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2293 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2294 if selfA._toStore("IndexOfOptimum"):
2295 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2296 if selfA._toStore("CurrentOptimum"):
2297 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2298 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2299 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2300 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2301 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2302 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2303 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2304 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2305 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2308 def GradientOfCostFunction(dx):
2309 _dX = numpy.ravel( dx )
2310 _HdX = (Ht @ _dX).reshape((-1,1))
2311 _dInnovation = Innovation - _HdX
2312 GradJb = BI * _dX.reshape((-1,1))
2313 GradJo = - Ht.T @ (RI * _dInnovation)
2314 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2315 return GradJ
2317 # Minimization of the functional
2318 # ------------------------------
2319 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2321 if selfA._parameters["Minimizer"] == "LBFGSB":
2322 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2323 if "0.19" <= scipy.version.version <= "1.1.0":
2324 import lbfgsbhlt as optimiseur
2326 import scipy.optimize as optimiseur
2327 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2328 func = CostFunction,
2329 x0 = numpy.zeros(Xb.size),
2330 fprime = GradientOfCostFunction,
2331 args = (),
2332 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2333 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2334 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2335 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2336 iprint = selfA._parameters["optiprint"],
2337 )
2338 nfeval = Informations['funcalls']
2339 rc = Informations['warnflag']
2340 elif selfA._parameters["Minimizer"] == "TNC":
2341 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2342 func = CostFunction,
2343 x0 = numpy.zeros(Xb.size),
2344 fprime = GradientOfCostFunction,
2345 args = (),
2346 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2347 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2348 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2349 ftol = selfA._parameters["CostDecrementTolerance"],
2350 messages = selfA._parameters["optmessages"],
2351 )
2352 elif selfA._parameters["Minimizer"] == "CG":
2353 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2354 f = CostFunction,
2355 x0 = numpy.zeros(Xb.size),
2356 fprime = GradientOfCostFunction,
2357 args = (),
2358 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2359 gtol = selfA._parameters["GradientNormTolerance"],
2360 disp = selfA._parameters["optdisp"],
2361 full_output = True,
2362 )
2363 elif selfA._parameters["Minimizer"] == "NCG":
2364 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2366 x0 = numpy.zeros(Xb.size),
2367 fprime = GradientOfCostFunction,
2369 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2370 avextol = selfA._parameters["CostDecrementTolerance"],
2371 disp = selfA._parameters["optdisp"],
2372 full_output = True,
2373 )
2374 elif selfA._parameters["Minimizer"] == "BFGS":
2375 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2377 x0 = numpy.zeros(Xb.size),
2378 fprime = GradientOfCostFunction,
2380 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2381 gtol = selfA._parameters["GradientNormTolerance"],
2382 disp = selfA._parameters["optdisp"],
2383 full_output = True,
2384 )
2385 else:
2386 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2388 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2389 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2391 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2392 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2393 else:
2394 Minimum = Xb + Minimum.reshape((-1,1))
2396 Xr = Minimum
2397 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2398 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2401 #--------------------------
2402 Xa = Xr
2403 selfA.StoredVariables["Analysis"].store( Xa )
2405 if selfA._toStore("OMA") or \
2406 selfA._toStore("SigmaObs2") or \
2407 selfA._toStore("SimulationQuantiles") or \
2408 selfA._toStore("SimulatedObservationAtOptimum"):
2409 if selfA._toStore("SimulatedObservationAtCurrentState"):
2410 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2411 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2412 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2416 if selfA._toStore("APosterioriCovariance") or \
2417 selfA._toStore("SimulationQuantiles") or \
2418 selfA._toStore("JacobianMatrixAtOptimum") or \
2419 selfA._toStore("KalmanGainAtOptimum"):
2420 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2421 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2422 if selfA._toStore("APosterioriCovariance") or \
2423 selfA._toStore("SimulationQuantiles") or \
2424 selfA._toStore("KalmanGainAtOptimum"):
2425 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2426 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2427 if selfA._toStore("APosterioriCovariance") or \
2428 selfA._toStore("SimulationQuantiles"):
2429 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2430 if selfA._toStore("APosterioriCovariance"):
2431 selfA.StoredVariables["APosterioriCovariance"].store( A )
2432 if selfA._toStore("JacobianMatrixAtOptimum"):
2433 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2434 if selfA._toStore("KalmanGainAtOptimum"):
2435 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2436 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2437 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2439 # Additional computations and/or storage
2440 # --------------------------------------
2441 if selfA._toStore("Innovation") or \
2442 selfA._toStore("SigmaObs2") or \
2443 selfA._toStore("MahalanobisConsistency") or \
2444 selfA._toStore("OMB"):
2446 if selfA._toStore("Innovation"):
2447 selfA.StoredVariables["Innovation"].store( d )
2448 if selfA._toStore("BMA"):
2449 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2450 if selfA._toStore("OMA"):
2451 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2452 if selfA._toStore("OMB"):
2453 selfA.StoredVariables["OMB"].store( d )
2454 if selfA._toStore("SigmaObs2"):
2455 TraceR = R.trace(Y.size)
2456 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2457 if selfA._toStore("MahalanobisConsistency"):
2458 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2459 if selfA._toStore("SimulationQuantiles"):
2460 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2461 if selfA._toStore("SimulatedObservationAtBackground"):
2462 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2463 if selfA._toStore("SimulatedObservationAtOptimum"):
2464 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2468 # ==============================================================================
2469 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
2470 VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
2474 Maximum Likelihood Ensemble Filter
2476 if selfA._parameters["EstimationOf"] == "Parameters":
2477 selfA._parameters["StoreInternalVariables"] = True
2480 H = HO["Direct"].appliedControledFormTo
2482 if selfA._parameters["EstimationOf"] == "State":
2483 M = EM["Direct"].appliedControledFormTo
2485 if CM is not None and "Tangent" in CM and U is not None:
2486 Cm = CM["Tangent"].asMatrix(Xb)
2487 else:
2488 Cm = None
2490 # Observation duration and sizes
2491 if hasattr(Y,"stepnumber"):
2492 duration = Y.stepnumber()
2493 __p = numpy.cumprod(Y.shape())[-1]
2494 else:
2495 duration = 2
2496 __p = numpy.array(Y).size
2498 # Precompute the inverses of B and R
2499 if selfA._parameters["StoreInternalVariables"] \
2500 or selfA._toStore("CostFunctionJ") \
2501 or selfA._toStore("CostFunctionJb") \
2502 or selfA._toStore("CostFunctionJo") \
2503 or selfA._toStore("CurrentOptimum") \
2504 or selfA._toStore("APosterioriCovariance"):
2505 BI = B.getI()
2506 RI = R.getI()
2508 __n = Xb.size
2509 __m = selfA._parameters["NumberOfMembers"]
2510 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
2511 previousJMinimum = numpy.finfo(float).max
2513 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2514 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2515 selfA.StoredVariables["Analysis"].store( Xb )
2516 if selfA._toStore("APosterioriCovariance"):
2517 if hasattr(B,"asfullmatrix"):
2518 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2519 else:
2520 selfA.StoredVariables["APosterioriCovariance"].store( B )
2521 selfA._setInternalState("seed", numpy.random.get_state())
2522 elif selfA._parameters["nextStep"]:
2523 Xn = selfA._getInternalState("Xn")
2525 for step in range(duration-1):
2526 numpy.random.set_state(selfA._getInternalState("seed"))
2527 if hasattr(Y,"store"):
2528 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2529 else:
2530 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2532 if U is not None:
2533 if hasattr(U,"store") and len(U)>1:
2534 Un = numpy.ravel( U[step] ).reshape((-1,1))
2535 elif hasattr(U,"store") and len(U)==1:
2536 Un = numpy.ravel( U[0] ).reshape((-1,1))
2537 else:
2538 Un = numpy.ravel( U ).reshape((-1,1))
2539 else:
2540 Un = None
2542 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2543 Xn = CovarianceInflation( Xn,
2544 selfA._parameters["InflationType"],
2545 selfA._parameters["InflationFactor"],
2548 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2549 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2550 argsAsSerie = True,
2551 returnSerieAsArrayMatrix = True )
2552 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2553 if Cm is not None and Un is not None: # Warning: if Cm is also applied in M, this term is duplicated!
2554 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2555 Xn_predicted = Xn_predicted + Cm @ Un
2556 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2557 # ---> By principle, M = Id, Q = 0
2558 Xn_predicted = EMX = Xn
2560 #--------------------------
2561 if VariantM == "MLEF13":
2562 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2563 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2564 Ua = numpy.identity(__m)
2565 __j = 0
2566 Deltaw = 1
2567 if not BnotT:
2568 Ta = numpy.identity(__m)
2569 vw = numpy.zeros(__m)
2570 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2571 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2573 if BnotT:
2574 E1 = vx1 + _epsilon * EaX
2575 else:
2576 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2578 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2579 argsAsSerie = True,
2580 returnSerieAsArrayMatrix = True )
2581 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2583 if BnotT:
2584 EaY = (HE2 - vy2) / _epsilon
2585 else:
2586 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2588 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2589 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2590 Deltaw = - numpy.linalg.solve(mH,GradJ)
2591 vw = vw + Deltaw
2593 if not BnotT:
2595 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2597 __j = __j + 1
2599 if BnotT:
2600 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2602 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2603 #--------------------------
2604 else:
2605 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2607 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2608 Xn = CovarianceInflation( Xn,
2609 selfA._parameters["InflationType"],
2610 selfA._parameters["InflationFactor"],
2613 if Hybrid == "E3DVAR":
2614 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
2615 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
2617 Xa = EnsembleMean( Xn )
2618 #--------------------------
2619 selfA._setInternalState("Xn", Xn)
2620 selfA._setInternalState("seed", numpy.random.get_state())
2621 #--------------------------
2623 if selfA._parameters["StoreInternalVariables"] \
2624 or selfA._toStore("CostFunctionJ") \
2625 or selfA._toStore("CostFunctionJb") \
2626 or selfA._toStore("CostFunctionJo") \
2627 or selfA._toStore("APosterioriCovariance") \
2628 or selfA._toStore("InnovationAtCurrentAnalysis") \
2629 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2630 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2631 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2632 _Innovation = Ynpu - _HXa
2634 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2635 # ---> with analysis
2636 selfA.StoredVariables["Analysis"].store( Xa )
2637 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2638 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2639 if selfA._toStore("InnovationAtCurrentAnalysis"):
2640 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2641 # ---> with current state
2642 if selfA._parameters["StoreInternalVariables"] \
2643 or selfA._toStore("CurrentState"):
2644 selfA.StoredVariables["CurrentState"].store( Xn )
2645 if selfA._toStore("ForecastState"):
2646 selfA.StoredVariables["ForecastState"].store( EMX )
2647 if selfA._toStore("ForecastCovariance"):
2648 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2649 if selfA._toStore("BMA"):
2650 selfA.StoredVariables["BMA"].store( EMX - Xa )
2651 if selfA._toStore("InnovationAtCurrentState"):
2652 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2653 if selfA._toStore("SimulatedObservationAtCurrentState") \
2654 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2655 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2657 if selfA._parameters["StoreInternalVariables"] \
2658 or selfA._toStore("CostFunctionJ") \
2659 or selfA._toStore("CostFunctionJb") \
2660 or selfA._toStore("CostFunctionJo") \
2661 or selfA._toStore("CurrentOptimum") \
2662 or selfA._toStore("APosterioriCovariance"):
2663 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2664 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2665 J = Jb + Jo
2666 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2667 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2668 selfA.StoredVariables["CostFunctionJ" ].store( J )
2670 if selfA._toStore("IndexOfOptimum") \
2671 or selfA._toStore("CurrentOptimum") \
2672 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2673 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2674 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2675 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2676 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2677 if selfA._toStore("IndexOfOptimum"):
2678 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2679 if selfA._toStore("CurrentOptimum"):
2680 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2681 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2682 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2683 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2684 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2685 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2686 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2687 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2688 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2689 if selfA._toStore("APosterioriCovariance"):
2690 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2691 if selfA._parameters["EstimationOf"] == "Parameters" \
2692 and J < previousJMinimum:
2693 previousJMinimum = J
2694 XaMin = Xa
2695 if selfA._toStore("APosterioriCovariance"):
2696 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2697 # ---> For the smoothers
2698 if selfA._toStore("CurrentEnsembleState"):
2699 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2701 # Additional final storage of the optimum in parameter estimation
2702 # ----------------------------------------------------------------------
2703 if selfA._parameters["EstimationOf"] == "Parameters":
2704 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2705 selfA.StoredVariables["Analysis"].store( XaMin )
2706 if selfA._toStore("APosterioriCovariance"):
2707 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2708 if selfA._toStore("BMA"):
2709 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2713 # ==============================================================================
2714 def mmqr(
2715 func = None,
2716 x0 = None,
2717 fprime = None,
2718 bounds = None,
2719 quantile = 0.5,
2720 maxfun = 15000,
2721 toler = 1.e-06,
2722 y = None,
2723 ):
2724 """
2725 Computer implementation of the MMQR algorithm, based on the publication:
2726 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2727 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2728 """
2730 # Retrieve initial data and information
2731 # --------------------------------------
2732 variables = numpy.ravel( x0 )
2733 mesures = numpy.ravel( y )
2734 increment = sys.float_info[0]
2735 p = variables.size
2736 n = mesures.size
2737 quantile = float(quantile)
2739 # Compute the MM parameters
2740 # -------------------------
2741 tn = float(toler) / n
2742 e0 = -tn / math.log(tn)
2743 epsilon = (e0-tn)/(1+math.log(e0))
2745 # Initialization computations
2746 # ---------------------------
2747 residus = mesures - numpy.ravel( func( variables ) )
2748 poids = 1./(epsilon+numpy.abs(residus))
2749 veps = 1. - 2. * quantile - residus * poids
2750 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2751 iteration = 0
2753 # Iterative search
2754 # ----------------
2755 while (increment > toler) and (iteration < maxfun) :
2756 iteration += 1
2758 Derivees = numpy.array(fprime(variables))
2759 Derivees = Derivees.reshape(n,p) # ADAO & check shape
2760 DeriveesT = Derivees.transpose()
2761 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2762 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2763 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2765 variables = variables + step
2766 if bounds is not None:
2767 # Warning: avoid an infinite loop if an interval is too small
2768 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2769 step = step/2.
2770 variables = variables - step
2771 residus = mesures - numpy.ravel( func(variables) )
2772 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2774 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2775 step = step/2.
2776 variables = variables - step
2777 residus = mesures - numpy.ravel( func(variables) )
2778 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2780 increment = lastsurrogate-surrogate
2781 poids = 1./(epsilon+numpy.abs(residus))
2782 veps = 1. - 2. * quantile - residus * poids
2783 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2787 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2789 return variables, Ecart, [n,p,iteration,increment,0]
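#
# Hedged usage sketch (comments only; data and names hypothetical): median
# regression of a linear model y ~ a*x + b through the interface above:
#
#   import numpy
#   x = numpy.linspace(0., 1., 50)
#   yobs = 2.*x + 1. + 0.1*numpy.random.default_rng(3).normal(size=50)
#   func = lambda ab: ab[0]*x + ab[1]
#   fprime = lambda ab: numpy.vstack((x, numpy.ones(50))).T    # (n,p) Jacobian
#   ab, ecart, infos = mmqr(func=func, x0=[0., 0.], fprime=fprime,
#                           quantile=0.5, y=yobs)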
2791 # ==============================================================================
2792 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2793 """
2794 Multi-step and multi-method 3DVAR
2795 """
2799 if selfA._parameters["EstimationOf"] == "State":
2800 M = EM["Direct"].appliedControledFormTo
2801 if CM is not None and "Tangent" in CM and U is not None:
2802 Cm = CM["Tangent"].asMatrix(Xb)
2803 else:
2804 Cm = None
2806 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2807 Xn = numpy.ravel(Xb).reshape((-1,1))
2808 selfA.StoredVariables["Analysis"].store( Xn )
2809 if selfA._toStore("APosterioriCovariance"):
2810 if hasattr(B,"asfullmatrix"):
2811 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2813 selfA.StoredVariables["APosterioriCovariance"].store( B )
2814 if selfA._toStore("ForecastState"):
2815 selfA.StoredVariables["ForecastState"].store( Xn )
2816 elif selfA._parameters["nextStep"]:
2817 Xn = selfA._getInternalState("Xn")
2819 Xn = numpy.ravel(Xb).reshape((-1,1))
2821 if hasattr(Y,"stepnumber"):
2822 duration = Y.stepnumber()
2827 for step in range(duration-1):
2828 if hasattr(Y,"store"):
2829 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2830 else:
2831 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2833 if U is not None:
2834 if hasattr(U,"store") and len(U)>1:
2835 Un = numpy.ravel( U[step] ).reshape((-1,1))
2836 elif hasattr(U,"store") and len(U)==1:
2837 Un = numpy.ravel( U[0] ).reshape((-1,1))
2838 else:
2839 Un = numpy.ravel( U ).reshape((-1,1))
2840 else:
2841 Un = None
2843 if selfA._parameters["EstimationOf"] == "State": # Forecast
2844 Xn_predicted = M( (Xn, Un) )
2845 if selfA._toStore("ForecastState"):
2846 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2847 if Cm is not None and Un is not None: # Warning: if Cm is also applied in M, this term is duplicated!
2848 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2849 Xn_predicted = Xn_predicted + Cm @ Un
2850 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2851 # ---> By principle, M = Id, Q = 0
2852 Xn_predicted = Xn
2853 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2855 oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
2857 Xn = selfA.StoredVariables["Analysis"][-1]
2858 #--------------------------
2859 selfA._setInternalState("Xn", Xn)
2861 return 0
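#
# Hedged interface note: "oneCycle" is expected to be one of the mono-step
# 3DVAR implementations of this module (for instance incr3dvar or psas3dvar,
# defined nearby), called here with the generic signature
# oneCycle(selfA, Xb, Y, U, HO, EM, CM, R, B, Q); each call appends its
# result to selfA.StoredVariables["Analysis"], which multi3dvar then reads
# back as the initial state of the next step.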
2863 # ==============================================================================
2864 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2865 """
2866 3DVAR PSAS (Physical-space Statistical Analysis System)
2867 """
2871 Hm = HO["Direct"].appliedTo
2873 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2874 HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
2875 else:
2876 HXb = numpy.asarray(Hm( Xb ))
2877 HXb = numpy.ravel( HXb ).reshape((-1,1))
2878 if Y.size != HXb.size:
2879 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2880 if max(Y.shape) != max(HXb.shape):
2881 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2883 if selfA._toStore("JacobianMatrixAtBackground"):
2884 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2885 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2886 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2888 Ht = HO["Tangent"].asMatrix(Xb)
2890 HBHTpR = R + Ht * BHT
2891 Innovation = Y - HXb
2893 Xini = numpy.zeros(Y.size)
2895 # Definition of the cost function
2896 # -------------------------------
2897 def CostFunction(w):
2898 _W = numpy.asarray(w).reshape((-1,1))
2899 if selfA._parameters["StoreInternalVariables"] or \
2900 selfA._toStore("CurrentState") or \
2901 selfA._toStore("CurrentOptimum"):
2902 selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2903 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2904 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2905 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2906 if selfA._toStore("InnovationAtCurrentState"):
2907 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2909 Jb = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2910 Jo = float( - _W.T @ Innovation )
2911 J = Jb + Jo
2913 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2914 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2915 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2916 selfA.StoredVariables["CostFunctionJ" ].store( J )
2917 if selfA._toStore("IndexOfOptimum") or \
2918 selfA._toStore("CurrentOptimum") or \
2919 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2920 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2921 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2922 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2923 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2924 if selfA._toStore("IndexOfOptimum"):
2925 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2926 if selfA._toStore("CurrentOptimum"):
2927 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2928 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2929 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2930 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2931 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2932 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2933 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2934 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2935 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2936 return J
2938 def GradientOfCostFunction(w):
2939 _W = numpy.asarray(w).reshape((-1,1))
2940 GradJb = HBHTpR @ _W
2941 GradJo = - Innovation
2942 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2943 return GradJ
2945 # Minimization of the functional
2946 # --------------------------------
2947 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2949 if selfA._parameters["Minimizer"] == "LBFGSB":
2950 if "0.19" <= scipy.version.version <= "1.1.0":
2951 import lbfgsbhlt as optimiseur
2952 else:
2953 import scipy.optimize as optimiseur
2954 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2955 func = CostFunction,
2957 fprime = GradientOfCostFunction,
2959 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2960 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2961 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2962 iprint = selfA._parameters["optiprint"],
2964 nfeval = Informations['funcalls']
2965 rc = Informations['warnflag']
2966 elif selfA._parameters["Minimizer"] == "TNC":
2967 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2968 func = CostFunction,
2970 fprime = GradientOfCostFunction,
2972 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2973 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2974 ftol = selfA._parameters["CostDecrementTolerance"],
2975 messages = selfA._parameters["optmessages"],
2977 elif selfA._parameters["Minimizer"] == "CG":
2978 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2981 fprime = GradientOfCostFunction,
2983 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2984 gtol = selfA._parameters["GradientNormTolerance"],
2985 disp = selfA._parameters["optdisp"],
2988 elif selfA._parameters["Minimizer"] == "NCG":
2989 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2992 fprime = GradientOfCostFunction,
2994 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2995 avextol = selfA._parameters["CostDecrementTolerance"],
2996 disp = selfA._parameters["optdisp"],
2999 elif selfA._parameters["Minimizer"] == "BFGS":
3000 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3003 fprime = GradientOfCostFunction,
3005 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3006 gtol = selfA._parameters["GradientNormTolerance"],
3007 disp = selfA._parameters["optdisp"],
3011 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3013 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3014 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3016 # Workaround for a TNC bug on the returned Minimum
3017 # ----------------------------------------------------------------
3018 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3019 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3020 else:
3021 Minimum = Xb + BHT @ Minimum.reshape((-1,1))
3023 Xa = Minimum
3024 #--------------------------
3026 selfA.StoredVariables["Analysis"].store( Xa )
3028 if selfA._toStore("OMA") or \
3029 selfA._toStore("SigmaObs2") or \
3030 selfA._toStore("SimulationQuantiles") or \
3031 selfA._toStore("SimulatedObservationAtOptimum"):
3032 if selfA._toStore("SimulatedObservationAtCurrentState"):
3033 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3034 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3035 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3036 else:
3037 HXa = Hm( Xa )
3038 HXa = numpy.ravel( HXa ).reshape((-1,1))
3039 if selfA._toStore("APosterioriCovariance") or \
3040 selfA._toStore("SimulationQuantiles") or \
3041 selfA._toStore("JacobianMatrixAtOptimum") or \
3042 selfA._toStore("KalmanGainAtOptimum"):
3043 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3044 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3045 if selfA._toStore("APosterioriCovariance") or \
3046 selfA._toStore("SimulationQuantiles") or \
3047 selfA._toStore("KalmanGainAtOptimum"):
3048 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3049 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3050 if selfA._toStore("APosterioriCovariance") or \
3051 selfA._toStore("SimulationQuantiles"):
3052 BI = B.getI()
3053 RI = R.getI()
3054 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3055 if selfA._toStore("APosterioriCovariance"):
3056 selfA.StoredVariables["APosterioriCovariance"].store( A )
3057 if selfA._toStore("JacobianMatrixAtOptimum"):
3058 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3059 if selfA._toStore("KalmanGainAtOptimum"):
3060 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3061 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3062 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3064 # Additional computations and/or storage
3065 # ---------------------------------------
3066 if selfA._toStore("Innovation") or \
3067 selfA._toStore("SigmaObs2") or \
3068 selfA._toStore("MahalanobisConsistency") or \
3069 selfA._toStore("OMB"):
3070 d = Y - HXb
3071 if selfA._toStore("Innovation"):
3072 selfA.StoredVariables["Innovation"].store( d )
3073 if selfA._toStore("BMA"):
3074 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3075 if selfA._toStore("OMA"):
3076 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3077 if selfA._toStore("OMB"):
3078 selfA.StoredVariables["OMB"].store( d )
3079 if selfA._toStore("SigmaObs2"):
3080 TraceR = R.trace(Y.size)
3081 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3082 if selfA._toStore("MahalanobisConsistency"):
3083 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3084 if selfA._toStore("SimulationQuantiles"):
3085 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3086 if selfA._toStore("SimulatedObservationAtBackground"):
3087 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3088 if selfA._toStore("SimulatedObservationAtOptimum"):
3089 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3091 return 0
3093 # ==============================================================================
3094 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
3095 VariantM="KalmanFilterFormula16",
3096 Hybrid=None,
3097 ):
3098 """
3099 Stochastic EnKF
3100 """
3101 if selfA._parameters["EstimationOf"] == "Parameters":
3102 selfA._parameters["StoreInternalVariables"] = True
3105 H = HO["Direct"].appliedControledFormTo
3107 if selfA._parameters["EstimationOf"] == "State":
3108 M = EM["Direct"].appliedControledFormTo
3110 if CM is not None and "Tangent" in CM and U is not None:
3111 Cm = CM["Tangent"].asMatrix(Xb)
3112 else:
3113 Cm = None
3115 # Observation duration and sizes
3116 if hasattr(Y,"stepnumber"):
3117 duration = Y.stepnumber()
3118 __p = numpy.cumprod(Y.shape())[-1]
3119 else:
3120 duration = 2
3121 __p = numpy.array(Y).size
3123 # Precompute the inverses of B and R
3124 if selfA._parameters["StoreInternalVariables"] \
3125 or selfA._toStore("CostFunctionJ") \
3126 or selfA._toStore("CostFunctionJb") \
3127 or selfA._toStore("CostFunctionJo") \
3128 or selfA._toStore("CurrentOptimum") \
3129 or selfA._toStore("APosterioriCovariance"):
3130 BI = B.getI()
3131 RI = R.getI()
3134 __m = selfA._parameters["NumberOfMembers"]
3135 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
3136 previousJMinimum = numpy.finfo(float).max
3138 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3139 else: Rn = R
3141 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3142 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3143 else: Pn = B
3144 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3145 selfA.StoredVariables["Analysis"].store( Xb )
3146 if selfA._toStore("APosterioriCovariance"):
3147 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3148 selfA._setInternalState("seed", numpy.random.get_state())
3149 elif selfA._parameters["nextStep"]:
3150 Xn = selfA._getInternalState("Xn")
3152 for step in range(duration-1):
3153 numpy.random.set_state(selfA._getInternalState("seed"))
3154 if hasattr(Y,"store"):
3155 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3156 else:
3157 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3159 if U is not None:
3160 if hasattr(U,"store") and len(U)>1:
3161 Un = numpy.ravel( U[step] ).reshape((-1,1))
3162 elif hasattr(U,"store") and len(U)==1:
3163 Un = numpy.ravel( U[0] ).reshape((-1,1))
3164 else:
3165 Un = numpy.ravel( U ).reshape((-1,1))
3166 else:
3167 Un = None
3169 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3170 Xn = CovarianceInflation( Xn,
3171 selfA._parameters["InflationType"],
3172 selfA._parameters["InflationFactor"],
3175 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3176 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3177 argsAsSerie = True,
3178 returnSerieAsArrayMatrix = True )
3179 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3180 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3181 argsAsSerie = True,
3182 returnSerieAsArrayMatrix = True )
3183 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
3184 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3185 Xn_predicted = Xn_predicted + Cm @ Un
3186 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3187 # ---> By principle, M = Id, Q = 0
3188 Xn_predicted = EMX = Xn
3189 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3190 argsAsSerie = True,
3191 returnSerieAsArrayMatrix = True )
3193 # Mean of forecast and observation of forecast
3194 Xfm = EnsembleMean( Xn_predicted )
3195 Hfm = EnsembleMean( HX_predicted )
3197 #--------------------------
3198 if VariantM == "KalmanFilterFormula05":
3199 PfHT, HPfHT = 0., 0.
3200 for i in range(__m):
3201 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3202 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3203 PfHT += Exfi * Eyfi.T
3204 HPfHT += Eyfi * Eyfi.T
3205 PfHT = (1./(__m-1)) * PfHT
3206 HPfHT = (1./(__m-1)) * HPfHT
3207 Kn = PfHT * ( R + HPfHT ).I
3210 for i in range(__m):
3211 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3212 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
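# Remark (schematic): "KalmanFilterFormula05" is the stochastic, perturbed
# observations EnKF analysis written with explicit covariances: each member
# is updated as
#     Xa_i = Xf_i + K (Y + r_i - H Xf_i),   r_i ~ N(0, R)
# with the gain K = PfHT (HPfHT + R)^(-1) estimated from the ensemble
# anomalies accumulated above.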
3213 #--------------------------
3214 elif VariantM == "KalmanFilterFormula16":
3215 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3216 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3218 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3219 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3221 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3223 for i in range(__m):
3224 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
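# Remark (schematic): "KalmanFilterFormula16" assembles the same stochastic
# analysis directly from the sqrt(m-1)-scaled anomalies,
#     K = EaX @ EaY.T @ (EaY @ EaY.T)^(-1)
# so the forecast covariance is never formed explicitly; each member is
# shifted by K applied to its own perturbed innovation EpY[:,i] - H Xf_i.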
3225 #--------------------------
3226 else:
3227 raise ValueError("VariantM has to be chosen in the authorized methods list.")
3229 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3230 Xn = CovarianceInflation( Xn,
3231 selfA._parameters["InflationType"],
3232 selfA._parameters["InflationFactor"],
3235 if Hybrid == "E3DVAR":
3236 betaf = selfA._parameters["HybridCovarianceEquilibrium"]
3237 Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
3239 Xa = EnsembleMean( Xn )
3240 #--------------------------
3241 selfA._setInternalState("Xn", Xn)
3242 selfA._setInternalState("seed", numpy.random.get_state())
3243 #--------------------------
3245 if selfA._parameters["StoreInternalVariables"] \
3246 or selfA._toStore("CostFunctionJ") \
3247 or selfA._toStore("CostFunctionJb") \
3248 or selfA._toStore("CostFunctionJo") \
3249 or selfA._toStore("APosterioriCovariance") \
3250 or selfA._toStore("InnovationAtCurrentAnalysis") \
3251 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3252 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3253 _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
3254 _Innovation = Ynpu - _HXa
3256 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3257 # ---> with the analysis
3258 selfA.StoredVariables["Analysis"].store( Xa )
3259 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3260 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3261 if selfA._toStore("InnovationAtCurrentAnalysis"):
3262 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3263 # ---> with the current state
3264 if selfA._parameters["StoreInternalVariables"] \
3265 or selfA._toStore("CurrentState"):
3266 selfA.StoredVariables["CurrentState"].store( Xn )
3267 if selfA._toStore("ForecastState"):
3268 selfA.StoredVariables["ForecastState"].store( EMX )
3269 if selfA._toStore("ForecastCovariance"):
3270 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3271 if selfA._toStore("BMA"):
3272 selfA.StoredVariables["BMA"].store( EMX - Xa )
3273 if selfA._toStore("InnovationAtCurrentState"):
3274 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3275 if selfA._toStore("SimulatedObservationAtCurrentState") \
3276 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3277 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3279 if selfA._parameters["StoreInternalVariables"] \
3280 or selfA._toStore("CostFunctionJ") \
3281 or selfA._toStore("CostFunctionJb") \
3282 or selfA._toStore("CostFunctionJo") \
3283 or selfA._toStore("CurrentOptimum") \
3284 or selfA._toStore("APosterioriCovariance"):
3285 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3286 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3287 J = Jb + Jo
3288 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3289 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3290 selfA.StoredVariables["CostFunctionJ" ].store( J )
3292 if selfA._toStore("IndexOfOptimum") \
3293 or selfA._toStore("CurrentOptimum") \
3294 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3295 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3296 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3297 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3298 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3299 if selfA._toStore("IndexOfOptimum"):
3300 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3301 if selfA._toStore("CurrentOptimum"):
3302 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3303 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3304 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3305 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3306 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3307 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3308 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3309 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3310 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3311 if selfA._toStore("APosterioriCovariance"):
3312 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3313 if selfA._parameters["EstimationOf"] == "Parameters" \
3314 and J < previousJMinimum:
3315 previousJMinimum = J
3316 XaMin = Xa
3317 if selfA._toStore("APosterioriCovariance"):
3318 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3319 # ---> For the smoothers
3320 if selfA._toStore("CurrentEnsembleState"):
3321 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3323 # Additional final storage of the optimum in parameter estimation
3324 # ----------------------------------------------------------------------
3325 if selfA._parameters["EstimationOf"] == "Parameters":
3326 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3327 selfA.StoredVariables["Analysis"].store( XaMin )
3328 if selfA._toStore("APosterioriCovariance"):
3329 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3330 if selfA._toStore("BMA"):
3331 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3333 return 0
3335 # ==============================================================================
3336 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3343 Hm = HO["Direct"].appliedTo
3344 Ha = HO["Adjoint"].appliedInXTo
3346 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3347 HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
3348 else:
3349 HXb = numpy.asarray(Hm( Xb ))
3350 HXb = HXb.reshape((-1,1))
3351 if Y.size != HXb.size:
3352 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3353 if max(Y.shape) != max(HXb.shape):
3354 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3356 if selfA._toStore("JacobianMatrixAtBackground"):
3357 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3358 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3359 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3361 BI = B.getI()
3362 RI = R.getI()
3364 Xini = selfA._parameters["InitializationPoint"]
3366 # Definition of the cost function
3367 # ------------------------------
3368 def CostFunction(x):
3369 _X = numpy.asarray(x).reshape((-1,1))
3370 if selfA._parameters["StoreInternalVariables"] or \
3371 selfA._toStore("CurrentState") or \
3372 selfA._toStore("CurrentOptimum"):
3373 selfA.StoredVariables["CurrentState"].store( _X )
3374 _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
3375 _Innovation = Y - _HX
3376 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3377 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3378 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3379 if selfA._toStore("InnovationAtCurrentState"):
3380 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3382 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3383 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3384 J = Jb + Jo
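# Remark (schematic): J is the standard 3DVAR cost function
#     J(x) = 1/2 (x-Xb).T @ BI @ (x-Xb) + 1/2 (Y-H(x)).T @ RI @ (Y-H(x))
# with BI and RI the precomputed inverses of B and R.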
3386 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3387 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3388 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3389 selfA.StoredVariables["CostFunctionJ" ].store( J )
3390 if selfA._toStore("IndexOfOptimum") or \
3391 selfA._toStore("CurrentOptimum") or \
3392 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3393 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3394 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3395 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3396 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3397 if selfA._toStore("IndexOfOptimum"):
3398 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3399 if selfA._toStore("CurrentOptimum"):
3400 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3401 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3402 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3403 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3404 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3405 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3406 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3407 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3408 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3409 return J
3411 def GradientOfCostFunction(x):
3412 _X = numpy.asarray(x).reshape((-1,1))
3413 _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
3414 GradJb = BI * (_X - Xb)
3415 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3416 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3417 return GradJ
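# Remark (schematic): the gradient is assembled as
#     grad J(x) = BI @ (x-Xb) - H'(x).T @ RI @ (Y-H(x))
# where the adjoint call Ha supplies the product by H'(x).T without ever
# building the full Jacobian of the observation operator.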
3419 # Minimization of the functional
3420 # --------------------------------
3421 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3423 if selfA._parameters["Minimizer"] == "LBFGSB":
3424 if "0.19" <= scipy.version.version <= "1.1.0":
3425 import lbfgsbhlt as optimiseur
3426 else:
3427 import scipy.optimize as optimiseur
3428 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3429 func = CostFunction,
3431 fprime = GradientOfCostFunction,
3433 bounds = selfA._parameters["Bounds"],
3434 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3435 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3436 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3437 iprint = selfA._parameters["optiprint"],
3439 nfeval = Informations['funcalls']
3440 rc = Informations['warnflag']
3441 elif selfA._parameters["Minimizer"] == "TNC":
3442 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3443 func = CostFunction,
3445 fprime = GradientOfCostFunction,
3447 bounds = selfA._parameters["Bounds"],
3448 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3449 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3450 ftol = selfA._parameters["CostDecrementTolerance"],
3451 messages = selfA._parameters["optmessages"],
3453 elif selfA._parameters["Minimizer"] == "CG":
3454 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3457 fprime = GradientOfCostFunction,
3459 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3460 gtol = selfA._parameters["GradientNormTolerance"],
3461 disp = selfA._parameters["optdisp"],
3464 elif selfA._parameters["Minimizer"] == "NCG":
3465 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3468 fprime = GradientOfCostFunction,
3470 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3471 avextol = selfA._parameters["CostDecrementTolerance"],
3472 disp = selfA._parameters["optdisp"],
3475 elif selfA._parameters["Minimizer"] == "BFGS":
3476 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3479 fprime = GradientOfCostFunction,
3481 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3482 gtol = selfA._parameters["GradientNormTolerance"],
3483 disp = selfA._parameters["optdisp"],
3487 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3489 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3490 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3492 # Workaround for a TNC bug on the returned Minimum
3493 # ----------------------------------------------------------------
3494 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3495 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3497 Xa = Minimum
3498 #--------------------------
3500 selfA.StoredVariables["Analysis"].store( Xa )
3502 if selfA._toStore("OMA") or \
3503 selfA._toStore("SigmaObs2") or \
3504 selfA._toStore("SimulationQuantiles") or \
3505 selfA._toStore("SimulatedObservationAtOptimum"):
3506 if selfA._toStore("SimulatedObservationAtCurrentState"):
3507 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3508 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3509 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3510 else:
3511 HXa = Hm( Xa )
3512 HXa = HXa.reshape((-1,1))
3513 if selfA._toStore("APosterioriCovariance") or \
3514 selfA._toStore("SimulationQuantiles") or \
3515 selfA._toStore("JacobianMatrixAtOptimum") or \
3516 selfA._toStore("KalmanGainAtOptimum"):
3517 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3518 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3519 if selfA._toStore("APosterioriCovariance") or \
3520 selfA._toStore("SimulationQuantiles") or \
3521 selfA._toStore("KalmanGainAtOptimum"):
3522 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3523 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3524 if selfA._toStore("APosterioriCovariance") or \
3525 selfA._toStore("SimulationQuantiles"):
3526 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3527 if selfA._toStore("APosterioriCovariance"):
3528 selfA.StoredVariables["APosterioriCovariance"].store( A )
3529 if selfA._toStore("JacobianMatrixAtOptimum"):
3530 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3531 if selfA._toStore("KalmanGainAtOptimum"):
3532 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3533 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3534 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3536 # Additional computations and/or storage
3537 # ---------------------------------------
3538 if selfA._toStore("Innovation") or \
3539 selfA._toStore("SigmaObs2") or \
3540 selfA._toStore("MahalanobisConsistency") or \
3541 selfA._toStore("OMB"):
3542 d = Y - HXb
3543 if selfA._toStore("Innovation"):
3544 selfA.StoredVariables["Innovation"].store( d )
3545 if selfA._toStore("BMA"):
3546 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3547 if selfA._toStore("OMA"):
3548 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3549 if selfA._toStore("OMB"):
3550 selfA.StoredVariables["OMB"].store( d )
3551 if selfA._toStore("SigmaObs2"):
3552 TraceR = R.trace(Y.size)
3553 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3554 if selfA._toStore("MahalanobisConsistency"):
3555 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3556 if selfA._toStore("SimulationQuantiles"):
3557 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3558 if selfA._toStore("SimulatedObservationAtBackground"):
3559 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3560 if selfA._toStore("SimulatedObservationAtOptimum"):
3561 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3563 return 0
3565 # ==============================================================================
3566 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3575 Hm = HO["Direct"].appliedControledFormTo
3576 Mm = EM["Direct"].appliedControledFormTo
3578 if CM is not None and "Tangent" in CM and U is not None:
3579 Cm = CM["Tangent"].asMatrix(Xb)
3580 else:
3581 Cm = None
3583 def Un(_step):
3584 if U is not None:
3585 if hasattr(U,"store") and 1<=_step<len(U) :
3586 _Un = numpy.ravel( U[_step] ).reshape((-1,1))
3587 elif hasattr(U,"store") and len(U)==1:
3588 _Un = numpy.ravel( U[0] ).reshape((-1,1))
3589 else:
3590 _Un = numpy.ravel( U ).reshape((-1,1))
3591 else:
3592 _Un = None
3593 return _Un
3594 def CmUn(_xn,_un):
3595 if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is applied twice!
3596 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3597 _CmUn = (_Cm @ _un).reshape((-1,1))
3598 else:
3599 _CmUn = 0.
3600 return _CmUn
3602 # Remark: the observations are used starting from time step number 1,
3603 # and are stored in Yo indexed by these step numbers. Step 0 is
3604 # therefore not used, since the first stage begins with the
3605 # observation of step 1.
3607 # Number of steps identical to the number of observation steps
3608 if hasattr(Y,"stepnumber"):
3609 duration = Y.stepnumber()
3610 else:
3611 duration = 2
3613 # Precompute the inverses of B and R
3614 BI = B.getI()
3615 RI = R.getI()
3617 # Starting point of the optimization
3618 Xini = selfA._parameters["InitializationPoint"]
3620 # Definition of the cost function
3621 # ------------------------------
3622 selfA.DirectCalculation = [None,] # Step 0 is not observed
3623 selfA.DirectInnovation = [None,] # Step 0 is not observed
3624 def CostFunction(x):
3625 _X = numpy.asarray(x).reshape((-1,1))
3626 if selfA._parameters["StoreInternalVariables"] or \
3627 selfA._toStore("CurrentState") or \
3628 selfA._toStore("CurrentOptimum"):
3629 selfA.StoredVariables["CurrentState"].store( _X )
3630 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3631 selfA.DirectCalculation = [None,]
3632 selfA.DirectInnovation = [None,]
3633 Jo = 0.
3634 _Xn = _X
3635 for step in range(0,duration-1):
3636 if hasattr(Y,"store"):
3637 _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
3638 else:
3639 _Ynpu = numpy.ravel( Y ).reshape((-1,1))
3641 _Un = Un(step)
3643 if selfA._parameters["EstimationOf"] == "State":
3644 _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
3645 elif selfA._parameters["EstimationOf"] == "Parameters":
3646 pass
3648 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3649 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3651 # Difference step with the observations
3652 if selfA._parameters["EstimationOf"] == "State":
3653 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
3654 elif selfA._parameters["EstimationOf"] == "Parameters":
3655 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
3657 # Storage of the state
3658 selfA.DirectCalculation.append( _Xn )
3659 selfA.DirectInnovation.append( _YmHMX )
3661 # Contribution to the observation functional
3662 Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
3664 J = Jb + Jo
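# Remark (schematic): the accumulated J is the strong-constraint 4DVAR cost
#     J(x0) = 1/2 (x0-Xb).T @ BI @ (x0-Xb)
#           + 1/2 sum_k (Y_k - H(M_{0->k}(x0))).T @ RI @ (Y_k - H(M_{0->k}(x0)))
# and the trajectory and innovations are stored on the fly (DirectCalculation,
# DirectInnovation) for reuse by the adjoint pass in GradientOfCostFunction.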
3665 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3666 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3667 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3668 selfA.StoredVariables["CostFunctionJ" ].store( J )
3669 if selfA._toStore("IndexOfOptimum") or \
3670 selfA._toStore("CurrentOptimum") or \
3671 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3672 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3673 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3674 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3675 if selfA._toStore("IndexOfOptimum"):
3676 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3677 if selfA._toStore("CurrentOptimum"):
3678 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3679 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3680 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3681 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3682 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3683 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3684 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3685 return J
3687 def GradientOfCostFunction(x):
3688 _X = numpy.asarray(x).reshape((-1,1))
3689 GradJb = BI * (_X - Xb)
3690 GradJo = 0.
3691 for step in range(duration-1,0,-1):
3692 # Retrieve the last stored evolution state
3693 _Xn = selfA.DirectCalculation.pop()
3694 # Retrieve the last stored innovation
3695 _YmHMX = selfA.DirectInnovation.pop()
3696 # Computation of the adjoints
3697 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3698 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3699 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3700 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3701 # Gradient computation by adjoint state
3702 GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3703 GradJo = Ma * GradJo # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
3704 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3705 return GradJ
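# Remark (schematic): the loop above integrates the adjoint model backward
# in time through the recursion
#     GradJo <- Ma @ ( GradJo + Ha @ RI @ _YmHMX )
# reusing the stored trajectory and innovations, so a single backward sweep
# yields the complete gradient of the observation term.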
3707 # Minimization of the functional
3708 # --------------------------------
3709 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3711 if selfA._parameters["Minimizer"] == "LBFGSB":
3712 if "0.19" <= scipy.version.version <= "1.1.0":
3713 import lbfgsbhlt as optimiseur
3714 else:
3715 import scipy.optimize as optimiseur
3716 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3717 func = CostFunction,
3719 fprime = GradientOfCostFunction,
3721 bounds = selfA._parameters["Bounds"],
3722 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3723 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3724 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3725 iprint = selfA._parameters["optiprint"],
3727 nfeval = Informations['funcalls']
3728 rc = Informations['warnflag']
3729 elif selfA._parameters["Minimizer"] == "TNC":
3730 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3731 func = CostFunction,
3733 fprime = GradientOfCostFunction,
3735 bounds = selfA._parameters["Bounds"],
3736 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3737 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3738 ftol = selfA._parameters["CostDecrementTolerance"],
3739 messages = selfA._parameters["optmessages"],
3741 elif selfA._parameters["Minimizer"] == "CG":
3742 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3745 fprime = GradientOfCostFunction,
3747 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3748 gtol = selfA._parameters["GradientNormTolerance"],
3749 disp = selfA._parameters["optdisp"],
3752 elif selfA._parameters["Minimizer"] == "NCG":
3753 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3756 fprime = GradientOfCostFunction,
3758 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3759 avextol = selfA._parameters["CostDecrementTolerance"],
3760 disp = selfA._parameters["optdisp"],
3763 elif selfA._parameters["Minimizer"] == "BFGS":
3764 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3767 fprime = GradientOfCostFunction,
3769 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3770 gtol = selfA._parameters["GradientNormTolerance"],
3771 disp = selfA._parameters["optdisp"],
3775 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3777 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3778 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3780 # Workaround for a TNC bug on the returned Minimum
3781 # ----------------------------------------------------------------
3782 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3783 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3785 # Obtaining the analysis
3786 # ----------------------
3787 Xa = Minimum
3789 selfA.StoredVariables["Analysis"].store( Xa )
3791 # Additional computations and/or storage
3792 # ---------------------------------------
3793 if selfA._toStore("BMA"):
3794 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3796 return 0
3798 # ==============================================================================
3799 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3801 Standard Kalman Filter
3803 if selfA._parameters["EstimationOf"] == "Parameters":
3804 selfA._parameters["StoreInternalVariables"] = True
3808 Ht = HO["Tangent"].asMatrix(Xb)
3809 Ha = HO["Adjoint"].asMatrix(Xb)
3811 if selfA._parameters["EstimationOf"] == "State":
3812 Mt = EM["Tangent"].asMatrix(Xb)
3813 Ma = EM["Adjoint"].asMatrix(Xb)
3815 if CM is not None and "Tangent" in CM and U is not None:
3816 Cm = CM["Tangent"].asMatrix(Xb)
3820 # Observation duration and sizes
3821 if hasattr(Y,"stepnumber"):
3822 duration = Y.stepnumber()
3823 __p = numpy.cumprod(Y.shape())[-1]
3824 else:
3825 duration = 2
3826 __p = numpy.array(Y).size
3828 # Precompute the inverses of B and R
3829 if selfA._parameters["StoreInternalVariables"] \
3830 or selfA._toStore("CostFunctionJ") \
3831 or selfA._toStore("CostFunctionJb") \
3832 or selfA._toStore("CostFunctionJo") \
3833 or selfA._toStore("CurrentOptimum") \
3834 or selfA._toStore("APosterioriCovariance"):
3835 BI = B.getI()
3836 RI = R.getI()
3839 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
3841 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3842 Xn = Xb
3843 Pn = B
3844 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3845 selfA.StoredVariables["Analysis"].store( Xb )
3846 if selfA._toStore("APosterioriCovariance"):
3847 if hasattr(B,"asfullmatrix"):
3848 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3850 selfA.StoredVariables["APosterioriCovariance"].store( B )
3851 selfA._setInternalState("seed", numpy.random.get_state())
3852 elif selfA._parameters["nextStep"]:
3853 Xn = selfA._getInternalState("Xn")
3854 Pn = selfA._getInternalState("Pn")
3856 if selfA._parameters["EstimationOf"] == "Parameters":
3857 XaMin = Xn
3858 previousJMinimum = numpy.finfo(float).max
3860 for step in range(duration-1):
3861 if hasattr(Y,"store"):
3862 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3863 else:
3864 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3866 if U is not None:
3867 if hasattr(U,"store") and len(U)>1:
3868 Un = numpy.ravel( U[step] ).reshape((-1,1))
3869 elif hasattr(U,"store") and len(U)==1:
3870 Un = numpy.ravel( U[0] ).reshape((-1,1))
3871 else:
3872 Un = numpy.ravel( U ).reshape((-1,1))
3873 else:
3874 Un = None
3876 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3877 Xn_predicted = Mt @ Xn
3878 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
3879 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3880 Xn_predicted = Xn_predicted + Cm @ Un
3881 Pn_predicted = Q + Mt * (Pn * Ma)
3882 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3883 # ---> By principle, M = Id, Q = 0
3884 Xn_predicted = Xn
3885 Pn_predicted = Pn
3887 if selfA._parameters["EstimationOf"] == "State":
3888 HX_predicted = Ht @ Xn_predicted
3889 _Innovation = Ynpu - HX_predicted
3890 elif selfA._parameters["EstimationOf"] == "Parameters":
3891 HX_predicted = Ht @ Xn_predicted
3892 _Innovation = Ynpu - HX_predicted
3893 if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is applied twice!
3894 _Innovation = _Innovation - Cm @ Un
3896 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3897 Xn = Xn_predicted + Kn * _Innovation
3898 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3899 Xa = Xn
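# Remark (schematic): the three lines above are the textbook Kalman update
#     Kn = Pf Ht.T (Ht Pf Ht.T + R)^(-1)
#     Xn = Xf + Kn (Y - Ht Xf)
#     Pn = (I - Kn Ht) Pf
# applied to the forecast pair (Xn_predicted, Pn_predicted).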
3901 #--------------------------
3902 selfA._setInternalState("Xn", Xn)
3903 selfA._setInternalState("Pn", Pn)
3904 #--------------------------
3906 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3907 # ---> with the analysis
3908 selfA.StoredVariables["Analysis"].store( Xa )
3909 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3910 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3911 if selfA._toStore("InnovationAtCurrentAnalysis"):
3912 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3913 # ---> with the current state
3914 if selfA._parameters["StoreInternalVariables"] \
3915 or selfA._toStore("CurrentState"):
3916 selfA.StoredVariables["CurrentState"].store( Xn )
3917 if selfA._toStore("ForecastState"):
3918 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3919 if selfA._toStore("ForecastCovariance"):
3920 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3921 if selfA._toStore("BMA"):
3922 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3923 if selfA._toStore("InnovationAtCurrentState"):
3924 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3925 if selfA._toStore("SimulatedObservationAtCurrentState") \
3926 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3927 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3929 if selfA._parameters["StoreInternalVariables"] \
3930 or selfA._toStore("CostFunctionJ") \
3931 or selfA._toStore("CostFunctionJb") \
3932 or selfA._toStore("CostFunctionJo") \
3933 or selfA._toStore("CurrentOptimum") \
3934 or selfA._toStore("APosterioriCovariance"):
3935 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3936 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3937 J = Jb + Jo
3938 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3939 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3940 selfA.StoredVariables["CostFunctionJ" ].store( J )
3942 if selfA._toStore("IndexOfOptimum") \
3943 or selfA._toStore("CurrentOptimum") \
3944 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3945 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3946 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3947 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3948 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3949 if selfA._toStore("IndexOfOptimum"):
3950 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3951 if selfA._toStore("CurrentOptimum"):
3952 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3953 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3954 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3955 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3956 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3957 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3958 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3959 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3960 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3961 if selfA._toStore("APosterioriCovariance"):
3962 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3963 if selfA._parameters["EstimationOf"] == "Parameters" \
3964 and J < previousJMinimum:
3965 previousJMinimum = J
3966 XaMin = Xa
3967 if selfA._toStore("APosterioriCovariance"):
3968 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3970 # Additional final storage of the optimum in parameter estimation
3971 # ----------------------------------------------------------------------
3972 if selfA._parameters["EstimationOf"] == "Parameters":
3973 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3974 selfA.StoredVariables["Analysis"].store( XaMin )
3975 if selfA._toStore("APosterioriCovariance"):
3976 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3977 if selfA._toStore("BMA"):
3978 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3980 return 0
3982 # ==============================================================================
3983 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3985 Unscented Kalman Filter
3987 if selfA._parameters["EstimationOf"] == "Parameters":
3988 selfA._parameters["StoreInternalVariables"] = True
3990 L = Xb.size
3991 Alpha = selfA._parameters["Alpha"]
3992 Beta = selfA._parameters["Beta"]
3993 if selfA._parameters["Kappa"] == 0:
3994 if selfA._parameters["EstimationOf"] == "State":
3995 Kappa = 0
3996 elif selfA._parameters["EstimationOf"] == "Parameters":
3997 Kappa = 3 - L
3998 else:
3999 Kappa = selfA._parameters["Kappa"]
4000 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
4001 Gamma = math.sqrt( L + Lambda )
4003 Ww = []
4004 Ww.append( 0. )
4005 for i in range(2*L):
4006 Ww.append( 1. / (2.*(L + Lambda)) )
4008 Wm = numpy.array( Ww )
4009 Wm[0] = Lambda / (L + Lambda)
4010 Wc = numpy.array( Ww )
4011 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
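# Remark (schematic): these are the classical unscented transform weights,
#     Lambda = Alpha^2 (L + Kappa) - L,  Gamma = sqrt(L + Lambda)
#     Wm[0] = Lambda/(L+Lambda),  Wc[0] = Wm[0] + (1 - Alpha^2 + Beta)
#     Wm[i] = Wc[i] = 1/(2 (L+Lambda))  for i = 1..2L
# used below to average the 2L+1 sigma points and their covariances. For
# instance, with L=2, Alpha=1, Beta=2, Kappa=1: Lambda=1 and
# Wm = [1/3, 1/6, 1/6, 1/6, 1/6], which sums to 1.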
4014 Hm = HO["Direct"].appliedControledFormTo
4016 if selfA._parameters["EstimationOf"] == "State":
4017 Mm = EM["Direct"].appliedControledFormTo
4019 if CM is not None and "Tangent" in CM and U is not None:
4020 Cm = CM["Tangent"].asMatrix(Xb)
4021 else:
4022 Cm = None
4024 # Observation duration and sizes
4025 if hasattr(Y,"stepnumber"):
4026 duration = Y.stepnumber()
4027 __p = numpy.cumprod(Y.shape())[-1]
4028 else:
4029 duration = 2
4030 __p = numpy.array(Y).size
4032 # Precompute the inverses of B and R
4033 if selfA._parameters["StoreInternalVariables"] \
4034 or selfA._toStore("CostFunctionJ") \
4035 or selfA._toStore("CostFunctionJb") \
4036 or selfA._toStore("CostFunctionJo") \
4037 or selfA._toStore("CurrentOptimum") \
4038 or selfA._toStore("APosterioriCovariance"):
4039 BI = B.getI()
4040 RI = R.getI()
4043 nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
4045 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
4046 Xn = Xb
4047 if hasattr(B,"asfullmatrix"):
4048 Pn = B.asfullmatrix(__n)
4049 else:
4050 Pn = B
4051 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4052 selfA.StoredVariables["Analysis"].store( Xb )
4053 if selfA._toStore("APosterioriCovariance"):
4054 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4055 elif selfA._parameters["nextStep"]:
4056 Xn = selfA._getInternalState("Xn")
4057 Pn = selfA._getInternalState("Pn")
4059 if selfA._parameters["EstimationOf"] == "Parameters":
4060 XaMin = Xn
4061 previousJMinimum = numpy.finfo(float).max
4063 for step in range(duration-1):
4064 if hasattr(Y,"store"):
4065 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4066 else:
4067 Ynpu = numpy.ravel( Y ).reshape((__p,1))
4069 if U is not None:
4070 if hasattr(U,"store") and len(U)>1:
4071 Un = numpy.ravel( U[step] ).reshape((-1,1))
4072 elif hasattr(U,"store") and len(U)==1:
4073 Un = numpy.ravel( U[0] ).reshape((-1,1))
4074 else:
4075 Un = numpy.ravel( U ).reshape((-1,1))
4076 else:
4077 Un = None
4079 Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
4080 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4081 nbSpts = 2*Xn.size+1
4083 XEtnnp = []
4084 for point in range(nbSpts):
4085 if selfA._parameters["EstimationOf"] == "State":
4086 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4087 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
4088 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4089 XEtnnpi = XEtnnpi + Cm @ Un
4090 elif selfA._parameters["EstimationOf"] == "Parameters":
4091 # ---> By principle, M = Id, Q = 0
4092 XEtnnpi = Xnp[:,point]
4093 XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4094 XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4096 Xncm = ( XEtnnp * Wm ).sum(axis=1)
4098 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
4099 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4100 for point in range(nbSpts):
4101 Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
4103 Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4105 Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4107 Ynnp = []
4108 for point in range(nbSpts):
4109 if selfA._parameters["EstimationOf"] == "State":
4110 Ynnpi = Hm( (Xnnp[:,point], None) )
4111 elif selfA._parameters["EstimationOf"] == "Parameters":
4112 Ynnpi = Hm( (Xnnp[:,point], Un) )
4113 Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4114 Ynnp = numpy.concatenate( Ynnp, axis=1 )
4116 Yncm = ( Ynnp * Wm ).sum(axis=1)
4118 Pyyn = R
4119 Pxyn = 0.
4120 for point in range(nbSpts):
4121 Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4122 Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4124 _Innovation = Ynpu - Yncm.reshape((-1,1))
4125 if selfA._parameters["EstimationOf"] == "Parameters":
4126 if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is applied twice!
4127 _Innovation = _Innovation - Cm @ Un
4129 Kn = Pxyn * Pyyn.I
4130 Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4131 Pn = Pnm - Kn * Pyyn * Kn.T
4132 Xa = Xn
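# Remark (schematic): the unscented update mirrors the linear Kalman
# formulas with sigma-point statistics substituted for the linearization:
#     Kn = Pxyn @ Pyyn^(-1)
#     Xn = Xncm + Kn @ (Ynpu - Yncm)
#     Pn = Pnm - Kn @ Pyyn @ Kn.T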
4134 #--------------------------
4135 selfA._setInternalState("Xn", Xn)
4136 selfA._setInternalState("Pn", Pn)
4137 #--------------------------
4139 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4140 # ---> with the analysis
4141 selfA.StoredVariables["Analysis"].store( Xa )
4142 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4143 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4144 if selfA._toStore("InnovationAtCurrentAnalysis"):
4145 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4146 # ---> with the current state
4147 if selfA._parameters["StoreInternalVariables"] \
4148 or selfA._toStore("CurrentState"):
4149 selfA.StoredVariables["CurrentState"].store( Xn )
4150 if selfA._toStore("ForecastState"):
4151 selfA.StoredVariables["ForecastState"].store( Xncm )
4152 if selfA._toStore("ForecastCovariance"):
4153 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4154 if selfA._toStore("BMA"):
4155 selfA.StoredVariables["BMA"].store( Xncm - Xa )
4156 if selfA._toStore("InnovationAtCurrentState"):
4157 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4158 if selfA._toStore("SimulatedObservationAtCurrentState") \
4159 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4160 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4162 if selfA._parameters["StoreInternalVariables"] \
4163 or selfA._toStore("CostFunctionJ") \
4164 or selfA._toStore("CostFunctionJb") \
4165 or selfA._toStore("CostFunctionJo") \
4166 or selfA._toStore("CurrentOptimum") \
4167 or selfA._toStore("APosterioriCovariance"):
4168 Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
4169 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4170 J = Jb + Jo
4171 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4172 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4173 selfA.StoredVariables["CostFunctionJ" ].store( J )
4175 if selfA._toStore("IndexOfOptimum") \
4176 or selfA._toStore("CurrentOptimum") \
4177 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4178 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4179 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4180 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4181 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4182 if selfA._toStore("IndexOfOptimum"):
4183 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4184 if selfA._toStore("CurrentOptimum"):
4185 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4186 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4187 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4188 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4189 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4190 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4191 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4192 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4193 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4194 if selfA._toStore("APosterioriCovariance"):
4195 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4196 if selfA._parameters["EstimationOf"] == "Parameters" \
4197 and J < previousJMinimum:
4198 previousJMinimum = J
4199 XaMin = Xa
4200 if selfA._toStore("APosterioriCovariance"):
4201 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4203 # Additional final storage of the optimum in parameter estimation
4204 # ----------------------------------------------------------------------
4205 if selfA._parameters["EstimationOf"] == "Parameters":
4206 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4207 selfA.StoredVariables["Analysis"].store( XaMin )
4208 if selfA._toStore("APosterioriCovariance"):
4209 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4210 if selfA._toStore("BMA"):
4211 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4213 return 0
4215 # ==============================================================================
4216 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4218 3DVAR variational analysis with no inversion of B
4223 Hm = HO["Direct"].appliedTo
4224 Ha = HO["Adjoint"].appliedInXTo
4226 BT = B.getT()
4227 RI = R.getI()
4229 Xini = numpy.zeros(Xb.size)
4231 # Definition of the cost function
4232 # ------------------------------
4233 def CostFunction(v):
4234 _V = numpy.asarray(v).reshape((-1,1))
4235 _X = Xb + (B @ _V).reshape((-1,1))
4236 if selfA._parameters["StoreInternalVariables"] or \
4237 selfA._toStore("CurrentState") or \
4238 selfA._toStore("CurrentOptimum"):
4239 selfA.StoredVariables["CurrentState"].store( _X )
4240 _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
4241 _Innovation = Y - _HX
4242 if selfA._toStore("SimulatedObservationAtCurrentState") or \
4243 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4244 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4245 if selfA._toStore("InnovationAtCurrentState"):
4246 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4248 Jb = float( 0.5 * _V.T * (BT * _V) )
4249 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4250 J = Jb + Jo
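# Remark (schematic): with the change of variable x = Xb + B v, the 3DVAR
# background term becomes
#     Jb(v) = 1/2 v.T @ B @ v
# (B symmetric), so both the cost and its gradient only require products by
# B, never by its inverse: this is the "no inversion of B" preconditioning
# announced in the docstring.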
4252 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4253 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4254 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4255 selfA.StoredVariables["CostFunctionJ" ].store( J )
4256 if selfA._toStore("IndexOfOptimum") or \
4257 selfA._toStore("CurrentOptimum") or \
4258 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4259 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4260 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4261 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4262 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4263 if selfA._toStore("IndexOfOptimum"):
4264 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4265 if selfA._toStore("CurrentOptimum"):
4266 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4267 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4268 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4269 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4270 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4271 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4272 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4273 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4274 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4275 return J
4277 def GradientOfCostFunction(v):
4278 _V = numpy.asarray(v).reshape((-1,1))
4279 _X = Xb + (B @ _V).reshape((-1,1))
4280 _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
4281 GradJb = BT * _V
4282 GradJo = - Ha( (_X, RI * (Y - _HX)) )
4283 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4284 return GradJ
4286 # Minimization of the functional
4287 # --------------------------------
4288 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4290 if selfA._parameters["Minimizer"] == "LBFGSB":
4291 if "0.19" <= scipy.version.version <= "1.1.0":
4292 import lbfgsbhlt as optimiseur
4293 else:
4294 import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    else:
        Minimum = Xb + B * Minimum.reshape((-1,1)) # deliberately "*", not "@"
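    # The minimizer works on the preconditioned variable v, so when no state
    # was stored the returned v is mapped back through X = Xb + B*v. The "*"
    # is kept on purpose: B may be an ADAO Covariance object that provides
    # its matrix product through "*" rather than "@".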
    #
    Xa = Minimum
    #--------------------------
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
        HXa = numpy.asarray(HXa).reshape((-1,1))
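    # HXa is taken from already stored values whenever possible, to avoid an
    # extra, possibly expensive, evaluation of the observation operator.
    #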
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI() # the only place where the inverse of B is required
        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (B.getI() + numpy.dot(HaM, RI * HtM)).I * HaM * RI # B.getI() used directly, since BI is only set in the branch above
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
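    #
    # The two gain expressions above are algebraically equivalent: the first
    # inverts a matrix of the size of Y (observation space), the second one
    # a matrix of the size of Xb (state space); the smaller system is chosen.
    #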
    # Additional calculations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( d )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( d )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
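    # 2*J(Xa)/d.size is the usual Mahalanobis consistency indicator: its
    # expected value is 1 when the covariances B and R are well specified.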
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
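
    # Illustrative sketch only, not part of the module API: a minimal,
    # self-contained toy run of the B-preconditioned 3DVAR technique used in
    # van3dvar above. All names below (tH, tB, tR, tXb, tY) are hypothetical
    # toy data; the descent is done on v, with X = Xb + B v, so that the
    # inverse of B is never formed.
    tB  = numpy.array([[2.0, 0.0], [0.0, 0.5]])  # toy background covariance
    tR  = numpy.array([[0.1]])                   # toy observation covariance
    tRI = numpy.linalg.inv(tR)
    tH  = numpy.array([[1.0, 1.0]])              # toy linear observation operator
    tXb = numpy.array([0.0, 0.0])                # toy background state
    tY  = numpy.array([1.0])                     # toy observation
    def toyJ(v):
        # J(v) = 0.5*v'*B'*v + 0.5*innovation'*R^{-1}*innovation
        innovation = tY - tH @ (tXb + tB @ v)
        return 0.5*float(v @ (tB.T @ v)) + 0.5*float(innovation @ (tRI @ innovation))
    def toyGradJ(v):
        # Exact gradient for the linear toy operator tH
        innovation = tY - tH @ (tXb + tB @ v)
        return tB.T @ v - tB.T @ (tH.T @ (tRI @ innovation))
    tv, tJ, tInfos = scipy.optimize.fmin_l_bfgs_b(toyJ, numpy.zeros(2), fprime=toyGradJ)
    print("Toy preconditioned 3DVAR analysis:", tXb + tB @ tv)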