# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.ravel( X ).reshape((-1,1))
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
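
# ExecuteFunction is the worker entry point used by the multiprocessing pools
# below: it re-imports the user module inside the subprocess, then evaluates
# the user function at X. A minimal sketch of the expected "funcrepr" payload
# (hypothetical names, for illustration only):
#
#   funcrepr = {
#       "__userFunction__path" : "/some/dir",  # directory containing the module
#       "__userFunction__modl" : "mymodule",   # module name, without extension
#       "__userFunction__name" : "myFunc",     # callable defined in that module
#       }
#   HX = ExecuteFunction( (numpy.ones(3), None, funcrepr) )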
# ==============================================================================
class FDApproximation(object):
    """
    This class is an interface to define approximated operators. When an
    object is created by supplying a function "Function", one obtains an
    object with 3 methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD (finite difference) approximation is controlled
    either by the multiplicative increment "increment", with a default value
    of 1%, or by the fixed increment "dX", which is multiplied by "increment"
    (hence in %), and centered FD are used if the boolean "centeredDF" is
    true.
    """
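    # A minimal usage sketch (illustrative, not executed here), assuming a
    # user-defined observation function "userH" mapping a 1D array to a 1D
    # array, and mfEnabled left to its default False value:
    #
    #   FDA = FDApproximation( Function = userH, centeredDF = True )
    #   HX  = FDA.DirectOperator( X )          # value H(X)
    #   HtM = FDA.TangentOperator( (X, None) ) # full Jacobian at X
    #   HtX = FDA.TangentOperator( (X, dX) )   # Jacobian applied to dX
    #   HaY = FDA.AdjointOperator( (X, Y) )    # transposed Jacobian applied to Y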
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            reducingMemoryUse     = False,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        #
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        self.__mfEnabled = bool(mfEnabled)
        logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
        #
        self.__rmEnabled = bool(reducingMemoryUse)
        logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increments
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Calculs en multiprocessing : FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except KeyError:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Calculs en multiprocessing : MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except KeyError:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.ravel( dX )
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
                break
        return __ac, __iac
    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
        "Incremental product of a columnwise list matrix with a vector"
        if not isinstance(__LMatrix, (list,tuple)):
            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
        if __dotWith is not None:
            __Idwx = numpy.ravel( __dotWith )
            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
            __Produit = numpy.zeros(__LMatrix[0].size)
            for i, col in enumerate(__LMatrix):
                __Produit += float(__Idwx[i]) * col
            return __Produit
        elif __dotTWith is not None:
            _Idwy = numpy.ravel( __dotTWith ).T
            assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
            __Produit = numpy.zeros(len(__LMatrix))
            for i, col in enumerate(__LMatrix):
                __Produit[i] = float( _Idwy @ col )
            return __Produit
        else:
            return None
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Computation of the direct operator using the supplied function.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        logging.debug("FDA Calcul DirectOperator (explicite)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, which is a numpy.ndarray.

        Centered finite differences (2nd order approximation):
        1/ For each component i of X, the perturbation dX[i] is both added to
           and subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by twice the increment dX[i]
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (1st order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by the
           increment dX[i]
        4/ Each result, component by component, becomes a column of the Jacobian
        """
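        # Worked one-dimensional example of the two schemes (a sketch, with
        # illustrative values): for H(x) = x**2 at X = 2. with dX = 0.1, the
        # exact derivative is 4., and
        #
        #   centered     : (H(2.1) - H(1.9)) / (2*0.1) = (4.41 - 3.61) / 0.2 = 4.0
        #   non-centered : (H(2.1) - H(2.0)) / 0.1     = (4.41 - 4.00) / 0.1 = 4.1
        #
        # The centered scheme is exact here because its 2nd order error term
        # vanishes for a quadratic H; the non-centered one keeps an O(dX) bias.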
        logging.debug("FDA Début du calcul de la Jacobienne")
        logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
        logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.ravel( X )
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.ravel( self.__dX )
        assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
            logging.debug("FDA Fin du calcul de la Jacobienne")
            if dotWith is not None:
                return numpy.dot(_Jacobienne, numpy.ravel( dotWith ))
            elif dotTWith is not None:
                return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
        else:
            logging.debug("FDA   Calcul Jacobienne (explicite)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi  = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            if (dotWith is not None) or (dotTWith is not None):
                __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
            else:
                __Produit = None
            if __Produit is None or self.__avoidRC:
                _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
                if self.__avoidRC:
                    if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                    while len(self.__listJPCP) > self.__lenghtRJ:
                        self.__listJPCP.pop(0)
                        self.__listJPCI.pop(0)
                        self.__listJPCR.pop(0)
                        self.__listJPPN.pop(0)
                        self.__listJPIN.pop(0)
                    self.__listJPCP.append( copy.copy(_X) )
                    self.__listJPCI.append( copy.copy(_dX) )
                    self.__listJPCR.append( copy.copy(_Jacobienne) )
                    self.__listJPPN.append( numpy.linalg.norm(_X) )
                    self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
            logging.debug("FDA Fin du calcul de la Jacobienne")
            if __Produit is not None:
                return __Produit
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Computation of the tangent operator using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            _Jacobienne = self.TangentMatrix( X )
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X applied to dX
            # -----------------------------------------------------------
            _HtX = self.TangentMatrix( X, dotWith = dX )
            if self.__mfEnabled: return [_HtX,]
            else:                return _HtX
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Computation of the adjoint operator using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            _JacobienneT = self.TangentMatrix( X ).T
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X applied to Y
            # --------------------------------------------------
            _HaY = self.TangentMatrix( X, dotTWith = Y )
            if self.__mfEnabled: return [_HaY,]
            else:                return _HaY
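
# A quick duality check for the pair of operators above (illustrative sketch,
# with "FDA" an FDApproximation instance as in the usage sketch of the class,
# mfEnabled False, and "dx", "y" arrays of matching sizes): the tangent and
# adjoint applications are built from the same Jacobian, so they satisfy the
# inner product identity <Ht dx, y> == <dx, Ha y> exactly, e.g.:
#
#   HtX = FDA.TangentOperator( (X, dx) )
#   HaY = FDA.AdjointOperator( (X, y) )
#   assert abs(numpy.dot(HtX, y) - numpy.dot(dx, HaY)) < 1.e-12 * max(1., abs(numpy.dot(dx, HaY)))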
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
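
# For instance (illustrative values), a 5-member ensemble around a null 3D
# state with unit covariance:
#
#   E = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.identity(3), 5 )
#   # E.shape == (3, 5): one row per state component, one column per member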
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers centered random states, built from _nbmembers-1 independent perturbations"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following
        the handwritten notes of MB and consistent with the code of PS,
        with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
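
# By construction, for any ensemble E given as a (n, m) array, the following
# identity holds (illustrative check):
#
#   numpy.allclose( EnsembleMean(E) + EnsembleOfAnomalies(E), numpy.asarray(E) )
#
# and the mean of the anomalies over the members (axis 1) is zero.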
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimate
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Enforce symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Enforce positivity
        __epsilon = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
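
# The robust branch implements the usual unbiased empirical estimator
#
#   C = (1/(m-1)) * sum_k (x_k - xbar)(x_k - xbar)^T
#
# then symmetrizes it and adds mpr*trace(C) to the diagonal, so that a later
# Cholesky factorization does not fail on a numerically semi-definite result.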
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble, following a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Case of a null or almost null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Case of a null or almost null covariance
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Case of a null or almost null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Case of a null or almost null covariance
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Case of a covariance multiple of the identity
        __zero = 0.
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
        #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Case of a diagonal covariance with non identical variances
        __zero = numpy.zeros(__n)
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
        #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Case of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
        #
    elif isinstance(__Covariance, numpy.ndarray):
        # Case of a full numpy covariance, knowing that this test comes last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
        #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
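
# For instance (illustrative, with a plain numpy covariance and a fixed seed):
#
#   E  = numpy.zeros((3,10))
#   Ep = EnsemblePerturbationWithGivenCovariance( E, 0.01*numpy.identity(3), __Seed=1000 )
#
# perturbs the 10 members with centered draws of covariance 0.01*I, so that
# EnsembleErrorCovariance(Ep) approaches 0.01*I as the member count grows.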
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
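
# For instance, multiplicative inflation of anomalies by a factor f rescales
# each member around the ensemble mean while preserving that mean
# (illustrative values):
#
#   E  = numpy.array([[1., 2., 3.]])
#   Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 2. )
#   # Ei == [[0., 2., 4.]]: the mean 2. is kept, the anomalies (-1,0,1) doubled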
# ==============================================================================
def HessienneEstimation(nb, HaM, HtM, BI, RI):
    "Estimation of the Hessian"
    #
    HessienneI = []
    for i in range(int(nb)):
        _ee    = numpy.zeros((nb,1))
        _ee[i] = 1.
        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
    #
    A = numpy.linalg.inv(numpy.array( HessienneI ))
    #
    if min(A.shape) != max(A.shape):
        raise ValueError("The a posteriori covariance matrix A is of shape %s, despite the fact that it has to be a square matrix. There is an error in the observation operator, please check it."%(str(A.shape),))
    if (numpy.diag(A) < 0).any():
        raise ValueError("The a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it.")
    if logging.getLogger().level < logging.WARNING: # This verification only takes place in debug
        try:
            numpy.linalg.cholesky( A )
        except Exception:
            raise ValueError("The a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator.")
    #
    return A
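
# The columns assembled in the loop above are those of the Gauss-Newton
# Hessian of the 3D-Var cost function, probed with the unit vectors e_i:
#
#   Hess = B^{-1} + HaM R^{-1} HtM   (HtM: tangent, HaM: adjoint of H)
#   A    = Hess^{-1}                 (a posteriori covariance estimate)
#
# so each iteration computes Hess @ e_i, i.e. one column of Hess.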
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Bounds treatment
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # States sampling
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
            dYr = HtM @ dXr
            Yr = HXa.reshape((-1,1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.asarray(Hm( Xr ))
        else:
            raise ValueError("Quantile simulation has to be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Quantiles extraction
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0
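
# The extraction step picks order statistics from the sorted samples: with
# nbsamples = 100 and quantile = 0.5, for instance,
#
#   indice = int(100 * 0.5 - 1./100) = int(49.99) = 49
#
# so the 50th smallest simulated value, row by row, is reported as the median
# of the simulated observations.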
# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Force the bounds to be numerical values, except if globally None"
    # Keep a default value of None if there are no bounds
    if __Bounds is None: return None
    # Convert all individual None bounds to +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
    return __Bounds
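
# For instance (illustrative): [[0., None], [None, 1.]] becomes
# [[0., max], [-max, 1.]] with max = sys.float_info.max, since the None
# entries turn into NaN during the float conversion and are then substituted.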
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenter the bounds around 0, except if globally None"
    # Keep a default value of None if there are no bounds
    if __Bounds is None: return None
    # Recenter the numerical values of the bounds
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True):
    "Apply numerical bounds to a point"
    # Keep a default value if there are no bounds
    if __Bounds is None: return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray): # Has to be an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray): # Has to be an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size: # Has to be a two-column array of vector length
        raise ValueError("Incorrect bounds number to be applied for this vector")
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:,0].reshape(__Vector.shape),
            __Bounds[:,1].reshape(__Vector.shape),
            )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
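
# For instance (illustrative), clipping the point (-1., 0.5, 2.) into the box
# [0, 1] in each direction gives (0., 0.5, 1.):
#
#   ApplyBounds( numpy.array([-1., 0.5, 2.]), numpy.array([[0., 1.]]*3) )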
# ==============================================================================
def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Unscented Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    L     = Xb.size
    Alpha = selfA._parameters["Alpha"]
    Beta  = selfA._parameters["Beta"]
    if selfA._parameters["Kappa"] == 0:
        if selfA._parameters["EstimationOf"] == "State":
            Kappa = 0
        elif selfA._parameters["EstimationOf"] == "Parameters":
            Kappa = 3 - L
    else:
        Kappa = selfA._parameters["Kappa"]
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma  = math.sqrt( L + Lambda )
    #
    Ww = []
    Ww.append( 0. )
    for i in range(2*L):
        Ww.append( 1. / (2.*(L + Lambda)) )
    #
    Wm = numpy.array( Ww )
    Wm[0] = Lambda / (L + Lambda)
    Wc = numpy.array( Ww )
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
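    # These are the standard unscented transform weights (Julier-Uhlmann):
    #
    #   Wm[0] = Lambda/(L+Lambda)                        (mean, central point)
    #   Wc[0] = Lambda/(L+Lambda) + 1 - Alpha**2 + Beta  (covariance, central point)
    #   Wm[i] = Wc[i] = 1/(2*(L+Lambda)), i = 1..2L      (the 2L sigma points)
    #
    # The Wm weights sum to 1, so the sigma point average is unbiased.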
    #
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        if hasattr(B,"asfullmatrix"):
            Pn = B.asfullmatrix(__n)
        else:
            Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
        nbSpts = 2*Xn.size+1
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
        #
        XEtnnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is counted twice!
                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                    XEtnnpi = XEtnnpi + Cm * Un
                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                XEtnnpi = Xnp[:,point]
            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
        #
        Xncm = ( XEtnnp * Wm ).sum(axis=1)
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
        for point in range(nbSpts):
            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
        #
        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
        else:
            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
        #
        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
        #
        Ynnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                Ynnpi = Hm( (Xnnp[:,point], None) )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                Ynnpi = Hm( (Xnnp[:,point], Un) )
            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
        Ynnp = numpy.concatenate( Ynnp, axis=1 )
        #
        Yncm = ( Ynnp * Wm ).sum(axis=1)
        #
        Pyyn = R
        Pxyn = 0.
        for point in range(nbSpts):
            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
        #
        _Innovation = Ynpu - Yncm.reshape((-1,1))
        if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Beware: if Cm is also in H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pxyn * Pyyn.I # Kalman gain from the cross and innovation covariances
        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
        Pn = Pnm - Kn * Pyyn * Kn.T
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointer
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xncm )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xncm - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
        # ---> with current J
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum in case of parameter estimation
    # ------------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
            if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * (Pn * Ma)
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Beware: if Cm is also in H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointer
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> with current J
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum in case of parameter estimation
    # ------------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precomputation of the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations at least as long as the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct calculation (memorization is preferred over recalculation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS    = mS.reshape((-1,__m)) # For the 1D case
            delta = RIdemi @ ( Ynpu - vZm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            #
            # Smoothing of the L previous analyses
            for irl in range(LagL):
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Conservation of the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last analyses, incompletely updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
1469 for step in range(duration-1):
1470 numpy.random.set_state(selfA._getInternalState("seed"))
1471 if hasattr(Y,"store"):
1472 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1474 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1477 if hasattr(U,"store") and len(U)>1:
1478 Un = numpy.ravel( U[step] ).reshape((-1,1))
1479 elif hasattr(U,"store") and len(U)==1:
1480 Un = numpy.ravel( U[0] ).reshape((-1,1))
1482 Un = numpy.ravel( U ).reshape((-1,1))
1486 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1487 Xn = CovarianceInflation( Xn,
1488 selfA._parameters["InflationType"],
1489 selfA._parameters["InflationFactor"],
1492 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1493 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1495 returnSerieAsArrayMatrix = True )
1496 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1497 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1499 returnSerieAsArrayMatrix = True )
1500 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1501 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1502 Xn_predicted = Xn_predicted + Cm * Un
1503 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1504 # ---> By principle, M = Id, Q = 0
1505 Xn_predicted = EMX = Xn
1506 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1508 returnSerieAsArrayMatrix = True )
1510 # Mean of forecast and observation of forecast
1511 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1512 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1515 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
1516 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
1518 #--------------------------
1519 if VariantM == "KalmanFilterFormula":
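# Deterministic square-root Kalman update in weight space: with S the
# R-normalized observation anomalies, T = (I + S^T S)^{-1} yields the mean
# weights vw = T S^T delta and the perturbation weights sqrt(m-1)*T^{1/2},
# so no perturbation of the observations is required.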
1520 mS = RIdemi * EaHX / math.sqrt(__m-1)
1521 mS = mS.reshape((-1,__m)) # Ensures a 2D shape when dimension is 1
1522 delta = RIdemi * ( Ynpu - Hfm )
1523 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1524 vw = mT @ mS.T @ delta
1526 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1527 mU = numpy.identity(__m)
1529 EaX = EaX / math.sqrt(__m-1)
1530 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1531 #--------------------------
1532 elif VariantM == "Variational":
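# Variational variant: the same analysis is obtained by minimizing, in
# ensemble space, J(w) = 0.5*(m-1)*w^T w + 0.5*(d - EaHX w)^T R^{-1} (d - EaHX w)
# with d = Ynpu - H(Xfm), then rebuilding the ensemble with sqrtm((m-1)*Pta).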
1533 HXfm = H((Xfm[:,None], Un)) # Hfm could possibly be used here instead
1534 def CostFunction(w):
1535 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1536 _Jo = 0.5 * _A.T @ (RI * _A)
1537 _Jb = 0.5 * (__m-1) * w.T @ w
1540 def GradientOfCostFunction(w):
1541 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1542 _GradJo = - EaHX.T @ (RI * _A)
1543 _GradJb = (__m-1) * w.reshape((__m,1))
1544 _GradJ = _GradJo + _GradJb
1545 return numpy.ravel(_GradJ)
1546 vw = scipy.optimize.fmin_cg(
1548 x0 = numpy.zeros(__m),
1549 fprime = GradientOfCostFunction,
1554 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1555 Htb = (__m-1) * numpy.identity(__m)
1558 Pta = numpy.linalg.inv( Hta )
1559 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1561 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1562 #--------------------------
1563 elif VariantM == "FiniteSize11": # Boc2011 gauge
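# Finite-size EnKF prior ("Boc2011 gauge"): the Gaussian background term
# 0.5*(m-1)*w^T w is replaced by a logarithmic term that accounts for the
# sampling error of the ensemble mean in a finite-size ensemble.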
1564 HXfm = H((Xfm[:,None], Un)) # Hfm could possibly be used here instead
1565 def CostFunction(w):
1566 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1567 _Jo = 0.5 * _A.T @ (RI * _A)
1568 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1571 def GradientOfCostFunction(w):
1572 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1573 _GradJo = - EaHX.T @ (RI * _A)
1574 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1575 _GradJ = _GradJo + _GradJb
1576 return numpy.ravel(_GradJ)
1577 vw = scipy.optimize.fmin_cg(
1579 x0 = numpy.zeros(__m),
1580 fprime = GradientOfCostFunction,
1585 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1587 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1588 / (1 + 1/__m + vw.T @ vw)**2
1591 Pta = numpy.linalg.inv( Hta )
1592 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1594 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1595 #--------------------------
1596 elif VariantM == "FiniteSize15": # Boc2015 gauge
1597 HXfm = H((Xfm[:,None], Un)) # Hfm could possibly be used here instead
1598 def CostFunction(w):
1599 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1600 _Jo = 0.5 * _A.T @ (RI * _A)
1601 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1604 def GradientOfCostFunction(w):
1605 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1606 _GradJo = - EaHX.T @ (RI * _A)
1607 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1608 _GradJ = _GradJo + _GradJb
1609 return numpy.ravel(_GradJ)
1610 vw = scipy.optimize.fmin_cg(
1612 x0 = numpy.zeros(__m),
1613 fprime = GradientOfCostFunction,
1618 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1620 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1621 / (1 + 1/__m + vw.T @ vw)**2
1624 Pta = numpy.linalg.inv( Hta )
1625 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1627 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1628 #--------------------------
1629 elif VariantM == "FiniteSize16": # Boc2016 gauge
1630 HXfm = H((Xfm[:,None], Un)) # Hfm could possibly be used here instead
1631 def CostFunction(w):
1632 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1633 _Jo = 0.5 * _A.T @ (RI * _A)
1634 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1637 def GradientOfCostFunction(w):
1638 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1639 _GradJo = - EaHX.T @ (RI * _A)
1640 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1641 _GradJ = _GradJo + _GradJb
1642 return numpy.ravel(_GradJ)
1643 vw = scipy.optimize.fmin_cg(
1645 x0 = numpy.zeros(__m),
1646 fprime = GradientOfCostFunction,
1651 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1652 Htb = ((__m+1) / (__m-1)) * \
1653 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1654 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1657 Pta = numpy.linalg.inv( Hta )
1658 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1660 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1661 #--------------------------
1663 raise ValueError("VariantM has to be chosen from the list of authorized methods.")
1665 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1666 Xn = CovarianceInflation( Xn,
1667 selfA._parameters["InflationType"],
1668 selfA._parameters["InflationFactor"],
1671 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1672 #--------------------------
1673 selfA._setInternalState("Xn", Xn)
1674 selfA._setInternalState("seed", numpy.random.get_state())
1675 #--------------------------
1677 if selfA._parameters["StoreInternalVariables"] \
1678 or selfA._toStore("CostFunctionJ") \
1679 or selfA._toStore("CostFunctionJb") \
1680 or selfA._toStore("CostFunctionJo") \
1681 or selfA._toStore("APosterioriCovariance") \
1682 or selfA._toStore("InnovationAtCurrentAnalysis") \
1683 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1684 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1685 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1686 _Innovation = Ynpu - _HXa
1688 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1689 # ---> with analysis
1690 selfA.StoredVariables["Analysis"].store( Xa )
1691 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1692 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1693 if selfA._toStore("InnovationAtCurrentAnalysis"):
1694 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1695 # ---> with current state
1696 if selfA._parameters["StoreInternalVariables"] \
1697 or selfA._toStore("CurrentState"):
1698 selfA.StoredVariables["CurrentState"].store( Xn )
1699 if selfA._toStore("ForecastState"):
1700 selfA.StoredVariables["ForecastState"].store( EMX )
1701 if selfA._toStore("ForecastCovariance"):
1702 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1703 if selfA._toStore("BMA"):
1704 selfA.StoredVariables["BMA"].store( EMX - Xa )
1705 if selfA._toStore("InnovationAtCurrentState"):
1706 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1707 if selfA._toStore("SimulatedObservationAtCurrentState") \
1708 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1709 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1711 if selfA._parameters["StoreInternalVariables"] \
1712 or selfA._toStore("CostFunctionJ") \
1713 or selfA._toStore("CostFunctionJb") \
1714 or selfA._toStore("CostFunctionJo") \
1715 or selfA._toStore("CurrentOptimum") \
1716 or selfA._toStore("APosterioriCovariance"):
1717 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1718 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1720 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1721 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1722 selfA.StoredVariables["CostFunctionJ" ].store( J )
1724 if selfA._toStore("IndexOfOptimum") \
1725 or selfA._toStore("CurrentOptimum") \
1726 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1727 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1728 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1729 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1730 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1731 if selfA._toStore("IndexOfOptimum"):
1732 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1733 if selfA._toStore("CurrentOptimum"):
1734 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1735 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1736 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1737 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1738 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1739 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1740 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1741 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1742 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1743 if selfA._toStore("APosterioriCovariance"):
1744 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1745 if selfA._parameters["EstimationOf"] == "Parameters" \
1746 and J < previousJMinimum:
1747 previousJMinimum = J
1749 if selfA._toStore("APosterioriCovariance"):
1750 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1751 # ---> For the smoothers
1752 if selfA._toStore("CurrentEnsembleState"):
1753 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1755 # Additional final storage of the optimum for parameter estimation
1756 # ----------------------------------------------------------------------
1757 if selfA._parameters["EstimationOf"] == "Parameters":
1758 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1759 selfA.StoredVariables["Analysis"].store( XaMin )
1760 if selfA._toStore("APosterioriCovariance"):
1761 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1762 if selfA._toStore("BMA"):
1763 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1767 # ==============================================================================
1768 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1770 Extended Kalman Filter
1772 if selfA._parameters["EstimationOf"] == "Parameters":
1773 selfA._parameters["StoreInternalVariables"] = True
1776 H = HO["Direct"].appliedControledFormTo
1778 if selfA._parameters["EstimationOf"] == "State":
1779 M = EM["Direct"].appliedControledFormTo
1781 if CM is not None and "Tangent" in CM and U is not None:
1782 Cm = CM["Tangent"].asMatrix(Xb)
1786 # Observation duration and sizes
1787 if hasattr(Y,"stepnumber"):
1788 duration = Y.stepnumber()
1789 __p = numpy.cumprod(Y.shape())[-1]
1792 __p = numpy.array(Y).size
1794 # Precompute the inverses of B and R
1795 if selfA._parameters["StoreInternalVariables"] \
1796 or selfA._toStore("CostFunctionJ") \
1797 or selfA._toStore("CostFunctionJb") \
1798 or selfA._toStore("CostFunctionJo") \
1799 or selfA._toStore("CurrentOptimum") \
1800 or selfA._toStore("APosterioriCovariance"):
1806 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1809 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1810 selfA.StoredVariables["Analysis"].store( Xb )
1811 if selfA._toStore("APosterioriCovariance"):
1812 if hasattr(B,"asfullmatrix"):
1813 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1815 selfA.StoredVariables["APosterioriCovariance"].store( B )
1816 selfA._setInternalState("seed", numpy.random.get_state())
1817 elif selfA._parameters["nextStep"]:
1818 Xn = selfA._getInternalState("Xn")
1819 Pn = selfA._getInternalState("Pn")
1821 if selfA._parameters["EstimationOf"] == "Parameters":
1823 previousJMinimum = numpy.finfo(float).max
1825 for step in range(duration-1):
1826 if hasattr(Y,"store"):
1827 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1829 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1831 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1832 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1833 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1834 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1836 if selfA._parameters["EstimationOf"] == "State":
1837 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1838 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1839 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1840 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1843 if hasattr(U,"store") and len(U)>1:
1844 Un = numpy.ravel( U[step] ).reshape((-1,1))
1845 elif hasattr(U,"store") and len(U)==1:
1846 Un = numpy.ravel( U[0] ).reshape((-1,1))
1848 Un = numpy.ravel( U ).reshape((-1,1))
1852 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1853 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1854 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1855 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1856 Xn_predicted = Xn_predicted + Cm * Un
1857 Pn_predicted = Q + Mt * (Pn * Ma)
1858 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1859 # ---> By principle, M = Id, Q = 0
1863 if selfA._parameters["EstimationOf"] == "State":
1864 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1865 _Innovation = Ynpu - HX_predicted
1866 elif selfA._parameters["EstimationOf"] == "Parameters":
1867 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1868 _Innovation = Ynpu - HX_predicted
1869 if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
1870 _Innovation = _Innovation - Cm * Un
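# Standard extended Kalman update using the tangent (Ht) and adjoint (Ha)
# linearizations: Kn = Pn_predicted Ha (R + Ht Pn_predicted Ha)^{-1}, then
# correction of the state by the innovation and of the covariance by the gain.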
1872 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1873 Xn = Xn_predicted + Kn * _Innovation
1874 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1877 #--------------------------
1878 selfA._setInternalState("Xn", Xn)
1879 selfA._setInternalState("Pn", Pn)
1880 #--------------------------
1882 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1883 # ---> with analysis
1884 selfA.StoredVariables["Analysis"].store( Xa )
1885 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1886 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1887 if selfA._toStore("InnovationAtCurrentAnalysis"):
1888 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1889 # ---> with current state
1890 if selfA._parameters["StoreInternalVariables"] \
1891 or selfA._toStore("CurrentState"):
1892 selfA.StoredVariables["CurrentState"].store( Xn )
1893 if selfA._toStore("ForecastState"):
1894 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1895 if selfA._toStore("ForecastCovariance"):
1896 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1897 if selfA._toStore("BMA"):
1898 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1899 if selfA._toStore("InnovationAtCurrentState"):
1900 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1901 if selfA._toStore("SimulatedObservationAtCurrentState") \
1902 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1903 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1905 if selfA._parameters["StoreInternalVariables"] \
1906 or selfA._toStore("CostFunctionJ") \
1907 or selfA._toStore("CostFunctionJb") \
1908 or selfA._toStore("CostFunctionJo") \
1909 or selfA._toStore("CurrentOptimum") \
1910 or selfA._toStore("APosterioriCovariance"):
1911 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1912 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1914 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1915 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1916 selfA.StoredVariables["CostFunctionJ" ].store( J )
1918 if selfA._toStore("IndexOfOptimum") \
1919 or selfA._toStore("CurrentOptimum") \
1920 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1921 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1922 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1923 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1924 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1925 if selfA._toStore("IndexOfOptimum"):
1926 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1927 if selfA._toStore("CurrentOptimum"):
1928 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1929 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1930 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1931 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1932 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1933 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1934 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1935 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1936 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1937 if selfA._toStore("APosterioriCovariance"):
1938 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1939 if selfA._parameters["EstimationOf"] == "Parameters" \
1940 and J < previousJMinimum:
1941 previousJMinimum = J
1943 if selfA._toStore("APosterioriCovariance"):
1944 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1946 # Additional final storage of the optimum for parameter estimation
1947 # ----------------------------------------------------------------------
1948 if selfA._parameters["EstimationOf"] == "Parameters":
1949 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1950 selfA.StoredVariables["Analysis"].store( XaMin )
1951 if selfA._toStore("APosterioriCovariance"):
1952 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1953 if selfA._toStore("BMA"):
1954 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1958 # ==============================================================================
1959 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1960 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1964 if selfA._parameters["EstimationOf"] == "Parameters":
1965 selfA._parameters["StoreInternalVariables"] = True
1968 H = HO["Direct"].appliedControledFormTo
1970 if selfA._parameters["EstimationOf"] == "State":
1971 M = EM["Direct"].appliedControledFormTo
1973 if CM is not None and "Tangent" in CM and U is not None:
1974 Cm = CM["Tangent"].asMatrix(Xb)
1978 # Observation duration and sizes
1979 if hasattr(Y,"stepnumber"):
1980 duration = Y.stepnumber()
1981 __p = numpy.cumprod(Y.shape())[-1]
1984 __p = numpy.array(Y).size
1986 # Precompute the inverses of B and R
1987 if selfA._parameters["StoreInternalVariables"] \
1988 or selfA._toStore("CostFunctionJ") \
1989 or selfA._toStore("CostFunctionJb") \
1990 or selfA._toStore("CostFunctionJo") \
1991 or selfA._toStore("CurrentOptimum") \
1992 or selfA._toStore("APosterioriCovariance"):
1997 __m = selfA._parameters["NumberOfMembers"]
1999 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2000 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2002 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2003 selfA.StoredVariables["Analysis"].store( Xb )
2004 if selfA._toStore("APosterioriCovariance"):
2005 if hasattr(B,"asfullmatrix"):
2006 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2008 selfA.StoredVariables["APosterioriCovariance"].store( B )
2009 selfA._setInternalState("seed", numpy.random.get_state())
2010 elif selfA._parameters["nextStep"]:
2011 Xn = selfA._getInternalState("Xn")
2013 previousJMinimum = numpy.finfo(float).max
2015 for step in range(duration-1):
2016 numpy.random.set_state(selfA._getInternalState("seed"))
2017 if hasattr(Y,"store"):
2018 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2020 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2023 if hasattr(U,"store") and len(U)>1:
2024 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2025 elif hasattr(U,"store") and len(U)==1:
2026 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2028 Un = numpy.asmatrix(numpy.ravel( U )).T
2032 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2033 Xn = CovarianceInflation( Xn,
2034 selfA._parameters["InflationType"],
2035 selfA._parameters["InflationFactor"],
2038 #--------------------------
2039 if VariantM == "IEnKF12":
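# Iterated EnKF: Gauss-Newton iterations on the ensemble weights vw, using
# either a bundle linearization (BnotT True: finite differences scaled by
# _epsilon) or a transform linearization through Ta, until ||Deltaw|| < _e
# or _jmax iterations are reached.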
2040 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2041 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2045 Ta = numpy.identity(__m)
2046 vw = numpy.zeros(__m)
2047 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2048 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2051 E1 = vx1 + _epsilon * EaX
2053 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2055 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2056 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2058 returnSerieAsArrayMatrix = True )
2059 elif selfA._parameters["EstimationOf"] == "Parameters":
2060 # ---> By principle, M = Id
2062 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2063 vy1 = H((vx2, Un)).reshape((__p,1))
2065 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2067 returnSerieAsArrayMatrix = True )
2068 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2071 EaY = (HE2 - vy2) / _epsilon
2073 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2075 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2076 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2077 Deltaw = - numpy.linalg.solve(mH,GradJ)
2082 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2086 A2 = EnsembleOfAnomalies( E2 )
2089 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2090 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2093 #--------------------------
2095 raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2097 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2098 Xn = CovarianceInflation( Xn,
2099 selfA._parameters["InflationType"],
2100 selfA._parameters["InflationFactor"],
2103 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2104 #--------------------------
2105 selfA._setInternalState("Xn", Xn)
2106 selfA._setInternalState("seed", numpy.random.get_state())
2107 #--------------------------
2109 if selfA._parameters["StoreInternalVariables"] \
2110 or selfA._toStore("CostFunctionJ") \
2111 or selfA._toStore("CostFunctionJb") \
2112 or selfA._toStore("CostFunctionJo") \
2113 or selfA._toStore("APosterioriCovariance") \
2114 or selfA._toStore("InnovationAtCurrentAnalysis") \
2115 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2116 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2117 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2118 _Innovation = Ynpu - _HXa
2120 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2121 # ---> with analysis
2122 selfA.StoredVariables["Analysis"].store( Xa )
2123 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2124 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2125 if selfA._toStore("InnovationAtCurrentAnalysis"):
2126 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2127 # ---> with current state
2128 if selfA._parameters["StoreInternalVariables"] \
2129 or selfA._toStore("CurrentState"):
2130 selfA.StoredVariables["CurrentState"].store( Xn )
2131 if selfA._toStore("ForecastState"):
2132 selfA.StoredVariables["ForecastState"].store( E2 )
2133 if selfA._toStore("ForecastCovariance"):
2134 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2135 if selfA._toStore("BMA"):
2136 selfA.StoredVariables["BMA"].store( E2 - Xa )
2137 if selfA._toStore("InnovationAtCurrentState"):
2138 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2139 if selfA._toStore("SimulatedObservationAtCurrentState") \
2140 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2141 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2143 if selfA._parameters["StoreInternalVariables"] \
2144 or selfA._toStore("CostFunctionJ") \
2145 or selfA._toStore("CostFunctionJb") \
2146 or selfA._toStore("CostFunctionJo") \
2147 or selfA._toStore("CurrentOptimum") \
2148 or selfA._toStore("APosterioriCovariance"):
2149 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2150 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2152 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2153 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2154 selfA.StoredVariables["CostFunctionJ" ].store( J )
2156 if selfA._toStore("IndexOfOptimum") \
2157 or selfA._toStore("CurrentOptimum") \
2158 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2159 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2160 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2161 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2162 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2163 if selfA._toStore("IndexOfOptimum"):
2164 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2165 if selfA._toStore("CurrentOptimum"):
2166 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2167 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2168 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2169 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2170 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2171 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2172 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2173 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2174 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2175 if selfA._toStore("APosterioriCovariance"):
2176 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2177 if selfA._parameters["EstimationOf"] == "Parameters" \
2178 and J < previousJMinimum:
2179 previousJMinimum = J
2181 if selfA._toStore("APosterioriCovariance"):
2182 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2183 # ---> For the smoothers
2184 if selfA._toStore("CurrentEnsembleState"):
2185 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2187 # Additional final storage of the optimum for parameter estimation
2188 # ----------------------------------------------------------------------
2189 if selfA._parameters["EstimationOf"] == "Parameters":
2190 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2191 selfA.StoredVariables["Analysis"].store( XaMin )
2192 if selfA._toStore("APosterioriCovariance"):
2193 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2194 if selfA._toStore("BMA"):
2195 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2199 # ==============================================================================
2200 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2207 Hm = HO["Direct"].appliedTo
2212 Xini = selfA._parameters["InitializationPoint"]
2214 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
2215 Innovation = Y - HXb
2222 Xr = Xini.reshape((-1,1))
2223 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
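# Incremental formulation: at each outer iteration the observation operator
# is relinearized around the current iterate Xr, and the inner minimization
# below acts on the increment dx with the resulting quadratic cost function.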
2227 Ht = HO["Tangent"].asMatrix(Xr)
2228 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2230 # Definition of the cost function
2231 # -------------------------------
2232 def CostFunction(dx):
2233 _dX = numpy.asmatrix(numpy.ravel( dx )).T
2234 if selfA._parameters["StoreInternalVariables"] or \
2235 selfA._toStore("CurrentState") or \
2236 selfA._toStore("CurrentOptimum"):
2237 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2239 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
2240 _dInnovation = Innovation - _HdX
2241 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2242 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2243 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2244 if selfA._toStore("InnovationAtCurrentState"):
2245 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2247 Jb = float( 0.5 * _dX.T * BI * _dX )
2248 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
2251 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2252 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2253 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2254 selfA.StoredVariables["CostFunctionJ" ].store( J )
2255 if selfA._toStore("IndexOfOptimum") or \
2256 selfA._toStore("CurrentOptimum") or \
2257 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2258 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2259 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2260 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2261 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2262 if selfA._toStore("IndexOfOptimum"):
2263 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2264 if selfA._toStore("CurrentOptimum"):
2265 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2266 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2267 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2268 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2269 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2270 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2271 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2272 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2273 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2276 def GradientOfCostFunction(dx):
2277 _dX = numpy.asmatrix(numpy.ravel( dx )).T
2279 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
2280 _dInnovation = Innovation - _HdX
2282 GradJo = - Ht.T @ (RI * _dInnovation)
2283 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2286 # Minimization of the functional
2287 # ------------------------------
2288 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2290 if selfA._parameters["Minimizer"] == "LBFGSB":
2291 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2292 if "0.19" <= scipy.version.version <= "1.1.0":
2293 import lbfgsbhlt as optimiseur
2295 import scipy.optimize as optimiseur
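# For the scipy releases covered by the version test above, a locally
# provided L-BFGS-B variant (lbfgsbhlt) is imported in place of
# scipy.optimize, presumably to work around issues of those releases
# (assumption based on the module name).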
2296 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2297 func = CostFunction,
2298 x0 = numpy.zeros(Xini.size),
2299 fprime = GradientOfCostFunction,
2301 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2302 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2303 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2304 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2305 iprint = selfA._parameters["optiprint"],
2307 nfeval = Informations['funcalls']
2308 rc = Informations['warnflag']
2309 elif selfA._parameters["Minimizer"] == "TNC":
2310 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2311 func = CostFunction,
2312 x0 = numpy.zeros(Xini.size),
2313 fprime = GradientOfCostFunction,
2315 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2316 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2317 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2318 ftol = selfA._parameters["CostDecrementTolerance"],
2319 messages = selfA._parameters["optmessages"],
2321 elif selfA._parameters["Minimizer"] == "CG":
2322 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2324 x0 = numpy.zeros(Xini.size),
2325 fprime = GradientOfCostFunction,
2327 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2328 gtol = selfA._parameters["GradientNormTolerance"],
2329 disp = selfA._parameters["optdisp"],
2332 elif selfA._parameters["Minimizer"] == "NCG":
2333 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2335 x0 = numpy.zeros(Xini.size),
2336 fprime = GradientOfCostFunction,
2338 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2339 avextol = selfA._parameters["CostDecrementTolerance"],
2340 disp = selfA._parameters["optdisp"],
2343 elif selfA._parameters["Minimizer"] == "BFGS":
2344 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2346 x0 = numpy.zeros(Xini.size),
2347 fprime = GradientOfCostFunction,
2349 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2350 gtol = selfA._parameters["GradientNormTolerance"],
2351 disp = selfA._parameters["optdisp"],
2355 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2357 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2358 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2360 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2361 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2363 Minimum = Xb + Minimum.reshape((-1,1))
2366 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2367 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2370 #--------------------------
2372 selfA.StoredVariables["Analysis"].store( Xa )
2374 if selfA._toStore("OMA") or \
2375 selfA._toStore("SigmaObs2") or \
2376 selfA._toStore("SimulationQuantiles") or \
2377 selfA._toStore("SimulatedObservationAtOptimum"):
2378 if selfA._toStore("SimulatedObservationAtCurrentState"):
2379 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2380 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2381 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2385 if selfA._toStore("APosterioriCovariance") or \
2386 selfA._toStore("SimulationQuantiles") or \
2387 selfA._toStore("JacobianMatrixAtOptimum") or \
2388 selfA._toStore("KalmanGainAtOptimum"):
2389 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2390 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2391 if selfA._toStore("APosterioriCovariance") or \
2392 selfA._toStore("SimulationQuantiles") or \
2393 selfA._toStore("KalmanGainAtOptimum"):
2394 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2395 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2396 if selfA._toStore("APosterioriCovariance") or \
2397 selfA._toStore("SimulationQuantiles"):
2398 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2399 if selfA._toStore("APosterioriCovariance"):
2400 selfA.StoredVariables["APosterioriCovariance"].store( A )
2401 if selfA._toStore("JacobianMatrixAtOptimum"):
2402 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2403 if selfA._toStore("KalmanGainAtOptimum"):
2404 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2405 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2406 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2408 # Additional computations and/or storage
2409 # ---------------------------------------
2410 if selfA._toStore("Innovation") or \
2411 selfA._toStore("SigmaObs2") or \
2412 selfA._toStore("MahalanobisConsistency") or \
2413 selfA._toStore("OMB"):
2415 if selfA._toStore("Innovation"):
2416 selfA.StoredVariables["Innovation"].store( d )
2417 if selfA._toStore("BMA"):
2418 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2419 if selfA._toStore("OMA"):
2420 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2421 if selfA._toStore("OMB"):
2422 selfA.StoredVariables["OMB"].store( d )
2423 if selfA._toStore("SigmaObs2"):
2424 TraceR = R.trace(Y.size)
2425 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2426 if selfA._toStore("MahalanobisConsistency"):
2427 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2428 if selfA._toStore("SimulationQuantiles"):
2429 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2430 if selfA._toStore("SimulatedObservationAtBackground"):
2431 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2432 if selfA._toStore("SimulatedObservationAtOptimum"):
2433 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2437 # ==============================================================================
2438 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2439 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2441 Maximum Likelihood Ensemble Filter
2443 if selfA._parameters["EstimationOf"] == "Parameters":
2444 selfA._parameters["StoreInternalVariables"] = True
2447 H = HO["Direct"].appliedControledFormTo
2449 if selfA._parameters["EstimationOf"] == "State":
2450 M = EM["Direct"].appliedControledFormTo
2452 if CM is not None and "Tangent" in CM and U is not None:
2453 Cm = CM["Tangent"].asMatrix(Xb)
2457 # Observation duration and sizes
2458 if hasattr(Y,"stepnumber"):
2459 duration = Y.stepnumber()
2460 __p = numpy.cumprod(Y.shape())[-1]
2463 __p = numpy.array(Y).size
2465 # Precompute the inverses of B and R
2466 if selfA._parameters["StoreInternalVariables"] \
2467 or selfA._toStore("CostFunctionJ") \
2468 or selfA._toStore("CostFunctionJb") \
2469 or selfA._toStore("CostFunctionJo") \
2470 or selfA._toStore("CurrentOptimum") \
2471 or selfA._toStore("APosterioriCovariance"):
2476 __m = selfA._parameters["NumberOfMembers"]
2478 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2479 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2480 selfA.StoredVariables["Analysis"].store( Xb )
2481 if selfA._toStore("APosterioriCovariance"):
2482 if hasattr(B,"asfullmatrix"):
2483 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2485 selfA.StoredVariables["APosterioriCovariance"].store( B )
2486 selfA._setInternalState("seed", numpy.random.get_state())
2487 elif selfA._parameters["nextStep"]:
2488 Xn = selfA._getInternalState("Xn")
2490 previousJMinimum = numpy.finfo(float).max
2492 for step in range(duration-1):
2493 numpy.random.set_state(selfA._getInternalState("seed"))
2494 if hasattr(Y,"store"):
2495 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2497 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2500 if hasattr(U,"store") and len(U)>1:
2501 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2502 elif hasattr(U,"store") and len(U)==1:
2503 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2505 Un = numpy.asmatrix(numpy.ravel( U )).T
2509 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2510 Xn = CovarianceInflation( Xn,
2511 selfA._parameters["InflationType"],
2512 selfA._parameters["InflationFactor"],
2515 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2516 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2518 returnSerieAsArrayMatrix = True )
2519 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2520 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2521 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2522 Xn_predicted = Xn_predicted + Cm * Un
2523 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2524 # ---> By principle, M = Id, Q = 0
2525 Xn_predicted = EMX = Xn
2527 #--------------------------
2528 if VariantM == "MLEF13":
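# MLEF update: iterative minimization of the observation misfit in ensemble
# space, reusing Gauss-Newton iterations on the weights vw with the transform
# Ta (or the bundle variant if BnotT), ending with
# Xn = vx1 + sqrt(m-1) * EaX @ Ta @ Ua.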
2529 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2530 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2531 Ua = numpy.identity(__m)
2535 Ta = numpy.identity(__m)
2536 vw = numpy.zeros(__m)
2537 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2538 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2541 E1 = vx1 + _epsilon * EaX
2543 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2545 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2547 returnSerieAsArrayMatrix = True )
2548 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2551 EaY = (HE2 - vy2) / _epsilon
2553 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2555 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2556 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2557 Deltaw = - numpy.linalg.solve(mH,GradJ)
2562 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2567 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2569 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2570 #--------------------------
2572 raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2574 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2575 Xn = CovarianceInflation( Xn,
2576 selfA._parameters["InflationType"],
2577 selfA._parameters["InflationFactor"],
2580 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2581 #--------------------------
2582 selfA._setInternalState("Xn", Xn)
2583 selfA._setInternalState("seed", numpy.random.get_state())
2584 #--------------------------
2586 if selfA._parameters["StoreInternalVariables"] \
2587 or selfA._toStore("CostFunctionJ") \
2588 or selfA._toStore("CostFunctionJb") \
2589 or selfA._toStore("CostFunctionJo") \
2590 or selfA._toStore("APosterioriCovariance") \
2591 or selfA._toStore("InnovationAtCurrentAnalysis") \
2592 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2593 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2594 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2595 _Innovation = Ynpu - _HXa
2597 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2598 # ---> with analysis
2599 selfA.StoredVariables["Analysis"].store( Xa )
2600 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2601 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2602 if selfA._toStore("InnovationAtCurrentAnalysis"):
2603 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2604 # ---> with current state
2605 if selfA._parameters["StoreInternalVariables"] \
2606 or selfA._toStore("CurrentState"):
2607 selfA.StoredVariables["CurrentState"].store( Xn )
2608 if selfA._toStore("ForecastState"):
2609 selfA.StoredVariables["ForecastState"].store( EMX )
2610 if selfA._toStore("ForecastCovariance"):
2611 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2612 if selfA._toStore("BMA"):
2613 selfA.StoredVariables["BMA"].store( EMX - Xa )
2614 if selfA._toStore("InnovationAtCurrentState"):
2615 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2616 if selfA._toStore("SimulatedObservationAtCurrentState") \
2617 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2618 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2620 if selfA._parameters["StoreInternalVariables"] \
2621 or selfA._toStore("CostFunctionJ") \
2622 or selfA._toStore("CostFunctionJb") \
2623 or selfA._toStore("CostFunctionJo") \
2624 or selfA._toStore("CurrentOptimum") \
2625 or selfA._toStore("APosterioriCovariance"):
2626 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2627 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2629 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2630 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2631 selfA.StoredVariables["CostFunctionJ" ].store( J )
2633 if selfA._toStore("IndexOfOptimum") \
2634 or selfA._toStore("CurrentOptimum") \
2635 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2636 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2637 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2638 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2639 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2640 if selfA._toStore("IndexOfOptimum"):
2641 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2642 if selfA._toStore("CurrentOptimum"):
2643 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2644 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2645 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2646 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2647 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2648 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2649 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2650 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2651 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2652 if selfA._toStore("APosterioriCovariance"):
2653 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2654 if selfA._parameters["EstimationOf"] == "Parameters" \
2655 and J < previousJMinimum:
2656 previousJMinimum = J
2658 if selfA._toStore("APosterioriCovariance"):
2659 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2661 # ---> For the smoothers
2661 if selfA._toStore("CurrentEnsembleState"):
2662 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2664 # Additional final storage of the optimum for parameter estimation
2665 # ----------------------------------------------------------------------
2666 if selfA._parameters["EstimationOf"] == "Parameters":
2667 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2668 selfA.StoredVariables["Analysis"].store( XaMin )
2669 if selfA._toStore("APosterioriCovariance"):
2670 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2671 if selfA._toStore("BMA"):
2672 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2676 # ==============================================================================
2688 Computer implementation of the MMQR algorithm, based on the publication:
2689 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2690 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2693 # Retrieval of the initial data and information
2694 # ----------------------------------------------
2695 variables = numpy.ravel( x0 )
2696 mesures = numpy.ravel( y )
2697 increment = sys.float_info[0]
2700 quantile = float(quantile)
2702 # Computation of the MM parameters
2703 # --------------------------------
2704 tn = float(toler) / n
2705 e0 = -tn / math.log(tn)
2706 epsilon = (e0-tn)/(1+math.log(e0))
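# The parameters tn, e0 and epsilon above define the smoothing of the
# absolute value used by the MM majorizer, chosen so that the perturbed
# quantile objective stays within toler of the exact one (see the
# Hunter & Lange reference above).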
2708 # Initialization computations
2709 # ---------------------------
2710 residus = mesures - numpy.ravel( func( variables ) )
2711 poids = 1./(epsilon+numpy.abs(residus))
2712 veps = 1. - 2. * quantile - residus * poids
2713 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2716 # Iterative search
2717 # ----------------
2718 while (increment > toler) and (iteration < maxfun) :
2721 Derivees = numpy.array(fprime(variables))
2722 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
2723 DeriveesT = Derivees.transpose()
2724 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2725 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
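# MM step: solve the weighted least-squares normal equations
# (D^T W D) step = - D^T veps with W = diag(poids), which minimizes the
# quadratic majorizer of the quantile-regression objective at this iterate.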
2726 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2728 variables = variables + step
2729 if bounds is not None:
2730 # Warning: avoid an infinite loop if an interval is too small
2731 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2733 variables = variables - step
2734 residus = mesures - numpy.ravel( func(variables) )
2735 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2737 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2739 variables = variables - step
2740 residus = mesures - numpy.ravel( func(variables) )
2741 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2743 increment = lastsurrogate-surrogate
2744 poids = 1./(epsilon+numpy.abs(residus))
2745 veps = 1. - 2. * quantile - residus * poids
2746 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2750 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2752 return variables, Ecart, [n,p,iteration,increment,0]
2754 # ==============================================================================
2755 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2757 Multi-step and multi-method 3DVAR
2762 if selfA._parameters["EstimationOf"] == "State":
2763 M = EM["Direct"].appliedTo
2765 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2766 Xn = numpy.ravel(Xb).reshape((-1,1))
2767 selfA.StoredVariables["Analysis"].store( Xn )
2768 if selfA._toStore("APosterioriCovariance"):
2769 if hasattr(B,"asfullmatrix"):
2770 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2772 selfA.StoredVariables["APosterioriCovariance"].store( B )
2773 if selfA._toStore("ForecastState"):
2774 selfA.StoredVariables["ForecastState"].store( Xn )
2775 elif selfA._parameters["nextStep"]:
2776 Xn = selfA._getInternalState("Xn")
2778 Xn = numpy.ravel(Xb).reshape((-1,1))
2780 if hasattr(Y,"stepnumber"):
2781 duration = Y.stepnumber()
2786 for step in range(duration-1):
2787 if hasattr(Y,"store"):
2788 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2790 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2792 if selfA._parameters["EstimationOf"] == "State": # Forecast
2793 Xn_predicted = M( Xn )
2794 if selfA._toStore("ForecastState"):
2795 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2796 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2797 # ---> By principle, M = Id, Q = 0
2799 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2801 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
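# oneCycle performs the single-step assimilation (for instance a plain 3DVAR
# cycle) with the forecast Xn_predicted as background; its last stored
# Analysis becomes the current state of the multi-step loop.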
2803 Xn = selfA.StoredVariables["Analysis"][-1]
2804 #--------------------------
2805 selfA._setInternalState("Xn", Xn)
2809 # ==============================================================================
2810 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2817 Hm = HO["Direct"].appliedTo
2819 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2820 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2823 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2824 if Y.size != HXb.size:
2825 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2826 if max(Y.shape) != max(HXb.shape):
2827 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2829 if selfA._toStore("JacobianMatrixAtBackground"):
2830 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2831 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2832 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2834 Ht = HO["Tangent"].asMatrix(Xb)
2836 HBHTpR = R + Ht * BHT
2837 Innovation = Y - HXb
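# PSAS formulation: the minimization is performed in observation space on
# the dual variable w (of the size of Y), and the physical analysis is
# recovered at the end as Xa = Xb + B H^T w.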
2839 Xini = numpy.zeros(Xb.shape)
2841 # Definition of the cost function
2842 # -------------------------------
2843 def CostFunction(w):
2844 _W = w.reshape((-1,1))
2845 if selfA._parameters["StoreInternalVariables"] or \
2846 selfA._toStore("CurrentState") or \
2847 selfA._toStore("CurrentOptimum"):
2848 selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2849 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2850 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2851 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2852 if selfA._toStore("InnovationAtCurrentState"):
2853 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2855 Jb = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2856 Jo = float( - _W.T @ Innovation )
2859 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2860 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2861 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2862 selfA.StoredVariables["CostFunctionJ" ].store( J )
2863 if selfA._toStore("IndexOfOptimum") or \
2864 selfA._toStore("CurrentOptimum") or \
2865 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2866 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2867 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2868 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2869 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2870 if selfA._toStore("IndexOfOptimum"):
2871 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2872 if selfA._toStore("CurrentOptimum"):
2873 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2874 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2875 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2876 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2877 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2878 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2879 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2880 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2881 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2882 return J
2884 def GradientOfCostFunction(w):
2885 _W = w.reshape((-1,1))
2886 GradJb = HBHTpR @ _W
2887 GradJo = - Innovation
2888 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2889 return GradJ
2891 # Minimization of the functional
2892 # --------------------------------
2893 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
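# Note (added): every branch below feeds the same analytic pair
# (CostFunction, GradientOfCostFunction) to a SciPy optimizer, so no
# numerical differentiation ever happens here. For SciPy 0.19 to 1.1.0 the
# LBFGSB branch imports "lbfgsbhlt", presumably a locally patched variant of
# the optimizer. Note also that fmin_l_bfgs_b expresses its "factr" stopping
# criterion in units of machine epsilon, hence the 1.e14 scaling applied to
# CostDecrementTolerance.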
2895 if selfA._parameters["Minimizer"] == "LBFGSB":
2896 if "0.19" <= scipy.version.version <= "1.1.0":
2897 import lbfgsbhlt as optimiseur
2898 else:
2899 import scipy.optimize as optimiseur
2900 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2901 func = CostFunction,
2903 fprime = GradientOfCostFunction,
2905 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2906 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2907 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2908 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2909 iprint = selfA._parameters["optiprint"],
2911 nfeval = Informations['funcalls']
2912 rc = Informations['warnflag']
2913 elif selfA._parameters["Minimizer"] == "TNC":
2914 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2915 func = CostFunction,
2917 fprime = GradientOfCostFunction,
2919 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2920 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2921 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2922 ftol = selfA._parameters["CostDecrementTolerance"],
2923 messages = selfA._parameters["optmessages"],
2925 elif selfA._parameters["Minimizer"] == "CG":
2926 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2929 fprime = GradientOfCostFunction,
2931 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2932 gtol = selfA._parameters["GradientNormTolerance"],
2933 disp = selfA._parameters["optdisp"],
2936 elif selfA._parameters["Minimizer"] == "NCG":
2937 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2940 fprime = GradientOfCostFunction,
2942 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2943 avextol = selfA._parameters["CostDecrementTolerance"],
2944 disp = selfA._parameters["optdisp"],
2947 elif selfA._parameters["Minimizer"] == "BFGS":
2948 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2951 fprime = GradientOfCostFunction,
2953 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2954 gtol = selfA._parameters["GradientNormTolerance"],
2955 disp = selfA._parameters["optdisp"],
2959 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2961 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2962 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2964 # Workaround for a TNC bug on the returned Minimum
2965 # ----------------------------------------------------------------
2966 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2967 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2968 else:
2969 Minimum = Xb + BHT @ Minimum.reshape((-1,1))
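# Note (added): when the optimizer returns the dual optimum w* itself, the
# line above maps it back to state space through x* = xb + B H^T w*;
# otherwise the already reconstructed state stored in "CurrentState" is
# reused directly.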
2971 Xa = Minimum
2972 #--------------------------
2974 selfA.StoredVariables["Analysis"].store( Xa )
2976 if selfA._toStore("OMA") or \
2977 selfA._toStore("SigmaObs2") or \
2978 selfA._toStore("SimulationQuantiles") or \
2979 selfA._toStore("SimulatedObservationAtOptimum"):
2980 if selfA._toStore("SimulatedObservationAtCurrentState"):
2981 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2982 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2983 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2984 else:
2985 HXa = Hm( Xa )
2987 if selfA._toStore("APosterioriCovariance") or \
2988 selfA._toStore("SimulationQuantiles") or \
2989 selfA._toStore("JacobianMatrixAtOptimum") or \
2990 selfA._toStore("KalmanGainAtOptimum"):
2991 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2992 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2993 if selfA._toStore("APosterioriCovariance") or \
2994 selfA._toStore("SimulationQuantiles") or \
2995 selfA._toStore("KalmanGainAtOptimum"):
2996 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2997 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2998 if selfA._toStore("APosterioriCovariance") or \
2999 selfA._toStore("SimulationQuantiles"):
3000 BI = B.getI()
3001 RI = R.getI()
3002 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3003 if selfA._toStore("APosterioriCovariance"):
3004 selfA.StoredVariables["APosterioriCovariance"].store( A )
3005 if selfA._toStore("JacobianMatrixAtOptimum"):
3006 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3007 if selfA._toStore("KalmanGainAtOptimum"):
3008 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3009 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3010 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3012 # Additional calculations and/or storage
3013 # ---------------------------------------
3014 if selfA._toStore("Innovation") or \
3015 selfA._toStore("SigmaObs2") or \
3016 selfA._toStore("MahalanobisConsistency") or \
3017 selfA._toStore("OMB"):
3018 d = Y - HXb
3019 if selfA._toStore("Innovation"):
3020 selfA.StoredVariables["Innovation"].store( d )
3021 if selfA._toStore("BMA"):
3022 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3023 if selfA._toStore("OMA"):
3024 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3025 if selfA._toStore("OMB"):
3026 selfA.StoredVariables["OMB"].store( d )
3027 if selfA._toStore("SigmaObs2"):
3028 TraceR = R.trace(Y.size)
3029 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3030 if selfA._toStore("MahalanobisConsistency"):
3031 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3032 if selfA._toStore("SimulationQuantiles"):
3033 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3034 if selfA._toStore("SimulatedObservationAtBackground"):
3035 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3036 if selfA._toStore("SimulatedObservationAtOptimum"):
3037 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3039 return 0
3041 # ==============================================================================
3042 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
3046 if selfA._parameters["EstimationOf"] == "Parameters":
3047 selfA._parameters["StoreInternalVariables"] = True
3050 H = HO["Direct"].appliedControledFormTo
3052 if selfA._parameters["EstimationOf"] == "State":
3053 M = EM["Direct"].appliedControledFormTo
3055 if CM is not None and "Tangent" in CM and U is not None:
3056 Cm = CM["Tangent"].asMatrix(Xb)
3057 else:
3058 Cm = None
3060 # Observation duration and sizes
3061 if hasattr(Y,"stepnumber"):
3062 duration = Y.stepnumber()
3063 __p = numpy.cumprod(Y.shape())[-1]
3064 else:
3065 duration = 2
3066 __p = numpy.array(Y).size
3068 # Precompute the inversions of B and R
3069 if selfA._parameters["StoreInternalVariables"] \
3070 or selfA._toStore("CostFunctionJ") \
3071 or selfA._toStore("CostFunctionJb") \
3072 or selfA._toStore("CostFunctionJo") \
3073 or selfA._toStore("CurrentOptimum") \
3074 or selfA._toStore("APosterioriCovariance"):
3075 BI = B.getI()
3076 RI = R.getI()
3078 __n = Xb.size
3079 __m = selfA._parameters["NumberOfMembers"]
3081 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3082 else: Rn = R
3084 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3085 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3086 else: Pn = B
3087 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
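# Note (added): on the very first step the ensemble is drawn around Xb with
# covariance Pn, and "Analysis" is seeded with Xb itself so that the stored
# series stays aligned with the observation window.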
3088 selfA.StoredVariables["Analysis"].store( Xb )
3089 if selfA._toStore("APosterioriCovariance"):
3090 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3091 selfA._setInternalState("seed", numpy.random.get_state())
3092 elif selfA._parameters["nextStep"]:
3093 Xn = selfA._getInternalState("Xn")
3095 previousJMinimum = numpy.finfo(float).max
3097 for step in range(duration-1):
3098 numpy.random.set_state(selfA._getInternalState("seed"))
3099 if hasattr(Y,"store"):
3100 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3101 else:
3102 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3104 if U is not None:
3105 if hasattr(U,"store") and len(U)>1:
3106 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3107 elif hasattr(U,"store") and len(U)==1:
3108 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3110 Un = numpy.asmatrix(numpy.ravel( U )).T
3111 else:
3112 Un = None
3114 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3115 Xn = CovarianceInflation( Xn,
3116 selfA._parameters["InflationType"],
3117 selfA._parameters["InflationFactor"],
3118 )
3120 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3121 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3122 argsAsSerie = True,
3123 returnSerieAsArrayMatrix = True )
3124 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3125 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3126 argsAsSerie = True,
3127 returnSerieAsArrayMatrix = True )
3128 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
3129 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3130 Xn_predicted = Xn_predicted + Cm * Un
3131 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3132 # --- > By principle, M = Id, Q = 0
3133 Xn_predicted = EMX = Xn
3134 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3135 argsAsSerie = True,
3136 returnSerieAsArrayMatrix = True )
3138 # Mean of forecast and observation of forecast
3139 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3140 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
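# Note (added): both variants below are stochastic (perturbed-observations)
# EnKF analyses. The gain is estimated from ensemble statistics only,
# Kn ~ Pf H^T (H Pf H^T + R)^(-1), built from the anomalies of the forecast
# members and of their images through H, and each member is updated against
# its own perturbed observation.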
3142 #--------------------------
3143 if VariantM == "KalmanFilterFormula05":
3144 PfHT, HPfHT = 0., 0.
3145 for i in range(__m):
3146 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3147 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3148 PfHT += Exfi * Eyfi.T
3149 HPfHT += Eyfi * Eyfi.T
3150 PfHT = (1./(__m-1)) * PfHT
3151 HPfHT = (1./(__m-1)) * HPfHT
3152 Kn = PfHT * ( R + HPfHT ).I
3155 for i in range(__m):
3156 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3157 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3158 #--------------------------
3159 elif VariantM == "KalmanFilterFormula16":
3160 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3161 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3163 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3164 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3166 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3168 for i in range(__m):
3169 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
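# Note (added): in this formulation the sampled observation perturbations
# EpY are recentred before building the scaled anomalies EaX and EaY, so the
# gain Kn = EaX EaY^T (EaY EaY^T)^(-1) is a least-squares regression of
# state anomalies on observation anomalies; R only enters through the
# sampled perturbations.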
3170 #--------------------------
3171 else:
3172 raise ValueError("VariantM has to be chosen in the authorized methods list.")
3174 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3175 Xn = CovarianceInflation( Xn,
3176 selfA._parameters["InflationType"],
3177 selfA._parameters["InflationFactor"],
3178 )
3180 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3181 #--------------------------
3182 selfA._setInternalState("Xn", Xn)
3183 selfA._setInternalState("seed", numpy.random.get_state())
3184 #--------------------------
3186 if selfA._parameters["StoreInternalVariables"] \
3187 or selfA._toStore("CostFunctionJ") \
3188 or selfA._toStore("CostFunctionJb") \
3189 or selfA._toStore("CostFunctionJo") \
3190 or selfA._toStore("APosterioriCovariance") \
3191 or selfA._toStore("InnovationAtCurrentAnalysis") \
3192 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3193 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3194 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
3195 _Innovation = Ynpu - _HXa
3197 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3198 # ---> with the analysis
3199 selfA.StoredVariables["Analysis"].store( Xa )
3200 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3201 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3202 if selfA._toStore("InnovationAtCurrentAnalysis"):
3203 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3204 # ---> with the current state
3205 if selfA._parameters["StoreInternalVariables"] \
3206 or selfA._toStore("CurrentState"):
3207 selfA.StoredVariables["CurrentState"].store( Xn )
3208 if selfA._toStore("ForecastState"):
3209 selfA.StoredVariables["ForecastState"].store( EMX )
3210 if selfA._toStore("ForecastCovariance"):
3211 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3212 if selfA._toStore("BMA"):
3213 selfA.StoredVariables["BMA"].store( EMX - Xa )
3214 if selfA._toStore("InnovationAtCurrentState"):
3215 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3216 if selfA._toStore("SimulatedObservationAtCurrentState") \
3217 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3218 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3220 if selfA._parameters["StoreInternalVariables"] \
3221 or selfA._toStore("CostFunctionJ") \
3222 or selfA._toStore("CostFunctionJb") \
3223 or selfA._toStore("CostFunctionJo") \
3224 or selfA._toStore("CurrentOptimum") \
3225 or selfA._toStore("APosterioriCovariance"):
3226 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3227 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3228 J = Jb + Jo
3229 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3230 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3231 selfA.StoredVariables["CostFunctionJ" ].store( J )
3233 if selfA._toStore("IndexOfOptimum") \
3234 or selfA._toStore("CurrentOptimum") \
3235 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3236 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3237 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3238 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3239 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3240 if selfA._toStore("IndexOfOptimum"):
3241 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3242 if selfA._toStore("CurrentOptimum"):
3243 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3244 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3245 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3246 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3247 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3248 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3249 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3250 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3251 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3252 if selfA._toStore("APosterioriCovariance"):
3253 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3254 if selfA._parameters["EstimationOf"] == "Parameters" \
3255 and J < previousJMinimum:
3256 previousJMinimum = J
3257 XaMin = Xa
3258 if selfA._toStore("APosterioriCovariance"):
3259 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3260 # ---> For the smoothers
3261 if selfA._toStore("CurrentEnsembleState"):
3262 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3264 # Final additional storage of the optimum in parameter estimation
3265 # ----------------------------------------------------------------------
3266 if selfA._parameters["EstimationOf"] == "Parameters":
3267 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3268 selfA.StoredVariables["Analysis"].store( XaMin )
3269 if selfA._toStore("APosterioriCovariance"):
3270 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3271 if selfA._toStore("BMA"):
3272 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3274 return 0
3276 # ==============================================================================
3277 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3284 Hm = HO["Direct"].appliedTo
3285 Ha = HO["Adjoint"].appliedInXTo
3287 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3288 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3289 else:
3290 HXb = Hm( Xb )
3291 HXb = HXb.reshape((-1,1))
3292 if Y.size != HXb.size:
3293 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3294 if max(Y.shape) != max(HXb.shape):
3295 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3297 if selfA._toStore("JacobianMatrixAtBackground"):
3298 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3299 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3300 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3302 BI = B.getI()
3303 RI = R.getI()
3305 Xini = selfA._parameters["InitializationPoint"]
3307 # Cost function definition
3308 # ------------------------------
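# Note (added): this is the classical primal 3DVAR functional,
#     J(x) = 0.5 (x-xb)^T B^(-1) (x-xb) + 0.5 (y-H(x))^T R^(-1) (y-H(x)),
# evaluated with the precomputed inverses BI and RI; its exact gradient,
#     grad J(x) = B^(-1) (x-xb) - H'^T R^(-1) (y-H(x)),
# is supplied separately below so the minimizers never differentiate
# numerically.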
3309 def CostFunction(x):
3310 _X = numpy.ravel( x ).reshape((-1,1))
3311 if selfA._parameters["StoreInternalVariables"] or \
3312 selfA._toStore("CurrentState") or \
3313 selfA._toStore("CurrentOptimum"):
3314 selfA.StoredVariables["CurrentState"].store( _X )
3315 _HX = Hm( _X ).reshape((-1,1))
3316 _Innovation = Y - _HX
3317 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3318 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3319 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3320 if selfA._toStore("InnovationAtCurrentState"):
3321 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3323 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3324 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3325 J = Jb + Jo
3327 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3328 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3329 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3330 selfA.StoredVariables["CostFunctionJ" ].store( J )
3331 if selfA._toStore("IndexOfOptimum") or \
3332 selfA._toStore("CurrentOptimum") or \
3333 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3334 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3335 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3336 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3337 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3338 if selfA._toStore("IndexOfOptimum"):
3339 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3340 if selfA._toStore("CurrentOptimum"):
3341 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3342 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3343 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3344 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3345 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3346 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3347 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3348 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3349 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3350 return J
3352 def GradientOfCostFunction(x):
3353 _X = x.reshape((-1,1))
3354 _HX = Hm( _X ).reshape((-1,1))
3355 GradJb = BI * (_X - Xb)
3356 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3357 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3358 return GradJ
3360 # Minimization of the functional
3361 # --------------------------------
3362 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3364 if selfA._parameters["Minimizer"] == "LBFGSB":
3365 if "0.19" <= scipy.version.version <= "1.1.0":
3366 import lbfgsbhlt as optimiseur
3367 else:
3368 import scipy.optimize as optimiseur
3369 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3370 func = CostFunction,
3372 fprime = GradientOfCostFunction,
3374 bounds = selfA._parameters["Bounds"],
3375 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3376 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3377 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3378 iprint = selfA._parameters["optiprint"],
3380 nfeval = Informations['funcalls']
3381 rc = Informations['warnflag']
3382 elif selfA._parameters["Minimizer"] == "TNC":
3383 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3384 func = CostFunction,
3386 fprime = GradientOfCostFunction,
3388 bounds = selfA._parameters["Bounds"],
3389 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3390 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3391 ftol = selfA._parameters["CostDecrementTolerance"],
3392 messages = selfA._parameters["optmessages"],
3394 elif selfA._parameters["Minimizer"] == "CG":
3395 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3398 fprime = GradientOfCostFunction,
3400 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3401 gtol = selfA._parameters["GradientNormTolerance"],
3402 disp = selfA._parameters["optdisp"],
3405 elif selfA._parameters["Minimizer"] == "NCG":
3406 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3409 fprime = GradientOfCostFunction,
3411 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3412 avextol = selfA._parameters["CostDecrementTolerance"],
3413 disp = selfA._parameters["optdisp"],
3416 elif selfA._parameters["Minimizer"] == "BFGS":
3417 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3420 fprime = GradientOfCostFunction,
3422 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3423 gtol = selfA._parameters["GradientNormTolerance"],
3424 disp = selfA._parameters["optdisp"],
3428 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3430 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3431 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3433 # Workaround for a TNC bug on the returned Minimum
3434 # ----------------------------------------------------------------
3435 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3436 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3438 Xa = Minimum
3439 #--------------------------
3441 selfA.StoredVariables["Analysis"].store( Xa )
3443 if selfA._toStore("OMA") or \
3444 selfA._toStore("SigmaObs2") or \
3445 selfA._toStore("SimulationQuantiles") or \
3446 selfA._toStore("SimulatedObservationAtOptimum"):
3447 if selfA._toStore("SimulatedObservationAtCurrentState"):
3448 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3449 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3450 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3451 else:
3452 HXa = Hm( Xa )
3454 if selfA._toStore("APosterioriCovariance") or \
3455 selfA._toStore("SimulationQuantiles") or \
3456 selfA._toStore("JacobianMatrixAtOptimum") or \
3457 selfA._toStore("KalmanGainAtOptimum"):
3458 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3459 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3460 if selfA._toStore("APosterioriCovariance") or \
3461 selfA._toStore("SimulationQuantiles") or \
3462 selfA._toStore("KalmanGainAtOptimum"):
3463 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3464 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3465 if selfA._toStore("APosterioriCovariance") or \
3466 selfA._toStore("SimulationQuantiles"):
3467 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3468 if selfA._toStore("APosterioriCovariance"):
3469 selfA.StoredVariables["APosterioriCovariance"].store( A )
3470 if selfA._toStore("JacobianMatrixAtOptimum"):
3471 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3472 if selfA._toStore("KalmanGainAtOptimum"):
3473 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3474 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3475 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3478 # Additional calculations and/or storage
3479 # ---------------------------------------
3479 if selfA._toStore("Innovation") or \
3480 selfA._toStore("SigmaObs2") or \
3481 selfA._toStore("MahalanobisConsistency") or \
3482 selfA._toStore("OMB"):
3483 d = Y - HXb
3484 if selfA._toStore("Innovation"):
3485 selfA.StoredVariables["Innovation"].store( d )
3486 if selfA._toStore("BMA"):
3487 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3488 if selfA._toStore("OMA"):
3489 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3490 if selfA._toStore("OMB"):
3491 selfA.StoredVariables["OMB"].store( d )
3492 if selfA._toStore("SigmaObs2"):
3493 TraceR = R.trace(Y.size)
3494 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3495 if selfA._toStore("MahalanobisConsistency"):
3496 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3497 if selfA._toStore("SimulationQuantiles"):
3498 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3499 if selfA._toStore("SimulatedObservationAtBackground"):
3500 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3501 if selfA._toStore("SimulatedObservationAtOptimum"):
3502 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3504 return 0
3506 # ==============================================================================
3507 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3516 Hm = HO["Direct"].appliedControledFormTo
3517 Mm = EM["Direct"].appliedControledFormTo
3519 if CM is not None and "Tangent" in CM and U is not None:
3520 Cm = CM["Tangent"].asMatrix(Xb)
3521 else:
3522 Cm = None
3525 def Un(_step):
3526 if hasattr(U,"store") and 1<=_step<len(U) :
3527 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3528 elif hasattr(U,"store") and len(U)==1:
3529 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3531 _Un = numpy.asmatrix(numpy.ravel( U )).T
3532 else:
3533 _Un = None
3534 return _Un
3535 def CmUn(_xn,_un):
3536 if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
3537 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3538 _CmUn = _Cm * _un
3539 else:
3540 _CmUn = 0.
3541 return _CmUn
3543 # Remark: the observations are used starting from time step number 1,
3544 # and they are arranged in Yo according to these indices. Step 0 is
3545 # therefore not used, since the first stage starts with the observation
3546 # at step 1.
3548 # Number of steps equal to the number of observation steps
3549 if hasattr(Y,"stepnumber"):
3550 duration = Y.stepnumber()
3551 else:
3552 duration = 2
3554 # Precompute the inversions of B and R
3555 BI = B.getI()
3556 RI = R.getI()
3558 # Starting point of the optimization
3559 Xini = selfA._parameters["InitializationPoint"]
3561 # Cost function definition
3562 # ------------------------------
3563 selfA.DirectCalculation = [None,] # Step 0 is not observed
3564 selfA.DirectInnovation = [None,] # Step 0 is not observed
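# Note (added): the cost function below performs one forward sweep of the
# model over the assimilation window, storing the trajectory
# (DirectCalculation) and the innovations (DirectInnovation); the gradient
# then reuses these stored values in a single backward sweep with the
# adjoint operators instead of recomputing the trajectory.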
3565 def CostFunction(x):
3566 _X = numpy.asmatrix(numpy.ravel( x )).T
3567 if selfA._parameters["StoreInternalVariables"] or \
3568 selfA._toStore("CurrentState") or \
3569 selfA._toStore("CurrentOptimum"):
3570 selfA.StoredVariables["CurrentState"].store( _X )
3571 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3572 selfA.DirectCalculation = [None,]
3573 selfA.DirectInnovation = [None,]
3574 Jo = 0.
3575 _Xn = _X
3576 for step in range(0,duration-1):
3577 if hasattr(Y,"store"):
3578 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3579 else:
3580 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3582 _Un = Un(step)
3584 if selfA._parameters["EstimationOf"] == "State":
3585 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3586 elif selfA._parameters["EstimationOf"] == "Parameters":
3587 pass
3589 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3590 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3592 # Difference-to-observations step
3593 if selfA._parameters["EstimationOf"] == "State":
3594 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3595 elif selfA._parameters["EstimationOf"] == "Parameters":
3596 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3598 # State storage
3599 selfA.DirectCalculation.append( _Xn )
3600 selfA.DirectInnovation.append( _YmHMX )
3602 # Accumulation into the observation part of the functional
3603 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3605 J = Jb + Jo
3606 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3607 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3608 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3609 selfA.StoredVariables["CostFunctionJ" ].store( J )
3610 if selfA._toStore("IndexOfOptimum") or \
3611 selfA._toStore("CurrentOptimum") or \
3612 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3613 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3614 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3615 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3616 if selfA._toStore("IndexOfOptimum"):
3617 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3618 if selfA._toStore("CurrentOptimum"):
3619 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3620 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3621 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3622 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3623 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3624 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3625 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3626 return J
3628 def GradientOfCostFunction(x):
3629 _X = numpy.asmatrix(numpy.ravel( x )).T
3630 GradJb = BI * (_X - Xb)
3631 GradJo = 0.
3632 for step in range(duration-1,0,-1):
3633 # Retrieve the last stored state of the evolution
3634 _Xn = selfA.DirectCalculation.pop()
3635 # Retrieve the last stored innovation
3636 _YmHMX = selfA.DirectInnovation.pop()
3637 # Adjoint operators computation
3638 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3639 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3640 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3641 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3642 # Gradient computation by the adjoint state technique
3643 GradJo = GradJo + Ha * (RI * _YmHMX) # Equivalent, for a linear Ha, to: Ha( (_Xn, RI * _YmHMX) )
3644 GradJo = Ma * GradJo # Equivalent, for a linear Ma, to: Ma( (_Xn, GradJo) )
3645 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3646 return GradJ
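# Note (added): the backward loop above is the standard adjoint-state
# recursion: each step applies H^T R^(-1) d_k to its stored innovation and
# propagates the running sum through the adjoint model M^T, so that overall
#     grad J = B^(-1) (x-xb) - sum_k (M^T)^k H^T R^(-1) d_k.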
3648 # Minimization of the functional
3649 # --------------------------------
3650 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3652 if selfA._parameters["Minimizer"] == "LBFGSB":
3653 if "0.19" <= scipy.version.version <= "1.1.0":
3654 import lbfgsbhlt as optimiseur
3655 else:
3656 import scipy.optimize as optimiseur
3657 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3658 func = CostFunction,
3660 fprime = GradientOfCostFunction,
3662 bounds = selfA._parameters["Bounds"],
3663 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3664 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3665 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3666 iprint = selfA._parameters["optiprint"],
3668 nfeval = Informations['funcalls']
3669 rc = Informations['warnflag']
3670 elif selfA._parameters["Minimizer"] == "TNC":
3671 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3672 func = CostFunction,
3674 fprime = GradientOfCostFunction,
3676 bounds = selfA._parameters["Bounds"],
3677 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3678 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3679 ftol = selfA._parameters["CostDecrementTolerance"],
3680 messages = selfA._parameters["optmessages"],
3682 elif selfA._parameters["Minimizer"] == "CG":
3683 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3686 fprime = GradientOfCostFunction,
3688 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3689 gtol = selfA._parameters["GradientNormTolerance"],
3690 disp = selfA._parameters["optdisp"],
3693 elif selfA._parameters["Minimizer"] == "NCG":
3694 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3697 fprime = GradientOfCostFunction,
3699 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3700 avextol = selfA._parameters["CostDecrementTolerance"],
3701 disp = selfA._parameters["optdisp"],
3704 elif selfA._parameters["Minimizer"] == "BFGS":
3705 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3708 fprime = GradientOfCostFunction,
3710 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3711 gtol = selfA._parameters["GradientNormTolerance"],
3712 disp = selfA._parameters["optdisp"],
3716 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3718 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3719 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3721 # Workaround for a TNC bug on the returned Minimum
3722 # ----------------------------------------------------------------
3723 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3724 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3726 # Analysis retrieval
3727 # ----------------------
3728 Xa = Minimum
3730 selfA.StoredVariables["Analysis"].store( Xa )
3732 # Additional calculations and/or storage
3733 # ---------------------------------------
3734 if selfA._toStore("BMA"):
3735 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3737 return 0
3739 # ==============================================================================
3740 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3742 Standard Kalman Filter
3744 if selfA._parameters["EstimationOf"] == "Parameters":
3745 selfA._parameters["StoreInternalVariables"] = True
3749 Ht = HO["Tangent"].asMatrix(Xb)
3750 Ha = HO["Adjoint"].asMatrix(Xb)
3752 if selfA._parameters["EstimationOf"] == "State":
3753 Mt = EM["Tangent"].asMatrix(Xb)
3754 Ma = EM["Adjoint"].asMatrix(Xb)
3756 if CM is not None and "Tangent" in CM and U is not None:
3757 Cm = CM["Tangent"].asMatrix(Xb)
3758 else:
3759 Cm = None
3761 # Observation duration and sizes
3762 if hasattr(Y,"stepnumber"):
3763 duration = Y.stepnumber()
3764 __p = numpy.cumprod(Y.shape())[-1]
3765 else:
3766 duration = 2
3767 __p = numpy.array(Y).size
3769 # Precompute the inversions of B and R
3770 if selfA._parameters["StoreInternalVariables"] \
3771 or selfA._toStore("CostFunctionJ") \
3772 or selfA._toStore("CostFunctionJb") \
3773 or selfA._toStore("CostFunctionJo") \
3774 or selfA._toStore("CurrentOptimum") \
3775 or selfA._toStore("APosterioriCovariance"):
3776 BI = B.getI()
3777 RI = R.getI()
3779 __n = Xb.size
3781 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3782 Xn = Xb
3783 Pn = B
3784 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3785 selfA.StoredVariables["Analysis"].store( Xb )
3786 if selfA._toStore("APosterioriCovariance"):
3787 if hasattr(B,"asfullmatrix"):
3788 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3790 selfA.StoredVariables["APosterioriCovariance"].store( B )
3791 selfA._setInternalState("seed", numpy.random.get_state())
3792 elif selfA._parameters["nextStep"]:
3793 Xn = selfA._getInternalState("Xn")
3794 Pn = selfA._getInternalState("Pn")
3796 if selfA._parameters["EstimationOf"] == "Parameters":
3797 XaMin = Xn
3798 previousJMinimum = numpy.finfo(float).max
3800 for step in range(duration-1):
3801 if hasattr(Y,"store"):
3802 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3803 else:
3804 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3806 if U is not None:
3807 if hasattr(U,"store") and len(U)>1:
3808 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3809 elif hasattr(U,"store") and len(U)==1:
3810 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3812 Un = numpy.asmatrix(numpy.ravel( U )).T
3813 else:
3814 Un = None
3816 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3817 Xn_predicted = Mt * Xn
3818 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
3819 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3820 Xn_predicted = Xn_predicted + Cm * Un
3821 Pn_predicted = Q + Mt * (Pn * Ma)
3822 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3823 # --- > By principle, M = Id, Q = 0
3824 Xn_predicted = Xn
3825 Pn_predicted = Pn
3827 if selfA._parameters["EstimationOf"] == "State":
3828 HX_predicted = Ht * Xn_predicted
3829 _Innovation = Ynpu - HX_predicted
3830 elif selfA._parameters["EstimationOf"] == "Parameters":
3831 HX_predicted = Ht * Xn_predicted
3832 _Innovation = Ynpu - HX_predicted
3833 if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
3834 _Innovation = _Innovation - Cm * Un
3836 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3837 Xn = Xn_predicted + Kn * _Innovation
3838 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3839 Xa = Xn # pointer alias, same object
3841 #--------------------------
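# Note (added): the update above is the textbook Kalman analysis step,
#     K  = Pf H^T (H Pf H^T + R)^(-1)
#     xa = xf + K (y - H xf)
#     Pa = (I - K H) Pf,
# written with the tangent Ht and adjoint Ha matrices of the observation
# operator.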
3842 selfA._setInternalState("Xn", Xn)
3843 selfA._setInternalState("Pn", Pn)
3844 #--------------------------
3846 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3847 # ---> with the analysis
3848 selfA.StoredVariables["Analysis"].store( Xa )
3849 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3850 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3851 if selfA._toStore("InnovationAtCurrentAnalysis"):
3852 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3853 # ---> with the current state
3854 if selfA._parameters["StoreInternalVariables"] \
3855 or selfA._toStore("CurrentState"):
3856 selfA.StoredVariables["CurrentState"].store( Xn )
3857 if selfA._toStore("ForecastState"):
3858 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3859 if selfA._toStore("ForecastCovariance"):
3860 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3861 if selfA._toStore("BMA"):
3862 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3863 if selfA._toStore("InnovationAtCurrentState"):
3864 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3865 if selfA._toStore("SimulatedObservationAtCurrentState") \
3866 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3867 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3869 if selfA._parameters["StoreInternalVariables"] \
3870 or selfA._toStore("CostFunctionJ") \
3871 or selfA._toStore("CostFunctionJb") \
3872 or selfA._toStore("CostFunctionJo") \
3873 or selfA._toStore("CurrentOptimum") \
3874 or selfA._toStore("APosterioriCovariance"):
3875 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3876 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3877 J = Jb + Jo
3878 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3879 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3880 selfA.StoredVariables["CostFunctionJ" ].store( J )
3882 if selfA._toStore("IndexOfOptimum") \
3883 or selfA._toStore("CurrentOptimum") \
3884 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3885 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3886 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3887 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3888 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3889 if selfA._toStore("IndexOfOptimum"):
3890 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3891 if selfA._toStore("CurrentOptimum"):
3892 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3893 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3894 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3895 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3896 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3897 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3898 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3899 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3900 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3901 if selfA._toStore("APosterioriCovariance"):
3902 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3903 if selfA._parameters["EstimationOf"] == "Parameters" \
3904 and J < previousJMinimum:
3905 previousJMinimum = J
3906 XaMin = Xa
3907 if selfA._toStore("APosterioriCovariance"):
3908 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3910 # Final additional storage of the optimum in parameter estimation
3911 # ----------------------------------------------------------------------
3912 if selfA._parameters["EstimationOf"] == "Parameters":
3913 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3914 selfA.StoredVariables["Analysis"].store( XaMin )
3915 if selfA._toStore("APosterioriCovariance"):
3916 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3917 if selfA._toStore("BMA"):
3918 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3920 return 0
3922 # ==============================================================================
3923 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3925 Unscented Kalman Filter
3927 if selfA._parameters["EstimationOf"] == "Parameters":
3928 selfA._parameters["StoreInternalVariables"] = True
3930 L = Xb.size
3931 Alpha = selfA._parameters["Alpha"]
3932 Beta = selfA._parameters["Beta"]
3933 if selfA._parameters["Kappa"] == 0:
3934 if selfA._parameters["EstimationOf"] == "State":
3935 Kappa = 0
3936 elif selfA._parameters["EstimationOf"] == "Parameters":
3937 Kappa = 3 - L
3938 else:
3939 Kappa = selfA._parameters["Kappa"]
3940 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3941 Gamma = math.sqrt( L + Lambda )
3943 Ww = []
3944 Ww.append( 0. )
3945 for i in range(2*L):
3946 Ww.append( 1. / (2.*(L + Lambda)) )
3948 Wm = numpy.array( Ww )
3949 Wm[0] = Lambda / (L + Lambda)
3950 Wc = numpy.array( Ww )
3951 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
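# Note (added): these are the scaled sigma-point weights of the standard
# UKF: Lambda = Alpha^2 (L + Kappa) - L and Gamma = sqrt(L + Lambda); the
# mean weights Wm and the covariance weights Wc differ only in their first
# entry, Wc[0] = Wm[0] + (1 - Alpha^2 + Beta), and the 2L+1 sigma points
# are X and X +/- Gamma times the columns of a square root of P.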
3954 Hm = HO["Direct"].appliedControledFormTo
3956 if selfA._parameters["EstimationOf"] == "State":
3957 Mm = EM["Direct"].appliedControledFormTo
3959 if CM is not None and "Tangent" in CM and U is not None:
3960 Cm = CM["Tangent"].asMatrix(Xb)
3961 else:
3962 Cm = None
3964 # Observation duration and sizes
3965 if hasattr(Y,"stepnumber"):
3966 duration = Y.stepnumber()
3967 __p = numpy.cumprod(Y.shape())[-1]
3968 else:
3969 duration = 2
3970 __p = numpy.array(Y).size
3972 # Precompute the inversions of B and R
3973 if selfA._parameters["StoreInternalVariables"] \
3974 or selfA._toStore("CostFunctionJ") \
3975 or selfA._toStore("CostFunctionJb") \
3976 or selfA._toStore("CostFunctionJo") \
3977 or selfA._toStore("CurrentOptimum") \
3978 or selfA._toStore("APosterioriCovariance"):
3979 BI = B.getI()
3980 RI = R.getI()
3982 __n = Xb.size
3984 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3985 Xn = Xb
3986 if hasattr(B,"asfullmatrix"):
3987 Pn = B.asfullmatrix(__n)
3988 else:
3989 Pn = B
3990 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3991 selfA.StoredVariables["Analysis"].store( Xb )
3992 if selfA._toStore("APosterioriCovariance"):
3993 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3994 elif selfA._parameters["nextStep"]:
3995 Xn = selfA._getInternalState("Xn")
3996 Pn = selfA._getInternalState("Pn")
3998 if selfA._parameters["EstimationOf"] == "Parameters":
3999 XaMin = Xn
4000 previousJMinimum = numpy.finfo(float).max
4002 for step in range(duration-1):
4003 if hasattr(Y,"store"):
4004 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4005 else:
4006 Ynpu = numpy.ravel( Y ).reshape((__p,1))
4008 if U is not None:
4009 if hasattr(U,"store") and len(U)>1:
4010 Un = numpy.ravel( U[step] ).reshape((-1,1))
4011 elif hasattr(U,"store") and len(U)==1:
4012 Un = numpy.ravel( U[0] ).reshape((-1,1))
4014 Un = numpy.ravel( U ).reshape((-1,1))
4015 else:
4016 Un = None
4018 Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
4019 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4020 nbSpts = 2*Xn.size+1
4022 XEtnnp = []
4023 for point in range(nbSpts):
4024 if selfA._parameters["EstimationOf"] == "State":
4025 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4026 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
4027 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4028 XEtnnpi = XEtnnpi + Cm * Un
4029 elif selfA._parameters["EstimationOf"] == "Parameters":
4030 # --- > By principle, M = Id, Q = 0
4031 XEtnnpi = Xnp[:,point]
4032 XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4033 XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4035 Xncm = ( XEtnnp * Wm ).sum(axis=1)
4037 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
4038 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4039 for point in range(nbSpts):
4040 Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
4042 Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4044 Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
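# Note (added): as in one common variant of the UKF, a fresh set of sigma
# points is resampled around the predicted mean Xncm using the predicted
# covariance square root Pnmdemi, before propagation through the
# observation operator.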
4046 Ynnp = []
4047 for point in range(nbSpts):
4048 if selfA._parameters["EstimationOf"] == "State":
4049 Ynnpi = Hm( (Xnnp[:,point], None) )
4050 elif selfA._parameters["EstimationOf"] == "Parameters":
4051 Ynnpi = Hm( (Xnnp[:,point], Un) )
4052 Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4053 Ynnp = numpy.concatenate( Ynnp, axis=1 )
4055 Yncm = ( Ynnp * Wm ).sum(axis=1)
4057 Pyyn = R
4058 Pxyn = 0.
4059 for point in range(nbSpts):
4060 Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4061 Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4063 _Innovation = Ynpu - Yncm.reshape((-1,1))
4064 if selfA._parameters["EstimationOf"] == "Parameters":
4065 if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
4066 _Innovation = _Innovation - Cm * Un
4068 Kn = Pxyn * Pyyn.I
4069 Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4070 Pn = Pnm - Kn * Pyyn * Kn.T
4071 Xa = Xn # pointer alias, same object
4073 #--------------------------
4074 selfA._setInternalState("Xn", Xn)
4075 selfA._setInternalState("Pn", Pn)
4076 #--------------------------
4078 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4079 # ---> with the analysis
4080 selfA.StoredVariables["Analysis"].store( Xa )
4081 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4082 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4083 if selfA._toStore("InnovationAtCurrentAnalysis"):
4084 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4085 # ---> with the current state
4086 if selfA._parameters["StoreInternalVariables"] \
4087 or selfA._toStore("CurrentState"):
4088 selfA.StoredVariables["CurrentState"].store( Xn )
4089 if selfA._toStore("ForecastState"):
4090 selfA.StoredVariables["ForecastState"].store( Xncm )
4091 if selfA._toStore("ForecastCovariance"):
4092 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4093 if selfA._toStore("BMA"):
4094 selfA.StoredVariables["BMA"].store( Xncm - Xa )
4095 if selfA._toStore("InnovationAtCurrentState"):
4096 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4097 if selfA._toStore("SimulatedObservationAtCurrentState") \
4098 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4099 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4101 if selfA._parameters["StoreInternalVariables"] \
4102 or selfA._toStore("CostFunctionJ") \
4103 or selfA._toStore("CostFunctionJb") \
4104 or selfA._toStore("CostFunctionJo") \
4105 or selfA._toStore("CurrentOptimum") \
4106 or selfA._toStore("APosterioriCovariance"):
4107 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
4108 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4109 J = Jb + Jo
4110 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4111 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4112 selfA.StoredVariables["CostFunctionJ" ].store( J )
4114 if selfA._toStore("IndexOfOptimum") \
4115 or selfA._toStore("CurrentOptimum") \
4116 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4117 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4118 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4119 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4120 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4121 if selfA._toStore("IndexOfOptimum"):
4122 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4123 if selfA._toStore("CurrentOptimum"):
4124 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4125 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4126 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4127 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4128 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4129 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4130 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4131 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4132 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4133 if selfA._toStore("APosterioriCovariance"):
4134 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4135 if selfA._parameters["EstimationOf"] == "Parameters" \
4136 and J < previousJMinimum:
4137 previousJMinimum = J
4138 XaMin = Xa
4139 if selfA._toStore("APosterioriCovariance"):
4140 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4142 # Final additional storage of the optimum in parameter estimation
4143 # ----------------------------------------------------------------------
4144 if selfA._parameters["EstimationOf"] == "Parameters":
4145 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4146 selfA.StoredVariables["Analysis"].store( XaMin )
4147 if selfA._toStore("APosterioriCovariance"):
4148 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4149 if selfA._toStore("BMA"):
4150 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4152 return 0
4154 # ==============================================================================
4155 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4157 3DVAR variational analysis with no inversion of B
4162 Hm = HO["Direct"].appliedTo
4163 Ha = HO["Adjoint"].appliedInXTo
4165 BT = B.getT()
4166 RI = R.getI()
4168 Xini = numpy.zeros(Xb.shape)
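# Note (added): this variant minimizes over the control v defined by the
# change of variable x = xb + B v, so the background term becomes
# Jb(v) = 0.5 v^T B^T v and only products by B (or B^T) are needed, never
# by B^(-1), hence "no inversion of B".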
4170 # Cost function definition
4171 # ------------------------------
4172 def CostFunction(v):
4173 _V = numpy.asmatrix(numpy.ravel( v )).T
4174 _X = Xb + (B @ _V).reshape((-1,1))
4175 if selfA._parameters["StoreInternalVariables"] or \
4176 selfA._toStore("CurrentState") or \
4177 selfA._toStore("CurrentOptimum"):
4178 selfA.StoredVariables["CurrentState"].store( _X )
4179 _HX = Hm( _X )
4180 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4181 _Innovation = Y - _HX
4182 if selfA._toStore("SimulatedObservationAtCurrentState") or \
4183 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4184 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4185 if selfA._toStore("InnovationAtCurrentState"):
4186 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4188 Jb = float( 0.5 * _V.T * BT * _V )
4189 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
4190 J = Jb + Jo
4192 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4193 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4194 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4195 selfA.StoredVariables["CostFunctionJ" ].store( J )
4196 if selfA._toStore("IndexOfOptimum") or \
4197 selfA._toStore("CurrentOptimum") or \
4198 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4199 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4200 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4201 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4202 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4203 if selfA._toStore("IndexOfOptimum"):
4204 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4205 if selfA._toStore("CurrentOptimum"):
4206 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4207 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4208 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4209 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4210 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4211 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4212 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4213 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4214 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4215 return J
4217 def GradientOfCostFunction(v):
4218 _V = v.reshape((-1,1))
4219 _X = Xb + (B @ _V).reshape((-1,1))
4220 _HX = Hm( _X ).reshape((-1,1))
4221 GradJb = BT * _V
4222 GradJo = - Ha( (_X, RI * (Y - _HX)) )
4223 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4224 return GradJ
4226 # Minimization of the functional
4227 # --------------------------------
4228 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4230 if selfA._parameters["Minimizer"] == "LBFGSB":
4231 if "0.19" <= scipy.version.version <= "1.1.0":
4232 import lbfgsbhlt as optimiseur
4233 else:
4234 import scipy.optimize as optimiseur
4235 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4236 func = CostFunction,
4238 fprime = GradientOfCostFunction,
4240 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4241 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
4242 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
4243 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4244 iprint = selfA._parameters["optiprint"],
4246 nfeval = Informations['funcalls']
4247 rc = Informations['warnflag']
4248 elif selfA._parameters["Minimizer"] == "TNC":
4249 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4250 func = CostFunction,
4252 fprime = GradientOfCostFunction,
4254 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4255 maxfun = selfA._parameters["MaximumNumberOfSteps"],
4256 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4257 ftol = selfA._parameters["CostDecrementTolerance"],
4258 messages = selfA._parameters["optmessages"],
4260 elif selfA._parameters["Minimizer"] == "CG":
4261 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4264 fprime = GradientOfCostFunction,
4266 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4267 gtol = selfA._parameters["GradientNormTolerance"],
4268 disp = selfA._parameters["optdisp"],
4271 elif selfA._parameters["Minimizer"] == "NCG":
4272 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4275 fprime = GradientOfCostFunction,
4277 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4278 avextol = selfA._parameters["CostDecrementTolerance"],
4279 disp = selfA._parameters["optdisp"],
4282 elif selfA._parameters["Minimizer"] == "BFGS":
4283 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4286 fprime = GradientOfCostFunction,
4288 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4289 gtol = selfA._parameters["GradientNormTolerance"],
4290 disp = selfA._parameters["optdisp"],
4294 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4296 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4297 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4299 # Workaround for a TNC bug on the returned Minimum
4300 # ----------------------------------------------------------------
4301 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4302 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4303 else:
4304 Minimum = Xb + B * Minimum.reshape((-1,1)) # deliberately "*" and not "@" here
4306 Xa = Minimum
4307 #--------------------------
4309 selfA.StoredVariables["Analysis"].store( Xa )
4311 if selfA._toStore("OMA") or \
4312 selfA._toStore("SigmaObs2") or \
4313 selfA._toStore("SimulationQuantiles") or \
4314 selfA._toStore("SimulatedObservationAtOptimum"):
4315 if selfA._toStore("SimulatedObservationAtCurrentState"):
4316 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4317 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4318 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4322 if selfA._toStore("APosterioriCovariance") or \
4323 selfA._toStore("SimulationQuantiles") or \
4324 selfA._toStore("JacobianMatrixAtOptimum") or \
4325 selfA._toStore("KalmanGainAtOptimum"):
4326 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4327 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4328 if selfA._toStore("APosterioriCovariance") or \
4329 selfA._toStore("SimulationQuantiles") or \
4330 selfA._toStore("KalmanGainAtOptimum"):
4331 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4332 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4333 if selfA._toStore("APosterioriCovariance") or \
4334 selfA._toStore("SimulationQuantiles"):
4335 BI = B.getI()
4336 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
4337 if selfA._toStore("APosterioriCovariance"):
4338 selfA.StoredVariables["APosterioriCovariance"].store( A )
4339 if selfA._toStore("JacobianMatrixAtOptimum"):
4340 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4341 if selfA._toStore("KalmanGainAtOptimum"):
4342 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4343 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4344 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4346 # Additional calculations and/or storage
4347 # ---------------------------------------
4348 if selfA._toStore("Innovation") or \
4349 selfA._toStore("SigmaObs2") or \
4350 selfA._toStore("MahalanobisConsistency") or \
4351 selfA._toStore("OMB"):
4352 d = Y - HXb
4353 if selfA._toStore("Innovation"):
4354 selfA.StoredVariables["Innovation"].store( d )
4355 if selfA._toStore("BMA"):
4356 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4357 if selfA._toStore("OMA"):
4358 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4359 if selfA._toStore("OMB"):
4360 selfA.StoredVariables["OMB"].store( d )
4361 if selfA._toStore("SigmaObs2"):
4362 TraceR = R.trace(Y.size)
4363 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
4364 if selfA._toStore("MahalanobisConsistency"):
4365 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4366 if selfA._toStore("SimulationQuantiles"):
4367 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4368 if selfA._toStore("SimulatedObservationAtBackground"):
4369 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
4370 if selfA._toStore("SimulatedObservationAtOptimum"):
4371 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
4373 return 0
4375 # ==============================================================================
4376 if __name__ == "__main__":
4377 print('\n AUTODIAGNOSTIC\n')