# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
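
# A minimal sketch of the worker protocol used by ExecuteFunction above (the
# path "/some/user/path", module name "usermodule" and function name "UserH"
# are hypothetical, for illustration only): each job is a triplet made of a
# point, the extra arguments, and an importable description of the function.
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/path",
#       "__userFunction__modl" : "usermodule",
#       "__userFunction__name" : "UserH",
#   }
#   HX = ExecuteFunction( (numpy.ones(3), None, funcrepr) )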
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When
    creating an object by providing a function "Function", one obtains an
    object with the 3 methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    by the multiplicative increment "increment" (1% by default), or by the
    fixed increment "dX" which will be multiplied by "increment" (hence in %),
    and centered FD are computed if the boolean "centeredDF" is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing computations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        self.__mfEnabled = bool(mfEnabled)
        logging.debug("FDA Multifunction computations: %s"%(self.__mfEnabled,))
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing computations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing computations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicate detection: %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac

    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct computation using the provided function.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        logging.debug("FDA DirectOperator computation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX

    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computes the tangent operator as the Jacobian by finite differences,
        i.e. the gradient of H at X. Directional finite differences are used
        around the point X, which is a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], building X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are divided by 2*dX[i]
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], building X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are divided by dX[i]
        4/ Each result, component by component, becomes a column of the
           Jacobian
        """
        logging.debug("FDA Start of Jacobian computation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA Jacobian computation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA Jacobian computation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie ) # Computation of the series
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    #
                    _HX = self.DirectOperator( _X )
                    #
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
        #
        _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
        if self.__avoidRC:
            if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
            while len(self.__listJPCP) > self.__lenghtRJ:
                self.__listJPCP.pop(0)
                self.__listJPCI.pop(0)
                self.__listJPCR.pop(0)
                self.__listJPPN.pop(0)
                self.__listJPIN.pop(0)
            self.__listJPCP.append( copy.copy(_X) )
            self.__listJPCI.append( copy.copy(_dX) )
            self.__listJPCR.append( copy.copy(_Jacobienne) )
            self.__listJPPN.append( numpy.linalg.norm(_X) )
            self.__listJPIN.append( numpy.linalg.norm(_dX) )
        #
        logging.debug("FDA End of Jacobian computation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Tangent computation using the Jacobian.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Return the matrix form if the second argument is None
            # -----------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else: return _Jacobienne
        else:
            #
            # Compute the linearized value of H at X applied to dX
            # ----------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else: return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Adjoint computation using the Jacobian.

        NB: extraArgs are present to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Return the matrix form if the second argument is None
            # -----------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else: return _JacobienneT
        else:
            #
            # Compute the adjoint value at X applied to Y
            # -------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else: return _HaY.A1
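
# A minimal usage sketch of FDApproximation (the user function "UserH" is
# hypothetical, for illustration only):
#
#   def UserH( x ):
#       x = numpy.ravel( x )
#       return numpy.array([x[0]**2, x[0]*x[1]])
#   FDA = FDApproximation( Function = UserH, centeredDF = True )
#   X0 = numpy.array([1., 2.])
#   FDA.DirectOperator( X0 )          # H(X0)
#   FDA.TangentOperator( (X0, None) ) # Full Jacobian matrix at X0
#   FDA.AdjointOperator( (X0, None) ) # Transposed Jacobian at X0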
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter, built from _nbmembers-1 anomalies"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following
        the handwritten notes of MB and consistent with the code of PS,
        with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
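
# A minimal usage sketch of the two ensemble generators above (illustrative
# values only):
#
#   Xb = numpy.zeros(3)
#   B = numpy.eye(3)
#   E1 = EnsembleOfCenteredPerturbations( Xb, B, 10 )   # shape (3, 10)
#   E2 = EnsembleOfBackgroundPerturbations( Xb, B, 10 ) # shape (3, 10), with exactly centered anomalies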
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimation
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Enforce symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Enforce positivity
        __epsilon = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
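
# A minimal illustration of the three ensemble statistics above, for an
# ensemble E of shape (n, m) with members as columns (illustrative values):
#
#   E = numpy.array([[1., 2., 3.], [4., 6., 8.]]) # n=2, m=3
#   Xm = EnsembleMean( E )                        # [[2.], [6.]]
#   Ea = EnsembleOfAnomalies( E )                 # [[-1., 0., 1.], [-2., 0., 2.]]
#   P = EnsembleErrorCovariance( E )              # ~= Ea @ Ea.T / (m-1)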
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handling of a zero or almost zero covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handling of a zero or almost zero covariance
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handling of a zero or almost zero covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handling of a zero or almost zero covariance
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Handling of a covariance that is a multiple of the identity
        __zero = 0.
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
    #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Handling of a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
    #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Handling of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
    #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handling of a full numpy covariance, knowing that this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
    #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType = None,
        InflationFactor = None,
        BackgroundCov = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a squared (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a squared (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
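
# A minimal sketch of multiplicative inflation on anomalies (illustrative
# values only): the factor scales the spread around the ensemble mean without
# moving the mean itself.
#
#   E = numpy.array([[1., 2., 3.]]) # 1 variable, 3 members, mean 2.
#   Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 2. )
#   # Anomalies (-1., 0., 1.) become (-2., 0., 2.), hence Ei = [[0., 2., 4.]]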
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori estimation of quantiles (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes precedence
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
            dYr = HtM @ dXr
            Yr = HXa.reshape((-1,1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.ravel( Hm( Xr ) )
        else:
            raise ValueError("Quantile simulations have to be either Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Quantile extraction
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else: YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0
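
# Note on the empirical quantile extraction above: the sample columns are
# sorted, and the column index used for a quantile q is
# int(nbsamples * q - 1./nbsamples). For example, nbsamples = 1000 and
# q = 0.95 give int(950. - 0.001) = 949.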
# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Force the bounds to be numeric values, unless globally None"
    # Keep a default value of None if there are no bounds
    if __Bounds is None: return None
    # Convert all individual None bounds to +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly defined bounds data")
    __Bounds[numpy.isnan(__Bounds)[:,0],0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds)[:,1],1] = sys.float_info.max
    return __Bounds
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenter the bounds around 0, unless globally None"
    # Keep a default value of None if there are no bounds
    if __Bounds is None: return None
    # Recenter the numeric bound values
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
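
# A minimal illustration of the two bounds helpers above (illustrative
# values only):
#
#   Bn = ForceNumericBounds( [[0., None], [None, 5.]] )
#   # -> [[0., +sys.float_info.max], [-sys.float_info.max, 5.]]
#   Br = RecentredBounds( [[0., None], [None, 5.]], [1., 1.] )
#   # -> the same bounds with the center subtracted row-wise, so that the
#   #    recentered domain contains 0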
# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
            if Cm is not None and Un is not None: # Caution: if Cm is also inside M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * Pn * Ma
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
            _Innovation = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
            _Innovation = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Caution: if Cm is also inside H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        Xa = Xn # Pointer
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
                if selfA._toStore("IndexOfOptimum"):
                    selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if selfA._toStore("CurrentOptimum"):
                    selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
                if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                    selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
                if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
                if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
                if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in parameter estimation
    # ---------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
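
# For reference, the constrained extended Kalman update implemented above,
# written as comments only (Xf, Pf denote the forecast state and covariance):
#
#   Pf = Q + Mt Pn Ma                   (forecast covariance)
#   Kn = Pf Ha (R + Ht Pf Ha)^-1        (Kalman gain)
#   Xn = Xf + Kn (Ynpu - H(Xf))         (analysis state)
#   Pn = Pf - Kn Ht Pf                  (analysis covariance)
#
# each estimate being optionally projected onto the bounds when
# "ConstrainedBy" is "EstimateProjection".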
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother (EnKS)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inverses of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        #
        # Initial direct computation (memorization is preferred over recomputation)
        __seed = numpy.random.get_state()
        selfB = copy.deepcopy(selfA)
        selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
        if VariantM == "EnKS16-KalmanFilterFormula":
            etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        if LagL > 0:
            EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
        else:
            EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
        selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Caution: if Cm is also inside M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm * Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi @ ( Ynpu - vZm )
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)
            wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            #
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Preserve the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last analyses incompletely updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Caution: if Cm is also inside M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS = RIdemi * EaHX / math.sqrt(__m-1)
            mS = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi * ( Ynpu - Hfm )
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)
            #
            EaX = EaX / math.sqrt(__m-1)
            Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m-1) * numpy.identity(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Gauge Boc2011
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Gauge Boc2015
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Gauge Boc2016
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("seed", numpy.random.get_state())
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
                if selfA._toStore("IndexOfOptimum"):
                    selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if selfA._toStore("CurrentOptimum"):
                    selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
                if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                    selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
                if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
                if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
                if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
        # ---> For the smoothers
        if selfA._toStore("CurrentEnsembleState"):
            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
    #
    # Additional final storage of the optimum in parameter estimation
    # ---------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
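
# For reference, the "KalmanFilterFormula" transform used above is the ETKF
# square-root update, written as comments only (m is the number of members):
#
#   S = R^{-1/2} EaHX / sqrt(m-1)        (normalized observation anomalies)
#   T = (I + S^T S)^{-1}
#   w = T S^T R^{-1/2} (Ynpu - Hfm)      (weights of the analysis mean)
#   Xn = Xfm + (EaX/sqrt(m-1)) ( w + sqrt(m-1) T^{1/2} )
#
# which updates both the ensemble mean (through w) and the anomalies
# (through the square root T^{1/2} of the transform matrix).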
1432 # ==============================================================================
1433 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1435 Extended Kalman Filter
1437 if selfA._parameters["EstimationOf"] == "Parameters":
1438 selfA._parameters["StoreInternalVariables"] = True
1441 H = HO["Direct"].appliedControledFormTo
1443 if selfA._parameters["EstimationOf"] == "State":
1444 M = EM["Direct"].appliedControledFormTo
1446 if CM is not None and "Tangent" in CM and U is not None:
1447 Cm = CM["Tangent"].asMatrix(Xb)
1451 # Durée d'observation et tailles
1452 if hasattr(Y,"stepnumber"):
1453 duration = Y.stepnumber()
1454 __p = numpy.cumprod(Y.shape())[-1]
1457 __p = numpy.array(Y).size
1459 # Précalcul des inversions de B et R
1460 if selfA._parameters["StoreInternalVariables"] \
1461 or selfA._toStore("CostFunctionJ") \
1462 or selfA._toStore("CostFunctionJb") \
1463 or selfA._toStore("CostFunctionJo") \
1464 or selfA._toStore("CurrentOptimum") \
1465 or selfA._toStore("APosterioriCovariance"):
1471 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1474 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1475 selfA.StoredVariables["Analysis"].store( Xb )
1476 if selfA._toStore("APosterioriCovariance"):
1477 if hasattr(B,"asfullmatrix"):
1478 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1480 selfA.StoredVariables["APosterioriCovariance"].store( B )
1481 selfA._setInternalState("seed", numpy.random.get_state())
1482 elif selfA._parameters["nextStep"]:
1483 Xn = selfA._getInternalState("Xn")
1484 Pn = selfA._getInternalState("Pn")
1486 if selfA._parameters["EstimationOf"] == "Parameters":
1487 XaMin = Xn
1488 previousJMinimum = numpy.finfo(float).max
1490 for step in range(duration-1):
1491 if hasattr(Y,"store"):
1492 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1493 else:
1494 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1496 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1497 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1498 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1499 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1501 if selfA._parameters["EstimationOf"] == "State":
1502 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1503 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1504 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1505 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1507 if U is not None:
1508 if hasattr(U,"store") and len(U)>1:
1509 Un = numpy.ravel( U[step] ).reshape((-1,1))
1510 elif hasattr(U,"store") and len(U)==1:
1511 Un = numpy.ravel( U[0] ).reshape((-1,1))
1512 else:
1513 Un = numpy.ravel( U ).reshape((-1,1))
1514 else:
1515 Un = None
1517 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1518 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1519 if Cm is not None and Un is not None: # Beware: if Cm is also embedded in M, it is applied twice!
1520 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1521 Xn_predicted = Xn_predicted + Cm * Un
1522 Pn_predicted = Q + Mt * Pn * Ma
1523 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1524 # --- > By principle, M = Id, Q = 0
1525 Xn_predicted = Xn
1526 Pn_predicted = Pn
1528 if selfA._parameters["EstimationOf"] == "State":
1529 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1530 _Innovation = Ynpu - HX_predicted
1531 elif selfA._parameters["EstimationOf"] == "Parameters":
1532 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1533 _Innovation = Ynpu - HX_predicted
1534 if Cm is not None and Un is not None: # Beware: if Cm is also embedded in H, it is applied twice!
1535 _Innovation = _Innovation - Cm * Un
1537 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1538 Xn = Xn_predicted + Kn * _Innovation
1539 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1540 Xa = Xn # Pointer to the analysis
1542 #--------------------------
1543 selfA._setInternalState("Xn", Xn)
1544 selfA._setInternalState("Pn", Pn)
1545 #--------------------------
1547 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1548 # ---> with the analysis
1549 selfA.StoredVariables["Analysis"].store( Xa )
1550 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1551 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1552 if selfA._toStore("InnovationAtCurrentAnalysis"):
1553 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1554 # ---> with the current state
1555 if selfA._parameters["StoreInternalVariables"] \
1556 or selfA._toStore("CurrentState"):
1557 selfA.StoredVariables["CurrentState"].store( Xn )
1558 if selfA._toStore("ForecastState"):
1559 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1560 if selfA._toStore("ForecastCovariance"):
1561 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1562 if selfA._toStore("BMA"):
1563 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1564 if selfA._toStore("InnovationAtCurrentState"):
1565 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1566 if selfA._toStore("SimulatedObservationAtCurrentState") \
1567 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1568 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1570 if selfA._parameters["StoreInternalVariables"] \
1571 or selfA._toStore("CostFunctionJ") \
1572 or selfA._toStore("CostFunctionJb") \
1573 or selfA._toStore("CostFunctionJo") \
1574 or selfA._toStore("CurrentOptimum") \
1575 or selfA._toStore("APosterioriCovariance"):
1576 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1577 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1578 J = Jb + Jo
1579 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1580 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1581 selfA.StoredVariables["CostFunctionJ" ].store( J )
1583 if selfA._toStore("IndexOfOptimum") \
1584 or selfA._toStore("CurrentOptimum") \
1585 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1586 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1587 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1588 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1589 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1590 if selfA._toStore("IndexOfOptimum"):
1591 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1592 if selfA._toStore("CurrentOptimum"):
1593 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1594 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1595 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1596 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1597 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1598 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1599 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1600 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1601 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1602 if selfA._toStore("APosterioriCovariance"):
1603 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1604 if selfA._parameters["EstimationOf"] == "Parameters" \
1605 and J < previousJMinimum:
1606 previousJMinimum = J
1607 XaMin = Xa
1608 if selfA._toStore("APosterioriCovariance"):
1609 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1611 # Additional final storage of the optimum for parameter estimation
1612 # ----------------------------------------------------------------------
1613 if selfA._parameters["EstimationOf"] == "Parameters":
1614 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1615 selfA.StoredVariables["Analysis"].store( XaMin )
1616 if selfA._toStore("APosterioriCovariance"):
1617 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1618 if selfA._toStore("BMA"):
1619 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1621 return 0
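#
# Illustrative note (not part of the original ADAO source): the analysis step
# of the extended Kalman filter above applies the classical update equations
#     K  = P H' (H P H' + R)^-1
#     Xa = Xf + K (y - H(Xf))
#     Pa = P - K H P
# The sketch below reproduces one such update with plain numpy arrays; the
# names (Xf, Pf, Hfun, Ht, ...) are local illustrations, not identifiers of
# this module.
def _ekf_update_sketch( Xf, Pf, y, Hfun, Ht, R ):
    "One hypothetical EKF analysis update, for illustration only."
    import numpy
    innovation = numpy.ravel(y) - numpy.ravel(Hfun(Xf))
    S  = Ht @ Pf @ Ht.T + R                     # innovation covariance
    K  = Pf @ Ht.T @ numpy.linalg.inv(S)        # Kalman gain
    Xa = numpy.ravel(Xf) + K @ innovation       # analysis state
    Pa = Pf - K @ Ht @ Pf                       # analysis covariance
    return Xa, Pa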
1623 # ==============================================================================
1624 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1625 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1626 """
1627 Iterative Ensemble Kalman Filter (IEnKF)
1628 """
1629 if selfA._parameters["EstimationOf"] == "Parameters":
1630 selfA._parameters["StoreInternalVariables"] = True
1633 H = HO["Direct"].appliedControledFormTo
1635 if selfA._parameters["EstimationOf"] == "State":
1636 M = EM["Direct"].appliedControledFormTo
1638 if CM is not None and "Tangent" in CM and U is not None:
1639 Cm = CM["Tangent"].asMatrix(Xb)
1640 else:
1641 Cm = None
1643 # Observation duration and sizes
1644 if hasattr(Y,"stepnumber"):
1645 duration = Y.stepnumber()
1646 __p = numpy.cumprod(Y.shape())[-1]
1647 else:
1648 duration = 2
1649 __p = numpy.array(Y).size
1651 # Precompute the inverses of B and R
1652 if selfA._parameters["StoreInternalVariables"] \
1653 or selfA._toStore("CostFunctionJ") \
1654 or selfA._toStore("CostFunctionJb") \
1655 or selfA._toStore("CostFunctionJo") \
1656 or selfA._toStore("CurrentOptimum") \
1657 or selfA._toStore("APosterioriCovariance"):
1658 BI = B.getI()
1659 RI = R.getI()
1661 __n = Xb.size
1662 __m = selfA._parameters["NumberOfMembers"]
1664 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1665 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1666 else: Pn = B
1667 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1668 selfA.StoredVariables["Analysis"].store( Xb )
1669 if selfA._toStore("APosterioriCovariance"):
1670 if hasattr(B,"asfullmatrix"):
1671 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1672 else:
1673 selfA.StoredVariables["APosterioriCovariance"].store( B )
1674 selfA._setInternalState("seed", numpy.random.get_state())
1675 elif selfA._parameters["nextStep"]:
1676 Xn = selfA._getInternalState("Xn")
1678 previousJMinimum = numpy.finfo(float).max
1680 for step in range(duration-1):
1681 numpy.random.set_state(selfA._getInternalState("seed"))
1682 if hasattr(Y,"store"):
1683 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1684 else:
1685 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1687 if U is not None:
1688 if hasattr(U,"store") and len(U)>1:
1689 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1690 elif hasattr(U,"store") and len(U)==1:
1691 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1692 else:
1693 Un = numpy.asmatrix(numpy.ravel( U )).T
1694 else:
1695 Un = None
1697 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1698 Xn = CovarianceInflation( Xn,
1699 selfA._parameters["InflationType"],
1700 selfA._parameters["InflationFactor"],
1701 )
1703 #--------------------------
1704 if VariantM == "IEnKF12":
1705 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1706 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1707 __j = 0
1708 Deltaw = 1
1709 if not BnotT:
1710 Ta = numpy.identity(__m)
1711 vw = numpy.zeros(__m)
1712 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1713 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1715 if BnotT:
1716 E1 = vx1 + _epsilon * EaX
1717 else:
1718 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1720 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1721 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1722 argsAsSerie = True,
1723 returnSerieAsArrayMatrix = True )
1724 elif selfA._parameters["EstimationOf"] == "Parameters":
1725 # --- > By principle, M = Id
1726 E2 = Xn
1727 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1728 vy1 = H((vx2, Un)).reshape((__p,1))
1730 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1731 argsAsSerie = True,
1732 returnSerieAsArrayMatrix = True )
1733 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1735 if BnotT:
1736 EaY = (HE2 - vy2) / _epsilon
1737 else:
1738 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1740 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1741 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1742 Deltaw = - numpy.linalg.solve(mH,GradJ)
1743 vw = vw + Deltaw
1745 if not BnotT:
1747 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1749 __j = __j + 1
1751 A2 = EnsembleOfAnomalies( E2 )
1753 if BnotT:
1754 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1755 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1757 Xn = vx1 + A2
1758 #--------------------------
1759 else:
1760 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1762 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1763 Xn = CovarianceInflation( Xn,
1764 selfA._parameters["InflationType"],
1765 selfA._parameters["InflationFactor"],
1766 )
1768 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1769 #--------------------------
1770 selfA._setInternalState("Xn", Xn)
1771 selfA._setInternalState("seed", numpy.random.get_state())
1772 #--------------------------
1774 if selfA._parameters["StoreInternalVariables"] \
1775 or selfA._toStore("CostFunctionJ") \
1776 or selfA._toStore("CostFunctionJb") \
1777 or selfA._toStore("CostFunctionJo") \
1778 or selfA._toStore("APosterioriCovariance") \
1779 or selfA._toStore("InnovationAtCurrentAnalysis") \
1780 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1781 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1782 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1783 _Innovation = Ynpu - _HXa
1785 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1786 # ---> with the analysis
1787 selfA.StoredVariables["Analysis"].store( Xa )
1788 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1789 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1790 if selfA._toStore("InnovationAtCurrentAnalysis"):
1791 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1792 # ---> with the current state
1793 if selfA._parameters["StoreInternalVariables"] \
1794 or selfA._toStore("CurrentState"):
1795 selfA.StoredVariables["CurrentState"].store( Xn )
1796 if selfA._toStore("ForecastState"):
1797 selfA.StoredVariables["ForecastState"].store( E2 )
1798 if selfA._toStore("ForecastCovariance"):
1799 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1800 if selfA._toStore("BMA"):
1801 selfA.StoredVariables["BMA"].store( E2 - Xa )
1802 if selfA._toStore("InnovationAtCurrentState"):
1803 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1804 if selfA._toStore("SimulatedObservationAtCurrentState") \
1805 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1806 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1808 if selfA._parameters["StoreInternalVariables"] \
1809 or selfA._toStore("CostFunctionJ") \
1810 or selfA._toStore("CostFunctionJb") \
1811 or selfA._toStore("CostFunctionJo") \
1812 or selfA._toStore("CurrentOptimum") \
1813 or selfA._toStore("APosterioriCovariance"):
1814 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1815 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1816 J = Jb + Jo
1817 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1818 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1819 selfA.StoredVariables["CostFunctionJ" ].store( J )
1821 if selfA._toStore("IndexOfOptimum") \
1822 or selfA._toStore("CurrentOptimum") \
1823 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1824 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1825 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1826 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1827 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1828 if selfA._toStore("IndexOfOptimum"):
1829 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1830 if selfA._toStore("CurrentOptimum"):
1831 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1832 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1833 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1834 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1835 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1836 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1837 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1838 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1839 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1840 if selfA._toStore("APosterioriCovariance"):
1841 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1842 if selfA._parameters["EstimationOf"] == "Parameters" \
1843 and J < previousJMinimum:
1844 previousJMinimum = J
1845 XaMin = Xa
1846 if selfA._toStore("APosterioriCovariance"):
1847 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1848 # ---> For the smoothers
1849 if selfA._toStore("CurrentEnsembleState"):
1850 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1852 # Additional final storage of the optimum for parameter estimation
1853 # ----------------------------------------------------------------------
1854 if selfA._parameters["EstimationOf"] == "Parameters":
1855 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1856 selfA.StoredVariables["Analysis"].store( XaMin )
1857 if selfA._toStore("APosterioriCovariance"):
1858 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1859 if selfA._toStore("BMA"):
1860 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1862 return 0
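#
# Illustrative note (not part of the original ADAO source): each inner
# iteration of the IEnKF above is a Gauss-Newton step on the weight vector w
# of the state expressed in the anomaly basis, x = xf + EaX w. For the cost in
# weight space, grad = w - EaY' R^-1 (y - H(x)) and Hess ~= I + EaY' R^-1 EaY,
# and the increment solves Hess * dw = -grad, exactly as Deltaw above. A
# minimal sketch under assumed shapes (EaY: p x m, Ri = R^-1: p x p):
def _ienkf_newton_step_sketch( w, EaY, Ri, misfit ):
    "One hypothetical Gauss-Newton step in ensemble weight space."
    import numpy
    m     = w.size
    gradJ = w - EaY.T @ (Ri @ misfit)                 # gradient of the weight-space cost
    hessJ = numpy.identity(m) + EaY.T @ (Ri @ EaY)    # approximate Hessian
    dw    = - numpy.linalg.solve(hessJ, gradJ)        # Newton increment on the weights
    return w + dw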
1864 # ==============================================================================
1865 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1866 """
1867 Incremental 3DVAR
1868 """
1873 # Non-linear operator for the outer loop
1874 Hm = HO["Direct"].appliedTo
1876 # Precompute the inverses of B and R
1877 BI = B.getI()
1878 RI = R.getI()
1880 # Starting point of the optimization
1881 Xini = selfA._parameters["InitializationPoint"]
1883 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1884 Innovation = Y - HXb
1886 # Outer loop
1888 iOuter = 0
1889 J = 1./mpr
1890 DeltaJ = 1./mpr
1891 Xr = Xini.reshape((-1,1))
1892 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1896 Ht = HO["Tangent"].asMatrix(Xr)
1897 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1899 # Definition of the cost function
1900 # ------------------------------
1901 def CostFunction(dx):
1902 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1903 if selfA._parameters["StoreInternalVariables"] or \
1904 selfA._toStore("CurrentState") or \
1905 selfA._toStore("CurrentOptimum"):
1906 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1907 _HdX = Ht @ _dX
1908 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1909 _dInnovation = Innovation - _HdX
1910 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1911 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1912 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1913 if selfA._toStore("InnovationAtCurrentState"):
1914 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1916 Jb = float( 0.5 * _dX.T * BI * _dX )
1917 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1918 J = Jb + Jo
1920 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1921 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1922 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1923 selfA.StoredVariables["CostFunctionJ" ].store( J )
1924 if selfA._toStore("IndexOfOptimum") or \
1925 selfA._toStore("CurrentOptimum") or \
1926 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1927 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1928 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1929 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1930 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1931 if selfA._toStore("IndexOfOptimum"):
1932 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1933 if selfA._toStore("CurrentOptimum"):
1934 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1935 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1936 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1937 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1938 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1939 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1940 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1941 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1942 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1943 return J
1945 def GradientOfCostFunction(dx):
1946 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1947 _HdX = Ht @ _dX
1948 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1949 _dInnovation = Innovation - _HdX
1950 GradJb = BI * _dX
1951 GradJo = - Ht.T @ (RI * _dInnovation)
1952 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1953 return GradJ
1955 # Minimization of the cost functional
1956 # --------------------------------
1957 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1959 if selfA._parameters["Minimizer"] == "LBFGSB":
1960 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1961 if "0.19" <= scipy.version.version <= "1.1.0":
1962 import lbfgsbhlt as optimiseur
1963 else:
1964 import scipy.optimize as optimiseur
1965 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1966 func = CostFunction,
1967 x0 = numpy.zeros(Xini.size),
1968 fprime = GradientOfCostFunction,
1969 args = (),
1970 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1971 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1972 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1973 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1974 iprint = selfA._parameters["optiprint"],
1975 )
1976 nfeval = Informations['funcalls']
1977 rc = Informations['warnflag']
1978 elif selfA._parameters["Minimizer"] == "TNC":
1979 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1980 func = CostFunction,
1981 x0 = numpy.zeros(Xini.size),
1982 fprime = GradientOfCostFunction,
1983 args = (),
1984 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1985 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1986 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1987 ftol = selfA._parameters["CostDecrementTolerance"],
1988 messages = selfA._parameters["optmessages"],
1989 )
1990 elif selfA._parameters["Minimizer"] == "CG":
1991 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1992 f = CostFunction,
1993 x0 = numpy.zeros(Xini.size),
1994 fprime = GradientOfCostFunction,
1995 args = (),
1996 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1997 gtol = selfA._parameters["GradientNormTolerance"],
1998 disp = selfA._parameters["optdisp"],
1999 full_output = True,
2000 )
2001 elif selfA._parameters["Minimizer"] == "NCG":
2002 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2003 f = CostFunction,
2004 x0 = numpy.zeros(Xini.size),
2005 fprime = GradientOfCostFunction,
2006 args = (),
2007 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2008 avextol = selfA._parameters["CostDecrementTolerance"],
2009 disp = selfA._parameters["optdisp"],
2010 full_output = True,
2011 )
2012 elif selfA._parameters["Minimizer"] == "BFGS":
2013 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2014 f = CostFunction,
2015 x0 = numpy.zeros(Xini.size),
2016 fprime = GradientOfCostFunction,
2017 args = (),
2018 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2019 gtol = selfA._parameters["GradientNormTolerance"],
2020 disp = selfA._parameters["optdisp"],
2021 full_output = True,
2022 )
2023 else:
2024 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2026 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2027 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2029 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2030 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2031 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2032 else:
2033 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
2035 Xr = Minimum
2036 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2037 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2039 # Retrieval of the analysis
2040 # --------------------------
2041 Xa = Xr
2043 selfA.StoredVariables["Analysis"].store( Xa )
2045 if selfA._toStore("OMA") or \
2046 selfA._toStore("SigmaObs2") or \
2047 selfA._toStore("SimulationQuantiles") or \
2048 selfA._toStore("SimulatedObservationAtOptimum"):
2049 if selfA._toStore("SimulatedObservationAtCurrentState"):
2050 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2051 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2052 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2053 else:
2054 HXa = Hm( Xa )
2056 # Computation of the analysis covariance
2057 # ---------------------------------
2058 if selfA._toStore("APosterioriCovariance") or \
2059 selfA._toStore("SimulationQuantiles") or \
2060 selfA._toStore("JacobianMatrixAtOptimum") or \
2061 selfA._toStore("KalmanGainAtOptimum"):
2062 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2063 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2064 if selfA._toStore("APosterioriCovariance") or \
2065 selfA._toStore("SimulationQuantiles") or \
2066 selfA._toStore("KalmanGainAtOptimum"):
2067 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2068 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2069 if selfA._toStore("APosterioriCovariance") or \
2070 selfA._toStore("SimulationQuantiles"):
2071 HessienneI = []
2072 nb = Xa.size
2073 for i in range(nb):
2074 _ee = numpy.matrix(numpy.zeros(nb)).T
2075 _ee[i] = 1.
2076 _HtEE = numpy.dot(HtM,_ee)
2077 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2078 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2079 HessienneI = numpy.matrix( HessienneI )
2080 A = HessienneI.I
2081 if min(A.shape) != max(A.shape):
2082 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2083 if (numpy.diag(A) < 0).any():
2084 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2085 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2086 try:
2087 L = numpy.linalg.cholesky( A )
2088 except Exception:
2089 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2090 if selfA._toStore("APosterioriCovariance"):
2091 selfA.StoredVariables["APosterioriCovariance"].store( A )
2092 if selfA._toStore("JacobianMatrixAtOptimum"):
2093 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2094 if selfA._toStore("KalmanGainAtOptimum"):
2095 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2096 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2097 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2099 # Additional computations and/or storage
2100 # ---------------------------------------
2101 if selfA._toStore("Innovation") or \
2102 selfA._toStore("SigmaObs2") or \
2103 selfA._toStore("MahalanobisConsistency") or \
2104 selfA._toStore("OMB"):
2105 d = Y - HXb
2106 if selfA._toStore("Innovation"):
2107 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2108 if selfA._toStore("BMA"):
2109 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2110 if selfA._toStore("OMA"):
2111 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2112 if selfA._toStore("OMB"):
2113 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2114 if selfA._toStore("SigmaObs2"):
2115 TraceR = R.trace(Y.size)
2116 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2117 if selfA._toStore("MahalanobisConsistency"):
2118 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2119 if selfA._toStore("SimulationQuantiles"):
2120 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2121 if selfA._toStore("SimulatedObservationAtBackground"):
2122 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2123 if selfA._toStore("SimulatedObservationAtOptimum"):
2124 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2126 return 0
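#
# Illustrative note (not part of the original ADAO source): each outer loop of
# the incremental 3DVAR above relinearizes H around the current iterate Xr and
# minimizes a quadratic cost in the increment dx,
#     J(dx) = 1/2 dx' B^-1 dx + 1/2 (d - Ht dx)' R^-1 (d - Ht dx),
# where d = y - H(Xb) is the fixed outer innovation. A minimal sketch of this
# inner quadratic cost and its gradient, with names local to the illustration:
def _incremental_cost_sketch( dx, d, Ht, Bi, Ri ):
    "Hypothetical inner-loop quadratic cost and gradient of incremental 3DVAR."
    import numpy
    dx = numpy.ravel(dx)
    r  = numpy.ravel(d) - Ht @ dx                    # linearized innovation residual
    J  = 0.5 * dx @ (Bi @ dx) + 0.5 * r @ (Ri @ r)   # background + observation terms
    gradJ = Bi @ dx - Ht.T @ (Ri @ r)                # gradient used by the minimizer
    return J, gradJ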
2128 # ==============================================================================
2129 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2130 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2131 """
2132 Maximum Likelihood Ensemble Filter
2133 """
2134 if selfA._parameters["EstimationOf"] == "Parameters":
2135 selfA._parameters["StoreInternalVariables"] = True
2138 H = HO["Direct"].appliedControledFormTo
2140 if selfA._parameters["EstimationOf"] == "State":
2141 M = EM["Direct"].appliedControledFormTo
2143 if CM is not None and "Tangent" in CM and U is not None:
2144 Cm = CM["Tangent"].asMatrix(Xb)
2145 else:
2146 Cm = None
2148 # Observation duration and sizes
2149 if hasattr(Y,"stepnumber"):
2150 duration = Y.stepnumber()
2151 __p = numpy.cumprod(Y.shape())[-1]
2152 else:
2153 duration = 2
2154 __p = numpy.array(Y).size
2156 # Precompute the inverses of B and R
2157 if selfA._parameters["StoreInternalVariables"] \
2158 or selfA._toStore("CostFunctionJ") \
2159 or selfA._toStore("CostFunctionJb") \
2160 or selfA._toStore("CostFunctionJo") \
2161 or selfA._toStore("CurrentOptimum") \
2162 or selfA._toStore("APosterioriCovariance"):
2163 BI = B.getI()
2164 RI = R.getI()
2166 __n = Xb.size
2167 __m = selfA._parameters["NumberOfMembers"]
2169 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2170 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2171 selfA.StoredVariables["Analysis"].store( Xb )
2172 if selfA._toStore("APosterioriCovariance"):
2173 if hasattr(B,"asfullmatrix"):
2174 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2175 else:
2176 selfA.StoredVariables["APosterioriCovariance"].store( B )
2177 selfA._setInternalState("seed", numpy.random.get_state())
2178 elif selfA._parameters["nextStep"]:
2179 Xn = selfA._getInternalState("Xn")
2181 previousJMinimum = numpy.finfo(float).max
2183 for step in range(duration-1):
2184 numpy.random.set_state(selfA._getInternalState("seed"))
2185 if hasattr(Y,"store"):
2186 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2187 else:
2188 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2190 if U is not None:
2191 if hasattr(U,"store") and len(U)>1:
2192 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2193 elif hasattr(U,"store") and len(U)==1:
2194 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2195 else:
2196 Un = numpy.asmatrix(numpy.ravel( U )).T
2197 else:
2198 Un = None
2200 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2201 Xn = CovarianceInflation( Xn,
2202 selfA._parameters["InflationType"],
2203 selfA._parameters["InflationFactor"],
2204 )
2206 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2207 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2208 argsAsSerie = True,
2209 returnSerieAsArrayMatrix = True )
2210 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2211 if Cm is not None and Un is not None: # Beware: if Cm is also embedded in M, it is applied twice!
2212 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2213 Xn_predicted = Xn_predicted + Cm * Un
2214 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2215 # --- > By principle, M = Id, Q = 0
2216 Xn_predicted = EMX = Xn
2218 #--------------------------
2219 if VariantM == "MLEF13":
2220 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2221 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2222 Ua = numpy.identity(__m)
2223 __j = 0
2224 Deltaw = 1
2225 if not BnotT:
2226 Ta = numpy.identity(__m)
2227 vw = numpy.zeros(__m)
2228 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2229 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2231 if BnotT:
2232 E1 = vx1 + _epsilon * EaX
2233 else:
2234 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2236 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2237 argsAsSerie = True,
2238 returnSerieAsArrayMatrix = True )
2239 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2241 if BnotT:
2242 EaY = (HE2 - vy2) / _epsilon
2243 else:
2244 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2246 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2247 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2248 Deltaw = - numpy.linalg.solve(mH,GradJ)
2249 vw = vw + Deltaw
2251 if not BnotT:
2253 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2255 __j = __j + 1
2257 if BnotT:
2258 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2260 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2261 #--------------------------
2262 else:
2263 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2265 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2266 Xn = CovarianceInflation( Xn,
2267 selfA._parameters["InflationType"],
2268 selfA._parameters["InflationFactor"],
2269 )
2271 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2272 #--------------------------
2273 selfA._setInternalState("Xn", Xn)
2274 selfA._setInternalState("seed", numpy.random.get_state())
2275 #--------------------------
2277 if selfA._parameters["StoreInternalVariables"] \
2278 or selfA._toStore("CostFunctionJ") \
2279 or selfA._toStore("CostFunctionJb") \
2280 or selfA._toStore("CostFunctionJo") \
2281 or selfA._toStore("APosterioriCovariance") \
2282 or selfA._toStore("InnovationAtCurrentAnalysis") \
2283 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2284 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2285 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2286 _Innovation = Ynpu - _HXa
2288 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2289 # ---> with the analysis
2290 selfA.StoredVariables["Analysis"].store( Xa )
2291 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2292 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2293 if selfA._toStore("InnovationAtCurrentAnalysis"):
2294 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2295 # ---> with the current state
2296 if selfA._parameters["StoreInternalVariables"] \
2297 or selfA._toStore("CurrentState"):
2298 selfA.StoredVariables["CurrentState"].store( Xn )
2299 if selfA._toStore("ForecastState"):
2300 selfA.StoredVariables["ForecastState"].store( EMX )
2301 if selfA._toStore("ForecastCovariance"):
2302 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2303 if selfA._toStore("BMA"):
2304 selfA.StoredVariables["BMA"].store( EMX - Xa )
2305 if selfA._toStore("InnovationAtCurrentState"):
2306 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2307 if selfA._toStore("SimulatedObservationAtCurrentState") \
2308 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2309 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2311 if selfA._parameters["StoreInternalVariables"] \
2312 or selfA._toStore("CostFunctionJ") \
2313 or selfA._toStore("CostFunctionJb") \
2314 or selfA._toStore("CostFunctionJo") \
2315 or selfA._toStore("CurrentOptimum") \
2316 or selfA._toStore("APosterioriCovariance"):
2317 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2318 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2319 J = Jb + Jo
2320 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2321 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2322 selfA.StoredVariables["CostFunctionJ" ].store( J )
2324 if selfA._toStore("IndexOfOptimum") \
2325 or selfA._toStore("CurrentOptimum") \
2326 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2327 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2328 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2329 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2330 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2331 if selfA._toStore("IndexOfOptimum"):
2332 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2333 if selfA._toStore("CurrentOptimum"):
2334 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2335 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2336 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2337 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2338 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2339 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2340 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2341 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2342 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2343 if selfA._toStore("APosterioriCovariance"):
2344 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2345 if selfA._parameters["EstimationOf"] == "Parameters" \
2346 and J < previousJMinimum:
2347 previousJMinimum = J
2348 XaMin = Xa
2349 if selfA._toStore("APosterioriCovariance"):
2350 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2351 # ---> For the smoothers
2352 if selfA._toStore("CurrentEnsembleState"):
2353 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2355 # Additional final storage of the optimum for parameter estimation
2356 # ----------------------------------------------------------------------
2357 if selfA._parameters["EstimationOf"] == "Parameters":
2358 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2359 selfA.StoredVariables["Analysis"].store( XaMin )
2360 if selfA._toStore("APosterioriCovariance"):
2361 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2362 if selfA._toStore("BMA"):
2363 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2365 return 0
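#
# Illustrative note (not part of the original ADAO source): the "BnotT" switch
# of the MLEF above selects how observation anomalies EaY are estimated around
# the current mean: the bundle variant rescales a small finite-difference
# ensemble E1 = x + epsilon*EaX and divides by epsilon, while the transform
# variant maps the full ensemble through the transform matrix Ta. A schematic
# sketch under assumed shapes (EaX: n x m, Hfun applied column by column):
def _mlef_obs_anomalies_sketch( Hfun, vx1, EaX, Ta, bundle=False, epsilon=1.e-3 ):
    "Hypothetical estimation of observation-space anomalies, bundle or transform."
    import math, numpy
    m = EaX.shape[1]
    if bundle:
        E1 = vx1 + epsilon * EaX                      # small finite-difference perturbations
    else:
        E1 = vx1 + math.sqrt(m-1) * EaX @ Ta          # transformed ensemble
    HE = numpy.column_stack([ numpy.ravel(Hfun(E1[:,i])) for i in range(m) ])
    vy = HE.mean(axis=1, keepdims=True)
    if bundle:
        return (HE - vy) / epsilon
    else:
        return (HE - vy) @ numpy.linalg.inv(Ta) / math.sqrt(m-1)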
2367 # ==============================================================================
2368 def mmqr(
2369 func = None,
2370 x0 = None,
2371 fprime = None,
2372 bounds = None,
2373 quantile = 0.5,
2374 maxfun = 15000,
2375 toler = 1.e-06,
2376 y = None,
2377 ):
2378 """
2379 Computer implementation of the MMQR algorithm, based on the publication:
2380 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2381 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2382 """
2384 # Retrieval of the initial data and information
2385 # --------------------------------------------------
2386 variables = numpy.ravel( x0 )
2387 mesures = numpy.ravel( y )
2388 increment = sys.float_info[0]
2389 p = variables.size
2390 n = mesures.size
2391 quantile = float(quantile)
2393 # Computation of the MM parameters
2394 # ---------------------------
2395 tn = float(toler) / n
2396 e0 = -tn / math.log(tn)
2397 epsilon = (e0-tn)/(1+math.log(e0))
2399 # Initialization computations
2400 # ------------------------
2401 residus = mesures - numpy.ravel( func( variables ) )
2402 poids = 1./(epsilon+numpy.abs(residus))
2403 veps = 1. - 2. * quantile - residus * poids
2404 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2405 iteration = 0
2407 # Iterative search
2408 # ----------------
2409 while (increment > toler) and (iteration < maxfun) :
2410 iteration += 1
2412 Derivees = numpy.array(fprime(variables))
2413 Derivees = Derivees.reshape(n,p) # Needed to reshape the matrix if it went through YACS pipes
2414 DeriveesT = Derivees.transpose()
2415 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2416 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2417 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2419 variables = variables + step
2420 if bounds is not None:
2421 # Beware: avoid an infinite loop if an interval is too small
2422 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2423 step = step/2.
2424 variables = variables - step
2425 residus = mesures - numpy.ravel( func(variables) )
2426 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2428 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2429 step = step/2.
2430 variables = variables - step
2431 residus = mesures - numpy.ravel( func(variables) )
2432 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2434 increment = lastsurrogate-surrogate
2435 poids = 1./(epsilon+numpy.abs(residus))
2436 veps = 1. - 2. * quantile - residus * poids
2437 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2441 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2443 return variables, Ecart, [n,p,iteration,increment,0]
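#
# Illustrative note (not part of the original ADAO source): the MMQR iteration
# above majorizes the (smoothed) quantile check loss
#     rho_q(r) = (q - 1_{r<0}) * r
# by a weighted least-squares surrogate with weights 1/(epsilon+|r|), which is
# why each step solves a weighted normal system. A sketch of the loss and of
# the weight update used between two iterations (names are illustrative):
def _quantile_weights_sketch( residuals, quantile=0.5, epsilon=1.e-6 ):
    "Hypothetical check-loss value and MM weights for one set of residuals."
    import numpy
    r   = numpy.ravel(residuals)
    rho = numpy.sum( (quantile - (r < 0)) * r )    # quantile check loss
    w   = 1. / (epsilon + numpy.abs(r))            # MM majorization weights
    return rho, w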
2445 # ==============================================================================
2446 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2447 """
2448 Multi-step and multi-method 3DVAR
2449 """
2452 if selfA._parameters["EstimationOf"] == "State":
2453 M = EM["Direct"].appliedTo
2455 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2456 Xn = numpy.ravel(Xb).reshape((-1,1))
2457 selfA.StoredVariables["Analysis"].store( Xn )
2458 if selfA._toStore("APosterioriCovariance"):
2459 if hasattr(B,"asfullmatrix"):
2460 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2462 selfA.StoredVariables["APosterioriCovariance"].store( B )
2463 if selfA._toStore("ForecastState"):
2464 selfA.StoredVariables["ForecastState"].store( Xn )
2465 elif selfA._parameters["nextStep"]:
2466 Xn = selfA._getInternalState("Xn")
2467 else:
2468 Xn = numpy.ravel(Xb).reshape((-1,1))
2470 if hasattr(Y,"stepnumber"):
2471 duration = Y.stepnumber()
2472 else:
2473 duration = 2
2476 for step in range(duration-1):
2477 if hasattr(Y,"store"):
2478 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2479 else:
2480 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2482 if selfA._parameters["EstimationOf"] == "State": # Forecast
2483 Xn_predicted = M( Xn )
2484 if selfA._toStore("ForecastState"):
2485 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2486 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2487 # --- > By principle, M = Id, Q = 0
2488 Xn_predicted = Xn
2489 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2491 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2493 Xn = selfA.StoredVariables["Analysis"][-1]
2494 #--------------------------
2495 selfA._setInternalState("Xn", Xn)
2497 return 0
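#
# Illustrative note (not part of the original ADAO source): the driver above
# simply alternates a model forecast with one single-time assimilation cycle
# delegated to "oneCycle". Schematically, for each new observation slice y_k,
# it computes x_f = M(x_a) then x_a = oneCycle(x_f, y_k, ...). A minimal
# sketch with hypothetical callables:
def _multistep_driver_sketch( x0, observations, Mfun, oneCycleFun ):
    "Hypothetical forecast/analysis alternation over an observation series."
    import numpy
    xa = numpy.ravel(x0)
    for y in observations:
        xf = numpy.ravel(Mfun(xa))             # model forecast to the observation time
        xa = numpy.ravel(oneCycleFun(xf, y))   # one single-step assimilation cycle
    return xa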
2499 # ==============================================================================
2500 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2501 """
2502 3DVAR PSAS
2503 """
2509 Hm = HO["Direct"].appliedTo
2511 # Possible use of a precomputed H(Xb) vector
2512 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2513 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2514 else:
2515 HXb = Hm( Xb )
2516 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2517 if Y.size != HXb.size:
2518 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2519 if max(Y.shape) != max(HXb.shape):
2520 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2522 if selfA._toStore("JacobianMatrixAtBackground"):
2523 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2524 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2525 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2527 Ht = HO["Tangent"].asMatrix(Xb)
2528 BHT = B * Ht.T
2529 HBHTpR = R + Ht * BHT
2530 Innovation = Y - HXb
2532 # Starting point of the optimization
2533 Xini = numpy.zeros(Xb.shape)
2535 # Definition of the cost function
2536 # ------------------------------
2537 def CostFunction(w):
2538 _W = numpy.asmatrix(numpy.ravel( w )).T
2539 if selfA._parameters["StoreInternalVariables"] or \
2540 selfA._toStore("CurrentState") or \
2541 selfA._toStore("CurrentOptimum"):
2542 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2543 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2544 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2545 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2546 if selfA._toStore("InnovationAtCurrentState"):
2547 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2549 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2550 Jo = float( - _W.T * Innovation )
2551 J = Jb + Jo
2553 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2554 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2555 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2556 selfA.StoredVariables["CostFunctionJ" ].store( J )
2557 if selfA._toStore("IndexOfOptimum") or \
2558 selfA._toStore("CurrentOptimum") or \
2559 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2560 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2561 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2562 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2563 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2564 if selfA._toStore("IndexOfOptimum"):
2565 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2566 if selfA._toStore("CurrentOptimum"):
2567 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2568 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2569 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2570 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2571 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2572 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2573 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2574 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2575 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2576 return J
2578 def GradientOfCostFunction(w):
2579 _W = numpy.asmatrix(numpy.ravel( w )).T
2580 GradJb = HBHTpR * _W
2581 GradJo = - Innovation
2582 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2583 return GradJ
2585 # Minimization of the cost functional
2586 # --------------------------------
2587 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2589 if selfA._parameters["Minimizer"] == "LBFGSB":
2590 if "0.19" <= scipy.version.version <= "1.1.0":
2591 import lbfgsbhlt as optimiseur
2592 else:
2593 import scipy.optimize as optimiseur
2594 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2595 func = CostFunction,
2596 x0 = Xini,
2597 fprime = GradientOfCostFunction,
2598 args = (),
2599 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2600 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2601 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2602 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2603 iprint = selfA._parameters["optiprint"],
2604 )
2605 nfeval = Informations['funcalls']
2606 rc = Informations['warnflag']
2607 elif selfA._parameters["Minimizer"] == "TNC":
2608 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2609 func = CostFunction,
2610 x0 = Xini,
2611 fprime = GradientOfCostFunction,
2612 args = (),
2613 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2614 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2615 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2616 ftol = selfA._parameters["CostDecrementTolerance"],
2617 messages = selfA._parameters["optmessages"],
2618 )
2619 elif selfA._parameters["Minimizer"] == "CG":
2620 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2621 f = CostFunction,
2622 x0 = Xini,
2623 fprime = GradientOfCostFunction,
2624 args = (),
2625 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2626 gtol = selfA._parameters["GradientNormTolerance"],
2627 disp = selfA._parameters["optdisp"],
2628 full_output = True,
2629 )
2630 elif selfA._parameters["Minimizer"] == "NCG":
2631 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2632 f = CostFunction,
2633 x0 = Xini,
2634 fprime = GradientOfCostFunction,
2635 args = (),
2636 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2637 avextol = selfA._parameters["CostDecrementTolerance"],
2638 disp = selfA._parameters["optdisp"],
2639 full_output = True,
2640 )
2641 elif selfA._parameters["Minimizer"] == "BFGS":
2642 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2643 f = CostFunction,
2644 x0 = Xini,
2645 fprime = GradientOfCostFunction,
2646 args = (),
2647 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2648 gtol = selfA._parameters["GradientNormTolerance"],
2649 disp = selfA._parameters["optdisp"],
2650 full_output = True,
2651 )
2652 else:
2653 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2655 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2656 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2658 # Workaround for a TNC bug in the returned Minimum
2659 # ----------------------------------------------------------------
2660 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2661 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2662 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2663 else:
2664 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2666 # Retrieval of the analysis
2667 # --------------------------
2668 Xa = Minimum
2670 selfA.StoredVariables["Analysis"].store( Xa )
2672 if selfA._toStore("OMA") or \
2673 selfA._toStore("SigmaObs2") or \
2674 selfA._toStore("SimulationQuantiles") or \
2675 selfA._toStore("SimulatedObservationAtOptimum"):
2676 if selfA._toStore("SimulatedObservationAtCurrentState"):
2677 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2678 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2679 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2680 else:
2681 HXa = Hm( Xa )
2683 # Computation of the analysis covariance
2684 # ---------------------------------
2685 if selfA._toStore("APosterioriCovariance") or \
2686 selfA._toStore("SimulationQuantiles") or \
2687 selfA._toStore("JacobianMatrixAtOptimum") or \
2688 selfA._toStore("KalmanGainAtOptimum"):
2689 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2690 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2691 if selfA._toStore("APosterioriCovariance") or \
2692 selfA._toStore("SimulationQuantiles") or \
2693 selfA._toStore("KalmanGainAtOptimum"):
2694 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2695 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2696 if selfA._toStore("APosterioriCovariance") or \
2697 selfA._toStore("SimulationQuantiles"):
2698 BI = B.getI()
2699 RI = R.getI()
2700 HessienneI = []
2701 nb = Xa.size
2702 for i in range(nb):
2703 _ee = numpy.matrix(numpy.zeros(nb)).T
2704 _ee[i] = 1.
2705 _HtEE = numpy.dot(HtM,_ee)
2706 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2707 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2708 HessienneI = numpy.matrix( HessienneI )
2709 A = HessienneI.I
2710 if min(A.shape) != max(A.shape):
2711 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2712 if (numpy.diag(A) < 0).any():
2713 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2714 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2715 try:
2716 L = numpy.linalg.cholesky( A )
2717 except Exception:
2718 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2719 if selfA._toStore("APosterioriCovariance"):
2720 selfA.StoredVariables["APosterioriCovariance"].store( A )
2721 if selfA._toStore("JacobianMatrixAtOptimum"):
2722 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2723 if selfA._toStore("KalmanGainAtOptimum"):
2724 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2725 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2726 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2728 # Additional computations and/or storage
2729 # ---------------------------------------
2730 if selfA._toStore("Innovation") or \
2731 selfA._toStore("SigmaObs2") or \
2732 selfA._toStore("MahalanobisConsistency") or \
2733 selfA._toStore("OMB"):
2734 d = Y - HXb
2735 if selfA._toStore("Innovation"):
2736 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2737 if selfA._toStore("BMA"):
2738 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2739 if selfA._toStore("OMA"):
2740 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2741 if selfA._toStore("OMB"):
2742 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2743 if selfA._toStore("SigmaObs2"):
2744 TraceR = R.trace(Y.size)
2745 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2746 if selfA._toStore("MahalanobisConsistency"):
2747 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2748 if selfA._toStore("SimulationQuantiles"):
2749 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2750 if selfA._toStore("SimulatedObservationAtBackground"):
2751 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2752 if selfA._toStore("SimulatedObservationAtOptimum"):
2753 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2755 return 0
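#
# Illustrative note (not part of the original ADAO source): PSAS works in the
# dual (observation) space. With the linearized operator H it minimizes over w
#     J(w) = 1/2 w' (H B H' + R) w - w' d,      d = y - H(Xb),
# whose optimum solves (H B H' + R) w = d, mapped back to the state space as
#     Xa = Xb + B H' w,
# which matches the "Xb + BHT * _W" expressions above. A minimal dense sketch:
def _psas_analysis_sketch( Xb, d, B, Ht, R ):
    "Hypothetical direct solution of the PSAS dual problem (dense matrices)."
    import numpy
    BHT = B @ Ht.T                                           # B H'
    w   = numpy.linalg.solve(Ht @ BHT + R, numpy.ravel(d))   # dual-space optimum
    return numpy.ravel(Xb) + BHT @ w                         # analysis in state space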
2757 # ==============================================================================
2758 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2759 """
2760 Stochastic EnKF
2761 """
2762 if selfA._parameters["EstimationOf"] == "Parameters":
2763 selfA._parameters["StoreInternalVariables"] = True
2766 H = HO["Direct"].appliedControledFormTo
2768 if selfA._parameters["EstimationOf"] == "State":
2769 M = EM["Direct"].appliedControledFormTo
2771 if CM is not None and "Tangent" in CM and U is not None:
2772 Cm = CM["Tangent"].asMatrix(Xb)
2773 else:
2774 Cm = None
2776 # Observation duration and sizes
2777 if hasattr(Y,"stepnumber"):
2778 duration = Y.stepnumber()
2779 __p = numpy.cumprod(Y.shape())[-1]
2780 else:
2781 duration = 2
2782 __p = numpy.array(Y).size
2784 # Precompute the inverses of B and R
2785 if selfA._parameters["StoreInternalVariables"] \
2786 or selfA._toStore("CostFunctionJ") \
2787 or selfA._toStore("CostFunctionJb") \
2788 or selfA._toStore("CostFunctionJo") \
2789 or selfA._toStore("CurrentOptimum") \
2790 or selfA._toStore("APosterioriCovariance"):
2791 BI = B.getI()
2792 RI = R.getI()
2794 __n = Xb.size
2795 __m = selfA._parameters["NumberOfMembers"]
2797 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2798 else: Rn = R
2800 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2801 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2802 else: Pn = B
2803 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2804 selfA.StoredVariables["Analysis"].store( Xb )
2805 if selfA._toStore("APosterioriCovariance"):
2806 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2807 selfA._setInternalState("seed", numpy.random.get_state())
2808 elif selfA._parameters["nextStep"]:
2809 Xn = selfA._getInternalState("Xn")
2811 previousJMinimum = numpy.finfo(float).max
2813 for step in range(duration-1):
2814 numpy.random.set_state(selfA._getInternalState("seed"))
2815 if hasattr(Y,"store"):
2816 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2817 else:
2818 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2820 if U is not None:
2821 if hasattr(U,"store") and len(U)>1:
2822 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2823 elif hasattr(U,"store") and len(U)==1:
2824 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2825 else:
2826 Un = numpy.asmatrix(numpy.ravel( U )).T
2827 else:
2828 Un = None
2830 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2831 Xn = CovarianceInflation( Xn,
2832 selfA._parameters["InflationType"],
2833 selfA._parameters["InflationFactor"],
2834 )
2836 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2837 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2838 argsAsSerie = True,
2839 returnSerieAsArrayMatrix = True )
2840 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2841 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2842 argsAsSerie = True,
2843 returnSerieAsArrayMatrix = True )
2844 if Cm is not None and Un is not None: # Beware: if Cm is also embedded in M, it is applied twice!
2845 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2846 Xn_predicted = Xn_predicted + Cm * Un
2847 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2848 # --- > By principle, M = Id, Q = 0
2849 Xn_predicted = EMX = Xn
2850 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2851 argsAsSerie = True,
2852 returnSerieAsArrayMatrix = True )
2854 # Mean of forecast and observation of forecast
2855 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2856 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2858 #--------------------------
2859 if VariantM == "KalmanFilterFormula05":
2860 PfHT, HPfHT = 0., 0.
2861 for i in range(__m):
2862 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2863 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2864 PfHT += Exfi * Eyfi.T
2865 HPfHT += Eyfi * Eyfi.T
2866 PfHT = (1./(__m-1)) * PfHT
2867 HPfHT = (1./(__m-1)) * HPfHT
2868 Kn = PfHT * ( R + HPfHT ).I
2871 for i in range(__m):
2872 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2873 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2874 #--------------------------
2875 elif VariantM == "KalmanFilterFormula16":
2876 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2877 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2879 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2880 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2882 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2884 for i in range(__m):
2885 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2886 #--------------------------
2887 else:
2888 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2890 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2891 Xn = CovarianceInflation( Xn,
2892 selfA._parameters["InflationType"],
2893 selfA._parameters["InflationFactor"],
2894 )
2896 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2897 #--------------------------
2898 selfA._setInternalState("Xn", Xn)
2899 selfA._setInternalState("seed", numpy.random.get_state())
2900 #--------------------------
2902 if selfA._parameters["StoreInternalVariables"] \
2903 or selfA._toStore("CostFunctionJ") \
2904 or selfA._toStore("CostFunctionJb") \
2905 or selfA._toStore("CostFunctionJo") \
2906 or selfA._toStore("APosterioriCovariance") \
2907 or selfA._toStore("InnovationAtCurrentAnalysis") \
2908 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2909 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2910 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2911 _Innovation = Ynpu - _HXa
2913 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2914 # ---> avec analysis
2915 selfA.StoredVariables["Analysis"].store( Xa )
2916 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2917 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2918 if selfA._toStore("InnovationAtCurrentAnalysis"):
2919 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2920 # ---> avec current state
2921 if selfA._parameters["StoreInternalVariables"] \
2922 or selfA._toStore("CurrentState"):
2923 selfA.StoredVariables["CurrentState"].store( Xn )
2924 if selfA._toStore("ForecastState"):
2925 selfA.StoredVariables["ForecastState"].store( EMX )
2926 if selfA._toStore("ForecastCovariance"):
2927 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2928 if selfA._toStore("BMA"):
2929 selfA.StoredVariables["BMA"].store( EMX - Xa )
2930 if selfA._toStore("InnovationAtCurrentState"):
2931 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2932 if selfA._toStore("SimulatedObservationAtCurrentState") \
2933 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2934 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2936 if selfA._parameters["StoreInternalVariables"] \
2937 or selfA._toStore("CostFunctionJ") \
2938 or selfA._toStore("CostFunctionJb") \
2939 or selfA._toStore("CostFunctionJo") \
2940 or selfA._toStore("CurrentOptimum") \
2941 or selfA._toStore("APosterioriCovariance"):
2942 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2943 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2945 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2946 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2947 selfA.StoredVariables["CostFunctionJ" ].store( J )
2949 if selfA._toStore("IndexOfOptimum") \
2950 or selfA._toStore("CurrentOptimum") \
2951 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2952 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2953 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2954 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2955 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2956 if selfA._toStore("IndexOfOptimum"):
2957 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2958 if selfA._toStore("CurrentOptimum"):
2959 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2960 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2961 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2962 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2963 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2964 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2965 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2966 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2967 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2968 if selfA._toStore("APosterioriCovariance"):
2969 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2970 if selfA._parameters["EstimationOf"] == "Parameters" \
2971 and J < previousJMinimum:
2972 previousJMinimum = J
2974 if selfA._toStore("APosterioriCovariance"):
2975 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2976 # ---> Pour les smoothers
2977 if selfA._toStore("CurrentEnsembleState"):
2978 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2980 # Stockage final supplémentaire de l'optimum en estimation de paramètres
2981 # ----------------------------------------------------------------------
2982 if selfA._parameters["EstimationOf"] == "Parameters":
2983 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2984 selfA.StoredVariables["Analysis"].store( XaMin )
2985 if selfA._toStore("APosterioriCovariance"):
2986 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2987 if selfA._toStore("BMA"):
2988 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
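#
# Illustrative sketch (not part of the ADAO API): the stochastic, perturbed-
# observation EnKF analysis step implemented above as "KalmanFilterFormula05",
# restated with plain NumPy arrays. All names are local to this sketch, and
# the observation operator is assumed linear and already applied to get HXf.
def _sketch_stochastic_enkf_update(Xf, HXf, Y, R):
    "Xf: states (n,m), HXf: observed states (p,m), Y: observation (p,), R: (p,p)."
    import numpy
    n, m = Xf.shape
    p = HXf.shape[0]
    # Anomalies around the ensemble means give the sampled covariances
    Exf = Xf - Xf.mean(axis=1, keepdims=True)
    Eyf = HXf - HXf.mean(axis=1, keepdims=True)
    PfHT  = Exf @ Eyf.T / (m - 1)
    HPfHT = Eyf @ Eyf.T / (m - 1)
    K = PfHT @ numpy.linalg.inv(R + HPfHT)
    # Each member is updated with its own perturbed copy of the observation
    Xa = numpy.empty_like(Xf)
    for i in range(m):
        ri = numpy.random.multivariate_normal(numpy.zeros(p), R)
        Xa[:,i] = Xf[:,i] + K @ (Y + ri - HXf[:,i])
    return Xa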
# ==============================================================================
def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR
    """
    #
    # Initializations
    # ---------------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Optional use of a precomputed H(Xb) vector
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = HXb.reshape((-1,1))
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    # Precompute the inversions of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(x):
        _X = numpy.ravel( x ).reshape((-1,1))
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X ).reshape((-1,1))
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
        Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
        J  = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X = numpy.ravel( x ).reshape((-1,1))
        _HX = Hm( _X )
        _HX = numpy.ravel( _HX ).reshape((-1,1))
        GradJb = BI * (_X - Xb)
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            bounds = selfA._parameters["Bounds"],
            maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            iprint = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            bounds = selfA._parameters["Bounds"],
            maxfun = selfA._parameters["MaximumNumberOfSteps"],
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            ftol = selfA._parameters["CostDecrementTolerance"],
            messages = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            avextol = selfA._parameters["CostDecrementTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    Xa = Minimum
    #--------------------------
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.zeros(nb)
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) )
        A = numpy.linalg.inv(numpy.array( HessienneI ))
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( d )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( d )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
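#
# Illustrative sketch (not part of the ADAO API): the 3DVAR cost function and
# gradient that std3dvar minimizes, for a linear observation operator given
# as a plain matrix H (an assumption of this sketch only):
#   J(x)     = 1/2 (x-xb)^T B^-1 (x-xb) + 1/2 (y-Hx)^T R^-1 (y-Hx)
#   gradJ(x) = B^-1 (x-xb) - H^T R^-1 (y-Hx)
def _sketch_3dvar_cost_and_gradient(x, xb, y, H, BI, RI):
    "BI and RI are the precomputed inverses of B and R, as in std3dvar."
    import numpy
    x, xb, y = map(numpy.ravel, (x, xb, y))
    innovation = y - H @ x
    Jb = 0.5 * float((x - xb) @ BI @ (x - xb))
    Jo = 0.5 * float(innovation @ RI @ innovation)
    gradJ = BI @ (x - xb) - H.T @ (RI @ innovation)
    return Jb + Jo, gradJ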
# ==============================================================================
def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    4DVAR
    """
    #
    # Initializations
    # ---------------
    Hm = HO["Direct"].appliedControledFormTo
    Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    def Un(_step):
        if U is not None:
            if hasattr(U,"store") and 1<=_step<len(U) :
                _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
            elif hasattr(U,"store") and len(U)==1:
                _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                _Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            _Un = None
        return _Un
    def CmUn(_xn,_un):
        if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is applied twice!
            _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
            _CmUn = _Cm * _un
        else:
            _CmUn = 0.
        return _CmUn
    #
    # Remark: the observations are used starting from time step number 1, and
    # are taken from Yo as ordered by these indices. Step 0 is therefore not
    # used, since the first stage starts with the observation of step 1.
    #
    # Number of steps identical to the number of observation steps
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Precompute the inversions of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
    selfA.DirectCalculation = [None,] # Step 0 is not observed
    selfA.DirectInnovation  = [None,] # Step 0 is not observed
    def CostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        selfA.DirectCalculation = [None,]
        selfA.DirectInnovation  = [None,]
        Jo  = 0.
        _Xn = _X
        for step in range(0,duration-1):
            if hasattr(Y,"store"):
                _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
            else:
                _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
            _Un = Un(step)
            #
            # Evolution step
            if selfA._parameters["EstimationOf"] == "State":
                _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
            elif selfA._parameters["EstimationOf"] == "Parameters":
                pass
            #
            if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                __Bounds = ForceNumericBounds( selfA._parameters["Bounds"] )
                _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,0])),axis=1)
                _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,1])),axis=1)
            #
            # Difference with respect to the observations
            if selfA._parameters["EstimationOf"] == "State":
                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
            elif selfA._parameters["EstimationOf"] == "Parameters":
                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
            #
            # Storage of the state
            selfA.DirectCalculation.append( _Xn )
            selfA.DirectInnovation.append( _YmHMX )
            #
            # Addition to the observation part of the functional
            Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
        #
        J = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        GradJb = BI * (_X - Xb)
        GradJo = 0.
        for step in range(duration-1,0,-1):
            # Retrieve the last stored evolution state
            _Xn = selfA.DirectCalculation.pop()
            # Retrieve the last stored innovation
            _YmHMX = selfA.DirectInnovation.pop()
            # Computation of the adjoints
            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
            # Gradient computation by the adjoint state
            GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
            GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            bounds = selfA._parameters["Bounds"],
            maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            iprint = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            bounds = selfA._parameters["Bounds"],
            maxfun = selfA._parameters["MaximumNumberOfSteps"],
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            ftol = selfA._parameters["CostDecrementTolerance"],
            messages = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            avextol = selfA._parameters["CostDecrementTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    #
    return 0
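#
# Illustrative sketch (not part of the ADAO API): the backward adjoint sweep
# of GradientOfCostFunction above, assuming linear model and observation
# matrices M and H (so that the adjoints are simply the transposes), and the
# innovations d_k = y_k - H x_k already computed during the forward sweep of
# CostFunction.
def _sketch_4dvar_adjoint_gradient(x0, xb, innovations, M, H, BI, RI):
    import numpy
    x0, xb = numpy.ravel(x0), numpy.ravel(xb)
    gradJo = numpy.zeros_like(x0)
    # Walk the window backwards: each step injects H^T R^-1 d_k, then
    # propagates the accumulated sensitivity through M^T
    for d in reversed(innovations):
        gradJo = M.T @ (gradJo + H.T @ (RI @ numpy.ravel(d)))
    return BI @ (x0 - xb) - gradJo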
# ==============================================================================
def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Standard Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    Ht = HO["Tangent"].asMatrix(Xb)
    Ha = HO["Adjoint"].asMatrix(Xb)
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mt = EM["Tangent"].asMatrix(Xb)
        Ma = EM["Adjoint"].asMatrix(Xb)
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = Mt * Xn
            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * Pn * Ma
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = Ht * Xn_predicted
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = Ht * Xn_predicted
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is applied twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum in parameter estimation
    # ---------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
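#
# Illustrative sketch (not part of the ADAO API): one forecast/analysis cycle
# of the standard Kalman filter implemented in stdkf, with plain NumPy
# matrices M (evolution), H (observation), Q and R (error covariances).
def _sketch_kalman_step(x, P, y, M, H, Q, R):
    import numpy
    # Forecast (prediction) step
    xf = M @ x
    Pf = M @ P @ M.T + Q
    # Analysis (update) step
    K  = Pf @ H.T @ numpy.linalg.inv(R + H @ Pf @ H.T)
    xa = xf + K @ (y - H @ xf)
    Pa = Pf - K @ H @ Pf
    return xa, Pa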
# ==============================================================================
def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Unscented Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    L = Xb.size
    Alpha = selfA._parameters["Alpha"]
    Beta  = selfA._parameters["Beta"]
    if selfA._parameters["Kappa"] == 0:
        if selfA._parameters["EstimationOf"] == "State":
            Kappa = 0
        elif selfA._parameters["EstimationOf"] == "Parameters":
            Kappa = 3 - L
    else:
        Kappa = selfA._parameters["Kappa"]
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma  = math.sqrt( L + Lambda )
    #
    Ww = []
    Ww.append( 0. )
    for i in range(2*L):
        Ww.append( 1. / (2.*(L + Lambda)) )
    #
    Wm = numpy.array( Ww )
    Wm[0] = Lambda / (L + Lambda)
    Wc = numpy.array( Ww )
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
    #
    Hm = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        if hasattr(B,"asfullmatrix"):
            Pn = B.asfullmatrix(__n)
        else:
            Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        Pndemi = numpy.linalg.cholesky(Pn)
        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
        nbSpts = 2*Xn.size+1
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        XEtnnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
                if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                    XEtnnpi = XEtnnpi + Cm * Un
                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                    XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                    XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # ---> By principle, M = Id, Q = 0
                XEtnnpi = Xnp[:,point]
            XEtnnp.append( XEtnnpi )
        XEtnnp = numpy.hstack( XEtnnp )
        #
        Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
        for point in range(nbSpts):
            Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
        #
        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
        else:
            Pnmdemi = numpy.linalg.cholesky(Pnm)
        #
        Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        Ynnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
            elif selfA._parameters["EstimationOf"] == "Parameters":
                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
            Ynnp.append( Ynnpi )
        Ynnp = numpy.hstack( Ynnp )
        #
        Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
        #
        Pyyn = R
        Pxyn = 0.
        for point in range(nbSpts):
            Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
            Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
        #
        _Innovation = Ynpu - Yncm
        if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is applied twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pxyn * Pyyn.I
        Xn = Xncm + Kn * _Innovation
        Pn = Pnm - Kn * Pyyn * Kn.T
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xncm )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xncm - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum in parameter estimation
    # ---------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
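#
# Illustrative sketch (not part of the ADAO API): the scaled sigma points and
# weights built at the top of uckf, for a state of size L and the usual
# (Alpha, Beta, Kappa) scaling parameters. The 2L+1 points are the mean and
# the mean shifted by +/- Gamma times each column of a Cholesky factor of P.
def _sketch_unscented_points(x, P, Alpha=1.e-2, Beta=2., Kappa=0.):
    import math, numpy
    x = numpy.ravel(x).reshape((-1,1))
    L = x.size
    Lambda = Alpha**2 * (L + Kappa) - L
    Gamma  = math.sqrt(L + Lambda)
    S = numpy.linalg.cholesky(P)
    points = numpy.hstack([x, x + Gamma * S, x - Gamma * S])
    # Mean and covariance weights differ only in their first entry
    Wm = numpy.full(2*L+1, 1. / (2.*(L + Lambda)))
    Wc = Wm.copy()
    Wm[0] = Lambda / (L + Lambda)
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
    return points, Wm, Wc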
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    #
    # Initializations
    # ---------------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precompute the transposition of B and the inversion of R
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb = float( 0.5 * _V.T * BT * _V )
        Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
        J  = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
            maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            iprint = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
            maxfun = selfA._parameters["MaximumNumberOfSteps"],
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            ftol = selfA._parameters["CostDecrementTolerance"],
            messages = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            avextol = selfA._parameters["CostDecrementTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE = numpy.dot(HtM,_ee)
            _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        HXb = Hm( Xb ).reshape((-1,1))
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
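#
# Illustrative sketch (not part of the ADAO API): the change of variable that
# lets van3dvar avoid inverting B. The optimizer works on the control variable
# v with x = xb + B v, so the background term needs B itself rather than B^-1:
#   J(v) = 1/2 v^T B^T v + 1/2 (y - H(xb + B v))^T R^-1 (y - H(xb + B v))
# H is assumed linear and given as a plain matrix in this sketch.
def _sketch_van3dvar_cost(v, xb, y, H, B, RI):
    import numpy
    v, xb, y = map(numpy.ravel, (v, xb, y))
    x = xb + B @ v
    innovation = y - H @ x
    Jb = 0.5 * float(v @ B.T @ v)
    Jo = 0.5 * float(innovation @ RI @ innovation)
    return Jb + Jo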
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')