# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
__doc__ = """
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When
    creating an object by providing a function "Function", one obtains an
    object with three methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD (finite-difference) approximation is controlled
    either with the multiplicative increment "increment", 1% by default, or
    with the fixed increment "dX", which is then multiplied by "increment"
    (hence in %), and centered finite differences are used if the boolean
    "centeredDF" is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name      = str(name)
        self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increments
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection : %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Calculation of the direct operator using the given function.

        NB: the extraArgs are only there to ensure call compatibility, and
        must not be passed here to the user function.
        """
        logging.debug("FDA Calculating DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Calculation of the tangent operator as the Jacobian obtained by finite
        differences, that is, the gradient of H at X. Directional finite
        differences are used around the point X, which is a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], building X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are calculated
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by (2*dX[i])
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], building X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is calculated
        2/ The central value HX = H(X) is calculated
        3/ The differences (HX_plus_dXi-HX) are formed and divided by dX[i]
        4/ Each result, component by component, becomes a column of the
           Jacobian
        """
        logging.debug("FDA Starting the Jacobian calculation")
        logging.debug("FDA   Increment of...........: %s*X"%float(self.__increment))
        logging.debug("FDA   Centered approximation.: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Calculating Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Calculating Jacobian (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of the Jacobian calculation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Calculation of the tangent operator using the Jacobian.

        NB: the extraArgs are only there to ensure call compatibility, and
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Calculation of the linearized value of H at X, applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Calculation of the adjoint operator using the Jacobian.

        NB: the extraArgs are only there to ensure call compatibility, and
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Calculation of the adjoint at X, applied to Y
            # ----------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
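
# ------------------------------------------------------------------------------
# Minimal illustrative sketch (added for documentation, not part of the
# original ADAO API): the centered branch of TangentMatrix builds the Jacobian
# column by column from centered differences. The helper name
# "_demoCenteredFDJacobian" and its toy operator are hypothetical.
def _demoCenteredFDJacobian():
    "Compare a centered FD Jacobian with the analytic one on a toy function"
    def H(x):
        # Toy nonlinear operator from R^2 to R^2
        return numpy.array([x[0]**2, x[0]*x[1]])
    x0 = numpy.array([1., 2.])
    dx = 0.01 * x0
    J  = numpy.empty((2,2))
    for i in range(x0.size):
        xp, xm = x0.copy(), x0.copy()
        xp[i] += dx[i]
        xm[i] -= dx[i]
        # Each centered difference becomes one column of the Jacobian
        J[:,i] = (H(xp) - H(xm)) / (2.*dx[i])
    # Analytic Jacobian at x0, for comparison
    Janalytic = numpy.array([[2.*x0[0], 0.],[x0[1], x0[0]]])
    assert numpy.allclose(J, Janalytic, atol=1.e-6)
    return J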
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generate an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
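
# Illustrative usage sketch (assumption: standalone call outside of the ADAO
# algorithms). Draw a 3-member ensemble around a 2-D background with identity
# covariance and check its shape; the helper name is hypothetical.
def _demoCenteredPerturbations():
    "Shape check for EnsembleOfCenteredPerturbations"
    xb  = numpy.array([1., 2.])
    ens = EnsembleOfCenteredPerturbations( xb, numpy.identity(2), 3 )
    assert ens.shape == (2, 3) # one column per member
    return ens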
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generate an ensemble of _nbmembers states whose anomalies are random and centered"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB, consistent with the code of PS with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
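
# Illustrative sketch (assumption, standalone demo): by construction, the QR
# based anomalies above sum to zero over the members, so the ensemble mean
# stays exactly on the background center. A quick numerical check, with a
# hypothetical helper name:
def _demoCenteredAnomalies():
    "Check that the perturbed ensemble mean stays on the background"
    xb  = numpy.array([1., 2., 3.])
    ens = EnsembleOfBackgroundPerturbations( xb, numpy.identity(3), 5 )
    assert numpy.allclose(ens.mean(axis=1), xb)
    return ens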
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Return the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))

# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Return the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Return the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimate
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Ensure symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Ensure positivity
        __epsilon    = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
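
# Illustrative sketch (assumption, standalone demo): the robust branch above
# computes Cov = A A^T / (m-1) from the centered anomalies A, which matches
# numpy.cov up to the symmetrization and positivity corrections. The helper
# name is hypothetical:
def _demoEnsembleCovariance():
    "Compare the anomaly-based estimate with numpy.cov"
    E = numpy.random.normal(size=(3, 50)) # 3 variables, 50 members
    A = EnsembleOfAnomalies( E )
    C = ( A @ A.T ) / (E.shape[1]-1)
    assert numpy.allclose(C, numpy.cov(E))
    return C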
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Add a perturbation to every member of an ensemble, following a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handling of a null or almost null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handling of a null or almost null covariance
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handling of a null or almost null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handling of a null or almost null covariance
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Handling of a covariance that is a multiple of the identity
        __zero = 0.
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
        #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Handling of a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
        #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Handling of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
        #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handling of a full numpy covariance, knowing that this branch is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
        #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
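
# Illustrative sketch (assumption, standalone demo): perturbing an ensemble
# with a full numpy covariance takes the last branch above; each of the m
# members receives one n-dimensional Gaussian draw. Hypothetical helper name:
def _demoGivenCovariancePerturbation():
    "Perturb a constant ensemble with a prescribed full covariance"
    ens = numpy.ones((2, 4)) # n=2 variables, m=4 members
    cov = numpy.diag([0.01, 0.04])
    out = EnsemblePerturbationWithGivenCovariance( ens, cov, __Seed=1234 )
    assert out.shape == (2, 4)
    return out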
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
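
# Illustrative sketch (assumption, standalone demo): multiplicative inflation
# on anomalies rescales the spread around the ensemble mean without moving the
# mean itself, as in Hunt 2007, section 2.3.5. Hypothetical helper name:
def _demoMultiplicativeInflation():
    "Check that anomaly inflation preserves the ensemble mean"
    E  = numpy.random.normal(size=(3, 10))
    Ei = CovarianceInflation( E, "MultiplicativeOnAnalysisAnomalies", 1.5 )
    assert numpy.allclose(Ei.mean(axis=1), E.mean(axis=1))
    return Ei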
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of the bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
            dYr = HtM @ dXr
            Yr = HXa.reshape((-1,1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.asarray(Hm( Xr ))
        else:
            raise ValueError("Quantile simulations can only be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Extraction of the quantiles
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0
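
# Illustrative sketch (assumption, standalone demo): the quantile extraction
# above sorts the sampled outputs row-wise and picks the column whose index
# approximates the requested probability level. Hypothetical helper name:
def _demoEmpiricalQuantile():
    "Empirical quantile extraction on a sorted sample, as done above"
    nbsamples = 1000
    rs  = numpy.random.RandomState(42)
    YfQ = rs.normal(size=(1, nbsamples))
    YfQ.sort(axis=-1)
    quantile = 0.5
    indice = int(nbsamples * quantile - 1./nbsamples)
    # The 0.5 quantile of a standard normal sample is close to 0
    assert abs(YfQ[0, indice]) < 0.2
    return YfQ[:, indice]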
# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Force the bounds to be numeric values, unless they are globally None"
    # Keep a default value of None when there are no bounds at all
    if __Bounds is None: return None
    # Convert every individual None bound to +/- the largest float
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly defined bounds data")
    __Bounds[numpy.isnan(__Bounds)[:,0],0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds)[:,1],1] =  sys.float_info.max
    return __Bounds
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenter the bounds around 0, unless they are globally None"
    # Keep a default value of None when there are no bounds at all
    if __Bounds is None: return None
    # Recenter the numeric values of the bounds
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
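
# Illustrative sketch (assumption, standalone demo): ForceNumericBounds turns
# individual None bounds into +/- the largest float, and RecentredBounds then
# shifts both columns by the given center. Hypothetical helper name:
def _demoBounds():
    "Numeric and recentred bounds on a small example"
    bounds = [[0., 10.], [None, 5.]]
    nb = ForceNumericBounds( bounds )
    assert nb[1, 0] == -sys.float_info.max
    rb = RecentredBounds( bounds, [1., 2.] )
    assert rb[0, 0] == -1. and rb[0, 1] == 9.
    return rb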
# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
            if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * Pn * Ma
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Beware: if Cm is also inside H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        Xa = Xn # Pointer, not a copy
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum, for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
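
# Illustrative sketch (assumption, standalone demo): the constrained EKF above
# combines the standard Kalman update with a projection of the state onto the
# bounds. A scalar version of the gain/update/projection sequence, with
# hypothetical names:
def _demoConstrainedKalmanUpdate():
    "Scalar Kalman update followed by projection onto bounds"
    xf, pf = 2.0, 1.0         # forecast state and variance
    y, r   = 5.0, 0.5         # observation and its error variance
    h      = 1.0              # linear observation operator
    k  = pf*h / (r + h*pf*h)  # Kalman gain
    xa = xf + k*(y - h*xf)    # analysis state
    pa = pf - k*h*pf          # analysis variance
    # Projection onto the bounds [0, 3], as with "EstimateProjection"
    xa = min(max(xa, 0.), 3.)
    assert 0. <= xa <= 3. and pa < pf
    return xa, pa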
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother (EnKS)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations no shorter than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct calculation (memorization is preferred over recalculation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS    = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi @ ( Ynpu - vZm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL    = vEm + EX @ wTU
            #
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Keep the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last analyses, incompletely updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
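
# Illustrative sketch (assumption, standalone demo): the smoother above keeps a
# rolling window of the last LagL ensemble states and shifts it by one at each
# new observation, exactly like this miniature buffer with hypothetical names:
def _demoFixedLagShift():
    "Rolling window shift as used by the fixed-lag smoother loop"
    LagL = 3
    sEL = [ "A0", "A1", "A2", None ] # LagL past analyses + 1 free slot
    sEL[LagL] = "A3"                 # the newest analysis enters the window
    for irl in range(LagL):          # all the others age by one step
        sEL[irl] = sEL[irl+1]
    sEL[LagL] = None
    assert sEL == ["A1", "A2", "A3", None]
    return sEL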
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX  = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS    = RIdemi * EaHX / math.sqrt(__m-1)
            mS    = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi * ( Ynpu - Hfm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            #
            EaX   = EaX / math.sqrt(__m-1)
            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m-1) * numpy.identity(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part is ~ 1.e-18
            #
            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Boc2011 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part is ~ 1.e-18
            #
            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Boc2015 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part is ~ 1.e-18
            #
            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Boc2016 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part is ~ 1.e-18
            #
            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("seed", numpy.random.get_state())
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
        # ---> For the smoothers
        if selfA._toStore("CurrentEnsembleState"):
            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
    #
    # Additional final storage of the optimum, for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
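
# Illustrative sketch (assumption, standalone demo): the heart of the
# "KalmanFilterFormula" branch above is the transform in ensemble space,
# T = (I + S^T S)^{-1} with S = R^{-1/2} H A / sqrt(m-1), used to shift and
# rescale the forecast anomalies. A miniature version with explicit matrices
# and hypothetical names:
def _demoETKFTransform():
    "One ETKF analysis step on a tiny linear problem"
    m, n = 4, 2                       # members, state size
    rng  = numpy.random.RandomState(7)
    Xf   = rng.normal(size=(n, m))    # forecast ensemble
    y    = numpy.array([[0.5],[1.0]]) # observation (H = identity here)
    RIdemi = numpy.identity(n)        # unit observation error, R^{-1/2} = I
    xfm  = Xf.mean(axis=1).reshape((-1,1))
    EaX  = Xf - xfm                   # state anomalies
    EaHX = EaX                        # observation anomalies (H = identity)
    S     = RIdemi @ EaHX / math.sqrt(m-1)
    delta = RIdemi @ (y - xfm)
    T  = numpy.linalg.inv(numpy.identity(m) + S.T @ S)
    w  = T @ S.T @ delta
    Tdemi = numpy.real(scipy.linalg.sqrtm(T))
    Xa = xfm + (EaX / math.sqrt(m-1)) @ (w.reshape((m,1)) + math.sqrt(m-1) * Tdemi)
    assert Xa.shape == (n, m)
    return Xa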
1431 # ==============================================================================
1432 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1434 Extended Kalman Filter
1436 if selfA._parameters["EstimationOf"] == "Parameters":
1437 selfA._parameters["StoreInternalVariables"] = True
1440 H = HO["Direct"].appliedControledFormTo
1442 if selfA._parameters["EstimationOf"] == "State":
1443 M = EM["Direct"].appliedControledFormTo
1445 if CM is not None and "Tangent" in CM and U is not None:
1446 Cm = CM["Tangent"].asMatrix(Xb)
1450 # Durée d'observation et tailles
1451 if hasattr(Y,"stepnumber"):
1452 duration = Y.stepnumber()
1453 __p = numpy.cumprod(Y.shape())[-1]
1456 __p = numpy.array(Y).size
1458 # Précalcul des inversions de B et R
1459 if selfA._parameters["StoreInternalVariables"] \
1460 or selfA._toStore("CostFunctionJ") \
1461 or selfA._toStore("CostFunctionJb") \
1462 or selfA._toStore("CostFunctionJo") \
1463 or selfA._toStore("CurrentOptimum") \
1464 or selfA._toStore("APosterioriCovariance"):
1470 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1473 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1474 selfA.StoredVariables["Analysis"].store( Xb )
1475 if selfA._toStore("APosterioriCovariance"):
1476 if hasattr(B,"asfullmatrix"):
1477 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1479 selfA.StoredVariables["APosterioriCovariance"].store( B )
1480 selfA._setInternalState("seed", numpy.random.get_state())
1481 elif selfA._parameters["nextStep"]:
1482 Xn = selfA._getInternalState("Xn")
1483 Pn = selfA._getInternalState("Pn")
1485 if selfA._parameters["EstimationOf"] == "Parameters":
1487 previousJMinimum = numpy.finfo(float).max
1489 for step in range(duration-1):
1490 if hasattr(Y,"store"):
1491 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1493 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1495 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1496 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1497 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1498 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1500 if selfA._parameters["EstimationOf"] == "State":
1501 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1502 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1503 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1504 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1507 if hasattr(U,"store") and len(U)>1:
1508 Un = numpy.ravel( U[step] ).reshape((-1,1))
1509 elif hasattr(U,"store") and len(U)==1:
1510 Un = numpy.ravel( U[0] ).reshape((-1,1))
1512 Un = numpy.ravel( U ).reshape((-1,1))
1516 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1517 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1518 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this term is duplicated!
1519 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1520 Xn_predicted = Xn_predicted + Cm * Un
1521 Pn_predicted = Q + Mt * Pn * Ma
1522 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1523 # ---> By principle, M = Id, Q = 0
1527 if selfA._parameters["EstimationOf"] == "State":
1528 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1529 _Innovation = Ynpu - HX_predicted
1530 elif selfA._parameters["EstimationOf"] == "Parameters":
1531 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1532 _Innovation = Ynpu - HX_predicted
1533 if Cm is not None and Un is not None: # Warning: if Cm is also included in H, this term is duplicated!
1534 _Innovation = _Innovation - Cm * Un
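# The three lines below are the classical extended Kalman analysis step, with
# Ht/Ha the tangent/adjoint of the observation operator at the forecast:
#   Kn = Pf Ha (R + Ht Pf Ha)^-1    (gain)
#   Xn = Xf + Kn (y - H(Xf))        (analysis state)
#   Pn = Pf - Kn Ht Pf              (analysis covariance)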
1536 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1537 Xn = Xn_predicted + Kn * _Innovation
1538 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1541 #--------------------------
1542 selfA._setInternalState("Xn", Xn)
1543 selfA._setInternalState("Pn", Pn)
1544 #--------------------------
1546 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1547 # ---> with analysis
1548 selfA.StoredVariables["Analysis"].store( Xa )
1549 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1550 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1551 if selfA._toStore("InnovationAtCurrentAnalysis"):
1552 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1553 # ---> with current state
1554 if selfA._parameters["StoreInternalVariables"] \
1555 or selfA._toStore("CurrentState"):
1556 selfA.StoredVariables["CurrentState"].store( Xn )
1557 if selfA._toStore("ForecastState"):
1558 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1559 if selfA._toStore("ForecastCovariance"):
1560 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1561 if selfA._toStore("BMA"):
1562 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1563 if selfA._toStore("InnovationAtCurrentState"):
1564 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1565 if selfA._toStore("SimulatedObservationAtCurrentState") \
1566 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1567 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1569 if selfA._parameters["StoreInternalVariables"] \
1570 or selfA._toStore("CostFunctionJ") \
1571 or selfA._toStore("CostFunctionJb") \
1572 or selfA._toStore("CostFunctionJo") \
1573 or selfA._toStore("CurrentOptimum") \
1574 or selfA._toStore("APosterioriCovariance"):
1575 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1576 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1578 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1579 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1580 selfA.StoredVariables["CostFunctionJ" ].store( J )
1582 if selfA._toStore("IndexOfOptimum") \
1583 or selfA._toStore("CurrentOptimum") \
1584 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1585 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1586 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1587 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1588 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1589 if selfA._toStore("IndexOfOptimum"):
1590 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1591 if selfA._toStore("CurrentOptimum"):
1592 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1593 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1594 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1595 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1596 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1597 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1598 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1599 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1600 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1601 if selfA._toStore("APosterioriCovariance"):
1602 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1603 if selfA._parameters["EstimationOf"] == "Parameters" \
1604 and J < previousJMinimum:
1605 previousJMinimum = J
1607 if selfA._toStore("APosterioriCovariance"):
1608 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1610 # Additional final storage of the optimum for parameter estimation
1611 # ----------------------------------------------------------------------
1612 if selfA._parameters["EstimationOf"] == "Parameters":
1613 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1614 selfA.StoredVariables["Analysis"].store( XaMin )
1615 if selfA._toStore("APosterioriCovariance"):
1616 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1617 if selfA._toStore("BMA"):
1618 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1622 # ==============================================================================
1623 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1624 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
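# Iterative Ensemble Kalman Filter (docstring not shown in this extract); the
# default VariantM "IEnKF12" presumably refers to the 2012 transform
# formulation of the IEnKF, an assumption since no reference is given here.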
1628 if selfA._parameters["EstimationOf"] == "Parameters":
1629 selfA._parameters["StoreInternalVariables"] = True
1632 H = HO["Direct"].appliedControledFormTo
1634 if selfA._parameters["EstimationOf"] == "State":
1635 M = EM["Direct"].appliedControledFormTo
1637 if CM is not None and "Tangent" in CM and U is not None:
1638 Cm = CM["Tangent"].asMatrix(Xb)
1642 # Observation duration and sizes
1643 if hasattr(Y,"stepnumber"):
1644 duration = Y.stepnumber()
1645 __p = numpy.cumprod(Y.shape())[-1]
1648 __p = numpy.array(Y).size
1650 # Precompute the inverses of B and R
1651 if selfA._parameters["StoreInternalVariables"] \
1652 or selfA._toStore("CostFunctionJ") \
1653 or selfA._toStore("CostFunctionJb") \
1654 or selfA._toStore("CostFunctionJo") \
1655 or selfA._toStore("CurrentOptimum") \
1656 or selfA._toStore("APosterioriCovariance"):
1661 __m = selfA._parameters["NumberOfMembers"]
1663 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1664 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1666 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1667 selfA.StoredVariables["Analysis"].store( Xb )
1668 if selfA._toStore("APosterioriCovariance"):
1669 if hasattr(B,"asfullmatrix"):
1670 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1672 selfA.StoredVariables["APosterioriCovariance"].store( B )
1673 selfA._setInternalState("seed", numpy.random.get_state())
1674 elif selfA._parameters["nextStep"]:
1675 Xn = selfA._getInternalState("Xn")
1677 previousJMinimum = numpy.finfo(float).max
1679 for step in range(duration-1):
1680 numpy.random.set_state(selfA._getInternalState("seed"))
1681 if hasattr(Y,"store"):
1682 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1684 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1687 if hasattr(U,"store") and len(U)>1:
1688 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1689 elif hasattr(U,"store") and len(U)==1:
1690 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1692 Un = numpy.asmatrix(numpy.ravel( U )).T
1696 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1697 Xn = CovarianceInflation( Xn,
1698 selfA._parameters["InflationType"],
1699 selfA._parameters["InflationFactor"],
1702 #--------------------------
1703 if VariantM == "IEnKF12":
1704 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1705 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1709 Ta = numpy.identity(__m)
1710 vw = numpy.zeros(__m)
1711 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1712 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1715 E1 = vx1 + _epsilon * EaX
1717 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1719 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1720 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1722 returnSerieAsArrayMatrix = True )
1723 elif selfA._parameters["EstimationOf"] == "Parameters":
1724 # ---> By principle, M = Id
1726 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1727 vy1 = H((vx2, Un)).reshape((__p,1))
1729 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1731 returnSerieAsArrayMatrix = True )
1732 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1735 EaY = (HE2 - vy2) / _epsilon
1737 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1739 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1740 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1741 Deltaw = - numpy.linalg.solve(mH,GradJ)
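# GradJ and mH above are the gradient and (Gauss-Newton) Hessian of the cost
# in the ensemble coordinates w, so Deltaw is a Newton step on
#   J(w) = 1/2 w'w + 1/2 (y - H(x(w)))' R^-1 (y - H(x(w))),  x(w) = xf + EaX w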
1746 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1750 A2 = EnsembleOfAnomalies( E2 )
1753 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1754 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1757 #--------------------------
1759 raise ValueError("VariantM has to be chosen from the list of authorized methods.")
1761 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1762 Xn = CovarianceInflation( Xn,
1763 selfA._parameters["InflationType"],
1764 selfA._parameters["InflationFactor"],
1767 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1768 #--------------------------
1769 selfA._setInternalState("Xn", Xn)
1770 selfA._setInternalState("seed", numpy.random.get_state())
1771 #--------------------------
1773 if selfA._parameters["StoreInternalVariables"] \
1774 or selfA._toStore("CostFunctionJ") \
1775 or selfA._toStore("CostFunctionJb") \
1776 or selfA._toStore("CostFunctionJo") \
1777 or selfA._toStore("APosterioriCovariance") \
1778 or selfA._toStore("InnovationAtCurrentAnalysis") \
1779 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1780 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1781 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1782 _Innovation = Ynpu - _HXa
1784 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1785 # ---> with analysis
1786 selfA.StoredVariables["Analysis"].store( Xa )
1787 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1788 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1789 if selfA._toStore("InnovationAtCurrentAnalysis"):
1790 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1791 # ---> with current state
1792 if selfA._parameters["StoreInternalVariables"] \
1793 or selfA._toStore("CurrentState"):
1794 selfA.StoredVariables["CurrentState"].store( Xn )
1795 if selfA._toStore("ForecastState"):
1796 selfA.StoredVariables["ForecastState"].store( E2 )
1797 if selfA._toStore("ForecastCovariance"):
1798 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1799 if selfA._toStore("BMA"):
1800 selfA.StoredVariables["BMA"].store( E2 - Xa )
1801 if selfA._toStore("InnovationAtCurrentState"):
1802 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1803 if selfA._toStore("SimulatedObservationAtCurrentState") \
1804 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1805 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1807 if selfA._parameters["StoreInternalVariables"] \
1808 or selfA._toStore("CostFunctionJ") \
1809 or selfA._toStore("CostFunctionJb") \
1810 or selfA._toStore("CostFunctionJo") \
1811 or selfA._toStore("CurrentOptimum") \
1812 or selfA._toStore("APosterioriCovariance"):
1813 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1814 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1816 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1817 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1818 selfA.StoredVariables["CostFunctionJ" ].store( J )
1820 if selfA._toStore("IndexOfOptimum") \
1821 or selfA._toStore("CurrentOptimum") \
1822 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1823 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1824 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1825 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1826 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1827 if selfA._toStore("IndexOfOptimum"):
1828 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1829 if selfA._toStore("CurrentOptimum"):
1830 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1831 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1832 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1833 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1834 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1835 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1836 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1837 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1838 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1839 if selfA._toStore("APosterioriCovariance"):
1840 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1841 if selfA._parameters["EstimationOf"] == "Parameters" \
1842 and J < previousJMinimum:
1843 previousJMinimum = J
1845 if selfA._toStore("APosterioriCovariance"):
1846 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1847 # ---> For the smoothers
1848 if selfA._toStore("CurrentEnsembleState"):
1849 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1851 # Additional final storage of the optimum for parameter estimation
1852 # ----------------------------------------------------------------------
1853 if selfA._parameters["EstimationOf"] == "Parameters":
1854 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1855 selfA.StoredVariables["Analysis"].store( XaMin )
1856 if selfA._toStore("APosterioriCovariance"):
1857 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1858 if selfA._toStore("BMA"):
1859 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1863 # ==============================================================================
1864 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1872 # Non-linear operator for the outer loop
1873 Hm = HO["Direct"].appliedTo
1875 # Precompute the inverses of B and R
1879 # Starting point of the optimization
1880 Xini = selfA._parameters["InitializationPoint"]
1882 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1883 Innovation = Y - HXb
1890 Xr = Xini.reshape((-1,1))
1891 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1895 Ht = HO["Tangent"].asMatrix(Xr)
1896 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
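# Incremental 3DVAR: the outer loop above re-linearizes H around the current
# point Xr, and the inner minimization below acts on the increment dx around
# Xb with this fixed tangent Ht, until the outer cost decrement DeltaJ passes
# under the CostDecrementTolerance.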
1898 # Definition of the cost function
1899 # ------------------------------
1900 def CostFunction(dx):
1901 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1902 if selfA._parameters["StoreInternalVariables"] or \
1903 selfA._toStore("CurrentState") or \
1904 selfA._toStore("CurrentOptimum"):
1905 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1907 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1908 _dInnovation = Innovation - _HdX
1909 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1910 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1911 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1912 if selfA._toStore("InnovationAtCurrentState"):
1913 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1915 Jb = float( 0.5 * _dX.T * BI * _dX )
1916 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1919 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1920 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1921 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1922 selfA.StoredVariables["CostFunctionJ" ].store( J )
1923 if selfA._toStore("IndexOfOptimum") or \
1924 selfA._toStore("CurrentOptimum") or \
1925 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1926 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1927 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1928 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1929 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1930 if selfA._toStore("IndexOfOptimum"):
1931 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1932 if selfA._toStore("CurrentOptimum"):
1933 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1934 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1935 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1936 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1937 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1938 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1939 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1940 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1941 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1944 def GradientOfCostFunction(dx):
1945 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1947 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1948 _dInnovation = Innovation - _HdX
1950 GradJo = - Ht.T @ (RI * _dInnovation)
1951 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1954 # Minimization of the functional
1955 # --------------------------------
1956 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1958 if selfA._parameters["Minimizer"] == "LBFGSB":
1959 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1960 if "0.19" <= scipy.version.version <= "1.1.0":
1961 import lbfgsbhlt as optimiseur
1963 import scipy.optimize as optimiseur
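# For the SciPy releases matched just above, lbfgsbhlt is presumably a locally
# patched copy of the scipy.optimize L-BFGS-B driver (an assumption, the
# module is not shown here); note also that the lexicographic comparison of
# version strings above is only an approximation of a numeric version test.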
1964 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1965 func = CostFunction,
1966 x0 = numpy.zeros(Xini.size),
1967 fprime = GradientOfCostFunction,
1969 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1970 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1971 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1972 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1973 iprint = selfA._parameters["optiprint"],
1975 nfeval = Informations['funcalls']
1976 rc = Informations['warnflag']
1977 elif selfA._parameters["Minimizer"] == "TNC":
1978 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1979 func = CostFunction,
1980 x0 = numpy.zeros(Xini.size),
1981 fprime = GradientOfCostFunction,
1983 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1984 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1985 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1986 ftol = selfA._parameters["CostDecrementTolerance"],
1987 messages = selfA._parameters["optmessages"],
1989 elif selfA._parameters["Minimizer"] == "CG":
1990 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1992 x0 = numpy.zeros(Xini.size),
1993 fprime = GradientOfCostFunction,
1995 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1996 gtol = selfA._parameters["GradientNormTolerance"],
1997 disp = selfA._parameters["optdisp"],
2000 elif selfA._parameters["Minimizer"] == "NCG":
2001 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2003 x0 = numpy.zeros(Xini.size),
2004 fprime = GradientOfCostFunction,
2006 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2007 avextol = selfA._parameters["CostDecrementTolerance"],
2008 disp = selfA._parameters["optdisp"],
2011 elif selfA._parameters["Minimizer"] == "BFGS":
2012 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2014 x0 = numpy.zeros(Xini.size),
2015 fprime = GradientOfCostFunction,
2017 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2018 gtol = selfA._parameters["GradientNormTolerance"],
2019 disp = selfA._parameters["optdisp"],
2023 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2025 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2026 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2028 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2029 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2030 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2032 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
2035 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2036 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2038 # Retrieving the analysis
2039 # ----------------------
2042 selfA.StoredVariables["Analysis"].store( Xa )
2044 if selfA._toStore("OMA") or \
2045 selfA._toStore("SigmaObs2") or \
2046 selfA._toStore("SimulationQuantiles") or \
2047 selfA._toStore("SimulatedObservationAtOptimum"):
2048 if selfA._toStore("SimulatedObservationAtCurrentState"):
2049 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2050 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2051 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2055 # Computation of the analysis covariance
2056 # ---------------------------------
2057 if selfA._toStore("APosterioriCovariance") or \
2058 selfA._toStore("SimulationQuantiles") or \
2059 selfA._toStore("JacobianMatrixAtOptimum") or \
2060 selfA._toStore("KalmanGainAtOptimum"):
2061 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2062 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2063 if selfA._toStore("APosterioriCovariance") or \
2064 selfA._toStore("SimulationQuantiles") or \
2065 selfA._toStore("KalmanGainAtOptimum"):
2066 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2067 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2068 if selfA._toStore("APosterioriCovariance") or \
2069 selfA._toStore("SimulationQuantiles"):
2073 _ee = numpy.matrix(numpy.zeros(nb)).T
2075 _HtEE = numpy.dot(HtM,_ee)
2076 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2077 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2078 HessienneI = numpy.matrix( HessienneI )
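# The loop above assembles, column by column through the unit vectors _ee,
# the Hessian of the 3DVAR cost, HessienneI = B^-1 + H' R^-1 H; the a
# posteriori covariance A checked below is the inverse of this Hessian.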
2080 if min(A.shape) != max(A.shape):
2081 raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2082 if (numpy.diag(A) < 0).any():
2083 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2084 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2086 L = numpy.linalg.cholesky( A )
2088 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2089 if selfA._toStore("APosterioriCovariance"):
2090 selfA.StoredVariables["APosterioriCovariance"].store( A )
2091 if selfA._toStore("JacobianMatrixAtOptimum"):
2092 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2093 if selfA._toStore("KalmanGainAtOptimum"):
2094 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2095 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2096 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2098 # Additional computations and/or storage
2099 # ---------------------------------------
2100 if selfA._toStore("Innovation") or \
2101 selfA._toStore("SigmaObs2") or \
2102 selfA._toStore("MahalanobisConsistency") or \
2103 selfA._toStore("OMB"):
2105 if selfA._toStore("Innovation"):
2106 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2107 if selfA._toStore("BMA"):
2108 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2109 if selfA._toStore("OMA"):
2110 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2111 if selfA._toStore("OMB"):
2112 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2113 if selfA._toStore("SigmaObs2"):
2114 TraceR = R.trace(Y.size)
2115 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2116 if selfA._toStore("MahalanobisConsistency"):
2117 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2118 if selfA._toStore("SimulationQuantiles"):
2119 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2120 if selfA._toStore("SimulatedObservationAtBackground"):
2121 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2122 if selfA._toStore("SimulatedObservationAtOptimum"):
2123 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2127 # ==============================================================================
2128 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2129 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2131 Maximum Likelihood Ensemble Filter
2133 if selfA._parameters["EstimationOf"] == "Parameters":
2134 selfA._parameters["StoreInternalVariables"] = True
2137 H = HO["Direct"].appliedControledFormTo
2139 if selfA._parameters["EstimationOf"] == "State":
2140 M = EM["Direct"].appliedControledFormTo
2142 if CM is not None and "Tangent" in CM and U is not None:
2143 Cm = CM["Tangent"].asMatrix(Xb)
2147 # Observation duration and sizes
2148 if hasattr(Y,"stepnumber"):
2149 duration = Y.stepnumber()
2150 __p = numpy.cumprod(Y.shape())[-1]
2153 __p = numpy.array(Y).size
2155 # Precompute the inverses of B and R
2156 if selfA._parameters["StoreInternalVariables"] \
2157 or selfA._toStore("CostFunctionJ") \
2158 or selfA._toStore("CostFunctionJb") \
2159 or selfA._toStore("CostFunctionJo") \
2160 or selfA._toStore("CurrentOptimum") \
2161 or selfA._toStore("APosterioriCovariance"):
2166 __m = selfA._parameters["NumberOfMembers"]
2168 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2169 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2170 selfA.StoredVariables["Analysis"].store( Xb )
2171 if selfA._toStore("APosterioriCovariance"):
2172 if hasattr(B,"asfullmatrix"):
2173 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2175 selfA.StoredVariables["APosterioriCovariance"].store( B )
2176 selfA._setInternalState("seed", numpy.random.get_state())
2177 elif selfA._parameters["nextStep"]:
2178 Xn = selfA._getInternalState("Xn")
2180 previousJMinimum = numpy.finfo(float).max
2182 for step in range(duration-1):
2183 numpy.random.set_state(selfA._getInternalState("seed"))
2184 if hasattr(Y,"store"):
2185 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2187 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2190 if hasattr(U,"store") and len(U)>1:
2191 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2192 elif hasattr(U,"store") and len(U)==1:
2193 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2195 Un = numpy.asmatrix(numpy.ravel( U )).T
2199 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2200 Xn = CovarianceInflation( Xn,
2201 selfA._parameters["InflationType"],
2202 selfA._parameters["InflationFactor"],
2205 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2206 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2208 returnSerieAsArrayMatrix = True )
2209 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2210 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this term is duplicated!
2211 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2212 Xn_predicted = Xn_predicted + Cm * Un
2213 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2214 # ---> By principle, M = Id, Q = 0
2215 Xn_predicted = EMX = Xn
2217 #--------------------------
2218 if VariantM == "MLEF13":
2219 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2220 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2221 Ua = numpy.identity(__m)
2225 Ta = numpy.identity(__m)
2226 vw = numpy.zeros(__m)
2227 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2228 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2231 E1 = vx1 + _epsilon * EaX
2233 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2235 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2237 returnSerieAsArrayMatrix = True )
2238 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2241 EaY = (HE2 - vy2) / _epsilon
2243 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2245 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2246 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2247 Deltaw = - numpy.linalg.solve(mH,GradJ)
2252 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2257 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2259 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
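# Final MLEF analysis ensemble: the optimized mean vx1 plus the background
# anomalies rescaled by the transform Ta (a square root of the inverse
# Hessian mH) and combined with Ua.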
2260 #--------------------------
2262 raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2264 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2265 Xn = CovarianceInflation( Xn,
2266 selfA._parameters["InflationType"],
2267 selfA._parameters["InflationFactor"],
2270 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2271 #--------------------------
2272 selfA._setInternalState("Xn", Xn)
2273 selfA._setInternalState("seed", numpy.random.get_state())
2274 #--------------------------
2276 if selfA._parameters["StoreInternalVariables"] \
2277 or selfA._toStore("CostFunctionJ") \
2278 or selfA._toStore("CostFunctionJb") \
2279 or selfA._toStore("CostFunctionJo") \
2280 or selfA._toStore("APosterioriCovariance") \
2281 or selfA._toStore("InnovationAtCurrentAnalysis") \
2282 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2283 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2284 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2285 _Innovation = Ynpu - _HXa
2287 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2288 # ---> with analysis
2289 selfA.StoredVariables["Analysis"].store( Xa )
2290 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2291 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2292 if selfA._toStore("InnovationAtCurrentAnalysis"):
2293 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2294 # ---> with current state
2295 if selfA._parameters["StoreInternalVariables"] \
2296 or selfA._toStore("CurrentState"):
2297 selfA.StoredVariables["CurrentState"].store( Xn )
2298 if selfA._toStore("ForecastState"):
2299 selfA.StoredVariables["ForecastState"].store( EMX )
2300 if selfA._toStore("ForecastCovariance"):
2301 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2302 if selfA._toStore("BMA"):
2303 selfA.StoredVariables["BMA"].store( EMX - Xa )
2304 if selfA._toStore("InnovationAtCurrentState"):
2305 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2306 if selfA._toStore("SimulatedObservationAtCurrentState") \
2307 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2308 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2310 if selfA._parameters["StoreInternalVariables"] \
2311 or selfA._toStore("CostFunctionJ") \
2312 or selfA._toStore("CostFunctionJb") \
2313 or selfA._toStore("CostFunctionJo") \
2314 or selfA._toStore("CurrentOptimum") \
2315 or selfA._toStore("APosterioriCovariance"):
2316 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2317 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2319 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2320 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2321 selfA.StoredVariables["CostFunctionJ" ].store( J )
2323 if selfA._toStore("IndexOfOptimum") \
2324 or selfA._toStore("CurrentOptimum") \
2325 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2326 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2327 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2328 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2329 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2330 if selfA._toStore("IndexOfOptimum"):
2331 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2332 if selfA._toStore("CurrentOptimum"):
2333 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2334 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2335 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2336 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2337 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2338 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2339 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2340 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2341 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2342 if selfA._toStore("APosterioriCovariance"):
2343 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2344 if selfA._parameters["EstimationOf"] == "Parameters" \
2345 and J < previousJMinimum:
2346 previousJMinimum = J
2348 if selfA._toStore("APosterioriCovariance"):
2349 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2350 # ---> For the smoothers
2351 if selfA._toStore("CurrentEnsembleState"):
2352 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2354 # Additional final storage of the optimum for parameter estimation
2355 # ----------------------------------------------------------------------
2356 if selfA._parameters["EstimationOf"] == "Parameters":
2357 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2358 selfA.StoredVariables["Analysis"].store( XaMin )
2359 if selfA._toStore("APosterioriCovariance"):
2360 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2361 if selfA._toStore("BMA"):
2362 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2366 # ==============================================================================
2378 Computer implementation of the MMQR algorithm, based on the publication:
2379 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2380 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2383 # Retrieval of the initial data and information
2384 # --------------------------------------------------
2385 variables = numpy.ravel( x0 )
2386 mesures = numpy.ravel( y )
2387 increment = sys.float_info[0]
2390 quantile = float(quantile)
2392 # Computation of the MM parameters
2393 # ---------------------------
2394 tn = float(toler) / n
2395 e0 = -tn / math.log(tn)
2396 epsilon = (e0-tn)/(1+math.log(e0))
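# tn, e0 and epsilon implement the perturbation parameter of Hunter & Lange's
# MM algorithm: epsilon smooths the non-differentiable quantile loss, so the
# weights 1/(epsilon+|residuals|) computed below stay finite at zero residual.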
2398 # Initialization computations
2399 # ------------------------
2400 residus = mesures - numpy.ravel( func( variables ) )
2401 poids = 1./(epsilon+numpy.abs(residus))
2402 veps = 1. - 2. * quantile - residus * poids
2403 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2406 # Iterative search
2407 # -------------------
2408 while (increment > toler) and (iteration < maxfun) :
2411 Derivees = numpy.array(fprime(variables))
2412 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it has gone through YACS pipes
2413 DeriveesT = Derivees.transpose()
2414 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2415 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2416 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2418 variables = variables + step
2419 if bounds is not None:
2420 # Warning: avoid an infinite loop if an interval is too small
2421 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2423 variables = variables - step
2424 residus = mesures - numpy.ravel( func(variables) )
2425 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2427 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2429 variables = variables - step
2430 residus = mesures - numpy.ravel( func(variables) )
2431 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2433 increment = lastsurrogate-surrogate
2434 poids = 1./(epsilon+numpy.abs(residus))
2435 veps = 1. - 2. * quantile - residus * poids
2436 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2440 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2442 return variables, Ecart, [n,p,iteration,increment,0]
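# A minimal usage sketch of mmqr on a linear median regression, added for
# illustration: the helper names below are hypothetical, and it assumes the
# signature (not shown above) accepts the keywords func, x0, fprime, quantile
# and y that the body actually uses.
def _mmqr_usage_sketch():
    import numpy
    _X = numpy.linspace(0., 1., 100)
    def _model(beta):
        # Linear model beta[0] + beta[1]*x evaluated on the 100 sample points
        return beta[0] + beta[1] * _X
    def _jacobian(beta):
        # Constant Jacobian of the linear model, of shape (n, p) = (100, 2)
        return numpy.stack([numpy.ones_like(_X), _X], axis=1)
    _y = _model([1., 2.]) + 0.1 * numpy.random.randn(100)
    # Median regression (quantile = 0.5) from a zero initial guess; returns
    # the fitted parameters, the quantile criterion Ecart and run information
    return mmqr(func=_model, x0=[0., 0.], fprime=_jacobian, quantile=0.5, y=_y)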
2444 # ==============================================================================
2445 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2447 Multi-step and multi-method 3DVAR
2451 if selfA._parameters["EstimationOf"] == "State":
2452 M = EM["Direct"].appliedTo
2454 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2455 Xn = numpy.ravel(Xb).reshape((-1,1))
2456 selfA.StoredVariables["Analysis"].store( Xn )
2457 if selfA._toStore("APosterioriCovariance"):
2458 if hasattr(B,"asfullmatrix"):
2459 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2461 selfA.StoredVariables["APosterioriCovariance"].store( B )
2462 if selfA._toStore("ForecastState"):
2463 selfA.StoredVariables["ForecastState"].store( Xn )
2464 elif selfA._parameters["nextStep"]:
2465 Xn = selfA._getInternalState("Xn")
2467 Xn = numpy.ravel(Xb).reshape((-1,1))
2469 if hasattr(Y,"stepnumber"):
2470 duration = Y.stepnumber()
2475 for step in range(duration-1):
2476 if hasattr(Y,"store"):
2477 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2479 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2481 if selfA._parameters["EstimationOf"] == "State": # Forecast
2482 Xn_predicted = M( Xn )
2483 if selfA._toStore("ForecastState"):
2484 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2485 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2486 # ---> By principle, M = Id, Q = 0
2488 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2490 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
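# Each assimilation window delegates its analysis to the supplied "oneCycle"
# routine (one of the mono-step 3DVAR variants); the analysis it stores is
# read back just below as the new current state Xn.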
2492 Xn = selfA.StoredVariables["Analysis"][-1]
2493 #--------------------------
2494 selfA._setInternalState("Xn", Xn)
2498 # ==============================================================================
2499 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2508 Hm = HO["Direct"].appliedTo
2510 # Optional use of a precomputed H(Xb) vector
2511 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2512 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2515 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2516 if Y.size != HXb.size:
2517 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2518 if max(Y.shape) != max(HXb.shape):
2519 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2521 if selfA._toStore("JacobianMatrixAtBackground"):
2522 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2523 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2524 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2526 Ht = HO["Tangent"].asMatrix(Xb)
2528 HBHTpR = R + Ht * BHT
2529 Innovation = Y - HXb
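# PSAS works on the dual problem in observation space: the control variable w
# has the size of Y, the cost function below is J(w) = 1/2 w'(H B H' + R)w - w'd
# with d the innovation, and the physical state is recovered as Xb + B H' w.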
2531 # Starting point of the optimization
2532 Xini = numpy.zeros(Xb.shape)
2534 # Definition of the cost function
2535 # ------------------------------
2536 def CostFunction(w):
2537 _W = numpy.asmatrix(numpy.ravel( w )).T
2538 if selfA._parameters["StoreInternalVariables"] or \
2539 selfA._toStore("CurrentState") or \
2540 selfA._toStore("CurrentOptimum"):
2541 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2542 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2543 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2544 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2545 if selfA._toStore("InnovationAtCurrentState"):
2546 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2548 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2549 Jo = float( - _W.T * Innovation )
2552 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2553 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2554 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2555 selfA.StoredVariables["CostFunctionJ" ].store( J )
2556 if selfA._toStore("IndexOfOptimum") or \
2557 selfA._toStore("CurrentOptimum") or \
2558 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2559 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2560 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2561 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2562 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2563 if selfA._toStore("IndexOfOptimum"):
2564 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2565 if selfA._toStore("CurrentOptimum"):
2566 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2567 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2568 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2569 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2570 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2571 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2572 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2573 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2574 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2577 def GradientOfCostFunction(w):
2578 _W = numpy.asmatrix(numpy.ravel( w )).T
2579 GradJb = HBHTpR * _W
2580 GradJo = - Innovation
2581 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2584 # Minimization of the functional
2585 # --------------------------------
2586 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2588 if selfA._parameters["Minimizer"] == "LBFGSB":
2589 if "0.19" <= scipy.version.version <= "1.1.0":
2590 import lbfgsbhlt as optimiseur
2592 import scipy.optimize as optimiseur
2593 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2594 func = CostFunction,
2596 fprime = GradientOfCostFunction,
2598 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2599 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2600 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2601 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2602 iprint = selfA._parameters["optiprint"],
2604 nfeval = Informations['funcalls']
2605 rc = Informations['warnflag']
2606 elif selfA._parameters["Minimizer"] == "TNC":
2607 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2608 func = CostFunction,
2610 fprime = GradientOfCostFunction,
2612 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2613 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2614 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2615 ftol = selfA._parameters["CostDecrementTolerance"],
2616 messages = selfA._parameters["optmessages"],
2618 elif selfA._parameters["Minimizer"] == "CG":
2619 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2622 fprime = GradientOfCostFunction,
2624 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2625 gtol = selfA._parameters["GradientNormTolerance"],
2626 disp = selfA._parameters["optdisp"],
2629 elif selfA._parameters["Minimizer"] == "NCG":
2630 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2633 fprime = GradientOfCostFunction,
2635 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2636 avextol = selfA._parameters["CostDecrementTolerance"],
2637 disp = selfA._parameters["optdisp"],
2640 elif selfA._parameters["Minimizer"] == "BFGS":
2641 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2644 fprime = GradientOfCostFunction,
2646 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2647 gtol = selfA._parameters["GradientNormTolerance"],
2648 disp = selfA._parameters["optdisp"],
2652 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2654 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2655 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2657 # Correction to work around a TNC bug on the returned Minimum
2658 # ----------------------------------------------------------------
2659 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2660 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2661 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2663 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2665 # Retrieving the analysis
2666 # ----------------------
2669 selfA.StoredVariables["Analysis"].store( Xa )
2671 if selfA._toStore("OMA") or \
2672 selfA._toStore("SigmaObs2") or \
2673 selfA._toStore("SimulationQuantiles") or \
2674 selfA._toStore("SimulatedObservationAtOptimum"):
2675 if selfA._toStore("SimulatedObservationAtCurrentState"):
2676 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2677 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2678 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2682 # Computation of the analysis covariance
2683 # ---------------------------------
2684 if selfA._toStore("APosterioriCovariance") or \
2685 selfA._toStore("SimulationQuantiles") or \
2686 selfA._toStore("JacobianMatrixAtOptimum") or \
2687 selfA._toStore("KalmanGainAtOptimum"):
2688 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2689 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2690 if selfA._toStore("APosterioriCovariance") or \
2691 selfA._toStore("SimulationQuantiles") or \
2692 selfA._toStore("KalmanGainAtOptimum"):
2693 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2694 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2695 if selfA._toStore("APosterioriCovariance") or \
2696 selfA._toStore("SimulationQuantiles"):
2702 _ee = numpy.matrix(numpy.zeros(nb)).T
2704 _HtEE = numpy.dot(HtM,_ee)
2705 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2706 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2707 HessienneI = numpy.matrix( HessienneI )
2709 if min(A.shape) != max(A.shape):
2710 raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2711 if (numpy.diag(A) < 0).any():
2712 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2713 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2715 L = numpy.linalg.cholesky( A )
2717 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2718 if selfA._toStore("APosterioriCovariance"):
2719 selfA.StoredVariables["APosterioriCovariance"].store( A )
2720 if selfA._toStore("JacobianMatrixAtOptimum"):
2721 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2722 if selfA._toStore("KalmanGainAtOptimum"):
2723 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2724 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2725 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2727 # Additional computations and/or storage
2728 # ---------------------------------------
2729 if selfA._toStore("Innovation") or \
2730 selfA._toStore("SigmaObs2") or \
2731 selfA._toStore("MahalanobisConsistency") or \
2732 selfA._toStore("OMB"):
2734 if selfA._toStore("Innovation"):
2735 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2736 if selfA._toStore("BMA"):
2737 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2738 if selfA._toStore("OMA"):
2739 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2740 if selfA._toStore("OMB"):
2741 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2742 if selfA._toStore("SigmaObs2"):
2743 TraceR = R.trace(Y.size)
2744 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2745 if selfA._toStore("MahalanobisConsistency"):
2746 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2747 if selfA._toStore("SimulationQuantiles"):
2748 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2749 if selfA._toStore("SimulatedObservationAtBackground"):
2750 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2751 if selfA._toStore("SimulatedObservationAtOptimum"):
2752 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2756 # ==============================================================================
2757 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2761 if selfA._parameters["EstimationOf"] == "Parameters":
2762 selfA._parameters["StoreInternalVariables"] = True
2765 H = HO["Direct"].appliedControledFormTo
2767 if selfA._parameters["EstimationOf"] == "State":
2768 M = EM["Direct"].appliedControledFormTo
2770 if CM is not None and "Tangent" in CM and U is not None:
2771 Cm = CM["Tangent"].asMatrix(Xb)
2775 # Observation duration and sizes
2776 if hasattr(Y,"stepnumber"):
2777 duration = Y.stepnumber()
2778 __p = numpy.cumprod(Y.shape())[-1]
2781 __p = numpy.array(Y).size
2783 # Precompute the inverses of B and R
2784 if selfA._parameters["StoreInternalVariables"] \
2785 or selfA._toStore("CostFunctionJ") \
2786 or selfA._toStore("CostFunctionJb") \
2787 or selfA._toStore("CostFunctionJo") \
2788 or selfA._toStore("CurrentOptimum") \
2789 or selfA._toStore("APosterioriCovariance"):
2794 __m = selfA._parameters["NumberOfMembers"]
2796 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2799 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2800 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2802 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2803 selfA.StoredVariables["Analysis"].store( Xb )
2804 if selfA._toStore("APosterioriCovariance"):
2805 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2806 selfA._setInternalState("seed", numpy.random.get_state())
2807 elif selfA._parameters["nextStep"]:
2808 Xn = selfA._getInternalState("Xn")
2810 previousJMinimum = numpy.finfo(float).max
2812 for step in range(duration-1):
2813 numpy.random.set_state(selfA._getInternalState("seed"))
2814 if hasattr(Y,"store"):
2815 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2817 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2820 if hasattr(U,"store") and len(U)>1:
2821 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2822 elif hasattr(U,"store") and len(U)==1:
2823 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2825 Un = numpy.asmatrix(numpy.ravel( U )).T
2829 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2830 Xn = CovarianceInflation( Xn,
2831 selfA._parameters["InflationType"],
2832 selfA._parameters["InflationFactor"],
2835 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2836 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2838 returnSerieAsArrayMatrix = True )
2839 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2840 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2842 returnSerieAsArrayMatrix = True )
2843 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this term is duplicated!
2844 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2845 Xn_predicted = Xn_predicted + Cm * Un
2846 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2847 # ---> By principle, M = Id, Q = 0
2848 Xn_predicted = EMX = Xn
2849 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2851 returnSerieAsArrayMatrix = True )
2853 # Mean of forecast and observation of forecast
2854 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2855 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2857 #--------------------------
2858 if VariantM == "KalmanFilterFormula05":
2859 PfHT, HPfHT = 0., 0.
2860 for i in range(__m):
2861 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2862 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2863 PfHT += Exfi * Eyfi.T
2864 HPfHT += Eyfi * Eyfi.T
2865 PfHT = (1./(__m-1)) * PfHT
2866 HPfHT = (1./(__m-1)) * HPfHT
2867 Kn = PfHT * ( R + HPfHT ).I
2870 for i in range(__m):
2871 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2872 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2873 #--------------------------
2874 elif VariantM == "KalmanFilterFormula16":
2875 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2876 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2878 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2879 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2881 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2883 for i in range(__m):
2884 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
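# Both variants above implement the stochastic (perturbed observations) EnKF:
# "KalmanFilterFormula05" forms the gain explicitly from the sampled
# covariances Pf H' and H Pf H' and draws one observation perturbation per
# member, while "KalmanFilterFormula16" works on the anomaly matrices EaX and
# EaY built with a centered perturbed observation ensemble EpY.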
2885 #--------------------------
2887 raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2889 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2890 Xn = CovarianceInflation( Xn,
2891 selfA._parameters["InflationType"],
2892 selfA._parameters["InflationFactor"],
2895 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2896 #--------------------------
2897 selfA._setInternalState("Xn", Xn)
2898 selfA._setInternalState("seed", numpy.random.get_state())
2899 #--------------------------
2901 if selfA._parameters["StoreInternalVariables"] \
2902 or selfA._toStore("CostFunctionJ") \
2903 or selfA._toStore("CostFunctionJb") \
2904 or selfA._toStore("CostFunctionJo") \
2905 or selfA._toStore("APosterioriCovariance") \
2906 or selfA._toStore("InnovationAtCurrentAnalysis") \
2907 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2908 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2909 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2910 _Innovation = Ynpu - _HXa
2912 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2913 # ---> with analysis
2914 selfA.StoredVariables["Analysis"].store( Xa )
2915 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2916 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2917 if selfA._toStore("InnovationAtCurrentAnalysis"):
2918 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2919 # ---> with current state
2920 if selfA._parameters["StoreInternalVariables"] \
2921 or selfA._toStore("CurrentState"):
2922 selfA.StoredVariables["CurrentState"].store( Xn )
2923 if selfA._toStore("ForecastState"):
2924 selfA.StoredVariables["ForecastState"].store( EMX )
2925 if selfA._toStore("ForecastCovariance"):
2926 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2927 if selfA._toStore("BMA"):
2928 selfA.StoredVariables["BMA"].store( EMX - Xa )
2929 if selfA._toStore("InnovationAtCurrentState"):
2930 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2931 if selfA._toStore("SimulatedObservationAtCurrentState") \
2932 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2933 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2935 if selfA._parameters["StoreInternalVariables"] \
2936 or selfA._toStore("CostFunctionJ") \
2937 or selfA._toStore("CostFunctionJb") \
2938 or selfA._toStore("CostFunctionJo") \
2939 or selfA._toStore("CurrentOptimum") \
2940 or selfA._toStore("APosterioriCovariance"):
2941 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2942 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2943 J = Jb + Jo
2944 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2945 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2946 selfA.StoredVariables["CostFunctionJ" ].store( J )
2948 if selfA._toStore("IndexOfOptimum") \
2949 or selfA._toStore("CurrentOptimum") \
2950 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2951 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2952 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2953 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2954 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2955 if selfA._toStore("IndexOfOptimum"):
2956 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2957 if selfA._toStore("CurrentOptimum"):
2958 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2959 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2960 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2961 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2962 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2963 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2964 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2965 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2966 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2967 if selfA._toStore("APosterioriCovariance"):
2968 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2969 if selfA._parameters["EstimationOf"] == "Parameters" \
2970 and J < previousJMinimum:
2971 previousJMinimum = J
2972 XaMin = Xa
2973 if selfA._toStore("APosterioriCovariance"):
2974 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2975 # ---> For the smoothers
2976 if selfA._toStore("CurrentEnsembleState"):
2977 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2979 # Additional final storage of the optimum in parameter estimation
2980 # ----------------------------------------------------------------------
2981 if selfA._parameters["EstimationOf"] == "Parameters":
2982 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2983 selfA.StoredVariables["Analysis"].store( XaMin )
2984 if selfA._toStore("APosterioriCovariance"):
2985 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2986 if selfA._toStore("BMA"):
2987 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2989 return 0
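# --- Illustrative sketch (editor's note, not part of the ADAO API) ---
# The "KalmanFilterFormula05" branch above implements the stochastic EnKF
# update: the gain Kn = Pf H^T (R + H Pf H^T)^{-1} is built from ensemble
# anomalies, then each member is updated with a perturbed observation.
# A minimal self-contained sketch of that update on a toy ensemble follows;
# all names here are hypothetical and chosen for the example only.
def _example_stochastic_enkf_update(E, HE, y, R):
    "Analysis ensemble for forecast ensemble E (n x m), with HE = H(E) (p x m)."
    import numpy
    n, m = E.shape
    p = y.size
    Xfm = E.mean(axis=1, keepdims=True)            # forecast ensemble mean
    Hfm = HE.mean(axis=1, keepdims=True)           # mean of the observed ensemble
    PfHT = (E - Xfm) @ (HE - Hfm).T / (m - 1)      # Pf H^T from anomalies
    HPfHT = (HE - Hfm) @ (HE - Hfm).T / (m - 1)    # H Pf H^T from anomalies
    K = PfHT @ numpy.linalg.inv(R + HPfHT)         # ensemble Kalman gain
    Ea = E.copy()
    for i in range(m):
        ri = numpy.random.multivariate_normal(numpy.zeros(p), R)
        Ea[:, i] += K @ (y.ravel() + ri - HE[:, i])  # perturbed-observation update
    return Ea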
2991 # ==============================================================================
2992 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2993 """
2994 3DVAR
2995 """
3001 Hm = HO["Direct"].appliedTo
3002 Ha = HO["Adjoint"].appliedInXTo
3004 # Possible use of a precomputed H(Xb) vector
3005 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3006 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3007 else:
3008 HXb = Hm( Xb )
3009 HXb = HXb.reshape((-1,1))
3010 if Y.size != HXb.size:
3011 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3012 if max(Y.shape) != max(HXb.shape):
3013 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3015 if selfA._toStore("JacobianMatrixAtBackground"):
3016 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3017 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3018 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3020 # Precompute the inverses of B and R
3021 BI = B.getI()
3022 RI = R.getI()
3024 # Starting point of the optimization
3025 Xini = selfA._parameters["InitializationPoint"]
3027 # Definition of the cost function
3028 # -------------------------------
3029 def CostFunction(x):
3030 _X = numpy.ravel( x ).reshape((-1,1))
3031 if selfA._parameters["StoreInternalVariables"] or \
3032 selfA._toStore("CurrentState") or \
3033 selfA._toStore("CurrentOptimum"):
3034 selfA.StoredVariables["CurrentState"].store( _X )
3035 _HX = Hm( _X ).reshape((-1,1))
3036 _Innovation = Y - _HX
3037 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3038 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3039 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3040 if selfA._toStore("InnovationAtCurrentState"):
3041 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3043 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3044 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3045 J = Jb + Jo
3047 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3048 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3049 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3050 selfA.StoredVariables["CostFunctionJ" ].store( J )
3051 if selfA._toStore("IndexOfOptimum") or \
3052 selfA._toStore("CurrentOptimum") or \
3053 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3054 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3055 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3056 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3057 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3058 if selfA._toStore("IndexOfOptimum"):
3059 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3060 if selfA._toStore("CurrentOptimum"):
3061 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3062 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3063 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3064 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3065 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3066 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3067 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3068 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3069 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3070 return J
3072 def GradientOfCostFunction(x):
3073 _X = numpy.ravel( x ).reshape((-1,1))
3074 _HX = Hm( _X )
3075 _HX = numpy.ravel( _HX ).reshape((-1,1))
3076 GradJb = BI * (_X - Xb)
3077 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3078 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3079 return GradJ
3081 # Minimization of the cost function
3082 # ---------------------------------
3083 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3085 if selfA._parameters["Minimizer"] == "LBFGSB":
3086 if "0.19" <= scipy.version.version <= "1.1.0":
3087 import lbfgsbhlt as optimiseur
3088 else:
3089 import scipy.optimize as optimiseur
3090 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3091 func = CostFunction,
3092 x0 = Xini,
3093 fprime = GradientOfCostFunction,
3094 args = (),
3095 bounds = selfA._parameters["Bounds"],
3096 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3097 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3098 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3099 iprint = selfA._parameters["optiprint"],
3100 )
3101 nfeval = Informations['funcalls']
3102 rc = Informations['warnflag']
3103 elif selfA._parameters["Minimizer"] == "TNC":
3104 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3105 func = CostFunction,
3106 x0 = Xini,
3107 fprime = GradientOfCostFunction,
3108 args = (),
3109 bounds = selfA._parameters["Bounds"],
3110 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3111 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3112 ftol = selfA._parameters["CostDecrementTolerance"],
3113 messages = selfA._parameters["optmessages"],
3114 )
3115 elif selfA._parameters["Minimizer"] == "CG":
3116 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3117 f = CostFunction,
3118 x0 = Xini,
3119 fprime = GradientOfCostFunction,
3120 args = (),
3121 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3122 gtol = selfA._parameters["GradientNormTolerance"],
3123 disp = selfA._parameters["optdisp"],
3124 full_output = True,
3125 )
3126 elif selfA._parameters["Minimizer"] == "NCG":
3127 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3128 f = CostFunction,
3129 x0 = Xini,
3130 fprime = GradientOfCostFunction,
3131 args = (),
3132 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3133 avextol = selfA._parameters["CostDecrementTolerance"],
3134 disp = selfA._parameters["optdisp"],
3135 full_output = True,
3136 )
3137 elif selfA._parameters["Minimizer"] == "BFGS":
3138 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3139 f = CostFunction,
3140 x0 = Xini,
3141 fprime = GradientOfCostFunction,
3142 args = (),
3143 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3144 gtol = selfA._parameters["GradientNormTolerance"],
3145 disp = selfA._parameters["optdisp"],
3146 full_output = True,
3147 )
3148 else:
3149 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3151 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3152 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3154 # Workaround for a TNC bug affecting the returned Minimum
3155 # --------------------------------------------------------
3156 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3157 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3159 Xa = Minimum
3160 #--------------------------
3162 selfA.StoredVariables["Analysis"].store( Xa )
3164 if selfA._toStore("OMA") or \
3165 selfA._toStore("SigmaObs2") or \
3166 selfA._toStore("SimulationQuantiles") or \
3167 selfA._toStore("SimulatedObservationAtOptimum"):
3168 if selfA._toStore("SimulatedObservationAtCurrentState"):
3169 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3170 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3171 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3172 else:
3173 HXa = Hm( Xa ).reshape((-1,1))
3175 if selfA._toStore("APosterioriCovariance") or \
3176 selfA._toStore("SimulationQuantiles") or \
3177 selfA._toStore("JacobianMatrixAtOptimum") or \
3178 selfA._toStore("KalmanGainAtOptimum"):
3179 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3180 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3181 if selfA._toStore("APosterioriCovariance") or \
3182 selfA._toStore("SimulationQuantiles") or \
3183 selfA._toStore("KalmanGainAtOptimum"):
3184 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3185 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3186 if selfA._toStore("APosterioriCovariance") or \
3187 selfA._toStore("SimulationQuantiles"):
3188 HessienneI = []
3189 nb = Xa.size
3190 for i in range(nb):
3191 _ee = numpy.zeros(nb)
3192 _ee[i] = 1.
3193 _HtEE = numpy.dot(HtM,_ee)
3194 HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) )
3195 A = numpy.linalg.inv(numpy.array( HessienneI ))
3196 if min(A.shape) != max(A.shape):
3197 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3198 if (numpy.diag(A) < 0).any():
3199 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3200 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
3201 try:
3202 L = numpy.linalg.cholesky( A )
3203 except:
3204 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3205 if selfA._toStore("APosterioriCovariance"):
3206 selfA.StoredVariables["APosterioriCovariance"].store( A )
3207 if selfA._toStore("JacobianMatrixAtOptimum"):
3208 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3209 if selfA._toStore("KalmanGainAtOptimum"):
3210 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3211 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3212 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3214 if selfA._toStore("Innovation") or \
3215 selfA._toStore("SigmaObs2") or \
3216 selfA._toStore("MahalanobisConsistency") or \
3217 selfA._toStore("OMB"):
3218 d = Y - HXb
3219 if selfA._toStore("Innovation"):
3220 selfA.StoredVariables["Innovation"].store( d )
3221 if selfA._toStore("BMA"):
3222 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3223 if selfA._toStore("OMA"):
3224 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3225 if selfA._toStore("OMB"):
3226 selfA.StoredVariables["OMB"].store( d )
3227 if selfA._toStore("SigmaObs2"):
3228 TraceR = R.trace(Y.size)
3229 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3230 if selfA._toStore("MahalanobisConsistency"):
3231 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3232 if selfA._toStore("SimulationQuantiles"):
3233 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3234 if selfA._toStore("SimulatedObservationAtBackground"):
3235 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3236 if selfA._toStore("SimulatedObservationAtOptimum"):
3237 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3239 return 0
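# --- Illustrative sketch (editor's note, not part of the ADAO API) ---
# std3dvar minimizes J(x) = 0.5*(x-xb)^T B^-1 (x-xb) + 0.5*(y-H(x))^T R^-1 (y-H(x))
# with an explicit gradient. A minimal self-contained linear toy problem,
# solved with the same scipy routine as the "LBFGSB" branch above (all
# names below are hypothetical and chosen for the example only):
def _example_3dvar_toy():
    import numpy, scipy.optimize
    H = numpy.array([[1., 0.], [0., 1.], [1., 1.]])   # linear observation operator
    xb = numpy.array([0., 0.])                        # background state
    y = numpy.array([1., 2., 3.5])                    # observations
    BI = numpy.linalg.inv(0.5 * numpy.eye(2))         # B^-1
    RI = numpy.linalg.inv(0.1 * numpy.eye(3))         # R^-1
    def J(x):
        dxb = x - xb
        dy = y - H @ x
        return 0.5 * dxb @ BI @ dxb + 0.5 * dy @ RI @ dy
    def gradJ(x):
        return BI @ (x - xb) - H.T @ (RI @ (y - H @ x))
    xa, Jmin, info = scipy.optimize.fmin_l_bfgs_b(J, xb, fprime=gradJ)
    return xa                                         # analysis state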
3241 # ==============================================================================
3242 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3243 """
3244 4DVAR
3245 """
3251 Hm = HO["Direct"].appliedControledFormTo
3252 Mm = EM["Direct"].appliedControledFormTo
3254 if CM is not None and "Tangent" in CM and U is not None:
3255 Cm = CM["Tangent"].asMatrix(Xb)
3256 else:
3257 Cm = None
3259 def Un(_step):
3261 if hasattr(U,"store") and 1<=_step<len(U) :
3262 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3263 elif hasattr(U,"store") and len(U)==1:
3264 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3265 else:
3266 _Un = numpy.asmatrix(numpy.ravel( U )).T
3267 return _Un
3269 def CmUn(_xn,_un):
3271 if Cm is not None and _un is not None: # Beware: if Cm is also included in M, it is counted twice!
3272 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3273 _CmUn = _Cm * _un
3274 else:
3275 _CmUn = 0.
3276 return _CmUn
3278 # Remark: observations are taken into account from time step number 1,
3279 # and are used in Yo as stored according to these indices. Time step 0
3280 # is therefore not used, since the first stage starts with the
3281 # observation of step 1.
3283 # Number of steps identical to the number of observation steps
3284 if hasattr(Y,"stepnumber"):
3285 duration = Y.stepnumber()
3286 else:
3287 duration = 2
3289 # Precompute the inverses of B and R
3290 BI = B.getI()
3291 RI = R.getI()
3293 # Starting point of the optimization
3294 Xini = selfA._parameters["InitializationPoint"]
3296 # Definition of the cost function
3297 # -------------------------------
3298 selfA.DirectCalculation = [None,] # Step 0 is not observed
3299 selfA.DirectInnovation = [None,] # Step 0 is not observed
3300 def CostFunction(x):
3301 _X = numpy.asmatrix(numpy.ravel( x )).T
3302 if selfA._parameters["StoreInternalVariables"] or \
3303 selfA._toStore("CurrentState") or \
3304 selfA._toStore("CurrentOptimum"):
3305 selfA.StoredVariables["CurrentState"].store( _X )
3306 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3307 selfA.DirectCalculation = [None,]
3308 selfA.DirectInnovation = [None,]
3309 Jo = 0.
3310 _Xn = _X
3311 for step in range(0,duration-1):
3312 if hasattr(Y,"store"):
3313 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3314 else:
3315 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3316 _Un = Un(step)
3318 # Evolution step
3319 if selfA._parameters["EstimationOf"] == "State":
3320 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3321 elif selfA._parameters["EstimationOf"] == "Parameters":
3322 pass
3324 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3325 __Bounds = ForceNumericBounds( selfA._parameters["Bounds"] )
3326 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,0])),axis=1)
3327 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,1])),axis=1)
3329 # Step of difference with the observations
3330 if selfA._parameters["EstimationOf"] == "State":
3331 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3332 elif selfA._parameters["EstimationOf"] == "Parameters":
3333 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3335 # Storage of the state
3336 selfA.DirectCalculation.append( _Xn )
3337 selfA.DirectInnovation.append( _YmHMX )
3339 # Contribution added to the observation cost function
3340 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3341 J = Jb + Jo
3343 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3344 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3345 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3346 selfA.StoredVariables["CostFunctionJ" ].store( J )
3347 if selfA._toStore("IndexOfOptimum") or \
3348 selfA._toStore("CurrentOptimum") or \
3349 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3350 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3351 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3352 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3353 if selfA._toStore("IndexOfOptimum"):
3354 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3355 if selfA._toStore("CurrentOptimum"):
3356 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3357 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3358 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3359 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3360 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3361 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3362 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3363 return J
3365 def GradientOfCostFunction(x):
3366 _X = numpy.asmatrix(numpy.ravel( x )).T
3367 GradJb = BI * (_X - Xb)
3368 GradJo = 0.
3369 for step in range(duration-1,0,-1):
3370 # Step retrieving the last stored evolution state
3371 _Xn = selfA.DirectCalculation.pop()
3372 # Step retrieving the last stored innovation
3373 _YmHMX = selfA.DirectInnovation.pop()
3374 # Computation of the adjoints
3375 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3376 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3377 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3378 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3379 # Gradient computation by the adjoint state
3380 GradJo = GradJo + Ha * RI * _YmHMX # Equivalent, for a linear Ha, to: Ha( (_Xn, RI * _YmHMX) )
3381 GradJo = Ma * GradJo # Equivalent, for a linear Ma, to: Ma( (_Xn, GradJo) )
3382 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3383 return GradJ
3385 # Minimization of the cost function
3386 # ---------------------------------
3387 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3389 if selfA._parameters["Minimizer"] == "LBFGSB":
3390 if "0.19" <= scipy.version.version <= "1.1.0":
3391 import lbfgsbhlt as optimiseur
3392 else:
3393 import scipy.optimize as optimiseur
3394 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3395 func = CostFunction,
3396 x0 = Xini,
3397 fprime = GradientOfCostFunction,
3398 args = (),
3399 bounds = selfA._parameters["Bounds"],
3400 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3401 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3402 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3403 iprint = selfA._parameters["optiprint"],
3404 )
3405 nfeval = Informations['funcalls']
3406 rc = Informations['warnflag']
3407 elif selfA._parameters["Minimizer"] == "TNC":
3408 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3409 func = CostFunction,
3410 x0 = Xini,
3411 fprime = GradientOfCostFunction,
3412 args = (),
3413 bounds = selfA._parameters["Bounds"],
3414 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3415 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3416 ftol = selfA._parameters["CostDecrementTolerance"],
3417 messages = selfA._parameters["optmessages"],
3418 )
3419 elif selfA._parameters["Minimizer"] == "CG":
3420 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3421 f = CostFunction,
3422 x0 = Xini,
3423 fprime = GradientOfCostFunction,
3424 args = (),
3425 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3426 gtol = selfA._parameters["GradientNormTolerance"],
3427 disp = selfA._parameters["optdisp"],
3428 full_output = True,
3429 )
3430 elif selfA._parameters["Minimizer"] == "NCG":
3431 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3432 f = CostFunction,
3433 x0 = Xini,
3434 fprime = GradientOfCostFunction,
3435 args = (),
3436 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3437 avextol = selfA._parameters["CostDecrementTolerance"],
3438 disp = selfA._parameters["optdisp"],
3439 full_output = True,
3440 )
3441 elif selfA._parameters["Minimizer"] == "BFGS":
3442 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3443 f = CostFunction,
3444 x0 = Xini,
3445 fprime = GradientOfCostFunction,
3446 args = (),
3447 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3448 gtol = selfA._parameters["GradientNormTolerance"],
3449 disp = selfA._parameters["optdisp"],
3450 full_output = True,
3451 )
3452 else:
3453 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3455 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3456 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3458 # Workaround for a TNC bug affecting the returned Minimum
3459 # --------------------------------------------------------
3460 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3461 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3463 # Obtaining the analysis
3464 # ----------------------
3465 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3467 selfA.StoredVariables["Analysis"].store( Xa )
3469 # Additional computations and/or storage
3470 # ---------------------------------------
3471 if selfA._toStore("BMA"):
3472 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3474 return 0
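# --- Illustrative sketch (editor's note, not part of the ADAO API) ---
# std4dvar accumulates the gradient backwards in time with the adjoints:
# starting from GradJo = 0, each step adds Ha^T R^-1 (y_k - H(x_k)) and then
# propagates through Ma^T. A minimal linear, self-contained sketch of that
# backward recursion (hypothetical names, chosen for the example only):
def _example_4dvar_adjoint_gradient(x0, xb, BI, M, H, RI, innovations):
    "Gradient of the 4DVAR cost for linear M, H; innovations[k] = y_k - H x_k."
    import numpy
    GradJb = BI @ (x0 - xb)                 # background term gradient
    GradJo = numpy.zeros_like(x0)
    for d in reversed(innovations):         # backward-in-time adjoint sweep
        GradJo = M.T @ (GradJo + H.T @ (RI @ d))
    return GradJb - GradJo                  # same sign convention as above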
3476 # ==============================================================================
3477 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3478 """
3479 Standard Kalman Filter
3480 """
3481 if selfA._parameters["EstimationOf"] == "Parameters":
3482 selfA._parameters["StoreInternalVariables"] = True
3486 Ht = HO["Tangent"].asMatrix(Xb)
3487 Ha = HO["Adjoint"].asMatrix(Xb)
3489 if selfA._parameters["EstimationOf"] == "State":
3490 Mt = EM["Tangent"].asMatrix(Xb)
3491 Ma = EM["Adjoint"].asMatrix(Xb)
3493 if CM is not None and "Tangent" in CM and U is not None:
3494 Cm = CM["Tangent"].asMatrix(Xb)
3495 else:
3496 Cm = None
3498 # Observation duration and sizes
3499 if hasattr(Y,"stepnumber"):
3500 duration = Y.stepnumber()
3501 __p = numpy.cumprod(Y.shape())[-1]
3502 else:
3503 duration = 2
3504 __p = numpy.array(Y).size
3506 # Precompute the inverses of B and R
3507 if selfA._parameters["StoreInternalVariables"] \
3508 or selfA._toStore("CostFunctionJ") \
3509 or selfA._toStore("CostFunctionJb") \
3510 or selfA._toStore("CostFunctionJo") \
3511 or selfA._toStore("CurrentOptimum") \
3512 or selfA._toStore("APosterioriCovariance"):
3513 BI = B.getI()
3514 RI = R.getI()
3516 __n = Xb.size
3518 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3519 Xn = Xb
3520 Pn = B
3521 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3522 selfA.StoredVariables["Analysis"].store( Xb )
3523 if selfA._toStore("APosterioriCovariance"):
3524 if hasattr(B,"asfullmatrix"):
3525 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3527 selfA.StoredVariables["APosterioriCovariance"].store( B )
3528 selfA._setInternalState("seed", numpy.random.get_state())
3529 elif selfA._parameters["nextStep"]:
3530 Xn = selfA._getInternalState("Xn")
3531 Pn = selfA._getInternalState("Pn")
3533 if selfA._parameters["EstimationOf"] == "Parameters":
3534 XaMin = Xn
3535 previousJMinimum = numpy.finfo(float).max
3537 for step in range(duration-1):
3538 if hasattr(Y,"store"):
3539 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3540 else:
3541 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3543 if U is not None:
3544 if hasattr(U,"store") and len(U)>1:
3545 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3546 elif hasattr(U,"store") and len(U)==1:
3547 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3548 else:
3549 Un = numpy.asmatrix(numpy.ravel( U )).T
3550 else:
3551 Un = None
3553 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3554 Xn_predicted = Mt * Xn
3555 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is counted twice!
3556 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3557 Xn_predicted = Xn_predicted + Cm * Un
3558 Pn_predicted = Q + Mt * Pn * Ma
3559 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3560 # --- > By principle then, M = Id and Q = 0
3561 Xn_predicted = Xn
3562 Pn_predicted = Pn
3564 if selfA._parameters["EstimationOf"] == "State":
3565 HX_predicted = Ht * Xn_predicted
3566 _Innovation = Ynpu - HX_predicted
3567 elif selfA._parameters["EstimationOf"] == "Parameters":
3568 HX_predicted = Ht * Xn_predicted
3569 _Innovation = Ynpu - HX_predicted
3570 if Cm is not None and Un is not None: # Beware: if Cm is also included in H, it is counted twice!
3571 _Innovation = _Innovation - Cm * Un
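# Editor's note: the next three lines are the standard BLUE analysis step,
# Kn = Pf Ha ( R + Ht Pf Ha )^{-1}, Xa = Xf + Kn d, Pa = (I - Kn Ht) Pf.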
3573 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3574 Xn = Xn_predicted + Kn * _Innovation
3575 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3577 Xa = Xn # Pointer to the current analysis
3578 #--------------------------
3579 selfA._setInternalState("Xn", Xn)
3580 selfA._setInternalState("Pn", Pn)
3581 #--------------------------
3583 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3584 # ---> with analysis
3585 selfA.StoredVariables["Analysis"].store( Xa )
3586 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3587 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3588 if selfA._toStore("InnovationAtCurrentAnalysis"):
3589 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3590 # ---> with current state
3591 if selfA._parameters["StoreInternalVariables"] \
3592 or selfA._toStore("CurrentState"):
3593 selfA.StoredVariables["CurrentState"].store( Xn )
3594 if selfA._toStore("ForecastState"):
3595 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3596 if selfA._toStore("ForecastCovariance"):
3597 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3598 if selfA._toStore("BMA"):
3599 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3600 if selfA._toStore("InnovationAtCurrentState"):
3601 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3602 if selfA._toStore("SimulatedObservationAtCurrentState") \
3603 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3604 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3606 if selfA._parameters["StoreInternalVariables"] \
3607 or selfA._toStore("CostFunctionJ") \
3608 or selfA._toStore("CostFunctionJb") \
3609 or selfA._toStore("CostFunctionJo") \
3610 or selfA._toStore("CurrentOptimum") \
3611 or selfA._toStore("APosterioriCovariance"):
3612 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3613 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3614 J = Jb + Jo
3615 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3616 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3617 selfA.StoredVariables["CostFunctionJ" ].store( J )
3619 if selfA._toStore("IndexOfOptimum") \
3620 or selfA._toStore("CurrentOptimum") \
3621 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3622 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3623 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3624 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3625 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3626 if selfA._toStore("IndexOfOptimum"):
3627 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3628 if selfA._toStore("CurrentOptimum"):
3629 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3630 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3631 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3632 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3633 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3634 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3635 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3636 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3637 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3638 if selfA._toStore("APosterioriCovariance"):
3639 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3640 if selfA._parameters["EstimationOf"] == "Parameters" \
3641 and J < previousJMinimum:
3642 previousJMinimum = J
3643 XaMin = Xa
3644 if selfA._toStore("APosterioriCovariance"):
3645 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3647 # Additional final storage of the optimum in parameter estimation
3648 # ----------------------------------------------------------------------
3649 if selfA._parameters["EstimationOf"] == "Parameters":
3650 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3651 selfA.StoredVariables["Analysis"].store( XaMin )
3652 if selfA._toStore("APosterioriCovariance"):
3653 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3654 if selfA._toStore("BMA"):
3655 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3657 return 0
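# --- Illustrative sketch (editor's note, not part of the ADAO API) ---
# One predict/update cycle of the linear Kalman filter implemented above,
# written as plain numpy for a self-contained toy (hypothetical names):
def _example_kalman_step(Xn, Pn, Mt, Q, Ht, R, y):
    "Return (Xa, Pa) after one forecast with Mt, Q and one analysis on y."
    import numpy
    Xf = Mt @ Xn                                          # state forecast
    Pf = Mt @ Pn @ Mt.T + Q                               # covariance forecast
    d = y - Ht @ Xf                                       # innovation
    K = Pf @ Ht.T @ numpy.linalg.inv(R + Ht @ Pf @ Ht.T)  # Kalman gain
    Xa = Xf + K @ d                                       # analysis state
    Pa = Pf - K @ Ht @ Pf                                 # analysis covariance
    return Xa, Pa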
3659 # ==============================================================================
3660 def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3661 """
3662 Unscented Kalman Filter
3663 """
3664 if selfA._parameters["EstimationOf"] == "Parameters":
3665 selfA._parameters["StoreInternalVariables"] = True
3666 selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
3668 L = Xb.size
3669 Alpha = selfA._parameters["Alpha"]
3670 Beta = selfA._parameters["Beta"]
3671 if selfA._parameters["Kappa"] == 0:
3672 if selfA._parameters["EstimationOf"] == "State":
3673 Kappa = 0
3674 elif selfA._parameters["EstimationOf"] == "Parameters":
3675 Kappa = 3 - L
3676 else:
3677 Kappa = selfA._parameters["Kappa"]
3678 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3679 Gamma = math.sqrt( L + Lambda )
3681 Ww = []
3682 Ww.append( 0. )
3683 for i in range(2*L):
3684 Ww.append( 1. / (2.*(L + Lambda)) )
3686 Wm = numpy.array( Ww )
3687 Wm[0] = Lambda / (L + Lambda)
3688 Wc = numpy.array( Ww )
3689 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3692 Hm = HO["Direct"].appliedControledFormTo
3694 if selfA._parameters["EstimationOf"] == "State":
3695 Mm = EM["Direct"].appliedControledFormTo
3697 if CM is not None and "Tangent" in CM and U is not None:
3698 Cm = CM["Tangent"].asMatrix(Xb)
3699 else:
3700 Cm = None
3702 # Observation duration and sizes
3703 if hasattr(Y,"stepnumber"):
3704 duration = Y.stepnumber()
3705 __p = numpy.cumprod(Y.shape())[-1]
3706 else:
3707 duration = 2
3708 __p = numpy.array(Y).size
3710 # Precompute the inverses of B and R
3711 if selfA._parameters["StoreInternalVariables"] \
3712 or selfA._toStore("CostFunctionJ") \
3713 or selfA._toStore("CostFunctionJb") \
3714 or selfA._toStore("CostFunctionJo") \
3715 or selfA._toStore("CurrentOptimum") \
3716 or selfA._toStore("APosterioriCovariance"):
3717 BI = B.getI()
3718 RI = R.getI()
3720 __n = Xb.size
3722 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3723 Xn = Xb
3724 if hasattr(B,"asfullmatrix"):
3725 Pn = B.asfullmatrix(__n)
3726 else:
3727 Pn = B
3728 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3729 selfA.StoredVariables["Analysis"].store( Xb )
3730 if selfA._toStore("APosterioriCovariance"):
3731 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3732 elif selfA._parameters["nextStep"]:
3733 Xn = selfA._getInternalState("Xn")
3734 Pn = selfA._getInternalState("Pn")
3736 if selfA._parameters["EstimationOf"] == "Parameters":
3737 XaMin = Xn
3738 previousJMinimum = numpy.finfo(float).max
3740 for step in range(duration-1):
3741 if hasattr(Y,"store"):
3742 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3743 else:
3744 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3746 if U is not None:
3747 if hasattr(U,"store") and len(U)>1:
3748 Un = numpy.ravel( U[step] ).reshape((-1,1))
3749 elif hasattr(U,"store") and len(U)==1:
3750 Un = numpy.ravel( U[0] ).reshape((-1,1))
3751 else:
3752 Un = numpy.ravel( U ).reshape((-1,1))
3753 else:
3754 Un = None
3756 Pndemi = numpy.linalg.cholesky(Pn)
3757 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3758 nbSpts = 2*Xn.size+1
3760 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3761 for point in range(nbSpts):
3762 Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3763 Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3765 XEtnnp = []
3766 for point in range(nbSpts):
3767 if selfA._parameters["EstimationOf"] == "State":
3768 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
3769 if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is counted twice!
3770 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
3771 XEtnnpi = XEtnnpi + Cm * Un
3772 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3773 XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3774 XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3775 elif selfA._parameters["EstimationOf"] == "Parameters":
3776 # --- > By principle then, M = Id and Q = 0
3777 XEtnnpi = Xnp[:,point]
3778 XEtnnp.append( XEtnnpi )
3779 XEtnnp = numpy.hstack( XEtnnp )
3781 Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
3783 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3784 Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3785 Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3787 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
3788 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
3789 for point in range(nbSpts):
3790 Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
3792 if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
3793 Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
3795 Pnmdemi = numpy.linalg.cholesky(Pnm)
3797 Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
3799 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3800 for point in range(nbSpts):
3801 Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3802 Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3804 Ynnp = []
3805 for point in range(nbSpts):
3806 if selfA._parameters["EstimationOf"] == "State":
3807 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
3808 elif selfA._parameters["EstimationOf"] == "Parameters":
3809 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
3810 Ynnp.append( Ynnpi )
3811 Ynnp = numpy.hstack( Ynnp )
3813 Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
3815 Pyyn = R
3816 Pxyn = 0.
3817 for point in range(nbSpts):
3818 Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
3819 Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
3821 _Innovation = Ynpu - Yncm
3822 if selfA._parameters["EstimationOf"] == "Parameters":
3823 if Cm is not None and Un is not None: # Beware: if Cm is also included in H, it is counted twice!
3824 _Innovation = _Innovation - Cm * Un
3826 Kn = Pxyn * Pyyn.I
3827 Xn = Xncm + Kn * _Innovation
3828 Pn = Pnm - Kn * Pyyn * Kn.T
3830 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3831 Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3832 Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3834 Xa = Xn # Pointer to the current analysis
3835 #--------------------------
3836 selfA._setInternalState("Xn", Xn)
3837 selfA._setInternalState("Pn", Pn)
3838 #--------------------------
3840 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3841 # ---> with analysis
3842 selfA.StoredVariables["Analysis"].store( Xa )
3843 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3844 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
3845 if selfA._toStore("InnovationAtCurrentAnalysis"):
3846 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3847 # ---> with current state
3848 if selfA._parameters["StoreInternalVariables"] \
3849 or selfA._toStore("CurrentState"):
3850 selfA.StoredVariables["CurrentState"].store( Xn )
3851 if selfA._toStore("ForecastState"):
3852 selfA.StoredVariables["ForecastState"].store( Xncm )
3853 if selfA._toStore("ForecastCovariance"):
3854 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
3855 if selfA._toStore("BMA"):
3856 selfA.StoredVariables["BMA"].store( Xncm - Xa )
3857 if selfA._toStore("InnovationAtCurrentState"):
3858 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3859 if selfA._toStore("SimulatedObservationAtCurrentState") \
3860 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3861 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
3863 if selfA._parameters["StoreInternalVariables"] \
3864 or selfA._toStore("CostFunctionJ") \
3865 or selfA._toStore("CostFunctionJb") \
3866 or selfA._toStore("CostFunctionJo") \
3867 or selfA._toStore("CurrentOptimum") \
3868 or selfA._toStore("APosterioriCovariance"):
3869 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3870 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3871 J = Jb + Jo
3872 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3873 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3874 selfA.StoredVariables["CostFunctionJ" ].store( J )
3876 if selfA._toStore("IndexOfOptimum") \
3877 or selfA._toStore("CurrentOptimum") \
3878 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3879 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3880 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3881 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3882 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3883 if selfA._toStore("IndexOfOptimum"):
3884 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3885 if selfA._toStore("CurrentOptimum"):
3886 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3887 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3888 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3889 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3890 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3891 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3892 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3893 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3894 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3895 if selfA._toStore("APosterioriCovariance"):
3896 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3897 if selfA._parameters["EstimationOf"] == "Parameters" \
3898 and J < previousJMinimum:
3899 previousJMinimum = J
3900 XaMin = Xa
3901 if selfA._toStore("APosterioriCovariance"):
3902 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3904 # Additional final storage of the optimum in parameter estimation
3905 # ----------------------------------------------------------------------
3906 if selfA._parameters["EstimationOf"] == "Parameters":
3907 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3908 selfA.StoredVariables["Analysis"].store( XaMin )
3909 if selfA._toStore("APosterioriCovariance"):
3910 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3911 if selfA._toStore("BMA"):
3912 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3914 return 0
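# --- Illustrative sketch (editor's note, not part of the ADAO API) ---
# The scaled unscented transform above draws 2L+1 sigma points at
# Xn, Xn +/- Gamma*sqrt(Pn), with Gamma = sqrt(L+Lambda), and weights
# Wm (for means) and Wc (for covariances). A self-contained sketch of
# that construction (hypothetical names, chosen for the example only):
def _example_sigma_points(X, P, Alpha=1.e-2, Beta=2., Kappa=0):
    "Return (points, Wm, Wc) of the scaled unscented transform for mean X, covariance P."
    import numpy, math
    L = X.size
    Lambda = Alpha**2 * (L + Kappa) - L
    Gamma = math.sqrt(L + Lambda)
    Pdemi = numpy.linalg.cholesky(P)              # matrix square root of P
    X = X.reshape((L,1))
    points = numpy.hstack([X, X + Gamma*Pdemi, X - Gamma*Pdemi])  # shape (L, 2L+1)
    Wm = numpy.full(2*L+1, 1./(2.*(L + Lambda)))
    Wc = Wm.copy()
    Wm[0] = Lambda / (L + Lambda)
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
    return points, Wm, Wc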
3916 # ==============================================================================
3917 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3918 """
3919 3DVAR variational analysis with no inversion of B
3920 """
3926 Hm = HO["Direct"].appliedTo
3927 Ha = HO["Adjoint"].appliedInXTo
3929 # Precompute the transpose of B and the inverse of R
3930 BT = B.getT()
3931 RI = R.getI()
3933 # Starting point of the optimization (in the preconditioned variable v)
3934 Xini = numpy.zeros(Xb.shape)
3936 # Definition of the cost function
3937 # -------------------------------
3938 def CostFunction(v):
3939 _V = numpy.asmatrix(numpy.ravel( v )).T
3940 _X = Xb + B * _V
3941 if selfA._parameters["StoreInternalVariables"] or \
3942 selfA._toStore("CurrentState") or \
3943 selfA._toStore("CurrentOptimum"):
3944 selfA.StoredVariables["CurrentState"].store( _X )
3945 _HX = Hm( _X )
3946 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3947 _Innovation = Y - _HX
3948 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3949 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3950 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3951 if selfA._toStore("InnovationAtCurrentState"):
3952 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3954 Jb = float( 0.5 * _V.T * BT * _V )
3955 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3956 J = Jb + Jo
3958 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3959 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3960 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3961 selfA.StoredVariables["CostFunctionJ" ].store( J )
3962 if selfA._toStore("IndexOfOptimum") or \
3963 selfA._toStore("CurrentOptimum") or \
3964 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3965 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3966 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3967 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3968 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3969 if selfA._toStore("IndexOfOptimum"):
3970 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3971 if selfA._toStore("CurrentOptimum"):
3972 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3973 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3974 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3975 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3976 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3977 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3978 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3979 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3980 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3981 return J
3983 def GradientOfCostFunction(v):
3984 _V = numpy.asmatrix(numpy.ravel( v )).T
3985 _X = Xb + B * _V
3986 _HX = Hm( _X )
3987 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3988 GradJb = BT * _V
3989 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3990 GradJ = numpy.ravel( GradJb ) + numpy.ravel( BT * GradJo ) # Chain rule through _X = Xb + B * _V
3991 return GradJ
3993 # Minimization of the cost function
3994 # ---------------------------------
3995 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3997 if selfA._parameters["Minimizer"] == "LBFGSB":
3998 if "0.19" <= scipy.version.version <= "1.1.0":
3999 import lbfgsbhlt as optimiseur
4000 else:
4001 import scipy.optimize as optimiseur
4002 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4003 func = CostFunction,
4004 x0 = Xini,
4005 fprime = GradientOfCostFunction,
4006 args = (),
4007 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4008 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
4009 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
4010 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4011 iprint = selfA._parameters["optiprint"],
4012 )
4013 nfeval = Informations['funcalls']
4014 rc = Informations['warnflag']
4015 elif selfA._parameters["Minimizer"] == "TNC":
4016 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4017 func = CostFunction,
4018 x0 = Xini,
4019 fprime = GradientOfCostFunction,
4020 args = (),
4021 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4022 maxfun = selfA._parameters["MaximumNumberOfSteps"],
4023 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4024 ftol = selfA._parameters["CostDecrementTolerance"],
4025 messages = selfA._parameters["optmessages"],
4026 )
4027 elif selfA._parameters["Minimizer"] == "CG":
4028 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4029 f = CostFunction,
4030 x0 = Xini,
4031 fprime = GradientOfCostFunction,
4032 args = (),
4033 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4034 gtol = selfA._parameters["GradientNormTolerance"],
4035 disp = selfA._parameters["optdisp"],
4036 full_output = True,
4037 )
4038 elif selfA._parameters["Minimizer"] == "NCG":
4039 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4040 f = CostFunction,
4041 x0 = Xini,
4042 fprime = GradientOfCostFunction,
4043 args = (),
4044 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4045 avextol = selfA._parameters["CostDecrementTolerance"],
4046 disp = selfA._parameters["optdisp"],
4047 full_output = True,
4048 )
4049 elif selfA._parameters["Minimizer"] == "BFGS":
4050 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4051 f = CostFunction,
4052 x0 = Xini,
4053 fprime = GradientOfCostFunction,
4054 args = (),
4055 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4056 gtol = selfA._parameters["GradientNormTolerance"],
4057 disp = selfA._parameters["optdisp"],
4058 full_output = True,
4059 )
4060 else:
4061 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4063 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4064 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4066 # Workaround for a TNC bug affecting the returned Minimum
4067 # --------------------------------------------------------
4068 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4069 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4070 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
4071 else:
4072 Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
4074 # Obtaining the analysis
4075 # ----------------------
4076 Xa = Minimum
4078 selfA.StoredVariables["Analysis"].store( Xa )
4080 if selfA._toStore("OMA") or \
4081 selfA._toStore("SigmaObs2") or \
4082 selfA._toStore("SimulationQuantiles") or \
4083 selfA._toStore("SimulatedObservationAtOptimum"):
4084 if selfA._toStore("SimulatedObservationAtCurrentState"):
4085 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4086 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4087 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4088 else:
4089 HXa = Hm( Xa )
4091 # Computation of the analysis covariance
4092 # ---------------------------------------
4093 if selfA._toStore("APosterioriCovariance") or \
4094 selfA._toStore("SimulationQuantiles") or \
4095 selfA._toStore("JacobianMatrixAtOptimum") or \
4096 selfA._toStore("KalmanGainAtOptimum"):
4097 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4098 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4099 if selfA._toStore("APosterioriCovariance") or \
4100 selfA._toStore("SimulationQuantiles") or \
4101 selfA._toStore("KalmanGainAtOptimum"):
4102 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4103 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4104 if selfA._toStore("APosterioriCovariance") or \
4105 selfA._toStore("SimulationQuantiles"):
4106 BI = B.getI()
4107 HessienneI = []
4108 nb = Xa.size
4109 for i in range(nb):
4110 _ee = numpy.matrix(numpy.zeros(nb)).T
4111 _ee[i] = 1.
4112 _HtEE = numpy.dot(HtM,_ee)
4113 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
4114 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
4115 HessienneI = numpy.matrix( HessienneI )
4116 A = HessienneI.I
4117 if min(A.shape) != max(A.shape):
4118 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
4119 if (numpy.diag(A) < 0).any():
4120 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
4121 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
4122 try:
4123 L = numpy.linalg.cholesky( A )
4124 except:
4125 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
4126 if selfA._toStore("APosterioriCovariance"):
4127 selfA.StoredVariables["APosterioriCovariance"].store( A )
4128 if selfA._toStore("JacobianMatrixAtOptimum"):
4129 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4130 if selfA._toStore("KalmanGainAtOptimum"):
4131 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4132 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4133 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4135 # Additional computations and/or storage
4136 # ---------------------------------------
4137 if selfA._toStore("Innovation") or \
4138 selfA._toStore("SigmaObs2") or \
4139 selfA._toStore("MahalanobisConsistency") or \
4140 selfA._toStore("OMB"):
4141 d = Y - HXb
4142 if selfA._toStore("Innovation"):
4143 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
4144 if selfA._toStore("BMA"):
4145 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4146 if selfA._toStore("OMA"):
4147 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4148 if selfA._toStore("OMB"):
4149 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
4150 if selfA._toStore("SigmaObs2"):
4151 TraceR = R.trace(Y.size)
4152 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
4153 if selfA._toStore("MahalanobisConsistency"):
4154 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4155 if selfA._toStore("SimulationQuantiles"):
4156 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4157 if selfA._toStore("SimulatedObservationAtBackground"):
4158 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
4159 if selfA._toStore("SimulatedObservationAtOptimum"):
4160 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
4162 return 0
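# --- Illustrative sketch (editor's note, not part of the ADAO API) ---
# van3dvar avoids inverting B through the change of variable x = xb + B v:
# since x - xb = B v, one has Jb = 0.5*(x-xb)^T B^-1 (x-xb) = 0.5 * v^T B^T v
# for a symmetric B, so only products by B (never by B^-1) are needed during
# the minimization. A self-contained numeric check of that identity
# (hypothetical names, chosen for the example only):
def _example_preconditioning_identity():
    import numpy
    rng = numpy.random.default_rng(0)
    A = rng.standard_normal((3, 3))
    B = A @ A.T + 3. * numpy.eye(3)                # symmetric positive-definite B
    v = rng.standard_normal(3)
    dx = B @ v                                     # x - xb = B v
    Jb_direct = 0.5 * dx @ numpy.linalg.inv(B) @ dx
    Jb_precond = 0.5 * v @ B.T @ v
    assert abs(Jb_direct - Jb_precond) < 1.e-10    # both expressions agree
    return Jb_precond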
4164 # ==============================================================================
4165 if __name__ == "__main__":
4166 print('\n AUTODIAGNOSTIC\n')